diff -pruN 2.33+dfsg-1/Changes.txt 3.0+dfsg-1/Changes.txt
--- 2.33+dfsg-1/Changes.txt	2018-12-28 19:36:44.000000000 +0000
+++ 3.0+dfsg-1/Changes.txt	2019-02-09 09:55:42.000000000 +0000
@@ -1,3 +1,29 @@
+2019-02-09, S3QL 3.0
+
+  * Added a new `--systemd` option to simplify running mount.s3ql
+    as a systemd unit.
+
+  * Dropped the `--upstart` option - upstart seems to be unused and
+    unmaintained.
+
+  * Dropped support for legacy ("API key") authentication for Google
+    Storage. Only oauth2 is supported now. This was necessitated by
+    the switch to Google's native API (previously S3QL used Google's
+    S3 compatibility layer).
+
+  * Command line options specified in the authinfo file (in particular
+    --backend-options) are now parsed correctly.
+
+  * S3QL now uses python-cryptography instead of the (no longer
+    maintained) pycrypto module.
+
+  * The Google Storage backend now supports Application Default
+    Credentials (ADC). To use this, install the google.auth module
+    and use ``adc`` as your backend login.
+
+  * `umount.s3ql` now works correctly on systems where `ps` doesn't
+    accept a `-p` option (as long as /proc is available).
+
 2018-12-28, S3QL 2.33
 
   * Fixed a sporadic test failure in t5_cache.py.
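
The `ps` fallback in the last 3.0 entry above relies on the /proc
filesystem. A minimal Python sketch of that kind of liveness check
(illustrative only; `process_alive` is a hypothetical name, not the
actual umount.s3ql code):

    import os

    def process_alive(pid):
        # Instead of shelling out to `ps -p <pid>`, check whether the
        # kernel exposes an entry for the process under /proc.
        return os.path.exists('/proc/%d' % pid)
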
diff -pruN 2.33+dfsg-1/contrib/expire_backups.1 3.0+dfsg-1/contrib/expire_backups.1
--- 2.33+dfsg-1/contrib/expire_backups.1	2018-12-28 19:37:20.000000000 +0000
+++ 3.0+dfsg-1/contrib/expire_backups.1	2019-02-09 09:56:36.000000000 +0000
@@ -1,6 +1,6 @@
 .\" Man page generated from reStructuredText.
 .
-.TH "EXPIRE_BACKUPS" "1" "Dec 28, 2018" "2.33" "S3QL"
+.TH "EXPIRE_BACKUPS" "1" "Feb 09, 2019" "3.0" "S3QL"
 .SH NAME
 expire_backups \- Intelligently expire old backups
 .
diff -pruN 2.33+dfsg-1/contrib/pcp.1 3.0+dfsg-1/contrib/pcp.1
--- 2.33+dfsg-1/contrib/pcp.1	2018-12-28 19:37:20.000000000 +0000
+++ 3.0+dfsg-1/contrib/pcp.1	2019-02-09 09:56:36.000000000 +0000
@@ -1,6 +1,6 @@
 .\" Man page generated from reStructuredText.
 .
-.TH "PCP" "1" "Dec 28, 2018" "2.33" "S3QL"
+.TH "PCP" "1" "Feb 09, 2019" "3.0" "S3QL"
 .SH NAME
 pcp \- Recursive, parallel copy of directory trees
 .
diff -pruN 2.33+dfsg-1/debian/changelog 3.0+dfsg-1/debian/changelog
--- 2.33+dfsg-1/debian/changelog	2018-12-28 20:04:28.000000000 +0000
+++ 3.0+dfsg-1/debian/changelog	2019-02-09 10:09:05.000000000 +0000
@@ -1,3 +1,9 @@
+s3ql (3.0+dfsg-1) unstable; urgency=medium
+
+  * New upstream release.
+
+ -- Nikolaus Rath <Nikolaus@rath.org>  Sat, 09 Feb 2019 10:09:05 +0000
+
 s3ql (2.33+dfsg-1) unstable; urgency=medium
 
   * New upstream release.
diff -pruN 2.33+dfsg-1/debian/control 3.0+dfsg-1/debian/control
--- 2.33+dfsg-1/debian/control	2018-12-28 20:04:28.000000000 +0000
+++ 3.0+dfsg-1/debian/control	2019-02-09 10:09:05.000000000 +0000
@@ -12,7 +12,7 @@ Build-Depends: debhelper (>= 9),
                python3-sphinx (>= 1.2),
                python3-llfuse (>= 1.0), python3-llfuse (<< 2.0),
                python3-llfuse-dbg,
-               python3-crypto,
+               python3-cryptography,
                python3-requests,
                python3-dugong (>= 3.4),
                python3-pytest (>= 3.3.0),
diff -pruN 2.33+dfsg-1/debian/patches/proc_mount.diff 3.0+dfsg-1/debian/patches/proc_mount.diff
--- 2.33+dfsg-1/debian/patches/proc_mount.diff	2018-12-28 20:04:28.000000000 +0000
+++ 3.0+dfsg-1/debian/patches/proc_mount.diff	2019-02-09 10:09:05.000000000 +0000
@@ -10,7 +10,7 @@ Patch-Name: proc_mount.diff
  1 file changed, 2 insertions(+)
 
 diff --git a/tests/t4_adm.py b/tests/t4_adm.py
-index 005c91b..1d8ad7a 100755
+index a38e0fa..4a3f509 100755
 --- a/tests/t4_adm.py
 +++ b/tests/t4_adm.py
 @@ -22,6 +22,7 @@ import tempfile
@@ -19,7 +19,7 @@ index 005c91b..1d8ad7a 100755
  import pytest
 +import os
  
- @pytest.mark.usefixtures('s3ql_cmd_argv', 'pass_reg_output')
+ @pytest.mark.usefixtures('pass_s3ql_cmd_argv', 'pass_reg_output')
  class AdmTests(unittest.TestCase):
 @@ -56,6 +57,7 @@ class AdmTests(unittest.TestCase):
  
diff -pruN 2.33+dfsg-1/debian/patches/show_pdflatex_output.diff 3.0+dfsg-1/debian/patches/show_pdflatex_output.diff
--- 2.33+dfsg-1/debian/patches/show_pdflatex_output.diff	2018-12-28 20:04:28.000000000 +0000
+++ 3.0+dfsg-1/debian/patches/show_pdflatex_output.diff	2019-02-09 10:09:05.000000000 +0000
@@ -12,7 +12,7 @@ Upstream is not interested in this patch
  1 file changed, 2 insertions(+), 3 deletions(-)
 
 diff --git a/setup.py b/setup.py
-index 93a8e52..3ea858f 100755
+index 4f28bd3..7d2dc48 100755
 --- a/setup.py
 +++ b/setup.py
 @@ -98,9 +98,8 @@ class build_docs(setuptools.Command):
diff -pruN 2.33+dfsg-1/doc/latex/manual.aux 3.0+dfsg-1/doc/latex/manual.aux
--- 2.33+dfsg-1/doc/latex/manual.aux	2018-12-28 19:37:21.000000000 +0000
+++ 3.0+dfsg-1/doc/latex/manual.aux	2019-02-09 09:56:37.000000000 +0000
@@ -41,9 +41,9 @@
 \newlabel{installation::doc}{{2}{3}{Installation}{chapter.2}{}}
 \@writefile{toc}{\contentsline {section}{\numberline {2.1}Dependencies}{3}{section.2.1}}
 \newlabel{installation:dependencies}{{2.1}{3}{Dependencies}{section.2.1}{}}
-\@writefile{toc}{\contentsline {section}{\numberline {2.2}Installing S3QL}{3}{section.2.2}}
-\newlabel{installation:installing-s3ql}{{2.2}{3}{Installing S3QL}{section.2.2}{}}
-\newlabel{installation:inst-s3ql}{{2.2}{3}{Installing S3QL}{section.2.2}{}}
+\@writefile{toc}{\contentsline {section}{\numberline {2.2}Installing S3QL}{4}{section.2.2}}
+\newlabel{installation:installing-s3ql}{{2.2}{4}{Installing S3QL}{section.2.2}{}}
+\newlabel{installation:inst-s3ql}{{2.2}{4}{Installing S3QL}{section.2.2}{}}
 \@writefile{toc}{\contentsline {section}{\numberline {2.3}Development Version}{4}{section.2.3}}
 \newlabel{installation:development-version}{{2.3}{4}{Development Version}{section.2.3}{}}
 \@writefile{toc}{\contentsline {section}{\numberline {2.4}Running tests requiring remote servers}{4}{section.2.4}}
@@ -56,34 +56,33 @@
 \newlabel{backends::doc}{{3}{7}{Storage Backends}{chapter.3}{}}
 \@writefile{toc}{\contentsline {section}{\numberline {3.1}Google Storage}{7}{section.3.1}}
 \newlabel{backends:google-storage}{{3.1}{7}{Google Storage}{section.3.1}{}}
-\newlabel{backends:cmdoption-gs-backend-arg-no-ssl}{{3.1}{7}{Google Storage}{section*.3}{}}
-\newlabel{backends:cmdoption-gs-backend-arg-ssl-ca-path}{{3.1}{8}{Google Storage}{section*.4}{}}
-\newlabel{backends:cmdoption-gs-backend-arg-tcp-timeout}{{3.1}{8}{Google Storage}{section*.5}{}}
+\newlabel{backends:cmdoption-gs-backend-arg-ssl-ca-path}{{3.1}{7}{Google Storage}{section*.3}{}}
+\newlabel{backends:cmdoption-gs-backend-arg-tcp-timeout}{{3.1}{8}{Google Storage}{section*.4}{}}
 \@writefile{toc}{\contentsline {section}{\numberline {3.2}Amazon S3}{8}{section.3.2}}
 \newlabel{backends:amazon-s3}{{3.2}{8}{Amazon S3}{section.3.2}{}}
-\newlabel{backends:cmdoption-s3-backend-arg-no-ssl}{{3.2}{8}{Amazon S3}{section*.6}{}}
-\newlabel{backends:cmdoption-s3-backend-arg-ssl-ca-path}{{3.2}{8}{Amazon S3}{section*.7}{}}
-\newlabel{backends:cmdoption-s3-backend-arg-tcp-timeout}{{3.2}{8}{Amazon S3}{section*.8}{}}
-\newlabel{backends:cmdoption-s3-backend-arg-sse}{{3.2}{8}{Amazon S3}{section*.9}{}}
-\newlabel{backends:cmdoption-s3-backend-arg-ia}{{3.2}{8}{Amazon S3}{section*.10}{}}
-\newlabel{backends:cmdoption-s3-backend-arg-rrs}{{3.2}{8}{Amazon S3}{section*.11}{}}
+\newlabel{backends:cmdoption-s3-backend-arg-no-ssl}{{3.2}{8}{Amazon S3}{section*.5}{}}
+\newlabel{backends:cmdoption-s3-backend-arg-ssl-ca-path}{{3.2}{8}{Amazon S3}{section*.6}{}}
+\newlabel{backends:cmdoption-s3-backend-arg-tcp-timeout}{{3.2}{8}{Amazon S3}{section*.7}{}}
+\newlabel{backends:cmdoption-s3-backend-arg-sse}{{3.2}{8}{Amazon S3}{section*.8}{}}
+\newlabel{backends:cmdoption-s3-backend-arg-ia}{{3.2}{8}{Amazon S3}{section*.9}{}}
+\newlabel{backends:cmdoption-s3-backend-arg-rrs}{{3.2}{8}{Amazon S3}{section*.10}{}}
 \@writefile{toc}{\contentsline {section}{\numberline {3.3}OpenStack/Swift}{9}{section.3.3}}
 \newlabel{backends:openstack-swift}{{3.3}{9}{OpenStack/Swift}{section.3.3}{}}
 \newlabel{backends:openstack-backend}{{3.3}{9}{OpenStack/Swift}{section.3.3}{}}
-\newlabel{backends:cmdoption-swift-backend-arg-no-ssl}{{3.3}{9}{OpenStack/Swift}{section*.12}{}}
-\newlabel{backends:cmdoption-swift-backend-arg-ssl-ca-path}{{3.3}{9}{OpenStack/Swift}{section*.13}{}}
-\newlabel{backends:cmdoption-swift-backend-arg-tcp-timeout}{{3.3}{9}{OpenStack/Swift}{section*.14}{}}
-\newlabel{backends:cmdoption-swift-backend-arg-disable-expect100}{{3.3}{9}{OpenStack/Swift}{section*.15}{}}
-\newlabel{backends:cmdoption-swift-backend-arg-no-feature-detection}{{3.3}{9}{OpenStack/Swift}{section*.16}{}}
+\newlabel{backends:cmdoption-swift-backend-arg-no-ssl}{{3.3}{9}{OpenStack/Swift}{section*.11}{}}
+\newlabel{backends:cmdoption-swift-backend-arg-ssl-ca-path}{{3.3}{9}{OpenStack/Swift}{section*.12}{}}
+\newlabel{backends:cmdoption-swift-backend-arg-tcp-timeout}{{3.3}{9}{OpenStack/Swift}{section*.13}{}}
+\newlabel{backends:cmdoption-swift-backend-arg-disable-expect100}{{3.3}{9}{OpenStack/Swift}{section*.14}{}}
+\newlabel{backends:cmdoption-swift-backend-arg-no-feature-detection}{{3.3}{9}{OpenStack/Swift}{section*.15}{}}
 \@writefile{toc}{\contentsline {section}{\numberline {3.4}Rackspace CloudFiles}{10}{section.3.4}}
 \newlabel{backends:rackspace-cloudfiles}{{3.4}{10}{Rackspace CloudFiles}{section.3.4}{}}
 \@writefile{toc}{\contentsline {section}{\numberline {3.5}S3 compatible}{10}{section.3.5}}
 \newlabel{backends:s3-compatible}{{3.5}{10}{S3 compatible}{section.3.5}{}}
-\newlabel{backends:cmdoption-s3c-backend-arg-no-ssl}{{3.5}{10}{S3 compatible}{section*.17}{}}
-\newlabel{backends:cmdoption-s3c-backend-arg-ssl-ca-path}{{3.5}{10}{S3 compatible}{section*.18}{}}
-\newlabel{backends:cmdoption-s3c-backend-arg-tcp-timeout}{{3.5}{10}{S3 compatible}{section*.19}{}}
-\newlabel{backends:cmdoption-s3c-backend-arg-disable-expect100}{{3.5}{10}{S3 compatible}{section*.20}{}}
-\newlabel{backends:cmdoption-s3c-backend-arg-dumb-copy}{{3.5}{10}{S3 compatible}{section*.21}{}}
+\newlabel{backends:cmdoption-s3c-backend-arg-no-ssl}{{3.5}{10}{S3 compatible}{section*.16}{}}
+\newlabel{backends:cmdoption-s3c-backend-arg-ssl-ca-path}{{3.5}{10}{S3 compatible}{section*.17}{}}
+\newlabel{backends:cmdoption-s3c-backend-arg-tcp-timeout}{{3.5}{10}{S3 compatible}{section*.18}{}}
+\newlabel{backends:cmdoption-s3c-backend-arg-disable-expect100}{{3.5}{10}{S3 compatible}{section*.19}{}}
+\newlabel{backends:cmdoption-s3c-backend-arg-dumb-copy}{{3.5}{10}{S3 compatible}{section*.20}{}}
 \@writefile{toc}{\contentsline {section}{\numberline {3.6}Local}{11}{section.3.6}}
 \newlabel{backends:local}{{3.6}{11}{Local}{section.3.6}{}}
 \@writefile{toc}{\contentsline {chapter}{\numberline {4}Important Rules to Avoid Losing Data}{13}{chapter.4}}
@@ -276,8 +275,8 @@
 \newlabel{man/adm::doc}{{16.2}{52}{The \sphinxstyleliteralstrong {\sphinxupquote {s3qladm}} command}{section.16.2}{}}
 \@writefile{toc}{\contentsline {subsection}{\numberline {16.2.1}Synopsis}{52}{subsection.16.2.1}}
 \newlabel{man/adm:synopsis}{{16.2.1}{52}{Synopsis}{subsection.16.2.1}{}}
-\@writefile{toc}{\contentsline {subsection}{\numberline {16.2.2}Description}{53}{subsection.16.2.2}}
-\newlabel{man/adm:description}{{16.2.2}{53}{Description}{subsection.16.2.2}{}}
+\@writefile{toc}{\contentsline {subsection}{\numberline {16.2.2}Description}{52}{subsection.16.2.2}}
+\newlabel{man/adm:description}{{16.2.2}{52}{Description}{subsection.16.2.2}{}}
 \@writefile{toc}{\contentsline {subsection}{\numberline {16.2.3}Options}{53}{subsection.16.2.3}}
 \newlabel{man/adm:options}{{16.2.3}{53}{Options}{subsection.16.2.3}{}}
 \@writefile{toc}{\contentsline {subsection}{\numberline {16.2.4}Actions}{53}{subsection.16.2.4}}
@@ -332,8 +331,8 @@
 \newlabel{man/cp:synopsis}{{16.6.1}{58}{Synopsis}{subsection.16.6.1}{}}
 \@writefile{toc}{\contentsline {subsection}{\numberline {16.6.2}Description}{59}{subsection.16.6.2}}
 \newlabel{man/cp:description}{{16.6.2}{59}{Description}{subsection.16.6.2}{}}
-\@writefile{toc}{\contentsline {subsubsection}{Snapshotting vs Hardlinking}{59}{subsubsection*.22}}
-\newlabel{man/cp:snapshotting-vs-hardlinking}{{16.6.2}{59}{Snapshotting vs Hardlinking}{subsubsection*.22}{}}
+\@writefile{toc}{\contentsline {subsubsection}{Snapshotting vs Hardlinking}{59}{subsubsection*.21}}
+\newlabel{man/cp:snapshotting-vs-hardlinking}{{16.6.2}{59}{Snapshotting vs Hardlinking}{subsubsection*.21}{}}
 \@writefile{toc}{\contentsline {subsection}{\numberline {16.6.3}Options}{59}{subsection.16.6.3}}
 \newlabel{man/cp:options}{{16.6.3}{59}{Options}{subsection.16.6.3}{}}
 \@writefile{toc}{\contentsline {subsection}{\numberline {16.6.4}Exit Codes}{59}{subsection.16.6.4}}
diff -pruN 2.33+dfsg-1/doc/latex/manual.idx 3.0+dfsg-1/doc/latex/manual.idx
--- 2.33+dfsg-1/doc/latex/manual.idx	2018-12-28 19:37:21.000000000 +0000
+++ 3.0+dfsg-1/doc/latex/manual.idx	2019-02-09 09:56:37.000000000 +0000
@@ -1,7 +1,5 @@
-\indexentry{gs\_backend command line option!no-ssl|hyperpage}{7}
-\indexentry{no-ssl!gs\_backend command line option|hyperpage}{7}
-\indexentry{gs\_backend command line option!ssl-ca-path=\textless{}path\textgreater{}|hyperpage}{8}
-\indexentry{ssl-ca-path=\textless{}path\textgreater{}!gs\_backend command line option|hyperpage}{8}
+\indexentry{gs\_backend command line option!ssl-ca-path=\textless{}path\textgreater{}|hyperpage}{7}
+\indexentry{ssl-ca-path=\textless{}path\textgreater{}!gs\_backend command line option|hyperpage}{7}
 \indexentry{gs\_backend command line option!tcp-timeout|hyperpage}{8}
 \indexentry{tcp-timeout!gs\_backend command line option|hyperpage}{8}
 \indexentry{s3\_backend command line option!no-ssl|hyperpage}{8}
diff -pruN 2.33+dfsg-1/doc/latex/manual.tex 3.0+dfsg-1/doc/latex/manual.tex
--- 2.33+dfsg-1/doc/latex/manual.tex	2018-12-28 19:37:16.000000000 +0000
+++ 3.0+dfsg-1/doc/latex/manual.tex	2019-02-09 09:56:31.000000000 +0000
@@ -52,8 +52,8 @@
 
 
 \title{S3QL Documentation}
-\date{Dec 28, 2018}
-\release{2.33}
+\date{Feb 09, 2019}
+\release{3.0}
 \author{Nikolaus Rath}
 \newcommand{\sphinxlogo}{\vbox{}}
 \renewcommand{\releasename}{Release}
@@ -210,22 +210,12 @@ The following Python modules:
 \sphinxhref{https://pypi.python.org/pypi/setuptools}{setuptools}, version 1.0 or newer.
 
 \item {} 
-\sphinxhref{https://www.dlitz.net/software/pycrypto/}{pycrypto}
+\sphinxhref{https://cryptography.io/en/latest/installation/}{cryptography}
 
 \item {} 
 \sphinxhref{https://pypi.python.org/pypi/defusedxml/}{defusedxml}
 
 \item {} 
-\sphinxhref{https://pypi.python.org/pypi/requests/}{requests} (optional,
-required for OAuth2 authentication with Google Storage)
-
-\item {} 
-\sphinxhref{https://github.com/systemd/python-systemd}{systemd} (optional,
-for enabling systemd support). Do \sphinxstyleemphasis{not} install the module from
-PyPi, this is from a third-party developer and incompatible with
-the official module from the systemd developers.
-
-\item {} 
 \sphinxhref{https://github.com/rogerbinns/apsw}{apsw}, version 3.7.0 or
 newer.
 
@@ -240,6 +230,20 @@ version between 3.4 (inclusive) and 4.0
 \item {} 
 \sphinxhref{http://pytest.org/}{pytest}, version 2.7 or newer (optional, to run unit tests)
 
+\item {} 
+\sphinxhref{https://github.com/systemd/python-systemd}{systemd} (optional,
+for enabling systemd support). Do \sphinxstyleemphasis{not} install the module from
+PyPI: that package is from a third-party developer and incompatible with
+the official module from the systemd developers.
+
+\item {} 
+\sphinxhref{https://pypi.python.org/pypi/requests/}{requests} (optional,
+required for OAuth2 authentication with Google Storage)
+
+\item {} 
+\sphinxhref{https://pypi.python.org/project/google-auth/}{google-auth}
+(optional, required for ADC authentication with Google Storage)
+
 \end{itemize}
 
 To check if a specific module \sphinxcode{\sphinxupquote{\textless{}module\textgreater{}}} is installed, execute
@@ -408,40 +412,27 @@ another, you need to use the \sphinxcode
 
 \section{Google Storage}
 \label{\detokenize{backends:google-storage}}
-\sphinxhref{http://code.google.com/apis/storage/}{Google Storage} is an online
-storage service offered by Google. To use the Google Storage backend,
-you need to have (or sign up for) a Google account, and then \sphinxhref{http://code.google.com/apis/storage/docs/signup.html}{activate
-Google Storage}
-for your account. The account is free, you will pay only for the
-amount of storage and traffic that you actually use. There are two
-ways to access Google storage:
-\begin{enumerate}
-\def\theenumi{\arabic{enumi}}
-\def\labelenumi{\theenumi .}
-\makeatletter\def\p@enumii{\p@enumi \theenumi .}\makeatother
-\item {} 
-Use S3-like authentication. To do this, first \sphinxhref{https://developers.google.com/storage/docs/migrating\#defaultproj}{set a  default
-project}.
-Then use the \sphinxhref{https://code.google.com/apis/console/\#:storage:legacy}{key management tool} to
-retrieve your \sphinxstyleemphasis{Google Storage developer access key} and \sphinxstyleemphasis{Google
-Storage developer secret} and use that as backend login and backend
-password.
-
-\item {} 
-Use OAuth2 authentication. In this case you need to use \sphinxcode{\sphinxupquote{oauth2}}
-as the backend login, and a valid OAuth2 refresh token as the
-backend password. To obtain a refresh token, you can use the
-{\hyperref[\detokenize{man/oauth_client:oauth-client}]{\sphinxcrossref{\DUrole{std,std-ref}{s3ql\_oauth\_client}}}} program. It will instruct
-you to open a specific URL in your browser, enter a code and
-authenticate with your Google account. Once this procedure is
-complete, {\hyperref[\detokenize{man/oauth_client:oauth-client}]{\sphinxcrossref{\DUrole{std,std-ref}{s3ql\_oauth\_client}}}} will print out
-the refresh token. Note that you need to do this procedure only
-once, the refresh token will remain valid until you explicitly
+\sphinxhref{https://cloud.google.com/storage/}{Google Storage} is an online
+storage service offered by Google. To use it with S3QL, make sure
+that you enable the JSON API in the \sphinxhref{https://console.cloud.google.com/apis/library/}{GCP Console API Library}.
+
+The Google Storage backend uses OAuth2 authentication or \sphinxhref{https://cloud.google.com/docs/authentication/production}{ADC}
+(Application Default Credentials).
+
+To use OAuth2 authentication, specify \sphinxcode{\sphinxupquote{oauth2}} as the backend login
+and a valid OAuth2 refresh token as the backend password. To obtain a
+refresh token, you can use the {\hyperref[\detokenize{man/oauth_client:oauth-client}]{\sphinxcrossref{\DUrole{std,std-ref}{s3ql\_oauth\_client}}}}
+program. It will instruct you to open a specific URL in your browser,
+enter a code and authenticate with your Google account. Once this
+procedure is complete, {\hyperref[\detokenize{man/oauth_client:oauth-client}]{\sphinxcrossref{\DUrole{std,std-ref}{s3ql\_oauth\_client}}}} will
+print out the refresh token. Note that you need to do this procedure
+only once; the refresh token will remain valid until you explicitly
 revoke it.
 
-\end{enumerate}
+To use ADC, specify \sphinxcode{\sphinxupquote{adc}} as the backend login and use an arbitrary
+value for the backend password.
 
-To create a Google Storage bucket, you can use e.g. the \sphinxhref{https://sandbox.google.com/storage/}{Google
+To create a Google Storage bucket, you can use e.g. the \sphinxhref{https://console.cloud.google.com/storage/browser}{Google
 Storage Manager}. The storage URL for accessing the bucket in S3QL is
 then
 
@@ -456,14 +447,6 @@ S3QL. This allows you to store several S
 Google Storage bucket.
 
 The Google Storage backend accepts the following backend options:
-\index{gs\_backend command line option!no-ssl}\index{no-ssl!gs\_backend command line option}
-
-\begin{fulllineitems}
-\phantomsection\label{\detokenize{backends:cmdoption-gs-backend-arg-no-ssl}}\pysigline{\sphinxbfcode{\sphinxupquote{no-ssl}}\sphinxcode{\sphinxupquote{}}}
-Disable encrypted (https) connections and use plain HTTP instead.
-
-\end{fulllineitems}
-
 \index{gs\_backend command line option!ssl-ca-path=\textless{}path\textgreater{}}\index{ssl-ca-path=\textless{}path\textgreater{}!gs\_backend command line option}
 
 \begin{fulllineitems}
@@ -1160,8 +1143,6 @@ than this will be spread over multiple o
 storage backend. Default: 10240 KiB.
 \item [-{-}plain]  
 Create unencrypted file system.
-\item [-{-}force]  
-Overwrite any existing data.
 \end{optionlist}
 \end{quote}
 
@@ -1401,9 +1382,9 @@ Like \sphinxcode{\sphinxupquote{-{-}allo
 mounting user and the root user.
 \item [-{-}fg]  
 Do not daemonize, stay in foreground
-\item [-{-}upstart]  
-Stay in foreground and raise SIGSTOP once mountpoint
-is up.
+\item [-{-}systemd]  
+Run as systemd unit. Consider specifying \textendash{}log none as
+well to make use of journald.
 \item [-{-}compress \textless{}algorithm-lvl\textgreater{}]  
 Compression algorithm and compression level to use
 when storing new data. \sphinxstyleemphasis{algorithm} may be any of
@@ -1568,7 +1549,8 @@ messages is the only way to find out abo
 If you want to mount and umount an S3QL file system automatically at
 system startup and shutdown, you should do so with a dedicated S3QL
 init job (instead of using \sphinxcode{\sphinxupquote{/etc/fstab}}). When using systemd,
-\sphinxstyleliteralstrong{\sphinxupquote{mount.s3ql}} can be run as a service of type \sphinxcode{\sphinxupquote{notify}}.
+\sphinxstyleliteralstrong{\sphinxupquote{mount.s3ql}} can be started with \sphinxcode{\sphinxupquote{-{-}systemd}} to run
+as a systemd service of type \sphinxcode{\sphinxupquote{notify}}.
 
 \begin{sphinxadmonition}{note}{Note:}
 In principle, it is also possible to automatically mount an S3QL
@@ -2841,8 +2823,6 @@ than this will be spread over multiple o
 storage backend. Default: 10240 KiB.
 \item [-{-}plain]  
 Create unencrypted file system.
-\item [-{-}force]  
-Overwrite any existing data.
 \end{optionlist}
 \end{quote}
 
@@ -3123,9 +3103,9 @@ Like \sphinxcode{\sphinxupquote{-{-}allo
 mounting user and the root user.
 \item [-{-}fg]  
 Do not daemonize, stay in foreground
-\item [-{-}upstart]  
-Stay in foreground and raise SIGSTOP once mountpoint
-is up.
+\item [-{-}systemd]  
+Run as systemd unit. Consider specifying \textendash{}log none as
+well to make use of journald.
 \item [-{-}compress \textless{}algorithm-lvl\textgreater{}]  
 Compression algorithm and compression level to use
 when storing new data. \sphinxstyleemphasis{algorithm} may be any of
diff -pruN 2.33+dfsg-1/doc/latex/manual.toc 3.0+dfsg-1/doc/latex/manual.toc
--- 2.33+dfsg-1/doc/latex/manual.toc	2018-12-28 19:37:21.000000000 +0000
+++ 3.0+dfsg-1/doc/latex/manual.toc	2019-02-09 09:56:37.000000000 +0000
@@ -6,7 +6,7 @@
 \contentsline {section}{\numberline {1.4}Contributing}{2}{section.1.4}
 \contentsline {chapter}{\numberline {2}Installation}{3}{chapter.2}
 \contentsline {section}{\numberline {2.1}Dependencies}{3}{section.2.1}
-\contentsline {section}{\numberline {2.2}Installing S3QL}{3}{section.2.2}
+\contentsline {section}{\numberline {2.2}Installing S3QL}{4}{section.2.2}
 \contentsline {section}{\numberline {2.3}Development Version}{4}{section.2.3}
 \contentsline {section}{\numberline {2.4}Running tests requiring remote servers}{4}{section.2.4}
 \contentsline {chapter}{\numberline {3}Storage Backends}{7}{chapter.3}
@@ -84,7 +84,7 @@
 \contentsline {subsection}{\numberline {16.1.5}See Also}{52}{subsection.16.1.5}
 \contentsline {section}{\numberline {16.2}The \sphinxstyleliteralstrong {\sphinxupquote {s3qladm}} command}{52}{section.16.2}
 \contentsline {subsection}{\numberline {16.2.1}Synopsis}{52}{subsection.16.2.1}
-\contentsline {subsection}{\numberline {16.2.2}Description}{53}{subsection.16.2.2}
+\contentsline {subsection}{\numberline {16.2.2}Description}{52}{subsection.16.2.2}
 \contentsline {subsection}{\numberline {16.2.3}Options}{53}{subsection.16.2.3}
 \contentsline {subsection}{\numberline {16.2.4}Actions}{53}{subsection.16.2.4}
 \contentsline {subsection}{\numberline {16.2.5}Exit Codes}{53}{subsection.16.2.5}
@@ -110,7 +110,7 @@
 \contentsline {section}{\numberline {16.6}The \sphinxstyleliteralstrong {\sphinxupquote {s3qlcp}} command}{58}{section.16.6}
 \contentsline {subsection}{\numberline {16.6.1}Synopsis}{58}{subsection.16.6.1}
 \contentsline {subsection}{\numberline {16.6.2}Description}{59}{subsection.16.6.2}
-\contentsline {subsubsection}{Snapshotting vs Hardlinking}{59}{subsubsection*.22}
+\contentsline {subsubsection}{Snapshotting vs Hardlinking}{59}{subsubsection*.21}
 \contentsline {subsection}{\numberline {16.6.3}Options}{59}{subsection.16.6.3}
 \contentsline {subsection}{\numberline {16.6.4}Exit Codes}{59}{subsection.16.6.4}
 \contentsline {subsection}{\numberline {16.6.5}See Also}{60}{subsection.16.6.5}
diff -pruN 2.33+dfsg-1/doc/latex/sphinxhighlight.sty 3.0+dfsg-1/doc/latex/sphinxhighlight.sty
--- 2.33+dfsg-1/doc/latex/sphinxhighlight.sty	2018-12-28 19:37:15.000000000 +0000
+++ 3.0+dfsg-1/doc/latex/sphinxhighlight.sty	2019-02-09 09:56:30.000000000 +0000
@@ -13,73 +13,73 @@
     \PYG@it{\PYG@bf{\PYG@ff{#1}}}}}}}
 \def\PYG#1#2{\PYG@reset\PYG@toks#1+\relax+\PYG@do{#2}}
 
+\expandafter\def\csname PYG@tok@ss\endcsname{\def\PYG@tc##1{\textcolor[rgb]{0.67,0.40,0.00}{##1}}}
+\expandafter\def\csname PYG@tok@ne\endcsname{\let\PYG@bf=\textbf\def\PYG@tc##1{\textcolor[rgb]{1.00,0.00,0.00}{##1}}}
 \expandafter\def\csname PYG@tok@bp\endcsname{\def\PYG@tc##1{\textcolor[rgb]{0.00,0.44,0.13}{##1}}}
-\expandafter\def\csname PYG@tok@vi\endcsname{\def\PYG@tc##1{\textcolor[rgb]{0.20,0.20,0.73}{##1}}}
-\expandafter\def\csname PYG@tok@o\endcsname{\def\PYG@tc##1{\textcolor[rgb]{0.20,0.20,0.20}{##1}}}
-\expandafter\def\csname PYG@tok@gt\endcsname{\def\PYG@tc##1{\textcolor[rgb]{0.00,0.27,0.87}{##1}}}
+\expandafter\def\csname PYG@tok@c1\endcsname{\def\PYG@tc##1{\textcolor[rgb]{0.53,0.53,0.53}{##1}}}
+\expandafter\def\csname PYG@tok@nv\endcsname{\def\PYG@tc##1{\textcolor[rgb]{0.60,0.40,0.20}{##1}}}
+\expandafter\def\csname PYG@tok@cpf\endcsname{\def\PYG@tc##1{\textcolor[rgb]{0.53,0.53,0.53}{##1}}}
+\expandafter\def\csname PYG@tok@il\endcsname{\let\PYG@bf=\textbf\def\PYG@tc##1{\textcolor[rgb]{0.00,0.00,0.87}{##1}}}
+\expandafter\def\csname PYG@tok@gs\endcsname{\let\PYG@bf=\textbf}
+\expandafter\def\csname PYG@tok@err\endcsname{\def\PYG@tc##1{\textcolor[rgb]{1.00,0.00,0.00}{##1}}\def\PYG@bc##1{\setlength{\fboxsep}{0pt}\colorbox[rgb]{1.00,0.67,0.67}{\strut ##1}}}
+\expandafter\def\csname PYG@tok@gp\endcsname{\let\PYG@bf=\textbf\def\PYG@tc##1{\textcolor[rgb]{0.78,0.36,0.04}{##1}}}
+\expandafter\def\csname PYG@tok@mh\endcsname{\let\PYG@bf=\textbf\def\PYG@tc##1{\textcolor[rgb]{0.00,0.33,0.53}{##1}}}
+\expandafter\def\csname PYG@tok@ni\endcsname{\let\PYG@bf=\textbf\def\PYG@tc##1{\textcolor[rgb]{0.53,0.00,0.00}{##1}}}
 \expandafter\def\csname PYG@tok@gd\endcsname{\def\PYG@tc##1{\textcolor[rgb]{0.63,0.00,0.00}{##1}}}
-\expandafter\def\csname PYG@tok@na\endcsname{\def\PYG@tc##1{\textcolor[rgb]{0.00,0.00,0.80}{##1}}}
+\expandafter\def\csname PYG@tok@s1\endcsname{\def\PYG@bc##1{\setlength{\fboxsep}{0pt}\colorbox[rgb]{1.00,0.94,0.94}{\strut ##1}}}
+\expandafter\def\csname PYG@tok@no\endcsname{\let\PYG@bf=\textbf\def\PYG@tc##1{\textcolor[rgb]{0.00,0.20,0.40}{##1}}}
+\expandafter\def\csname PYG@tok@mo\endcsname{\let\PYG@bf=\textbf\def\PYG@tc##1{\textcolor[rgb]{0.27,0.00,0.93}{##1}}}
+\expandafter\def\csname PYG@tok@se\endcsname{\let\PYG@bf=\textbf\def\PYG@tc##1{\textcolor[rgb]{0.40,0.40,0.40}{##1}}\def\PYG@bc##1{\setlength{\fboxsep}{0pt}\colorbox[rgb]{1.00,0.94,0.94}{\strut ##1}}}
 \expandafter\def\csname PYG@tok@ch\endcsname{\def\PYG@tc##1{\textcolor[rgb]{0.53,0.53,0.53}{##1}}}
-\expandafter\def\csname PYG@tok@k\endcsname{\let\PYG@bf=\textbf\def\PYG@tc##1{\textcolor[rgb]{0.00,0.53,0.00}{##1}}}
-\expandafter\def\csname PYG@tok@nl\endcsname{\let\PYG@bf=\textbf\def\PYG@tc##1{\textcolor[rgb]{0.60,0.47,0.00}{##1}}}
-\expandafter\def\csname PYG@tok@nb\endcsname{\def\PYG@tc##1{\textcolor[rgb]{0.00,0.44,0.13}{##1}}}
-\expandafter\def\csname PYG@tok@ni\endcsname{\let\PYG@bf=\textbf\def\PYG@tc##1{\textcolor[rgb]{0.53,0.00,0.00}{##1}}}
-\expandafter\def\csname PYG@tok@gr\endcsname{\def\PYG@tc##1{\textcolor[rgb]{1.00,0.00,0.00}{##1}}}
-\expandafter\def\csname PYG@tok@kd\endcsname{\let\PYG@bf=\textbf\def\PYG@tc##1{\textcolor[rgb]{0.00,0.53,0.00}{##1}}}
-\expandafter\def\csname PYG@tok@c\endcsname{\def\PYG@tc##1{\textcolor[rgb]{0.53,0.53,0.53}{##1}}}
-\expandafter\def\csname PYG@tok@kc\endcsname{\let\PYG@bf=\textbf\def\PYG@tc##1{\textcolor[rgb]{0.00,0.53,0.00}{##1}}}
-\expandafter\def\csname PYG@tok@nd\endcsname{\let\PYG@bf=\textbf\def\PYG@tc##1{\textcolor[rgb]{0.33,0.33,0.33}{##1}}}
-\expandafter\def\csname PYG@tok@gs\endcsname{\let\PYG@bf=\textbf}
-\expandafter\def\csname PYG@tok@gh\endcsname{\let\PYG@bf=\textbf\def\PYG@tc##1{\textcolor[rgb]{0.00,0.00,0.50}{##1}}}
-\expandafter\def\csname PYG@tok@kp\endcsname{\let\PYG@bf=\textbf\def\PYG@tc##1{\textcolor[rgb]{0.00,0.20,0.53}{##1}}}
-\expandafter\def\csname PYG@tok@mf\endcsname{\let\PYG@bf=\textbf\def\PYG@tc##1{\textcolor[rgb]{0.40,0.00,0.93}{##1}}}
-\expandafter\def\csname PYG@tok@gi\endcsname{\def\PYG@tc##1{\textcolor[rgb]{0.00,0.63,0.00}{##1}}}
 \expandafter\def\csname PYG@tok@vc\endcsname{\def\PYG@tc##1{\textcolor[rgb]{0.20,0.40,0.60}{##1}}}
-\expandafter\def\csname PYG@tok@no\endcsname{\let\PYG@bf=\textbf\def\PYG@tc##1{\textcolor[rgb]{0.00,0.20,0.40}{##1}}}
-\expandafter\def\csname PYG@tok@sc\endcsname{\def\PYG@tc##1{\textcolor[rgb]{0.00,0.27,0.87}{##1}}}
-\expandafter\def\csname PYG@tok@gp\endcsname{\let\PYG@bf=\textbf\def\PYG@tc##1{\textcolor[rgb]{0.78,0.36,0.04}{##1}}}
-\expandafter\def\csname PYG@tok@c1\endcsname{\def\PYG@tc##1{\textcolor[rgb]{0.53,0.53,0.53}{##1}}}
+\expandafter\def\csname PYG@tok@na\endcsname{\def\PYG@tc##1{\textcolor[rgb]{0.00,0.00,0.80}{##1}}}
+\expandafter\def\csname PYG@tok@sd\endcsname{\def\PYG@tc##1{\textcolor[rgb]{0.87,0.27,0.13}{##1}}}
+\expandafter\def\csname PYG@tok@nt\endcsname{\def\PYG@tc##1{\textcolor[rgb]{0.00,0.47,0.00}{##1}}}
 \expandafter\def\csname PYG@tok@ge\endcsname{\let\PYG@it=\textit}
-\expandafter\def\csname PYG@tok@sr\endcsname{\def\PYG@tc##1{\textcolor[rgb]{0.00,0.00,0.00}{##1}}\def\PYG@bc##1{\setlength{\fboxsep}{0pt}\colorbox[rgb]{1.00,0.94,1.00}{\strut ##1}}}
-\expandafter\def\csname PYG@tok@kt\endcsname{\let\PYG@bf=\textbf\def\PYG@tc##1{\textcolor[rgb]{0.20,0.20,0.60}{##1}}}
-\expandafter\def\csname PYG@tok@si\endcsname{\def\PYG@bc##1{\setlength{\fboxsep}{0pt}\colorbox[rgb]{0.93,0.93,0.93}{\strut ##1}}}
-\expandafter\def\csname PYG@tok@s1\endcsname{\def\PYG@bc##1{\setlength{\fboxsep}{0pt}\colorbox[rgb]{1.00,0.94,0.94}{\strut ##1}}}
-\expandafter\def\csname PYG@tok@ow\endcsname{\let\PYG@bf=\textbf\def\PYG@tc##1{\textcolor[rgb]{0.00,0.00,0.00}{##1}}}
+\expandafter\def\csname PYG@tok@cp\endcsname{\def\PYG@tc##1{\textcolor[rgb]{0.33,0.47,0.60}{##1}}}
 \expandafter\def\csname PYG@tok@cs\endcsname{\let\PYG@bf=\textbf\def\PYG@tc##1{\textcolor[rgb]{0.80,0.00,0.00}{##1}}}
-\expandafter\def\csname PYG@tok@mh\endcsname{\let\PYG@bf=\textbf\def\PYG@tc##1{\textcolor[rgb]{0.00,0.33,0.53}{##1}}}
-\expandafter\def\csname PYG@tok@sa\endcsname{\def\PYG@bc##1{\setlength{\fboxsep}{0pt}\colorbox[rgb]{1.00,0.94,0.94}{\strut ##1}}}
-\expandafter\def\csname PYG@tok@nn\endcsname{\let\PYG@bf=\textbf\def\PYG@tc##1{\textcolor[rgb]{0.05,0.52,0.71}{##1}}}
-\expandafter\def\csname PYG@tok@mi\endcsname{\let\PYG@bf=\textbf\def\PYG@tc##1{\textcolor[rgb]{0.00,0.00,0.87}{##1}}}
-\expandafter\def\csname PYG@tok@sd\endcsname{\def\PYG@tc##1{\textcolor[rgb]{0.87,0.27,0.13}{##1}}}
-\expandafter\def\csname PYG@tok@s2\endcsname{\def\PYG@bc##1{\setlength{\fboxsep}{0pt}\colorbox[rgb]{1.00,0.94,0.94}{\strut ##1}}}
-\expandafter\def\csname PYG@tok@sx\endcsname{\def\PYG@tc##1{\textcolor[rgb]{0.87,0.13,0.00}{##1}}\def\PYG@bc##1{\setlength{\fboxsep}{0pt}\colorbox[rgb]{1.00,0.94,0.94}{\strut ##1}}}
+\expandafter\def\csname PYG@tok@fm\endcsname{\let\PYG@bf=\textbf\def\PYG@tc##1{\textcolor[rgb]{0.00,0.40,0.73}{##1}}}
+\expandafter\def\csname PYG@tok@s\endcsname{\def\PYG@bc##1{\setlength{\fboxsep}{0pt}\colorbox[rgb]{1.00,0.94,0.94}{\strut ##1}}}
+\expandafter\def\csname PYG@tok@o\endcsname{\def\PYG@tc##1{\textcolor[rgb]{0.20,0.20,0.20}{##1}}}
 \expandafter\def\csname PYG@tok@gu\endcsname{\let\PYG@bf=\textbf\def\PYG@tc##1{\textcolor[rgb]{0.50,0.00,0.50}{##1}}}
 \expandafter\def\csname PYG@tok@cm\endcsname{\def\PYG@tc##1{\textcolor[rgb]{0.53,0.53,0.53}{##1}}}
+\expandafter\def\csname PYG@tok@sa\endcsname{\def\PYG@bc##1{\setlength{\fboxsep}{0pt}\colorbox[rgb]{1.00,0.94,0.94}{\strut ##1}}}
+\expandafter\def\csname PYG@tok@sr\endcsname{\def\PYG@tc##1{\textcolor[rgb]{0.00,0.00,0.00}{##1}}\def\PYG@bc##1{\setlength{\fboxsep}{0pt}\colorbox[rgb]{1.00,0.94,1.00}{\strut ##1}}}
+\expandafter\def\csname PYG@tok@w\endcsname{\def\PYG@tc##1{\textcolor[rgb]{0.73,0.73,0.73}{##1}}}
+\expandafter\def\csname PYG@tok@gr\endcsname{\def\PYG@tc##1{\textcolor[rgb]{1.00,0.00,0.00}{##1}}}
+\expandafter\def\csname PYG@tok@s2\endcsname{\def\PYG@bc##1{\setlength{\fboxsep}{0pt}\colorbox[rgb]{1.00,0.94,0.94}{\strut ##1}}}
 \expandafter\def\csname PYG@tok@dl\endcsname{\def\PYG@bc##1{\setlength{\fboxsep}{0pt}\colorbox[rgb]{1.00,0.94,0.94}{\strut ##1}}}
+\expandafter\def\csname PYG@tok@kn\endcsname{\let\PYG@bf=\textbf\def\PYG@tc##1{\textcolor[rgb]{0.00,0.53,0.00}{##1}}}
+\expandafter\def\csname PYG@tok@gt\endcsname{\def\PYG@tc##1{\textcolor[rgb]{0.00,0.27,0.87}{##1}}}
+\expandafter\def\csname PYG@tok@mf\endcsname{\let\PYG@bf=\textbf\def\PYG@tc##1{\textcolor[rgb]{0.40,0.00,0.93}{##1}}}
 \expandafter\def\csname PYG@tok@vm\endcsname{\def\PYG@tc##1{\textcolor[rgb]{0.60,0.40,0.20}{##1}}}
-\expandafter\def\csname PYG@tok@mb\endcsname{\let\PYG@bf=\textbf\def\PYG@tc##1{\textcolor[rgb]{0.40,0.00,0.93}{##1}}}
-\expandafter\def\csname PYG@tok@sb\endcsname{\def\PYG@bc##1{\setlength{\fboxsep}{0pt}\colorbox[rgb]{1.00,0.94,0.94}{\strut ##1}}}
-\expandafter\def\csname PYG@tok@cpf\endcsname{\def\PYG@tc##1{\textcolor[rgb]{0.53,0.53,0.53}{##1}}}
-\expandafter\def\csname PYG@tok@vg\endcsname{\let\PYG@bf=\textbf\def\PYG@tc##1{\textcolor[rgb]{0.87,0.47,0.00}{##1}}}
-\expandafter\def\csname PYG@tok@s\endcsname{\def\PYG@bc##1{\setlength{\fboxsep}{0pt}\colorbox[rgb]{1.00,0.94,0.94}{\strut ##1}}}
-\expandafter\def\csname PYG@tok@sh\endcsname{\def\PYG@bc##1{\setlength{\fboxsep}{0pt}\colorbox[rgb]{1.00,0.94,0.94}{\strut ##1}}}
-\expandafter\def\csname PYG@tok@ne\endcsname{\let\PYG@bf=\textbf\def\PYG@tc##1{\textcolor[rgb]{1.00,0.00,0.00}{##1}}}
-\expandafter\def\csname PYG@tok@w\endcsname{\def\PYG@tc##1{\textcolor[rgb]{0.73,0.73,0.73}{##1}}}
-\expandafter\def\csname PYG@tok@kr\endcsname{\let\PYG@bf=\textbf\def\PYG@tc##1{\textcolor[rgb]{0.00,0.53,0.00}{##1}}}
-\expandafter\def\csname PYG@tok@il\endcsname{\let\PYG@bf=\textbf\def\PYG@tc##1{\textcolor[rgb]{0.00,0.00,0.87}{##1}}}
-\expandafter\def\csname PYG@tok@nt\endcsname{\def\PYG@tc##1{\textcolor[rgb]{0.00,0.47,0.00}{##1}}}
+\expandafter\def\csname PYG@tok@kd\endcsname{\let\PYG@bf=\textbf\def\PYG@tc##1{\textcolor[rgb]{0.00,0.53,0.00}{##1}}}
 \expandafter\def\csname PYG@tok@nc\endcsname{\let\PYG@bf=\textbf\def\PYG@tc##1{\textcolor[rgb]{0.73,0.00,0.40}{##1}}}
-\expandafter\def\csname PYG@tok@se\endcsname{\let\PYG@bf=\textbf\def\PYG@tc##1{\textcolor[rgb]{0.40,0.40,0.40}{##1}}\def\PYG@bc##1{\setlength{\fboxsep}{0pt}\colorbox[rgb]{1.00,0.94,0.94}{\strut ##1}}}
-\expandafter\def\csname PYG@tok@err\endcsname{\def\PYG@tc##1{\textcolor[rgb]{1.00,0.00,0.00}{##1}}\def\PYG@bc##1{\setlength{\fboxsep}{0pt}\colorbox[rgb]{1.00,0.67,0.67}{\strut ##1}}}
-\expandafter\def\csname PYG@tok@ss\endcsname{\def\PYG@tc##1{\textcolor[rgb]{0.67,0.40,0.00}{##1}}}
-\expandafter\def\csname PYG@tok@nv\endcsname{\def\PYG@tc##1{\textcolor[rgb]{0.60,0.40,0.20}{##1}}}
 \expandafter\def\csname PYG@tok@m\endcsname{\let\PYG@bf=\textbf\def\PYG@tc##1{\textcolor[rgb]{0.40,0.00,0.93}{##1}}}
-\expandafter\def\csname PYG@tok@go\endcsname{\def\PYG@tc##1{\textcolor[rgb]{0.53,0.53,0.53}{##1}}}
-\expandafter\def\csname PYG@tok@fm\endcsname{\let\PYG@bf=\textbf\def\PYG@tc##1{\textcolor[rgb]{0.00,0.40,0.73}{##1}}}
-\expandafter\def\csname PYG@tok@cp\endcsname{\def\PYG@tc##1{\textcolor[rgb]{0.33,0.47,0.60}{##1}}}
-\expandafter\def\csname PYG@tok@kn\endcsname{\let\PYG@bf=\textbf\def\PYG@tc##1{\textcolor[rgb]{0.00,0.53,0.00}{##1}}}
+\expandafter\def\csname PYG@tok@nn\endcsname{\let\PYG@bf=\textbf\def\PYG@tc##1{\textcolor[rgb]{0.05,0.52,0.71}{##1}}}
+\expandafter\def\csname PYG@tok@sx\endcsname{\def\PYG@tc##1{\textcolor[rgb]{0.87,0.13,0.00}{##1}}\def\PYG@bc##1{\setlength{\fboxsep}{0pt}\colorbox[rgb]{1.00,0.94,0.94}{\strut ##1}}}
+\expandafter\def\csname PYG@tok@ow\endcsname{\let\PYG@bf=\textbf\def\PYG@tc##1{\textcolor[rgb]{0.00,0.00,0.00}{##1}}}
+\expandafter\def\csname PYG@tok@si\endcsname{\def\PYG@bc##1{\setlength{\fboxsep}{0pt}\colorbox[rgb]{0.93,0.93,0.93}{\strut ##1}}}
+\expandafter\def\csname PYG@tok@nb\endcsname{\def\PYG@tc##1{\textcolor[rgb]{0.00,0.44,0.13}{##1}}}
+\expandafter\def\csname PYG@tok@kc\endcsname{\let\PYG@bf=\textbf\def\PYG@tc##1{\textcolor[rgb]{0.00,0.53,0.00}{##1}}}
+\expandafter\def\csname PYG@tok@kp\endcsname{\let\PYG@bf=\textbf\def\PYG@tc##1{\textcolor[rgb]{0.00,0.20,0.53}{##1}}}
+\expandafter\def\csname PYG@tok@k\endcsname{\let\PYG@bf=\textbf\def\PYG@tc##1{\textcolor[rgb]{0.00,0.53,0.00}{##1}}}
+\expandafter\def\csname PYG@tok@kt\endcsname{\let\PYG@bf=\textbf\def\PYG@tc##1{\textcolor[rgb]{0.20,0.20,0.60}{##1}}}
+\expandafter\def\csname PYG@tok@nd\endcsname{\let\PYG@bf=\textbf\def\PYG@tc##1{\textcolor[rgb]{0.33,0.33,0.33}{##1}}}
+\expandafter\def\csname PYG@tok@gh\endcsname{\let\PYG@bf=\textbf\def\PYG@tc##1{\textcolor[rgb]{0.00,0.00,0.50}{##1}}}
 \expandafter\def\csname PYG@tok@nf\endcsname{\let\PYG@bf=\textbf\def\PYG@tc##1{\textcolor[rgb]{0.00,0.40,0.73}{##1}}}
-\expandafter\def\csname PYG@tok@mo\endcsname{\let\PYG@bf=\textbf\def\PYG@tc##1{\textcolor[rgb]{0.27,0.00,0.93}{##1}}}
+\expandafter\def\csname PYG@tok@vi\endcsname{\def\PYG@tc##1{\textcolor[rgb]{0.20,0.20,0.73}{##1}}}
+\expandafter\def\csname PYG@tok@gi\endcsname{\def\PYG@tc##1{\textcolor[rgb]{0.00,0.63,0.00}{##1}}}
+\expandafter\def\csname PYG@tok@vg\endcsname{\let\PYG@bf=\textbf\def\PYG@tc##1{\textcolor[rgb]{0.87,0.47,0.00}{##1}}}
+\expandafter\def\csname PYG@tok@mi\endcsname{\let\PYG@bf=\textbf\def\PYG@tc##1{\textcolor[rgb]{0.00,0.00,0.87}{##1}}}
+\expandafter\def\csname PYG@tok@nl\endcsname{\let\PYG@bf=\textbf\def\PYG@tc##1{\textcolor[rgb]{0.60,0.47,0.00}{##1}}}
+\expandafter\def\csname PYG@tok@sc\endcsname{\def\PYG@tc##1{\textcolor[rgb]{0.00,0.27,0.87}{##1}}}
+\expandafter\def\csname PYG@tok@sb\endcsname{\def\PYG@bc##1{\setlength{\fboxsep}{0pt}\colorbox[rgb]{1.00,0.94,0.94}{\strut ##1}}}
+\expandafter\def\csname PYG@tok@mb\endcsname{\let\PYG@bf=\textbf\def\PYG@tc##1{\textcolor[rgb]{0.40,0.00,0.93}{##1}}}
+\expandafter\def\csname PYG@tok@sh\endcsname{\def\PYG@bc##1{\setlength{\fboxsep}{0pt}\colorbox[rgb]{1.00,0.94,0.94}{\strut ##1}}}
+\expandafter\def\csname PYG@tok@c\endcsname{\def\PYG@tc##1{\textcolor[rgb]{0.53,0.53,0.53}{##1}}}
+\expandafter\def\csname PYG@tok@go\endcsname{\def\PYG@tc##1{\textcolor[rgb]{0.53,0.53,0.53}{##1}}}
+\expandafter\def\csname PYG@tok@kr\endcsname{\let\PYG@bf=\textbf\def\PYG@tc##1{\textcolor[rgb]{0.00,0.53,0.00}{##1}}}
 
 \def\PYGZbs{\char`\\}
 \def\PYGZus{\char`\_}
diff -pruN 2.33+dfsg-1/doc/man/fsck.s3ql.1 3.0+dfsg-1/doc/man/fsck.s3ql.1
--- 2.33+dfsg-1/doc/man/fsck.s3ql.1	2018-12-28 19:37:19.000000000 +0000
+++ 3.0+dfsg-1/doc/man/fsck.s3ql.1	2019-02-09 09:56:35.000000000 +0000
@@ -1,6 +1,6 @@
 .\" Man page generated from reStructuredText.
 .
-.TH "FSCK.S3QL" "1" "Dec 28, 2018" "2.33" "S3QL"
+.TH "FSCK.S3QL" "1" "Feb 09, 2019" "3.0" "S3QL"
 .SH NAME
 fsck.s3ql \- Check an S3QL file system for errors
 .
diff -pruN 2.33+dfsg-1/doc/man/mkfs.s3ql.1 3.0+dfsg-1/doc/man/mkfs.s3ql.1
--- 2.33+dfsg-1/doc/man/mkfs.s3ql.1	2018-12-28 19:37:19.000000000 +0000
+++ 3.0+dfsg-1/doc/man/mkfs.s3ql.1	2019-02-09 09:56:35.000000000 +0000
@@ -1,6 +1,6 @@
 .\" Man page generated from reStructuredText.
 .
-.TH "MKFS.S3QL" "1" "Dec 28, 2018" "2.33" "S3QL"
+.TH "MKFS.S3QL" "1" "Feb 09, 2019" "3.0" "S3QL"
 .SH NAME
 mkfs.s3ql \- Create an S3QL file system
 .
@@ -102,9 +102,6 @@ storage backend. Default: 10240 KiB.
 .TP
 .B \-\-plain
 Create unencrypted file system.
-.TP
-.B \-\-force
-Overwrite any existing data.
 .UNINDENT
 .UNINDENT
 .UNINDENT
diff -pruN 2.33+dfsg-1/doc/man/mount.s3ql.1 3.0+dfsg-1/doc/man/mount.s3ql.1
--- 2.33+dfsg-1/doc/man/mount.s3ql.1	2018-12-28 19:37:19.000000000 +0000
+++ 3.0+dfsg-1/doc/man/mount.s3ql.1	2019-02-09 09:56:35.000000000 +0000
@@ -1,6 +1,6 @@
 .\" Man page generated from reStructuredText.
 .
-.TH "MOUNT.S3QL" "1" "Dec 28, 2018" "2.33" "S3QL"
+.TH "MOUNT.S3QL" "1" "Feb 09, 2019" "3.0" "S3QL"
 .SH NAME
 mount.s3ql \- Mount an S3QL file system
 .
@@ -125,9 +125,9 @@ mounting user and the root user.
 .B \-\-fg
 Do not daemonize, stay in foreground
 .TP
-.B \-\-upstart
-Stay in foreground and raise SIGSTOP once mountpoint
-is up.
+.B \-\-systemd
+Run as systemd unit. Consider specifying \-\-log none as
+well to make use of journald.
 .TP
 .BI \-\-compress \ <algorithm\-lvl>
 Compression algorithm and compression level to use
diff -pruN 2.33+dfsg-1/doc/man/s3qladm.1 3.0+dfsg-1/doc/man/s3qladm.1
--- 2.33+dfsg-1/doc/man/s3qladm.1	2018-12-28 19:37:20.000000000 +0000
+++ 3.0+dfsg-1/doc/man/s3qladm.1	2019-02-09 09:56:36.000000000 +0000
@@ -1,6 +1,6 @@
 .\" Man page generated from reStructuredText.
 .
-.TH "S3QLADM" "1" "Dec 28, 2018" "2.33" "S3QL"
+.TH "S3QLADM" "1" "Feb 09, 2019" "3.0" "S3QL"
 .SH NAME
 s3qladm \- Manage S3QL file systems
 .
diff -pruN 2.33+dfsg-1/doc/man/s3qlcp.1 3.0+dfsg-1/doc/man/s3qlcp.1
--- 2.33+dfsg-1/doc/man/s3qlcp.1	2018-12-28 19:37:20.000000000 +0000
+++ 3.0+dfsg-1/doc/man/s3qlcp.1	2019-02-09 09:56:36.000000000 +0000
@@ -1,6 +1,6 @@
 .\" Man page generated from reStructuredText.
 .
-.TH "S3QLCP" "1" "Dec 28, 2018" "2.33" "S3QL"
+.TH "S3QLCP" "1" "Feb 09, 2019" "3.0" "S3QL"
 .SH NAME
 s3qlcp \- Copy-on-write replication on S3QL file systems
 .
diff -pruN 2.33+dfsg-1/doc/man/s3qlctrl.1 3.0+dfsg-1/doc/man/s3qlctrl.1
--- 2.33+dfsg-1/doc/man/s3qlctrl.1	2018-12-28 19:37:20.000000000 +0000
+++ 3.0+dfsg-1/doc/man/s3qlctrl.1	2019-02-09 09:56:35.000000000 +0000
@@ -1,6 +1,6 @@
 .\" Man page generated from reStructuredText.
 .
-.TH "S3QLCTRL" "1" "Dec 28, 2018" "2.33" "S3QL"
+.TH "S3QLCTRL" "1" "Feb 09, 2019" "3.0" "S3QL"
 .SH NAME
 s3qlctrl \- Control a mounted S3QL file system
 .
diff -pruN 2.33+dfsg-1/doc/man/s3qllock.1 3.0+dfsg-1/doc/man/s3qllock.1
--- 2.33+dfsg-1/doc/man/s3qllock.1	2018-12-28 19:37:20.000000000 +0000
+++ 3.0+dfsg-1/doc/man/s3qllock.1	2019-02-09 09:56:36.000000000 +0000
@@ -1,6 +1,6 @@
 .\" Man page generated from reStructuredText.
 .
-.TH "S3QLLOCK" "1" "Dec 28, 2018" "2.33" "S3QL"
+.TH "S3QLLOCK" "1" "Feb 09, 2019" "3.0" "S3QL"
 .SH NAME
 s3qllock \- Make trees on an S3QL file system immutable
 .
diff -pruN 2.33+dfsg-1/doc/man/s3ql_oauth_client.1 3.0+dfsg-1/doc/man/s3ql_oauth_client.1
--- 2.33+dfsg-1/doc/man/s3ql_oauth_client.1	2018-12-28 19:37:20.000000000 +0000
+++ 3.0+dfsg-1/doc/man/s3ql_oauth_client.1	2019-02-09 09:56:36.000000000 +0000
@@ -1,6 +1,6 @@
 .\" Man page generated from reStructuredText.
 .
-.TH "S3QL_OAUTH_CLIENT" "1" "Dec 28, 2018" "2.33" "S3QL"
+.TH "S3QL_OAUTH_CLIENT" "1" "Feb 09, 2019" "3.0" "S3QL"
 .SH NAME
 s3ql_oauth_client \- Obtain Google Storage OAuth2 tokens
 .
diff -pruN 2.33+dfsg-1/doc/man/s3qlrm.1 3.0+dfsg-1/doc/man/s3qlrm.1
--- 2.33+dfsg-1/doc/man/s3qlrm.1	2018-12-28 19:37:20.000000000 +0000
+++ 3.0+dfsg-1/doc/man/s3qlrm.1	2019-02-09 09:56:36.000000000 +0000
@@ -1,6 +1,6 @@
 .\" Man page generated from reStructuredText.
 .
-.TH "S3QLRM" "1" "Dec 28, 2018" "2.33" "S3QL"
+.TH "S3QLRM" "1" "Feb 09, 2019" "3.0" "S3QL"
 .SH NAME
 s3qlrm \- Fast tree removal on S3QL file systems
 .
diff -pruN 2.33+dfsg-1/doc/man/s3qlstat.1 3.0+dfsg-1/doc/man/s3qlstat.1
--- 2.33+dfsg-1/doc/man/s3qlstat.1	2018-12-28 19:37:20.000000000 +0000
+++ 3.0+dfsg-1/doc/man/s3qlstat.1	2019-02-09 09:56:36.000000000 +0000
@@ -1,6 +1,6 @@
 .\" Man page generated from reStructuredText.
 .
-.TH "S3QLSTAT" "1" "Dec 28, 2018" "2.33" "S3QL"
+.TH "S3QLSTAT" "1" "Feb 09, 2019" "3.0" "S3QL"
 .SH NAME
 s3qlstat \- Gather S3QL file system statistics
 .
diff -pruN 2.33+dfsg-1/doc/man/s3ql_verify.1 3.0+dfsg-1/doc/man/s3ql_verify.1
--- 2.33+dfsg-1/doc/man/s3ql_verify.1	2018-12-28 19:37:20.000000000 +0000
+++ 3.0+dfsg-1/doc/man/s3ql_verify.1	2019-02-09 09:56:36.000000000 +0000
@@ -1,6 +1,6 @@
 .\" Man page generated from reStructuredText.
 .
-.TH "S3QL_VERIFY" "1" "Dec 28, 2018" "2.33" "S3QL"
+.TH "S3QL_VERIFY" "1" "Feb 09, 2019" "3.0" "S3QL"
 .SH NAME
 s3ql_verify \- Verify data in an S3QL file system
 .
diff -pruN 2.33+dfsg-1/doc/man/umount.s3ql.1 3.0+dfsg-1/doc/man/umount.s3ql.1
--- 2.33+dfsg-1/doc/man/umount.s3ql.1	2018-12-28 19:37:19.000000000 +0000
+++ 3.0+dfsg-1/doc/man/umount.s3ql.1	2019-02-09 09:56:35.000000000 +0000
@@ -1,6 +1,6 @@
 .\" Man page generated from reStructuredText.
 .
-.TH "UMOUNT.S3QL" "1" "Dec 28, 2018" "2.33" "S3QL"
+.TH "UMOUNT.S3QL" "1" "Feb 09, 2019" "3.0" "S3QL"
 .SH NAME
 umount.s3ql \- Unmount an S3QL file system
 .
Binary files 2.33+dfsg-1/doc/manual.pdf and 3.0+dfsg-1/doc/manual.pdf differ
diff -pruN 2.33+dfsg-1/PKG-INFO 3.0+dfsg-1/PKG-INFO
--- 2.33+dfsg-1/PKG-INFO	2018-12-28 19:37:21.000000000 +0000
+++ 3.0+dfsg-1/PKG-INFO	2019-02-09 09:56:37.000000000 +0000
@@ -1,6 +1,6 @@
 Metadata-Version: 1.1
 Name: s3ql
-Version: 2.33
+Version: 3.0
 Summary: a full-featured file system for online data storage
 Home-page: https://bitbucket.org/nikratio/s3ql/
 Author: Nikolaus Rath
@@ -139,10 +139,10 @@ Description: ..
         The following resources are available:
         
         * The `S3QL User's Guide`_.
-        * The `S3QL Wiki`_, which also contains the `S3QL FAQ`_.
-        * The `S3QL Mailing List`_. You can subscribe by sending a mail to
-          `s3ql+subscribe@googlegroups.com
-          <mailto:s3ql+subscribe@googlegroups.com>`_.
+        * The `S3QL Wiki <https://github.com/s3ql/s3ql/wiki>`_.
+        * The `S3QL Mailing List <http://groups.google.com/group/s3ql>`_. You
+          can subscribe by sending a mail to
+          `s3ql+subscribe@googlegroups.com <mailto:s3ql+subscribe@googlegroups.com>`_.
         
         Please report any bugs you may encounter in the `GitHub Issue Tracker`_.
         
@@ -157,8 +157,6 @@ Description: ..
         Professional support is offered via `Rath Consulting`_.
         
         .. _`S3QL User's Guide`: http://www.rath.org/s3ql-docs/index.html
-        .. _`S3QL Wiki`: https://bitbucket.org/nikratio/s3ql/wiki/
-        .. _`S3QL FAQ`: https://bitbucket.org/nikratio/s3ql/wiki/FAQ
         .. _`S3QL Mailing List`: http://groups.google.com/group/s3ql
         .. _`GitHub Issue Tracker`: https://github.com/s3ql/s3ql/issues
         .. _GitHub: https://github.com/s3ql/main
diff -pruN 2.33+dfsg-1/README.rst 3.0+dfsg-1/README.rst
--- 2.33+dfsg-1/README.rst	2018-12-28 13:23:30.000000000 +0000
+++ 3.0+dfsg-1/README.rst	2019-01-13 14:54:28.000000000 +0000
@@ -130,10 +130,10 @@ Need Help?
 The following resources are available:
 
 * The `S3QL User's Guide`_.
-* The `S3QL Wiki`_, which also contains the `S3QL FAQ`_.
-* The `S3QL Mailing List`_. You can subscribe by sending a mail to
-  `s3ql+subscribe@googlegroups.com
-  <mailto:s3ql+subscribe@googlegroups.com>`_.
+* The `S3QL Wiki <https://github.com/s3ql/s3ql/wiki>`_.
+* The `S3QL Mailing List <http://groups.google.com/group/s3ql>`_. You
+  can subscribe by sending a mail to
+  `s3ql+subscribe@googlegroups.com <mailto:s3ql+subscribe@googlegroups.com>`_.
 
 Please report any bugs you may encounter in the `GitHub Issue Tracker`_.
 
@@ -148,8 +148,6 @@ Professional Support
 Professional support is offered via `Rath Consulting`_.
 
 .. _`S3QL User's Guide`: http://www.rath.org/s3ql-docs/index.html
-.. _`S3QL Wiki`: https://bitbucket.org/nikratio/s3ql/wiki/
-.. _`S3QL FAQ`: https://bitbucket.org/nikratio/s3ql/wiki/FAQ
 .. _`S3QL Mailing List`: http://groups.google.com/group/s3ql
 .. _`GitHub Issue Tracker`: https://github.com/s3ql/s3ql/issues
 .. _GitHub: https://github.com/s3ql/main
diff -pruN 2.33+dfsg-1/rst/backends.rst 3.0+dfsg-1/rst/backends.rst
--- 2.33+dfsg-1/rst/backends.rst	2018-12-28 13:08:31.000000000 +0000
+++ 3.0+dfsg-1/rst/backends.rst	2019-01-20 14:53:06.000000000 +0000
@@ -30,38 +30,34 @@ environment variables.
    another, you need to use the :file:`clone_fs.py` script (from the
    :file:`contrib` directory in the S3QL tarball).
 
+
 Google Storage
 ==============
 
 .. program:: gs_backend
 
-`Google Storage <http://code.google.com/apis/storage/>`_ is an online
-storage service offered by Google. To use the Google Storage backend,
-you need to have (or sign up for) a Google account, and then `activate
-Google Storage <http://code.google.com/apis/storage/docs/signup.html>`_
-for your account. The account is free, you will pay only for the
-amount of storage and traffic that you actually use. There are two
-ways to access Google storage:
-
-#. Use S3-like authentication. To do this, first `set a  default
-   project
-   <https://developers.google.com/storage/docs/migrating#defaultproj>`_.
-   Then use the `key management tool
-   <https://code.google.com/apis/console/#:storage:legacy>`_ to
-   retrieve your *Google Storage developer access key* and *Google
-   Storage developer secret* and use that as backend login and backend
-   password.
-
-#. Use OAuth2 authentication. In this case you need to use ``oauth2``
-   as the backend login, and a valid OAuth2 refresh token as the
-   backend password. To obtain a refresh token, you can use the
-   :ref:`s3ql_oauth_client <oauth_client>` program. It will instruct
-   you to open a specific URL in your browser, enter a code and
-   authenticate with your Google account. Once this procedure is
-   complete, :ref:`s3ql_oauth_client <oauth_client>` will print out
-   the refresh token. Note that you need to do this procedure only
-   once, the refresh token will remain valid until you explicitly
-   revoke it.
+`Google Storage <https://cloud.google.com/storage/>`_ is an online
+storage service offered by Google. To use it with S3QL, make sure
+that you enable the JSON API in the `GCP Console API Library
+<https://console.cloud.google.com/apis/library/>`_.
+
+The Google Storage backend uses OAuth2 authentication or ADC_
+(Application Default Credentials).
+
+.. _ADC: https://cloud.google.com/docs/authentication/production
+
+To use OAuth2 authentication, specify ``oauth2`` as the backend login
+and a valid OAuth2 refresh token as the backend password. To obtain a
+refresh token, you can use the :ref:`s3ql_oauth_client <oauth_client>`
+program. It will instruct you to open a specific URL in your browser,
+enter a code and authenticate with your Google account. Once this
+procedure is complete, :ref:`s3ql_oauth_client <oauth_client>` will
+print out the refresh token. Note that you need to do this procedure
+only once; the refresh token will remain valid until you explicitly
+revoke it.
+
+To use ADC, specify ``adc`` as the backend login and use an arbitrary
+value for the backend password.
 
 To create a Google Storage bucket, you can use e.g. the `Google
 Storage Manager`_. The storage URL for accessing the bucket in S3QL is
@@ -76,10 +72,6 @@ Google Storage bucket.
 
 The Google Storage backend accepts the following backend options:
 
-.. option:: no-ssl
-
-   Disable encrypted (https) connections and use plain HTTP instead.
-
 .. option:: ssl-ca-path=<path>
 
    Instead of using the system's default certificate store, validate
@@ -94,7 +86,8 @@ The Google Storage backend accepts the f
    exchanged with the remote server for longer than this period, the
    TCP connection is closed and re-established (default: 20 seconds).
 
-.. _`Google Storage Manager`: https://sandbox.google.com/storage/
+.. _`Google Storage Manager`: https://console.cloud.google.com/storage/browser
+
 
 Amazon S3
 =========
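
For context on the ADC flow documented above: with the google-auth
module installed, credentials are resolved through the standard
google.auth lookup chain. A minimal sketch, assuming google-auth is
installed and credentials are configured in the environment:

    import google.auth

    # Resolve Application Default Credentials: the
    # GOOGLE_APPLICATION_CREDENTIALS environment variable, gcloud user
    # credentials, or (on GCP) the instance metadata server.
    credentials, project = google.auth.default()

With ``adc`` as the backend login, the Google Storage backend picks up
these credentials; the backend password can be an arbitrary value.
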
diff -pruN 2.33+dfsg-1/rst/installation.rst 3.0+dfsg-1/rst/installation.rst
--- 2.33+dfsg-1/rst/installation.rst	2018-12-28 13:23:30.000000000 +0000
+++ 3.0+dfsg-1/rst/installation.rst	2019-01-20 14:53:06.000000000 +0000
@@ -35,14 +35,8 @@ that is not the case.
 * The following Python modules:
 
   * `setuptools <https://pypi.python.org/pypi/setuptools>`_, version 1.0 or newer.
-  * `pycrypto <https://www.dlitz.net/software/pycrypto/>`_
+  * `cryptography <https://cryptography.io/en/latest/installation/>`_
   * `defusedxml <https://pypi.python.org/pypi/defusedxml/>`_
-  * `requests <https://pypi.python.org/pypi/requests/>`_ (optional,
-    required for OAuth2 authentication with Google Storage)
-  * `systemd <https://github.com/systemd/python-systemd>`_ (optional,
-    for enabling systemd support). Do *not* install the module from
-    PyPi, this is from a third-party developer and incompatible with
-    the official module from the systemd developers.
   * `apsw <https://github.com/rogerbinns/apsw>`_, version 3.7.0 or
     newer.
   * `llfuse <https://pypi.org/project/llfuse/>`_, any
@@ -50,6 +44,14 @@ that is not the case.
   * `dugong <https://pypi.org/project/dugong/>`_, any
     version between 3.4 (inclusive) and 4.0 (exclusive)
   * `pytest <http://pytest.org/>`_, version 2.7 or newer (optional, to run unit tests)
+  * `systemd <https://github.com/systemd/python-systemd>`_ (optional,
+    for enabling systemd support). Do *not* install the module from
+    PyPI: that package is from a third-party developer and incompatible with
+    the official module from the systemd developers.
+  * `requests <https://pypi.python.org/pypi/requests/>`_ (optional,
+    required for OAuth2 authentication with Google Storage)
+  * `google-auth <https://pypi.python.org/project/google-auth/>`_
+    (optional, required for ADC authentication with Google Storage)
 
   To check if a specific module :var:`<module>` is installed, execute
   :samp:`python3 -c 'import {<module>};
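
The module check truncated at the end of this hunk is a one-line
import test. For example, to verify the new cryptography dependency:

    python3 -c 'import cryptography; print(cryptography.__version__)'
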
diff -pruN 2.33+dfsg-1/rst/mount.rst 3.0+dfsg-1/rst/mount.rst
--- 2.33+dfsg-1/rst/mount.rst	2018-12-28 13:23:30.000000000 +0000
+++ 3.0+dfsg-1/rst/mount.rst	2019-01-13 14:54:28.000000000 +0000
@@ -177,7 +177,8 @@ Automatic Mounting
 If you want to mount and umount an S3QL file system automatically at
 system startup and shutdown, you should do so with a dedicated S3QL
 init job (instead of using :file:`/etc/fstab`). When using systemd,
-:program:`mount.s3ql` can be run as a service of type ``notify``.
+:program:`mount.s3ql` can be started with :cmdopt:`--systemd` to run
+as a systemd service of type ``notify``.
 
 .. NOTE::
 
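
The ``notify`` service type mentioned above works because the mounting
process signals readiness to systemd once the mountpoint is up. The
signal itself, sketched with the python-systemd module listed in the
installation dependencies (illustrative only, not the actual
mount.s3ql code):

    from systemd import daemon

    # With Type=notify, systemd considers the service started only
    # after this call, so dependent units wait for the mountpoint.
    daemon.notify('READY=1')

A matching unit would set ``Type=notify`` and start ``mount.s3ql
--systemd``, optionally with ``--log none`` so that messages go to
journald, as the option help above suggests.
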
diff -pruN 2.33+dfsg-1/setup.py 3.0+dfsg-1/setup.py
--- 2.33+dfsg-1/setup.py	2018-12-28 13:23:30.000000000 +0000
+++ 3.0+dfsg-1/setup.py	2019-01-15 20:37:11.000000000 +0000
@@ -130,7 +130,7 @@ def main():
         compile_args.append('-Wno-unused-function')
 
     required_pkgs = ['apsw >= 3.7.0',
-                     'pycrypto',
+                     'cryptography',
                      'requests',
                      'defusedxml',
                      'dugong >= 3.4, < 4.0',
diff -pruN 2.33+dfsg-1/src/s3ql/adm.py 3.0+dfsg-1/src/s3ql/adm.py
--- 2.33+dfsg-1/src/s3ql/adm.py	2018-12-28 13:08:31.000000000 +0000
+++ 3.0+dfsg-1/src/s3ql/adm.py	2019-01-15 20:37:11.000000000 +0000
@@ -51,8 +51,10 @@ def parse_args(args):
                                        help='may be either of')
     subparsers.add_parser("passphrase", help="change file system passphrase",
                           parents=[pparser])
-    subparsers.add_parser("clear", help="delete file system and all data",
-                          parents=[pparser])
+    sparser = subparsers.add_parser("clear", help="delete file system and all data",
+                                    parents=[pparser])
+    sparser.add_argument("--threads", type=int, default=20,
+                        help='Number of threads to use')
     subparsers.add_parser("recover-key", help="Recover master key from offline copy.",
                           parents=[pparser])
     subparsers.add_parser("download-metadata",
@@ -91,8 +93,7 @@ def main(args=None):
         raise QuietError('Can not work on mounted file system.')
 
     if options.action == 'clear':
-        with get_backend(options, raw=True) as backend:
-            return clear(backend, options)
+        return clear(options)
     elif options.action == 'upgrade':
         return upgrade(options)
 
@@ -209,9 +210,16 @@ def recover(backend, options):
     backend['s3ql_passphrase_bak2'] = data_pw
     backend['s3ql_passphrase_bak3'] = data_pw
 
-def clear(backend, options):
-    print('I am about to delete all data in %s.' % backend,
-          'This includes any S3QL file systems as well as any other stored objects.',
+@handle_on_return
+def clear(options, on_return):
+    backend_factory = lambda: options.backend_class(options)
+    backend = on_return.enter_context(backend_factory())
+
+    print('I am about to DELETE ALL DATA in %s.' % backend,
+          'This includes not just S3QL file systems but *all* stored objects.',
+          'Depending on the storage service, it may be necessary to run this command',
+          'several times to delete all data, and it may take a while until the',
+          'removal becomes effective.',
           'Please enter "yes" to continue.', '> ', sep='\n', end='')
     sys.stdout.flush()
 
@@ -228,10 +236,61 @@ def clear(backend, options):
     if os.path.exists(name):
         shutil.rmtree(name)
 
-    backend.clear()
+    queue = Queue(maxsize=options.threads)
+
+    def removal_loop():
+        with backend_factory() as backend:
+            while True:
+                key = queue.get()
+                if key is None:
+                    return
+                backend.delete(key)
+
+    threads = []
+    for _ in range(options.threads):
+        t = AsyncFn(removal_loop)
+        # Don't wait for worker threads; otherwise we deadlock if the main
+        # thread terminates with an exception
+        t.daemon = True
+        t.start()
+        threads.append(t)
+
+    stamp = time.time()
+    for (i, obj_id) in enumerate(backend.list()):
+        stamp2 = time.time()
+        if stamp2 - stamp > 1:
+            sys.stdout.write('\r..deleted %d objects so far..' % i)
+            sys.stdout.flush()
+            stamp = stamp2
+
+            # Terminate early if any thread failed with an exception
+            for t in threads:
+                if not t.is_alive():
+                    t.join_and_raise()
+
+        # Avoid blocking if all threads terminated
+        while True:
+            try:
+                queue.put(obj_id, timeout=1)
+            except QueueFull:
+                pass
+            else:
+                break
+            for t in threads:
+                if not t.is_alive():
+                    t.join_and_raise()
+
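+    # Grow the queue so that one None sentinel per worker can be enqueued
+    # without blocking, even if all workers are still busy.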
+    queue.maxsize += len(threads)
+    for t in threads:
+        queue.put(None)
+
+    for t in threads:
+        t.join_and_raise()
+
+    sys.stdout.write('\n')
+    log.info('All visible objects deleted.')
+
 
-    log.info('File system deleted.')
-    log.info('Note: it may take a while for the removals to propagate through the backend.')
 
 def get_old_rev_msg(rev, prog):
     return textwrap.dedent('''\
@@ -253,6 +312,7 @@ def upgrade(options, on_return):
 
     s3ql.backends.comprenc.UPGRADE_MODE = checksum_basic_mapping_old
     s3ql.backends.s3c.UPGRADE_MODE = checksum_basic_mapping_old
+    s3ql.backends.gs.UPGRADE_MODE = checksum_basic_mapping_old
 
     log.info('Getting file system parameters..')
 
diff -pruN 2.33+dfsg-1/src/s3ql/backends/common.py 3.0+dfsg-1/src/s3ql/backends/common.py
--- 2.33+dfsg-1/src/s3ql/backends/common.py	2018-12-28 13:08:31.000000000 +0000
+++ 3.0+dfsg-1/src/s3ql/backends/common.py	2019-01-15 20:37:11.000000000 +0000
@@ -174,74 +174,6 @@ def extend_docstring(fun, s):
                                for line in textwrap.wrap(s, width=80 - indent))
     fun.__doc__ += '\n'
 
-class RetryIterator:
-    '''
-    A RetryIterator instance iterates over the elements produced by any
-    generator function passed to its constructor, i.e. it wraps the iterator
-    obtained by calling the generator function.  When retrieving elements from the
-    wrapped iterator, exceptions may occur. Most such exceptions are
-    propagated. However, exceptions for which the *is_temp_failure_fn* function
-    returns True are caught. If that happens, the wrapped iterator is replaced
-    by a new one obtained by calling the generator function again with the
-    *start_after* parameter set to the last element that was retrieved before
-    the exception occured.
-
-    If attempts to retrieve the next element fail repeatedly, the iterator is
-    replaced only after sleeping for increasing intervals. If no new element can
-    be obtained after `RETRY_TIMEOUT` seconds, the last exception is no longer
-    caught but propagated to the caller. This behavior is implemented by
-    wrapping the __next__ method with the `retry` decorator.
-    '''
-
-    def __init__(self, generator, is_temp_failure_fn, args=(), kwargs=None):
-        if not inspect.isgeneratorfunction(generator):
-            raise TypeError('*generator* must be generator function')
-
-        self.generator = generator
-        self.iterator = None
-        self.is_temp_failure = is_temp_failure_fn
-        if kwargs is None:
-            kwargs = {}
-        self.kwargs = kwargs
-        self.args = args
-
-    def __iter__(self):
-        return self
-
-    @retry
-    def __next__(self):
-        if self.iterator is None:
-            self.iterator = self.generator(*self.args, **self.kwargs)
-
-        try:
-            el = next(self.iterator)
-        except Exception as exc:
-            if self.is_temp_failure(exc):
-                self.iterator = None
-            raise
-
-        self.kwargs['start_after'] = el
-        return el
-
-def retry_generator(method):
-    '''Wrap *method* in a `RetryIterator`
-
-    *method* must return a generator, and accept a keyword argument
-    *start_with*. The RetryIterator's `is_temp_failure` attribute
-    will be set to the `is_temp_failure` method of the instance
-    to which *method* is bound.
-    '''
-
-    @wraps(method)
-    def wrapped(*a, **kw):
-        return RetryIterator(method, a[0].is_temp_failure, args=a, kwargs=kw)
-
-    extend_docstring(wrapped,
-                     'This generator method has been wrapped and will return a '
-                     '`RetryIterator` instance.')
-
-    return wrapped
-
 class AbstractBackend(object, metaclass=ABCMeta):
     '''Functionality shared between all backends.
 
@@ -436,11 +368,6 @@ class AbstractBackend(object, metaclass=
 
         pass
 
-    @abstractmethod
-    def clear(self):
-        """Delete all objects in backend"""
-        pass
-
     def contains(self, key):
         '''Check if `key` is in backend'''
 
diff -pruN 2.33+dfsg-1/src/s3ql/backends/comprenc.py 3.0+dfsg-1/src/s3ql/backends/comprenc.py
--- 2.33+dfsg-1/src/s3ql/backends/comprenc.py	2018-12-28 13:08:31.000000000 +0000
+++ 3.0+dfsg-1/src/s3ql/backends/comprenc.py	2019-01-15 20:37:11.000000000 +0000
@@ -12,8 +12,8 @@ from .common import AbstractBackend, Cor
 from ..common import ThawError, freeze_basic_mapping, thaw_basic_mapping
 from ..inherit_docstrings import (copy_ancestor_docstring, prepend_ancestor_docstring,
                                   ABCDocstMeta)
-from Crypto.Cipher import AES
-from Crypto.Util import Counter
+import cryptography.hazmat.primitives.ciphers as crypto_ciphers
+import cryptography.hazmat.backends as crypto_backends
 import bz2
 import hashlib
 import hmac
@@ -30,14 +30,28 @@ HMAC_SIZE = 32
 # Used only by adm.py
 UPGRADE_MODE=False
 
+crypto_backend = crypto_backends.default_backend()
+
 def sha256(s):
     return hashlib.sha256(s).digest()
 
-def aes_cipher(key):
+def aes_encryptor(key):
+    '''Return AES-CTR encryptor for *key*'''
+
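+    # nonce=bytes(16) reproduces the old pycrypto behaviour
+    # (Counter.new(128, initial_value=0)): the CTR counter starts at zero.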
+    cipher = crypto_ciphers.Cipher(
+        crypto_ciphers.algorithms.AES(key),
+        crypto_ciphers.modes.CTR(nonce=bytes(16)),
+        backend=crypto_backend)
+    return cipher.encryptor()
+
+def aes_decryptor(key):
+    '''Return AES-CTR decryptor for *key*'''
 
-    return AES.new(key, AES.MODE_CTR,
-                   counter=Counter.new(128, initial_value=0))
+    cipher = crypto_ciphers.Cipher(
+        crypto_ciphers.algorithms.AES(key),
+        crypto_ciphers.modes.CTR(nonce=bytes(16)),
+        backend=crypto_backend)
+    return cipher.decryptor()
 
 class ComprencBackend(AbstractBackend, metaclass=ABCDocstMeta):
     '''
@@ -160,7 +174,8 @@ class ComprencBackend(AbstractBackend, m
             raise CorruptedObjectError('Object content does not match its key (%s vs %s)'
                                        % (stored_key, key))
 
-        buf = aes_cipher(meta_key).decrypt(meta_buf)
+        decryptor = aes_decryptor(meta_key)
+        buf = decryptor.update(meta_buf) + decryptor.finalize()
         meta = thaw_basic_mapping(buf)
         if UPGRADE_MODE:
             meta['needs_reupload'] = update_required
@@ -246,9 +261,10 @@ class ComprencBackend(AbstractBackend, m
             nonce = struct.pack('<d', time.time()) + key.encode('utf-8')
             meta_key = sha256(self.passphrase + nonce + b'meta')
             data_key = sha256(self.passphrase + nonce)
+            encryptor = aes_encryptor(meta_key)
             meta_raw['encryption'] = 'AES_v2'
             meta_raw['nonce'] = nonce
-            meta_raw['data'] = aes_cipher(meta_key).encrypt(meta_buf)
+            meta_raw['data'] = encryptor.update(meta_buf) + encryptor.finalize()
             meta_raw['object_id'] = key
             meta_raw['signature'] = checksum_basic_mapping(meta_raw, meta_key)
         else:
@@ -265,10 +281,6 @@ class ComprencBackend(AbstractBackend, m
         return fh
 
     @copy_ancestor_docstring
-    def clear(self):
-        return self.backend.clear()
-
-    @copy_ancestor_docstring
     def contains(self, key):
         return self.backend.contains(key)
 
@@ -313,7 +325,8 @@ class ComprencBackend(AbstractBackend, m
                 meta_buf = freeze_basic_mapping(meta_old)
             else:
                 meta_buf = freeze_basic_mapping(metadata)
-            meta_raw['data'] = aes_cipher(meta_key).encrypt(meta_buf)
+            encryptor = aes_encryptor(meta_key)
+            meta_raw['data'] = encryptor.update(meta_buf) + encryptor.finalize()
             meta_raw['object_id'] = dest
             meta_raw['signature'] = checksum_basic_mapping(meta_raw, meta_key)
         elif metadata is None:
@@ -493,7 +506,7 @@ class EncryptFilter(object):
         self.fh = fh
         self.obj_size = 0
         self.closed = False
-        self.cipher = aes_cipher(key)
+        self.encryptor = aes_encryptor(key)
         self.hmac = hmac.new(key, digestmod=hashlib.sha256)
 
     def write(self, data):
@@ -512,7 +525,7 @@ class EncryptFilter(object):
 
         buf = struct.pack(b'<I', len(data)) + data
         self.hmac.update(buf)
-        buf2 = self.cipher.encrypt(buf)
+        buf2 = self.encryptor.update(buf)
         assert len(buf2) == len(buf)
         self.fh.write(buf2)
         self.obj_size += len(buf2)
@@ -526,7 +539,7 @@ class EncryptFilter(object):
             buf = struct.pack(b'<I', 0)
             self.hmac.update(buf)
             buf += self.hmac.digest()
-            buf2 = self.cipher.encrypt(buf)
+            buf2 = self.encryptor.update(buf)
             assert len(buf) == len(buf2)
             self.fh.write(buf2)
             self.obj_size += len(buf2)
@@ -567,7 +580,7 @@ class DecryptFilter(InputFilter):
         self.remaining = 0 # Remaining length of current packet
         self.metadata = metadata
         self.hmac_checked = False
-        self.cipher = aes_cipher(key)
+        self.decryptor = aes_decryptor(key)
         self.hmac = hmac.new(key, digestmod=hashlib.sha256)
 
     def _read_and_decrypt(self, size):
@@ -580,13 +593,11 @@ class DecryptFilter(InputFilter):
         if not buf:
             raise CorruptedObjectError('Premature end of stream.')
 
-        # Work around https://bugs.launchpad.net/pycrypto/+bug/1256172
-        # cipher.decrypt refuses to work with anything but bytes
         if not isinstance(buf, bytes):
             buf = bytes(buf)
 
         len_ = len(buf)
-        buf = self.cipher.decrypt(buf)
+        buf = self.decryptor.update(buf)
         assert len(buf) == len_
 
         return buf
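
For reference, a minimal round-trip with the python-cryptography CTR
primitives used above (toy all-zero key for illustration only; S3QL derives
its real keys via SHA-256 as shown in the hunks above):

    from cryptography.hazmat.backends import default_backend
    from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes

    key = bytes(32)  # toy key, for illustration only
    enc = Cipher(algorithms.AES(key), modes.CTR(nonce=bytes(16)),
                 backend=default_backend()).encryptor()
    ct = enc.update(b'hello world') + enc.finalize()

    dec = Cipher(algorithms.AES(key), modes.CTR(nonce=bytes(16)),
                 backend=default_backend()).decryptor()
    assert dec.update(ct) + dec.finalize() == b'hello world'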
diff -pruN 2.33+dfsg-1/src/s3ql/backends/gs.py 3.0+dfsg-1/src/s3ql/backends/gs.py
--- 2.33+dfsg-1/src/s3ql/backends/gs.py	2018-12-28 13:08:31.000000000 +0000
+++ 3.0+dfsg-1/src/s3ql/backends/gs.py	2019-02-03 14:38:52.000000000 +0000
@@ -7,35 +7,152 @@ This work can be distributed under the t
 '''
 
 from ..logging import logging, QuietError # Ensure use of custom logger class
-from . import s3c
-from .s3c import C_DAY_NAMES, C_MONTH_NAMES, HTTPError, S3Error
-from .common import AuthenticationError, retry, NoSuchObject
+from .common import (AbstractBackend, NoSuchObject, retry, AuthorizationError,
+                     AuthenticationError, DanglingStorageURLError,
+                     get_proxy, get_ssl_context, CorruptedObjectError,
+                     checksum_basic_mapping)
 from ..common import OAUTH_CLIENT_ID, OAUTH_CLIENT_SECRET
-from ..inherit_docstrings import copy_ancestor_docstring
-from dugong import CaseInsensitiveDict, HTTPConnection
-from urllib.parse import urlencode
+from .. import BUFSIZE
+from ..inherit_docstrings import (copy_ancestor_docstring, prepend_ancestor_docstring,
+                                  ABCDocstMeta)
+from dugong import (HTTPConnection, is_temp_network_error, BodyFollowing, CaseInsensitiveDict,
+                    ConnectionClosed)
+from base64 import b64encode, b64decode
+from itertools import count
+from ast import literal_eval
+from argparse import Namespace
+
+import hashlib
+import urllib.parse
 import re
+import tempfile
+import os
+import dugong
 import json
 import threading
-import time
+import ssl
+from typing import Optional, Dict, Any, Tuple
+
+try:
+    import google.auth as g_auth
+except ImportError:
+    g_auth = None
 
-# Pylint goes berserk with false positives
-#pylint: disable=E1002,E1101,W0201
 
 log = logging.getLogger(__name__)
 
-class Backend(s3c.Backend):
-    """A backend to store data in Google Storage
+# Used only by adm.py
+UPGRADE_MODE=False
+
+
+class ServerResponseError(Exception):
+    '''Raised if the server response cannot be parsed.
+
+    For HTTP errors (i.e., non-2xx response codes), RequestError should
+    always be used instead (since in that case the response body cannot
+    be expected to have any specific format).
+    '''
+
+    def __init__(self, resp: dugong.HTTPResponse, error: str,
+                 body: str):
+        self.resp = resp
+        self.error = error
+        self.body = body
+
+    def __str__(self):
+        return '<ServerResponseError: %s>' % self.error
+
+
+class RequestError(Exception):
+    '''
+    An error returned by the server.
+    '''
+
+    def __init__(self, code: int, reason: str, message: Optional[str] = None,
+                 body: Optional[str] = None):
+        super().__init__()
+        self.code = code
+        self.reason = reason
+        self.message = message
+        self.body = body
+
+    def __str__(self) -> str:
+        if self.message:
+            return '<RequestError, code=%d, reason=%r, message=%r>' % (
+                self.code, self.reason, self.message)
+        elif self.body:
+            return '<RequestError, code=%d, reason=%r, with body data>' % (
+                self.code, self.reason)
+        else:
+            return '<RequestError, code=%d, reason=%r>' % (
+                self.code, self.reason)
+
+
+class GAuthHTTPRequestor:
+    '''Carries out HTTP requests for google.auth
+
+    Implements https://google-auth.readthedocs.io/en/latest/reference/google.auth.transport.html#google.auth.transport.Request
+    '''
+
+    def __init__(self, ssl_context: ssl.SSLContext):
+        self.ssl_context = ssl_context
+        self.proxy = get_proxy(ssl=False)
+        self.ssl_proxy = get_proxy(ssl=True)
+        self.conn_pool = dict()  # type: Dict[Tuple[str,int], HTTPConnection]
+
+    def __call__(self, url: str, method: str = 'GET', body=None,
+                  headers=None, timeout=None):
+
+        # https://github.com/googleapis/google-auth-library-python/issues/318
+        if body is not None and not isinstance(body, bytes):
+            body = str(body).encode('ascii')
+
+        if timeout is not None:
+            raise ValueError('*timeout* argument is not supported')
 
-    This class uses standard HTTP connections to connect to GS.
+        hit = re.match(r'^(https?)://([^:/]+)(?::(\d+))?(.*)$', url)
+        if not hit:
+            raise ValueError('Unsupported URL: ' + url)
+
+        if hit.group(1) == 'https':
+            ssl_context = self.ssl_context
+            proxy = self.ssl_proxy
+        else:
+            ssl_context = None
+            proxy = self.proxy
+        hostname = hit.group(2)
+        if hit.group(3):
+            port = int(hit.group(3))
+        elif ssl_context:
+            port = 443
+        else:
+            port = 80
+
+        path = hit.group(4)
 
-    The backend guarantees immediate get consistency and eventual list
-    consistency.
-    """
-
-    xml_ns_prefix = '{http://doc.s3.amazonaws.com/2006-03-01}'
-    known_options = (s3c.Backend.known_options
-                     - {'dumb-copy', 'disable-expect100' })
+        try:
+            conn = self.conn_pool[(hostname, port)]
+        except KeyError:
+            conn = HTTPConnection(hostname, port, proxy=proxy,
+                                  ssl_context=ssl_context)
+            self.conn_pool[(hostname, port)] = conn
+
+        try:
+            conn.send_request(method, path, headers=headers, body=body)
+            resp = conn.read_response()
+        except (dugong.ConnectionClosed, dugong.InvalidResponse, dugong.UnsupportedResponse,
+                dugong.ConnectionTimedOut, dugong.HostnameNotResolvable,
+                dugong.DNSUnavailable, ssl.SSLError) as exc:
+            raise g_auth.exceptions.TransportError(exc)
+
+        return Namespace(status=resp.status, headers=resp.headers,
+                         data=conn.readall())
+
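+# Usage sketch (illustration only): google.auth drives this requestor itself,
+# e.g. credentials.refresh(GAuthHTTPRequestor(ssl_context)) issues the token
+# request through the connection pool above.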
+
+class Backend(AbstractBackend, metaclass=ABCDocstMeta):
+    """A backend to store data in Google Storage"""
+
+    known_options = {'ssl-ca-path', 'tcp-timeout'}
 
     # We don't want to request an access token for each instance,
     # because there is a limit on the total number of valid tokens.
@@ -43,91 +160,385 @@ class Backend(s3c.Backend):
     # access tokens.
     access_token = dict()
     _refresh_lock = threading.Lock()
+    adc = None
 
     def __init__(self, options):
-        super().__init__(options)
+        super().__init__()
 
-        self.use_oauth2 = (options.backend_login == 'oauth2')
+        self.ssl_context = get_ssl_context(
+            options.backend_options.get('ssl-ca-path', None)) # type: Optional[ssl.SSLContext]
+        self.options = options.backend_options  # type: Dict[str, str]
+        self.proxy = get_proxy(ssl=True) # type: str
+        self.login = options.backend_login  # type: str
+        self.refresh_token = options.backend_password # type: str
+
+        if self.login == 'adc':
+            if g_auth is None:
+                raise QuietError('ADC authentication requires the google.auth module')
+            elif self.adc is None:
+                requestor = GAuthHTTPRequestor(self.ssl_context)
+                try:
+                    credentials, _ = g_auth.default(
+                        request=requestor,
+                        scopes=['https://www.googleapis.com/auth/devstorage.full_control'])
+                except g_auth.exceptions.DefaultCredentialsError as exc:
+                    raise QuietError('ADC found no valid credential sources: ' + str(exc))
+                type(self).adc = (credentials, requestor)
+        elif self.login != 'oauth2':
+            raise QuietError("Google Storage backend requires OAuth2 or ADC authentication")
 
-        self.options['disable-expect100'] = True
-        if self.use_oauth2:
-            self.hdr_prefix = 'x-goog-'
-
-    @staticmethod
-    def _parse_storage_url(storage_url, ssl_context):
         # Special case for unit testing against local mock server
         hit = re.match(r'^gs://!unittest!'
                        r'([^/:]+)' # Hostname
                        r':([0-9]+)' # Port
                        r'/([^/]+)' # Bucketname
                        r'(?:/(.*))?$', # Prefix
-                       storage_url)
+                       options.storage_url)
         if hit:
-            hostname = hit.group(1)
-            port = int(hit.group(2))
-            bucket_name = hit.group(3)
-            prefix = hit.group(4) or ''
-            return (hostname, port, bucket_name, prefix)
+            self.hostname = hit.group(1)
+            self.port = int(hit.group(2))
+            self.bucket_name = hit.group(3)
+            self.prefix = hit.group(4) or ''
+        else:
+            hit = re.match(r'^gs://([^/]+)(?:/(.*))?$', options.storage_url)
+            if not hit:
+                raise QuietError('Invalid storage URL', exitcode=2)
 
-        hit = re.match(r'^gs://([^/]+)(?:/(.*))?$', storage_url)
-        if not hit:
-            raise QuietError('Invalid storage URL', exitcode=2)
+            self.bucket_name = hit.group(1)
+            self.hostname = 'www.googleapis.com'
+            self.prefix = hit.group(2) or ''
+            self.port = 443
 
-        bucket_name = hit.group(1)
+        self.conn = self._get_conn()
 
-        # Dots in the bucket cause problems with SSL certificate validation,
-        # because server certificate is for *.commondatastorage.googleapis.com
-        # (which does not match e.g. a.b.commondatastorage.googleapis.com)
-        if '.' in bucket_name and ssl_context:
-            hostname = 'commondatastorage.googleapis.com'
+        # Check if bucket exists and/or credentials are correct
+        path = '/storage/v1/b/' + urllib.parse.quote(self.bucket_name, safe='')
+        try:
+            resp = self._do_request('GET', path)
+        except RequestError as exc:
+            if exc.code == 404:
+                raise DanglingStorageURLError("Bucket '%s' does not exist" %
+                                              self.bucket_name)
+            exc = _map_request_error(exc, None)
+            if exc:
+                raise exc
+            raise
+        self._parse_json_response(resp)
+
+    @property
+    @copy_ancestor_docstring
+    def has_native_rename(self):
+        return False
+
+    @copy_ancestor_docstring
+    def reset(self):
+        if (self.conn is not None and
+            (self.conn.response_pending() or self.conn._out_remaining)):
+            log.debug('Resetting state of http connection %d', id(self.conn))
+            self.conn.disconnect()
+
+    def _get_conn(self):
+        '''Return connection to server'''
+
+        conn = HTTPConnection(self.hostname, self.port, proxy=self.proxy,
+                               ssl_context=self.ssl_context)
+        conn.timeout = int(self.options.get('tcp-timeout', 20))
+        return conn
+
+
+    @copy_ancestor_docstring
+    def is_temp_failure(self, exc): #IGNORE:W0613
+        if is_temp_network_error(exc) or isinstance(exc, ssl.SSLError):
+            # We probably can't use the connection anymore, so use this
+            # opportunity to reset it
+            self.conn.reset()
+            return True
+
+        elif isinstance(exc, RequestError) and (
+                500 <= exc.code <= 599 or exc.code == 408):
+            return True
+
+        # Not clear at all what is happening here, but when in doubt we retry
+        elif isinstance(exc, ServerResponseError):
+            return True
+
+        return False
+
+    def _assert_empty_response(self, resp):
+        '''Assert that current response body is empty'''
+
+        buf = self.conn.read(2048)
+        if not buf:
+            return # expected
+
+        body = '\n'.join('%s: %s' % x for x in resp.headers.items())
+
+        hit = re.search(r'; charset="(.+)"$',
+                        resp.headers.get('Content-Type', ''),
+                        re.IGNORECASE)
+        if hit:
+            charset = hit.group(1)
+            body += '\n' + buf.decode(charset, errors='backslashreplace')
+
+        log.warning('Expected empty response body, but got data - this is odd.')
+        raise ServerResponseError(resp, error='expected empty response',
+                                  body=body)
+
+    @retry
+    @copy_ancestor_docstring
+    def delete(self, key, force=False, is_retry=False):
+        log.debug('started with %s', key)
+        path = '/storage/v1/b/%s/o/%s' % (
+            urllib.parse.quote(self.bucket_name, safe=''),
+            urllib.parse.quote(self.prefix + key, safe=''))
+        try:
+            resp = self._do_request('DELETE', path)
+            self._assert_empty_response(resp)
+        except RequestError as exc:
+            exc = _map_request_error(exc, key)
+            if isinstance(exc, NoSuchObject) and (force or is_retry):
+                pass
+            elif exc:
+                raise exc
+            else:
+                raise
+
+
+    @copy_ancestor_docstring
+    def list(self, prefix=''):
+        prefix = self.prefix + prefix
+        strip = len(self.prefix)
+        page_token = None
+        while True:
+            (els, page_token) = self._list_page(prefix, page_token)
+            for el in els:
+                yield el[strip:]
+            if page_token is None:
+                break
+
+    @retry
+    def _list_page(self, prefix, page_token=None, batch_size=1000):
+
+        # Limit maximum number of results since we read everything
+        # into memory (because Python JSON doesn't have a streaming API)
+        query_string = { 'prefix': prefix, 'maxResults': str(batch_size) }
+        if page_token:
+            query_string['pageToken'] = page_token
+
+        path = '/storage/v1/b/%s/o' % (
+            urllib.parse.quote(self.bucket_name, safe=''),)
+
+        try:
+            resp = self._do_request('GET', path, query_string=query_string)
+        except RequestError as exc:
+            exc = _map_request_error(exc, None)
+            if exc:
+                raise exc
+            raise
+        json_resp = self._parse_json_response(resp)
+        page_token = json_resp.get('nextPageToken', None)
+
+        if 'items' not in json_resp:
+            assert page_token is None
+            return ((), None)
+
+        return ([ x['name'] for x in json_resp['items'] ], page_token)
+
+
+    @retry
+    @copy_ancestor_docstring
+    def lookup(self, key):
+        log.debug('started with %s', key)
+        return _unwrap_user_meta(self._get_gs_meta(key))
+
+    def _get_gs_meta(self, key):
+
+        path = '/storage/v1/b/%s/o/%s' % (
+            urllib.parse.quote(self.bucket_name, safe=''),
+            urllib.parse.quote(self.prefix + key, safe=''))
+        try:
+            resp = self._do_request('GET', path)
+        except RequestError as exc:
+            exc = _map_request_error(exc, key)
+            if exc:
+                raise exc
+            raise
+        return self._parse_json_response(resp)
+
+    @retry
+    @copy_ancestor_docstring
+    def get_size(self, key):
+        json_resp = self._get_gs_meta(key)
+        return json_resp['size']
+
+    @retry
+    @copy_ancestor_docstring
+    def open_read(self, key):
+        gs_meta = self._get_gs_meta(key)
+
+        path = '/storage/v1/b/%s/o/%s' % (
+            urllib.parse.quote(self.bucket_name, safe=''),
+            urllib.parse.quote(self.prefix + key, safe=''))
+        try:
+            resp = self._do_request('GET', path, query_string={'alt': 'media'})
+        except RequestError as exc:
+            exc = _map_request_error(exc, key)
+            if exc:
+                raise exc
+            raise
+
+        return ObjectR(key, resp, self, gs_meta)
+
+    @prepend_ancestor_docstring
+    def open_write(self, key, metadata=None, is_compressed=False):
+        """
+        The returned object will buffer all data and only start the upload
+        when its `close` method is called.
+        """
+
+        return ObjectW(key, self, metadata)
+
+    @retry
+    def write_fh(self, fh, key: str, md5: bytes,
+                 metadata: Optional[Dict[str, Any]] = None,
+                 size: Optional[int] = None):
+        '''Write data from byte stream *fh* into *key*.
+
+        *fh* must be seekable. If *size* is None, *fh* must also implement
+        `fh.fileno()` so that the size can be determined through `os.fstat`.
+
+        *md5* must be the (binary) md5 checksum of the data.
+        '''
+
+        metadata = json.dumps({
+            'metadata': _wrap_user_meta(metadata if metadata else {}),
+            'md5Hash': b64encode(md5).decode(),
+            'name': self.prefix + key,
+        })
+
+        # Google Storage uses Content-Length to read the object data, so we
+        # don't have to worry about the boundary occurring in the object data.
+        boundary = 'foo_bar_baz'
+        headers = CaseInsensitiveDict()
+        headers['Content-Type'] = 'multipart/related; boundary=%s' % boundary
+
+        body_prefix = '\n'.join(('--' + boundary,
+                                 'Content-Type: application/json; charset=UTF-8',
+                                 '', metadata,
+                                 '--' + boundary,
+                                 'Content-Type: application/octet-stream',
+                                 '', '')).encode()
+        body_suffix = ('\n--%s--\n' % boundary).encode()
+
+        body_size = len(body_prefix) + len(body_suffix)
+        if size is not None:
+            body_size += size
         else:
-            hostname = '%s.commondatastorage.googleapis.com' % bucket_name
+            body_size += os.fstat(fh.fileno()).st_size
+
+        path = '/upload/storage/v1/b/%s/o' % (
+            urllib.parse.quote(self.bucket_name, safe=''),)
+        query_string = {'uploadType': 'multipart'}
+        try:
+            resp = self._do_request('POST', path, query_string=query_string,
+                                    headers=headers, body=BodyFollowing(body_size))
+        except RequestError as exc:
+            exc = _map_request_error(exc, key)
+            if exc:
+                raise exc
+            raise
 
-        prefix = hit.group(2) or ''
-        port = 443 if ssl_context else 80
-        return (hostname, port, bucket_name, prefix)
+        assert resp.status == 100
+        fh.seek(0)
 
-    def __str__(self):
-        return 'Google Storage bucket %s, prefix %s' % (self.bucket_name, self.prefix)
+        md5_run = hashlib.md5()
+        try:
+            self.conn.write(body_prefix)
+            while True:
+                buf = fh.read(BUFSIZE)
+                if not buf:
+                    break
+                self.conn.write(buf)
+                md5_run.update(buf)
+            self.conn.write(body_suffix)
+        except ConnectionClosed:
+            # Server closed connection while we were writing body data -
+            # but we may still be able to read an error response
+            try:
+                resp = self.conn.read_response()
+            except ConnectionClosed: # No server response available
+                pass
+            else:
+                if resp.status >= 400: # Got error response
+                    exc = self._parse_error_response(resp)
+                    raise _map_request_error(exc, key) or exc
+                log.warning('Server broke connection during upload, but signaled '
+                            '%d %s', resp.status, resp.reason)
+            # Re-raise first ConnectionClosed exception
+            raise
+
+        if md5_run.digest() != md5:
+            raise ValueError('md5 passed to write_fh does not match fh data')
+
+        resp = self.conn.read_response()
+        if resp.status != 200:
+            exc = self._parse_error_response(resp)
+            raise _map_request_error(exc, key) or exc
+        self._parse_json_response(resp)
 
-    def _authorize_request(self, method, path, headers, subres, query_string):
-        '''Add authorization information to *headers*'''
+    @copy_ancestor_docstring
+    def update_meta(self, key, metadata):
 
-        if not self.use_oauth2:
-            return super()._authorize_request(method, path, headers,
-                                              subres, query_string)
-
-        headers['Authorization'] = 'Bearer ' + self.access_token[self.password]
-
-        now = time.gmtime()
-        headers['Date'] = ('%s, %02d %s %04d %02d:%02d:%02d GMT'
-                           % (C_DAY_NAMES[now.tm_wday],
-                              now.tm_mday,
-                              C_MONTH_NAMES[now.tm_mon - 1],
-                              now.tm_year, now.tm_hour,
-                              now.tm_min, now.tm_sec))
-
-    # This method performs a different kind of HTTP request than the methods
-    # decorated with `retry` that it is called by, so in theory it should do its
-    # own retry handling (perhaps with a new `retry_on` decorator that allows to
-    # specify a custom `is_temp_failure` function instead of calling the
-    # instance method). However, in practice there is currently no difference in
-    # the set of exceptions that are considered temporary when retrieving an
-    # access token, and the set of exceptions checked for in the
-    # `_is_temp_failure` method. Therefore, for now we avoid the additional
-    # complexity of custom retry handling and rely on the @retry decorator of
-    # the caller to handle temporary errors. This should be kept in mind
-    # when modifying either method.
+        headers = CaseInsensitiveDict()
+        headers['Content-Type'] = 'application/json; charset="utf-8"'
+        body = json.dumps({ 'metadata': _wrap_user_meta(metadata),
+                            'acl': [] }).encode()
+
+        path = '/storage/v1/b/%s/o/%s' % (
+            urllib.parse.quote(self.bucket_name, safe=''),
+            urllib.parse.quote(self.prefix + key, safe=''))
+        try:
+            resp = self._do_request('PUT', path, headers=headers, body=body)
+        except RequestError as exc:
+            exc = _map_request_error(exc, key)
+            if exc:
+                raise exc
+            raise
+
+        self._parse_json_response(resp)
+
+
+    @copy_ancestor_docstring
+    def close(self):
+        self.conn.disconnect()
+
+    def __str__(self):
+        return '<gs.Backend, name=%s, prefix=%s>' % (self.bucket_name, self.prefix)
+
+    # This method uses a different HTTP connection than its callers, but shares
+    # the same retry logic. It is therefore possible that errors with this
+    # connection cause the other connection to be reset - but this should not
+    # be a problem, because there can't be a pending request if we don't have
+    # a valid access token.
     def _get_access_token(self):
         log.info('Requesting new access token')
 
+        if self.adc:
+            try:
+                self.adc[0].refresh(self.adc[1])
+            except g_auth.exceptions.RefreshError as exc:
+                raise AuthenticationError(
+                    'Failed to refresh credentials: '  + str(exc))
+            self.access_token[self.refresh_token] = self.adc[0].token
+            return
+
         headers = CaseInsensitiveDict()
         headers['Content-Type'] = 'application/x-www-form-urlencoded; charset=utf-8'
 
-        body = urlencode({'client_id': OAUTH_CLIENT_ID,
-                          'client_secret': OAUTH_CLIENT_SECRET,
-                          'refresh_token': self.password,
-                          'grant_type': 'refresh_token' })
+        body = urllib.parse.urlencode({
+            'client_id': OAUTH_CLIENT_ID,
+            'client_secret': OAUTH_CLIENT_SECRET,
+            'refresh_token': self.refresh_token,
+            'grant_type': 'refresh_token' })
 
         conn = HTTPConnection('accounts.google.com', 443, proxy=self.proxy,
                               ssl_context=self.ssl_context)
@@ -136,62 +547,131 @@ class Backend(s3c.Backend):
             conn.send_request('POST', '/o/oauth2/token', headers=headers,
                               body=body.encode('utf-8'))
             resp = conn.read_response()
+            json_resp = self._parse_json_response(resp, conn)
 
             if resp.status > 299 or resp.status < 200:
-                raise HTTPError(resp.status, resp.reason, resp.headers)
-
-            content_type = resp.headers.get('Content-Type', None)
-            if content_type:
-                hit = re.match(r'application/json(?:; charset="(.+)")?$',
-                               resp.headers['Content-Type'], re.IGNORECASE)
+                assert 'error' in json_resp
+            if 'error' in json_resp:
+                raise AuthenticationError(json_resp['error'])
             else:
-                hit = None
+                self.access_token[self.refresh_token] = json_resp['access_token']
+        finally:
+            conn.disconnect()
 
-            if not hit:
-                log.error('Unexpected server reply when refreshing access token:\n%s',
-                          self._dump_response(resp))
-                raise RuntimeError('Unable to parse server response')
-
-            charset = hit.group(1) or 'utf-8'
-            body = conn.readall().decode(charset)
-            resp_json = json.loads(body)
-
-            if not isinstance(resp_json, dict):
-                log.error('Invalid json server response. Expected dict, got:\n%s', body)
-                raise RuntimeError('Unable to parse server response')
-
-            if 'error' in resp_json:
-                raise AuthenticationError(resp_json['error'])
-
-            if 'access_token' not in resp_json:
-                log.error('Unable to find access token in server response:\n%s', body)
-                raise RuntimeError('Unable to parse server response')
+    def _parse_error_response(self, resp, conn=None):
+        '''Return exception corresponding to server response.'''
+
+        try:
+            json_resp = self._parse_json_response(resp, conn)
+        except ServerResponseError as exc:
+            # Error messages may come from intermediate proxies and thus may not
+            # be in JSON.
+            log.debug('Server response not JSON - intermediate proxy failure?')
+            return RequestError(code=resp.status, reason=resp.reason,
+                                body=exc.body)
 
-            self.access_token[self.password] = resp_json['access_token']
+        try:
+            message = json_resp['error']['message']
+            body = None
+        except KeyError:
+            log.warning('Did not find error.message element in JSON '
+                        'error response. This is odd.')
+            message = None
+            body = str(json_resp)
+
+        return RequestError(code=resp.status, reason=resp.reason, message=message,
+                            body=body)
+
+
+    def _parse_json_response(self, resp, conn=None):
+
+        if conn is None:
+            conn = self.conn
+
+        # Note that even though the final server backend may guarantee to always
+        # deliver a JSON document body with a detailed error message, we may
+        # also get errors from intermediate proxies.
+        content_type = resp.headers.get('Content-Type', None)
+        if content_type:
+            hit = re.match(r'application/json(?:; charset="(.+)")?$',
+                           resp.headers['Content-Type'], re.IGNORECASE)
+        if not content_type or not hit:
+            raise ServerResponseError(resp, error='expected json, got %s' % content_type,
+                                      body=self._dump_body(resp))
+        charset = hit.group(1) or 'utf-8'
 
-        finally:
-            conn.disconnect()
+        body = conn.readall()
+        try:
+            body_text = body.decode(charset)
+        except UnicodeDecodeError as exc:
+            log.warning('Unable to decode JSON response as Unicode (%s) '
+                        '- this is odd.', str(exc))
+            raise ServerResponseError(resp, error=str(exc),
+                                      body=body.decode(charset, errors='backslashreplace'))
+
+        try:
+            resp_json = json.loads(body_text)
+        except json.JSONDecodeError as exc:
+            log.warning('Unable to decode JSON response (%s) - this is odd.', str(exc))
+            raise ServerResponseError(resp, error=str(exc), body=body_text)
+
+        return resp_json
+
+    def _dump_body(self, resp):
+        '''Return truncated string representation of response body.'''
+
+        is_truncated = False
+        try:
+            body = self.conn.read(2048)
+            if self.conn.read(1):
+                is_truncated = True
+                self.conn.discard()
+        except dugong.UnsupportedResponse:
+            log.warning('Unsupported response, trying to retrieve data from raw socket!')
+            body = self.conn.read_raw(2048)
+            self.conn.close()
+
+        hit = re.search(r'; charset="(.+)"$',
+                        resp.headers.get('Content-Type', ''),
+                        re.IGNORECASE)
+        if hit:
+            charset = hit.group(1)
+        else:
+            charset = 'utf-8'
+
+        body = body.decode(charset, errors='backslashreplace')
+
+        if is_truncated:
+            body += '... [truncated]'
+
+        return body
+
+    def _do_request(self, method, path, query_string=None, headers=None, body=None):
+        '''Send request, read and return response object'''
 
-    def _do_request(self, method, path, subres=None, query_string=None,
-                    headers=None, body=None):
+        log.debug('started with %s %s, qs=%s', method, path, query_string)
 
-        # When not using OAuth2, fall-through.
-        if not self.use_oauth2:
-            return super()._do_request(method, path, subres=subres, headers=headers,
-                                       query_string=query_string, body=body)
+        if headers is None:
+            headers = CaseInsensitiveDict()
+
+        expect100 = isinstance(body, BodyFollowing)
+        headers['host'] = self.hostname
+        if query_string:
+            s = urllib.parse.urlencode(query_string, doseq=True)
+            path += '?%s' % s
 
         # If we have an access token, try to use it.
-        token = self.access_token.get(self.password, None)
+        token = self.access_token.get(self.refresh_token, None)
         if token is not None:
-            try:
-                return super()._do_request(method, path, subres=subres, headers=headers,
-                                           query_string=query_string, body=body)
-            except HTTPError as exc:
-                if exc.status != 401:
-                    raise
-            except S3Error as exc:
-                if exc.code != 'AuthenticationRequired':
-                    raise
+            headers['Authorization'] = 'Bearer ' + token
+            self.conn.send_request(method, path, body=body, headers=headers,
+                                   expect100=expect100)
+            resp = self.conn.read_response()
+            if ((expect100 and resp.status == 100) or
+                (not expect100 and 200 <= resp.status <= 299)):
+                return resp
+            elif resp.status != 401:
+                raise self._parse_error_response(resp)
 
         # If we reach this point, then the access token must have
         # expired, so we try to get a new one. We use a lock to prevent
@@ -199,22 +679,23 @@ class Backend(s3c.Backend):
         with self._refresh_lock:
             # Don't refresh if another thread has already done so while
             # we waited for the lock.
-            if token is None or self.access_token.get(self.password, None) == token:
+            if token is None or self.access_token.get(self.refresh_token, None) == token:
                 self._get_access_token()
 
-        # Reset body, so we can resend the request with the new access token
-        if body and not isinstance(body, (bytes, bytearray, memoryview)):
-            body.seek(0)
-
         # Try request again. If this still fails, propagate the error
         # (because we have just refreshed the access token).
         # FIXME: We can't rely on this if e.g. the system hibernated
         # after refreshing the token, but before reaching this line.
-        return super()._do_request(method, path, subres=subres, headers=headers,
-                                   query_string=query_string, body=body)
+        headers['Authorization'] = 'Bearer ' + self.access_token[self.refresh_token]
+        self.conn.send_request(method, path, body=body, headers=headers,
+                               expect100=expect100)
+        resp = self.conn.read_response()
+        if ((expect100 and resp.status == 100) or
+            (not expect100 and 200 <= resp.status <= 299)):
+            return resp
+        else:
+            raise self._parse_error_response(resp)
 
-    # Overwrite, because Google Storage does not return errors after
-    # 200 OK.
     @retry
     @copy_ancestor_docstring
     def copy(self, src, dest, metadata=None):
@@ -224,17 +705,249 @@ class Backend(s3c.Backend):
             raise TypeError('*metadata*: expected dict or None, got %s' % type(metadata))
 
         headers = CaseInsensitiveDict()
-        headers[self.hdr_prefix + 'copy-source'] = \
-            '/%s/%s%s' % (self.bucket_name, self.prefix, src)
 
-        if metadata is None:
-            headers[self.hdr_prefix + 'metadata-directive'] = 'COPY'
+        if metadata is not None:
+            headers['Content-Type'] = 'application/json; charset="utf-8"'
+            body = json.dumps({'metadata': _wrap_user_meta(metadata)}).encode()
         else:
-            headers[self.hdr_prefix + 'metadata-directive'] = 'REPLACE'
-            self._add_meta_headers(headers, metadata)
+            body = None
+
+        path = '/storage/v1/b/%s/o/%s/rewriteTo/b/%s/o/%s' % (
+            urllib.parse.quote(self.bucket_name, safe=''),
+            urllib.parse.quote(self.prefix + src, safe=''),
+            urllib.parse.quote(self.bucket_name, safe=''),
+            urllib.parse.quote(self.prefix + dest, safe=''))
+        try:
+            resp = self._do_request('POST', path, headers=headers, body=body)
+        except RequestError as exc:
+            exc = _map_request_error(exc, src)
+            if exc:
+                raise exc
+            raise
+
+        json_resp = self._parse_json_response(resp)
+        assert json_resp['done']
+        assert 'rewriteToken' not in json_resp
+
+
+def _map_request_error(exc: RequestError, key: Optional[str]):
+    '''Map RequestError to more general exception if possible'''
+
+    if exc.code == 404 and key:
+        return NoSuchObject(key)
+    elif exc.message == 'Forbidden':
+        return AuthorizationError()
+    elif exc.message == 'Login Required':
+        return AuthenticationError()
+
+    return None
+
+
+def _wrap_user_meta(user_meta):
+
+    obj_meta = dict()
+    for (key, val) in user_meta.items():
+        if not isinstance(key, str):
+            raise TypeError('metadata keys must be str, not %s' % type(key))
+        if (not isinstance(val, (str, bytes, int, float, complex, bool))
+            and val is not None):
+            raise TypeError('value for key %s (%s) is not elementary' % (key, val))
+        if isinstance(val, (bytes, bytearray)):
+            val = b64encode(val)
+
+        obj_meta[key] = repr(val)
+
+    return obj_meta
+
+
+def _unwrap_user_meta(json_resp):
+    '''Extract user metadata from JSON object metadata'''
+
+    meta_raw = json_resp.get('metadata', None)
+    if meta_raw is None:
+        return {}
+
+    # Detect Legacy format.
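+    # Illustration (hypothetical values): legacy metadata looks like
+    #   {'format': 'raw2', 'md5': '...', '000': "'foo': 42, ", '001': "'bar': 7"}
+    # i.e. a repr()ed dict body split into numbered chunks.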
+    if (meta_raw.get('format', None) == 'raw2' and
+        'md5' in meta_raw and
+        all(key in ('format', 'md5') or re.match(r'^\d\d\d$', key)
+            for key in meta_raw.keys())):
+        parts = []
+        for i in count():
+            part = meta_raw.get('%03d' % i, None)
+            if part is None:
+                break
+            parts.append(part)
+        buf = ''.join(parts)
+        meta = literal_eval('{ %s }' % buf)
+        for (k,v) in meta.items():
+            if isinstance(v, bytes):
+                meta[k] = b64decode(v)
+
+        # TODO: Remove next block on next file system revision bump.
+        # Metadata MD5 headers were created by old S3QL versions where the
+        # Google Storage backend shared code with the S3C backend (which
+        # supports plain HTTP connections).  There's no need to validate them
+        # here since Google Storage always uses TLS. However, we retain the code
+        # for now since the metadata format was used to detect an old filesystem
+        # revision.
+        stored_md5 = meta_raw.get('md5', None)
+        new_md5 = b64encode(checksum_basic_mapping(meta)).decode('ascii')
+        if stored_md5 != new_md5:
+            if UPGRADE_MODE:
+                old_md5 = b64encode(UPGRADE_MODE(meta)).decode('ascii')
+                if stored_md5 == old_md5:
+                    meta['needs_reupload'] = True
+                else:
+                    raise CorruptedObjectError(
+                        'Metadata MD5 mismatch for %s (%s vs %s (old) or %s (new))'
+                        % (json_resp.get('name', None), stored_md5, old_md5, new_md5))
+            else:
+                raise CorruptedObjectError(
+                    'Metadata MD5 mismatch for %s (%s vs %s)'
+                    % (json_resp.get('name', None), stored_md5, new_md5))
+        elif UPGRADE_MODE:
+            meta['needs_reupload'] = False
+
+        return meta
 
+    meta = {}
+    for (k,v) in meta_raw.items():
         try:
-            self._do_request('PUT', '/%s%s' % (self.prefix, dest), headers=headers)
-            self.conn.discard()
-        except s3c.NoSuchKeyError:
-            raise NoSuchObject(src)
+            v2 = literal_eval(v)
+        except ValueError as exc:
+            raise CorruptedObjectError('Invalid metadata value: ' + str(exc))
+        if isinstance(v2, bytes):
+            meta[k] = b64decode(v2)
+        else:
+            meta[k] = v2
+
+    return meta
+
+
+class ObjectR(object):
+    '''A GS object open for reading'''
+
+    def __init__(self, key, resp, backend, gs_meta):
+        self.key = key
+        self.closed = False
+        self.md5_checked = False
+        self.backend = backend
+        self.resp = resp
+        self.metadata = _unwrap_user_meta(gs_meta)
+        self.md5_want = b64decode(gs_meta['md5Hash'])
+        self.md5 = hashlib.md5()
+
+    def read(self, size=None):
+        '''Read up to *size* bytes of object data
+
+        For integrity checking to work, this method has to be called until
+        it returns an empty string, indicating that all data has been read
+        (and verified).
+        '''
+
+        if size == 0:
+            return b''
+
+        # This may raise an exception, in which case we probably can't re-use
+        # the connection. However, we rely on the caller to still close the
+        # file-like object, so that we can do cleanup in close().
+        buf = self.backend.conn.read(size)
+        self.md5.update(buf)
+
+        # Check MD5 on EOF (size == None implies EOF)
+        if (not buf or size is None) and not self.md5_checked:
+            self.md5_checked = True
+            if self.md5_want != self.md5.digest():
+                log.warning('MD5 mismatch for %s: %s vs %s',
+                            self.key, b64encode(self.md5_want),
+                            b64encode(self.md5.digest()))
+                raise ServerResponseError(error='md5Hash mismatch',
+                                          body='<binary blob>',
+                                          resp=self.resp)
+
+        return buf
+
+    def __enter__(self):
+        return self
+
+    def __exit__(self, *a):
+        self.close()
+        return False
+
+    def close(self, checksum_warning=True):
+        '''Close object
+
+        If *checksum_warning* is true, this will generate a warning message if
+        the object has not been fully read (because in that case the MD5
+        checksum cannot be checked).
+        '''
+
+        if self.closed:
+            return
+        self.closed = True
+
+        # If we have not read all the data, close the entire
+        # connection (otherwise we lose synchronization)
+        if not self.md5_checked:
+            if checksum_warning:
+                log.warning("Object closed prematurely, can't check MD5, and have to "
+                            "reset connection")
+            self.backend.conn.disconnect()
+
+
+class ObjectW(object):
+    '''A GS object open for writing
+
+    All data is first cached in memory, upload only starts when
+    the close() method is called.
+    '''
+
+    def __init__(self, key, backend, metadata):
+        self.key = key
+        self.backend = backend
+        self.metadata = metadata
+        self.closed = False
+        self.obj_size = 0
+        self.md5 = hashlib.md5()
+
+        # According to http://docs.python.org/3/library/functions.html#open
+        # the buffer size is typically ~8 kB. We process data in much
+        # larger chunks, so buffering would only hurt performance.
+        self.fh = tempfile.TemporaryFile(buffering=0)
+
+    def write(self, buf):
+        '''Write object data'''
+
+        self.fh.write(buf)
+        self.md5.update(buf)
+        self.obj_size += len(buf)
+
+    def close(self):
+        '''Close object and upload data'''
+
+        if self.closed:
+            return
+
+        self.backend.write_fh(self.fh, self.key, self.md5.digest(),
+                              self.metadata, size=self.obj_size)
+        self.closed = True
+        self.fh.close()
+
+    def __enter__(self):
+        return self
+
+    def __exit__(self, *a):
+        self.close()
+        return False
+
+    def get_obj_size(self):
+        if not self.closed:
+            raise RuntimeError('Object must be closed first.')
+        return self.obj_size
+
+
+def md5sum_b64(buf):
+    '''Return base64 encoded MD5 sum'''
+
+    return b64encode(hashlib.md5(buf).digest()).decode('ascii')
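
For illustration, how user metadata survives the wrap/unwrap helpers above
(values are repr()ed, bytes values are base64-encoded first; assumes an
installed s3ql):

    from s3ql.backends.gs import _wrap_user_meta, _unwrap_user_meta

    meta = {'size': 42, 'hash': b'\x00\xff'}
    wrapped = _wrap_user_meta(meta)      # {'size': '42', 'hash': "b'AP8='"}
    assert _unwrap_user_meta({'metadata': wrapped}) == meta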
diff -pruN 2.33+dfsg-1/src/s3ql/backends/local.py 3.0+dfsg-1/src/s3ql/backends/local.py
--- 2.33+dfsg-1/src/s3ql/backends/local.py	2018-12-28 13:08:31.000000000 +0000
+++ 3.0+dfsg-1/src/s3ql/backends/local.py	2019-01-15 20:37:11.000000000 +0000
@@ -111,15 +111,6 @@ class Backend(AbstractBackend, metaclass
         return dest
 
     @copy_ancestor_docstring
-    def clear(self):
-        for name in os.listdir(self.prefix):
-            path = os.path.join(self.prefix, name)
-            if os.path.isdir(path):
-                shutil.rmtree(path)
-            else:
-                os.unlink(path)
-
-    @copy_ancestor_docstring
     def contains(self, key):
         path = self._key_to_path(key)
         try:
diff -pruN 2.33+dfsg-1/src/s3ql/backends/s3c.py 3.0+dfsg-1/src/s3ql/backends/s3c.py
--- 2.33+dfsg-1/src/s3ql/backends/s3c.py	2018-12-28 13:08:31.000000000 +0000
+++ 3.0+dfsg-1/src/s3ql/backends/s3c.py	2019-01-15 20:37:11.000000000 +0000
@@ -9,9 +9,8 @@ This work can be distributed under the t
 from ..logging import logging, QuietError # Ensure use of custom logger class
 from .. import BUFSIZE
 from .common import (AbstractBackend, NoSuchObject, retry, AuthorizationError,
-                     AuthenticationError, DanglingStorageURLError, retry_generator,
-                     get_proxy, get_ssl_context, CorruptedObjectError,
-                     checksum_basic_mapping)
+                     AuthenticationError, DanglingStorageURLError, get_proxy,
+                     get_ssl_context, CorruptedObjectError, checksum_basic_mapping)
 from ..inherit_docstrings import (copy_ancestor_docstring, prepend_ancestor_docstring,
                                   ABCDocstMeta)
 from io import BytesIO
@@ -131,15 +130,6 @@ class Backend(AbstractBackend, metaclass
         conn.timeout = int(self.options.get('tcp-timeout', 20))
         return conn
 
-    @staticmethod
-    def _tag_xmlns_uri(elem):
-        '''Extract the XML namespace (xmlns) URI from an element'''
-        if elem.tag[0] == '{':
-            uri, ignore, tag = elem.tag[1:].partition("}")
-        else:
-            uri = None
-        return uri
-
     # This method is also used implicitly for the retry handling of
     # `gs.Backend._get_access_token`. When modifying this method, do not forget
     # to check if this makes it unsuitable for use by `_get_access_token` (in
@@ -235,68 +225,57 @@ class Backend(AbstractBackend, metaclass
             else:
                 raise NoSuchObject(key)
 
-    @retry_generator
     @copy_ancestor_docstring
-    def list(self, prefix='', start_after=''):
-        log.debug('started with %s, %s', prefix, start_after)
-
-        keys_remaining = True
-
-        # Without this, a call to list('foo') would result
-        # in *prefix* being longer than *marker* - which causes
-        # trouble for some S3 implementions (minio).
-        if start_after:
-            marker = self.prefix + start_after
-        else:
-            marker = ''
+    def list(self, prefix=''):
         prefix = self.prefix + prefix
+        strip = len(self.prefix)
+        page_token = None
+        while True:
+            (els, page_token) = self._list_page(prefix, page_token)
+            for el in els:
+                yield el[strip:]
+            if page_token is None:
+                break
 
-        while keys_remaining:
-            log.debug('requesting with marker=%s', marker)
-
-            keys_remaining = None
-            resp = self._do_request('GET', '/', query_string={ 'prefix': prefix,
-                                                              'marker': marker,
-                                                              'max-keys': 1000 })
-
-            if not XML_CONTENT_RE.match(resp.headers['Content-Type']):
-                raise RuntimeError('unexpected content type: %s' %
-                                   resp.headers['Content-Type'])
+    @retry
+    def _list_page(self, prefix, page_token=None, batch_size=1000):
 
-            try:
-                itree = iter(ElementTree.iterparse(self.conn, events=("start", "end")))
-                (event, root) = next(itree)
+        # We can get at most 1000 keys at a time, so there's no need
+        # to bother with streaming.
+        query_string = { 'prefix': prefix, 'max-keys': str(batch_size) }
+        if page_token:
+            query_string['marker'] = page_token
+
+        resp = self._do_request('GET', '/', query_string=query_string)
+
+        if not XML_CONTENT_RE.match(resp.headers['Content-Type']):
+            raise RuntimeError('unexpected content type: %s' %
+                               resp.headers['Content-Type'])
 
-                root_xmlns_uri = self._tag_xmlns_uri(root)
-                if root_xmlns_uri is None:
-                    root_xmlns_prefix = ''
-                else:
-                    # Validate the XML namespace
-                    root_xmlns_prefix = '{%s}' % (root_xmlns_uri, )
-                    if root_xmlns_prefix != self.xml_ns_prefix:
-                        log.error('Unexpected server reply to list operation:\n%s',
-                                  self._dump_response(resp, body=None))
-                        raise RuntimeError('List response has %s as root tag, unknown namespace' % root.tag)
-
-                for (event, el) in itree:
-                    if event != 'end':
-                        continue
-
-                    if el.tag == root_xmlns_prefix + 'IsTruncated':
-                        keys_remaining = (el.text == 'true')
-
-                    elif el.tag == root_xmlns_prefix + 'Contents':
-                        marker = el.findtext(root_xmlns_prefix + 'Key')
-                        yield marker[len(self.prefix):]
-                        root.clear()
+        body = self.conn.readall()
+        etree = ElementTree.fromstring(body)
+        root_xmlns_uri = _tag_xmlns_uri(etree)
+        if root_xmlns_uri is None:
+            root_xmlns_prefix = ''
+        else:
+            # Validate the XML namespace
+            root_xmlns_prefix = '{%s}' % (root_xmlns_uri, )
+            if root_xmlns_prefix != self.xml_ns_prefix:
+                log.error('Unexpected server reply to list operation:\n%s',
+                          self._dump_response(resp, body=body))
+                raise RuntimeError('List response has unknown namespace')
+
+        names = [ x.findtext(root_xmlns_prefix + 'Key')
+                  for x in etree.findall(root_xmlns_prefix + 'Contents') ]
+
+        is_truncated = etree.find(root_xmlns_prefix + 'IsTruncated')
+        if is_truncated.text == 'false':
+            page_token = None
+        else:
+            page_token = names[-1]
 
-            except GeneratorExit:
-                # Need to read rest of response
-                self.conn.discard()
-                break
+        return (names, page_token)
 
-            if keys_remaining is None:
-                raise RuntimeError('Could not parse body')
 
     @retry
     @copy_ancestor_docstring
@@ -610,25 +589,6 @@ class Backend(AbstractBackend, metaclass
 
         return tree
 
-    # NOTE: ! This function is also used by the swift backend. !
-    @prepend_ancestor_docstring
-    def clear(self):
-        """
-        This method may not be able to see (and therefore also not delete)
-        recently uploaded objects.
-        """
-
-        # We have to cache keys, because otherwise we can't use the
-        # http connection to delete keys.
-        for (no, s3key) in enumerate(list(self)):
-            if no != 0 and no % 1000 == 0:
-                log.info('clear(): deleted %d objects so far..', no)
-
-            log.debug('started with %s', s3key)
-
-            # Ignore missing objects when clearing bucket
-            self.delete(s3key, True)
-
     def __str__(self):
         return 's3c://%s/%s/%s' % (self.hostname, self.bucket_name, self.prefix)
 
@@ -806,6 +766,16 @@ class Backend(AbstractBackend, metaclass
 
         return meta
 
+
+def _tag_xmlns_uri(elem):
+    '''Extract the XML namespace (xmlns) URI from an element'''
+    if elem.tag[0] == '{':
+        uri, ignore, tag = elem.tag[1:].partition("}")
+    else:
+        uri = None
+    return uri
+
+
 class ObjectR(object):
     '''An S3 object open for reading'''
 
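
The rewritten `list()` above replaces `retry_generator` with plain pagination: each `_list_page()` call is individually retriable and returns a `(names, page_token)` tuple, so a transient network failure mid-listing no longer aborts the whole generator. A self-contained sketch of the same protocol, with a fake page source standing in for `_list_page()`:

    def fake_list_page(prefix, page_token=None, batch_size=3):
        '''Stand-in for Backend._list_page(): return one page plus a token.'''
        keys = ['a', 'b', 'c', 'd', 'e']
        start = keys.index(page_token) + 1 if page_token else 0
        page = keys[start:start + batch_size]
        # a full page means there may be more results
        token = page[-1] if len(page) == batch_size else None
        return (page, token)

    def list_all(prefix=''):
        page_token = None
        while True:
            (els, page_token) = fake_list_page(prefix, page_token)
            yield from els
            if page_token is None:
                break

    assert list(list_all()) == ['a', 'b', 'c', 'd', 'e']
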
diff -pruN 2.33+dfsg-1/src/s3ql/backends/s3.py 3.0+dfsg-1/src/s3ql/backends/s3.py
--- 2.33+dfsg-1/src/s3ql/backends/s3.py	2018-12-28 13:08:31.000000000 +0000
+++ 3.0+dfsg-1/src/s3ql/backends/s3.py	2019-01-13 14:54:28.000000000 +0000
@@ -148,17 +148,13 @@ class Backend(s3c.Backend):
                               errtag.findtext(ns_p + 'Key')[offset:],
                               errtag.findtext(ns_p + 'Code'))
 
-            # If *force*, just modify the passed list and return without
-            # raising an exception, otherwise raise exception for the first error
-            if force:
-                return
-
             errcode = error_tags[0].findtext(ns_p + 'Code')
             errmsg = error_tags[0].findtext(ns_p + 'Message')
             errkey = error_tags[0].findtext(ns_p + 'Key')[offset:]
 
             if errcode == 'NoSuchKeyError':
-                raise NoSuchObject(errkey)
+                if not force:
+                    raise NoSuchObject(errkey)
             else:
                 raise get_S3Error(errcode, 'Error deleting %s: %s' % (errkey, errmsg))
 
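
The net effect of this hunk: `force` now only suppresses "no such key" errors during a bulk delete, while every other error (e.g. access denied) propagates even when `force` is set. Illustrative use, assuming the standard `delete_multi()` bulk-delete entry point:

    backend.delete_multi(['present', 'missing'], force=True)   # missing key ignored
    backend.delete_multi(['missing'], force=False)             # raises NoSuchObject
    # other S3 errors now raise regardless of the force flag
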
diff -pruN 2.33+dfsg-1/src/s3ql/backends/swift.py 3.0+dfsg-1/src/s3ql/backends/swift.py
--- 2.33+dfsg-1/src/s3ql/backends/swift.py	2018-12-28 13:08:31.000000000 +0000
+++ 3.0+dfsg-1/src/s3ql/backends/swift.py	2019-01-15 20:37:11.000000000 +0000
@@ -9,8 +9,7 @@ This work can be distributed under the t
 from ..logging import logging, QuietError, LOG_ONCE # Ensure use of custom logger class
 from .. import BUFSIZE
 from .common import (AbstractBackend, NoSuchObject, retry, AuthorizationError,
-                     DanglingStorageURLError, retry_generator, get_proxy,
-                     get_ssl_context)
+                     DanglingStorageURLError, get_proxy, get_ssl_context)
 from .s3c import HTTPError, ObjectR, ObjectW, md5sum_b64, BadDigestError
 from . import s3c
 from ..inherit_docstrings import (copy_ancestor_docstring, prepend_ancestor_docstring,
@@ -46,7 +45,6 @@ class Backend(AbstractBackend, metaclass
     _extractmeta = s3c.Backend._extractmeta
     _assert_empty_response = s3c.Backend._assert_empty_response
     _dump_response = s3c.Backend._dump_response
-    clear = s3c.Backend.clear
     reset = s3c.Backend.reset
 
     def __init__(self, options):
@@ -665,55 +663,62 @@ class Backend(AbstractBackend, metaclass
         self._do_request('POST', '/%s%s' % (self.prefix, key), headers=headers)
         self.conn.discard()
 
-    @retry_generator
     @copy_ancestor_docstring
-    def list(self, prefix='', start_after='', batch_size=5000):
-        log.debug('started with %s, %s', prefix, start_after)
-
-        keys_remaining = True
-        marker = self.prefix + start_after
+    def list(self, prefix=''):
         prefix = self.prefix + prefix
+        strip = len(self.prefix)
+        page_token = None
+        while True:
+            (els, page_token) = self._list_page(prefix, page_token)
+            for el in els:
+                yield el[strip:]
+            if page_token is None:
+                break
+
+    @retry
+    def _list_page(self, prefix, page_token=None, batch_size=1000):
 
-        while keys_remaining:
-            log.debug('requesting with marker=%s', marker)
+        # Limit maximum number of results since we read everything
+        # into memory (because Python JSON doesn't have a streaming API)
+        query_string = { 'prefix': prefix, 'limit': str(batch_size),
+                         'format': 'json' }
+        if page_token:
+            query_string['marker'] = page_token
 
-            try:
-                resp = self._do_request('GET', '/', query_string={'prefix': prefix,
-                                                                  'format': 'json',
-                                                                  'marker': marker,
-                                                                  'limit': batch_size })
-            except HTTPError as exc:
-                if exc.status == 404:
-                    raise DanglingStorageURLError(self.container_name)
-                raise
+        try:
+            resp = self._do_request('GET', '/', query_string=query_string)
+        except HTTPError as exc:
+            if exc.status == 404:
+                raise DanglingStorageURLError(self.container_name)
+            raise
 
-            if resp.status == 204:
-                return
+        if resp.status == 204:
+            return ([], None)  # empty container
 
-            hit = re.match('application/json; charset="?(.+?)"?$',
-                           resp.headers['content-type'])
-            if not hit:
-                log.error('Unexpected server response. Expected json, got:\n%s',
-                          self._dump_response(resp))
-                raise RuntimeError('Unexpected server reply')
+        hit = re.match('application/json; charset="?(.+?)"?$',
+                       resp.headers['content-type'])
+        if not hit:
+            log.error('Unexpected server response. Expected json, got:\n%s',
+                      self._dump_response(resp))
+            raise RuntimeError('Unexpected server reply')
 
-            strip = len(self.prefix)
-            count = 0
-            try:
-                # JSON does not have a streaming API, so we just read
-                # the entire response in memory.
-                for dataset in json.loads(self.conn.read().decode(hit.group(1))):
-                    count += 1
-                    marker = dataset['name']
-                    if marker.endswith(TEMP_SUFFIX):
-                        continue
-                    yield marker[strip:]
+        body = self.conn.readall()
+        names = []
+        count = 0
+        for dataset in json.loads(body.decode(hit.group(1))):
+            count += 1
+            name = dataset['name']
+            if name.endswith(TEMP_SUFFIX):
+                continue
+            names.append(name)
 
-            except GeneratorExit:
-                self.conn.discard()
-                break
+        if count == batch_size:
+            # resume after the last entry seen, even if it was a temporary
+            page_token = name
+        else:
+            page_token = None
+
+        return (names, page_token)
 
-            keys_remaining = count == batch_size
 
     @copy_ancestor_docstring
     def close(self):
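
For reference, `_list_page()` above consumes a standard Swift container listing in JSON format; only the `name` field is used, and entries carrying the temporary-object suffix are filtered out. A hypothetical response body and the equivalent filtering step:

    TEMP_SUFFIX = '_tmp'   # placeholder; the real suffix is defined in swift.py
    listing = [
        {"name": "s3ql_data_1", "bytes": 4096,
         "hash": "d41d8cd98f00b204e9800998ecf8427e",
         "last_modified": "2019-01-15T20:37:11.000000",
         "content_type": "application/octet-stream"},
        {"name": "upload" + TEMP_SUFFIX, "bytes": 0},
    ]
    names = [d['name'] for d in listing
             if not d['name'].endswith(TEMP_SUFFIX)]
    assert names == ['s3ql_data_1']
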
diff -pruN 2.33+dfsg-1/src/s3ql/fsck.py 3.0+dfsg-1/src/s3ql/fsck.py
--- 2.33+dfsg-1/src/s3ql/fsck.py	2018-12-28 13:23:30.000000000 +0000
+++ 3.0+dfsg-1/src/s3ql/fsck.py	2019-01-15 20:46:18.000000000 +0000
@@ -184,11 +184,13 @@ class Fsck(object):
         else:
             stamp1 = float('inf')
 
+        total = len(candidates)
         for (i, filename) in enumerate(candidates):
+            i += 1 # start at 1
             stamp2 = time.time()
-            if stamp2 - stamp1 > 1:
+            if stamp2 - stamp1 > 1 or i == total:
                 sys.stdout.write('\r..processed %d/%d files (%d%%)..'
-                                 % (i, len(candidates), i/len(candidates)*100))
+                                 % (i, total, i/total*100))
                 sys.stdout.flush()
                 stamp1 = stamp2
 
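
The shifted counter matters for the final progress line: `enumerate()` is zero-based, so the old code could report at most (total-1)/total and never reached 100%. Ignoring the one-second rate limiting, the fixed loop behaves like:

    total = 4
    for (i, filename) in enumerate(['a', 'b', 'c', 'd']):
        i += 1   # 1..4 instead of 0..3
        print('..processed %d/%d files (%d%%)..' % (i, total, i / total * 100))
    # the last line now reads "..processed 4/4 files (100%)..", not 75%
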
diff -pruN 2.33+dfsg-1/src/s3ql/__init__.py 3.0+dfsg-1/src/s3ql/__init__.py
--- 2.33+dfsg-1/src/s3ql/__init__.py	2018-12-28 19:36:32.000000000 +0000
+++ 3.0+dfsg-1/src/s3ql/__init__.py	2019-02-09 09:56:00.000000000 +0000
@@ -38,7 +38,7 @@ assert logging.LOG_ONCE  # prevent warni
 
 from llfuse import ROOT_INODE
 
-VERSION = '2.33'
+VERSION = '3.0'
 RELEASE = '%s' % VERSION
 
 # TODO: On next revision bump, remove upgrade code from backend/comprenc.py and
diff -pruN 2.33+dfsg-1/src/s3ql/logging.py 3.0+dfsg-1/src/s3ql/logging.py
--- 2.33+dfsg-1/src/s3ql/logging.py	2018-12-28 13:08:31.000000000 +0000
+++ 3.0+dfsg-1/src/s3ql/logging.py	2019-01-13 14:54:28.000000000 +0000
@@ -31,6 +31,23 @@ class QuietError(Exception):
     def __str__(self):
         return self.msg
 
+
+SYSTEMD_LOG_LEVEL_MAP = {
+    logging.CRITICAL: 0,
+    logging.ERROR: 3,
+    logging.WARNING: 4,
+    logging.INFO: 6,
+    logging.DEBUG: 7,
+}
+
+class SystemdFormatter(logging.Formatter):
+    def format(self, record):
+        s = super().format(record)
+        prefix = SYSTEMD_LOG_LEVEL_MAP.get(record.levelno)
+        if prefix is not None:  # CRITICAL maps to 0, which is falsy
+            s = '<%d>%s' % (prefix, s)
+        return s
+
 class MyFormatter(logging.Formatter):
     '''Prepend severity to log message if it exceeds threshold'''
 
@@ -85,7 +102,7 @@ def setup_logging(options):
         root_logger.debug("Logging already initialized.")
         return
 
-    stdout_handler = add_stdout_logging(options.quiet)
+    stdout_handler = add_stdout_logging(options.quiet, getattr(options, 'systemd', False))
     if hasattr(options, 'log') and options.log:
         root_logger.addHandler(create_handler(options.log))
     elif options.debug and (not hasattr(options, 'log') or not options.log):
@@ -134,13 +151,16 @@ def setup_excepthook():
 
     sys.excepthook = excepthook
 
-def add_stdout_logging(quiet=False):
+def add_stdout_logging(quiet=False, systemd=False):
     '''Add stdout logging handler to root logger'''
 
     root_logger = logging.getLogger()
-    formatter = MyFormatter('%(message)s')
+    if systemd:
+        formatter = SystemdFormatter('%(message)s')
+    else:
+        formatter = MyFormatter('%(message)s')
     handler = logging.StreamHandler(sys.stderr)
-    if quiet:
+    if not systemd and quiet:
         handler.setLevel(logging.WARNING)
     else:
         handler.setLevel(logging.INFO)
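
The numeric prefixes follow sd-daemon(3): journald assigns syslog priority N to any stderr line written as `<N>text`, so the map above translates Python log levels into syslog priorities (0 = emerg ... 7 = debug). A minimal demonstration of what the formatter emits:

    import logging

    fmt = SystemdFormatter('%(message)s')
    rec = logging.LogRecord('s3ql', logging.WARNING, __file__, 0,
                            'cache is full', None, None)
    print(fmt.format(rec))   # -> "<4>cache is full", stored at priority 4

Note the explicit `is not None` check in `format()`: CRITICAL maps to 0, which is falsy but still a valid priority.
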
diff -pruN 2.33+dfsg-1/src/s3ql/mkfs.py 3.0+dfsg-1/src/s3ql/mkfs.py
--- 2.33+dfsg-1/src/s3ql/mkfs.py	2018-12-28 13:23:30.000000000 +0000
+++ 3.0+dfsg-1/src/s3ql/mkfs.py	2019-01-15 20:37:11.000000000 +0000
@@ -45,8 +45,6 @@ def parse_args(args):
                            "Default: %(default)d KiB.")
     parser.add_argument("--plain", action="store_true", default=False,
                       help="Create unencrypted file system.")
-    parser.add_argument("--force", action="store_true", default=False,
-                        help="Overwrite any existing data.")
 
     options = parser.parse_args(args)
 
@@ -102,13 +100,8 @@ def main(args=None):
                     '(cf. https://forums.aws.amazon.com/thread.jspa?threadID=130560)')
 
     if 's3ql_metadata' in plain_backend:
-        if not options.force:
-            raise QuietError("Found existing file system! Use --force to overwrite")
-
-        log.info('Purging existing file system data..')
-        plain_backend.clear()
-        log.info('Please note that the new file system may appear inconsistent\n'
-                 'for a while until the removals have propagated through the backend.')
+        raise QuietError("Refusing to overwrite existing file system! "
+                         "(use `s3qladm clear` to delete)")
 
     if not options.plain:
         if sys.stdin.isatty():
diff -pruN 2.33+dfsg-1/src/s3ql/mount.py 3.0+dfsg-1/src/s3ql/mount.py
--- 2.33+dfsg-1/src/s3ql/mount.py	2018-12-28 13:23:30.000000000 +0000
+++ 3.0+dfsg-1/src/s3ql/mount.py	2019-01-13 15:36:07.000000000 +0000
@@ -37,11 +37,6 @@ import time
 import shutil
 import atexit
 
-try:
-    from systemd.daemon import notify as sd_notify
-except ImportError:
-    sd_notify = None
-
 log = logging.getLogger(__name__)
 
 def install_thread_excepthook():
@@ -178,7 +173,7 @@ def main(args=None):
             llfuse.close(unmount=unmount_clean)
         cm.callback(unmount)
 
-        if options.fg:
+        if options.fg or options.systemd:
             faulthandler.enable()
             faulthandler.register(signal.SIGUSR1)
         else:
@@ -203,15 +198,13 @@ def main(args=None):
         cm.callback(commit_thread.join)
         cm.callback(commit_thread.stop)
 
-        if options.upstart:
-            os.kill(os.getpid(), signal.SIGSTOP)
-        if sd_notify is not None:
-            sd_notify('READY=1')
-            sd_notify('MAINPID=%d' % os.getpid())
-
         exc_info = setup_exchook()
         workers = 1 if options.single else None # use default
 
+        if options.systemd:
+            import systemd.daemon
+            systemd.daemon.notify('READY=1')
+
         if options.profile:
             ret = prof.runcall(llfuse.main, workers)
         else:
@@ -536,9 +529,9 @@ def parse_args(args):
                            'user and the root user.')
     parser.add_argument("--fg", action="store_true", default=False,
                       help="Do not daemonize, stay in foreground")
-    parser.add_argument("--upstart", action="store_true", default=False,
-                      help="Stay in foreground and raise SIGSTOP once mountpoint "
-                           "is up.")
+    parser.add_argument("--systemd", action="store_true", default=False,
+                      help="Run as systemd unit. Consider specifying --log none as well "
+                           "to make use of journald.")
     parser.add_argument("--compress", action="store", default='lzma-6',
                         metavar='<algorithm-lvl>', type=compression_type,
                         help="Compression algorithm and compression level to use when "
@@ -572,15 +565,12 @@ def parse_args(args):
     if options.allow_other and options.allow_root:
         parser.error("--allow-other and --allow-root are mutually exclusive.")
 
-    if not options.log and not options.fg:
+    if not options.log and not (options.fg or options.systemd):
         parser.error("Please activate logging to a file or syslog, or use the --fg option.")
 
     if options.profile:
         options.single = True
 
-    if options.upstart:
-        options.fg = True
-
     if options.metadata_upload_interval == 0:
         options.metadata_upload_interval = None
 
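
With `--systemd`, mount.s3ql stays in the foreground and calls `systemd.daemon.notify('READY=1')` once the file system is up, which is exactly the contract of a `Type=notify` service. A sketch of a matching unit file (paths, storage URL and mountpoint are placeholders):

    # /etc/systemd/system/s3ql.service -- illustrative only
    [Unit]
    Description=S3QL file system
    After=network-online.target
    Wants=network-online.target

    [Service]
    Type=notify
    ExecStart=/usr/bin/mount.s3ql --systemd --log none s3c://example.com/bucket /srv/s3ql
    ExecStop=/usr/bin/umount.s3ql /srv/s3ql

    [Install]
    WantedBy=multi-user.target
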
diff -pruN 2.33+dfsg-1/src/s3ql/parse_args.py 3.0+dfsg-1/src/s3ql/parse_args.py
--- 2.33+dfsg-1/src/s3ql/parse_args.py	2018-12-28 13:08:31.000000000 +0000
+++ 3.0+dfsg-1/src/s3ql/parse_args.py	2019-01-15 20:37:11.000000000 +0000
@@ -202,6 +202,26 @@ class ArgumentParser(argparse.ArgumentPa
 
         return super().add_subparsers(**kw)
 
+    def _read_authinfo(self, path, storage_url):
+
+        ini_config = configparser.ConfigParser()
+        if os.path.isfile(path):
+            mode = os.stat(path).st_mode
+            if mode & (stat.S_IRGRP | stat.S_IROTH):
+                self.exit(12, "%s has insecure permissions, aborting." % path)
+            ini_config.read(path)
+
+        merged = dict()
+        for section in ini_config.sections():
+            pattern = ini_config[section].get('storage-url', None)
+            if not pattern or not storage_url.startswith(pattern):
+                continue
+
+            for (key, val) in ini_config[section].items():
+                if key != 'storage-url':
+                    merged[key] = val
+        return merged
+
     def parse_args(self, *args, **kwargs):
 
         try:
@@ -209,6 +229,27 @@ class ArgumentParser(argparse.ArgumentPa
         except ArgumentError as exc:
             self.error(str(exc))
 
+        if hasattr(options, 'authfile'):
+            storage_url = getattr(options, 'storage_url', '')
+            ini_config = self._read_authinfo(options.authfile, storage_url)
+
+            # Validate configuration file
+            fixed_keys = { 'backend-login', 'backend-password', 'fs-passphrase',
+                           'storage-url' }
+            unknown_keys = (set(ini_config.keys())
+                            - { x.replace('_', '-') for x in options.__dict__.keys() }
+                            - fixed_keys)
+            if unknown_keys:
+                self.exit(2, 'Unknown key(s) in configuration file: ' +
+                          ', '.join(unknown_keys))
+
+            # Update defaults and re-parse arguments
+            defaults = { k.replace('-', '_'): v
+                         for (k, v) in ini_config.items()
+                         if k != 'storage-url' }
+            self.set_defaults(**defaults)
+            options = super().parse_args(*args, **kwargs)
+
         if hasattr(options, 'storage_url'):
             self._init_backend_factory(options)
 
@@ -241,28 +282,11 @@ class ArgumentParser(argparse.ArgumentPa
         except KeyError:
             self.exit(11, 'No such backend: ' + backend)
 
-        # Read authfile
-        ini_config = configparser.ConfigParser()
-        if os.path.isfile(options.authfile):
-            mode = os.stat(options.authfile).st_mode
-            if mode & (stat.S_IRGRP | stat.S_IROTH):
-                self.exit(12, "%s has insecure permissions, aborting."
-                          % options.authfile)
-            ini_config.read(options.authfile)
-
-        # Validate backend options
         backend_options = options.backend_options
         for opt in backend_options.keys():
             if opt not in backend_class.known_options:
                 self.exit(3, 'Unknown backend option: ' + opt)
 
-        valid_keys = backend_class.known_options | {
-            'backend_login', 'backend_password', 'fs_passphrase' }
-        unknown = _merge_sections(ini_config, options, valid_keys)
-        if unknown:
-            self.exit(2, 'Unknown key(s) in configuration file: ' +
-                      ', '.join(unknown))
-
         if not hasattr(options, 'backend_login') and backend_class.needs_login:
             if sys.stdin.isatty():
                 options.backend_login = getpass("Enter backend login: ")
@@ -278,39 +302,6 @@ class ArgumentParser(argparse.ArgumentPa
         options.backend_class = backend_class
 
 
-def _merge_sections(ini_config, options, valid_keys):
-    '''Merge configuration sections from *ini_config* into *options*
-
-    Merge the data from all sections that apply to the given storage
-    URL. Later sections take precedence over earlier sections.
-
-    Keys in *ini_config* that are neither in *options* nor in *valid_keys* will
-    be returned.
-
-    Dashes will be replaced by underscores.
-    '''
-
-    storage_url = options.storage_url
-    merged = dict()
-    for section in ini_config.sections():
-        pattern = ini_config[section].get('storage-url', None)
-        if not pattern or not storage_url.startswith(pattern):
-            continue
-
-        for (key, val) in ini_config[section].items():
-            if key != 'storage-url':
-                merged[key.replace('-', '_')] = val
-
-    unknown = set()
-    for (key, val) in merged.items():
-        if key not in valid_keys and not hasattr(options, key):
-            unknown.add(key)
-        else:
-            setattr(options, key, val)
-
-    return unknown
-
-
 def storage_url_type(s):
     '''Validate and canonicalize storage url'''
 
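
A section of the authinfo file applies when the storage URL starts with the section's `storage-url` value; later sections override earlier ones, and with this refactoring any long command-line option accepted by the invoked program can also be given as a key (it becomes that option's default before re-parsing). A hypothetical authinfo file, which must not be group- or world-readable:

    [swift-defaults]
    storage-url: swift://
    backend-login: myuser
    backend-password: hunter2

    [backup-fs]
    storage-url: swift://storage.example.com/backup
    fs-passphrase: correct horse battery staple
    backend-options: tcp-timeout=30
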
diff -pruN 2.33+dfsg-1/src/s3ql/umount.py 3.0+dfsg-1/src/s3ql/umount.py
--- 2.33+dfsg-1/src/s3ql/umount.py	2018-12-28 13:23:30.000000000 +0000
+++ 3.0+dfsg-1/src/s3ql/umount.py	2019-02-09 09:50:00.000000000 +0000
@@ -90,18 +90,26 @@ def get_cmdline(pid):
     and return None.
     '''
 
-    try:
-        output = subprocess.check_output(['ps', '-p', str(pid), '-o', 'args='],
-                                         universal_newlines=True).strip()
-    except subprocess.CalledProcessError:
-        log.warning('Unable to execute ps, assuming process %d has terminated.'
-                    % pid)
-        return None
+    if os.path.isdir('/proc'):
+        try:
+            with open('/proc/%d/cmdline' % pid, 'r') as cmd_file:
+                return cmd_file.read()
+
+        except FileNotFoundError:
+            return None
 
-    if output:
-        return output
     else:
-        return None
+        try:
+            output = subprocess.check_output(['ps', '-p', str(pid), '-o', 'args='],
+                                             universal_newlines=True).strip()
+            if output:
+                return output
+
+        except subprocess.CalledProcessError:
+            log.warning('Error executing ps, assuming process %d has terminated.',
+                        pid)
+
+    return None
 
 def blocking_umount(mountpoint):
     '''Invoke fusermount and wait for daemon to terminate.'''
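
One subtlety of the `/proc` branch: `/proc/<pid>/cmdline` is NUL-separated while `ps -o args=` is space-separated. The representation does not matter, since comparing successive `get_cmdline()` results for the same PID is all that is needed to guard against PID reuse:

    pid = 12345                   # hypothetical mount.s3ql PID
    before = get_cmdline(pid)     # e.g. 'mount.s3ql\x00--fg\x00/mnt\x00'
    # ... later ...
    after = get_cmdline(pid)
    if after is None or after != before:
        pass   # process exited, or the PID was reused by another program
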
diff -pruN 2.33+dfsg-1/src/s3ql/verify.py 3.0+dfsg-1/src/s3ql/verify.py
--- 2.33+dfsg-1/src/s3ql/verify.py	2018-12-28 13:08:31.000000000 +0000
+++ 3.0+dfsg-1/src/s3ql/verify.py	2019-01-15 20:46:18.000000000 +0000
@@ -27,7 +27,7 @@ log = logging.getLogger(__name__)
 def _new_file_type(s, encoding='utf-8'):
     '''An argparse type for a file that does not yet exist'''
 
-    if os.path.exists(s):
+    if os.path.exists(s) and os.stat(s).st_size != 0:
         msg = 'File already exists - refusing to overwrite: %s' % s
         raise argparse.ArgumentTypeError(msg)
 
@@ -131,8 +131,9 @@ def retrieve_objects(db, backend_factory
     stamp1 = 0
     try:
         for (i, (obj_id, size)) in enumerate(db.query(sql)):
+            i += 1 # start at 1
             stamp2 = time.time()
-            if stamp2 - stamp1 > 1:
+            if stamp2 - stamp1 > 1 or i == total_count:
                 stamp1 = stamp2
                 progress = '%d objects (%.2f%%)' % (i, i/total_count * 100)
                 if full:
diff -pruN 2.33+dfsg-1/src/s3ql.egg-info/PKG-INFO 3.0+dfsg-1/src/s3ql.egg-info/PKG-INFO
--- 2.33+dfsg-1/src/s3ql.egg-info/PKG-INFO	2018-12-28 19:37:21.000000000 +0000
+++ 3.0+dfsg-1/src/s3ql.egg-info/PKG-INFO	2019-02-09 09:56:37.000000000 +0000
@@ -1,6 +1,6 @@
 Metadata-Version: 1.1
 Name: s3ql
-Version: 2.33
+Version: 3.0
 Summary: a full-featured file system for online data storage
 Home-page: https://bitbucket.org/nikratio/s3ql/
 Author: Nikolaus Rath
@@ -139,10 +139,10 @@ Description: ..
         The following resources are available:
         
         * The `S3QL User's Guide`_.
-        * The `S3QL Wiki`_, which also contains the `S3QL FAQ`_.
-        * The `S3QL Mailing List`_. You can subscribe by sending a mail to
-          `s3ql+subscribe@googlegroups.com
-          <mailto:s3ql+subscribe@googlegroups.com>`_.
+        * The `S3QL Wiki <https://github.com/s3ql/s3ql/wiki>`_
+        * The `S3QL Mailing List <http://groups.google.com/group/s3ql>`_. You
+          can subscribe by sending a mail to
+          `s3ql+subscribe@googlegroups.com <mailto:s3ql+subscribe@googlegroups.com>`_.
         
         Please report any bugs you may encounter in the `GitHub Issue Tracker`_.
         
@@ -157,8 +157,6 @@ Description: ..
         Professional support is offered via `Rath Consulting`_.
         
         .. _`S3QL User's Guide`: http://www.rath.org/s3ql-docs/index.html
-        .. _`S3QL Wiki`: https://bitbucket.org/nikratio/s3ql/wiki/
-        .. _`S3QL FAQ`: https://bitbucket.org/nikratio/s3ql/wiki/FAQ
         .. _`S3QL Mailing List`: http://groups.google.com/group/s3ql
         .. _`GitHub Issue Tracker`: https://github.com/s3ql/s3ql/issues
         .. _GitHub: https://github.com/s3ql/main
diff -pruN 2.33+dfsg-1/src/s3ql.egg-info/requires.txt 3.0+dfsg-1/src/s3ql.egg-info/requires.txt
--- 2.33+dfsg-1/src/s3ql.egg-info/requires.txt	2018-12-28 19:37:21.000000000 +0000
+++ 3.0+dfsg-1/src/s3ql.egg-info/requires.txt	2019-02-09 09:56:37.000000000 +0000
@@ -1,5 +1,5 @@
 apsw>=3.7.0
-pycrypto
+cryptography
 requests
 defusedxml
 dugong<4.0,>=3.4
diff -pruN 2.33+dfsg-1/src/s3ql.egg-info/SOURCES.txt 3.0+dfsg-1/src/s3ql.egg-info/SOURCES.txt
--- 2.33+dfsg-1/src/s3ql.egg-info/SOURCES.txt	2018-12-28 19:37:21.000000000 +0000
+++ 3.0+dfsg-1/src/s3ql.egg-info/SOURCES.txt	2019-02-09 09:56:37.000000000 +0000
@@ -3,6 +3,18 @@ LICENSE
 README.rst
 setup.cfg
 setup.py
+/home/nikratio/in-progress/s3ql/doc/man/fsck.s3ql.1
+/home/nikratio/in-progress/s3ql/doc/man/mkfs.s3ql.1
+/home/nikratio/in-progress/s3ql/doc/man/mount.s3ql.1
+/home/nikratio/in-progress/s3ql/doc/man/s3ql_oauth_client.1
+/home/nikratio/in-progress/s3ql/doc/man/s3ql_verify.1
+/home/nikratio/in-progress/s3ql/doc/man/s3qladm.1
+/home/nikratio/in-progress/s3ql/doc/man/s3qlcp.1
+/home/nikratio/in-progress/s3ql/doc/man/s3qlctrl.1
+/home/nikratio/in-progress/s3ql/doc/man/s3qllock.1
+/home/nikratio/in-progress/s3ql/doc/man/s3qlrm.1
+/home/nikratio/in-progress/s3ql/doc/man/s3qlstat.1
+/home/nikratio/in-progress/s3ql/doc/man/umount.s3ql.1
 bin/fsck.s3ql
 bin/mkfs.s3ql
 bin/mount.s3ql
@@ -258,6 +270,7 @@ tests/t3_fsck.py
 tests/t3_inode_cache.py
 tests/t3_verify.py
 tests/t4_adm.py
+tests/t4_authinfo.py
 tests/t4_fuse.py
 tests/t5_cache.py
 tests/t5_cp.py
@@ -267,5 +280,9 @@ tests/t5_fsck.py
 tests/t5_full.py
 tests/t5_lock_rm.py
 tests/t6_upgrade.py
+tests/.pytest_cache/README.md
+tests/.pytest_cache/v/cache/lastfailed
+tests/.pytest_cache/v/cache/nodeids
+tests/.pytest_cache/v/cache/stepwise
 util/cmdline_lexer.py
 util/sphinx_pipeinclude.py
\ No newline at end of file
diff -pruN 2.33+dfsg-1/tests/conftest.py 3.0+dfsg-1/tests/conftest.py
--- 2.33+dfsg-1/tests/conftest.py	2018-12-28 13:23:30.000000000 +0000
+++ 3.0+dfsg-1/tests/conftest.py	2019-01-15 20:37:11.000000000 +0000
@@ -45,11 +45,11 @@ def s3ql_cmd_argv(request):
     '''Provide argument list to execute s3ql commands in tests'''
 
     if request.config.getoption('installed'):
-        request.cls.s3ql_cmd_argv = lambda self, cmd: [ cmd ]
+        yield lambda cmd: [ cmd ]
     else:
         basedir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
-        request.cls.s3ql_cmd_argv = lambda self, cmd: [ sys.executable,
-                                                        os.path.join(basedir, 'bin', cmd) ]
+        yield lambda cmd: [ sys.executable,
+                            os.path.join(basedir, 'bin', cmd) ]
 
 # Enable output checks
 pytest_plugins = ('pytest_checklogs',)
@@ -59,6 +59,11 @@ def pass_reg_output(request, reg_output)
     '''Provide reg_output function to UnitTest instances'''
     request.instance.reg_output = reg_output
 
+@pytest.fixture()
+def pass_s3ql_cmd_argv(request, s3ql_cmd_argv):
+    '''Provide s3ql_cmd_argv function to UnitTest instances'''
+    request.instance.s3ql_cmd_argv = s3ql_cmd_argv
+
 def pytest_addoption(parser):
     group = parser.getgroup("terminal reporting")
     group._addoption("--logdebug", action="append", metavar='<module>',
diff -pruN 2.33+dfsg-1/tests/mock_server.py 3.0+dfsg-1/tests/mock_server.py
--- 2.33+dfsg-1/tests/mock_server.py	2018-12-28 13:08:31.000000000 +0000
+++ 3.0+dfsg-1/tests/mock_server.py	2019-01-13 14:54:28.000000000 +0000
@@ -291,17 +291,6 @@ class S3CRequestHandler(BaseHTTPRequestH
             self.wfile.write(content)
 
 
-class GSRequestHandler(S3CRequestHandler):
-    '''A request handler implementing a subset of the Google Storage API.
-
-    Bucket names are ignored, all keys share the same global namespace.
-    '''
-
-    meta_header_re = re.compile(r'x-goog-meta-([a-z0-9_.-]+)$',
-                                re.IGNORECASE)
-    hdr_prefix = 'x-goog-'
-    xml_ns = 'http://doc.s3.amazonaws.com/2006-03-01'
-
 class BasicSwiftRequestHandler(S3CRequestHandler):
     '''A request handler implementing a subset of the OpenStack Swift Interface
 
@@ -578,7 +567,6 @@ class BulkDeleteSwiftRequestHandler(Basi
 handler_list = [ (S3CRequestHandler, 's3c://%(host)s:%(port)d/s3ql_test'),
 
                  # Special syntax only for testing against mock server
-                 (GSRequestHandler, 'gs://!unittest!%(host)s:%(port)d/s3ql_test'),
                  (BasicSwiftRequestHandler, 'swift://%(host)s:%(port)d/s3ql_test'),
                  (CopySwiftRequestHandler, 'swift://%(host)s:%(port)d/s3ql_test'),
                  (BulkDeleteSwiftRequestHandler, 'swift://%(host)s:%(port)d/s3ql_test') ]
diff -pruN 2.33+dfsg-1/tests/.pytest_cache/README.md 3.0+dfsg-1/tests/.pytest_cache/README.md
--- 2.33+dfsg-1/tests/.pytest_cache/README.md	1970-01-01 00:00:00.000000000 +0000
+++ 3.0+dfsg-1/tests/.pytest_cache/README.md	2018-12-29 15:43:40.000000000 +0000
@@ -0,0 +1,8 @@
+# pytest cache directory #
+
+This directory contains data from the pytest's cache plugin,
+which provides the `--lf` and `--ff` options, as well as the `cache` fixture.
+
+**Do not** commit this to version control.
+
+See [the docs](https://docs.pytest.org/en/latest/cache.html) for more information.
diff -pruN 2.33+dfsg-1/tests/.pytest_cache/v/cache/lastfailed 3.0+dfsg-1/tests/.pytest_cache/v/cache/lastfailed
--- 2.33+dfsg-1/tests/.pytest_cache/v/cache/lastfailed	1970-01-01 00:00:00.000000000 +0000
+++ 3.0+dfsg-1/tests/.pytest_cache/v/cache/lastfailed	2019-01-14 21:50:51.000000000 +0000
@@ -0,0 +1,48 @@
+{
+  "t1_backends.py::test_clear[remote-gs/aes]": true,
+  "t1_backends.py::test_complex_meta[remote-gs/raw]": true,
+  "t1_backends.py::test_complex_meta[remote-swiftks/raw]": true,
+  "t1_backends.py::test_copy[remote-gs/raw]": true,
+  "t1_backends.py::test_copy[remote-swiftks/raw]": true,
+  "t1_backends.py::test_copy_newmeta[remote-gs/raw]": true,
+  "t1_backends.py::test_copy_newmeta[remote-swiftks/raw]": true,
+  "t1_backends.py::test_copy_special[remote-gs/raw]": true,
+  "t1_backends.py::test_copy_special[remote-swiftks/raw]": true,
+  "t1_backends.py::test_delete[remote-gs/aes]": true,
+  "t1_backends.py::test_delete[remote-swiftks/aes]": true,
+  "t1_backends.py::test_delete_multi[remote-gs/aes]": true,
+  "t1_backends.py::test_delete_multi[remote-swiftks/aes]": true,
+  "t1_backends.py::test_list[remote-gs/aes]": true,
+  "t1_backends.py::test_list[remote-swiftks/aes]": true,
+  "t1_backends.py::test_multi_packet[remote-gs/aes+zlib]": true,
+  "t1_backends.py::test_multi_packet[remote-gs/aes]": true,
+  "t1_backends.py::test_multi_packet[remote-gs/plain]": true,
+  "t1_backends.py::test_multi_packet[remote-gs/raw]": true,
+  "t1_backends.py::test_multi_packet[remote-gs/zlib]": true,
+  "t1_backends.py::test_multi_packet[remote-swiftks/aes+zlib]": true,
+  "t1_backends.py::test_multi_packet[remote-swiftks/aes]": true,
+  "t1_backends.py::test_multi_packet[remote-swiftks/plain]": true,
+  "t1_backends.py::test_multi_packet[remote-swiftks/raw]": true,
+  "t1_backends.py::test_multi_packet[remote-swiftks/zlib]": true,
+  "t1_backends.py::test_read_write[mock-gs/aes]": true,
+  "t1_backends.py::test_read_write[remote-gs/aes+zlib]": true,
+  "t1_backends.py::test_read_write[remote-gs/aes]": true,
+  "t1_backends.py::test_read_write[remote-gs/bzip2]": true,
+  "t1_backends.py::test_read_write[remote-gs/lzma]": true,
+  "t1_backends.py::test_read_write[remote-gs/plain]": true,
+  "t1_backends.py::test_read_write[remote-gs/raw]": true,
+  "t1_backends.py::test_read_write[remote-gs/zlib]": true,
+  "t1_backends.py::test_read_write[remote-swiftks/aes+zlib]": true,
+  "t1_backends.py::test_read_write[remote-swiftks/bzip2]": true,
+  "t1_backends.py::test_read_write[remote-swiftks/lzma]": true,
+  "t1_backends.py::test_read_write[remote-swiftks/plain]": true,
+  "t1_backends.py::test_read_write[remote-swiftks/raw]": true,
+  "t1_backends.py::test_read_write[remote-swiftks/zlib]": true,
+  "t1_backends.py::test_readslowly[remote-gs/raw]": true,
+  "t1_backends.py::test_rename[remote-gs/raw]": true,
+  "t1_backends.py::test_rename[remote-swiftks/raw]": true,
+  "t1_backends.py::test_rename_newmeta[remote-gs/raw]": true,
+  "t1_backends.py::test_rename_newmeta[remote-swiftks/raw]": true,
+  "t1_backends.py::test_update_meta[remote-gs/raw]": true,
+  "t1_backends.py::test_update_meta[remote-swiftks/raw]": true
+}
\ No newline at end of file
diff -pruN 2.33+dfsg-1/tests/.pytest_cache/v/cache/nodeids 3.0+dfsg-1/tests/.pytest_cache/v/cache/nodeids
--- 2.33+dfsg-1/tests/.pytest_cache/v/cache/nodeids	1970-01-01 00:00:00.000000000 +0000
+++ 3.0+dfsg-1/tests/.pytest_cache/v/cache/nodeids	2019-01-14 21:50:51.000000000 +0000
@@ -0,0 +1,370 @@
+[
+  "t1_backends.py::test_read_write[local/aes]",
+  "t1_backends.py::test_read_write[remote-s3/aes]",
+  "t1_backends.py::test_read_write[mock-s3c/aes]",
+  "t1_backends.py::test_read_write[mock-swift/aes0]",
+  "t1_backends.py::test_read_write[mock-swift/aes1]",
+  "t1_backends.py::test_read_write[mock-swift/aes2]",
+  "t1_backends.py::test_read_write[local/aes+zlib]",
+  "t1_backends.py::test_read_write[remote-s3/aes+zlib]",
+  "t1_backends.py::test_read_write[mock-s3c/aes+zlib]",
+  "t1_backends.py::test_read_write[mock-swift/aes+zlib0]",
+  "t1_backends.py::test_read_write[mock-swift/aes+zlib1]",
+  "t1_backends.py::test_read_write[mock-swift/aes+zlib2]",
+  "t1_backends.py::test_read_write[local/plain]",
+  "t1_backends.py::test_read_write[remote-s3/plain]",
+  "t1_backends.py::test_read_write[mock-s3c/plain]",
+  "t1_backends.py::test_read_write[mock-swift/plain0]",
+  "t1_backends.py::test_read_write[mock-swift/plain1]",
+  "t1_backends.py::test_read_write[mock-swift/plain2]",
+  "t1_backends.py::test_read_write[local/zlib]",
+  "t1_backends.py::test_read_write[remote-s3/zlib]",
+  "t1_backends.py::test_read_write[mock-s3c/zlib]",
+  "t1_backends.py::test_read_write[mock-swift/zlib0]",
+  "t1_backends.py::test_read_write[mock-swift/zlib1]",
+  "t1_backends.py::test_read_write[mock-swift/zlib2]",
+  "t1_backends.py::test_read_write[local/bzip2]",
+  "t1_backends.py::test_read_write[remote-s3/bzip2]",
+  "t1_backends.py::test_read_write[mock-s3c/bzip2]",
+  "t1_backends.py::test_read_write[mock-swift/bzip20]",
+  "t1_backends.py::test_read_write[mock-swift/bzip21]",
+  "t1_backends.py::test_read_write[mock-swift/bzip22]",
+  "t1_backends.py::test_read_write[local/lzma]",
+  "t1_backends.py::test_read_write[remote-s3/lzma]",
+  "t1_backends.py::test_read_write[mock-s3c/lzma]",
+  "t1_backends.py::test_read_write[mock-swift/lzma0]",
+  "t1_backends.py::test_read_write[mock-swift/lzma1]",
+  "t1_backends.py::test_read_write[mock-swift/lzma2]",
+  "t1_backends.py::test_read_write[local/raw]",
+  "t1_backends.py::test_read_write[remote-s3/raw]",
+  "t1_backends.py::test_read_write[mock-s3c/raw]",
+  "t1_backends.py::test_read_write[mock-swift/raw0]",
+  "t1_backends.py::test_read_write[mock-swift/raw1]",
+  "t1_backends.py::test_read_write[mock-swift/raw2]",
+  "t1_backends.py::test_issue114[mock-swift/raw0]",
+  "t1_backends.py::test_issue114[mock-swift/raw1]",
+  "t1_backends.py::test_issue114[mock-swift/raw2]",
+  "t1_backends.py::test_complex_meta[local/raw]",
+  "t1_backends.py::test_complex_meta[remote-s3/raw]",
+  "t1_backends.py::test_complex_meta[mock-s3c/raw]",
+  "t1_backends.py::test_complex_meta[mock-swift/raw0]",
+  "t1_backends.py::test_complex_meta[mock-swift/raw1]",
+  "t1_backends.py::test_complex_meta[mock-swift/raw2]",
+  "t1_backends.py::test_complex_meta[local/plain]",
+  "t1_backends.py::test_complex_meta[local/aes]",
+  "t1_backends.py::test_complex_meta[local/zlib]",
+  "t1_backends.py::test_list[local/aes]",
+  "t1_backends.py::test_list[remote-s3/aes]",
+  "t1_backends.py::test_list[mock-s3c/aes]",
+  "t1_backends.py::test_list[mock-swift/aes0]",
+  "t1_backends.py::test_list[mock-swift/aes1]",
+  "t1_backends.py::test_list[mock-swift/aes2]",
+  "t1_backends.py::test_readslowly[local/raw]",
+  "t1_backends.py::test_readslowly[mock-s3c/raw]",
+  "t1_backends.py::test_readslowly[mock-swift/raw0]",
+  "t1_backends.py::test_readslowly[mock-swift/raw1]",
+  "t1_backends.py::test_readslowly[mock-swift/raw2]",
+  "t1_backends.py::test_readslowly[local/plain]",
+  "t1_backends.py::test_readslowly[local/aes]",
+  "t1_backends.py::test_readslowly[local/zlib]",
+  "t1_backends.py::test_readslowly[local/aes+zlib]",
+  "t1_backends.py::test_delete[local/aes]",
+  "t1_backends.py::test_delete[remote-s3/aes]",
+  "t1_backends.py::test_delete[mock-s3c/aes]",
+  "t1_backends.py::test_delete[mock-swift/aes0]",
+  "t1_backends.py::test_delete[mock-swift/aes1]",
+  "t1_backends.py::test_delete[mock-swift/aes2]",
+  "t1_backends.py::test_delete_multi[local/aes]",
+  "t1_backends.py::test_delete_multi[remote-s3/aes]",
+  "t1_backends.py::test_delete_multi[mock-s3c/aes]",
+  "t1_backends.py::test_delete_multi[mock-swift/aes0]",
+  "t1_backends.py::test_delete_multi[mock-swift/aes1]",
+  "t1_backends.py::test_delete_multi[mock-swift/aes2]",
+  "t1_backends.py::test_copy[local/raw]",
+  "t1_backends.py::test_copy[remote-s3/raw]",
+  "t1_backends.py::test_copy[mock-s3c/raw]",
+  "t1_backends.py::test_copy[mock-swift/raw0]",
+  "t1_backends.py::test_copy[mock-swift/raw1]",
+  "t1_backends.py::test_copy[mock-swift/raw2]",
+  "t1_backends.py::test_copy[local/plain]",
+  "t1_backends.py::test_copy[local/aes]",
+  "t1_backends.py::test_copy[local/zlib]",
+  "t1_backends.py::test_copy_special[local/raw]",
+  "t1_backends.py::test_copy_special[remote-s3/raw]",
+  "t1_backends.py::test_copy_special[mock-s3c/raw]",
+  "t1_backends.py::test_copy_special[mock-swift/raw0]",
+  "t1_backends.py::test_copy_special[mock-swift/raw1]",
+  "t1_backends.py::test_copy_special[mock-swift/raw2]",
+  "t1_backends.py::test_copy_newmeta[local/raw]",
+  "t1_backends.py::test_copy_newmeta[remote-s3/raw]",
+  "t1_backends.py::test_copy_newmeta[mock-s3c/raw]",
+  "t1_backends.py::test_copy_newmeta[mock-swift/raw0]",
+  "t1_backends.py::test_copy_newmeta[mock-swift/raw1]",
+  "t1_backends.py::test_copy_newmeta[mock-swift/raw2]",
+  "t1_backends.py::test_copy_newmeta[local/aes]",
+  "t1_backends.py::test_copy_newmeta[local/zlib]",
+  "t1_backends.py::test_rename[local/raw]",
+  "t1_backends.py::test_rename[remote-s3/raw]",
+  "t1_backends.py::test_rename[mock-s3c/raw]",
+  "t1_backends.py::test_rename[mock-swift/raw0]",
+  "t1_backends.py::test_rename[mock-swift/raw1]",
+  "t1_backends.py::test_rename[mock-swift/raw2]",
+  "t1_backends.py::test_rename[local/aes]",
+  "t1_backends.py::test_rename[local/zlib]",
+  "t1_backends.py::test_rename_newmeta[local/raw]",
+  "t1_backends.py::test_rename_newmeta[remote-s3/raw]",
+  "t1_backends.py::test_rename_newmeta[mock-s3c/raw]",
+  "t1_backends.py::test_rename_newmeta[mock-swift/raw0]",
+  "t1_backends.py::test_rename_newmeta[mock-swift/raw1]",
+  "t1_backends.py::test_rename_newmeta[mock-swift/raw2]",
+  "t1_backends.py::test_rename_newmeta[local/aes]",
+  "t1_backends.py::test_rename_newmeta[local/zlib]",
+  "t1_backends.py::test_update_meta[local/raw]",
+  "t1_backends.py::test_update_meta[remote-s3/raw]",
+  "t1_backends.py::test_update_meta[mock-s3c/raw]",
+  "t1_backends.py::test_update_meta[mock-swift/raw0]",
+  "t1_backends.py::test_update_meta[mock-swift/raw1]",
+  "t1_backends.py::test_update_meta[mock-swift/raw2]",
+  "t1_backends.py::test_update_meta[local/aes]",
+  "t1_backends.py::test_update_meta[local/zlib]",
+  "t1_backends.py::test_copy_error[mock-s3c/raw]",
+  "t1_backends.py::test_copy_error[mock-s3c/aes]",
+  "t1_backends.py::test_copy_error[mock-s3c/zlib]",
+  "t1_backends.py::test_corruption[local/aes]",
+  "t1_backends.py::test_corruption[local/aes+zlib]",
+  "t1_backends.py::test_corruption[local/zlib]",
+  "t1_backends.py::test_corruption[local/bzip2]",
+  "t1_backends.py::test_corruption[local/lzma]",
+  "t1_backends.py::test_extra_data[local/aes]",
+  "t1_backends.py::test_extra_data[local/aes+zlib]",
+  "t1_backends.py::test_extra_data[local/zlib]",
+  "t1_backends.py::test_extra_data[local/bzip2]",
+  "t1_backends.py::test_extra_data[local/lzma]",
+  "t1_backends.py::test_multi_packet[local/raw]",
+  "t1_backends.py::test_multi_packet[remote-s3/raw]",
+  "t1_backends.py::test_multi_packet[mock-s3c/raw]",
+  "t1_backends.py::test_multi_packet[mock-swift/raw0]",
+  "t1_backends.py::test_multi_packet[mock-swift/raw1]",
+  "t1_backends.py::test_multi_packet[mock-swift/raw2]",
+  "t1_backends.py::test_multi_packet[local/plain]",
+  "t1_backends.py::test_multi_packet[remote-s3/plain]",
+  "t1_backends.py::test_multi_packet[mock-s3c/plain]",
+  "t1_backends.py::test_multi_packet[mock-swift/plain0]",
+  "t1_backends.py::test_multi_packet[mock-swift/plain1]",
+  "t1_backends.py::test_multi_packet[mock-swift/plain2]",
+  "t1_backends.py::test_multi_packet[local/aes]",
+  "t1_backends.py::test_multi_packet[remote-s3/aes]",
+  "t1_backends.py::test_multi_packet[mock-s3c/aes]",
+  "t1_backends.py::test_multi_packet[mock-swift/aes0]",
+  "t1_backends.py::test_multi_packet[mock-swift/aes1]",
+  "t1_backends.py::test_multi_packet[mock-swift/aes2]",
+  "t1_backends.py::test_multi_packet[local/aes+zlib]",
+  "t1_backends.py::test_multi_packet[remote-s3/aes+zlib]",
+  "t1_backends.py::test_multi_packet[mock-s3c/aes+zlib]",
+  "t1_backends.py::test_multi_packet[mock-swift/aes+zlib0]",
+  "t1_backends.py::test_multi_packet[mock-swift/aes+zlib1]",
+  "t1_backends.py::test_multi_packet[mock-swift/aes+zlib2]",
+  "t1_backends.py::test_multi_packet[local/zlib]",
+  "t1_backends.py::test_multi_packet[remote-s3/zlib]",
+  "t1_backends.py::test_multi_packet[mock-s3c/zlib]",
+  "t1_backends.py::test_multi_packet[mock-swift/zlib0]",
+  "t1_backends.py::test_multi_packet[mock-swift/zlib1]",
+  "t1_backends.py::test_multi_packet[mock-swift/zlib2]",
+  "t1_backends.py::test_issue431[local/raw]",
+  "t1_backends.py::test_issue431[local/plain]",
+  "t1_backends.py::test_issue431[local/aes]",
+  "t1_backends.py::test_issue431[local/aes+zlib]",
+  "t1_backends.py::test_issue431[local/zlib]",
+  "t1_backends.py::test_encryption[local/aes]",
+  "t1_backends.py::test_encryption[local/aes+zlib]",
+  "t1_backends.py::test_replay[local/aes]",
+  "t1_backends.py::test_replay[local/aes+zlib]",
+  "t1_backends.py::test_list_bug[mock-s3c/raw]",
+  "t1_backends.py::test_corrupted_get[mock-s3c/aes+zlib]",
+  "t1_backends.py::test_corrupted_meta[mock-s3c/raw]",
+  "t1_backends.py::test_corrupted_meta[mock-s3c/aes+zlib]",
+  "t1_backends.py::test_corrupted_put[mock-s3c/raw]",
+  "t1_backends.py::test_corrupted_put[mock-s3c/aes+zlib]",
+  "t1_backends.py::test_get_s3error[mock-s3c/raw]",
+  "t1_backends.py::test_get_s3error[mock-s3c/aes+zlib]",
+  "t1_backends.py::test_head_s3error[mock-s3c/raw]",
+  "t1_backends.py::test_head_s3error[mock-s3c/aes+zlib]",
+  "t1_backends.py::test_delete_s3error[mock-s3c/raw]",
+  "t1_backends.py::test_backoff[mock-s3c/raw]",
+  "t1_backends.py::test_httperror[mock-s3c/raw]",
+  "t1_backends.py::test_put_s3error_early[mock-s3c/raw]",
+  "t1_backends.py::test_put_s3error_early[mock-s3c/aes+zlib]",
+  "t1_backends.py::test_put_s3error_med[mock-s3c/raw]",
+  "t1_backends.py::test_put_s3error_med[mock-s3c/aes+zlib]",
+  "t1_backends.py::test_put_s3error_late[mock-s3c/raw]",
+  "t1_backends.py::test_put_s3error_late[mock-s3c/aes+zlib]",
+  "t1_backends.py::test_issue58[mock-s3c/raw]",
+  "t1_backends.py::test_issue58[mock-s3c/aes+zlib]",
+  "t1_backends.py::test_issue58_b[mock-s3c/raw]",
+  "t1_backends.py::test_issue58_b[mock-s3c/aes+zlib]",
+  "t1_backends.py::test_expired_token_get[backend0]",
+  "t1_backends.py::test_expired_token_put[backend0]",
+  "t1_backends.py::test_conn_abort[mock-s3c/raw]",
+  "t1_backends.py::test_conn_abort[mock-s3c/aes+zlib]",
+  "t1_dump.py::DumpTests::test_1_vals_1",
+  "t1_dump.py::DumpTests::test_1_vals_2",
+  "t1_dump.py::DumpTests::test_1_vals_3",
+  "t1_dump.py::DumpTests::test_2_buf_auto",
+  "t1_dump.py::DumpTests::test_2_buf_fixed",
+  "t1_dump.py::DumpTests::test_3_deltas_1",
+  "t1_dump.py::DumpTests::test_3_deltas_2",
+  "t1_dump.py::DumpTests::test_3_deltas_3",
+  "t1_dump.py::DumpTests::test_5_multi",
+  "t1_dump.py::DumpTests::test_transactions",
+  "t1_retry.py::test_retry",
+  "t1_retry.py::test_is_retry",
+  "t1_retry.py::test_logging",
+  "t1_serialization.py::test_simple",
+  "t1_serialization.py::test_wrong_key",
+  "t1_serialization.py::test_cmplx_value",
+  "t1_serialization.py::test_thaw_errors",
+  "t1_serialization.py::test_checksum",
+  "t1_serialization.py::test_checksum_bytes",
+  "t2_block_cache.py::test_thread_hang",
+  "t2_block_cache.py::test_get",
+  "t2_block_cache.py::test_expire",
+  "t2_block_cache.py::test_upload",
+  "t2_block_cache.py::test_remove_referenced",
+  "t2_block_cache.py::test_remove_cache",
+  "t2_block_cache.py::test_upload_race",
+  "t2_block_cache.py::test_expire_race",
+  "t2_block_cache.py::test_parallel_expire",
+  "t2_block_cache.py::test_remove_cache_db",
+  "t2_block_cache.py::test_remove_db",
+  "t2_block_cache.py::test_issue_241",
+  "t3_fs_api.py::test_getattr_root",
+  "t3_fs_api.py::test_create",
+  "t3_fs_api.py::test_extstat",
+  "t3_fs_api.py::test_getxattr",
+  "t3_fs_api.py::test_link",
+  "t3_fs_api.py::test_listxattr",
+  "t3_fs_api.py::test_read",
+  "t3_fs_api.py::test_readdir",
+  "t3_fs_api.py::test_forget",
+  "t3_fs_api.py::test_removexattr",
+  "t3_fs_api.py::test_rename",
+  "t3_fs_api.py::test_replace_file",
+  "t3_fs_api.py::test_replace_dir",
+  "t3_fs_api.py::test_setattr_one",
+  "t3_fs_api.py::test_setattr_two",
+  "t3_fs_api.py::test_truncate",
+  "t3_fs_api.py::test_truncate_0",
+  "t3_fs_api.py::test_setxattr",
+  "t3_fs_api.py::test_names",
+  "t3_fs_api.py::test_statfs",
+  "t3_fs_api.py::test_symlink",
+  "t3_fs_api.py::test_unlink",
+  "t3_fs_api.py::test_rmdir",
+  "t3_fs_api.py::test_relink",
+  "t3_fs_api.py::test_write",
+  "t3_fs_api.py::test_failsafe",
+  "t3_fs_api.py::test_create_open",
+  "t3_fs_api.py::test_edit",
+  "t3_fs_api.py::test_copy_tree",
+  "t3_fs_api.py::test_copy_tree_2",
+  "t3_fs_api.py::test_lock_tree",
+  "t3_fs_api.py::test_remove_tree",
+  "t3_fsck.py::fsck_tests::test_blocks_checksum",
+  "t3_fsck.py::fsck_tests::test_blocks_obj_id",
+  "t3_fsck.py::fsck_tests::test_cache",
+  "t3_fsck.py::fsck_tests::test_contents_inode",
+  "t3_fsck.py::fsck_tests::test_contents_inode_p",
+  "t3_fsck.py::fsck_tests::test_contents_name",
+  "t3_fsck.py::fsck_tests::test_ext_attrs_inode",
+  "t3_fsck.py::fsck_tests::test_ext_attrs_name",
+  "t3_fsck.py::fsck_tests::test_inode_blocks_block_id",
+  "t3_fsck.py::fsck_tests::test_inode_blocks_inode",
+  "t3_fsck.py::fsck_tests::test_inodes_size",
+  "t3_fsck.py::fsck_tests::test_lof1",
+  "t3_fsck.py::fsck_tests::test_lof2",
+  "t3_fsck.py::fsck_tests::test_loops",
+  "t3_fsck.py::fsck_tests::test_missing_obj",
+  "t3_fsck.py::fsck_tests::test_name_refcount",
+  "t3_fsck.py::fsck_tests::test_obj_refcounts",
+  "t3_fsck.py::fsck_tests::test_objects_id",
+  "t3_fsck.py::fsck_tests::test_orphaned_block",
+  "t3_fsck.py::fsck_tests::test_orphaned_inode",
+  "t3_fsck.py::fsck_tests::test_orphaned_name",
+  "t3_fsck.py::fsck_tests::test_orphaned_obj",
+  "t3_fsck.py::fsck_tests::test_symlinks_inode",
+  "t3_fsck.py::fsck_tests::test_tmpfile",
+  "t3_fsck.py::fsck_tests::test_unix_blocks",
+  "t3_fsck.py::fsck_tests::test_unix_child",
+  "t3_fsck.py::fsck_tests::test_unix_nomode_dir",
+  "t3_fsck.py::fsck_tests::test_unix_nomode_reg",
+  "t3_fsck.py::fsck_tests::test_unix_rdev",
+  "t3_fsck.py::fsck_tests::test_unix_size",
+  "t3_fsck.py::fsck_tests::test_unix_size_symlink",
+  "t3_fsck.py::fsck_tests::test_unix_symlink_no_target",
+  "t3_fsck.py::fsck_tests::test_unix_target",
+  "t3_fsck.py::fsck_tests::test_wrong_block_refcount",
+  "t3_fsck.py::fsck_tests::test_wrong_inode_refcount",
+  "t3_inode_cache.py::cache_tests::test_create",
+  "t3_inode_cache.py::cache_tests::test_del",
+  "t3_inode_cache.py::cache_tests::test_get",
+  "t3_verify.py::test_missing[True]",
+  "t3_verify.py::test_missing[False]",
+  "t3_verify.py::test_corrupted_head[True]",
+  "t3_verify.py::test_corrupted_head[False]",
+  "t3_verify.py::test_corrupted_body[True]",
+  "t3_verify.py::test_corrupted_body[False]",
+  "t4_adm.py::AdmTests::test_clear",
+  "t4_adm.py::AdmTests::test_key_recovery",
+  "t4_adm.py::AdmTests::test_passphrase",
+  "t4_authinfo.py::test_invalid_option",
+  "t4_authinfo.py::test_invalid_backend_option",
+  "t4_authinfo.py::test_option_precedence",
+  "t4_authinfo.py::test_passphrase",
+  "t4_fuse.py::TestFuse::()::test",
+  "t5_cache.py::TestPerstCache::()::test",
+  "t5_cache.py::TestPerstCache::()::test_cache_upload",
+  "t5_cache.py::TestPerstCache::()::test_cache_flush[True]",
+  "t5_cache.py::TestPerstCache::()::test_cache_flush[False]",
+  "t5_cache.py::TestPerstCache::()::test_cache_flush_unclean",
+  "t5_cp.py::TestCp::()::test",
+  "t5_ctrl.py::TestCtrl::()::test",
+  "t5_failsafe.py::TestFailsafe::()::test",
+  "t5_failsafe.py::TestNewerMetadata::()::test",
+  "t5_fsck.py::TestFsck::()::test",
+  "t5_full.py::TestFullgs::()::test",
+  "t5_full.py::TestFullswiftks::()::test",
+  "t5_full.py::TestFulls3c::()::test",
+  "t5_full.py::TestFullrackspace::()::test",
+  "t5_full.py::TestFulls3::()::test",
+  "t5_full.py::TestFullswift::()::test",
+  "t5_full.py::TestFull::()::test",
+  "t5_lock_rm.py::TestLockRemove::()::test",
+  "t6_upgrade.py::Tests3cUpgrade::()::test[True]",
+  "t6_upgrade.py::Tests3cUpgrade::()::test[False]",
+  "t6_upgrade.py::TestPlainswiftksUpgrade::()::test[True]",
+  "t6_upgrade.py::TestPlainswiftksUpgrade::()::test[False]",
+  "t6_upgrade.py::TestswiftUpgrade::()::test[True]",
+  "t6_upgrade.py::TestswiftUpgrade::()::test[False]",
+  "t6_upgrade.py::Tests3Upgrade::()::test[True]",
+  "t6_upgrade.py::Tests3Upgrade::()::test[False]",
+  "t6_upgrade.py::TestPlainswiftUpgrade::()::test[True]",
+  "t6_upgrade.py::TestPlainswiftUpgrade::()::test[False]",
+  "t6_upgrade.py::TestPlainrackspaceUpgrade::()::test[True]",
+  "t6_upgrade.py::TestPlainrackspaceUpgrade::()::test[False]",
+  "t6_upgrade.py::TestrackspaceUpgrade::()::test[True]",
+  "t6_upgrade.py::TestrackspaceUpgrade::()::test[False]",
+  "t6_upgrade.py::TestgsUpgrade::()::test[True]",
+  "t6_upgrade.py::TestgsUpgrade::()::test[False]",
+  "t6_upgrade.py::TestPlaingsUpgrade::()::test[True]",
+  "t6_upgrade.py::TestPlaingsUpgrade::()::test[False]",
+  "t6_upgrade.py::TestPlains3Upgrade::()::test[True]",
+  "t6_upgrade.py::TestPlains3Upgrade::()::test[False]",
+  "t6_upgrade.py::TestPlains3cUpgrade::()::test[True]",
+  "t6_upgrade.py::TestPlains3cUpgrade::()::test[False]",
+  "t6_upgrade.py::TestswiftksUpgrade::()::test[True]",
+  "t6_upgrade.py::TestswiftksUpgrade::()::test[False]",
+  "t6_upgrade.py::TestUpgrade::()::test[True]",
+  "t6_upgrade.py::TestUpgrade::()::test[False]",
+  "t6_upgrade.py::TestPlainUpgrade::()::test[True]",
+  "t6_upgrade.py::TestPlainUpgrade::()::test[False]"
+]
\ No newline at end of file
diff -pruN 2.33+dfsg-1/tests/.pytest_cache/v/cache/stepwise 3.0+dfsg-1/tests/.pytest_cache/v/cache/stepwise
--- 2.33+dfsg-1/tests/.pytest_cache/v/cache/stepwise	1970-01-01 00:00:00.000000000 +0000
+++ 3.0+dfsg-1/tests/.pytest_cache/v/cache/stepwise	2019-01-13 15:35:44.000000000 +0000
@@ -0,0 +1 @@
+[]
\ No newline at end of file
diff -pruN 2.33+dfsg-1/tests/t1_backends.py 3.0+dfsg-1/tests/t1_backends.py
--- 2.33+dfsg-1/tests/t1_backends.py	2018-12-28 13:08:31.000000000 +0000
+++ 3.0+dfsg-1/tests/t1_backends.py	2019-01-15 20:37:11.000000000 +0000
@@ -248,7 +248,8 @@ def yield_remote_backend(bi, _ctr=[0]):
     try:
         yield backend
     finally:
-        backend.clear()
+        for name in list(backend.list()):
+            backend.delete(name, force=True)
         backend.close()
 
 def newname(name_counter=[0]):
@@ -513,29 +514,6 @@ def test_delete_multi(backend):
     for key in remaining:
         fetch_object(backend, key)
 
-# No need to run with different encryption/compression settings,
-# ComprencBackend should just forward this 1:1 to the raw backend.
-@pytest.mark.with_backend('*/aes')
-def test_clear(backend):
-    keys = [ newname() for _ in range(5) ]
-    value = newvalue()
-
-    # Create objects
-    for key in keys:
-        backend[key] = value
-
-    # Wait for them
-    assert_in_index(backend, keys)
-    for key in keys:
-        fetch_object(backend, key)
-
-    # Delete everything
-    backend.clear()
-
-    assert_not_in_index(backend, keys)
-    for key in keys:
-        assert_not_readable(backend, key)
-
 @pytest.mark.with_backend('*/raw', 'local/{plain,aes,zlib}')
 def test_copy(backend):
     key1 = newname()
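
The first hunk above replaces the single `backend.clear()` call in the fixture teardown with explicit per-object deletion, consistent with the removal of the `clear()` delegation in t2_block_cache.py below. The same pattern as a standalone helper, a minimal sketch assuming only the `list()`/`delete()` interface visible in the hunk:

    def empty_backend(backend):
        # Materialize the listing first so that deletions cannot
        # invalidate the iterator while it is being consumed.
        for name in list(backend.list()):
            # force=True suppresses errors for keys that have already
            # vanished, e.g. under eventual consistency.
            backend.delete(name, force=True)
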
diff -pruN 2.33+dfsg-1/tests/t1_retry.py 3.0+dfsg-1/tests/t1_retry.py
--- 2.33+dfsg-1/tests/t1_retry.py	2018-12-28 13:08:31.000000000 +0000
+++ 3.0+dfsg-1/tests/t1_retry.py	2019-01-15 20:37:11.000000000 +0000
@@ -12,7 +12,7 @@ if __name__ == '__main__':
     import sys
     sys.exit(pytest.main([__file__] + sys.argv[1:]))
 
-from s3ql.backends.common import retry, retry_generator
+from s3ql.backends.common import retry
 from pytest_checklogs import assert_logs
 import logging
 
@@ -35,21 +35,6 @@ class NthAttempt:
         self.count += 1
         raise TemporaryProblem()
 
-    @retry_generator
-    def list_stuff(self, upto=10, start_after=-1):
-        for i in range(upto):
-            if i <= start_after:
-                continue
-            if i == 2 and self.count < 1:
-                self.count += 1
-                raise TemporaryProblem
-
-            if i == 7 and self.count < 4:
-                self.count += 1
-                raise TemporaryProblem
-
-            yield i
-
     @retry
     def test_is_retry(self, is_retry=False):
         assert is_retry == (self.count != 0)
@@ -63,10 +48,6 @@ def test_retry():
 
     assert inst.do_stuff()
 
-def test_retry_generator():
-    inst = NthAttempt(3)
-    assert list(inst.list_stuff(10)) == list(range(10))
-
 def test_is_retry():
     inst = NthAttempt(3)
     assert inst.test_is_retry()
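
The hunks above drop `retry_generator` and its test, leaving only the plain `retry` decorator. For orientation, a minimal self-contained sketch of a decorator in the same spirit — this is not s3ql's implementation, which additionally consults `is_temp_failure()` and passes an `is_retry` keyword to functions that accept one, as `test_is_retry` above relies on:

    import functools
    import time

    class TemporaryProblem(Exception):
        '''Stand-in for a transient backend failure.'''

    def retry(fn):
        @functools.wraps(fn)
        def wrapped(*args, **kwargs):
            delay = 0.1
            for _ in range(10):
                try:
                    return fn(*args, **kwargs)
                except TemporaryProblem:
                    time.sleep(delay)
                    delay = min(2 * delay, 5)  # capped exponential backoff
            return fn(*args, **kwargs)  # final attempt, errors propagate
        return wrapped
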
diff -pruN 2.33+dfsg-1/tests/t2_block_cache.py 3.0+dfsg-1/tests/t2_block_cache.py
--- 2.33+dfsg-1/tests/t2_block_cache.py	2018-12-28 13:23:30.000000000 +0000
+++ 3.0+dfsg-1/tests/t2_block_cache.py	2019-01-15 20:37:11.000000000 +0000
@@ -609,9 +609,6 @@ class MockBackendPool(AbstractBackend):
     def is_temp_failure(self, exc):
         return self.backend.is_temp_failure(exc)
 
-    def clear(self):
-        return self.backend.clear()
-
     def contains(self, key):
         return self.backend.contains(key)
 
diff -pruN 2.33+dfsg-1/tests/t4_adm.py 3.0+dfsg-1/tests/t4_adm.py
--- 2.33+dfsg-1/tests/t4_adm.py	2018-12-28 13:08:31.000000000 +0000
+++ 3.0+dfsg-1/tests/t4_adm.py	2019-01-15 20:37:11.000000000 +0000
@@ -23,7 +23,7 @@ import unittest
 import subprocess
 import pytest
 
-@pytest.mark.usefixtures('s3ql_cmd_argv', 'pass_reg_output')
+@pytest.mark.usefixtures('pass_s3ql_cmd_argv', 'pass_reg_output')
 class AdmTests(unittest.TestCase):
 
     def setUp(self):
@@ -79,28 +79,20 @@ class AdmTests(unittest.TestCase):
 
         backend.fetch('s3ql_passphrase') # will fail with wrong pw
 
-
-    def test_authinfo(self):
+    def test_clear(self):
         self.mkfs()
 
-        with tempfile.NamedTemporaryFile('wt') as fh:
-            print('[entry1]',
-                  'storage-url: local://',
-                  'fs-passphrase: clearly wrong',
-                  '',
-                  '[entry2]',
-                  'storage-url: %s' % self.storage_url,
-                  'fs-passphrase: %s' % self.passphrase,
-                  file=fh, sep='\n')
-            fh.flush()
-
-            proc = subprocess.Popen(self.s3ql_cmd_argv('fsck.s3ql') +
-                                    [ '--quiet', '--authfile', fh.name,
-                                      '--cachedir', self.cache_dir, '--log', 'none', self.storage_url ],
-                                    stdin=subprocess.PIPE, universal_newlines=True)
+        proc = subprocess.Popen(self.s3ql_cmd_argv('s3qladm') +
+                                [ '--quiet', '--log', 'none', '--authfile',
+                                  '/dev/null', 'clear', self.storage_url ],
+                                stdin=subprocess.PIPE, universal_newlines=True)
+        print('yes', file=proc.stdin)
+        proc.stdin.close()
+        self.assertEqual(proc.wait(), 0)
 
-            proc.stdin.close()
-            self.assertEqual(proc.wait(), 0)
+        plain_backend = local.Backend(Namespace(
+            storage_url=self.storage_url))
+        assert list(plain_backend.list()) == []
 
 
     def test_key_recovery(self):
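
The rewritten test above replaces the old authinfo round-trip (now covered by the new t4_authinfo.py below) with a test for `s3qladm clear`, driving the interactive confirmation prompt over stdin. The pattern in isolation, with a placeholder storage URL (a sketch, not a runnable test):

    import subprocess

    proc = subprocess.Popen(
        ['s3qladm', '--quiet', '--authfile', '/dev/null',
         'clear', 'local:///path/to/backend'],
        stdin=subprocess.PIPE, universal_newlines=True)
    print('yes', file=proc.stdin)  # answer the confirmation prompt
    proc.stdin.close()             # send EOF so the tool cannot block on input
    assert proc.wait() == 0
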
diff -pruN 2.33+dfsg-1/tests/t4_authinfo.py 3.0+dfsg-1/tests/t4_authinfo.py
--- 2.33+dfsg-1/tests/t4_authinfo.py	1970-01-01 00:00:00.000000000 +0000
+++ 3.0+dfsg-1/tests/t4_authinfo.py	2019-01-15 20:37:11.000000000 +0000
@@ -0,0 +1,129 @@
+#!/usr/bin/env python3
+'''
+t4_authinfo.py - this file is part of S3QL.
+
+Copyright © 2019 Nikolaus Rath <Nikolaus@rath.org>
+
+This work can be distributed under the terms of the GNU GPLv3.
+'''
+
+if __name__ == '__main__':
+    import pytest
+    import sys
+    sys.exit(pytest.main([__file__] + sys.argv[1:]))
+
+from argparse import Namespace
+import tempfile
+import shutil
+import subprocess
+import pytest
+
+
+def test_invalid_option(s3ql_cmd_argv, reg_output):
+    with tempfile.NamedTemporaryFile('wt') as fh:
+        print('[entry1]',
+              'storage-url: local:///foo',
+              'invalid-option: bla',
+              '',
+              file=fh, sep='\n')
+        fh.flush()
+
+        proc = subprocess.Popen(s3ql_cmd_argv('fsck.s3ql') +
+                                [ '--quiet', '--authfile', fh.name,
+                                  '--log', 'none', 'local:///foo/bar' ],
+                                stdin=subprocess.PIPE, universal_newlines=True)
+        proc.stdin.close()
+        assert proc.wait() == 2
+
+        reg_output(r"ERROR: '/com' does not exist", count=1)
+        proc = subprocess.Popen(s3ql_cmd_argv('fsck.s3ql') +
+                                [ '--quiet', '--authfile', fh.name,
+                                  '--log', 'none', 'local:///com' ],
+                                stdin=subprocess.PIPE, universal_newlines=True)
+        proc.stdin.close()
+        assert proc.wait() == 16
+
+
+def test_invalid_backend_option(s3ql_cmd_argv):
+    with tempfile.NamedTemporaryFile('wt') as fh:
+        print('[entry1]',
+              'storage-url: local:///foo',
+              'backend-options: invalid-key',
+              file=fh, sep='\n')
+        fh.flush()
+
+        proc = subprocess.Popen(s3ql_cmd_argv('fsck.s3ql') +
+                                [ '--quiet', '--authfile', fh.name,
+                                  '--log', 'none', 'local:///foo/bar' ],
+                                stdin=subprocess.PIPE, universal_newlines=True)
+        proc.stdin.close()
+        assert proc.wait() == 3
+
+
+def test_option_precedence(s3ql_cmd_argv, reg_output):
+    with tempfile.NamedTemporaryFile('wt') as fh:
+        print('[entry1]',
+              'storage-url: s3://',
+              'backend-options: invalid-key',
+              '',
+              '[entry2]',
+              'storage-url: s3://foo',
+              'backend-options: no-ssl',
+              file=fh, sep='\n')
+        fh.flush()
+
+        reg_output(r"ERROR: Invalid storage URL", count=1)
+        proc = subprocess.Popen(s3ql_cmd_argv('fsck.s3ql') +
+                                [ '--quiet', '--authfile', fh.name,
+                                  '--log', 'none', 's3://foo' ],
+                                stdin=subprocess.PIPE, universal_newlines=True)
+        proc.stdin.close()
+        assert proc.wait() == 2
+
+
+@pytest.fixture()
+def context():
+    ctx = Namespace()
+    ctx.cache_dir = tempfile.mkdtemp(prefix='s3ql-cache-')
+    ctx.backend_dir = tempfile.mkdtemp(prefix='s3ql-backend-')
+    ctx.storage_url = 'local://' + ctx.backend_dir
+
+    yield ctx
+
+    shutil.rmtree(ctx.cache_dir)
+    shutil.rmtree(ctx.backend_dir)
+
+
+def test_passphrase(context, reg_output, s3ql_cmd_argv):
+    passphrase = 'out3d'
+    proc = subprocess.Popen(s3ql_cmd_argv('mkfs.s3ql') +
+                            ['-L', 'test fs', '--max-obj-size', '500',
+                             '--authfile', '/dev/null', '--cachedir', context.cache_dir,
+                             '--quiet', context.storage_url ],
+                            stdin=subprocess.PIPE, universal_newlines=True)
+
+    print(passphrase, file=proc.stdin)
+    print(passphrase, file=proc.stdin)
+    proc.stdin.close()
+    assert proc.wait() == 0
+    reg_output(r'^WARNING: Maximum object sizes less than '
+                    '1 MiB will degrade performance\.$', count=1)
+
+    with tempfile.NamedTemporaryFile('wt') as fh:
+        print('[entry1]',
+              'storage-url: local://',
+              'fs-passphrase: clearly wrong',
+              '',
+              '[entry2]',
+              'storage-url: %s' % context.storage_url,
+              'fs-passphrase: %s' % passphrase,
+              file=fh, sep='\n')
+        fh.flush()
+
+        proc = subprocess.Popen(s3ql_cmd_argv('fsck.s3ql') +
+                                [ '--quiet', '--authfile', fh.name,
+                                  '--cachedir', context.cache_dir, '--log', 'none',
+                                  context.storage_url ],
+                                stdin=subprocess.PIPE, universal_newlines=True)
+        proc.stdin.close()
+        assert proc.wait() == 0
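
The new test file checks how options in the authinfo file are parsed, rejected, and resolved when several entries match. For reference, the format it constructs looks like the example below; section names are arbitrary, an entry applies to URLs matching its `storage-url`, and `test_option_precedence` and `test_passphrase` suggest that when several entries match, the later, more specific one takes effect. Values are illustrative:

    [entry1]
    storage-url: local://
    fs-passphrase: clearly wrong

    [entry2]
    storage-url: local:///path/to/backend
    fs-passphrase: correct-passphrase
    backend-options: no-ssl
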
diff -pruN 2.33+dfsg-1/tests/t4_fuse.py 3.0+dfsg-1/tests/t4_fuse.py
--- 2.33+dfsg-1/tests/t4_fuse.py	2018-12-28 13:23:30.000000000 +0000
+++ 3.0+dfsg-1/tests/t4_fuse.py	2019-01-15 20:37:11.000000000 +0000
@@ -30,7 +30,7 @@ from pytest import raises as assert_rais
 # For debugging
 USE_VALGRIND = False
 
-@pytest.mark.usefixtures('s3ql_cmd_argv', 'pass_reg_output')
+@pytest.mark.usefixtures('pass_s3ql_cmd_argv', 'pass_reg_output')
 class TestFuse:
 
     def setup_method(self, method):
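
The marker change above (and the identical one in t4_adm.py) switches from requesting the `s3ql_cmd_argv` fixture directly to a `pass_s3ql_cmd_argv` wrapper. A plausible sketch of such a wrapper, following the usual pytest idiom for class-based tests whose methods cannot accept fixture arguments — the body is an assumption, only the names come from the diff:

    import pytest

    @pytest.fixture()
    def pass_s3ql_cmd_argv(request, s3ql_cmd_argv):
        # Attach the helper to the test class so methods can call
        # self.s3ql_cmd_argv(...); staticmethod prevents implicit
        # binding of `self` as the first argument.
        request.cls.s3ql_cmd_argv = staticmethod(s3ql_cmd_argv)
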
