diff -pruN 0.54-1/ChangeLog 0.55-1/ChangeLog
--- 0.54-1/ChangeLog	2022-07-13 17:35:03.000000000 +0000
+++ 0.55-1/ChangeLog	2022-08-01 19:54:37.000000000 +0000
@@ -1,3 +1,135 @@
+Mon Aug  1 15:44:04 2022  Rob Lauer  <rlauer6@comcast.net>
+
+	[0.55 - bucket region]:
+	* requires: latest version of most modules
+	* src/main/perl/lib/Amazon/S3.pm.in
+	- pod tweaks, corrections
+	- don't specify a minimum version of perl
+	(new): set default region to 'us-east-1', again
+	(get_bucket_location): $bucket, not $self
+	(buckets)
+	- verify region option
+	- pass hash of options and region to _send_request
+	(add_bucket)
+	- do not add region constraint if us-east-1
+	- refactored, send region to _send_request_expect_nothing
+	(delete_bucket): likewise refactored
+	(list_bucket): likewise refactored
+	(_make_request): use region() method of signer
+	(_do_http): debug statements, set last_response, reset_errors
+	(_do_http_no_redirect): likewise
+	(_send_request_expect_nothing): likewise
+	(_send_request_expect_nothing_probed)
+	- accept hash argument
+	- debug statements
+	- croak if redirect, but no Location
+	(error): new
+	(reset_errors): new
+	(_remember_error): set error
+	* src/main/perl/lib/Amazon/S3/Bucket.pm.in
+	- pod tweaks, corrections
+	(new)
+	- + logger attribute
+	- + verify_region attribute, verify region if true
+	(_uri): remove leading '/'
+	(add_key): correct region if 301 response
+	(upload_multipart_object): debug messages
+	(upload_part_of_multipart_upload): likewise
+	(complete_multipart_upload): likewise
+	(get_key): remove redundant debug message
+	(delete_key): pass region to _send_request_expect_nothing
+	(set_acl): likewise
+	* src/main/perl/t/01-api.t: do not bailout on early tests
+	(error): new
+	(last_response): new
+	* src/main/perl/t/03-region.t: default region is us-east-1
+
+Fri Jul 22 14:47:30 2022  Rob Lauer  <rlauer6@comcast.net>
+
+	[0.55 - testing, revert to XML::Simple]:
+	* src/main/perl/t/01-api.t: remove /r option in regex
+	* src/main/perl/t/04-list-buckets: likewise
+	* src/main/perl/lib/Amazon/S3.pm: use XML::Simple
+	* src/main/perl/lib/Amazon/S3/Bucket.pm.in: likewise
+	(make_xml_document_simple): new
+	* src/main/perl/t/06-list-multipart-uploads.t: XML::Simple
+	* configure.ac: remove Lib::XML, Lib::XML::Simple, add XML::Simple
+	* cpan/requires: likewise
+	* TODO.md: new
+
+Thu Jul 21 11:14:16 2022  Rob Lauer  <rlauer6@comcast.net>
+
+	[0.55 - CI/CD]:
+	* .github/workflows/build.yml: remove make cpan
+	* README.md: generated
+	* src/main/perl/lib/Amazon/S3.pm.in: update badge
+
+Thu Jul 21 10:53:03 2022  Rob Lauer  <rlauer6@comcast.net>
+
+	[0.55 - CI/CD]:
+	* .github/workflows/build.yml
+	* README.md: generated
+	* configure.ac: typo, IO::Scalar
+	* cpan/requires
+	- IO::Scalar, JSON:PP, Pod::Markdown
+	* src/main/perl/lib/Amazon/S3.pm.in: add badge
+	* NEWS.md: update
+
+Mon Jul 18 16:27:41 2022  Rob Lauer  <rlauer6@comcast.net>
+
+	[0.55 - regional buckets]:
+	* NEWS.md: new
+	* src/main/perl/lib/Amazon/S3/Constants.pm.in
+	- + $MIN_MULTIPART_UPLOAD_CHUNK_SIZE
+	* src/main/perl/lib/Amazon/S3.pm.in
+	- document Signature V4 changes/implications
+	- use new Amazon::S3::Signature::V4 object
+	(_make_request): accept hash ref as argument
+	(get_bucket_location): new
+	(reset_signer_region): new
+	* src/main/perl/lib/Amazon/S3/Bucket.pm.in
+	- document multipart methods
+	- send region in all _make_request calls
+	(_send_request): check if arg is a request
+	(new)
+	- accept region argument
+	- set bucket region if region not passed
+	(upload_multipart_object): new
+	* src/main/perl/lib/Amazon/S3/Signature/V4: new
+	* src/main/perl/lib/Makefile.am: add above to build
+	* src/main/perl/t/05-multpart-upload.t: new
+	* src/main/perl/t/06-list-multpart-upload.t: new
+
+Thu Jul 14 06:34:56 2022  Rob Lauer  <rlauer6@comcast.net>
+
+	[0.55 - use XML::LibXML]:
+	* VERSION: bump
+	* src/main/perl/lib/Amazon/S3.pm.in: use XML::LibXML, not XML::Simple
+	- perlcritic cleanups
+	- pod cleanup
+	(new)
+	- cache_signer
+	- encrypt credentials
+	(get_default_region): new
+	(get_aws_access_key_id): new
+	(get_aws_secret_access_key): new
+	(get_token): new
+	(_decrypt): new
+	(_encrypt): new
+	(signer)
+	- accesses _signer now
+	- set default region to caller's value or default
+	(buckets): set region to us-east-1 temporarily
+	(debug): new convenience method for level => 'debug'
+	(_make_request): allow disabling of domain buckets
+	* src/main/perl/lib/Amazon/S3/Bucket.pm.in: comment tweak
+	* src/main/perl/lib/Amazon/S3/Constant.pm.in: $DOT
+	* src/main/perl/t/01-api.t: set $dns_bucket_names to true?
+	* cpan/test-requires: +Test::Output
+	* cpan/requires: -Test::Output
+	* configure.ac
+	- ads_PERL_MODULE XML::LibXML::Simple, XML::LibXML, Test::Output
+
 Wed Jul 13 13:09:04 2022  Rob Lauer  <rlauer6@comcast.net>
 
 	[0.54 - merge timmullin changes]:
diff -pruN 0.54-1/debian/changelog 0.55-1/debian/changelog
--- 0.54-1/debian/changelog	2022-07-22 10:14:45.000000000 +0000
+++ 0.55-1/debian/changelog	2022-08-03 19:23:19.000000000 +0000
@@ -1,3 +1,12 @@
+libamazon-s3-perl (0.55-1) unstable; urgency=medium
+
+  * Team upload.
+  * Import upstream version 0.55.
+  * Update test and runtime dependencies.
+  * Refresh spellings.patch.
+
+ -- gregor herrmann <gregoa@debian.org>  Wed, 03 Aug 2022 21:23:19 +0200
+
 libamazon-s3-perl (0.54-1) unstable; urgency=medium
 
   * Team upload.
diff -pruN 0.54-1/debian/control 0.55-1/debian/control
--- 0.54-1/debian/control	2022-07-22 10:14:45.000000000 +0000
+++ 0.55-1/debian/control	2022-08-03 19:23:19.000000000 +0000
@@ -10,6 +10,8 @@ Build-Depends-Indep: libclass-accessor-p
                      libdigest-md5-file-perl <!nocheck>,
                      libfile-sharedir-install-perl,
                      libhttp-date-perl <!nocheck>,
+                     libio-stringy-perl <!nocheck>,
+                     liblwp-protocol-https-perl <!nocheck>,
                      liblwp-useragent-determined-perl <!nocheck>,
                      libnet-amazon-signature-v4-perl <!nocheck>,
                      libreadonly-perl <!nocheck>,
@@ -17,8 +19,8 @@ Build-Depends-Indep: libclass-accessor-p
                      libtest-output-perl (>= 1.033) <!nocheck>,
                      libtest-simple-perl (>= 1.302190) <!nocheck>,
                      liburi-perl (>= 5.10) <!nocheck>,
+                     libwww-perl <!nocheck>,
                      libxml-simple-perl <!nocheck>,
-                     libxml-libxml-perl <!nocheck>,
                      perl
 Standards-Version: 4.6.1
 Vcs-Browser: https://salsa.debian.org/perl-team/modules/packages/libamazon-s3-perl
@@ -34,14 +36,15 @@ Depends: ${misc:Depends},
          libdigest-hmac-perl (>= 1.04),
          libdigest-md5-file-perl,
          libhttp-date-perl,
+         libio-stringy-perl,
+         liblwp-protocol-https-perl,
          liblwp-useragent-determined-perl,
          libnet-amazon-signature-v4-perl,
          libreadonly-perl,
          libscalar-list-utils-perl,
-         libtest-output-perl (>= 1.033),
          liburi-perl (>= 5.10),
-         libxml-simple-perl,
-         libxml-libxml-perl
+         libwww-perl,
+         libxml-simple-perl
 Description: portable client interface to Amazon Simple Storage System (S3)
  Amazon::S3 provides a portable client interface to Amazon Simple Storage
  System (S3). It is a fork of Net::Amazon::S3 (packaged for Debian as
diff -pruN 0.54-1/debian/patches/spellings.patch 0.55-1/debian/patches/spellings.patch
--- 0.54-1/debian/patches/spellings.patch	2022-07-22 10:14:45.000000000 +0000
+++ 0.55-1/debian/patches/spellings.patch	2022-08-03 19:23:19.000000000 +0000
@@ -3,24 +3,24 @@ Bug: https://rt.cpan.org/Public/Bug/Disp
 Forwarded: https://rt.cpan.org/Public/Bug/Display.html?id=119229
 Author: Christopher Hoskin <mans0954@debian.org>
 Reviewed-by: gregor herrmann <gregoa@debian.org>
-Last-Update: 2022-07-16
+Last-Update: 2022-08-03
 
 --- a/lib/Amazon/S3.pm
 +++ b/lib/Amazon/S3.pm
-@@ -1374,7 +1374,7 @@
+@@ -1850,7 +1850,7 @@
  
  =item acl_short (optional)
  
 -See the set_acl subroutine for documenation on the acl_short options
 +See the set_acl subroutine for documentation on the acl_short options
  
- =back
+ =item location_constraint
  
 --- a/lib/Amazon/S3/Bucket.pm
 +++ b/lib/Amazon/S3/Bucket.pm
-@@ -740,9 +740,9 @@
- library would send and add headers that are not typically
- required for S3 interactions.
+@@ -1080,9 +1080,9 @@
+ 
+ =item acl_short (optional)
  
 -In addition to additional and overriden HTTP headers, this
 +In addition to additional and overridden HTTP headers, this
diff -pruN 0.54-1/lib/Amazon/S3/Bucket.pm 0.55-1/lib/Amazon/S3/Bucket.pm
--- 0.54-1/lib/Amazon/S3/Bucket.pm	2022-07-13 17:35:03.000000000 +0000
+++ 0.55-1/lib/Amazon/S3/Bucket.pm	2022-08-01 19:54:37.000000000 +0000
@@ -12,15 +12,17 @@ use Digest::MD5::File qw(file_md5 file_m
 use English qw{-no_match_vars};
 use File::stat;
 use IO::File;
+use IO::Scalar;
 use MIME::Base64;
-use XML::LibXML;
+use Scalar::Util qw{reftype};
 use URI;
 
 use parent qw{Class::Accessor::Fast};
 
-our $VERSION = '0.54'; ## no critic
+our $VERSION = '0.55'; ## no critic
 
-__PACKAGE__->mk_accessors(qw{bucket creation_date account buffer_size});
+__PACKAGE__->mk_accessors(
+  qw{bucket creation_date account buffer_size region logger verify_region });
 
 ########################################################################
 sub new {
@@ -38,6 +40,27 @@ sub new {
   croak 'no account'
     if !$self->account;
 
+  if ( !$self->logger ) {
+    $self->logger( $self->account->get_logger );
+  }
+
+  # now each bucket maintains its own region
+  if ( !$self->region && $self->verify_region ) {
+    my $region;
+
+    if ( !$self->account->err ) {
+      $region = $self->get_location_constraint() // 'us-east-1';
+    }
+
+    $self->logger->debug( sprintf "bucket: %s region: %s\n",
+      $self->bucket, ( $region // $EMPTY ) );
+
+    $self->region($region);
+  }
+  elsif ( !$self->region ) {
+    $self->region( $self->account->region );
+  }
+
   return $self;
 } ## end sub new
 
@@ -46,6 +69,10 @@ sub _uri {
 ########################################################################
   my ( $self, $key ) = @_;
 
+  if ($key) {
+    $key =~ s/^\///xsm;
+  }
+
   my $uri
     = ($key)
     ? $self->bucket . $SLASH . $self->account->_urlencode($key)
@@ -98,20 +125,64 @@ sub add_key {
     $conf->{'Content-MD5'} = $md5_base64;
   } ## end else [ if ( ref $value eq 'SCALAR')]
 
-  # If we're pushing to a bucket that's under DNS flux, we might get a 307
-  # Since LWP doesn't support actually waiting for a 100 Continue response,
-  # we'll just send a HEAD first to see what's going on
-
-  if ( ref $value ) {
-    return $self->account->_send_request_expect_nothing_probed( 'PUT',
-      $self->_uri($key), $conf, $value );
-  } ## end if ( ref $value )
-  else {
-    return $self->account->_send_request_expect_nothing( 'PUT',
-      $self->_uri($key), $conf, $value );
-  } ## end else [ if ( ref $value ) ]
+  # If we're pushing to a bucket that's under
+  # DNS flux, we might get a 307 Since LWP doesn't support actually
+  # waiting for a 100 Continue response, we'll just send a HEAD first
+  # to see what's going on
+  my $retval = eval {
+    return $self->_add_key(
+      { headers => $conf,
+        data    => $value,
+        key     => $key
+      }
+    );
+  };
+
+  # one more try? if someone specified the wrong region, we'll get a
+  # 301 and you'll only know the region of redirection - no location
+  # header provided...
+  if ($EVAL_ERROR) {
+    my $rsp = $self->account->last_response;
+    if ( $rsp->code eq '301' ) {
+      $self->region( $rsp->headers->{'x-amz-bucket-region'} );
+    }
+
+    return $self->_add_key(
+      { headers => $conf,
+        data    => $value,
+        key     => $key
+      }
+    );
+  }
+
 } ## end sub add_key
 
+sub _add_key {
+  my ( $self, @args ) = @_;
+
+  my ( $data, $headers, $key ) = @{ $args[0] }{qw{data headers key}};
+
+  if ( ref $data ) {
+    return $self->account->_send_request_expect_nothing_probed(
+      { method  => 'PUT',
+        path    => $self->_uri($key),
+        headers => $headers,
+        data    => $data,
+        region  => $self->region,
+      }
+    );
+  } ## end if ( ref $value )
+  else {
+    return $self->account->_send_request_expect_nothing(
+      { method  => 'PUT',
+        path    => $self->_uri($key),
+        headers => $headers,
+        data    => $data,
+        region  => $self->region,
+      }
+    );
+  }
+} ## end else [ if ( ref $value ) ]
 ########################################################################
 sub add_key_filename {
 ########################################################################
@@ -120,12 +191,125 @@ sub add_key_filename {
   return $self->add_key( $key, \$value, $conf );
 } ## end sub add_key_filename
 
+########################################################################
+sub upload_multipart_object {
+########################################################################
+  my ( $self, @args ) = @_;
+
+  my $logger = $self->logger;
+
+  my %parameters;
+
+  if ( @args == 1 && reftype( $args[0] ) eq 'HASH' ) {
+    %parameters = %{ $args[0] };
+  }
+  else {
+    %parameters = @args;
+  }
+
+  croak 'no key!'
+    if !$parameters{key};
+
+  croak 'either data, callback or fh must be set!'
+    if !$parameters{data} && !$parameters{callback} && !$parameters{fh};
+
+  croak 'callback must be a reference to a subroutine!'
+    if $parameters{callback} && reftype( $parameters{callback} ) ne 'CODE';
+
+  $parameters{abort_on_error} //= $TRUE;
+  $parameters{chunk_size}     //= $MIN_MULTIPART_UPLOAD_CHUNK_SIZE;
+
+  if ( !$parameters{callback} && !$parameters{fh} ) {
+    #...but really nobody should be passing a >5MB scalar
+    my $data = ref $parameters{data} ? $parameters{data} : \$parameters{data};
+
+    $parameters{fh} = IO::Scalar->new($data);
+  }
+
+  # ...having a file handle implies, we use this callback
+  if ( $parameters{fh} ) {
+    my $fh = $parameters{fh};
+
+    $fh->seek( 0, 2 );
+
+    my $length = $fh->tell;
+    $fh->seek( 0, 0 );
+
+    $logger->trace( sub { return sprintf 'length of object: %s', $length; } );
+
+    croak 'length of the object must be >= '
+      . $MIN_MULTIPART_UPLOAD_CHUNK_SIZE
+      if $length < $MIN_MULTIPART_UPLOAD_CHUNK_SIZE;
+
+    my $chunk_size
+      = ( $parameters{chunk_size} && $parameters{chunk_size} )
+      > $MIN_MULTIPART_UPLOAD_CHUNK_SIZE
+      ? $parameters{chunk_size}
+      : $MIN_MULTIPART_UPLOAD_CHUNK_SIZE;
+
+    $parameters{callback} = sub {
+      return
+        if !$length;
+
+      my $bytes_read = 0;
+
+      my $n = $length >= $chunk_size ? $chunk_size : $length;
+
+      $logger->trace( sprintf 'reading %d bytes', $n );
+
+      my $buffer;
+
+      my $bytes = $fh->read( $buffer, $n, $bytes_read );
+      $logger->trace( sprintf 'read %d bytes', $bytes );
+
+      $bytes_read += $bytes;
+
+      $length -= $bytes;
+
+      $logger->trace( sprintf '%s bytes left to read', $length );
+
+      return ( \$buffer, $bytes );
+    };
+
+  }
+
+  my $headers = $parameters{headers} || {};
+
+  my $id = $self->initiate_multipart_upload( $parameters{key}, $headers );
+
+  $logger->trace( sprintf 'multipart id: %s', $id );
+
+  my $part = 1;
+  my %parts;
+  my $key = $parameters{key};
+
+  eval {
+    while (1) {
+      my ( $buffer, $length ) = $parameters{callback}->();
+      last if !$buffer;
+
+      my $etag = $self->upload_part_of_multipart_upload(
+        { id => $id, key => $key, data => $buffer, part => $part } );
+
+      $parts{ $part++ } = $etag;
+    }
+
+    $self->complete_multipart_upload( $parameters{key}, $id, \%parts );
+  };
+
+  if ( $EVAL_ERROR && $parameters{abort_on_error} ) {
+    $self->abort_multipart_upload( $key, $id );
+    %parts = ();
+  }
+
+  return \%parts;
+}
+
 # Initiates a multipart upload operation. This is necessary for uploading
 # files > 5Gb to Amazon S3
 #
 # returns: upload ID assigned by Amazon (used to identify this
 # particular upload in other operations)
-
 ########################################################################
 sub initiate_multipart_upload {
 ########################################################################
@@ -136,8 +320,14 @@ sub initiate_multipart_upload {
 
   my $acct = $self->account;
 
-  my $request
-    = $acct->_make_request( 'POST', $self->_uri($key) . '?uploads=', $conf );
+  my $request = $acct->_make_request(
+    { region  => $self->region,
+      method  => 'POST',
+      path    => $self->_uri($key) . '?uploads=',
+      headers => $conf
+    }
+  );
+
   my $response = $acct->_do_http($request);
 
   $acct->_croak_if_response_error($response);
@@ -160,7 +350,27 @@ sub upload_part_of_multipart_upload {
 ########################################################################
   my ( $self, @args ) = @_;
 
-  my ( $key, $upload_id, $part_number, $data, $length ) = @args;
+  my ( $key, $upload_id, $part_number, $data, $length );
+
+  if ( @args == 1 ) {
+    if ( reftype( $args[0] ) eq 'HASH' ) {
+      ( $key, $upload_id, $part_number, $data, $length )
+        = @{ $args[0] }{qw{ key id part data length}};
+    }
+    elsif ( reftype( $args[0] ) eq 'ARRAY' ) {
+      ( $key, $upload_id, $part_number, $data, $length ) = @{ $args[0] };
+    }
+  }
+  else {
+    ( $key, $upload_id, $part_number, $data, $length ) = @args;
+  }
+
+  # argh...wish we didn't have to do this!
+  if ( ref $data ) {
+    $data = ${$data};
+  }
+
+  $length = $length || length $data;
 
   croak 'Object key is required'
     if !$key;
@@ -183,9 +393,18 @@ sub upload_part_of_multipart_upload {
   $conf->{'Content-Length'} = $length;
 
   my $params = "?partNumber=${part_number}&uploadId=${upload_id}";
-  my $request
-    = $acct->_make_request( 'PUT', $self->_uri($key) . $params, $conf,
-    $data );
+
+  $self->logger->debug( 'uploading ' . sprintf 'part: %s length: %s',
+    $part_number, length $data );
+
+  my $request = $acct->_make_request(
+    { region  => $self->region,
+      method  => 'PUT',
+      path    => $self->_uri($key) . $params,
+      headers => $conf,
+      data    => $data
+    }
+  );
 
   my $response = $acct->_do_http($request);
 
@@ -202,6 +421,26 @@ sub upload_part_of_multipart_upload {
   return $etag;
 } ## end sub upload_part_of_multipart_upload
 
+########################################################################
+sub make_xml_document_simple {
+########################################################################
+  my ($parts_hr) = @_;
+
+  my $xml = q{<?xml version="1.0" encoding="UTF-8"?>};
+  my $xml_template
+    = '<Part><PartNumber>%s</PartNumber><ETag>%s</ETag></Part>';
+  my @parts;
+
+  foreach my $part_num ( sort { $a <=> $b } keys %{$parts_hr} ) {
+    push @parts, sprintf $xml_template, $part_num, $parts_hr->{$part_num};
+  }
+
+  $xml .= sprintf "\n<CompleteMultipartUpload>%s</CompleteMultipartUpload>\n",
+    join q{}, @parts;
+
+  return $xml;
+}
+
 #
 # Inform Amazon that the multipart upload has been completed
 # You must supply a hash of part Numbers => eTags
@@ -212,6 +451,8 @@ sub complete_multipart_upload {
 ########################################################################
   my ( $self, $key, $upload_id, $parts_hr ) = @_;
 
+  $self->logger->debug( Dumper( [ $key, $upload_id, $parts_hr ] ) );
+
   croak 'Object key is required'
     if !$key;
 
@@ -224,22 +465,12 @@ sub complete_multipart_upload {
   # The complete command requires sending a block of xml containing all
   # the part numbers and their associated etags (returned from the upload)
 
-  #build XML doc
-  my $xml_doc      = XML::LibXML::Document->new( '1.0', 'UTF-8' );
-  my $root_element = $xml_doc->createElement('CompleteMultipartUpload');
-  $xml_doc->addChild($root_element);
+  # build XML doc
 
-  # Add the content
-  foreach my $part_num ( sort { $a <=> $b } keys %{$parts_hr} ) {
+  my $content = make_xml_document_simple($parts_hr);
 
-    # For each part, create a <Part> element with the part number & etag
-    my $part = $xml_doc->createElement('Part');
-    $part->appendTextChild( 'PartNumber' => $part_num );
-    $part->appendTextChild( 'ETag'       => $parts_hr->{$part_num} );
-    $root_element->addChild($part);
-  } ## end foreach my $part_num ( sort...)
+  $self->logger->debug("content: \n$content");
 
-  my $content    = $xml_doc->toString;
   my $md5        = md5($content);
   my $md5_base64 = encode_base64($md5);
   chomp $md5_base64;
@@ -250,13 +481,24 @@ sub complete_multipart_upload {
     'Content-Type'   => 'application/xml'
   };
 
-  my $acct    = $self->account;
-  my $params  = "?uploadId=${upload_id}";
-  my $request = $acct->_make_request( 'POST', $self->_uri($key) . $params,
-    $conf, $content );
+  my $acct   = $self->account;
+  my $params = "?uploadId=${upload_id}";
+
+  my $request = $acct->_make_request(
+    { region  => $self->region,
+      method  => 'POST',
+      path    => $self->_uri($key) . $params,
+      headers => $conf,
+      data    => $content
+    }
+  );
+
   my $response = $acct->_do_http($request);
 
-  $acct->_croak_if_response_error($response);
+  if ( $response->code !~ /\A2\d\d\z/xsm ) {
+    $acct->_remember_errors( $response->content, 1 );
+    croak $response->status_line;
+  }
 
   return $TRUE;
 } ## end sub complete_multipart_upload
@@ -275,9 +517,16 @@ sub abort_multipart_upload {
   croak 'Upload id is required'
     if !$upload_id;
 
-  my $acct    = $self->account;
-  my $params  = "?uploadId=${upload_id}";
-  my $request = $acct->_make_request( 'DELETE', $self->_uri($key) . $params );
+  my $acct   = $self->account;
+  my $params = "?uploadId=${upload_id}";
+
+  my $request = $acct->_make_request(
+    { region => $self->region,
+      method => 'DELETE',
+      path   => $self->_uri($key) . $params
+    }
+  );
+
   my $response = $acct->_do_http($request);
 
   $acct->_croak_if_response_error($response);
@@ -302,8 +551,14 @@ sub list_multipart_upload_parts {
 
   my $acct   = $self->account;
   my $params = "?uploadId=${upload_id}";
-  my $request
-    = $acct->_make_request( 'GET', $self->_uri($key) . $params, $conf );
+
+  my $request = $acct->_make_request(
+    { region  => $self->region,
+      method  => 'GET',
+      path    => $self->_uri($key) . $params,
+      headers => $conf
+    }
+  );
 
   my $response = $acct->_do_http($request);
 
@@ -323,8 +578,14 @@ sub list_multipart_uploads {
   my ( $self, $conf ) = @_;
 
   my $acct = $self->account;
-  my $request
-    = $acct->_make_request( 'GET', $self->_uri() . '?uploads', $conf );
+
+  my $request = $acct->_make_request(
+    { region  => $self->region,
+      method  => 'GET',
+      path    => $self->_uri() . '?uploads',
+      headers => $conf
+    }
+  );
 
   my $response = $acct->_do_http($request);
 
@@ -357,16 +618,16 @@ sub get_key {
 
   my $uri = $self->_uri($key);
 
-  my $request = $acct->_make_request( $method, $uri, {} );
-
-  my $response = $acct->_do_http( $request, $filename );
-
-  $acct->get_logger->debug(
-    sub {
-      return Dumper( [ $request, $response ] );
+  my $request = $acct->_make_request(
+    { region  => $self->region,
+      method  => $method,
+      path    => $uri,
+      headers => {}
     }
   );
 
+  my $response = $acct->_do_http( $request, $filename );
+
   return if $response->code == 404;
 
   $acct->_croak_if_response_error($response);
@@ -392,7 +653,8 @@ sub get_key {
       ? file_md5_hex($filename)
       : md5_hex( $return->{value} );
 
-    # Some S3-compatible providers return an all-caps MD5 value in the etag so it should be lc'd for comparison.
+    # Some S3-compatible providers return an all-caps MD5 value in the
+    # etag so it should be lc'd for comparison.
     croak "Computed and Response MD5's do not match:  $md5 : $etag"
       if $md5 ne lc $etag;
   } ## end if ( $method eq 'GET' )
@@ -426,8 +688,13 @@ sub delete_key {
   croak 'must specify key'
     if !$key && length $key;
 
-  return $self->account->_send_request_expect_nothing( 'DELETE',
-    $self->_uri($key), {} );
+  return $self->account->_send_request_expect_nothing(
+    { method  => 'DELETE',
+      region  => $self->region,
+      path    => $self->_uri($key),
+      headers => {}
+    }
+  );
 } ## end sub delete_key
 
 ########################################################################
@@ -501,8 +768,13 @@ sub get_acl {
 
   my $acct = $self->account;
 
-  my $request
-    = $acct->_make_request( 'GET', $self->_uri($key) . '?acl=', {} );
+  my $request = $acct->_make_request(
+    { region  => $self->region,
+      method  => 'GET',
+      path    => $self->_uri($key) . '?acl=',
+      headers => {}
+    }
+  );
 
   my $old_redirectable = $acct->ua->requests_redirectable;
   $acct->ua->requests_redirectable( [] );
@@ -516,7 +788,13 @@ sub get_acl {
     my $old_host = $acct->host;
     $acct->host( $uri->host );
 
-    my $request = $acct->_make_request( 'GET', $uri->path, {} );
+    my $request = $acct->_make_request(
+      { region  => $self->region,
+        method  => 'GET',
+        path    => $uri->path,
+        headers => {}
+      }
+    );
 
     $response = $acct->_do_http($request);
 
@@ -553,8 +831,14 @@ sub set_acl {
 
   my $xml = $conf->{acl_xml} || $EMPTY;
 
-  return $self->account->_send_request_expect_nothing( 'PUT', $path,
-    $hash_ref, $xml );
+  return $self->account->_send_request_expect_nothing(
+    { method  => 'PUT',
+      path    => $path,
+      headers => $hash_ref,
+      data    => $xml,
+      region  => $self->region
+    }
+  );
 } ## end sub set_acl
 
 ########################################################################
@@ -562,8 +846,12 @@ sub get_location_constraint {
 ########################################################################
   my ($self) = @_;
 
-  my $xpc
-    = $self->account->_send_request( 'GET', $self->bucket . '/?location=' );
+  my $xpc = $self->account->_send_request(
+    { region => $self->region,
+      method => 'GET',
+      path   => $self->bucket . '/?location='
+    }
+  );
 
   if ( !$xpc ) {
     $self->account->_remember_errors($xpc);
@@ -583,6 +871,14 @@ sub get_location_constraint {
 # proxy up the err requests
 
 ########################################################################
+sub last_response {
+########################################################################
+  my ($self) = @_;
+
+  return $self->account->last_reponse;
+}
+
+########################################################################
 sub err {
 ########################################################################
   my ($self) = @_;
@@ -599,6 +895,14 @@ sub errstr {
 } ## end sub errstr
 
 ########################################################################
+sub error {
+########################################################################
+  my ($self) = @_;
+
+  return $self->account->error;
+} ## end sub err
+
+########################################################################
 sub _content_sub {
 ########################################################################
   my ( $filename, $buffer_size ) = @_;
@@ -652,6 +956,8 @@ sub _content_sub {
 
 __END__
 
+=pod
+
 =head1 NAME
 
 Amazon::S3::Bucket - A container class for a S3 bucket and its contents.
@@ -695,23 +1001,53 @@ Amazon::S3::Bucket - A container class f
 
 Instaniates a new bucket object. 
 
-Requires a hash containing two arguments:
+Pass a hash or hash reference containing various options:
 
 =over
 
-=item bucket
+=item bucket (required)
 
 The name (identifier) of the bucket.
 
-=item account
+=item account (required)
 
 The L<S3::Amazon> object (representing the S3 account) this
 bucket is associated with.
 
+=item buffer_size
+
+The buffer size used for reading and writing objects to S3.
+
+default: 4K
+
+=item region
+
+If no region is set and C<verify_region> is set to true, the region of
+the bucket will be determined by calling the
+C<get_location_constraint> method.  Note that this will decrease
+performance of the constructor. If you know the region or are
+operating in only 1 region, set the region in the C<account> object
+(C<Amazon::S3>).
+
+=item logger
+
+Sets the logger object (should be an object capable of providing at
+least a C<debug> and C<trace> method for recording log messages. If no
+logger object is passed the C<account> object's logger object will be used.
+
+=item verify_region
+
+Indicates that the bucket's region should be determined by calling the
+C<get_location_constraint> method.
+
+default: false
+
 =back
 
-NOTE: This method does not check if a bucket actually
-exists. It simply instaniates the bucket.
+I<NOTE:> This method does not check if a bucket actually exists unless
+you set C<verify_region> to true. If the bucket does not exist,
+the constructor will set the region to the default region specified by
+the L<Amazon::S3> object (C<account>) that you passed.
 
 Typically a developer will not call this method directly,
 but work through the interface in L<S3::Amazon> that will
@@ -719,27 +1055,31 @@ handle their creation.
 
 =head2 add_key
 
-Takes three positional parameters:
+ add_key( key, value, configuration)
+
+Write a new or existing object to S3.
 
 =over
 
 =item key
 
-A string identifier for the resource in this bucket
+A string identifier for the object being written to the bucket.
 
 =item value
 
-A SCALAR string representing the contents of the resource.
+A SCALAR string representing the contents of the object.
 
 =item configuration
 
 A HASHREF of configuration data for this key. The configuration
-is generally the HTTP headers you want to pass the S3
+is generally the HTTP headers you want to pass to the S3
 service. The client library will add all necessary headers.
 Adding them to the configuration hash will override what the
 library would send and add headers that are not typically
 required for S3 interactions.
 
+=item acl_short (optional)
+
 In addition to additional and overriden HTTP headers, this
 HASHREF can have a C<acl_short> key to set the permissions
 (access) of the resource without a seperate call via
@@ -748,8 +1088,16 @@ documentation in C<add_acl> for the valu
 
 =back
 
-Returns a boolean indicating its success. Check C<err> and
-C<errstr> for error message if this operation fails.
+Returns a boolean indicating the success or failure of the call. Check
+C<err> and C<errstr> for error messages if this operation fails. To
+examine the raw output of the response from the API call, use the
+C<last_response()> method.
+
+  my $retval = $bucket->add_key('foo', $content, {});
+
+  if ( !$retval ) {
+    print STDERR Dumper([$bucket->err, $bucket->errstr, $bucket->last_response]);
+  }
 
 =head2 add_key_filename
 
@@ -762,6 +1110,20 @@ be streamed rather then loaded into memo
 Returns a configuration HASH of the given key. If a key does
 not exist in the bucket C<undef> will be returned.
 
+HASH will contain the following members:
+
+=over
+
+=item content_length
+
+=item content_type
+
+=item etag
+
+=item value
+
+=back
+
 =head2 get_key $key_name, [$method]
 
 Takes a key and an optional HTTP method and fetches it from
@@ -824,37 +1186,11 @@ method.
 
 =head2 list_all_v2
 
-List all keys in this bucket without having to worry about
-'marker'. This may make multiple requests to S3 under the
-hood.
+Same as C<list_all> but uses the version 2 API for listing keys.
 
 See L<Amazon::S3/list_bucket_all_v2> for documentation of this
 method.
 
-=head2 abort_multipart_upload
-
-Abort a multipart upload
-
-=head2 complete_multipart_upload
-
-Signal completion of a multipart upload
-
-=head2 initiate_multipart_upload
-
-Initiate a multipart upload
-
-=head2 list_multipart_upload_parts
-
-List all the uploaded parts of a multipart upload
-
-=head2 list_multipart_uploads
-
-List multipart uploads in progress
-
-=head2 upload_part_of_multipart_upload
-
-Upload a portion of a multipart upload
-
 =head2 get_acl
 
 Retrieves the Access Control List (ACL) for the bucket or
@@ -870,7 +1206,9 @@ bucket itself.
 
 =back
 
-=head2 set_acl $conf
+=head2 set_acl
+
+ set_acl(acl)
 
 Retrieves the Access Control List (ACL) for the bucket or
 resource. Requires a HASHREF argument with one of the following keys:
@@ -928,10 +1266,40 @@ Returns a boolean indicating the operati
 
 =head2 get_location_constraint
 
-Returns the location constraint data on a bucket.
+Returns the location constraint (region the bucket resides in) for a
+bucket.
+
+Valid values that may be returned:
+
+ af-south-1
+ ap-east-1
+ ap-northeast-1
+ ap-northeast-2
+ ap-northeast-3
+ ap-south-1
+ ap-southeast-1
+ ap-southeast-2
+ ca-central-1
+ cn-north-1
+ cn-northwest-1
+ EU
+ eu-central-1
+ eu-north-1
+ eu-south-1
+ eu-west-1
+ eu-west-2
+ eu-west-3
+ me-south-1
+ sa-east-1
+ us-east-2
+ us-gov-east-1
+ us-gov-west-1
+ us-west-1
+ us-west-2
 
 For more information on location constraints, refer to the
-Amazon S3 Developer Guide.
+documentation for
+L<GetBucketLocation|https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLocation.html>.
 
 =head2 err
 
@@ -941,6 +1309,165 @@ The S3 error code for the last error the
 
 A human readable error string for the last error the account encountered.
 
+=head2 error
+
+The decoded XML string as a hash object of the last error.
+
+=head2 last_response
+
+Returns the last C<HTTP::Response> to an API call.
+
+=head1 MULTIPART UPLOAD SUPPORT
+
+From Amazon's website:
+
+I<Multipart upload allows you to upload a single object as a set of
+parts. Each part is a contiguous portion of the object's data. You can
+upload these object parts independently and in any order. If
+transmission of any part fails, you can retransmit that part without
+affecting other parts. After all parts of your object are uploaded,
+Amazon S3 assembles these parts and creates the object. In general,
+when your object size reaches 100 MB, you should consider using
+multipart uploads instead of uploading the object in a single
+operation.>
+
+See L<https://docs.aws.amazon.com/AmazonS3/latest/userguide/mpuoverview.html> for more information about multipart uploads.
+
+=over 5
+
+=item * Maximum object size 5TB
+
+=item * Maximum number of parts 10,000
+
+=item * Part numbers 1 to 10,000 (inclusive)
+
+=item * Part size 5MB to 5GB. There is no limit on the last part of your multipart upload.
+
+=item * Maximum number of parts returned for a list parts request - 1000
+
+=item * Maximum number of multipart uploads returned in a list multipart uploads request - 1000
+
+=back
+
+A multipart upload begins by calling
+C<initiate_multipart_upload()>. This will return an identifier that is
+used in subsequent calls.
+
+ my $bucket = $s3->bucket('my-bucket');
+ my $id = $bucket->initiate_multipart_upload('some-big-object');
+
+ my $part_list = {};
+
+ my $part = 1;
+ my $etag = $bucket->upload_part_of_multipart_upload('my-bucket', $id, $part, $data, length $data);
+ $part_list->{$part++} = $etag;
+
+ $bucket->complete_multipart_upload('my-bucket', $id, $part_list);
+
+=head2 upload_multipart_object
+
+ upload_multipart_object( ... )
+
+Convenience routine C<upload_multipart_object> that encapsulates the
+multipart upload process. Accepts a hash or hash reference of
+arguments. If successful, returns a reference to a hash that contains
+the part numbers and etags of the uploaded parts.
+
+You can pass a data object, callback routine or a file handle.
+
+=over 5
+
+=item key
+
+Name of the key to create.
+
+=item data
+
+Scalar object that contains the data to write to S3.
+
+=item callback
+
+Optionally provide a callback routine that will be called until you
+pass a buffer with a length of 0. Your callback will receive no
+arguments but should return a tuple consisting of a B<reference> to a
+scalar object that contains the data to write and a scalar that
+represents the length of data. Once you return a zero length buffer
+the multipart process will be completed.
+
+=item fh
+
+File handle of an open file. The file must be greater than the minimum
+chunk size for multipart uploads otherwise the method will throw an
+exception.
+
+=item abort_on_error
+
+Indicates whether the multipart upload should be aborted if an error
+is encountered. Amazon will charge you for the storage of parts that
+have been uploaded unless you abort the upload.
+
+default: true
+
+=back
+
+=head2 abort_multipart_upload
+
+ abort_multipart_upload(key, multipart-upload-id)
+
+Abort a multipart upload
+
+=head2 complete_multipart_upload
+
+ complete_multipart_upload(key, multipart-upload-id, parts)
+
+Signal completion of a multipart upload. C<parts> is a reference to a
+hash of part numbers and etags.
+
+=head2 initiate_multipart_upload
+
+ initiate_multipart_upload(key, headers)
+
+Initiate a multipart upload. Returns an id used in subsequent call to
+C<upload_part_of_multipart_upload()>.
+
+=head2 list_multipart_upload_parts
+
+List all the uploaded parts of a multipart upload
+
+=head2 list_multipart_uploads
+
+List multipart uploads in progress
+
+=head2 upload_part_of_multipart_upload
+
+  upload_part_of_multipart_upload(key, id, part, data, length)
+
+Upload a portion of a multipart upload
+
+=over 5
+
+=item key
+
+Name of the key in the bucket to create.
+
+=item id
+
+The multipart-upload id returned in the C<initiate_multipart_upload> call.
+
+=item part
+
+The next part number (part numbers start at 1).
+
+=item data
+
+Scalar or reference to a scalar that contains the data to upload.
+
+=item length (optional)
+
+Length of the data.
+
+=back
+
 =head1 SEE ALSO
 
 L<Amazon::S3>
diff -pruN 0.54-1/lib/Amazon/S3/Constants.pm 0.55-1/lib/Amazon/S3/Constants.pm
--- 0.54-1/lib/Amazon/S3/Constants.pm	2022-07-13 17:35:03.000000000 +0000
+++ 0.55-1/lib/Amazon/S3/Constants.pm	2022-08-01 19:54:37.000000000 +0000
@@ -7,18 +7,20 @@ use parent qw{Exporter};
 
 use Readonly;
 
-our $VERSION = '0.54';
+our $VERSION = '0.55';
 
 # defaults
-Readonly our $AMAZON_HEADER_PREFIX   => 'x-amz-';
-Readonly our $DEFAULT_BUFFER_SIZE    => 4 * 1024;
-Readonly our $DEFAULT_HOST           => 's3.amazonaws.com';
-Readonly our $DEFAULT_TIMEOUT        => 30;
-Readonly our $KEEP_ALIVE_CACHESIZE   => 0;
-Readonly our $METADATA_PREFIX        => 'x-amz-meta-';
-Readonly our $MAX_BUCKET_NAME_LENGTH => 64;
-Readonly our $MIN_BUCKET_NAME_LENGTH => 3;
-Readonly our $DEFAULT_LOG_LEVEL      => 'error';
+Readonly our $AMAZON_HEADER_PREFIX            => 'x-amz-';
+Readonly our $DEFAULT_BUFFER_SIZE             => 4 * 1024;
+Readonly our $DEFAULT_HOST                    => 's3.amazonaws.com';
+Readonly our $DEFAULT_TIMEOUT                 => 30;
+Readonly our $KEEP_ALIVE_CACHESIZE            => 0;
+Readonly our $METADATA_PREFIX                 => 'x-amz-meta-';
+Readonly our $MAX_BUCKET_NAME_LENGTH          => 64;
+Readonly our $MIN_BUCKET_NAME_LENGTH          => 3;
+Readonly our $MIN_MULTIPART_UPLOAD_CHUNK_SIZE => 5 * 1024 * 1024;
+Readonly our $DEFAULT_LOG_LEVEL               => 'error';
+
 Readonly::Hash our %LOG_LEVELS => (
   trace => 5,
   debug => 4,
@@ -35,6 +37,7 @@ Readonly our $FALSE => 0;
 # chars
 Readonly our $COMMA         => q{,};
 Readonly our $COLON         => q{:};
+Readonly our $DOT           => q{.};
 Readonly our $DOUBLE_COLON  => q{::};
 Readonly our $EMPTY         => q{};
 Readonly our $SLASH         => q{/};
@@ -48,6 +51,7 @@ our %EXPORT_TAGS = (
       $AMPERSAND
       $COLON
       $DOUBLE_COLON
+      $DOT
       $COMMA
       $EMPTY
       $EQUAL_SIGN
@@ -73,6 +77,7 @@ our %EXPORT_TAGS = (
       $DEFAULT_HOST
       $MAX_BUCKET_NAME_LENGTH
       $MIN_BUCKET_NAME_LENGTH
+      $MIN_MULTIPART_UPLOAD_CHUNK_SIZE
     }
   ],
 );
diff -pruN 0.54-1/lib/Amazon/S3/Logger.pm 0.55-1/lib/Amazon/S3/Logger.pm
--- 0.54-1/lib/Amazon/S3/Logger.pm	2022-07-13 17:35:04.000000000 +0000
+++ 0.55-1/lib/Amazon/S3/Logger.pm	2022-08-01 19:54:37.000000000 +0000
@@ -10,7 +10,7 @@ use POSIX;
 use Readonly;
 use Scalar::Util qw{ reftype };
 
-our $VERSION = '0.54'; ## no critic (ValuesAndExpressions::RequireInterpolationOfMetachars)
+our $VERSION = '0.55'; ## no critic (ValuesAndExpressions::RequireInterpolationOfMetachars)
 
 Readonly::Hash our %LOG_LEVELS => (
   trace => 5,
diff -pruN 0.54-1/lib/Amazon/S3/Signature/V4.pm 0.55-1/lib/Amazon/S3/Signature/V4.pm
--- 0.54-1/lib/Amazon/S3/Signature/V4.pm	1970-01-01 00:00:00.000000000 +0000
+++ 0.55-1/lib/Amazon/S3/Signature/V4.pm	2022-08-01 19:54:37.000000000 +0000
@@ -0,0 +1,42 @@
+package Amazon::S3::Signature::V4;
+
+use strict;
+use warnings;
+
+use parent qw{Net::Amazon::Signature::V4};
+
+########################################################################
+sub new {
+########################################################################
+  my ( $class, @args ) = @_;
+
+  my %options;
+
+  if ( !ref $args[0] ) {
+    @options{qw{access_key_id secret endpoint service}} = @args;
+  }
+  else {
+    %options = %{ $args[0] };
+  }
+
+  my $region = delete $options{region};
+  $options{endpoint} //= $region;
+
+  my $self = $class->SUPER::new( \%options );
+
+  return $self;
+}
+
+########################################################################
+sub region {
+########################################################################
+  my ( $self, @args ) = @_;
+
+  if (@args) {
+    $self->{endpoint} = $args[0];
+  }
+
+  return $self->{endpoint};
+}
+
+1;
diff -pruN 0.54-1/lib/Amazon/S3.pm 0.55-1/lib/Amazon/S3.pm
--- 0.54-1/lib/Amazon/S3.pm	2022-07-13 17:35:04.000000000 +0000
+++ 0.55-1/lib/Amazon/S3.pm	2022-08-01 19:54:37.000000000 +0000
@@ -3,25 +3,23 @@ package Amazon::S3;
 use strict;
 use warnings;
 
-use 5.010;
-
 use Amazon::S3::Bucket;
 use Amazon::S3::Constants qw{:all};
 use Amazon::S3::Logger;
+use Amazon::S3::Signature::V4;
 
 use Carp;
 use Data::Dumper;
 use Digest::HMAC_SHA1;
+use Digest::MD5 qw{md5_hex};
 use English qw{-no_match_vars};
 use HTTP::Date;
 use LWP::UserAgent::Determined;
-use MIME::Base64 qw(encode_base64 decode_base64);
+use MIME::Base64 qw{encode_base64 decode_base64};
 use Scalar::Util qw{ reftype blessed };
 use List::Util qw{ any };
-use URI::Escape qw(uri_escape_utf8);
-use XML::Simple;
-
-use Net::Amazon::Signature::V4;
+use URI::Escape qw{uri_escape_utf8};
+use XML::Simple qw{XMLin};
 
 use parent qw{Class::Accessor::Fast};
 
@@ -31,11 +29,13 @@ __PACKAGE__->mk_accessors(
     aws_secret_access_key
     token
     buffer_size
+    cache_signer
     credentials
     dns_bucket_names
     digest
     err
     errstr
+    error
     host
     last_request
     last_response
@@ -44,12 +44,13 @@ __PACKAGE__->mk_accessors(
     retry
     _region
     secure
+    _signer
     timeout
     ua
   }
 );
 
-our $VERSION = '0.54'; ## no critic (ValuesAndExpressions::RequireInterpolationOfMetachars)
+our $VERSION = '0.55'; ## no critic (ValuesAndExpressions::RequireInterpolationOfMetachars)
 
 ########################################################################
 sub new {
@@ -62,7 +63,16 @@ sub new {
   $options{secure}           //= $TRUE;
   $options{host}             //= $DEFAULT_HOST;
   $options{dns_bucket_names} //= $TRUE;
+  $options{cache_signer}     //= $FALSE;
+  $options{retry}            //= $FALSE;
+
   $options{_region} = delete $options{region};
+  $options{_signer} = delete $options{signer};
+
+  # convenience for level => 'debug' & for consistency with Amazon::Credentials
+  if ( delete $options{debug} ) {
+    $options{level} = 'debug';
+  }
 
   # save this for later
   my $level = $options{level};
@@ -99,10 +109,6 @@ sub new {
     }
   );
 
-  if ( $self->_region ) {
-    $self->region( $self->_region ); # reset host if necessary
-  } ## end if ( $self->_region )
-
   if ( !$self->credentials ) {
 
     croak 'No aws_access_key_id'
@@ -110,6 +116,11 @@ sub new {
 
     croak 'No aws_secret_access_key'
       if !$self->aws_secret_access_key;
+
+    # encrypt credentials
+    $self->aws_access_key_id( _encrypt( $self->aws_access_key_id ) );
+    $self->aws_secret_access_key( _encrypt( $self->aws_secret_access_key ) );
+    $self->token( _encrypt( $self->token ) );
   } ## end if ( !$self->credentials)
 
   my $ua;
@@ -129,52 +140,179 @@ sub new {
     );
   } ## end else [ if ( $self->retry ) ]
 
-  # The "default" region for Amazon is us-east-1
-  # This is the region to set it to for listing buckets
-  # We don't actually list buckets in our transport, but
-  # it is sometimes useful to list buckets in a test script
-  # For a specific bucket, it is necessary to call adjust_region
-  # to set the region that is appropriate for that bucket
-  $self->{'signer'} = Net::Amazon::Signature::V4->new(
-    $self->aws_access_key_id,
-    $self->aws_secret_access_key,
-    'us-east-1', 's3'
-  );
-
   $ua->timeout( $self->timeout );
   $ua->env_proxy;
   $self->ua($ua);
+
+  $self->region( $self->_region // 'us-east-1' );
+
+  if ( !$self->_signer && $self->cache_signer ) {
+    $self->_signer( $self->signer );
+  }
+
   $self->turn_on_special_retry();
 
   return $self;
 } ## end sub new
 
-sub turn_on_special_retry {
-    my $self = shift;
+########################################################################
+{
+  my $encryption_key;
+
+########################################################################
+  sub _encrypt {
+########################################################################
+    my ($text) = @_;
 
-    if ($self->retry) {
+    return $text if !$text;
 
-        # In the field we are seeing issue of Amazon returning with a 400 code
-        # in the case of timeout.  From AWS S3 logs:
-        #  REST.PUT.PART Backups/2017-05-04/<account>.tar.gz "PUT /Backups<path>?partNumber=27&uploadId=<id> -
-        #  HTTP/1.1" 400 RequestTimeout 360 20971520 20478 - "-" "libwww-perl/6.15"
-        my $http_codes_hr = $self->ua->codes_to_determinate();
-        $http_codes_hr->{400} = 1;
+    if ( !defined $encryption_key ) {
+      eval {
+        require Crypt::Blowfish;
+        require Crypt::CBC;
+      };
+
+      if ($EVAL_ERROR) {
+        $encryption_key = $EMPTY;
+      }
+      else {
+        $encryption_key = md5_hex( rand $PID );
+      }
     }
+
+    return $text
+      if !$encryption_key;
+
+    my $cipher = Crypt::CBC->new(
+      -pass        => $encryption_key,
+      -cipher      => 'Crypt::Blowfish',
+      -nodeprecate => $TRUE,
+    );
+
+    return $cipher->encrypt($text);
+  }
+
+########################################################################
+  sub _decrypt {
+########################################################################
+    my ($secret) = @_;
+
+    return $secret
+      if !$secret || !$encryption_key;
+
+    my $cipher = Crypt::CBC->new(
+      -pass   => $encryption_key,
+      -cipher => 'Crypt::Blowfish'
+    );
+
+    return $cipher->decrypt($secret);
+  }
+
 }
 
-sub turn_off_special_retry {
-    my $self = shift;
+########################################################################
+sub get_bucket_location {
+########################################################################
+  my ( $self, $bucket ) = @_;
+
+  my $region;
+
+  if ( !ref $bucket || ref $bucket !~ /Amazon::S3::Bucket/xsm ) {
+    $bucket = Amazon::S3::Bucket->new( bucket => $bucket, account => $self );
+  }
+
+  return $bucket->get_location_constraint // 'us-east-1';
+}
+
+########################################################################
+sub get_default_region {
+########################################################################
+  my ($self) = @_;
+
+  my $region = $ENV{AWS_REGION} || $ENV{AWS_DEFAULT_REGION};
+  return $region
+    if $region;
+
+  my $url
+    = 'http://169.254.169.254/latest/meta-data/placement/availability-zone';
+
+  my $request = HTTP::Request->new( 'GET', $url );
 
-    if ($self->retry) {
+  my $ua = LWP::UserAgent->new;
+  $ua->timeout(0);
 
-        # In the field we are seeing issue of Amazon returning with a 400 code
-        # in the case of timeout.  From AWS S3 logs:
-        #  REST.PUT.PART Backups/2017-05-04/<account>.tar.gz "PUT /Backups<path>?partNumber=27&uploadId=<id> -
-        #  HTTP/1.1" 400 RequestTimeout 360 20971520 20478 - "-" "libwww-perl/6.15"
-        my $http_codes_hr = $self->ua->codes_to_determinate();
-        delete $http_codes_hr->{400};
+  my $response = eval { return $ua->request($request); };
+
+  if ( $response && $response->is_success ) {
+    if ( $response->content =~ /\A([[:lower:]]+[-][[:lower:]]+[-]\d+)/xsm ) {
+      $region = $1;
     }
+  }
+
+  return $region || 'us-east-1';
+}
+
+# Amazon::Credentials compatibility methods
+########################################################################
+sub get_aws_access_key_id {
+########################################################################
+  my ($self) = @_;
+
+  return _decrypt( $self->aws_access_key_id );
+}
+
+########################################################################
+sub get_aws_secret_access_key {
+########################################################################
+  my ($self) = @_;
+
+  return _decrypt( $self->aws_secret_access_key );
+}
+
+########################################################################
+sub get_token {
+########################################################################
+  my ($self) = @_;
+
+  return _decrypt( $self->token );
+}
+
+########################################################################
+sub turn_on_special_retry {
+########################################################################
+  my ($self) = @_;
+
+  if ( $self->retry ) {
+
+    # In the field we are seeing issue of Amazon returning with a 400
+    # code in the case of timeout.  From AWS S3 logs: REST.PUT.PART
+    # Backups/2017-05-04/<account>.tar.gz "PUT
+    # /Backups<path>?partNumber=27&uploadId=<id> - HTTP/1.1" 400
+    # RequestTimeout 360 20971520 20478 - "-" "libwww-perl/6.15"
+    my $http_codes_hr = $self->ua->codes_to_determinate();
+    $http_codes_hr->{400} = 1;
+  }
+
+  return;
+}
+
+########################################################################
+sub turn_off_special_retry {
+########################################################################
+  my ($self) = @_;
+
+  if ( $self->retry ) {
+
+    # In the field we are seeing issue with Amazon returning a 400
+    # code in the case of timeout.  From AWS S3 logs: REST.PUT.PART
+    # Backups/2017-05-04/<account>.tar.gz "PUT
+    # /Backups<path>?partNumber=27&uploadId=<id> - HTTP/1.1" 400
+    # RequestTimeout 360 20971520 20478 - "-" "libwww-perl/6.15"
+    my $http_codes_hr = $self->ua->codes_to_determinate();
+    delete $http_codes_hr->{400};
+  }
+
+  return;
 }
 
 ########################################################################
@@ -204,9 +342,24 @@ sub region {
 ########################################################################
 sub buckets {
 ########################################################################
-  my ($self) = @_;
+  my ( $self, $verify_region ) = @_;
+
+  # The "default" region for Amazon is us-east-1
+  # This is the region to set it to for listing buckets
+  # You may need to reset the signer's endpoint to 'us-east-1'
 
-  my $r = $self->_send_request( 'GET', $EMPTY, {} );
+  # temporarily cache signer
+  my $region = $self->_region;
+
+  $self->reset_signer_region('us-east-1'); # default region for buckets op
+
+  my $r = $self->_send_request(
+    { method  => 'GET',
+      path    => $EMPTY,
+      headers => {},
+      region  => 'us-east-1',
+    }
+  );
 
   return if $self->_remember_errors($r);
 
@@ -229,12 +382,15 @@ sub buckets {
           creation_date => $node->{CreationDate},
           account       => $self,
           buffer_size   => $self->buffer_size,
+          verify_region => $verify_region // $FALSE,
         }
         );
 
     } ## end foreach my $node ( @{$buckets...})
   } ## end if ( ref $r->{Buckets})
 
+  $self->reset_signer_region($region); # restore original region
+
   return {
     owner_id          => $owner_id,
     owner_displayname => $owner_displayname,
@@ -243,44 +399,102 @@ sub buckets {
 } ## end sub buckets
 
 ########################################################################
+sub reset_signer_region {
+########################################################################
+  my ( $self, $region ) = @_;
+
+  # reset signer's region, if the region wasn't us-east-1...note this
+  # is probably not needed anymore since bucket operations now send
+  # the region of the bucket to the signer
+  if ( $self->cache_signer ) {
+    if ( $self->region && $self->region ne 'us-east-1' ) {
+      if ( $self->signer->can('region') ) {
+        $self->signer->region($region);
+      }
+    }
+  }
+  else {
+    $self->region($region);
+  }
+
+  return $self->region;
+}
+
+########################################################################
 sub add_bucket {
 ########################################################################
   my ( $self, $conf ) = @_;
 
+  my $region = $conf->{location_constraint} // $conf->{region}
+    // $self->region;
+
+  if ( $region && $region eq 'us-east-1' ) {
+    undef $region;
+  }
+
   my $bucket = $conf->{bucket};
-  croak 'must specify bucket' if !$bucket;
+
+  croak 'must specify bucket'
+    if !$bucket;
+
+  my %header_ref;
 
   if ( $conf->{acl_short} ) {
     $self->_validate_acl_short( $conf->{acl_short} );
+
+    $header_ref{'x-amz-acl'} = $conf->{acl_short};
   } ## end if ( $conf->{acl_short...})
 
-  my $header_ref
-    = ( $conf->{acl_short} )
-    ? { 'x-amz-acl' => $conf->{acl_short} }
-    : {};
-
-  my $data = $EMPTY;
-
-  if ( defined $conf->{location_constraint} ) {
-    $data = <<"XML";
-<CreateBucketConfiguration><LocationConstraint>$conf->{location_constraint}</LocationConstraint></CreateBucketConfiguration>
+  my $xml = <<'XML';
+<CreateBucketConfiguration>
+  <LocationConstraint>%s</LocationConstraint>
+</CreateBucketConfiguration>
 XML
-  } ## end if ( defined $conf->{location_constraint...})
 
-  return $FALSE
-    if !$self->_send_request_expect_nothing( 'PUT', "$bucket/",
-    $header_ref, $data );
+  my $data = defined $region ? sprintf $xml, $region : $EMPTY;
+
+  my $retval = $self->_send_request_expect_nothing(
+    { method  => 'PUT',
+      path    => "$bucket/",
+      headers => \%header_ref,
+      data    => $data,
+      region  => $region,
+    }
+  );
+
+  my $bucket_obj = $retval ? $self->bucket($bucket) : undef;
 
-  return $self->bucket($bucket);
+  return $bucket_obj;
 } ## end sub add_bucket
 
 ########################################################################
 sub bucket {
 ########################################################################
-  my ( $self, $bucketname ) = @_;
+  my ( $self, @args ) = @_;
+
+  my ( $bucketname, $region, $verify_region );
+
+  if ( ref $args[0] && reftype( $args[0] ) eq 'HASH' ) {
+    ( $bucketname, $region, $verify_region )
+      = @{ $args[0] }{qw{bucket region verify_region}};
+  }
+  else {
+    ( $bucketname, $region ) = @args;
+  }
+
+  # only set to default region if a region wasn't passed or region
+  # verification not requested
+  if ( !$region && !$verify_region ) {
+    $region = $self->region;
+  }
 
   return Amazon::S3::Bucket->new(
-    { bucket => $bucketname, account => $self } );
+    { bucket        => $bucketname,
+      account       => $self,
+      region        => $region,
+      verify_region => $verify_region,
+    }
+  );
 } ## end sub bucket
 
 ########################################################################
@@ -289,19 +503,27 @@ sub delete_bucket {
   my ( $self, $conf ) = @_;
 
   my $bucket;
+  my $region;
 
   if ( eval { return $conf->isa('Amazon::S3::Bucket'); } ) {
     $bucket = $conf->bucket;
+    $region = $conf->region;
   } ## end if ( eval { return $conf...})
   else {
     $bucket = $conf->{bucket};
+    $region = $conf->{region} || $self->get_bucket_location($bucket);
   } ## end else [ if ( eval { return $conf...})]
 
   croak 'must specify bucket'
     if !$bucket;
 
-  return $self->_send_request_expect_nothing( 'DELETE', $bucket . $SLASH,
-    {} );
+  return $self->_send_request_expect_nothing(
+    { method  => 'DELETE',
+      path    => $bucket . $SLASH,
+      headers => {},
+      region  => $region,
+    }
+  );
 } ## end sub delete_bucket
 
 ########################################################################
@@ -333,7 +555,13 @@ sub list_bucket {
       keys %{$conf};
   } ## end if ( %{$conf} )
 
-  my $r = $self->_send_request( 'GET', $path, {} );
+  my $r = $self->_send_request(
+    { method  => 'GET',
+      path    => $path,
+      headers => {},
+      region  => $self->region
+    }
+  );
 
   return if $self->_remember_errors($r);
 
@@ -431,7 +659,8 @@ sub list_bucket_all {
     if !$bucket;
 
   my $response = $self->list_bucket($conf);
-  croak 'The server has stopped responding' unless $response;
+  croak 'The server has stopped responding'
+    if !$response;
 
   return $response
     if !$response->{is_truncated};
@@ -446,7 +675,8 @@ sub list_bucket_all {
     $conf->{bucket} = $bucket;
 
     $response = $self->list_bucket($conf);
-    croak 'The server has stopped responding' unless $response;
+    croak 'The server has stopped responding'
+      if !$response;
 
     push @{ $all->{keys} }, @{ $response->{keys} };
 
@@ -506,6 +736,32 @@ sub level {
 } ## end sub level
 
 ########################################################################
+sub signer {
+########################################################################
+  my ($self) = @_;
+
+  return $self->_signer
+    if $self->_signer;
+
+  my $creds = $self->credentials ? $self->credentials : $self;
+
+  my $signer = Amazon::S3::Signature::V4->new(
+    { access_key_id => $creds->get_aws_access_key_id,
+      secret        => $creds->get_aws_secret_access_key,
+      region        => $self->region || $self->get_default_region,
+      service       => 's3',
+      $self->get_token ? ( security_token => $creds->get_token ) : (),
+    }
+  );
+
+  if ( $self->cache_signer ) {
+    $self->_signer($signer);
+  }
+
+  return $signer;
+}
+
+########################################################################
 sub _validate_acl_short {
 ########################################################################
   my ( $self, $policy_name ) = @_;
@@ -532,14 +788,15 @@ sub _can_bucket_be_subdomain {
     return $FALSE;
   } ## end if ( length $bucketname...)
 
-  if (length $bucketname < 1) {
+  if ( length $bucketname < $MIN_BUCKET_NAME_LENGTH ) {
     return $FALSE;
   }
 
-  return $FALSE unless $bucketname =~ m{^[a-z][a-z0-9-]*$};
-  return $FALSE unless $bucketname =~ m{[a-z0-9]$};
+  return $FALSE if $bucketname !~ m{\A[[:lower:]][[:lower:]\d-]*\z}xsm;
+  return $FALSE if $bucketname !~ m{[[:lower:]\d]\z}xsm;
+
   return $TRUE;
-} ## end sub _is_dns_bucket
+}
 
 # make the HTTP::Request object
 
@@ -547,8 +804,18 @@ sub _can_bucket_be_subdomain {
 sub _make_request {
 ########################################################################
   my ( $self, @args ) = @_;
+  my ( $method, $path, $headers, $data, $metadata, $region );
 
-  my ( $method, $path, $headers, $data, $metadata ) = @args;
+  if ( ref $args[0] && reftype( $args[0] ) eq 'HASH' ) {
+    ( $method, $path, $headers, $data, $metadata, $region )
+      = @{ $args[0] }{qw{method path headers data metadata region}};
+  }
+  else {
+    ( $method, $path, $headers, $data, $metadata, $region ) = @args;
+  }
+
+  # reset region on every call...every bucket can have its own region
+  $self->region( $region // $self->_region );
 
   croak 'must specify method'
     if !$method;
@@ -570,7 +837,10 @@ sub _make_request {
 
   $path =~ s/\A\///xsm;
   my $url = "$protocol://$host/$path";
-  if ($path =~ m{^([^/?]+)(.*)} && _can_bucket_be_subdomain($1)) {
+
+  if ( $path =~ m{\A([^/?]+)(.*)}xsm
+    && $self->dns_bucket_names
+    && _can_bucket_be_subdomain($1) ) {
     $url = "$protocol://$1.$host$2";
   }
 
@@ -580,7 +850,9 @@ sub _make_request {
 
   $request->content($data);
 
-  $self->{'signer'}->sign( $request );
+  $self->signer->region($region); # always set regional endpoint for signing
+
+  $self->signer->sign($request);
 
   $self->get_logger->trace( sub { return Dumper( [$request] ); } );
 
@@ -594,18 +866,25 @@ sub _send_request {
 ########################################################################
   my ( $self, @args ) = @_;
 
-  my $request = @args == 1 ? $args[0] : $self->_make_request(@args);
+  my $request;
+
+  if ( @args == 1 && ref $args[0] =~ /HTTP::Request/xsm ) {
+    $request = $args[0];
+  }
+  else {
+    $request = $self->_make_request(@args);
+  }
 
   my $response = $self->_do_http($request);
 
-  $self->get_logger->trace( Dumper( [$response] ) );
+  $self->get_logger->debug( Dumper( [$response] ) );
 
   $self->last_response($response);
 
   my $content = $response->content;
 
-  if ($response->code !~ /^2\d\d$/) {
-    $self->_remember_errors($response->content, 1);
+  if ( $response->code !~ /\A2\d\d\z/xsm ) {
+    $self->_remember_errors( $response->content, 1 );
     return;
   }
 
@@ -617,93 +896,112 @@ sub _send_request {
 } ## end sub _send_request
 
 #
-# This is the necessary to find and region for a specific bucket
+# This is necessary to find the region for a specific bucket
 # and set the signer object to use that region when signing requests
-#
+########################################################################
 sub adjust_region {
-    my ( $self, $bucket, $called_from_redirect ) = @_;
+########################################################################
+  my ( $self, $bucket, $called_from_redirect ) = @_;
 
-    my $request = HTTP::Request->new('GET', 'https://' . $bucket . '.' . $self->host );
-    $self->{'signer'}->sign( $request );
+  my $request
+    = HTTP::Request->new( 'GET', 'https://' . $bucket . $DOT . $self->host );
+  $self->{'signer'}->sign($request);
+
+  # We have to turn off our special retry since this will deliberately trigger that code
+  $self->turn_off_special_retry();
+
+  # If the bucket name has a period in it, the certificate validation
+  # will fail since it will expect a certificate for a subdomain.
+  # Setting it to verify against the expected host guards against
+  # that while still being secure since we will have verified
+  # the response as coming from the expected server.
+  $self->ua->ssl_opts( SSL_verifycn_name => $self->host );
 
-    # We have to turn off our special retry since this will deliberately trigger that code
-    $self->turn_off_special_retry();
+  my $response = $self->_do_http($request);
 
-    # If the bucket name has a period in it, the certificate validation
-    # will fail since it will expect a certificate for a subdomain.
-    # Setting it to verify against the expected host guards against
-    # that while still being secure since we will have verified
-    # the response as coming from the expected server.
-    $self->ua->ssl_opts( SSL_verifycn_name => $self->host );
+  # Turn this off, since all other requests have the bucket after
+  # the host in the URL, and the host may change depending on the region
+  $self->ua->ssl_opts( SSL_verifycn_name => undef );
 
-    my $response = $self->_do_http($request);
+  $self->turn_on_special_retry();
 
-    # Turn this off, since all other requests have the bucket after
-    # the host in the URL, and the host may change depending on the region
-    $self->ua->ssl_opts( SSL_verifycn_name => undef );
+  # If No error, then nothing to do
+  return 1 if $response->is_success();
 
-    $self->turn_on_special_retry();
+  # If the error is due to the wrong region, then we will get
+  # back a block of XML with the details
+  if ( $response->content_type eq 'application/xml' and $response->content ) {
 
-    # If No error, then nothing to do
-    return 1 if $response->is_success();
+    my $error_hash = $self->_xpc_of_content( $response->content );
 
-    # If the error is due to the wrong region, then we will get
-    # back a block of XML with the details
-    if ( $response->content_type eq 'application/xml' and $response->content ) {
+    if (  $error_hash->{'Code'} eq 'PermanentRedirect'
+      and $error_hash->{'Endpoint'} ) {
 
-        my $error_hash = $self->_xpc_of_content( $response->content );
+      # Don't recurse through multiple redirects
+      return if $called_from_redirect;
 
-        if ( $error_hash->{'Code'} eq 'PermanentRedirect' and $error_hash->{'Endpoint'} ) {
+      # With a permanent redirect error, they are telling us the explicit
+      # host to use.  The endpoint will be in the form of bucket.host
+      my $host = $error_hash->{'Endpoint'};
 
-            # Don't recurse through multiple redirects
-            return if $called_from_redirect;
+      # Remove the bucket name from the front of the host name
+      # All the requests will need to be of the form https://host/bucket
+      $host =~ s/\A$bucket[.]//xsm;
+      $self->host($host);
 
-            # With a permanent redirect error, they are telling us the explicit
-            # host to use.  The endpoint will be in the form of bucket.host
-            my $host = $error_hash->{'Endpoint'};
+      # We will need to call ourselves again in order to trigger the
+      # AuthorizationHeaderMalformed error in order to get the region
+      return $self->adjust_region( $bucket, 1 );
+    }
 
-            # Remove the bucket name from the front of the host name
-            # All the requests will need to be of the form https://host/bucket
-            $host =~ s/^$bucket\.//;
-            $self->host($host);
+    if (  $error_hash->{'Code'} eq 'AuthorizationHeaderMalformed'
+      and $error_hash->{'Region'} ) {
 
-            # We will need to call ourselves again in order to trigger the
-            # AuthorizationHeaderMalformed error in order to get the region
-            return $self->adjust_region( $bucket, 1 );
-        }
+      # Set the signer to use the correct reader evermore
+      $self->{'signer'}{'endpoint'} = $error_hash->{'Region'};
 
-        if ( $error_hash->{'Code'} eq 'AuthorizationHeaderMalformed' and $error_hash->{'Region'} ) {
+      # Only change the host if we haven't been called as a redirect
+      # where an exact host has been given
+      if ( !$called_from_redirect ) {
+        $self->host( 's3-' . $error_hash->{'Region'} . '.amazonaws.com' );
+      }
 
-            # Set the signer to use the correct reader evermore
-            $self->{'signer'}{'endpoint'} = $error_hash->{'Region'};
+      return 1;
+    }
 
-            # Only change the host if we haven't been called as a redirect where an exact host has been given
-            $self->host( 's3-' . $error_hash->{'Region'} . '.amazonaws.com' ) unless $called_from_redirect;
+    if ( $error_hash->{'Code'} eq 'IllegalLocationConstraintException' ) {
 
-            return 1;
-        }
+      # This is hackish; but in this case the region name only appears in the message
+      if ( $error_hash->{'Message'} =~ /The (\S+) location/xsm ) {
+        my $region = $1;
 
-        if ( $error_hash->{'Code'} eq 'IllegalLocationConstraintException' ) {
+        # Correct the region for the signer
+        $self->{'signer'}{'endpoint'} = $region;
 
-            # This is hackish; but in this case the region name only appears in the message
-            if ( $error_hash->{'Message'} =~ /The (\S+) location/ ) {
-                my $region = $1;
+        # Set the proper host for the region
+        $self->host( 's3.' . $region . '.amazonaws.com' );
 
-                # Correct the region for the signer
-                $self->{'signer'}{'endpoint'} = $region;
+        return 1;
+      }
+    }
 
-                # Set the proper host for the region
-                $self->host( 's3.' . $region . '.amazonaws.com' );
+  }
 
-                return 1;
-            }
-        }
+  # Some other error
+  $self->_remember_errors( $response->content, 1 );
+  return;
+}
 
-    }
+########################################################################
+sub reset_errors {
+########################################################################
+  my ($self) = @_;
 
-    # Some other error
-    $self->_remember_errors($response->content, 1);
-    return;
+  $self->err(undef);
+  $self->errstr(undef);
+  $self->error(undef);
+
+  return $self;
 }
 
 ########################################################################
@@ -712,33 +1010,47 @@ sub _do_http {
   my ( $self, $request, $filename ) = @_;
 
   # convenient time to reset any error conditions
-  $self->err(undef);
-  $self->errstr(undef);
-  my $response = $self->ua->request($request, $filename);
+  $self->reset_errors;
+
+  my $response = $self->ua->request( $request, $filename );
 
   # For new buckets at non-standard locations, amazon will sometimes
-  # respond with a temprary redirect.  In this case it is necessary
+  # respond with a temporary redirect.  In this case it is necessary
   # to try again with the new URL
-  if ($response->code =~ /^3/ and defined $response->header('Location')) {
+  if ( $response->code =~ /\A3/xsm and defined $response->header('Location') )
+  {
 
-    # print "Redirecting to:  " . $response->header('Location') . "\n";
-    $request->uri($response->header('Location'));
-    $response = $self->ua->request($request, $filename);
+    $self->get_logger->debug(
+      'Redirecting to:  ' . $response->header('Location') );
+
+    $request->uri( $response->header('Location') );
+    $response = $self->ua->request( $request, $filename );
   }
 
+  $self->get_logger->debug( Dumper( [$response] ) );
+
+  $self->last_response($response);
+
   return $response;
 }
 
 # Call this if handling any temporary redirect issues
 # (Like needing to probe with a HEAD request when file handle are involved)
+
+########################################################################
 sub _do_http_no_redirect {
-  my ($self, $request, $filename) = @_;
+########################################################################
+  my ( $self, $request, $filename ) = @_;
 
   # convenient time to reset any error conditions
-  $self->err(undef);
-  $self->errstr(undef);
+  $self->reset_errors;
 
-  return $self->ua->request( $request, $filename );
+  my $response = $self->ua->request( $request, $filename );
+  $self->get_logger->debug( Dumper( [$response] ) );
+
+  $self->last_response($response);
+
+  return $response;
 } ## end sub _do_http
 
 ########################################################################
@@ -750,10 +1062,6 @@ sub _send_request_expect_nothing {
 
   my $response = $self->_do_http($request);
 
-  $self->get_logger->trace( Dumper( [$response] ) );
-
-  $self->last_response($response);
-
   my $content = $response->content;
 
   return $TRUE
@@ -775,9 +1083,28 @@ sub _send_request_expect_nothing {
 ########################################################################
 sub _send_request_expect_nothing_probed {
 ########################################################################
-  my ( $self, $method, $path, $conf, $value ) = @_;
+  my ( $self, @args ) = @_;
+
+  my ( $method, $path, $conf, $value, $region );
+
+  if ( @args == 1 && ref $args[0] ) {
+    ( $method, $path, $conf, $value, $region )
+      = @{ $args[0] }{qw{method path headers data region}};
+  }
+  else {
+    ( $method, $path, $conf, $value, $region )
+      = @args;
+  }
+
+  $region = $region // $self->region;
+
+  my $request = $self->_make_request(
+    { method => 'HEAD',
+      path   => $path,
+      region => $region
+    }
+  );
 
-  my $request      = $self->_make_request( 'HEAD', $path );
   my $override_uri = undef;
 
   my $old_redirectable = $self->ua->requests_redirectable;
@@ -785,13 +1112,25 @@ sub _send_request_expect_nothing_probed
 
   my $response = $self->_do_http_no_redirect($request);
 
-  $self->get_logger->trace( Dumper( [$response] ) );
+  if ( $response->code =~ /^3/xsm ) {
+    if ( defined $response->header('Location') ) {
+      $override_uri = $response->header('Location');
+    } ## end if ( $response->code =~...)
+    else {
+      $self->_croak_if_response_error($response);
+    }
 
-  if ( $response->code =~ /^3/xsm && defined $response->header('Location') ) {
-    $override_uri = $response->header('Location');
-  } ## end if ( $response->code =~...)
+    $self->get_logger->debug( 'setting override URI to ', $override_uri );
+  }
 
-  $request = $self->_make_request( $method, $path, $conf, $value );
+  $request = $self->_make_request(
+    { method  => $method,
+      path    => $path,
+      headers => $conf,
+      data    => $value,
+      region  => $region
+    }
+  );
 
   if ( defined $override_uri ) {
     $request->uri($override_uri);
@@ -799,8 +1138,6 @@ sub _send_request_expect_nothing_probed
 
   $response = $self->_do_http_no_redirect($request);
 
-  $self->get_logger->trace( Dumper( [$response] ) );
-
   $self->ua->requests_redirectable($old_redirectable);
 
   my $content = $response->content;
@@ -847,8 +1184,8 @@ sub _xpc_of_content {
     );
   };
 
-  if ($@) {
-    confess "Error parsing $src:  $@";
+  if ($EVAL_ERROR) {
+    confess "Error parsing $src:  $EVAL_ERROR";
   }
 
   return $xml_hr;
@@ -873,6 +1210,8 @@ sub _remember_errors {
 
   my $r = ref $src ? $src : $self->_xpc_of_content( $src, $keep_root );
 
+  $self->error($r);
+
   # apparently buckets() does not keep_root
   if ( $r->{Error} ) {
     $r = $r->{Error};
@@ -1054,7 +1393,7 @@ sub _urlencode {
 ########################################################################
   my ( $self, $unencoded ) = @_;
 
-  return uri_escape_utf8($unencoded, '^A-Za-z0-9\-\._~\x2f');
+  return uri_escape_utf8( $unencoded, '^A-Za-z0-9\-\._~\x2f' );
 } ## end sub _urlencode
 
 1;
@@ -1068,6 +1407,12 @@ __END__
 Amazon::S3 - A portable client library for working with and
 managing Amazon S3 buckets and keys.
 
+=begin markdown
+
+![Amazon::S3](https://github.com/rlauer6/perl-amazon-s3/actions/workflows/build.yml/badge.svg?event=push)
+
+=end markdown
+
 =head1 SYNOPSIS
 
   #!/usr/bin/perl
@@ -1172,53 +1517,92 @@ via L<XML::Simple>.
 
 =back
 
-=head1 LIMITATIONS
+=head1 LIMITATIONS AND DIFFERENCES WITH EARLIER VERSIONS
 
-As noted this module is no longer a I<drop-in> replacement for
-C<Net::Amazon::S3> and has limitations that may make the use of this
-module in your applications questionable. The list of limitations
-below may not be complete.
+As noted, this module is no longer a I<drop-in> replacement for
+C<Net::Amazon::S3> and has limitations and differences that may make
+the use of this module in your applications questionable.
 
 =over 5
 
+=item MINIMUM PERL
+
+Technically, this module should run on versions 5.10 and above,
+however some of the dependencies may require higher versions of
+C<perl> or install new versions of some dependencies that conflict with
+other versions of dependencies...it's a crapshoot when dealing with
+older C<perl> versions and CPAN modules.
+
+You may however, be able to build this module by installing older
+versions of those dependencies and take your chances that those older
+versions provide enough working features to support C<Amazon::S3>. It
+is likely they do...and this module has recently been tested on
+version 5.10.0 C<perl> using some older CPAN modules to resolve
+dependency issues.
+
+To build this module on an earlier version of C<perl> you may need to
+downgrade some modules.  In particular I have found this recipe to
+work for building and testing on 5.10.0.
+
+In this order install:
+
+ HTML::HeadParser 2.14
+ LWP 6.13
+ Amazon::S3 0.55
+
+...other versions I<may> work...YMMV.
+
 =item * API Signing
 
 Making calls to AWS APIs requires that the calls be signed.  Amazon
 has added a new signing method (Signature Version 4) to increase
-security around their APIs.  This module continues to use the original
-signing method (Signature Version 2).
+security around their APIs. This module no longer utilizes Signature
+Version V2.
 
 B<New regions after January 30, 2014 will only support Signature Version 4.>
 
-There has been some effort to add support of Signature Version 4
-however several method in this package may need significant
-refactoring and testing in order to support the new sigining method.
+See L</Signature Version V4> below for important details.
 
 =over 10
 
-=item Signature Version 2
+=item Signature Version 4
 
+L<https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html>
 
-L<https://docs.aws.amazon.com/AmazonS3/latest/userguide/RESTAuthentication.html>
+I<IMPORTANT NOTE:>
 
-=item Signature Version 4
+Unlike Signature Version 2, Version 4 requires a regional
+parameter. This implies that you need to supply the bucket's region
+when signing requests for any API call that involves a specific
+bucket. Starting with version 0.55 of this module,
+C<Amazon::S3::Bucket> provides a new method (C<region()>) and accepts
+in the constructor a C<region> parameter.  If a region is not
+supplied, the region for the bucket will be set to the region set in
+the C<account> object (C<Amazon::S3>) that you passed to the bucket's
+new constructor.  Alternatively, you can request that the bucket's new
+constructor determine the bucket's region for you by calling the
+C<get_location_constraint()> method.
+
+When signing API calls, the region for the specific bucket will be
+used. For calls that are not regional (C<buckets()>, e.g.) the default
+region ('us-east-1') will be used.
 
-L<https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html>
+=item Signature Version 2
+
+L<https://docs.aws.amazon.com/AmazonS3/latest/userguide/RESTAuthentication.html>
 
 =back
 
 =item * New APIs
 
-This module does not support the myriad of new API method calls
-available for S3 since its original creation.
+This module does not support some of the newer API method calls
+for S3 added after the initial creation of this interface.
 
 =item * Multipart Upload Support
 
-While there are undocumented methods for multipart uploads (used for
-files >5Gb), those methods have not been tested and may not in fact
-work today.
+There is limited testing for multipart uploads.
 
-For more information regarding multipart uploads visit the link below.
+For more information regarding multi-part uploads visit the link below.
 
 L<https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html>
 
@@ -1319,7 +1703,7 @@ it is an Amazon endpoint.
 
 The AWS region you where your bucket is located.
 
-default: no region
+default: us-east-1
 
 =item buffer_size
 
@@ -1329,27 +1713,114 @@ default: 4096
 
 =back
 
-=head2 turn_on_special_retry
+=head2 signer
 
-Called to add extry retry codes if retry has been set
+Sets or retrieves the signer object. API calls must be signed using
+your AWS credentials. By default, starting with version 0.54 the
+module will use L<Net::Amazon::Signature::V4> as the signer and
+instantiate a signer object in the constructor. Note however, that
+signers need your credentials and they I<will> get stored by that
+class, making them susceptible to inadvertent exfiltration. You have a
+few options here:
 
-=head2 turn_off_special_retry
+=over 5
 
-Called to turn off special retry codes when we are deliberately triggering them
+=item 1. Use your own signer.
+
+You may have noticed that you can also provide your own credentials
+object forcing this module to use your object for retrieving
+credentials. Likewise, you can use your own signer so that this
+module's signer never sees or stores those credentials.
 
-=head2 adjust_region
+=item 2. Pass the credentials object and set C<cache_signer> to a
+false value.
 
-Sets the region for the signing object to be appropriate for the bucket
+If you pass a credentials object and set C<cache_signer> to a false
+value, the module will use the credentials object to retrieve
+credentials and create a new signer each time an API call is made that
+requires signing. This prevents your credentials from being stored
+inside of the signer class.
+
+I<Note that using your own credentials object that stores your
+credentials in plaintext is also going to expose your credentials when
+someone dumps the class.>
+
+=item 3. Pass credentials, set C<cache_signer> to a false value.
+
+Unfortunately, while this will prevent L<Net::Amazon::Signature::V4>
+from hanging on to your credentials, you credentials will be stored in
+the L<Amazon::S3> object.
+
+Starting with version 0.55 of this module, if you have installed
+L<Crypt::CBC> and L<Crypt::Blowfish>, your credentials will be
+encrypted using a random key created when the class is
+instantiated. While this is more secure than leaving them in
+plaintext, if the key is discovered (the key however is not stored in
+the object's hash) and the object is dumped, your I<encrypted>
+credentials can be exposed.
+
+=item 4. Use very granular credentials for bucket access only.
+
+Use credentials that only allow access to a bucket or portions of a
+bucket required for your application. This will at least limit the
+I<blast radius> of any potential security breach.
+
+=item 5. Do nothing...send the credentials, use the default signer.
+
+In this case, both the L<Amazon::S3> class and the
+L<Net::Amazon::Signature::V4> have your credentials. Caveat Emptor.
+
+See Also L<Amazon::Credentials> for more information about safely
+storing your credentials and preventing exfiltration.
+
+=back
+
+=head2 region
+
+Sets the region for the API calls. This will also be the
+default when instantiating the bucket object unless you pass the
+region parameter in the C<bucket> method or use the C<verify_region>
+flag that will I<always> verify the region of the bucket using the
+C<get_location_constraint> method.
+
+default: us-east-1
 
 =head2 buckets
 
-Returns C<undef> on error, else HASHREF of results:
+ buckets([verify-region])
+
+=over
+
+=item verify-region (optional)
+
+C<verify-region> is a boolean value that indicates if the
+bucket's region should be verified when the bucket object is
+instantiated.
+
+If set to true, this method will call the C<bucket> method with
+C<verify_region> set to true causing the constructor to call the
+C<get_location_constraint> for each bucket to set the bucket's
+region. This will cause a significant decrease in the performance of
+the C<buckets()> method. Setting the region for each bucket is
+necessary since API operations on buckets require the region of the
+bucket when signing API requests. If all of your buckets are in the
+same region and you have passed a region parameter to your S3 object,
+then that region will be used when calling the constructor of your
+bucket objects.
+
+default: false
+
+=back
+
+Returns a HASHREF containing the metadata for all of the buckets
+owned by the account (see below) or C<undef> on
+error.
 
 =over
 
 =item owner_id
 
-The owner's ID of the buckets owner.
+The owner ID of the bucket's owner.
 
 =item owner_display_name
 
@@ -1357,38 +1828,61 @@ The name of the owner account.
 
 =item buckets
 
-Any ARRAYREF of L<Amazon::SimpleDB::Bucket> objects for the 
+Any ARRAYREF of L<Amazon::S3::Bucket> objects for the 
 account.
 
 =back
 
-=head2 add_bucket 
+=head2 add_bucket
 
-Takes a HASHREF:
+ add_bucket(bucket-configuration)
+
+C<bucket-configuration> is a reference to a hash with bucket configuration
+parameters.
 
 =over
 
 =item bucket
 
-The name of the bucket you want to add
+The name of the bucket. See L<Bucket name
+rules|https://docs.aws.amazon.com/AmazonS3/latest/userguide/bucketnamingrules.html>
+for more details on bucket naming rules.
 
 =item acl_short (optional)
 
 See the set_acl subroutine for documenation on the acl_short options
 
+=item location_constraint
+
+=item region
+
+The region the bucket is to be created in.
+
 =back
 
-Returns 0 on failure or a L<Amazon::S3::Bucket> object on success
+Returns a L<Amazon::S3::Bucket> object on success or C<undef> on failure.
+
+=head2 bucket
+
+ bucket(bucket, [region])
 
-=head2 bucket BUCKET
+ bucket({ bucket => bucket-name, verify_region => boolean, region => region });
 
-Takes a scalar argument, the name of the bucket you're creating
+Takes a scalar argument or reference to a hash of arguments.
 
-Returns an (unverified) bucket object from an account. This method does not access the network.
+You can pass the region or set C<verify_region> indicating that
+you want the bucket constructor to determine the bucket region.
+
+If you do not pass the region or set the C<verify_region> value, the
+region will be set to the default region set in your C<Amazon::S3>
+object.
+
+See L<Amazon::S3::Bucket> for a complete description of the C<bucket>
+method.
 
 =head2 delete_bucket
 
-Takes either a L<Amazon::S3::Bucket> object or a HASHREF containing 
+Takes either a L<Amazon::S3::Bucket> object or a HASHREF containing:
 
 =over
 
@@ -1396,11 +1890,31 @@ Takes either a L<Amazon::S3::Bucket> obj
 
 The name of the bucket to remove
 
+=item region
+
+Region the bucket is located in. If not provided, the method will
+determine the bucket's region by calling C<get_bucket_location>.
+
 =back
 
-Returns false (and fails) if the bucket isn't empty.
+Returns a boolean indicating the success or failure of the API
+call. Check C<err> or C<errstr> for error messages.
 
-Returns true if the bucket is successfully deleted.
+Note from the L<Amazon's documentation|https://docs.aws.amazon.com/AmazonS3/latest/userguide/BucketRestrictions.html>
+
+=over 10
+
+If a bucket is empty, you can delete it. After a bucket is deleted,
+the name becomes available for reuse. However, after you delete the
+bucket, you might not be able to reuse the name for various reasons.
+
+For example, when you delete the bucket and the name becomes available
+for reuse, another AWS account might create a bucket with that
+name. In addition, B<some time might pass before you can reuse the name
+of a deleted bucket>. If you want to use the same bucket name, we
+recommend that you don't delete the bucket.
+
+=back
 
 =head2 dns_bucket_names
 
@@ -1528,6 +2042,26 @@ Each key is a HASHREF that looks like th
         owner_displayname => $owner_name
     }
 
+=head2 get_bucket_location
+
+ get_bucket_location(bucket-name)
+ get_bucket_location(bucket-obj)
+
+This is a convenience routine for the C<get_location_constraint()> of
+the bucket object.  This method will, however return the default
+region of 'us-east-1' when C<get_location_constraint()> returns a null
+value.
+
+ my $region = $s3->get_bucket_location('my-bucket');
+
+Starting with version 0.55, C<Amazon::S3::Bucket> will call this
+C<get_location_constraint()> to determine the region for the
+bucket. You can get the region for the bucket by using the C<region()>
+method of the bucket object.
+
+  my $bucket = $s3->bucket('my-bucket');
+  my $bucket_region = $bucket->region;
+
 =head2 get_logger
 
 Returns the logger object. If you did not set a logger when you
@@ -1548,6 +2082,18 @@ Takes the same arguments as list_bucket.
 
 I<You are encouraged to use the newer C<list_bucket_all_v2> method.>
 
+=head2 err
+
+The S3 error code for the last error encountered.
+
+=head2 errstr
+
+A human readable error string for the last error encountered.
+
+=head2 error
+
+The decoded XML string as a hash object of the last error.
+
 =head2 last_response
 
 Returns the last L<HTTP::Response> object.
@@ -1562,6 +2108,14 @@ Set the logging level.
 
 default: error
 
+=head2 turn_on_special_retry
+
+Called to add extra retry codes if retry has been set
+
+=head2 turn_off_special_retry
+
+Called to turn off special retry codes when we are deliberately triggering them
+
 =head1 ABOUT
 
 This module contains code modified from Amazon that contains the
diff -pruN 0.54-1/Makefile.PL 0.55-1/Makefile.PL
--- 0.54-1/Makefile.PL	2022-07-13 17:35:04.000000000 +0000
+++ 0.55-1/Makefile.PL	2022-08-01 19:54:38.000000000 +0000
@@ -1,4 +1,4 @@
-# autogenerated by /usr/local/libexec/make-cpan-dist.pl on Wed Jul 13 13:35:04 2022
+# autogenerated by /usr/local/libexec/make-cpan-dist.pl on Mon Aug  1 15:54:37 2022
 
 use strict;
 use warnings;
@@ -11,58 +11,68 @@ if ( -d 'share' ) {
 }
 
 WriteMakefile(
-  NAME             => 'Amazon::S3',
-  MIN_PERL_VERSION => 5.010,
-  AUTHOR           => 'Rob Lauer <rlauer6@comcat.net>',
-  VERSION_FROM     => 'lib/Amazon/S3.pm',
-  ABSTRACT         => 'Perl interface to AWS S3 API',
-  LICENSE          => 'perl',
-  PL_FILES         => {},
-  EXE_FILES        => [],
-  PREREQ_PM        => {
-    'Class::Accessor::Fast'      => '0.51',
-    'Digest::HMAC_SHA1'          => '1.04',
-    'Digest::MD5::File'          => '0.08',
-    'HTTP::Date'                 => '6.02',
-    'LWP::UserAgent::Determined' => '1.07',
+  NAME => 'Amazon::S3',
+
+  AUTHOR       => 'Rob Lauer <rlauer6@comcat.net>',
+  VERSION_FROM => 'lib/Amazon/S3.pm',
+  ABSTRACT     => 'Perl interface to AWS S3 API',
+  LICENSE      => 'perl',
+  PL_FILES     => {},
+  EXE_FILES    => [],
+  PREREQ_PM    => {
+    'Class::Accessor::Fast'      => '0',
+    'Digest::HMAC_SHA1'          => '0',
+    'Digest::MD5::File'          => '0',
+    'HTTP::Date'                 => '0',
+    'IO::Scalar'                 => '0',
+    'JSON::PP'                   => '0',
+    'LWP'                        => '0',
+    'LWP::Protocol::https'       => '0',
+    'LWP::UserAgent::Determined' => '0',
     'List::Util'                 => '1.5',
     'Net::Amazon::Signature::V4' => '0',
-    'Readonly'                   => '2.05',
-    'Test::Output'               => '1.033',
-    'URI'                        => '5.10',
-    'URI::Escape'                => '5.10',
-    'XML::LibXML'                => '0'
+    'Net::HTTP'                  => '0',
+    'Pod::Markdown'              => '0',
+    'Readonly'                   => '0',
+    'URI'                        => '0',
+    'URI::Escape'                => '0',
+    'XML::Simple'                => '0'
   },
   BUILD_REQUIRES => {
-    'ExtUtils::MakeMaker'     => 0,
+    'ExtUtils::MakeMaker'     => '6.64',
     'File::ShareDir::Install' => 0,
   },
   CONFIGURE_REQUIRES => {
-    'ExtUtils::MakeMaker'     => 0,
+    'ExtUtils::MakeMaker'     => '6.64',
     'File::ShareDir::Install' => 0,
   },
   TEST_REQUIRES => {
     'Digest::MD5::File' => '0.08',
-    'Test::More'        => '1.302190'
+    'Test::More'        => '1.302190',
+    'Test::Output'      => '1.033'
   },
   META_MERGE => {
     'meta-spec' => { 'version' => 2 },
     'provides'  => {
       'Amazon::S3' => {
         'file'    => 'lib/Amazon/S3.pm',
-        'version' => '0.54'
+        'version' => '0.55'
       },
       'Amazon::S3::Bucket' => {
         'file'    => 'lib/Amazon/S3/Bucket.pm',
-        'version' => '0.54'
+        'version' => '0.55'
       },
       'Amazon::S3::Constants' => {
         'file'    => 'lib/Amazon/S3/Constants.pm',
-        'version' => '0.54'
+        'version' => '0.55'
       },
       'Amazon::S3::Logger' => {
         'file'    => 'lib/Amazon/S3/Logger.pm',
-        'version' => '0.54'
+        'version' => '0.55'
+      },
+      'Amazon::S3::Signature::V4' => {
+        'file'    => 'lib/Amazon/S3/Signature/V4.pm',
+        'version' => 'undef'
       }
     },
     'resources' => {
diff -pruN 0.54-1/MANIFEST 0.55-1/MANIFEST
--- 0.54-1/MANIFEST	2022-07-13 17:35:05.000000000 +0000
+++ 0.55-1/MANIFEST	2022-08-01 19:54:38.000000000 +0000
@@ -3,6 +3,7 @@ lib/Amazon/S3.pm
 lib/Amazon/S3/Bucket.pm
 lib/Amazon/S3/Constants.pm
 lib/Amazon/S3/Logger.pm
+lib/Amazon/S3/Signature/V4.pm
 Makefile.PL
 MANIFEST			This list of files
 README-TESTING.md
@@ -11,5 +12,7 @@ t/01-api.t
 t/02-logger.t
 t/03-region.t
 t/04-list-buckets.t
+t/05-multipart-upload.t
+t/06-list-multipart-uploads.t
 META.yml                                 Module YAML meta-data (added by MakeMaker)
 META.json                                Module JSON meta-data (added by MakeMaker)
diff -pruN 0.54-1/META.json 0.55-1/META.json
--- 0.54-1/META.json	2022-07-13 17:35:05.000000000 +0000
+++ 0.55-1/META.json	2022-08-01 19:54:38.000000000 +0000
@@ -22,56 +22,64 @@
    "prereqs" : {
       "build" : {
          "requires" : {
-            "ExtUtils::MakeMaker" : "0",
+            "ExtUtils::MakeMaker" : "6.64",
             "File::ShareDir::Install" : "0"
          }
       },
       "configure" : {
          "requires" : {
-            "ExtUtils::MakeMaker" : "0",
+            "ExtUtils::MakeMaker" : "6.64",
             "File::ShareDir::Install" : "0"
          }
       },
       "runtime" : {
          "requires" : {
-            "Class::Accessor::Fast" : "0.51",
-            "Digest::HMAC_SHA1" : "1.04",
-            "Digest::MD5::File" : "0.08",
-            "HTTP::Date" : "6.02",
-            "LWP::UserAgent::Determined" : "1.07",
+            "Class::Accessor::Fast" : "0",
+            "Digest::HMAC_SHA1" : "0",
+            "Digest::MD5::File" : "0",
+            "HTTP::Date" : "0",
+            "IO::Scalar" : "0",
+            "LWP" : "0",
+            "LWP::Protocol::https" : "0",
+            "LWP::UserAgent::Determined" : "0",
             "List::Util" : "1.5",
             "Net::Amazon::Signature::V4" : "0",
-            "Readonly" : "2.05",
-            "Test::Output" : "1.033",
-            "URI" : "5.10",
-            "URI::Escape" : "5.10",
-            "XML::LibXML" : "0",
-            "perl" : "5.01"
+            "Net::HTTP" : "0",
+            "Pod::Markdown" : "0",
+            "Readonly" : "0",
+            "URI" : "0",
+            "URI::Escape" : "0",
+            "XML::Simple" : "0"
          }
       },
       "test" : {
          "requires" : {
             "Digest::MD5::File" : "0.08",
-            "Test::More" : "1.302190"
+            "Test::More" : "1.302190",
+            "Test::Output" : "1.033"
          }
       }
    },
    "provides" : {
       "Amazon::S3" : {
          "file" : "lib/Amazon/S3.pm",
-         "version" : "0.54"
+         "version" : "0.55"
       },
       "Amazon::S3::Bucket" : {
          "file" : "lib/Amazon/S3/Bucket.pm",
-         "version" : "0.54"
+         "version" : "0.55"
       },
       "Amazon::S3::Constants" : {
          "file" : "lib/Amazon/S3/Constants.pm",
-         "version" : "0.54"
+         "version" : "0.55"
       },
       "Amazon::S3::Logger" : {
          "file" : "lib/Amazon/S3/Logger.pm",
-         "version" : "0.54"
+         "version" : "0.55"
+      },
+      "Amazon::S3::Signature::V4" : {
+         "file" : "lib/Amazon/S3/Signature/V4.pm",
+         "version" : "0"
       }
    },
    "release_status" : "stable",
@@ -87,6 +95,6 @@
          "web" : "http://github.com/rlauer6/perl-amazon-s3"
       }
    },
-   "version" : "0.54",
+   "version" : "0.55",
    "x_serialization_backend" : "JSON::PP version 4.10"
 }
diff -pruN 0.54-1/META.yml 0.55-1/META.yml
--- 0.54-1/META.yml	2022-07-13 17:35:05.000000000 +0000
+++ 0.55-1/META.yml	2022-08-01 19:54:38.000000000 +0000
@@ -4,11 +4,12 @@ author:
   - 'Rob Lauer <rlauer6@comcat.net>'
 build_requires:
   Digest::MD5::File: '0.08'
-  ExtUtils::MakeMaker: '0'
+  ExtUtils::MakeMaker: '6.64'
   File::ShareDir::Install: '0'
   Test::More: '1.302190'
+  Test::Output: '1.033'
 configure_requires:
-  ExtUtils::MakeMaker: '0'
+  ExtUtils::MakeMaker: '6.64'
   File::ShareDir::Install: '0'
 dynamic_config: 1
 generated_by: 'ExtUtils::MakeMaker version 7.44, CPAN::Meta::Converter version 2.150010'
@@ -24,33 +25,39 @@ no_index:
 provides:
   Amazon::S3:
     file: lib/Amazon/S3.pm
-    version: '0.54'
+    version: '0.55'
   Amazon::S3::Bucket:
     file: lib/Amazon/S3/Bucket.pm
-    version: '0.54'
+    version: '0.55'
   Amazon::S3::Constants:
     file: lib/Amazon/S3/Constants.pm
-    version: '0.54'
+    version: '0.55'
   Amazon::S3::Logger:
     file: lib/Amazon/S3/Logger.pm
-    version: '0.54'
+    version: '0.55'
+  Amazon::S3::Signature::V4:
+    file: lib/Amazon/S3/Signature/V4.pm
+    version: '0'
 requires:
-  Class::Accessor::Fast: '0.51'
-  Digest::HMAC_SHA1: '1.04'
-  Digest::MD5::File: '0.08'
-  HTTP::Date: '6.02'
-  LWP::UserAgent::Determined: '1.07'
+  Class::Accessor::Fast: '0'
+  Digest::HMAC_SHA1: '0'
+  Digest::MD5::File: '0'
+  HTTP::Date: '0'
+  IO::Scalar: '0'
+  LWP: '0'
+  LWP::Protocol::https: '0'
+  LWP::UserAgent::Determined: '0'
   List::Util: '1.5'
   Net::Amazon::Signature::V4: '0'
-  Readonly: '2.05'
-  Test::Output: '1.033'
-  URI: '5.10'
-  URI::Escape: '5.10'
-  XML::LibXML: '0'
-  perl: '5.01'
+  Net::HTTP: '0'
+  Pod::Markdown: '0'
+  Readonly: '0'
+  URI: '0'
+  URI::Escape: '0'
+  XML::Simple: '0'
 resources:
   bugtracker: http://github.com/rlauer6/perl-amazon-s3/issues
   homepage: http://github.com/rlauer6/perl-amazon-s3
   repository: git://github.com/rlauer6/perl-amazon-s3.git
-version: '0.54'
+version: '0.55'
 x_serialization_backend: 'CPAN::Meta::YAML version 0.018'
diff -pruN 0.54-1/README.md 0.55-1/README.md
--- 0.54-1/README.md	2022-07-13 17:35:03.000000000 +0000
+++ 0.55-1/README.md	2022-08-01 19:54:37.000000000 +0000
@@ -3,6 +3,8 @@
 Amazon::S3 - A portable client library for working with and
 managing Amazon S3 buckets and keys.
 
+![Amazon::S3](https://github.com/rlauer6/perl-amazon-s3/actions/workflows/build.yml/badge.svg?event=push)
+
 # SYNOPSIS
 
     #!/usr/bin/perl
@@ -103,46 +105,86 @@ dependencies. Below is the original desc
 > this module is forked and then modified to use [XML::SAX](https://metacpan.org/pod/XML%3A%3ASAX)
 > via [XML::Simple](https://metacpan.org/pod/XML%3A%3ASimple).
 
-# LIMITATIONS
+# LIMITATIONS AND DIFFERENCES WITH EARLIER VERSIONS
+
+As noted, this module is no longer a _drop-in_ replacement for
+`Net::Amazon::S3` and has limitations and differences that may make
+the use of this module in your applications questionable.
+
+- MINIMUM PERL
+
+    Technically, this module should run on versions 5.10 and above,
+    however some of the dependencies may require higher versions of
+    `perl` or install new versions of some dependencies that conflict
+    with other versions of dependencies...it's a crapshoot when dealing
+    with older `perl` versions and CPAN modules.
+
+    You may however, be able to build this module by installing older
+    versions of those dependencies and take your chances that those older
+    versions provide enough working features to support `Amazon::S3`. It
+    is likely they do...and this module has recently been tested on
+    version 5.10.0 `perl` using some older CPAN modules to resolve
+    dependency issues.
 
-As noted this module is no longer a _drop-in_ replacement for
-`Net::Amazon::S3` and has limitations that may make the use of this
-module in your applications questionable. The list of limitations
-below may not be complete.
+    To build this module on an earlier version of `perl` you may need to
+    downgrade some modules.  In particular I have found this recipe to
+    work for building and testing on 5.10.0.
 
-- API Signing
+    In this order install:
+
+        HTML::HeadParser 2.14
+        LWP 6.13
+        Amazon::S3 0.55
+
+    ...other versions _may_ work...YMMV.
+
+- \* API Signing
 
     Making calls to AWS APIs requires that the calls be signed.  Amazon
     has added a new signing method (Signature Version 4) to increase
-    security around their APIs.  This module continues to use the original
-    signing method (Signature Version 2).
+    security around their APIs. This module no longer utilizes Signature
+    Version V2.
 
     **New regions after January 30, 2014 will only support Signature Version 4.**
 
-    There has been some effort to add support of Signature Version 4
-    however several method in this package may need significant
-    refactoring and testing in order to support the new sigining method.
-
-    - Signature Version 2
-
-        [https://docs.aws.amazon.com/AmazonS3/latest/userguide/RESTAuthentication.html](https://docs.aws.amazon.com/AmazonS3/latest/userguide/RESTAuthentication.html)
+    See ["Signature Version V4"](#signature-version-v4) below for important details.
 
     - Signature Version 4
 
         [https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html](https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html)
 
-- New APIs
+        _IMPORTANT NOTE:_
 
-    This module does not support the myriad of new API method calls
-    available for S3 since its original creation.
+        Unlike Signature Version 2, Version 4 requires a regional
+        parameter. This implies that you need to supply the bucket's region
+        when signing requests for any API call that involves a specific
+        bucket. Starting with version 0.55 of this module,
+        `Amazon::S3::Bucket` provides a new method (`region()`) and accepts
+        in the constructor a `region` parameter.  If a region is not
+        supplied, the region for the bucket will be set to the region set in
+        the `account` object (`Amazon::S3`) that you passed to the bucket's
+        new constructor.  Alternatively, you can request that the bucket's new
+        constructor determine the bucket's region for you by calling the
+        `get_location_constraint()` method.
+
+        When signing API calls, the region for the specific bucket will be
+        used. For calls that are not regional (`buckets()`, e.g.) the default
+        region ('us-east-1') will be used.
 
-- Multipart Upload Support
+    - Signature Version 2
+
+        [https://docs.aws.amazon.com/AmazonS3/latest/userguide/RESTAuthentication.html](https://docs.aws.amazon.com/AmazonS3/latest/userguide/RESTAuthentication.html)
 
-    While there are undocumented methods for multipart uploads (used for
-    files >5Gb), those methods have not been tested and may not in fact
-    work today.
+- \* New APIs
 
-    For more information regarding multipart uploads visit the link below.
+    This module does not support some of the newer API method calls
+    for S3 added after the initial creation of this interface.
+
+- \* Multipart Upload Support
+
+    There is limited testing for multipart uploads.
+
+    For more information regarding multi-part uploads visit the link below.
 
     [https://docs.aws.amazon.com/AmazonS3/latest/API/API\_CreateMultipartUpload.html](https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html)
 
@@ -239,7 +281,7 @@ Create a new S3 client object. Takes som
 
     The AWS region you where your bucket is located.
 
-    default: no region
+    default: us-east-1
 
 - buffer\_size
 
@@ -247,25 +289,104 @@ Create a new S3 client object. Takes som
 
     default: 4096
 
-## turn\_on\_special\_retry
+## signer
 
-Called to add extry retry codes if retry has been set
+Sets or retrieves the signer object. API calls must be signed using
+your AWS credentials. By default, starting with version 0.54 the
+module will use [Net::Amazon::Signature::V4](https://metacpan.org/pod/Net%3A%3AAmazon%3A%3ASignature%3A%3AV4) as the signer and
+instantiate a signer object in the constructor. Note however, that
+signers need your credentials and they _will_ get stored by that
+class, making them susceptible to inadvertent exfiltration. You have a
+few options here:
 
-## turn\_off\_special\_retry
+- 1. Use your own signer.
 
-Called to turn off special retry codes when we are deliberately triggering them
+    You may have noticed that you can also provide your own credentials
+    object forcing this module to use your object for retrieving
+    credentials. Likewise, you can use your own signer so that this
+    module's signer never sees or stores those credentials.
+
+- 2. Pass the credentials object and set `cache_signer` to a
+false value.
+
+    If you pass a credentials object and set `cache_signer` to a false
+    value, the module will use the credentials object to retrieve
+    credentials and create a new signer each time an API call is made that
+    requires signing. This prevents your credentials from being stored
+    inside of the signer class.
+
+    _Note that using your own credentials object that stores your
+    credentials in plaintext is also going to expose your credentials when
+    someone dumps the class._
+
+- 3. Pass credentials, set `cache_signer` to a false value.
+
+    Unfortunately, while this will prevent [Net::Amazon::Signature::V4](https://metacpan.org/pod/Net%3A%3AAmazon%3A%3ASignature%3A%3AV4)
+    from hanging on to your credentials, your credentials will be stored in
+    the [Amazon::S3](https://metacpan.org/pod/Amazon%3A%3AS3) object.
+
+    Starting with version 0.55 of this module, if you have installed
+    [Crypt::CBC](https://metacpan.org/pod/Crypt%3A%3ACBC) and [Crypt::Blowfish](https://metacpan.org/pod/Crypt%3A%3ABlowfish), your credentials will be
+    encrypted using a random key created when the class is
+    instantiated. While this is more secure than leaving them in
+    plaintext, if the key is discovered (the key however is not stored in
+    the object's hash) and the object is dumped, your _encrypted_
+    credentials can be exposed.
+
+- 4. Use very granular credentials for bucket access only.
+
+    Use credentials that only allow access to a bucket or portions of a
+    bucket required for your application. This will at least limit the
+    _blast radius_ of any potential security breach.
 
-## adjust\_region
+- 5. Do nothing...send the credentials, use the default signer.
 
-Sets the region for the signing object to be appropriate for the bucket
+    In this case, both the [Amazon::S3](https://metacpan.org/pod/Amazon%3A%3AS3) class and the
+    [Net::Amazon::Signature::V4](https://metacpan.org/pod/Net%3A%3AAmazon%3A%3ASignature%3A%3AV4) have your credentials. Caveat Emptor.
+
+    See Also [Amazon::Credentials](https://metacpan.org/pod/Amazon%3A%3ACredentials) for more information about safely
+    storing your credentials and preventing exfiltration.
+
+## region
+
+Sets the region for the API calls. This will also be the
+default when instantiating the bucket object unless you pass the
+region parameter in the `bucket` method or use the `verify_region`
+flag that will _always_ verify the region of the bucket using the
+`get_location_constraint` method.
+
+default: us-east-1
 
 ## buckets
 
-Returns `undef` on error, else HASHREF of results:
+    buckets([verify-region])
+
+- verify-region (optional)
+
+    `verify-region` is a boolean value that indicates if the
+    bucket's region should be verified when the bucket object is
+    instantiated.
+
+    If set to true, this method will call the `bucket` method with
+    `verify_region` set to true causing the constructor to call the
+    `get_location_constraint` for each bucket to set the bucket's
+    region. This will cause a significant decrease in the performance of
+    the `buckets()` method. Setting the region for each bucket is
+    necessary since API operations on buckets require the region of the
+    bucket when signing API requests. If all of your buckets are in the
+    same region and you have passed a region parameter to your S3 object,
+    then that region will be used when calling the constructor of your
+    bucket objects.
+
+    default: false
+
+Returns a HASHREF containing the metadata for all of the buckets
+owned by the account (see below) or `undef` on
+error.
 
 - owner\_id
 
-    The owner's ID of the buckets owner.
+    The owner ID of the bucket's owner.
 
 - owner\_display\_name
 
@@ -273,40 +394,78 @@ Returns `undef` on error, else HASHREF o
 
 - buckets
 
-    Any ARRAYREF of [Amazon::SimpleDB::Bucket](https://metacpan.org/pod/Amazon%3A%3ASimpleDB%3A%3ABucket) objects for the 
+    Any ARRAYREF of [Amazon::S3::Bucket](https://metacpan.org/pod/Amazon%3A%3AS3%3A%3ABucket) objects for the 
     account.
 
-## add\_bucket 
+## add\_bucket
+
+    add_bucket(bucket-configuration)
 
-Takes a HASHREF:
+`bucket-configuration` is a reference to a hash with bucket configuration
+parameters.
 
 - bucket
 
-    The name of the bucket you want to add
+    The name of the bucket. See [Bucket name
+    rules](https://docs.aws.amazon.com/AmazonS3/latest/userguide/bucketnamingrules.html)
+    for more details on bucket naming rules.
 
 - acl\_short (optional)
 
     See the set\_acl subroutine for documenation on the acl\_short options
 
-Returns 0 on failure or a [Amazon::S3::Bucket](https://metacpan.org/pod/Amazon%3A%3AS3%3A%3ABucket) object on success
+- location\_constraint
+- region
+
+    The region the bucket is to be created in.
 
-## bucket BUCKET
+Returns a [Amazon::S3::Bucket](https://metacpan.org/pod/Amazon%3A%3AS3%3A%3ABucket) object on success or `undef` on failure.
 
-Takes a scalar argument, the name of the bucket you're creating
+## bucket
 
-Returns an (unverified) bucket object from an account. This method does not access the network.
+    bucket(bucket, [region])
+
+    bucket({ bucket => bucket-name, verify_region => boolean, region => region });
+
+Takes a scalar argument or reference to a hash of arguments.
+
+You can pass the region or set `verify_region` indicating that
+you want the bucket constructor to determine the bucket region.
+
+If you do not pass the region or set the `verify_region` value, the
+region will be set to the default region set in your `Amazon::S3`
+object.
+
+See [Amazon::S3::Bucket](https://metacpan.org/pod/Amazon%3A%3AS3%3A%3ABucket) for a complete description of the `bucket`
+method.
 
 ## delete\_bucket
 
-Takes either a [Amazon::S3::Bucket](https://metacpan.org/pod/Amazon%3A%3AS3%3A%3ABucket) object or a HASHREF containing 
+Takes either a [Amazon::S3::Bucket](https://metacpan.org/pod/Amazon%3A%3AS3%3A%3ABucket) object or a HASHREF containing:
 
 - bucket
 
     The name of the bucket to remove
 
-Returns false (and fails) if the bucket isn't empty.
+- region
+
+    Region the bucket is located in. If not provided, the method will
+    determine the bucket's region by calling `get_bucket_location`.
+
+Returns a boolean indicating the success or failure of the API
+call. Check `err` or `errstr` for error messages.
 
-Returns true if the bucket is successfully deleted.
+Note from the [Amazon's documentation](https://docs.aws.amazon.com/AmazonS3/latest/userguide/BucketRestrictions.html)
+
+> If a bucket is empty, you can delete it. After a bucket is deleted,
+> the name becomes available for reuse. However, after you delete the
+> bucket, you might not be able to reuse the name for various reasons.
+>
+> For example, when you delete the bucket and the name becomes available
+> for reuse, another AWS account might create a bucket with that
+> name. In addition, **some time might pass before you can reuse the name
+> of a deleted bucket**. If you want to use the same bucket name, we
+> recommend that you don't delete the bucket.
 
 ## dns\_bucket\_names
 
@@ -426,6 +585,26 @@ Each key is a HASHREF that looks like th
         owner_displayname => $owner_name
     }
 
+## get\_bucket\_location
+
+    get_bucket_location(bucket-name)
+    get_bucket_location(bucket-obj)
+
+This is a convenience routine for the `get_location_constraint()` of
+the bucket object.  This method will, however, return the default
+region of 'us-east-1' when `get_location_constraint()` returns a null
+value.
+
+    my $region = $s3->get_bucket_location('my-bucket');
+
+Starting with version 0.55, `Amazon::S3::Bucket` will call this
+`get_location_constraint()` to determine the region for the
+bucket. You can get the region for the bucket by using the `region()`
+method of the bucket object.
+
+    my $bucket = $s3->bucket('my-bucket');
+    my $bucket_region = $bucket->region;
+
 ## get\_logger
 
 Returns the logger object. If you did not set a logger when you
@@ -446,6 +625,18 @@ Takes the same arguments as list\_bucket
 
 _You are encouraged to use the newer `list_bucket_all_v2` method._
 
+## err
+
+The S3 error code for the last error encountered.
+
+## errstr
+
+A human readable error string for the last error encountered.
+
+## error
+
+The decoded XML string as a hash object of the last error.
+
 ## last\_response
 
 Returns the last [HTTP::Response](https://metacpan.org/pod/HTTP%3A%3AResponse) object.
@@ -460,6 +651,14 @@ Set the logging level.
 
 default: error
 
+## turn\_on\_special\_retry
+
+Called to add extra retry codes if retry has been set
+
+## turn\_off\_special\_retry
+
+Called to turn off special retry codes when we are deliberately triggering them
+
 # ABOUT
 
 This module contains code modified from Amazon that contains the
@@ -619,3 +818,19 @@ terms of the Artistic License are descri
 http://www.perl.com/language/misc/Artistic.html. Except
 where otherwise noted, `Amazon::S3` is Copyright 2008, Timothy
 Appnel, tima@cpan.org. All rights reserved.
+
+# POD ERRORS
+
+Hey! **The above document had some coding errors, which are explained below:**
+
+- Around line 1555:
+
+    Expected text after =item, not a bullet
+
+- Around line 1596:
+
+    Expected text after =item, not a bullet
+
+- Around line 1601:
+
+    Expected text after =item, not a bullet
diff -pruN 0.54-1/t/01-api.t 0.55-1/t/01-api.t
--- 0.54-1/t/01-api.t	2022-07-13 17:35:04.000000000 +0000
+++ 0.55-1/t/01-api.t	2022-08-01 19:54:37.000000000 +0000
@@ -50,8 +50,8 @@ my $secure = $host ? 0 : 1;
 # your tests may fail unless you have DNS entry for the bucket name
 # e.g 127.0.0.1 net-amazon-s3-test-test.localhost
 
-my $dns_bucket_names;
-#  = ( $host && !exists $ENV{AMAZON_S3_DNS_BUCKET_NAMES} ) ? 0 : 1;
+my $dns_bucket_names
+  = ( $host && !exists $ENV{AMAZON_S3_DNS_BUCKET_NAMES} ) ? 0 : 1;
 
 $skip_acls //= exists $ENV{AMAZON_S3_MINIO}
   || exists $ENV{AMAZON_S3_SKIP_ACL_TESTS};
@@ -153,7 +153,7 @@ for my $location (@REGIONS) {
     };
 
     if ( $EVAL_ERROR || !$bucket_obj ) {
-      diag( $s3->err . ": " . $s3->errstr );
+      diag( Dumper( [ $EVAL_ERROR, $s3->err, $s3->errstr, $s3->error ] ) );
     } ## end if ( $EVAL_ERROR || !$bucket_obj)
 
     last if $bucket_obj;
@@ -207,7 +207,7 @@ for my $location (@REGIONS) {
     skip "invalid response to 'list'"
       if !$response;
 
-    is( $response->{bucket}, $bucketname =~ s/^\///r )
+    is( $response->{bucket}, $bucketname_raw )
       or BAIL_OUT( Dumper [$response] );
 
     ok( !$response->{prefix} );
@@ -219,7 +219,7 @@ for my $location (@REGIONS) {
     is( $response->{is_truncated}, 0 );
 
     is_deeply( $response->{keys}, [] )
-      or BAIL_OUT( Dumper( [$response] ) );
+      or diag( Dumper( [$response] ) );
 
     is( undef, $bucket_obj->get_key("non-existing-key") );
   } ## end SKIP:
@@ -373,11 +373,7 @@ for my $location (@REGIONS) {
       BAIL_OUT( $s3->err . ": " . $s3->errstr );
     } ## end if ( !$response )
 
-    is(
-      $response->{bucket},
-      $bucketname =~ s/^\///r,
-      "list($v) - bucketname "
-    );
+    is( $response->{bucket}, $bucketname_raw, "list($v) - bucketname " );
 
     ok( !$response->{prefix}, "list($v) - prefix empty" )
       or diag( Dumper [$response] );
diff -pruN 0.54-1/t/03-region.t 0.55-1/t/03-region.t
--- 0.54-1/t/03-region.t	2022-07-13 17:35:04.000000000 +0000
+++ 0.55-1/t/03-region.t	2022-08-01 19:54:37.000000000 +0000
@@ -21,23 +21,25 @@ my $s3 = Amazon::S3->new(
   }
 );
 
-is( $s3->host, 's3.amazonaws.com', 'default host is s3.amazonaws.com' );
-ok( !defined $s3->region, 'default region is undefined' );
+ok( $s3->region, 'us-east-1' );
+is( $s3->host, 's3.us-east-1.amazonaws.com',
+  'default host is s3.us-east-1.amazonaws.com' );
 
 $s3 = Amazon::S3->new(
   { aws_access_key_id     => 'test',
     aws_secret_access_key => 'test',
-    region                => 'us-east-1',
+    region                => 'us-west-2',
     log_level             => $ENV{DEBUG} ? 'debug' : undef,
   }
 );
-is( $s3->region, 'us-east-1', 'region is set' );
-is( $s3->host, 's3.us-east-1.amazonaws.com',
-  'host is modified during creation' );
-
-$s3->region('us-west-2');
 
 is( $s3->region, 'us-west-2', 'region is set' );
 is( $s3->host, 's3.us-west-2.amazonaws.com',
+  'host is modified during creation' );
+
+$s3->region('us-east-1');
+
+is( $s3->region, 'us-east-1', 'region is set' );
+is( $s3->host, 's3.us-east-1.amazonaws.com',
   'host is modified when region changes' );
 
diff -pruN 0.54-1/t/04-list-buckets.t 0.55-1/t/04-list-buckets.t
--- 0.54-1/t/04-list-buckets.t	2022-07-13 17:35:04.000000000 +0000
+++ 0.55-1/t/04-list-buckets.t	2022-08-01 19:54:37.000000000 +0000
@@ -56,7 +56,9 @@ else {
   );
 } ## end else [ if ( $ENV{AMAZON_S3_CREDENTIALS...})]
 
-my $bucketname = sprintf '/net-amazon-s3-test-%s', lc $aws_access_key_id;
+my $bucketname_raw = sprintf 'net-amazon-s3-test-%s', lc $aws_access_key_id;
+
+my $bucketname = '/' . $bucketname_raw;
 
 my $bucket_obj = eval { $s3->add_bucket( { bucket => $bucketname } ); };
 
@@ -70,11 +72,8 @@ is( ref $bucket_obj, 'Amazon::S3::Bucket
 my $response = $bucket_obj->list
   or BAIL_OUT( $s3->err . ": " . $s3->errstr );
 
-is(
-  $response->{bucket},
-  $bucketname =~ s/^\///r,
-  'no bucket name is list response'
-) or BAIL_OUT( Dumper [$response] );
+is( $response->{bucket}, $bucketname_raw, 'no bucket name is list response' )
+  or BAIL_OUT( Dumper [$response] );
 
 ok( !$response->{prefix}, 'no prefix in list response' );
 ok( !$response->{marker}, 'no marker in list response' );
@@ -137,7 +136,7 @@ subtest 'list' => sub {
       BAIL_OUT( $s3->err . ": " . $s3->errstr );
     } ## end if ( !$response )
 
-    is( $response->{bucket}, $bucketname =~ s/^\///r, 'no bucket name' );
+    is( $response->{bucket}, $bucketname_raw, 'no bucket name' );
 
     ok( !$response->{prefix}, 'no prefix' )
       or diag( Dumper [$response] );
@@ -176,7 +175,7 @@ subtest 'list-v2' => sub {
       BAIL_OUT( $s3->err . ": " . $s3->errstr );
     } ## end if ( !$response )
 
-    is( $response->{bucket}, $bucketname =~ s/^\///r, 'no bucket name' );
+    is( $response->{bucket}, $bucketname_raw, 'no bucket name' );
 
     ok( !$response->{prefix}, 'no prefix' )
       or diag( Dumper [$response] );
diff -pruN 0.54-1/t/05-multipart-upload.t 0.55-1/t/05-multipart-upload.t
--- 0.54-1/t/05-multipart-upload.t	1970-01-01 00:00:00.000000000 +0000
+++ 0.55-1/t/05-multipart-upload.t	2022-08-01 19:54:37.000000000 +0000
@@ -0,0 +1,228 @@
+#!/usr/bin/perl -w
+
+## no critic
+
+use warnings;
+use strict;
+
+use lib 'lib';
+
+use Carp;
+
+use Data::Dumper;
+use Digest::MD5::File qw(file_md5_hex);
+use English qw{-no_match_vars};
+use File::Temp qw{ tempfile };
+use Test::More;
+
+my $host;
+
+if ( exists $ENV{AMAZON_S3_LOCALSTACK} ) {
+  $host = 'localhost:4566';
+
+  $ENV{'AWS_ACCESS_KEY_ID'}     = 'test';
+  $ENV{'AWS_ACCESS_KEY_SECRET'} = 'test';
+
+  $ENV{'AMAZON_S3_EXPENSIVE_TESTS'} = 1;
+
+} ## end if ( exists $ENV{AMAZON_S3_LOCALSTACK...})
+else {
+  $host = $ENV{AMAZON_S3_HOST};
+} ## end else [ if ( exists $ENV{AMAZON_S3_LOCALSTACK...})]
+
+my $secure = $host ? 0 : 1;
+
+# do not use DNS bucket names for testing if a mocking service is used
+# override this by setting AMAZON_S3_DNS_BUCKET_NAMES to any value
+# your tests may fail unless you have DNS entry for the bucket name
+# e.g 127.0.0.1 net-amazon-s3-test-test.localhost
+
+my $dns_bucket_names
+  = ( $host && !exists $ENV{AMAZON_S3_DNS_BUCKET_NAMES} ) ? 0 : 1;
+
+my $aws_access_key_id     = $ENV{'AWS_ACCESS_KEY_ID'};
+my $aws_secret_access_key = $ENV{'AWS_ACCESS_KEY_SECRET'};
+my $token                 = $ENV{'AWS_SESSION_TOKEN'};
+
+if ( !$ENV{'AMAZON_S3_EXPENSIVE_TESTS'} ) {
+  plan skip_all => 'Testing this module for real costs money.';
+} ## end if ( !$ENV{'AMAZON_S3_EXPENSIVE_TESTS'...})
+else {
+  plan tests => 7;
+}
+
+use_ok('Amazon::S3');
+use_ok('Amazon::S3::Bucket');
+
+my $s3;
+
+if ( $ENV{AMAZON_S3_CREDENTIALS} ) {
+  require Amazon::Credentials;
+
+  $s3 = Amazon::S3->new(
+    { credentials      => Amazon::Credentials->new,
+      host             => $host,
+      secure           => $secure,
+      dns_bucket_names => $dns_bucket_names,
+      level            => $ENV{DEBUG} ? 'trace' : 'error',
+    }
+  );
+  ( $aws_access_key_id, $aws_secret_access_key, $token )
+    = $s3->get_credentials;
+} ## end if ( $ENV{AMAZON_S3_CREDENTIALS...})
+else {
+  $s3 = Amazon::S3->new(
+    { aws_access_key_id     => $aws_access_key_id,
+      aws_secret_access_key => $aws_secret_access_key,
+      token                 => $token,
+      host                  => $host,
+      secure                => $secure,
+      dns_bucket_names      => $dns_bucket_names,
+      level                 => $ENV{DEBUG} ? 'trace' : 'error',
+    }
+  );
+} ## end else [ if ( $ENV{AMAZON_S3_CREDENTIALS...})]
+
+sub create_bucket {
+  my ($bucket_name) = @_;
+
+  $bucket_name = '/' . $bucket_name;
+  my $bucket_obj
+    = eval { return $s3->add_bucket( { bucket => $bucket_name } ); };
+
+  return $bucket_obj;
+}
+
+my $bucket_obj = create_bucket sprintf 'net-amazon-s3-test-%s',
+  lc $aws_access_key_id;
+
+ok( ref $bucket_obj, 'created bucket' );
+
+if ( $EVAL_ERROR || !$bucket_obj ) {
+  BAIL_OUT( $s3->err . ": " . $s3->errstr );
+} ## end if ( $EVAL_ERROR || !$bucket_obj)
+
+subtest 'multipart-manual' => sub {
+  my $key = 'big-object-1';
+
+  my $id = $bucket_obj->initiate_multipart_upload($key);
+
+  my $part_list = {};
+
+  my $part = 0;
+  my $data = 'x' x ( 1024 * 1024 * 5 ); # 5 MB part
+
+  my $etag
+    = $bucket_obj->upload_part_of_multipart_upload( $key, $id, ++$part, $data,
+    length $data );
+
+  $part_list->{$part} = $etag;
+
+  $bucket_obj->complete_multipart_upload( $key, $id, $part_list );
+
+  my $head = $bucket_obj->head_key($key);
+
+  ok( $head, 'uploaded file' );
+
+  ok( $head->{content_length} == 5 * 1024 * 1024, 'uploaded 1 part' )
+    or diag( Dumper( [$head] ) );
+
+  ok( $bucket_obj->delete_key($key) );
+};
+
+subtest 'multipart-file' => sub {
+  my ( $fh, $file ) = tempfile();
+
+  my $buffer = 'x' x ( 1024 * 1024 );
+
+  # 11MB
+  foreach ( 0 .. 10 ) {
+    $fh->syswrite($buffer);
+  }
+
+  $fh->close;
+
+  if ( !open( $fh, '<', $file ) ) {
+    carp "could not open $file after writing";
+
+    return;
+  }
+
+  my $key = 'big-object-2';
+
+  $bucket_obj->upload_multipart_object( fh => $fh, key => $key );
+
+  close $fh;
+
+  my $head = $bucket_obj->head_key($key);
+
+  ok( $head, 'uploaded file' );
+
+  isa_ok( $head, 'HASH', 'head is a hash' );
+
+  ok( $head->{content_length} == 11 * 1024 * 1024, 'uploaded all parts' );
+
+  $bucket_obj->delete_key($key);
+
+  unlink $file;
+};
+
+subtest 'multipart-2-parts' => sub {
+  my $length = 1024 * 1024 * 7;
+
+  my $data = 'x' x $length;
+
+  my $key = 'big-object-3';
+
+  $bucket_obj->upload_multipart_object(
+    key  => $key,
+    data => $data
+  );
+
+  my $head = $bucket_obj->head_key($key);
+
+  isa_ok( $head, 'HASH', 'head is a hash' );
+
+  ok( $head, 'uploaded data' );
+
+  ok( $head->{content_length} == $length, 'uploaded all parts' );
+
+  $bucket_obj->delete_key($key);
+};
+
+subtest 'multipart-callback' => sub {
+  my $key = 'big-object-4';
+
+  my @part = ( 5, 5, 5, 1 );
+  my $size;
+
+  $bucket_obj->upload_multipart_object(
+    key      => $key,
+    callback => sub {
+      return ( q{}, 0 ) unless @part;
+
+      my $length = shift @part;
+      $length *= 1024 * 1024;
+
+      $size += $length;
+
+      my $data = 'x' x $length;
+
+      return ( \$data, $length );
+    }
+  );
+
+  my $head = $bucket_obj->head_key($key);
+
+  isa_ok( $head, 'HASH', 'head is a hash' );
+
+  ok( $head, 'uploaded data' );
+
+  ok( $head->{content_length} == $size, 'uploaded all parts' );
+
+  $bucket_obj->delete_key($key);
+};
+
+$bucket_obj->delete_bucket()
+  or diag( $s3->errstr );
+
diff -pruN 0.54-1/t/06-list-multipart-uploads.t 0.55-1/t/06-list-multipart-uploads.t
--- 0.54-1/t/06-list-multipart-uploads.t	1970-01-01 00:00:00.000000000 +0000
+++ 0.55-1/t/06-list-multipart-uploads.t	2022-08-01 19:54:37.000000000 +0000
@@ -0,0 +1,200 @@
+#!/usr/bin/perl -w
+
+## no critic
+
+use warnings;
+use strict;
+
+use lib 'lib';
+
+use Carp;
+
+use Data::Dumper;
+use Digest::MD5::File qw(file_md5_hex);
+use English qw{-no_match_vars};
+use File::Temp qw{ tempfile };
+use Test::More;
+use XML::Simple qw{XMLin};
+
+my $host;
+
+if ( exists $ENV{AMAZON_S3_LOCALSTACK} ) {
+  $host = 'localhost:4566';
+
+  $ENV{'AWS_ACCESS_KEY_ID'}     = 'test';
+  $ENV{'AWS_ACCESS_KEY_SECRET'} = 'test';
+
+  $ENV{'AMAZON_S3_EXPENSIVE_TESTS'} = 1;
+
+} ## end if ( exists $ENV{AMAZON_S3_LOCALSTACK...})
+else {
+  $host = $ENV{AMAZON_S3_HOST};
+} ## end else [ if ( exists $ENV{AMAZON_S3_LOCALSTACK...})]
+
+my $secure = $host ? 0 : 1;
+
+# do not use DNS bucket names for testing if a mocking service is used;
+# override this by setting AMAZON_S3_DNS_BUCKET_NAMES to any value.
+# Your tests may fail unless you have a DNS entry for the bucket name,
+# e.g. 127.0.0.1 net-amazon-s3-test-test.localhost
+
+my $dns_bucket_names
+  = ( $host && !exists $ENV{AMAZON_S3_DNS_BUCKET_NAMES} ) ? 0 : 1;
+
+my $aws_access_key_id     = $ENV{'AWS_ACCESS_KEY_ID'};
+my $aws_secret_access_key = $ENV{'AWS_ACCESS_KEY_SECRET'};
+my $token                 = $ENV{'AWS_SESSION_TOKEN'};
+
+if ( !$ENV{'AMAZON_S3_EXPENSIVE_TESTS'} ) {
+  plan skip_all => 'Testing this module for real costs money.';
+} ## end if ( !$ENV{'AMAZON_S3_EXPENSIVE_TESTS'...})
+else {
+  plan tests => 6;
+}
+
+use_ok('Amazon::S3');
+use_ok('Amazon::S3::Bucket');
+
+my $s3;
+
+if ( $ENV{AMAZON_S3_CREDENTIALS} ) {
+  require Amazon::Credentials;
+
+  $s3 = Amazon::S3->new(
+    { credentials      => Amazon::Credentials->new,
+      host             => $host,
+      secure           => $secure,
+      dns_bucket_names => $dns_bucket_names,
+      level            => $ENV{DEBUG} ? 'trace' : 'error',
+    }
+  );
+  ( $aws_access_key_id, $aws_secret_access_key, $token )
+    = $s3->get_credentials;
+} ## end if ( $ENV{AMAZON_S3_CREDENTIALS...})
+else {
+  $s3 = Amazon::S3->new(
+    { aws_access_key_id     => $aws_access_key_id,
+      aws_secret_access_key => $aws_secret_access_key,
+      token                 => $token,
+      host                  => $host,
+      secure                => $secure,
+      dns_bucket_names      => $dns_bucket_names,
+      level                 => $ENV{DEBUG} ? 'trace' : 'error',
+    }
+  );
+} ## end else [ if ( $ENV{AMAZON_S3_CREDENTIALS...})]
+
+########################################################################
+sub list_multipart_uploads {
+########################################################################
+  my ($bucket_obj) = @_;
+
+  my $xml = $bucket_obj->list_multipart_uploads;
+
+  ok( $xml =~ /^</xms, 'is xml result' );
+
+  my $uploads = XMLin( $xml, KeepRoot => 1 );
+
+  isa_ok( $uploads, 'HASH', 'made a hash object' )
+    or diag($uploads);
+
+  ok( defined $uploads->{ListMultipartUploadsResult},
+    'looks like a results object' )
+    or diag($xml);
+
+  my $upload_list = $uploads->{ListMultipartUploadsResult}->{Upload};
+
+  return $upload_list;
+}
+
+########################################################################
+sub partial_upload {
+########################################################################
+  my ( $key, $bucket_obj, $size_in_mb ) = @_;
+
+  my $id     = $bucket_obj->initiate_multipart_upload($key);
+  my $length = ( $size_in_mb || 5 ) * 1024 * 1024;
+
+  my $data = 'x' x $length;
+
+  my $etag
+    = $bucket_obj->upload_part_of_multipart_upload( $key, $id, 1, $data,
+    $length );
+
+  return $id;
+}
+
+########################################################################
+sub create_bucket {
+########################################################################
+  my ($bucket_name) = @_;
+
+  $bucket_name = '/' . $bucket_name;
+  my $bucket_obj
+    = eval { return $s3->add_bucket( { bucket => $bucket_name } ); };
+
+  return $bucket_obj;
+}
+
+my $bucket_name = sprintf 'net-amazon-s3-test-%s', lc $aws_access_key_id;
+my $bucket_obj  = create_bucket $bucket_name;
+
+ok( ref $bucket_obj, 'created bucket - ' . $bucket_name );
+
+if ( $EVAL_ERROR || !$bucket_obj ) {
+  BAIL_OUT( $s3->err . ": " . $s3->errstr );
+} ## end if ( $EVAL_ERROR || !$bucket_obj)
+
+my $id;
+my $key = 'big-object-1';
+
+subtest 'list-multipart-uploads' => sub {
+
+  my $upload_list = list_multipart_uploads($bucket_obj);
+  ok( !defined $upload_list, 'no in-progress uploads' )
+    or diag( Dumper( [$upload_list] ) );
+
+  $id = partial_upload( $key, $bucket_obj );
+
+  $upload_list = list_multipart_uploads($bucket_obj);
+
+  ok( $upload_list->{UploadId} eq $id, 'UploadId eq $id' );
+};
+
+subtest 'abort-multipart-upload' => sub {
+
+  $bucket_obj->abort_multipart_upload( $key, $id );
+
+  my $upload_list = list_multipart_uploads($bucket_obj);
+
+  ok( !defined $upload_list, 'aborted upload' );
+};
+
+subtest 'abort-on-error' => sub {
+  my $id = $bucket_obj->initiate_multipart_upload($key);
+
+  my $part_list = {};
+
+  my $part = 0;
+  my $data = 'x' x ( 1024 * 1024 * 1 ); # 1 MB is below S3's 5 MB minimum part size, so complete should fail
+
+  # do this twice...
+  foreach ( 0 .. 1 ) {
+    my $etag
+      = $bucket_obj->upload_part_of_multipart_upload( $key, $id, ++$part,
+      $data, length $data );
+
+    $part_list->{$part} = $etag;
+  }
+
+  eval { $bucket_obj->complete_multipart_upload( $key, $id, $part_list ); };
+
+  ok( $EVAL_ERROR =~ /Bad Request/, 'abort-on-error successful' )
+    or diag( Dumper( [ $EVAL_ERROR, $id ] ) );
+
+  $bucket_obj->abort_multipart_upload( $key, $id );
+};
+
+$bucket_obj->delete_bucket()
+  or diag( $s3->errstr );
+
