diff -pruN 0.19.3+ds1-4/.github/labeler.yml 0.21.3-0ubuntu1/.github/labeler.yml
--- 0.19.3+ds1-4/.github/labeler.yml	2024-12-17 11:25:19.000000000 +0000
+++ 0.21.3-0ubuntu1/.github/labeler.yml	2025-03-17 16:14:25.000000000 +0000
@@ -96,6 +96,11 @@ area/hack:
   - changed-files:
     - any-glob-to-any-file: 'hack/**'
 
+# Add 'area/history' label to changes in history command
+area/history:
+  - changed-files:
+    - any-glob-to-any-file: 'commands/history/**'
+
 # Add 'area/tests' label to changes in test files
 area/tests:
   - changed-files:
diff -pruN 0.19.3+ds1-4/.github/workflows/build.yml 0.21.3-0ubuntu1/.github/workflows/build.yml
--- 0.19.3+ds1-4/.github/workflows/build.yml	2024-12-17 11:25:19.000000000 +0000
+++ 0.21.3-0ubuntu1/.github/workflows/build.yml	2025-03-17 16:14:25.000000000 +0000
@@ -28,8 +28,8 @@ on:
       - 'docs/**'
 
 env:
-  BUILDX_VERSION: "latest"
-  BUILDKIT_IMAGE: "moby/buildkit:latest"
+  SETUP_BUILDX_VERSION: "edge"
+  SETUP_BUILDKIT_IMAGE: "moby/buildkit:latest"
   SCOUT_VERSION: "1.11.0"
   REPO_SLUG: "docker/buildx-bin"
   DESTDIR: "./bin"
@@ -54,9 +54,9 @@ jobs:
           - master
           - latest
           - buildx-stable-1
+          - v0.19.0
+          - v0.18.2
           - v0.17.2
-          - v0.16.0
-          - v0.15.2
         worker:
           - docker-container
           - remote
@@ -121,13 +121,14 @@ jobs:
         name: Set up Docker Buildx
         uses: docker/setup-buildx-action@v3
         with:
-          version: ${{ env.BUILDX_VERSION }}
-          driver-opts: image=${{ env.BUILDKIT_IMAGE }}
+          version: ${{ env.SETUP_BUILDX_VERSION }}
+          driver-opts: image=${{ env.SETUP_BUILDKIT_IMAGE }}
           buildkitd-flags: --debug
       -
         name: Build test image
-        uses: docker/bake-action@v5
+        uses: docker/bake-action@v6
         with:
+          source: .
           targets: integration-test
           set: |
             *.output=type=docker,name=${{ env.TEST_IMAGE_ID }}
@@ -174,6 +175,11 @@ jobs:
       SKIP_INTEGRATION_TESTS: 1
     steps:
       -
+        name: Setup Git config
+        run: |
+          git config --global core.autocrlf false
+          git config --global core.eol lf
+      -
         name: Checkout
         uses: actions/checkout@v4
       -
@@ -234,6 +240,65 @@ jobs:
           name: test-reports-${{ env.TESTREPORTS_NAME }}
           path: ${{ env.TESTREPORTS_BASEDIR }}
 
+  test-bsd-unit:
+    runs-on: ubuntu-22.04
+    continue-on-error: true
+    strategy:
+      fail-fast: false
+      matrix:
+        os:
+          - freebsd
+          - openbsd
+    steps:
+      -
+        name: Prepare
+        run: |
+          echo "VAGRANT_FILE=hack/Vagrantfile.${{ matrix.os }}" >> $GITHUB_ENV
+      -
+        name: Checkout
+        uses: actions/checkout@v4
+      -
+        name: Cache Vagrant boxes
+        uses: actions/cache@v4
+        with:
+          path: ~/.vagrant.d/boxes
+          key: ${{ runner.os }}-vagrant-${{ matrix.os }}-${{ hashFiles(env.VAGRANT_FILE) }}
+          restore-keys: |
+            ${{ runner.os }}-vagrant-${{ matrix.os }}-
+      -
+        name: Install vagrant
+        run: |
+          set -x
+          wget -O - https://apt.releases.hashicorp.com/gpg | sudo gpg --dearmor -o /usr/share/keyrings/hashicorp-archive-keyring.gpg
+          echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/hashicorp-archive-keyring.gpg] https://apt.releases.hashicorp.com $(lsb_release -cs) main" | sudo tee /etc/apt/sources.list.d/hashicorp.list
+          sudo apt-get update
+          sudo apt-get install -y libvirt-dev libvirt-daemon libvirt-daemon-system vagrant vagrant-libvirt ruby-libvirt
+          sudo systemctl enable --now libvirtd
+          sudo chmod a+rw /var/run/libvirt/libvirt-sock
+          vagrant plugin install vagrant-libvirt
+          vagrant --version
+      -
+        name: Set up vagrant
+        run: |
+          ln -sf ${{ env.VAGRANT_FILE }} Vagrantfile
+          vagrant up --no-tty
+      -
+        name: Test
+        run: |
+          vagrant ssh -- "cd /vagrant; SKIP_INTEGRATION_TESTS=1 go test -mod=vendor -coverprofile=coverage.txt -covermode=atomic ${{ env.TESTFLAGS }} ./..."
+          vagrant ssh -c "sudo cat /vagrant/coverage.txt" > coverage.txt
+      -
+        name: Upload coverage
+        if: always()
+        uses: codecov/codecov-action@v5
+        with:
+          files: ./coverage.txt
+          env_vars: RUNNER_OS
+          flags: unit,${{ matrix.os }}
+          token: ${{ secrets.CODECOV_TOKEN }}
+        env:
+          RUNNER_OS: ${{ matrix.os }}
+
   govulncheck:
     runs-on: ubuntu-24.04
     permissions:
@@ -243,18 +308,15 @@ jobs:
       security-events: write
     steps:
       -
-        name: Checkout
-        uses: actions/checkout@v4
-      -
         name: Set up Docker Buildx
         uses: docker/setup-buildx-action@v3
         with:
-          version: ${{ env.BUILDX_VERSION }}
-          driver-opts: image=${{ env.BUILDKIT_IMAGE }}
+          version: ${{ env.SETUP_BUILDX_VERSION }}
+          driver-opts: image=${{ env.SETUP_BUILDKIT_IMAGE }}
           buildkitd-flags: --debug
       -
         name: Run
-        uses: docker/bake-action@v5
+        uses: docker/bake-action@v6
         with:
           targets: govulncheck
         env:
@@ -308,8 +370,8 @@ jobs:
         name: Set up Docker Buildx
         uses: docker/setup-buildx-action@v3
         with:
-          version: ${{ env.BUILDX_VERSION }}
-          driver-opts: image=${{ env.BUILDKIT_IMAGE }}
+          version: ${{ env.SETUP_BUILDX_VERSION }}
+          driver-opts: image=${{ env.SETUP_BUILDKIT_IMAGE }}
           buildkitd-flags: --debug
       -
         name: Build
@@ -335,17 +397,14 @@ jobs:
     if: ${{ github.event_name != 'pull_request' && github.repository == 'docker/buildx' }}
     steps:
       -
-        name: Checkout
-        uses: actions/checkout@v4
-      -
         name: Set up QEMU
         uses: docker/setup-qemu-action@v3
       -
         name: Set up Docker Buildx
         uses: docker/setup-buildx-action@v3
         with:
-          version: ${{ env.BUILDX_VERSION }}
-          driver-opts: image=${{ env.BUILDKIT_IMAGE }}
+          version: ${{ env.SETUP_BUILDX_VERSION }}
+          driver-opts: image=${{ env.SETUP_BUILDKIT_IMAGE }}
           buildkitd-flags: --debug
       -
         name: Docker meta
@@ -368,11 +427,11 @@ jobs:
           password: ${{ secrets.DOCKERPUBLICBOT_WRITE_PAT }}
       -
         name: Build and push image
-        uses: docker/bake-action@v5
+        uses: docker/bake-action@v6
         with:
           files: |
             ./docker-bake.hcl
-            ${{ steps.meta.outputs.bake-file }}
+            cwd://${{ steps.meta.outputs.bake-file }}
           targets: image-cross
           push: ${{ github.event_name != 'pull_request' }}
           sbom: true
@@ -392,9 +451,6 @@ jobs:
       - bin-image
     steps:
       -
-        name: Checkout
-        uses: actions/checkout@v4
-      -
         name: Login to DockerHub
         uses: docker/login-action@v3
         with:
@@ -448,7 +504,7 @@ jobs:
       -
         name: GitHub Release
         if: startsWith(github.ref, 'refs/tags/v')
-        uses: softprops/action-gh-release@01570a1f39cb168c169c802c3bceb9e93fb10974  # v2.1.0
+        uses: softprops/action-gh-release@c95fe1489396fe8a9eb87c0abf8aa5b2ef267fda  # v2.2.1
         env:
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
         with:
diff -pruN 0.19.3+ds1-4/.github/workflows/docs-release.yml 0.21.3-0ubuntu1/.github/workflows/docs-release.yml
--- 0.19.3+ds1-4/.github/workflows/docs-release.yml	2024-12-17 11:25:19.000000000 +0000
+++ 0.21.3-0ubuntu1/.github/workflows/docs-release.yml	2025-03-17 16:14:25.000000000 +0000
@@ -19,6 +19,10 @@ on:
     types:
       - released
 
+env:
+  SETUP_BUILDX_VERSION: "edge"
+  SETUP_BUILDKIT_IMAGE: "moby/buildkit:latest"
+
 jobs:
   open-pr:
     runs-on: ubuntu-24.04
@@ -46,9 +50,13 @@ jobs:
       -
         name: Set up Docker Buildx
         uses: docker/setup-buildx-action@v3
+        with:
+          version: ${{ env.SETUP_BUILDX_VERSION }}
+          driver-opts: image=${{ env.SETUP_BUILDKIT_IMAGE }}
+          buildkitd-flags: --debug
       -
         name: Generate yaml
-        uses: docker/bake-action@v5
+        uses: docker/bake-action@v6
         with:
           source: ${{ github.server_url }}/${{ github.repository }}.git#${{ env.RELEASE_NAME }}
           targets: update-docs
@@ -69,7 +77,7 @@ jobs:
           VENDOR_MODULE: github.com/docker/buildx@${{ env.RELEASE_NAME }}
       -
         name: Create PR on docs repo
-        uses: peter-evans/create-pull-request@5e914681df9dc83aa4e4905692ca88beb2f9e91f  # v7.0.5
+        uses: peter-evans/create-pull-request@67ccf781d68cd99b580ae25a5c18a1cc84ffff1f  # v7.0.6
         with:
           token: ${{ secrets.GHPAT_DOCS_DISPATCH }}
           push-to-fork: docker-tools-robot/docker.github.io
diff -pruN 0.19.3+ds1-4/.github/workflows/docs-upstream.yml 0.21.3-0ubuntu1/.github/workflows/docs-upstream.yml
--- 0.19.3+ds1-4/.github/workflows/docs-upstream.yml	2024-12-17 11:25:19.000000000 +0000
+++ 0.21.3-0ubuntu1/.github/workflows/docs-upstream.yml	2025-03-17 16:14:25.000000000 +0000
@@ -29,21 +29,24 @@ on:
       - '.github/workflows/docs-upstream.yml'
       - 'docs/**'
 
+env:
+  SETUP_BUILDX_VERSION: "edge"
+  SETUP_BUILDKIT_IMAGE: "moby/buildkit:latest"
+
 jobs:
   docs-yaml:
     runs-on: ubuntu-24.04
     steps:
       -
-        name: Checkout
-        uses: actions/checkout@v4
-      -
         name: Set up Docker Buildx
         uses: docker/setup-buildx-action@v3
         with:
-          version: latest
+          version: ${{ env.SETUP_BUILDX_VERSION }}
+          driver-opts: image=${{ env.SETUP_BUILDKIT_IMAGE }}
+          buildkitd-flags: --debug
       -
         name: Build reference YAML docs
-        uses: docker/bake-action@v5
+        uses: docker/bake-action@v6
         with:
           targets: update-docs
           provenance: false
@@ -62,7 +65,7 @@ jobs:
           retention-days: 1
 
   validate:
-    uses: docker/docs/.github/workflows/validate-upstream.yml@6b73b05acb21edf7995cc5b3c6672d8e314cee7a  # pin for artifact v4 support: https://github.com/docker/docs/pull/19220
+    uses: docker/docs/.github/workflows/validate-upstream.yml@main
     needs:
       - docs-yaml
     with:
diff -pruN 0.19.3+ds1-4/.github/workflows/e2e.yml 0.21.3-0ubuntu1/.github/workflows/e2e.yml
--- 0.19.3+ds1-4/.github/workflows/e2e.yml	2024-12-17 11:25:19.000000000 +0000
+++ 0.21.3-0ubuntu1/.github/workflows/e2e.yml	2025-03-17 16:14:25.000000000 +0000
@@ -26,6 +26,8 @@ on:
       - 'docs/**'
 
 env:
+  SETUP_BUILDX_VERSION: "edge"
+  SETUP_BUILDKIT_IMAGE: "moby/buildkit:latest"
   DESTDIR: "./bin"
   K3S_VERSION: "v1.21.2-k3s1"
 
@@ -33,16 +35,16 @@ jobs:
   build:
     runs-on: ubuntu-24.04
     steps:
-      - name: Checkout
-        uses: actions/checkout@v4
       -
         name: Set up Docker Buildx
         uses: docker/setup-buildx-action@v3
         with:
-          version: latest
+          version: ${{ env.SETUP_BUILDX_VERSION }}
+          driver-opts: image=${{ env.SETUP_BUILDKIT_IMAGE }}
+          buildkitd-flags: --debug
       -
         name: Build
-        uses: docker/bake-action@v5
+        uses: docker/bake-action@v6
         with:
           targets: binaries
           set: |
@@ -175,3 +177,78 @@ jobs:
           DRIVER_OPT: ${{ matrix.driver-opt }}
           ENDPOINT: ${{ matrix.endpoint }}
           PLATFORMS: ${{ matrix.platforms }}
+
+  bake:
+    runs-on: ubuntu-24.04
+    needs:
+      - build
+    env:
+      DOCKER_BUILD_CHECKS_ANNOTATIONS: false
+      DOCKER_BUILD_SUMMARY: false
+    strategy:
+      fail-fast: false
+      matrix:
+        include:
+          -
+            # https://github.com/docker/bake-action/blob/v5.11.0/.github/workflows/ci.yml#L227-L237
+            source: "https://github.com/docker/bake-action.git#v5.11.0:test/go"
+            overrides: |
+              *.output=/tmp/bake-build
+          -
+            # https://github.com/tonistiigi/xx/blob/2fc85604e7280bfb3f626569bd4c5413c43eb4af/.github/workflows/ld.yml#L90-L98
+            source: "https://github.com/tonistiigi/xx.git#2fc85604e7280bfb3f626569bd4c5413c43eb4af"
+            targets: |
+              ld64-static-tgz
+            overrides: |
+              ld64-static-tgz.output=type=local,dest=./dist
+              ld64-static-tgz.platform=linux/amd64
+              ld64-static-tgz.cache-from=type=gha,scope=xx-ld64-static-tgz
+              ld64-static-tgz.cache-to=type=gha,scope=xx-ld64-static-tgz
+          -
+            # https://github.com/moby/buildkit-bench/blob/54c194011c4fc99a94aa75d4b3d4f3ffd4c4ce27/docker-bake.hcl#L154-L160
+            source: "https://github.com/moby/buildkit-bench.git#54c194011c4fc99a94aa75d4b3d4f3ffd4c4ce27"
+            targets: |
+              tests-buildkit
+            envs: |
+              BUILDKIT_REFS=v0.18.2
+    steps:
+      -
+        name: Checkout
+        uses: actions/checkout@v4
+      -
+        name: Expose GitHub Runtime
+        uses: crazy-max/ghaction-github-runtime@v3
+      -
+        name: Environment variables
+        if: matrix.envs != ''
+        run: |
+          for l in "${{ matrix.envs }}"; do
+            echo "${l?}" >> $GITHUB_ENV
+          done
+      -
+        name: Set up QEMU
+        uses: docker/setup-qemu-action@v3
+      -
+        name: Install buildx
+        uses: actions/download-artifact@v4
+        with:
+          name: binary
+          path: /home/runner/.docker/cli-plugins
+      -
+        name: Fix perms and check
+        run: |
+          chmod +x /home/runner/.docker/cli-plugins/docker-buildx
+          docker buildx version
+      -
+        name: Set up Docker Buildx
+        uses: docker/setup-buildx-action@v3
+        with:
+          driver-opts: image=${{ env.SETUP_BUILDKIT_IMAGE }}
+          buildkitd-flags: --debug
+      -
+        name: Build
+        uses: docker/bake-action@v6
+        with:
+          source: ${{ matrix.source }}
+          targets: ${{ matrix.targets }}
+          set: ${{ matrix.overrides }}
diff -pruN 0.19.3+ds1-4/.github/workflows/validate.yml 0.21.3-0ubuntu1/.github/workflows/validate.yml
--- 0.19.3+ds1-4/.github/workflows/validate.yml	2024-12-17 11:25:19.000000000 +0000
+++ 0.21.3-0ubuntu1/.github/workflows/validate.yml	2025-03-17 16:14:25.000000000 +0000
@@ -25,6 +25,10 @@ on:
     paths-ignore:
       - '.github/releases.json'
 
+env:
+  SETUP_BUILDX_VERSION: "edge"
+  SETUP_BUILDKIT_IMAGE: "moby/buildkit:latest"
+
 jobs:
   prepare:
     runs-on: ubuntu-24.04
@@ -91,16 +95,15 @@ jobs:
             echo "GOLANGCI_LINT_MULTIPLATFORM=1" >> $GITHUB_ENV
           fi
       -
-        name: Checkout
-        uses: actions/checkout@v4
-      -
         name: Set up Docker Buildx
         uses: docker/setup-buildx-action@v3
         with:
-          version: latest
+          version: ${{ env.SETUP_BUILDX_VERSION }}
+          driver-opts: image=${{ env.SETUP_BUILDKIT_IMAGE }}
+          buildkitd-flags: --debug
       -
         name: Validate
-        uses: docker/bake-action@v5
+        uses: docker/bake-action@v6
         with:
           targets: ${{ matrix.target }}
           set: |
diff -pruN 0.19.3+ds1-4/.golangci.yml 0.21.3-0ubuntu1/.golangci.yml
--- 0.19.3+ds1-4/.golangci.yml	2024-12-17 11:25:19.000000000 +0000
+++ 0.21.3-0ubuntu1/.golangci.yml	2025-03-17 16:14:25.000000000 +0000
@@ -43,6 +43,9 @@ linters-settings:
       # buildkit errdefs package (or vice-versa).
       - pkg: "github.com/containerd/errdefs"
         alias: "cerrdefs"
+      # Use a consistent alias to prevent confusion with "github.com/moby/buildkit/client"
+      - pkg: "github.com/docker/docker/client"
+        alias: "dockerclient"
       - pkg: "github.com/opencontainers/image-spec/specs-go/v1"
         alias: "ocispecs"
       - pkg: "github.com/opencontainers/go-digest"
diff -pruN 0.19.3+ds1-4/Dockerfile 0.21.3-0ubuntu1/Dockerfile
--- 0.19.3+ds1-4/Dockerfile	2024-12-17 11:25:19.000000000 +0000
+++ 0.21.3-0ubuntu1/Dockerfile	2025-03-17 16:14:25.000000000 +0000
@@ -1,19 +1,20 @@
 # syntax=docker/dockerfile:1
 
 ARG GO_VERSION=1.23
+ARG ALPINE_VERSION=3.21
 ARG XX_VERSION=1.6.1
 
 # for testing
-ARG DOCKER_VERSION=27.4.0-rc.2
+ARG DOCKER_VERSION=28.0.0-rc.1
 ARG DOCKER_VERSION_ALT_26=26.1.3
 ARG DOCKER_CLI_VERSION=${DOCKER_VERSION}
 ARG GOTESTSUM_VERSION=v1.12.0
 ARG REGISTRY_VERSION=2.8.3
-ARG BUILDKIT_VERSION=v0.17.2
-ARG UNDOCK_VERSION=0.8.0
+ARG BUILDKIT_VERSION=v0.19.0
+ARG UNDOCK_VERSION=0.9.0
 
 FROM --platform=$BUILDPLATFORM tonistiigi/xx:${XX_VERSION} AS xx
-FROM --platform=$BUILDPLATFORM golang:${GO_VERSION}-alpine AS golatest
+FROM --platform=$BUILDPLATFORM golang:${GO_VERSION}-alpine${ALPINE_VERSION} AS golatest
 FROM moby/moby-bin:$DOCKER_VERSION AS docker-engine
 FROM dockereng/cli-bin:$DOCKER_CLI_VERSION AS docker-cli
 FROM moby/moby-bin:$DOCKER_VERSION_ALT_26 AS docker-engine-alt
@@ -138,7 +139,7 @@ FROM integration-test-base AS integratio
 COPY . .
 
 # Release
-FROM --platform=$BUILDPLATFORM alpine AS releaser
+FROM --platform=$BUILDPLATFORM alpine:${ALPINE_VERSION} AS releaser
 WORKDIR /work
 ARG TARGETPLATFORM
 RUN --mount=from=binaries \
@@ -153,7 +154,7 @@ COPY --from=releaser /out/ /
 
 # Shell
 FROM docker:$DOCKER_VERSION AS dockerd-release
-FROM alpine AS shell
+FROM alpine:${ALPINE_VERSION} AS shell
 RUN apk add --no-cache iptables tmux git vim less openssh
 RUN mkdir -p /usr/local/lib/docker/cli-plugins && ln -s /usr/local/bin/buildx /usr/local/lib/docker/cli-plugins/docker-buildx
 COPY ./hack/demo-env/entrypoint.sh /usr/local/bin
diff -pruN 0.19.3+ds1-4/bake/bake.go 0.21.3-0ubuntu1/bake/bake.go
--- 0.19.3+ds1-4/bake/bake.go	2024-12-17 11:25:19.000000000 +0000
+++ 0.21.3-0ubuntu1/bake/bake.go	2025-03-17 16:14:25.000000000 +0000
@@ -2,6 +2,7 @@ package bake
 
 import (
 	"context"
+	"encoding"
 	"io"
 	"os"
 	"path"
@@ -26,9 +27,7 @@ import (
 	"github.com/moby/buildkit/client"
 	"github.com/moby/buildkit/client/llb"
 	"github.com/moby/buildkit/session/auth/authprovider"
-	"github.com/moby/buildkit/util/entitlements"
 	"github.com/pkg/errors"
-	"github.com/tonistiigi/go-csvvalue"
 	"github.com/zclconf/go-cty/cty"
 	"github.com/zclconf/go-cty/cty/convert"
 )
@@ -53,8 +52,8 @@ func defaultFilenames() []string {
 	names = append(names, composecli.DefaultFileNames...)
 	names = append(names, []string{
 		"docker-bake.json",
-		"docker-bake.override.json",
 		"docker-bake.hcl",
+		"docker-bake.override.json",
 		"docker-bake.override.hcl",
 	}...)
 	return names
@@ -207,8 +206,9 @@ func ReadTargets(ctx context.Context, fi
 	if err != nil {
 		return nil, nil, err
 	}
-	m := map[string]*Target{}
-	n := map[string]*Group{}
+
+	targetsMap := map[string]*Target{}
+	groupsMap := map[string]*Group{}
 	for _, target := range targets {
 		ts, gs := c.ResolveGroup(target)
 		for _, tname := range ts {
@@ -217,13 +217,13 @@ func ReadTargets(ctx context.Context, fi
 				return nil, nil, err
 			}
 			if t != nil {
-				m[tname] = t
+				targetsMap[tname] = t
 			}
 		}
 		for _, gname := range gs {
 			for _, group := range c.Groups {
 				if group.Name == gname {
-					n[gname] = group
+					groupsMap[gname] = group
 					break
 				}
 			}
@@ -231,25 +231,26 @@ func ReadTargets(ctx context.Context, fi
 	}
 
 	for _, target := range targets {
-		if target == "default" {
+		if _, ok := groupsMap["default"]; ok && target == "default" {
 			continue
 		}
-		if _, ok := n["default"]; !ok {
-			n["default"] = &Group{Name: "default"}
+		if _, ok := groupsMap["default"]; !ok {
+			groupsMap["default"] = &Group{Name: "default"}
 		}
-		n["default"].Targets = append(n["default"].Targets, target)
+		groupsMap["default"].Targets = append(groupsMap["default"].Targets, target)
 	}
-	if g, ok := n["default"]; ok {
+	if g, ok := groupsMap["default"]; ok {
 		g.Targets = dedupSlice(g.Targets)
+		sort.Strings(g.Targets)
 	}
 
-	for name, t := range m {
-		if err := c.loadLinks(name, t, m, o, nil, ent); err != nil {
+	for name, t := range targetsMap {
+		if err := c.loadLinks(name, t, targetsMap, o, nil, ent); err != nil {
 			return nil, nil, err
 		}
 	}
 
-	return m, n, nil
+	return targetsMap, groupsMap, nil
 }
 
 func dedupSlice(s []string) []string {
@@ -496,7 +497,9 @@ func (c Config) loadLinks(name string, t
 				if err != nil {
 					return err
 				}
-				t2.Outputs = []string{"type=cacheonly"}
+				t2.Outputs = []*buildflags.ExportEntry{
+					{Type: "cacheonly"},
+				}
 				t2.linked = true
 				m[target] = t2
 			}
@@ -512,8 +515,8 @@ func (c Config) loadLinks(name string, t
 			}
 
 			if len(t.Platforms) > 1 && len(t2.Platforms) > 1 {
-				if !sliceEqual(t.Platforms, t2.Platforms) {
-					return errors.Errorf("target %s can't be used by %s because it is defined for different platforms %v and %v", target, name, t2.Platforms, t.Platforms)
+				if !isSubset(t.Platforms, t2.Platforms) {
+					return errors.Errorf("target %s can't be used by %s because its platforms %v are not a subset of %v", target, name, t.Platforms, t2.Platforms)
 				}
 			}
 		}
@@ -551,6 +554,8 @@ func (c Config) newOverrides(v []string)
 
 			o := t[kk[1]]
 
+			// IMPORTANT: if you add more fields here, do not forget to update
+			// docs/bake-reference.md and https://docs.docker.com/build/bake/overrides/
 			switch keys[1] {
 			case "output", "cache-to", "cache-from", "tags", "platform", "secrets", "ssh", "attest", "entitlements", "network":
 				if len(parts) == 2 {
@@ -695,30 +700,30 @@ type Target struct {
 	// Inherits is the only field that cannot be overridden with --set
 	Inherits []string `json:"inherits,omitempty" hcl:"inherits,optional" cty:"inherits"`
 
-	Annotations      []string           `json:"annotations,omitempty" hcl:"annotations,optional" cty:"annotations"`
-	Attest           []string           `json:"attest,omitempty" hcl:"attest,optional" cty:"attest"`
-	Context          *string            `json:"context,omitempty" hcl:"context,optional" cty:"context"`
-	Contexts         map[string]string  `json:"contexts,omitempty" hcl:"contexts,optional" cty:"contexts"`
-	Dockerfile       *string            `json:"dockerfile,omitempty" hcl:"dockerfile,optional" cty:"dockerfile"`
-	DockerfileInline *string            `json:"dockerfile-inline,omitempty" hcl:"dockerfile-inline,optional" cty:"dockerfile-inline"`
-	Args             map[string]*string `json:"args,omitempty" hcl:"args,optional" cty:"args"`
-	Labels           map[string]*string `json:"labels,omitempty" hcl:"labels,optional" cty:"labels"`
-	Tags             []string           `json:"tags,omitempty" hcl:"tags,optional" cty:"tags"`
-	CacheFrom        []string           `json:"cache-from,omitempty"  hcl:"cache-from,optional" cty:"cache-from"`
-	CacheTo          []string           `json:"cache-to,omitempty"  hcl:"cache-to,optional" cty:"cache-to"`
-	Target           *string            `json:"target,omitempty" hcl:"target,optional" cty:"target"`
-	Secrets          []string           `json:"secret,omitempty" hcl:"secret,optional" cty:"secret"`
-	SSH              []string           `json:"ssh,omitempty" hcl:"ssh,optional" cty:"ssh"`
-	Platforms        []string           `json:"platforms,omitempty" hcl:"platforms,optional" cty:"platforms"`
-	Outputs          []string           `json:"output,omitempty" hcl:"output,optional" cty:"output"`
-	Pull             *bool              `json:"pull,omitempty" hcl:"pull,optional" cty:"pull"`
-	NoCache          *bool              `json:"no-cache,omitempty" hcl:"no-cache,optional" cty:"no-cache"`
-	NetworkMode      *string            `json:"network,omitempty" hcl:"network,optional" cty:"network"`
-	NoCacheFilter    []string           `json:"no-cache-filter,omitempty" hcl:"no-cache-filter,optional" cty:"no-cache-filter"`
-	ShmSize          *string            `json:"shm-size,omitempty" hcl:"shm-size,optional"`
-	Ulimits          []string           `json:"ulimits,omitempty" hcl:"ulimits,optional"`
-	Call             *string            `json:"call,omitempty" hcl:"call,optional" cty:"call"`
-	Entitlements     []string           `json:"entitlements,omitempty" hcl:"entitlements,optional" cty:"entitlements"`
+	Annotations      []string                `json:"annotations,omitempty" hcl:"annotations,optional" cty:"annotations"`
+	Attest           buildflags.Attests      `json:"attest,omitempty" hcl:"attest,optional" cty:"attest"`
+	Context          *string                 `json:"context,omitempty" hcl:"context,optional" cty:"context"`
+	Contexts         map[string]string       `json:"contexts,omitempty" hcl:"contexts,optional" cty:"contexts"`
+	Dockerfile       *string                 `json:"dockerfile,omitempty" hcl:"dockerfile,optional" cty:"dockerfile"`
+	DockerfileInline *string                 `json:"dockerfile-inline,omitempty" hcl:"dockerfile-inline,optional" cty:"dockerfile-inline"`
+	Args             map[string]*string      `json:"args,omitempty" hcl:"args,optional" cty:"args"`
+	Labels           map[string]*string      `json:"labels,omitempty" hcl:"labels,optional" cty:"labels"`
+	Tags             []string                `json:"tags,omitempty" hcl:"tags,optional" cty:"tags"`
+	CacheFrom        buildflags.CacheOptions `json:"cache-from,omitempty" hcl:"cache-from,optional" cty:"cache-from"`
+	CacheTo          buildflags.CacheOptions `json:"cache-to,omitempty" hcl:"cache-to,optional" cty:"cache-to"`
+	Target           *string                 `json:"target,omitempty" hcl:"target,optional" cty:"target"`
+	Secrets          buildflags.Secrets      `json:"secret,omitempty" hcl:"secret,optional" cty:"secret"`
+	SSH              buildflags.SSHKeys      `json:"ssh,omitempty" hcl:"ssh,optional" cty:"ssh"`
+	Platforms        []string                `json:"platforms,omitempty" hcl:"platforms,optional" cty:"platforms"`
+	Outputs          buildflags.Exports      `json:"output,omitempty" hcl:"output,optional" cty:"output"`
+	Pull             *bool                   `json:"pull,omitempty" hcl:"pull,optional" cty:"pull"`
+	NoCache          *bool                   `json:"no-cache,omitempty" hcl:"no-cache,optional" cty:"no-cache"`
+	NetworkMode      *string                 `json:"network,omitempty" hcl:"network,optional" cty:"network"`
+	NoCacheFilter    []string                `json:"no-cache-filter,omitempty" hcl:"no-cache-filter,optional" cty:"no-cache-filter"`
+	ShmSize          *string                 `json:"shm-size,omitempty" hcl:"shm-size,optional" cty:"shm-size"`
+	Ulimits          []string                `json:"ulimits,omitempty" hcl:"ulimits,optional" cty:"ulimits"`
+	Call             *string                 `json:"call,omitempty" hcl:"call,optional" cty:"call"`
+	Entitlements     []string                `json:"entitlements,omitempty" hcl:"entitlements,optional" cty:"entitlements"`
 	// IMPORTANT: if you add more fields here, do not forget to update newOverrides/AddOverrides and docs/bake-reference.md.
 
 	// linked is a private field to mark a target used as a linked one
@@ -733,23 +738,23 @@ var (
 )
 
 func (t *Target) normalize() {
-	t.Annotations = removeDupes(t.Annotations)
-	t.Attest = removeAttestDupes(t.Attest)
-	t.Tags = removeDupes(t.Tags)
-	t.Secrets = removeDupes(t.Secrets)
-	t.SSH = removeDupes(t.SSH)
-	t.Platforms = removeDupes(t.Platforms)
-	t.CacheFrom = removeDupes(t.CacheFrom)
-	t.CacheTo = removeDupes(t.CacheTo)
-	t.Outputs = removeDupes(t.Outputs)
-	t.NoCacheFilter = removeDupes(t.NoCacheFilter)
-	t.Ulimits = removeDupes(t.Ulimits)
+	t.Annotations = removeDupesStr(t.Annotations)
+	t.Attest = t.Attest.Normalize()
+	t.Tags = removeDupesStr(t.Tags)
+	t.Secrets = t.Secrets.Normalize()
+	t.SSH = t.SSH.Normalize()
+	t.Platforms = removeDupesStr(t.Platforms)
+	t.CacheFrom = t.CacheFrom.Normalize()
+	t.CacheTo = t.CacheTo.Normalize()
+	t.Outputs = t.Outputs.Normalize()
+	t.NoCacheFilter = removeDupesStr(t.NoCacheFilter)
+	t.Ulimits = removeDupesStr(t.Ulimits)
 
 	if t.NetworkMode != nil && *t.NetworkMode == "host" {
 		t.Entitlements = append(t.Entitlements, "network.host")
 	}
 
-	t.Entitlements = removeDupes(t.Entitlements)
+	t.Entitlements = removeDupesStr(t.Entitlements)
 
 	for k, v := range t.Contexts {
 		if v == "" {
@@ -808,20 +813,19 @@ func (t *Target) Merge(t2 *Target) {
 		t.Annotations = append(t.Annotations, t2.Annotations...)
 	}
 	if t2.Attest != nil { // merge
-		t.Attest = append(t.Attest, t2.Attest...)
-		t.Attest = removeAttestDupes(t.Attest)
+		t.Attest = t.Attest.Merge(t2.Attest)
 	}
 	if t2.Secrets != nil { // merge
-		t.Secrets = append(t.Secrets, t2.Secrets...)
+		t.Secrets = t.Secrets.Merge(t2.Secrets)
 	}
 	if t2.SSH != nil { // merge
-		t.SSH = append(t.SSH, t2.SSH...)
+		t.SSH = t.SSH.Merge(t2.SSH)
 	}
 	if t2.Platforms != nil { // no merge
 		t.Platforms = t2.Platforms
 	}
 	if t2.CacheFrom != nil { // merge
-		t.CacheFrom = append(t.CacheFrom, t2.CacheFrom...)
+		t.CacheFrom = t.CacheFrom.Merge(t2.CacheFrom)
 	}
 	if t2.CacheTo != nil { // no merge
 		t.CacheTo = t2.CacheTo
@@ -857,6 +861,8 @@ func (t *Target) Merge(t2 *Target) {
 }
 
 func (t *Target) AddOverrides(overrides map[string]Override, ent *EntitlementConf) error {
+	// IMPORTANT: if you add more fields here, do not forget to update
+	// docs/bake-reference.md and https://docs.docker.com/build/bake/overrides/
 	for key, o := range overrides {
 		value := o.Value
 		keys := strings.SplitN(key, ".", 2)
@@ -892,12 +898,12 @@ func (t *Target) AddOverrides(overrides
 		case "tags":
 			t.Tags = o.ArrValue
 		case "cache-from":
-			t.CacheFrom = o.ArrValue
 			cacheFrom, err := buildflags.ParseCacheEntry(o.ArrValue)
 			if err != nil {
 				return err
 			}
-			for _, c := range cacheFrom {
+			t.CacheFrom = cacheFrom
+			for _, c := range t.CacheFrom {
 				if c.Type == "local" {
 					if v, ok := c.Attrs["src"]; ok {
 						ent.FSRead = append(ent.FSRead, v)
@@ -905,12 +911,12 @@ func (t *Target) AddOverrides(overrides
 				}
 			}
 		case "cache-to":
-			t.CacheTo = o.ArrValue
 			cacheTo, err := buildflags.ParseCacheEntry(o.ArrValue)
 			if err != nil {
 				return err
 			}
-			for _, c := range cacheTo {
+			t.CacheTo = cacheTo
+			for _, c := range t.CacheTo {
 				if c.Type == "local" {
 					if v, ok := c.Attrs["dest"]; ok {
 						ent.FSWrite = append(ent.FSWrite, v)
@@ -922,34 +928,34 @@ func (t *Target) AddOverrides(overrides
 		case "call":
 			t.Call = &value
 		case "secrets":
-			t.Secrets = o.ArrValue
-			secrets, err := buildflags.ParseSecretSpecs(o.ArrValue)
+			secrets, err := parseArrValue[buildflags.Secret](o.ArrValue)
 			if err != nil {
 				return errors.Wrap(err, "invalid value for outputs")
 			}
-			for _, s := range secrets {
+			t.Secrets = secrets
+			for _, s := range t.Secrets {
 				if s.FilePath != "" {
 					ent.FSRead = append(ent.FSRead, s.FilePath)
 				}
 			}
 		case "ssh":
-			t.SSH = o.ArrValue
-			ssh, err := buildflags.ParseSSHSpecs(o.ArrValue)
+			ssh, err := parseArrValue[buildflags.SSH](o.ArrValue)
 			if err != nil {
 				return errors.Wrap(err, "invalid value for outputs")
 			}
-			for _, s := range ssh {
+			t.SSH = ssh
+			for _, s := range t.SSH {
 				ent.FSRead = append(ent.FSRead, s.Paths...)
 			}
 		case "platform":
 			t.Platforms = o.ArrValue
 		case "output":
-			t.Outputs = o.ArrValue
-			outputs, err := buildflags.ParseExports(o.ArrValue)
+			outputs, err := parseArrValue[buildflags.ExportEntry](o.ArrValue)
 			if err != nil {
 				return errors.Wrap(err, "invalid value for outputs")
 			}
-			for _, o := range outputs {
+			t.Outputs = outputs
+			for _, o := range t.Outputs {
 				if o.Destination != "" {
 					ent.FSWrite = append(ent.FSWrite, o.Destination)
 				}
@@ -966,7 +972,11 @@ func (t *Target) AddOverrides(overrides
 		case "annotations":
 			t.Annotations = append(t.Annotations, o.ArrValue...)
 		case "attest":
-			t.Attest = append(t.Attest, o.ArrValue...)
+			attest, err := parseArrValue[buildflags.Attest](o.ArrValue)
+			if err != nil {
+				return errors.Wrap(err, "invalid value for attest")
+			}
+			t.Attest = t.Attest.Merge(attest)
 		case "no-cache":
 			noCache, err := strconv.ParseBool(value)
 			if err != nil {
@@ -1119,7 +1129,9 @@ func (t *Target) GetName(ectx *hcl.EvalC
 func TargetsToBuildOpt(m map[string]*Target, inp *Input) (map[string]build.Options, error) {
 	// make sure local credentials are loaded multiple times for different targets
 	dockerConfig := config.LoadDefaultConfigFile(os.Stderr)
-	authProvider := authprovider.NewDockerAuthProvider(dockerConfig, nil)
+	authProvider := authprovider.NewDockerAuthProvider(authprovider.DockerAuthProviderConfig{
+		ConfigFile: dockerConfig,
+	})
 
 	m2 := make(map[string]build.Options, len(m))
 	for k, v := range m {
@@ -1171,6 +1183,16 @@ func updateContext(t *build.Inputs, inp
 	t.ContextState = &st
 }
 
+func isRemoteContext(t build.Inputs, inp *Input) bool {
+	if build.IsRemoteURL(t.ContextPath) {
+		return true
+	}
+	if inp != nil && build.IsRemoteURL(inp.URL) && !strings.HasPrefix(t.ContextPath, "cwd://") {
+		return true
+	}
+	return false
+}
+
 func collectLocalPaths(t build.Inputs) []string {
 	var out []string
 	if t.ContextState == nil {
@@ -1330,28 +1352,35 @@ func toBuildOpt(t *Target, inp *Input) (
 	}
 	bo.Platforms = platforms
 
-	secrets, err := buildflags.ParseSecretSpecs(t.Secrets)
-	if err != nil {
-		return nil, err
-	}
-	bo.SecretSpecs = secrets
-
-	secretAttachment, err := controllerapi.CreateSecrets(secrets)
+	secrets := t.Secrets
+	if isRemoteContext(bi, inp) {
+		if _, ok := os.LookupEnv("BUILDX_BAKE_GIT_AUTH_TOKEN"); ok {
+			secrets = append(secrets, &buildflags.Secret{
+				ID:  llb.GitAuthTokenKey,
+				Env: "BUILDX_BAKE_GIT_AUTH_TOKEN",
+			})
+		}
+		if _, ok := os.LookupEnv("BUILDX_BAKE_GIT_AUTH_HEADER"); ok {
+			secrets = append(secrets, &buildflags.Secret{
+				ID:  llb.GitAuthHeaderKey,
+				Env: "BUILDX_BAKE_GIT_AUTH_HEADER",
+			})
+		}
+	}
+	secrets = secrets.Normalize()
+	bo.SecretSpecs = secrets.ToPB()
+	secretAttachment, err := controllerapi.CreateSecrets(bo.SecretSpecs)
 	if err != nil {
 		return nil, err
 	}
 	bo.Session = append(bo.Session, secretAttachment)
 
-	sshSpecs, err := buildflags.ParseSSHSpecs(t.SSH)
-	if err != nil {
-		return nil, err
-	}
-	if len(sshSpecs) == 0 && (buildflags.IsGitSSH(bi.ContextPath) || (inp != nil && buildflags.IsGitSSH(inp.URL))) {
-		sshSpecs = append(sshSpecs, &controllerapi.SSH{ID: "default"})
+	bo.SSHSpecs = t.SSH.ToPB()
+	if len(bo.SSHSpecs) == 0 && buildflags.IsGitSSH(bi.ContextPath) || (inp != nil && buildflags.IsGitSSH(inp.URL)) {
+		bo.SSHSpecs = []*controllerapi.SSH{{ID: "default"}}
 	}
-	bo.SSHSpecs = sshSpecs
 
-	sshAttachment, err := controllerapi.CreateSSH(sshSpecs)
+	sshAttachment, err := controllerapi.CreateSSH(bo.SSHSpecs)
 	if err != nil {
 		return nil, err
 	}
@@ -1367,24 +1396,14 @@ func toBuildOpt(t *Target, inp *Input) (
 		}
 	}
 
-	cacheImports, err := buildflags.ParseCacheEntry(t.CacheFrom)
-	if err != nil {
-		return nil, err
+	if t.CacheFrom != nil {
+		bo.CacheFrom = controllerapi.CreateCaches(t.CacheFrom.ToPB())
 	}
-	bo.CacheFrom = controllerapi.CreateCaches(cacheImports)
-
-	cacheExports, err := buildflags.ParseCacheEntry(t.CacheTo)
-	if err != nil {
-		return nil, err
-	}
-	bo.CacheTo = controllerapi.CreateCaches(cacheExports)
-
-	outputs, err := buildflags.ParseExports(t.Outputs)
-	if err != nil {
-		return nil, err
+	if t.CacheTo != nil {
+		bo.CacheTo = controllerapi.CreateCaches(t.CacheTo.ToPB())
 	}
 
-	bo.Exports, bo.ExportsLocalPathsTemporary, err = controllerapi.CreateExports(outputs)
+	bo.Exports, bo.ExportsLocalPathsTemporary, err = controllerapi.CreateExports(t.Outputs.ToPB())
 	if err != nil {
 		return nil, err
 	}
@@ -1399,11 +1418,7 @@ func toBuildOpt(t *Target, inp *Input) (
 		}
 	}
 
-	attests, err := buildflags.ParseAttests(t.Attest)
-	if err != nil {
-		return nil, err
-	}
-	bo.Attests = controllerapi.CreateAttestations(attests)
+	bo.Attests = controllerapi.CreateAttestations(t.Attest.ToPB())
 
 	bo.SourcePolicy, err = build.ReadSourcePolicy()
 	if err != nil {
@@ -1418,9 +1433,7 @@ func toBuildOpt(t *Target, inp *Input) (
 	}
 	bo.Ulimits = ulimits
 
-	for _, ent := range t.Entitlements {
-		bo.Allow = append(bo.Allow, entitlements.Entitlement(ent))
-	}
+	bo.Allow = append(bo.Allow, t.Entitlements...)
 
 	return bo, nil
 }
@@ -1429,7 +1442,7 @@ func defaultTarget() *Target {
 	return &Target{}
 }
 
-func removeDupes(s []string) []string {
+func removeDupesStr(s []string) []string {
 	i := 0
 	seen := make(map[string]struct{}, len(s))
 	for _, v := range s {
@@ -1446,106 +1459,76 @@ func removeDupes(s []string) []string {
 	return s[:i]
 }
 
-func removeAttestDupes(s []string) []string {
-	res := []string{}
-	m := map[string]int{}
-	for _, v := range s {
-		att, err := buildflags.ParseAttest(v)
-		if err != nil {
-			res = append(res, v)
-			continue
-		}
-
-		if i, ok := m[att.Type]; ok {
-			res[i] = v
-		} else {
-			m[att.Type] = len(res)
-			res = append(res, v)
-		}
-	}
-	return res
-}
-
-func parseOutput(str string) map[string]string {
-	fields, err := csvvalue.Fields(str, nil)
-	if err != nil {
-		return nil
-	}
-	res := map[string]string{}
-	for _, field := range fields {
-		parts := strings.SplitN(field, "=", 2)
-		if len(parts) == 2 {
-			res[parts[0]] = parts[1]
-		}
-	}
-	return res
-}
-
-func parseOutputType(str string) string {
-	if out := parseOutput(str); out != nil {
-		if v, ok := out["type"]; ok {
-			return v
+func setPushOverride(outputs []*buildflags.ExportEntry, push bool) []*buildflags.ExportEntry {
+	if !push {
+		// Disable push for any relevant export types
+		for i := 0; i < len(outputs); {
+			output := outputs[i]
+			switch output.Type {
+			case "registry":
+				// Filter out registry output type
+				outputs[i], outputs[len(outputs)-1] = outputs[len(outputs)-1], outputs[i]
+				outputs = outputs[:len(outputs)-1]
+				continue
+			case "image":
+				// Override push attribute
+				output.Attrs["push"] = "false"
+			}
+			i++
 		}
+		return outputs
 	}
-	return ""
-}
 
-func setPushOverride(outputs []string, push bool) []string {
-	var out []string
+	// Force push to be enabled
 	setPush := true
 	for _, output := range outputs {
-		typ := parseOutputType(output)
-		if typ == "image" || typ == "registry" {
-			// no need to set push if image or registry types already defined
+		if output.Type != "docker" {
+			// If there is an output type that is not docker, don't set "push"
 			setPush = false
-			if typ == "registry" {
-				if !push {
-					// don't set registry output if "push" is false
-					continue
-				}
-				// no need to set "push" attribute to true for registry
-				out = append(out, output)
-				continue
-			}
-			out = append(out, output+",push="+strconv.FormatBool(push))
-		} else {
-			if typ != "docker" {
-				// if there is any output that is not docker, don't set "push"
-				setPush = false
-			}
-			out = append(out, output)
+		}
+
+		// Set push attribute for image
+		if output.Type == "image" {
+			output.Attrs["push"] = "true"
 		}
 	}
-	if push && setPush {
-		out = append(out, "type=image,push=true")
+
+	if setPush {
+		// No existing output that pushes so add one
+		outputs = append(outputs, &buildflags.ExportEntry{
+			Type: "image",
+			Attrs: map[string]string{
+				"push": "true",
+			},
+		})
 	}
-	return out
+	return outputs
 }
 
-func setLoadOverride(outputs []string, load bool) []string {
+func setLoadOverride(outputs []*buildflags.ExportEntry, load bool) []*buildflags.ExportEntry {
 	if !load {
 		return outputs
 	}
-	setLoad := true
+
 	for _, output := range outputs {
-		if typ := parseOutputType(output); typ == "docker" {
-			if v := parseOutput(output); v != nil {
-				// dest set means we want to output as tar so don't set load
-				if _, ok := v["dest"]; !ok {
-					setLoad = false
-					break
-				}
+		switch output.Type {
+		case "docker":
+			// if dest is not set, we can reuse this entry and do not need to set load
+			if output.Destination == "" {
+				return outputs
 			}
-		} else if typ != "image" && typ != "registry" && typ != "oci" {
+		case "image", "registry", "oci":
+			// Ignore
+		default:
 			// if there is any output that is not an image, registry
 			// or oci, don't set "load" similar to push override
-			setLoad = false
-			break
+			return outputs
 		}
 	}
-	if setLoad {
-		outputs = append(outputs, "type=docker")
-	}
+
+	outputs = append(outputs, &buildflags.ExportEntry{
+		Type: "docker",
+	})
 	return outputs
 }
 
@@ -1563,14 +1546,9 @@ func sanitizeTargetName(target string) s
 	return strings.ReplaceAll(target, ".", "_")
 }
 
-func sliceEqual(s1, s2 []string) bool {
-	if len(s1) != len(s2) {
-		return false
-	}
-	sort.Strings(s1)
-	sort.Strings(s2)
-	for i := range s1 {
-		if s1[i] != s2[i] {
+func isSubset(s1, s2 []string) bool {
+	for _, item := range s1 {
+		if !slices.Contains(s2, item) {
 			return false
 		}
 	}
@@ -1584,3 +1562,24 @@ func toNamedContexts(m map[string]string
 	}
 	return m2
 }
+
+type arrValue[B any] interface {
+	encoding.TextUnmarshaler
+	*B
+}
+
+func parseArrValue[T any, PT arrValue[T]](s []string) ([]*T, error) {
+	outputs := make([]*T, 0, len(s))
+	for _, text := range s {
+		if text == "" {
+			continue
+		}
+
+		output := new(T)
+		if err := PT(output).UnmarshalText([]byte(text)); err != nil {
+			return nil, err
+		}
+		outputs = append(outputs, output)
+	}
+	return outputs, nil
+}
diff -pruN 0.19.3+ds1-4/bake/bake_test.go 0.21.3-0ubuntu1/bake/bake_test.go
--- 0.19.3+ds1-4/bake/bake_test.go	2024-12-17 11:25:19.000000000 +0000
+++ 0.21.3-0ubuntu1/bake/bake_test.go	2025-03-17 16:14:25.000000000 +0000
@@ -2,12 +2,14 @@ package bake
 
 import (
 	"context"
+	"fmt"
 	"os"
 	"path/filepath"
 	"sort"
 	"strings"
 	"testing"
 
+	"github.com/docker/buildx/util/buildflags"
 	"github.com/moby/buildkit/util/entitlements"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
@@ -228,7 +230,7 @@ func TestPushOverride(t *testing.T) {
 		m, _, err := ReadTargets(context.TODO(), []File{fp}, []string{"app"}, []string{"*.push=true"}, nil, &EntitlementConf{})
 		require.NoError(t, err)
 		require.Equal(t, 1, len(m["app"].Outputs))
-		require.Equal(t, "type=image,push=true", m["app"].Outputs[0])
+		require.Equal(t, "type=image,push=true", m["app"].Outputs[0].String())
 	})
 
 	t.Run("type image", func(t *testing.T) {
@@ -242,7 +244,7 @@ func TestPushOverride(t *testing.T) {
 		m, _, err := ReadTargets(context.TODO(), []File{fp}, []string{"app"}, []string{"*.push=true"}, nil, &EntitlementConf{})
 		require.NoError(t, err)
 		require.Equal(t, 1, len(m["app"].Outputs))
-		require.Equal(t, "type=image,compression=zstd,push=true", m["app"].Outputs[0])
+		require.Equal(t, "type=image,compression=zstd,push=true", m["app"].Outputs[0].String())
 	})
 
 	t.Run("type image push false", func(t *testing.T) {
@@ -256,7 +258,7 @@ func TestPushOverride(t *testing.T) {
 		m, _, err := ReadTargets(context.TODO(), []File{fp}, []string{"app"}, []string{"*.push=false"}, nil, &EntitlementConf{})
 		require.NoError(t, err)
 		require.Equal(t, 1, len(m["app"].Outputs))
-		require.Equal(t, "type=image,compression=zstd,push=false", m["app"].Outputs[0])
+		require.Equal(t, "type=image,compression=zstd,push=false", m["app"].Outputs[0].String())
 	})
 
 	t.Run("type registry", func(t *testing.T) {
@@ -270,7 +272,7 @@ func TestPushOverride(t *testing.T) {
 		m, _, err := ReadTargets(context.TODO(), []File{fp}, []string{"app"}, []string{"*.push=true"}, nil, &EntitlementConf{})
 		require.NoError(t, err)
 		require.Equal(t, 1, len(m["app"].Outputs))
-		require.Equal(t, "type=registry", m["app"].Outputs[0])
+		require.Equal(t, "type=registry", m["app"].Outputs[0].String())
 	})
 
 	t.Run("type registry push false", func(t *testing.T) {
@@ -300,9 +302,9 @@ func TestPushOverride(t *testing.T) {
 		require.NoError(t, err)
 		require.Equal(t, 2, len(m))
 		require.Equal(t, 1, len(m["foo"].Outputs))
-		require.Equal(t, []string{"type=local,dest=out"}, m["foo"].Outputs)
+		require.Equal(t, []string{"type=local,dest=out"}, stringify(m["foo"].Outputs))
 		require.Equal(t, 1, len(m["bar"].Outputs))
-		require.Equal(t, []string{"type=image,push=true"}, m["bar"].Outputs)
+		require.Equal(t, []string{"type=image,push=true"}, stringify(m["bar"].Outputs))
 	})
 }
 
@@ -317,7 +319,7 @@ func TestLoadOverride(t *testing.T) {
 		m, _, err := ReadTargets(context.TODO(), []File{fp}, []string{"app"}, []string{"*.load=true"}, nil, &EntitlementConf{})
 		require.NoError(t, err)
 		require.Equal(t, 1, len(m["app"].Outputs))
-		require.Equal(t, "type=docker", m["app"].Outputs[0])
+		require.Equal(t, "type=docker", m["app"].Outputs[0].String())
 	})
 
 	t.Run("type docker", func(t *testing.T) {
@@ -331,7 +333,7 @@ func TestLoadOverride(t *testing.T) {
 		m, _, err := ReadTargets(context.TODO(), []File{fp}, []string{"app"}, []string{"*.load=true"}, nil, &EntitlementConf{})
 		require.NoError(t, err)
 		require.Equal(t, 1, len(m["app"].Outputs))
-		require.Equal(t, []string{"type=docker"}, m["app"].Outputs)
+		require.Equal(t, []string{"type=docker"}, stringify(m["app"].Outputs))
 	})
 
 	t.Run("type image", func(t *testing.T) {
@@ -345,7 +347,7 @@ func TestLoadOverride(t *testing.T) {
 		m, _, err := ReadTargets(context.TODO(), []File{fp}, []string{"app"}, []string{"*.load=true"}, nil, &EntitlementConf{})
 		require.NoError(t, err)
 		require.Equal(t, 2, len(m["app"].Outputs))
-		require.Equal(t, []string{"type=image", "type=docker"}, m["app"].Outputs)
+		require.Equal(t, []string{"type=docker", "type=image"}, stringify(m["app"].Outputs))
 	})
 
 	t.Run("type image load false", func(t *testing.T) {
@@ -359,7 +361,7 @@ func TestLoadOverride(t *testing.T) {
 		m, _, err := ReadTargets(context.TODO(), []File{fp}, []string{"app"}, []string{"*.load=false"}, nil, &EntitlementConf{})
 		require.NoError(t, err)
 		require.Equal(t, 1, len(m["app"].Outputs))
-		require.Equal(t, []string{"type=image"}, m["app"].Outputs)
+		require.Equal(t, []string{"type=image"}, stringify(m["app"].Outputs))
 	})
 
 	t.Run("type registry", func(t *testing.T) {
@@ -373,7 +375,7 @@ func TestLoadOverride(t *testing.T) {
 		m, _, err := ReadTargets(context.TODO(), []File{fp}, []string{"app"}, []string{"*.load=true"}, nil, &EntitlementConf{})
 		require.NoError(t, err)
 		require.Equal(t, 2, len(m["app"].Outputs))
-		require.Equal(t, []string{"type=registry", "type=docker"}, m["app"].Outputs)
+		require.Equal(t, []string{"type=docker", "type=registry"}, stringify(m["app"].Outputs))
 	})
 
 	t.Run("type oci", func(t *testing.T) {
@@ -387,7 +389,7 @@ func TestLoadOverride(t *testing.T) {
 		m, _, err := ReadTargets(context.TODO(), []File{fp}, []string{"app"}, []string{"*.load=true"}, nil, &EntitlementConf{})
 		require.NoError(t, err)
 		require.Equal(t, 2, len(m["app"].Outputs))
-		require.Equal(t, []string{"type=oci,dest=out", "type=docker"}, m["app"].Outputs)
+		require.Equal(t, []string{"type=docker", "type=oci,dest=out"}, stringify(m["app"].Outputs))
 	})
 
 	t.Run("type docker with dest", func(t *testing.T) {
@@ -401,7 +403,7 @@ func TestLoadOverride(t *testing.T) {
 		m, _, err := ReadTargets(context.TODO(), []File{fp}, []string{"app"}, []string{"*.load=true"}, nil, &EntitlementConf{})
 		require.NoError(t, err)
 		require.Equal(t, 2, len(m["app"].Outputs))
-		require.Equal(t, []string{"type=docker,dest=out", "type=docker"}, m["app"].Outputs)
+		require.Equal(t, []string{"type=docker", "type=docker,dest=out"}, stringify(m["app"].Outputs))
 	})
 
 	t.Run("type local and empty target", func(t *testing.T) {
@@ -418,9 +420,9 @@ func TestLoadOverride(t *testing.T) {
 		require.NoError(t, err)
 		require.Equal(t, 2, len(m))
 		require.Equal(t, 1, len(m["foo"].Outputs))
-		require.Equal(t, []string{"type=local,dest=out"}, m["foo"].Outputs)
+		require.Equal(t, []string{"type=local,dest=out"}, stringify(m["foo"].Outputs))
 		require.Equal(t, 1, len(m["bar"].Outputs))
-		require.Equal(t, []string{"type=docker"}, m["bar"].Outputs)
+		require.Equal(t, []string{"type=docker"}, stringify(m["bar"].Outputs))
 	})
 }
 
@@ -440,12 +442,10 @@ func TestLoadAndPushOverride(t *testing.
 		require.Equal(t, 2, len(m))
 
 		require.Equal(t, 1, len(m["foo"].Outputs))
-		sort.Strings(m["foo"].Outputs)
-		require.Equal(t, []string{"type=local,dest=out"}, m["foo"].Outputs)
+		require.Equal(t, []string{"type=local,dest=out"}, stringify(m["foo"].Outputs))
 
 		require.Equal(t, 2, len(m["bar"].Outputs))
-		sort.Strings(m["bar"].Outputs)
-		require.Equal(t, []string{"type=docker", "type=image,push=true"}, m["bar"].Outputs)
+		require.Equal(t, []string{"type=docker", "type=image,push=true"}, stringify(m["bar"].Outputs))
 	})
 
 	t.Run("type registry", func(t *testing.T) {
@@ -461,8 +461,7 @@ func TestLoadAndPushOverride(t *testing.
 		require.Equal(t, 1, len(m))
 
 		require.Equal(t, 2, len(m["foo"].Outputs))
-		sort.Strings(m["foo"].Outputs)
-		require.Equal(t, []string{"type=docker", "type=registry"}, m["foo"].Outputs)
+		require.Equal(t, []string{"type=docker", "type=registry"}, stringify(m["foo"].Outputs))
 	})
 }
 
@@ -674,7 +673,7 @@ func TestOverrideMerge(t *testing.T) {
 
 	require.Equal(t, []string{"linux/arm", "linux/ppc64le"}, m["app"].Platforms)
 	require.Equal(t, 1, len(m["app"].Outputs))
-	require.Equal(t, "type=registry", m["app"].Outputs[0])
+	require.Equal(t, "type=registry", m["app"].Outputs[0].String())
 }
 
 func TestReadContexts(t *testing.T) {
@@ -840,7 +839,7 @@ func TestReadContextFromTargetChain(t *t
 	mid, ok := m["mid"]
 	require.True(t, ok)
 	require.Equal(t, 1, len(mid.Outputs))
-	require.Equal(t, "type=cacheonly", mid.Outputs[0])
+	require.Equal(t, "type=cacheonly", mid.Outputs[0].String())
 	require.Equal(t, 1, len(mid.Contexts))
 
 	base, ok := m["base"]
@@ -912,7 +911,28 @@ func TestReadContextFromTargetInvalidPla
 	}
 	_, _, err := ReadTargets(ctx, []File{fp}, []string{"app"}, []string{}, nil, &EntitlementConf{})
 	require.Error(t, err)
-	require.Contains(t, err.Error(), "defined for different platforms")
+	require.Contains(t, err.Error(), "are not a subset of")
+}
+
+func TestReadContextFromTargetSubsetPlatforms(t *testing.T) {
+	ctx := context.TODO()
+	fp := File{
+		Name: "docker-bake.hcl",
+		Data: []byte(`
+		target "mid" {
+			output = ["foo"]
+			platforms = ["linux/amd64", "linux/riscv64", "linux/arm64"]
+		}
+		target "app" {
+			contexts = {
+				bar: "target:mid"
+			}
+			platforms = ["linux/amd64", "linux/arm64"]
+		}
+		`),
+	}
+	_, _, err := ReadTargets(ctx, []File{fp}, []string{"app"}, []string{}, nil, &EntitlementConf{})
+	require.NoError(t, err)
 }
 
 func TestReadTargetsDefault(t *testing.T) {
@@ -924,11 +944,12 @@ func TestReadTargetsDefault(t *testing.T
 		Data: []byte(`
 target "default" {
   dockerfile = "test"
-}`)}
+}`),
+	}
 
 	m, g, err := ReadTargets(ctx, []File{f}, []string{"default"}, nil, nil, &EntitlementConf{})
 	require.NoError(t, err)
-	require.Equal(t, 0, len(g))
+	require.Equal(t, 1, len(g))
 	require.Equal(t, 1, len(m))
 	require.Equal(t, "test", *m["default"].Dockerfile)
 }
@@ -942,7 +963,8 @@ func TestReadTargetsSpecified(t *testing
 		Data: []byte(`
 target "image" {
   dockerfile = "test"
-}`)}
+}`),
+	}
 
 	_, _, err := ReadTargets(ctx, []File{f}, []string{"default"}, nil, nil, &EntitlementConf{})
 	require.Error(t, err)
@@ -967,7 +989,8 @@ group "foo" {
 }
 target "image" {
   dockerfile = "test"
-}`)}
+}`),
+	}
 
 	m, g, err := ReadTargets(ctx, []File{f}, []string{"foo"}, nil, nil, &EntitlementConf{})
 	require.NoError(t, err)
@@ -993,7 +1016,8 @@ target "foo" {
 }
 target "image" {
   dockerfile = "test"
-}`)}
+}`),
+	}
 
 	m, g, err := ReadTargets(ctx, []File{f}, []string{"foo"}, nil, nil, &EntitlementConf{})
 	require.NoError(t, err)
@@ -1036,7 +1060,8 @@ target "image-release" {
   inherits = ["image"]
   output = ["type=image,push=true"]
   tags = ["user/app:latest"]
-}`)}
+}`),
+	}
 
 	fyml := File{
 		Name: "docker-compose.yml",
@@ -1060,7 +1085,8 @@ services:
       args:
         CT_ECR: foo
         CT_TAG: bar
-    image: ct-fake-aws:bar`)}
+    image: ct-fake-aws:bar`),
+	}
 
 	fjson := File{
 		Name: "docker-bake.json",
@@ -1081,7 +1107,8 @@ services:
 	     ]
 	   }
 	 }
-	}`)}
+	}`),
+	}
 
 	m, g, err := ReadTargets(ctx, []File{fhcl}, []string{"default"}, nil, nil, &EntitlementConf{})
 	require.NoError(t, err)
@@ -1089,7 +1116,7 @@ services:
 	require.Equal(t, []string{"image"}, g["default"].Targets)
 	require.Equal(t, 1, len(m))
 	require.Equal(t, 1, len(m["image"].Outputs))
-	require.Equal(t, "type=docker", m["image"].Outputs[0])
+	require.Equal(t, "type=docker", m["image"].Outputs[0].String())
 
 	m, g, err = ReadTargets(ctx, []File{fhcl}, []string{"image-release"}, nil, nil, &EntitlementConf{})
 	require.NoError(t, err)
@@ -1097,7 +1124,7 @@ services:
 	require.Equal(t, []string{"image-release"}, g["default"].Targets)
 	require.Equal(t, 1, len(m))
 	require.Equal(t, 1, len(m["image-release"].Outputs))
-	require.Equal(t, "type=image,push=true", m["image-release"].Outputs[0])
+	require.Equal(t, "type=image,push=true", m["image-release"].Outputs[0].String())
 
 	m, g, err = ReadTargets(ctx, []File{fhcl}, []string{"image", "image-release"}, nil, nil, &EntitlementConf{})
 	require.NoError(t, err)
@@ -1106,7 +1133,7 @@ services:
 	require.Equal(t, 2, len(m))
 	require.Equal(t, ".", *m["image"].Context)
 	require.Equal(t, 1, len(m["image-release"].Outputs))
-	require.Equal(t, "type=image,push=true", m["image-release"].Outputs[0])
+	require.Equal(t, "type=image,push=true", m["image-release"].Outputs[0].String())
 
 	m, g, err = ReadTargets(ctx, []File{fyml, fhcl}, []string{"default"}, nil, nil, &EntitlementConf{})
 	require.NoError(t, err)
@@ -1166,7 +1193,8 @@ target "foo" {
 }
 target "image" {
   output = ["type=docker"]
-}`)}
+}`),
+	}
 
 	m, g, err := ReadTargets(ctx, []File{f}, []string{"foo"}, nil, nil, &EntitlementConf{})
 	require.NoError(t, err)
@@ -1200,7 +1228,8 @@ target "foo" {
 }
 target "image" {
   output = ["type=docker"]
-}`)}
+}`),
+	}
 
 	m, g, err := ReadTargets(ctx, []File{f}, []string{"foo"}, nil, nil, &EntitlementConf{})
 	require.NoError(t, err)
@@ -1209,7 +1238,7 @@ target "image" {
 	require.Equal(t, []string{"foo", "image"}, g["foo"].Targets)
 	require.Equal(t, 2, len(m))
 	require.Equal(t, "bar", *m["foo"].Dockerfile)
-	require.Equal(t, "type=docker", m["image"].Outputs[0])
+	require.Equal(t, "type=docker", m["image"].Outputs[0].String())
 
 	m, g, err = ReadTargets(ctx, []File{f}, []string{"foo", "image"}, nil, nil, &EntitlementConf{})
 	require.NoError(t, err)
@@ -1218,7 +1247,7 @@ target "image" {
 	require.Equal(t, []string{"foo", "image"}, g["foo"].Targets)
 	require.Equal(t, 2, len(m))
 	require.Equal(t, "bar", *m["foo"].Dockerfile)
-	require.Equal(t, "type=docker", m["image"].Outputs[0])
+	require.Equal(t, "type=docker", m["image"].Outputs[0].String())
 }
 
 func TestNestedInherits(t *testing.T) {
@@ -1247,7 +1276,8 @@ target "c" {
 }
 target "d" {
   inherits = ["b", "c"]
-}`)}
+}`),
+	}
 
 	cases := []struct {
 		name      string
@@ -1315,7 +1345,8 @@ group "default" {
     "child1",
     "child2"
   ]
-}`)}
+}`),
+	}
 
 	cases := []struct {
 		name      string
@@ -1351,9 +1382,9 @@ group "default" {
 			require.Equal(t, []string{"child1", "child2"}, g["default"].Targets)
 			require.Equal(t, 2, len(m))
 			require.Equal(t, tt.wantch1, m["child1"].Args)
-			require.Equal(t, []string{"type=docker"}, m["child1"].Outputs)
+			require.Equal(t, []string{"type=docker"}, stringify(m["child1"].Outputs))
 			require.Equal(t, tt.wantch2, m["child2"].Args)
-			require.Equal(t, []string{"type=docker"}, m["child2"].Outputs)
+			require.Equal(t, []string{"type=docker"}, stringify(m["child2"].Outputs))
 		})
 	}
 }
@@ -1442,7 +1473,8 @@ group "e" {
 
 target "f" {
   context = "./foo"
-}`)}
+}`),
+	}
 
 	cases := []struct {
 		names   []string
@@ -1678,7 +1710,7 @@ func TestAttestDuplicates(t *testing.T)
 	ctx := context.TODO()
 
 	m, _, err := ReadTargets(ctx, []File{fp}, []string{"default"}, nil, nil, &EntitlementConf{})
-	require.Equal(t, []string{"type=sbom,foo=bar", "type=provenance,mode=max"}, m["default"].Attest)
+	require.Equal(t, []string{"type=provenance,mode=max", "type=sbom,foo=bar"}, stringify(m["default"].Attest))
 	require.NoError(t, err)
 
 	opts, err := TargetsToBuildOpt(m, &Input{})
@@ -1689,7 +1721,7 @@ func TestAttestDuplicates(t *testing.T)
 	}, opts["default"].Attests)
 
 	m, _, err = ReadTargets(ctx, []File{fp}, []string{"default"}, []string{"*.attest=type=sbom,disabled=true"}, nil, &EntitlementConf{})
-	require.Equal(t, []string{"type=sbom,disabled=true", "type=provenance,mode=max"}, m["default"].Attest)
+	require.Equal(t, []string{"type=provenance,mode=max", "type=sbom,disabled=true"}, stringify(m["default"].Attest))
 	require.NoError(t, err)
 
 	opts, err = TargetsToBuildOpt(m, &Input{})
@@ -1721,13 +1753,34 @@ func TestAnnotations(t *testing.T) {
 
 	require.Equal(t, 1, len(m))
 	require.Contains(t, m, "app")
-	require.Equal(t, "type=image,name=foo", m["app"].Outputs[0])
+	require.Equal(t, "type=image,name=foo", m["app"].Outputs[0].String())
 	require.Equal(t, "manifest[linux/amd64]:foo=bar", m["app"].Annotations[0])
 
 	require.Len(t, bo["app"].Exports, 1)
 	require.Equal(t, "bar", bo["app"].Exports[0].Attrs["annotation-manifest[linux/amd64].foo"])
 }
 
+func TestRefOnlyCacheOptions(t *testing.T) {
+	fp := File{
+		Name: "docker-bake.hcl",
+		Data: []byte(
+			`target "app" {
+				output = ["type=image,name=foo"]
+        cache-from = ["ref1,ref2"]
+			}`),
+	}
+	ctx := context.TODO()
+	m, _, err := ReadTargets(ctx, []File{fp}, []string{"app"}, nil, nil, &EntitlementConf{})
+	require.NoError(t, err)
+
+	require.Len(t, m, 1)
+	require.Contains(t, m, "app")
+	require.Equal(t, buildflags.CacheOptions{
+		{Type: "registry", Attrs: map[string]string{"ref": "ref1"}},
+		{Type: "registry", Attrs: map[string]string{"ref": "ref2"}},
+	}, m["app"].CacheFrom)
+}
+
 func TestHCLEntitlements(t *testing.T) {
 	fp := File{
 		Name: "docker-bake.hcl",
@@ -1753,8 +1806,8 @@ func TestHCLEntitlements(t *testing.T) {
 	require.Equal(t, "network.host", m["app"].Entitlements[1])
 
 	require.Len(t, bo["app"].Allow, 2)
-	require.Equal(t, entitlements.EntitlementSecurityInsecure, bo["app"].Allow[0])
-	require.Equal(t, entitlements.EntitlementNetworkHost, bo["app"].Allow[1])
+	require.Equal(t, entitlements.EntitlementSecurityInsecure.String(), bo["app"].Allow[0])
+	require.Equal(t, entitlements.EntitlementNetworkHost.String(), bo["app"].Allow[1])
 }
 
 func TestEntitlementsForNetHostCompose(t *testing.T) {
@@ -1793,7 +1846,7 @@ func TestEntitlementsForNetHostCompose(t
 	require.Equal(t, "host", *m["app"].NetworkMode)
 
 	require.Len(t, bo["app"].Allow, 1)
-	require.Equal(t, entitlements.EntitlementNetworkHost, bo["app"].Allow[0])
+	require.Equal(t, entitlements.EntitlementNetworkHost.String(), bo["app"].Allow[0])
 	require.Equal(t, "host", bo["app"].NetworkMode)
 }
 
@@ -1824,7 +1877,7 @@ func TestEntitlementsForNetHost(t *testi
 	require.Equal(t, "host", *m["app"].NetworkMode)
 
 	require.Len(t, bo["app"].Allow, 1)
-	require.Equal(t, entitlements.EntitlementNetworkHost, bo["app"].Allow[0])
+	require.Equal(t, entitlements.EntitlementNetworkHost.String(), bo["app"].Allow[0])
 	require.Equal(t, "host", bo["app"].NetworkMode)
 }
 
@@ -2024,9 +2077,10 @@ target "app" {
 	}
 
 	ctx := context.TODO()
-
-	_, _, err := ReadTargets(ctx, []File{fp}, []string{"app"}, nil, nil, &EntitlementConf{})
+	m, _, err := ReadTargets(ctx, []File{fp}, []string{"app"}, nil, nil, &EntitlementConf{})
 	require.NoError(t, err)
+	require.Contains(t, m, "app")
+	require.Len(t, m["app"].Outputs, 0)
 }
 
 // https://github.com/docker/buildx/issues/2858
@@ -2041,7 +2095,69 @@ target "app" {
 	}
 
 	ctx := context.TODO()
-
-	_, _, err := ReadTargets(ctx, []File{fp}, []string{"app"}, []string{"app.output="}, nil, &EntitlementConf{})
+	m, _, err := ReadTargets(ctx, []File{fp}, []string{"app"}, []string{"app.output="}, nil, &EntitlementConf{})
 	require.NoError(t, err)
+	require.Contains(t, m, "app")
+	require.Len(t, m["app"].Outputs, 0)
+}
+
+// https://github.com/docker/buildx/issues/2859
+func TestGroupTargetsWithDefault(t *testing.T) {
+	t.Run("OnTarget", func(t *testing.T) {
+		fp := File{
+			Name: "docker-bake.hcl",
+			Data: []byte(
+				`target "default" {
+					dockerfile = "Dockerfile"
+					platforms = ["linux/amd64"]
+				}
+				target "multiarch" {
+					dockerfile = "Dockerfile"
+					platforms = ["linux/amd64","linux/arm64","linux/arm/v7","linux/arm/v6"]
+				}`),
+		}
+		ctx := context.TODO()
+		_, g, err := ReadTargets(ctx, []File{fp}, []string{"default", "multiarch"}, nil, nil, &EntitlementConf{})
+		require.NoError(t, err)
+
+		require.Equal(t, 1, len(g))
+		require.Equal(t, 2, len(g["default"].Targets))
+		require.Equal(t, []string{"default", "multiarch"}, g["default"].Targets)
+	})
+
+	t.Run("OnGroup", func(t *testing.T) {
+		fp := File{
+			Name: "docker-bake.hcl",
+			Data: []byte(
+				`group "default" {
+					targets = ["app", "multiarch"]
+				}
+				target "app" {
+					dockerfile = "app.Dockerfile"
+				}
+				target "foo" {
+					dockerfile = "foo.Dockerfile"
+				}
+				target "multiarch" {
+					dockerfile = "Dockerfile"
+					platforms = ["linux/amd64","linux/arm64","linux/arm/v7","linux/arm/v6"]
+				}`),
+		}
+		ctx := context.TODO()
+		_, g, err := ReadTargets(ctx, []File{fp}, []string{"default", "foo"}, nil, nil, &EntitlementConf{})
+		require.NoError(t, err)
+
+		require.Equal(t, 1, len(g))
+		require.Equal(t, 3, len(g["default"].Targets))
+		require.Equal(t, []string{"app", "foo", "multiarch"}, g["default"].Targets)
+	})
+}
+
+func stringify[V fmt.Stringer](values []V) []string {
+	s := make([]string, len(values))
+	for i, v := range values {
+		s[i] = v.String()
+	}
+	sort.Strings(s)
+	return s
 }
diff -pruN 0.19.3+ds1-4/bake/compose.go 0.21.3-0ubuntu1/bake/compose.go
--- 0.19.3+ds1-4/bake/compose.go	2024-12-17 11:25:19.000000000 +0000
+++ 0.21.3-0ubuntu1/bake/compose.go	2025-03-17 16:14:25.000000000 +0000
@@ -5,13 +5,14 @@ import (
 	"fmt"
 	"os"
 	"path/filepath"
-	"sort"
+	"slices"
 	"strings"
 
 	"github.com/compose-spec/compose-go/v2/consts"
 	"github.com/compose-spec/compose-go/v2/dotenv"
 	"github.com/compose-spec/compose-go/v2/loader"
 	composetypes "github.com/compose-spec/compose-go/v2/types"
+	"github.com/docker/buildx/util/buildflags"
 	dockeropts "github.com/docker/cli/opts"
 	"github.com/docker/go-units"
 	"github.com/pkg/errors"
@@ -119,14 +120,16 @@ func ParseCompose(cfgs []composetypes.Co
 				}
 			}
 
-			var ssh []string
+			var ssh []*buildflags.SSH
 			for _, bkey := range s.Build.SSH {
 				sshkey := composeToBuildkitSSH(bkey)
 				ssh = append(ssh, sshkey)
 			}
-			sort.Strings(ssh)
+			slices.SortFunc(ssh, func(a, b *buildflags.SSH) int {
+				return a.Less(b)
+			})
 
-			var secrets []string
+			var secrets []*buildflags.Secret
 			for _, bs := range s.Build.Secrets {
 				secret, err := composeToBuildkitSecret(bs, cfg.Secrets[bs.Source])
 				if err != nil {
@@ -142,6 +145,16 @@ func ParseCompose(cfgs []composetypes.Co
 				labels[k] = &v
 			}
 
+			cacheFrom, err := buildflags.ParseCacheEntry(s.Build.CacheFrom)
+			if err != nil {
+				return nil, err
+			}
+
+			cacheTo, err := buildflags.ParseCacheEntry(s.Build.CacheTo)
+			if err != nil {
+				return nil, err
+			}
+
 			g.Targets = append(g.Targets, targetName)
 			t := &Target{
 				Name:             targetName,
@@ -158,8 +171,8 @@ func ParseCompose(cfgs []composetypes.Co
 					val, ok := cfg.Environment[val]
 					return val, ok
 				})),
-				CacheFrom:   s.Build.CacheFrom,
-				CacheTo:     s.Build.CacheTo,
+				CacheFrom:   cacheFrom,
+				CacheTo:     cacheTo,
 				NetworkMode: networkModeP,
 				SSH:         ssh,
 				Secrets:     secrets,
@@ -297,8 +310,10 @@ type xbake struct {
 	// https://github.com/docker/docs/blob/main/content/build/bake/compose-file.md#extension-field-with-x-bake
 }
 
-type stringMap map[string]string
-type stringArray []string
+type (
+	stringMap   map[string]string
+	stringArray []string
+)
 
 func (sa *stringArray) UnmarshalYAML(unmarshal func(interface{}) error) error {
 	var multi []string
@@ -334,23 +349,45 @@ func (t *Target) composeExtTarget(exts m
 		t.Tags = dedupSlice(append(t.Tags, xb.Tags...))
 	}
 	if len(xb.CacheFrom) > 0 {
-		t.CacheFrom = dedupSlice(append(t.CacheFrom, xb.CacheFrom...))
+		cacheFrom, err := buildflags.ParseCacheEntry(xb.CacheFrom)
+		if err != nil {
+			return err
+		}
+		t.CacheFrom = t.CacheFrom.Merge(cacheFrom)
 	}
 	if len(xb.CacheTo) > 0 {
-		t.CacheTo = dedupSlice(append(t.CacheTo, xb.CacheTo...))
+		cacheTo, err := buildflags.ParseCacheEntry(xb.CacheTo)
+		if err != nil {
+			return err
+		}
+		t.CacheTo = t.CacheTo.Merge(cacheTo)
 	}
 	if len(xb.Secrets) > 0 {
-		t.Secrets = dedupSlice(append(t.Secrets, xb.Secrets...))
+		secrets, err := parseArrValue[buildflags.Secret](xb.Secrets)
+		if err != nil {
+			return err
+		}
+		t.Secrets = t.Secrets.Merge(secrets)
 	}
 	if len(xb.SSH) > 0 {
-		t.SSH = dedupSlice(append(t.SSH, xb.SSH...))
-		sort.Strings(t.SSH)
+		ssh, err := parseArrValue[buildflags.SSH](xb.SSH)
+		if err != nil {
+			return err
+		}
+		t.SSH = t.SSH.Merge(ssh)
+		slices.SortFunc(t.SSH, func(a, b *buildflags.SSH) int {
+			return a.Less(b)
+		})
 	}
 	if len(xb.Platforms) > 0 {
 		t.Platforms = dedupSlice(append(t.Platforms, xb.Platforms...))
 	}
 	if len(xb.Outputs) > 0 {
-		t.Outputs = dedupSlice(append(t.Outputs, xb.Outputs...))
+		outputs, err := parseArrValue[buildflags.ExportEntry](xb.Outputs)
+		if err != nil {
+			return err
+		}
+		t.Outputs = t.Outputs.Merge(outputs)
 	}
 	if xb.Pull != nil {
 		t.Pull = xb.Pull
@@ -370,35 +407,30 @@ func (t *Target) composeExtTarget(exts m
 
 // composeToBuildkitSecret converts secret from compose format to buildkit's
 // csv format.
-func composeToBuildkitSecret(inp composetypes.ServiceSecretConfig, psecret composetypes.SecretConfig) (string, error) {
+func composeToBuildkitSecret(inp composetypes.ServiceSecretConfig, psecret composetypes.SecretConfig) (*buildflags.Secret, error) {
 	if psecret.External {
-		return "", errors.Errorf("unsupported external secret %s", psecret.Name)
+		return nil, errors.Errorf("unsupported external secret %s", psecret.Name)
 	}
 
-	var bkattrs []string
+	secret := &buildflags.Secret{}
 	if inp.Source != "" {
-		bkattrs = append(bkattrs, "id="+inp.Source)
+		secret.ID = inp.Source
 	}
 	if psecret.File != "" {
-		bkattrs = append(bkattrs, "src="+psecret.File)
+		secret.FilePath = psecret.File
 	}
 	if psecret.Environment != "" {
-		bkattrs = append(bkattrs, "env="+psecret.Environment)
+		secret.Env = psecret.Environment
 	}
-
-	return strings.Join(bkattrs, ","), nil
+	return secret, nil
 }
 
 // composeToBuildkitSSH converts secret from compose format to buildkit's
 // csv format.
-func composeToBuildkitSSH(sshKey composetypes.SSHKey) string {
-	var bkattrs []string
-
-	bkattrs = append(bkattrs, sshKey.ID)
-
+func composeToBuildkitSSH(sshKey composetypes.SSHKey) *buildflags.SSH {
+	bkssh := &buildflags.SSH{ID: sshKey.ID}
 	if sshKey.Path != "" {
-		bkattrs = append(bkattrs, sshKey.Path)
+		bkssh.Paths = []string{sshKey.Path}
 	}
-
-	return strings.Join(bkattrs, "=")
+	return bkssh
 }
diff -pruN 0.19.3+ds1-4/bake/compose_test.go 0.21.3-0ubuntu1/bake/compose_test.go
--- 0.19.3+ds1-4/bake/compose_test.go	2024-12-17 11:25:19.000000000 +0000
+++ 0.21.3-0ubuntu1/bake/compose_test.go	2025-03-17 16:14:25.000000000 +0000
@@ -12,7 +12,7 @@ import (
 )
 
 func TestParseCompose(t *testing.T) {
-	var dt = []byte(`
+	dt := []byte(`
 services:
   db:
     build: ./db
@@ -33,7 +33,7 @@ services:
       cache_to:
         - type=local,dest=path/to/cache
       ssh:
-        - key=path/to/key
+        - key=/path/to/key
         - default
       secrets:
         - token
@@ -74,14 +74,14 @@ secrets:
 	require.Equal(t, "Dockerfile-alternate", *c.Targets[1].Dockerfile)
 	require.Equal(t, 1, len(c.Targets[1].Args))
 	require.Equal(t, ptrstr("123"), c.Targets[1].Args["buildno"])
-	require.Equal(t, []string{"type=local,src=path/to/cache"}, c.Targets[1].CacheFrom)
-	require.Equal(t, []string{"type=local,dest=path/to/cache"}, c.Targets[1].CacheTo)
+	require.Equal(t, []string{"type=local,src=path/to/cache"}, stringify(c.Targets[1].CacheFrom))
+	require.Equal(t, []string{"type=local,dest=path/to/cache"}, stringify(c.Targets[1].CacheTo))
 	require.Equal(t, "none", *c.Targets[1].NetworkMode)
-	require.Equal(t, []string{"default", "key=path/to/key"}, c.Targets[1].SSH)
+	require.Equal(t, []string{"default", "key=/path/to/key"}, stringify(c.Targets[1].SSH))
 	require.Equal(t, []string{
-		"id=token,env=ENV_TOKEN",
 		"id=aws,src=/root/.aws/credentials",
-	}, c.Targets[1].Secrets)
+		"id=token,env=ENV_TOKEN",
+	}, stringify(c.Targets[1].Secrets))
 
 	require.Equal(t, "webapp2", c.Targets[2].Name)
 	require.Equal(t, "dir", *c.Targets[2].Context)
@@ -89,7 +89,7 @@ secrets:
 }
 
 func TestNoBuildOutOfTreeService(t *testing.T) {
-	var dt = []byte(`
+	dt := []byte(`
 services:
     external:
         image: "verycooldb:1337"
@@ -103,7 +103,7 @@ services:
 }
 
 func TestParseComposeTarget(t *testing.T) {
-	var dt = []byte(`
+	dt := []byte(`
 services:
   db:
     build:
@@ -129,7 +129,7 @@ services:
 }
 
 func TestComposeBuildWithoutContext(t *testing.T) {
-	var dt = []byte(`
+	dt := []byte(`
 services:
   db:
     build:
@@ -153,7 +153,7 @@ services:
 }
 
 func TestBuildArgEnvCompose(t *testing.T) {
-	var dt = []byte(`
+	dt := []byte(`
 version: "3.8"
 services:
   example:
@@ -179,7 +179,7 @@ services:
 }
 
 func TestInconsistentComposeFile(t *testing.T) {
-	var dt = []byte(`
+	dt := []byte(`
 services:
   webapp:
     entrypoint: echo 1
@@ -190,7 +190,7 @@ services:
 }
 
 func TestAdvancedNetwork(t *testing.T) {
-	var dt = []byte(`
+	dt := []byte(`
 services:
   db:
     networks:
@@ -215,7 +215,7 @@ networks:
 }
 
 func TestTags(t *testing.T) {
-	var dt = []byte(`
+	dt := []byte(`
 services:
   example:
     image: example
@@ -233,7 +233,7 @@ services:
 }
 
 func TestDependsOnList(t *testing.T) {
-	var dt = []byte(`
+	dt := []byte(`
 version: "3.8"
 
 services:
@@ -269,7 +269,7 @@ networks:
 }
 
 func TestComposeExt(t *testing.T) {
-	var dt = []byte(`
+	dt := []byte(`
 services:
   addon:
     image: ct-addon:bar
@@ -283,7 +283,7 @@ services:
       tags:
         - ct-addon:baz
       ssh:
-        key: path/to/key
+        key: /path/to/key
       args:
         CT_ECR: foo
         CT_TAG: bar
@@ -336,23 +336,23 @@ services:
 	require.Equal(t, map[string]*string{"CT_ECR": ptrstr("foo"), "CT_TAG": ptrstr("bar")}, c.Targets[0].Args)
 	require.Equal(t, []string{"ct-addon:baz", "ct-addon:foo", "ct-addon:alp"}, c.Targets[0].Tags)
 	require.Equal(t, []string{"linux/amd64", "linux/arm64"}, c.Targets[0].Platforms)
-	require.Equal(t, []string{"user/app:cache", "type=local,src=path/to/cache"}, c.Targets[0].CacheFrom)
-	require.Equal(t, []string{"user/app:cache", "type=local,dest=path/to/cache"}, c.Targets[0].CacheTo)
-	require.Equal(t, []string{"default", "key=path/to/key", "other=path/to/otherkey"}, c.Targets[0].SSH)
+	require.Equal(t, []string{"type=local,src=path/to/cache", "user/app:cache"}, stringify(c.Targets[0].CacheFrom))
+	require.Equal(t, []string{"type=local,dest=path/to/cache", "user/app:cache"}, stringify(c.Targets[0].CacheTo))
+	require.Equal(t, []string{"default", "key=/path/to/key", "other=path/to/otherkey"}, stringify(c.Targets[0].SSH))
 	require.Equal(t, newBool(true), c.Targets[0].Pull)
 	require.Equal(t, map[string]string{"alpine": "docker-image://alpine:3.13"}, c.Targets[0].Contexts)
 	require.Equal(t, []string{"ct-fake-aws:bar"}, c.Targets[1].Tags)
-	require.Equal(t, []string{"id=mysecret,src=/local/secret", "id=mysecret2,src=/local/secret2"}, c.Targets[1].Secrets)
-	require.Equal(t, []string{"default"}, c.Targets[1].SSH)
+	require.Equal(t, []string{"id=mysecret,src=/local/secret", "id=mysecret2,src=/local/secret2"}, stringify(c.Targets[1].Secrets))
+	require.Equal(t, []string{"default"}, stringify(c.Targets[1].SSH))
 	require.Equal(t, []string{"linux/arm64"}, c.Targets[1].Platforms)
-	require.Equal(t, []string{"type=docker"}, c.Targets[1].Outputs)
+	require.Equal(t, []string{"type=docker"}, stringify(c.Targets[1].Outputs))
 	require.Equal(t, newBool(true), c.Targets[1].NoCache)
 	require.Equal(t, ptrstr("128MiB"), c.Targets[1].ShmSize)
 	require.Equal(t, []string{"nofile=1024:1024"}, c.Targets[1].Ulimits)
 }
 
 func TestComposeExtDedup(t *testing.T) {
-	var dt = []byte(`
+	dt := []byte(`
 services:
   webapp:
     image: app:bar
@@ -383,9 +383,9 @@ services:
 	require.NoError(t, err)
 	require.Equal(t, 1, len(c.Targets))
 	require.Equal(t, []string{"ct-addon:foo", "ct-addon:baz"}, c.Targets[0].Tags)
-	require.Equal(t, []string{"user/app:cache", "type=local,src=path/to/cache"}, c.Targets[0].CacheFrom)
-	require.Equal(t, []string{"user/app:cache", "type=local,dest=path/to/cache"}, c.Targets[0].CacheTo)
-	require.Equal(t, []string{"default", "key=path/to/key"}, c.Targets[0].SSH)
+	require.Equal(t, []string{"type=local,src=path/to/cache", "user/app:cache"}, stringify(c.Targets[0].CacheFrom))
+	require.Equal(t, []string{"type=local,dest=path/to/cache", "user/app:cache"}, stringify(c.Targets[0].CacheTo))
+	require.Equal(t, []string{"default", "key=path/to/key"}, stringify(c.Targets[0].SSH))
 }
 
 func TestEnv(t *testing.T) {
@@ -396,7 +396,7 @@ func TestEnv(t *testing.T) {
 	_, err = envf.WriteString("FOO=bsdf -csdf\n")
 	require.NoError(t, err)
 
-	var dt = []byte(`
+	dt := []byte(`
 services:
   scratch:
     build:
@@ -424,7 +424,7 @@ func TestDotEnv(t *testing.T) {
 	err := os.WriteFile(filepath.Join(tmpdir, ".env"), []byte("FOO=bar"), 0644)
 	require.NoError(t, err)
 
-	var dt = []byte(`
+	dt := []byte(`
 services:
   scratch:
     build:
@@ -443,7 +443,7 @@ services:
 }
 
 func TestPorts(t *testing.T) {
-	var dt = []byte(`
+	dt := []byte(`
 services:
   foo:
     build:
@@ -664,7 +664,7 @@ target "default" {
 }
 
 func TestComposeNullArgs(t *testing.T) {
-	var dt = []byte(`
+	dt := []byte(`
 services:
   scratch:
     build:
@@ -680,7 +680,7 @@ services:
 }
 
 func TestDependsOn(t *testing.T) {
-	var dt = []byte(`
+	dt := []byte(`
 services:
   foo:
     build:
@@ -711,7 +711,7 @@ services:
 `), 0644)
 	require.NoError(t, err)
 
-	var dt = []byte(`
+	dt := []byte(`
 include:
   - compose-foo.yml
 
@@ -740,7 +740,7 @@ services:
 }
 
 func TestDevelop(t *testing.T) {
-	var dt = []byte(`
+	dt := []byte(`
 services:
   scratch:
     build:
@@ -759,7 +759,7 @@ services:
 }
 
 func TestCgroup(t *testing.T) {
-	var dt = []byte(`
+	dt := []byte(`
 services:
   scratch:
     build:
@@ -772,7 +772,7 @@ services:
 }
 
 func TestProjectName(t *testing.T) {
-	var dt = []byte(`
+	dt := []byte(`
 services:
   scratch:
     build:
diff -pruN 0.19.3+ds1-4/bake/entitlements.go 0.21.3-0ubuntu1/bake/entitlements.go
--- 0.19.3+ds1-4/bake/entitlements.go	2024-12-17 11:25:19.000000000 +0000
+++ 0.21.3-0ubuntu1/bake/entitlements.go	2025-03-17 16:14:25.000000000 +0000
@@ -20,6 +20,7 @@ import (
 	"github.com/moby/buildkit/util/entitlements"
 	"github.com/pkg/errors"
 	"github.com/sirupsen/logrus"
+	"github.com/tonistiigi/go-csvvalue"
 )
 
 type EntitlementKey string
@@ -27,6 +28,7 @@ type EntitlementKey string
 const (
 	EntitlementKeyNetworkHost      EntitlementKey = "network.host"
 	EntitlementKeySecurityInsecure EntitlementKey = "security.insecure"
+	EntitlementKeyDevice           EntitlementKey = "device"
 	EntitlementKeyFSRead           EntitlementKey = "fs.read"
 	EntitlementKeyFSWrite          EntitlementKey = "fs.write"
 	EntitlementKeyFS               EntitlementKey = "fs"
@@ -39,6 +41,7 @@ const (
 type EntitlementConf struct {
 	NetworkHost      bool
 	SecurityInsecure bool
+	Devices          *EntitlementsDevicesConf
 	FSRead           []string
 	FSWrite          []string
 	ImagePush        []string
@@ -46,6 +49,11 @@ type EntitlementConf struct {
 	SSH              bool
 }
 
+type EntitlementsDevicesConf struct {
+	All     bool
+	Devices map[string]struct{}
+}
+
 func ParseEntitlements(in []string) (EntitlementConf, error) {
 	var conf EntitlementConf
 	for _, e := range in {
@@ -59,6 +67,22 @@ func ParseEntitlements(in []string) (Ent
 		default:
 			k, v, _ := strings.Cut(e, "=")
 			switch k {
+			case string(EntitlementKeyDevice):
+				if v == "" {
+					conf.Devices = &EntitlementsDevicesConf{All: true}
+					continue
+				}
+				fields, err := csvvalue.Fields(v, nil)
+				if err != nil {
+					return EntitlementConf{}, errors.Wrapf(err, "failed to parse device entitlement %q", v)
+				}
+				if conf.Devices == nil {
+					conf.Devices = &EntitlementsDevicesConf{}
+				}
+				if conf.Devices.Devices == nil {
+					conf.Devices.Devices = make(map[string]struct{}, 0)
+				}
+				conf.Devices.Devices[fields[0]] = struct{}{}
 			case string(EntitlementKeyFSRead):
 				conf.FSRead = append(conf.FSRead, v)
 			case string(EntitlementKeyFSWrite):
@@ -95,12 +119,34 @@ func (c EntitlementConf) Validate(m map[
 
 func (c EntitlementConf) check(bo build.Options, expected *EntitlementConf) error {
 	for _, e := range bo.Allow {
+		k, rest, _ := strings.Cut(e, "=")
+		switch k {
+		case entitlements.EntitlementDevice.String():
+			if rest == "" {
+				if c.Devices == nil || !c.Devices.All {
+					expected.Devices = &EntitlementsDevicesConf{All: true}
+				}
+				continue
+			}
+			fields, err := csvvalue.Fields(rest, nil)
+			if err != nil {
+				return errors.Wrapf(err, "failed to parse device entitlement %q", rest)
+			}
+			if expected.Devices == nil {
+				expected.Devices = &EntitlementsDevicesConf{}
+			}
+			if expected.Devices.Devices == nil {
+				expected.Devices.Devices = make(map[string]struct{}, 0)
+			}
+			expected.Devices.Devices[fields[0]] = struct{}{}
+		}
+
 		switch e {
-		case entitlements.EntitlementNetworkHost:
+		case entitlements.EntitlementNetworkHost.String():
 			if !c.NetworkHost {
 				expected.NetworkHost = true
 			}
-		case entitlements.EntitlementSecurityInsecure:
+		case entitlements.EntitlementSecurityInsecure.String():
 			if !c.SecurityInsecure {
 				expected.SecurityInsecure = true
 			}
@@ -145,7 +191,9 @@ func (c EntitlementConf) check(bo build.
 			roPaths[p] = struct{}{}
 		}
 		if len(ssh.Paths) == 0 {
-			expected.SSH = true
+			if !c.SSH {
+				expected.SSH = true
+			}
 		}
 	}
 
@@ -185,6 +233,18 @@ func (c EntitlementConf) Prompt(ctx cont
 		flags = append(flags, string(EntitlementKeySecurityInsecure))
 	}
 
+	if c.Devices != nil {
+		if c.Devices.All {
+			msgs = append(msgs, " - Access to CDI devices")
+			flags = append(flags, string(EntitlementKeyDevice))
+		} else {
+			for d := range c.Devices.Devices {
+				msgs = append(msgs, fmt.Sprintf(" - Access to device %s", d))
+				flags = append(flags, string(EntitlementKeyDevice)+"="+d)
+			}
+		}
+	}
+
 	if c.SSH {
 		msgsFS = append(msgsFS, " - Forwarding default SSH agent socket")
 		flagsFS = append(flagsFS, string(EntitlementKeySSH))
@@ -257,7 +317,7 @@ func (c EntitlementConf) Prompt(ctx cont
 		fmt.Fprintf(out, "%s %s %s\n\n", strings.Join(args[:idx+1], " "), strings.Join(slices.Concat(flags, flagsFS), " "), strings.Join(args[idx+1:], " "))
 	}
 
-	fsEntitlementsEnabled := false
+	fsEntitlementsEnabled := true
 	if isRemote {
 		if v, ok := os.LookupEnv("BAKE_ALLOW_REMOTE_FS_ACCESS"); ok {
 			vv, err := strconv.ParseBool(v)
@@ -265,8 +325,6 @@ func (c EntitlementConf) Prompt(ctx cont
 				return errors.Wrapf(err, "failed to parse BAKE_ALLOW_REMOTE_FS_ACCESS value %q", v)
 			}
 			fsEntitlementsEnabled = !vv
-		} else {
-			fsEntitlementsEnabled = true
 		}
 	}
 	v, fsEntitlementsSet := os.LookupEnv("BUILDX_BAKE_ENTITLEMENTS_FS")
@@ -279,11 +337,11 @@ func (c EntitlementConf) Prompt(ctx cont
 	}
 
 	if !fsEntitlementsEnabled && len(msgs) == 0 {
-		if !fsEntitlementsSet {
-			fmt.Fprintf(out, "This warning will become an error in a future release. To enable filesystem entitlements checks at the moment, set BUILDX_BAKE_ENTITLEMENTS_FS=1 .\n\n")
-		}
 		return nil
 	}
+	if fsEntitlementsEnabled && !fsEntitlementsSet && len(msgsFS) != 0 {
+		fmt.Fprintf(out, "To disable filesystem entitlements checks, you can set BUILDX_BAKE_ENTITLEMENTS_FS=0 .\n\n")
+	}
 
 	if term {
 		fmt.Fprintf(out, "Do you want to grant requested privileges and continue? [y/N] ")
diff -pruN 0.19.3+ds1-4/bake/entitlements_test.go 0.21.3-0ubuntu1/bake/entitlements_test.go
--- 0.19.3+ds1-4/bake/entitlements_test.go	2024-12-17 11:25:19.000000000 +0000
+++ 0.21.3-0ubuntu1/bake/entitlements_test.go	2025-03-17 16:14:25.000000000 +0000
@@ -208,8 +208,8 @@ func TestValidateEntitlements(t *testing
 		{
 			name: "NetworkHostMissing",
 			opt: build.Options{
-				Allow: []entitlements.Entitlement{
-					entitlements.EntitlementNetworkHost,
+				Allow: []string{
+					entitlements.EntitlementNetworkHost.String(),
 				},
 			},
 			expected: EntitlementConf{
@@ -223,8 +223,8 @@ func TestValidateEntitlements(t *testing
 				NetworkHost: true,
 			},
 			opt: build.Options{
-				Allow: []entitlements.Entitlement{
-					entitlements.EntitlementNetworkHost,
+				Allow: []string{
+					entitlements.EntitlementNetworkHost.String(),
 				},
 			},
 			expected: EntitlementConf{
@@ -234,9 +234,9 @@ func TestValidateEntitlements(t *testing
 		{
 			name: "SecurityAndNetworkHostMissing",
 			opt: build.Options{
-				Allow: []entitlements.Entitlement{
-					entitlements.EntitlementNetworkHost,
-					entitlements.EntitlementSecurityInsecure,
+				Allow: []string{
+					entitlements.EntitlementNetworkHost.String(),
+					entitlements.EntitlementSecurityInsecure.String(),
 				},
 			},
 			expected: EntitlementConf{
@@ -251,9 +251,9 @@ func TestValidateEntitlements(t *testing
 				NetworkHost: true,
 			},
 			opt: build.Options{
-				Allow: []entitlements.Entitlement{
-					entitlements.EntitlementNetworkHost,
-					entitlements.EntitlementSecurityInsecure,
+				Allow: []string{
+					entitlements.EntitlementNetworkHost.String(),
+					entitlements.EntitlementSecurityInsecure.String(),
 				},
 			},
 			expected: EntitlementConf{
diff -pruN 0.19.3+ds1-4/bake/hcl_test.go 0.21.3-0ubuntu1/bake/hcl_test.go
--- 0.19.3+ds1-4/bake/hcl_test.go	2024-12-17 11:25:19.000000000 +0000
+++ 0.21.3-0ubuntu1/bake/hcl_test.go	2025-03-17 16:14:25.000000000 +0000
@@ -2,8 +2,10 @@ package bake
 
 import (
 	"reflect"
+	"regexp"
 	"testing"
 
+	hcl "github.com/hashicorp/hcl/v2"
 	"github.com/stretchr/testify/require"
 )
 
@@ -17,6 +19,7 @@ func TestHCLBasic(t *testing.T) {
 		target "db" {
 			context = "./db"
 			tags = ["docker.io/tonistiigi/db"]
+			output = ["type=image"]
 		}
 
 		target "webapp" {
@@ -25,6 +28,9 @@ func TestHCLBasic(t *testing.T) {
 			args = {
 				buildno = "123"
 			}
+			output = [
+				{ type = "image" }
+			]
 		}
 
 		target "cross" {
@@ -597,6 +603,167 @@ func TestHCLAttrsCustomType(t *testing.T
 	require.Equal(t, ptrstr("linux/arm64"), c.Targets[0].Args["v1"])
 }
 
+func TestHCLAttrsCapsuleType(t *testing.T) {
+	dt := []byte(`
+	target "app" {
+		attest = [
+			{ type = "provenance", mode = "max" },
+			"type=sbom,disabled=true,generator=foo,\"ENV1=bar,baz\",ENV2=hello",
+		]
+
+		cache-from = [
+			{ type = "registry", ref = "user/app:cache" },
+			"type=local,src=path/to/cache",
+		]
+
+		cache-to = [
+			{ type = "local", dest = "path/to/cache" },
+		]
+
+		output = [
+			{ type = "oci", dest = "../out.tar" },
+			"type=local,dest=../out",
+		]
+
+		secret = [
+			{ id = "mysecret", src = "/local/secret" },
+			{ id = "mysecret2", env = "TOKEN" },
+		]
+
+		ssh = [
+			{ id = "default" },
+			{ id = "key", paths = ["path/to/key"] },
+		]
+	}
+	`)
+
+	c, err := ParseFile(dt, "docker-bake.hcl")
+	require.NoError(t, err)
+
+	require.Equal(t, 1, len(c.Targets))
+	require.Equal(t, []string{"type=provenance,mode=max", "type=sbom,disabled=true,\"ENV1=bar,baz\",ENV2=hello,generator=foo"}, stringify(c.Targets[0].Attest))
+	require.Equal(t, []string{"type=local,dest=../out", "type=oci,dest=../out.tar"}, stringify(c.Targets[0].Outputs))
+	require.Equal(t, []string{"type=local,src=path/to/cache", "user/app:cache"}, stringify(c.Targets[0].CacheFrom))
+	require.Equal(t, []string{"type=local,dest=path/to/cache"}, stringify(c.Targets[0].CacheTo))
+	require.Equal(t, []string{"id=mysecret,src=/local/secret", "id=mysecret2,env=TOKEN"}, stringify(c.Targets[0].Secrets))
+	require.Equal(t, []string{"default", "key=path/to/key"}, stringify(c.Targets[0].SSH))
+}
+
+func TestHCLAttrsCapsuleType_ObjectVars(t *testing.T) {
+	dt := []byte(`
+	variable "foo" {
+		default = "bar"
+	}
+
+	target "app" {
+		cache-from = [
+			{ type = "registry", ref = "user/app:cache" },
+			"type=local,src=path/to/cache",
+		]
+
+		cache-to = [ target.app.cache-from[0] ]
+
+		output = [
+			{ type = "oci", dest = "../out.tar" },
+			"type=local,dest=../out",
+		]
+
+		secret = [
+			{ id = "mysecret", src = "/local/secret" },
+		]
+
+		ssh = [
+			{ id = "default" },
+			{ id = "key", paths = ["path/to/${target.app.output[0].type}"] },
+		]
+	}
+
+	target "web" {
+		cache-from = target.app.cache-from
+
+		output = [ "type=oci,dest=../${foo}.tar" ]
+
+		secret = [
+			{ id = target.app.output[0].type, src = "/${target.app.cache-from[1].type}/secret" },
+		]
+	}
+	`)
+
+	c, err := ParseFile(dt, "docker-bake.hcl")
+	require.NoError(t, err)
+
+	require.Equal(t, 2, len(c.Targets))
+
+	findTarget := func(t *testing.T, name string) *Target {
+		t.Helper()
+		for _, tgt := range c.Targets {
+			if tgt.Name == name {
+				return tgt
+			}
+		}
+		t.Fatalf("could not find target %q", name)
+		return nil
+	}
+
+	app := findTarget(t, "app")
+	require.Equal(t, []string{"type=local,dest=../out", "type=oci,dest=../out.tar"}, stringify(app.Outputs))
+	require.Equal(t, []string{"type=local,src=path/to/cache", "user/app:cache"}, stringify(app.CacheFrom))
+	require.Equal(t, []string{"user/app:cache"}, stringify(app.CacheTo))
+	require.Equal(t, []string{"id=mysecret,src=/local/secret"}, stringify(app.Secrets))
+	require.Equal(t, []string{"default", "key=path/to/oci"}, stringify(app.SSH))
+
+	web := findTarget(t, "web")
+	require.Equal(t, []string{"type=oci,dest=../bar.tar"}, stringify(web.Outputs))
+	require.Equal(t, []string{"type=local,src=path/to/cache", "user/app:cache"}, stringify(web.CacheFrom))
+	require.Equal(t, []string{"id=oci,src=/local/secret"}, stringify(web.Secrets))
+}
+
+func TestHCLAttrsCapsuleType_MissingVars(t *testing.T) {
+	dt := []byte(`
+	target "app" {
+		attest = [
+			"type=sbom,disabled=${SBOM}",
+		]
+
+		cache-from = [
+			{ type = "registry", ref = "user/app:${FOO1}" },
+      "type=local,src=path/to/cache:${FOO2}",
+		]
+
+		cache-to = [
+			{ type = "local", dest = "path/to/${BAR}" },
+		]
+
+		output = [
+			{ type = "oci", dest = "../${OUTPUT}.tar" },
+		]
+
+		secret = [
+			{ id = "mysecret", src = "/local/${SECRET}" },
+		]
+
+		ssh = [
+			{ id = "key", paths = ["path/to/${SSH_KEY}"] },
+		]
+	}
+	`)
+
+	var diags hcl.Diagnostics
+	_, err := ParseFile(dt, "docker-bake.hcl")
+	require.ErrorAs(t, err, &diags)
+
+	re := regexp.MustCompile(`There is no variable named "([\w\d_]+)"`)
+	var actual []string
+	for _, diag := range diags {
+		if m := re.FindStringSubmatch(diag.Error()); m != nil {
+			actual = append(actual, m[1])
+		}
+	}
+	require.ElementsMatch(t,
+		[]string{"SBOM", "FOO1", "FOO2", "BAR", "OUTPUT", "SECRET", "SSH_KEY"},
+		actual)
+}
+
 func TestHCLMultiFileAttrs(t *testing.T) {
 	dt := []byte(`
 		variable "FOO" {
diff -pruN 0.19.3+ds1-4/bake/hclparser/LICENSE 0.21.3-0ubuntu1/bake/hclparser/LICENSE
--- 0.19.3+ds1-4/bake/hclparser/LICENSE	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/bake/hclparser/LICENSE	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,355 @@
+Copyright (c) 2014 HashiCorp, Inc.
+
+Mozilla Public License, version 2.0
+
+1. Definitions
+
+1.1. “Contributor”
+
+     means each individual or legal entity that creates, contributes to the
+     creation of, or owns Covered Software.
+
+1.2. “Contributor Version”
+
+     means the combination of the Contributions of others (if any) used by a
+     Contributor and that particular Contributor’s Contribution.
+
+1.3. “Contribution”
+
+     means Covered Software of a particular Contributor.
+
+1.4. “Covered Software”
+
+     means Source Code Form to which the initial Contributor has attached the
+     notice in Exhibit A, the Executable Form of such Source Code Form, and
+     Modifications of such Source Code Form, in each case including portions
+     thereof.
+
+1.5. “Incompatible With Secondary Licenses”
+     means
+
+     a. that the initial Contributor has attached the notice described in
+        Exhibit B to the Covered Software; or
+
+     b. that the Covered Software was made available under the terms of version
+        1.1 or earlier of the License, but not also under the terms of a
+        Secondary License.
+
+1.6. “Executable Form”
+
+     means any form of the work other than Source Code Form.
+
+1.7. “Larger Work”
+
+     means a work that combines Covered Software with other material, in a separate
+     file or files, that is not Covered Software.
+
+1.8. “License”
+
+     means this document.
+
+1.9. “Licensable”
+
+     means having the right to grant, to the maximum extent possible, whether at the
+     time of the initial grant or subsequently, any and all of the rights conveyed by
+     this License.
+
+1.10. “Modifications”
+
+     means any of the following:
+
+     a. any file in Source Code Form that results from an addition to, deletion
+        from, or modification of the contents of Covered Software; or
+
+     b. any new file in Source Code Form that contains any Covered Software.
+
+1.11. “Patent Claims” of a Contributor
+
+      means any patent claim(s), including without limitation, method, process,
+      and apparatus claims, in any patent Licensable by such Contributor that
+      would be infringed, but for the grant of the License, by the making,
+      using, selling, offering for sale, having made, import, or transfer of
+      either its Contributions or its Contributor Version.
+
+1.12. “Secondary License”
+
+      means either the GNU General Public License, Version 2.0, the GNU Lesser
+      General Public License, Version 2.1, the GNU Affero General Public
+      License, Version 3.0, or any later versions of those licenses.
+
+1.13. “Source Code Form”
+
+      means the form of the work preferred for making modifications.
+
+1.14. “You” (or “Your”)
+
+      means an individual or a legal entity exercising rights under this
+      License. For legal entities, “You” includes any entity that controls, is
+      controlled by, or is under common control with You. For purposes of this
+      definition, “control” means (a) the power, direct or indirect, to cause
+      the direction or management of such entity, whether by contract or
+      otherwise, or (b) ownership of more than fifty percent (50%) of the
+      outstanding shares or beneficial ownership of such entity.
+
+
+2. License Grants and Conditions
+
+2.1. Grants
+
+     Each Contributor hereby grants You a world-wide, royalty-free,
+     non-exclusive license:
+
+     a. under intellectual property rights (other than patent or trademark)
+        Licensable by such Contributor to use, reproduce, make available,
+        modify, display, perform, distribute, and otherwise exploit its
+        Contributions, either on an unmodified basis, with Modifications, or as
+        part of a Larger Work; and
+
+     b. under Patent Claims of such Contributor to make, use, sell, offer for
+        sale, have made, import, and otherwise transfer either its Contributions
+        or its Contributor Version.
+
+2.2. Effective Date
+
+     The licenses granted in Section 2.1 with respect to any Contribution become
+     effective for each Contribution on the date the Contributor first distributes
+     such Contribution.
+
+2.3. Limitations on Grant Scope
+
+     The licenses granted in this Section 2 are the only rights granted under this
+     License. No additional rights or licenses will be implied from the distribution
+     or licensing of Covered Software under this License. Notwithstanding Section
+     2.1(b) above, no patent license is granted by a Contributor:
+
+     a. for any code that a Contributor has removed from Covered Software; or
+
+     b. for infringements caused by: (i) Your and any other third party’s
+        modifications of Covered Software, or (ii) the combination of its
+        Contributions with other software (except as part of its Contributor
+        Version); or
+
+     c. under Patent Claims infringed by Covered Software in the absence of its
+        Contributions.
+
+     This License does not grant any rights in the trademarks, service marks, or
+     logos of any Contributor (except as may be necessary to comply with the
+     notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+     No Contributor makes additional grants as a result of Your choice to
+     distribute the Covered Software under a subsequent version of this License
+     (see Section 10.2) or under the terms of a Secondary License (if permitted
+     under the terms of Section 3.3).
+
+2.5. Representation
+
+     Each Contributor represents that the Contributor believes its Contributions
+     are its original creation(s) or it has sufficient rights to grant the
+     rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+     This License is not intended to limit any rights You have under applicable
+     copyright doctrines of fair use, fair dealing, or other equivalents.
+
+2.7. Conditions
+
+     Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
+     Section 2.1.
+
+
+3. Responsibilities
+
+3.1. Distribution of Source Form
+
+     All distribution of Covered Software in Source Code Form, including any
+     Modifications that You create or to which You contribute, must be under the
+     terms of this License. You must inform recipients that the Source Code Form
+     of the Covered Software is governed by the terms of this License, and how
+     they can obtain a copy of this License. You may not attempt to alter or
+     restrict the recipients’ rights in the Source Code Form.
+
+3.2. Distribution of Executable Form
+
+     If You distribute Covered Software in Executable Form then:
+
+     a. such Covered Software must also be made available in Source Code Form,
+        as described in Section 3.1, and You must inform recipients of the
+        Executable Form how they can obtain a copy of such Source Code Form by
+        reasonable means in a timely manner, at a charge no more than the cost
+        of distribution to the recipient; and
+
+     b. You may distribute such Executable Form under the terms of this License,
+        or sublicense it under different terms, provided that the license for
+        the Executable Form does not attempt to limit or alter the recipients’
+        rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+     You may create and distribute a Larger Work under terms of Your choice,
+     provided that You also comply with the requirements of this License for the
+     Covered Software. If the Larger Work is a combination of Covered Software
+     with a work governed by one or more Secondary Licenses, and the Covered
+     Software is not Incompatible With Secondary Licenses, this License permits
+     You to additionally distribute such Covered Software under the terms of
+     such Secondary License(s), so that the recipient of the Larger Work may, at
+     their option, further distribute the Covered Software under the terms of
+     either this License or such Secondary License(s).
+
+3.4. Notices
+
+     You may not remove or alter the substance of any license notices (including
+     copyright notices, patent notices, disclaimers of warranty, or limitations
+     of liability) contained within the Source Code Form of the Covered
+     Software, except that You may alter any license notices to the extent
+     required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+     You may choose to offer, and to charge a fee for, warranty, support,
+     indemnity or liability obligations to one or more recipients of Covered
+     Software. However, You may do so only on Your own behalf, and not on behalf
+     of any Contributor. You must make it absolutely clear that any such
+     warranty, support, indemnity, or liability obligation is offered by You
+     alone, and You hereby agree to indemnify every Contributor for any
+     liability incurred by such Contributor as a result of warranty, support,
+     indemnity or liability terms You offer. You may include additional
+     disclaimers of warranty and limitations of liability specific to any
+     jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+
+   If it is impossible for You to comply with any of the terms of this License
+   with respect to some or all of the Covered Software due to statute, judicial
+   order, or regulation then You must: (a) comply with the terms of this License
+   to the maximum extent possible; and (b) describe the limitations and the code
+   they affect. Such description must be placed in a text file included with all
+   distributions of the Covered Software under this License. Except to the
+   extent prohibited by statute or regulation, such description must be
+   sufficiently detailed for a recipient of ordinary skill to be able to
+   understand it.
+
+5. Termination
+
+5.1. The rights granted under this License will terminate automatically if You
+     fail to comply with any of its terms. However, if You become compliant,
+     then the rights granted under this License from a particular Contributor
+     are reinstated (a) provisionally, unless and until such Contributor
+     explicitly and finally terminates Your grants, and (b) on an ongoing basis,
+     if such Contributor fails to notify You of the non-compliance by some
+     reasonable means prior to 60 days after You have come back into compliance.
+     Moreover, Your grants from a particular Contributor are reinstated on an
+     ongoing basis if such Contributor notifies You of the non-compliance by
+     some reasonable means, this is the first time You have received notice of
+     non-compliance with this License from such Contributor, and You become
+     compliant prior to 30 days after Your receipt of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+     infringement claim (excluding declaratory judgment actions, counter-claims,
+     and cross-claims) alleging that a Contributor Version directly or
+     indirectly infringes any patent, then the rights granted to You by any and
+     all Contributors for the Covered Software under Section 2.1 of this License
+     shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
+     license agreements (excluding distributors and resellers) which have been
+     validly granted by You or Your distributors under this License prior to
+     termination shall survive termination.
+
+6. Disclaimer of Warranty
+
+   Covered Software is provided under this License on an “as is” basis, without
+   warranty of any kind, either expressed, implied, or statutory, including,
+   without limitation, warranties that the Covered Software is free of defects,
+   merchantable, fit for a particular purpose or non-infringing. The entire
+   risk as to the quality and performance of the Covered Software is with You.
+   Should any Covered Software prove defective in any respect, You (not any
+   Contributor) assume the cost of any necessary servicing, repair, or
+   correction. This disclaimer of warranty constitutes an essential part of this
+   License. No use of  any Covered Software is authorized under this License
+   except under this disclaimer.
+
+7. Limitation of Liability
+
+   Under no circumstances and under no legal theory, whether tort (including
+   negligence), contract, or otherwise, shall any Contributor, or anyone who
+   distributes Covered Software as permitted above, be liable to You for any
+   direct, indirect, special, incidental, or consequential damages of any
+   character including, without limitation, damages for lost profits, loss of
+   goodwill, work stoppage, computer failure or malfunction, or any and all
+   other commercial damages or losses, even if such party shall have been
+   informed of the possibility of such damages. This limitation of liability
+   shall not apply to liability for death or personal injury resulting from such
+   party’s negligence to the extent applicable law prohibits such limitation.
+   Some jurisdictions do not allow the exclusion or limitation of incidental or
+   consequential damages, so this exclusion and limitation may not apply to You.
+
+8. Litigation
+
+   Any litigation relating to this License may be brought only in the courts of
+   a jurisdiction where the defendant maintains its principal place of business
+   and such litigation shall be governed by laws of that jurisdiction, without
+   reference to its conflict-of-law provisions. Nothing in this Section shall
+   prevent a party’s ability to bring cross-claims or counter-claims.
+
+9. Miscellaneous
+
+   This License represents the complete agreement concerning the subject matter
+   hereof. If any provision of this License is held to be unenforceable, such
+   provision shall be reformed only to the extent necessary to make it
+   enforceable. Any law or regulation which provides that the language of a
+   contract shall be construed against the drafter shall not be used to construe
+   this License against a Contributor.
+
+
+10. Versions of the License
+
+10.1. New Versions
+
+      Mozilla Foundation is the license steward. Except as provided in Section
+      10.3, no one other than the license steward has the right to modify or
+      publish new versions of this License. Each version will be given a
+      distinguishing version number.
+
+10.2. Effect of New Versions
+
+      You may distribute the Covered Software under the terms of the version of
+      the License under which You originally received the Covered Software, or
+      under the terms of any subsequent version published by the license
+      steward.
+
+10.3. Modified Versions
+
+      If you create software not governed by this License, and you want to
+      create a new license for such software, you may create and use a modified
+      version of this License if you rename the license and remove any
+      references to the name of the license steward (except to note that such
+      modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses
+      If You choose to distribute Source Code Form that is Incompatible With
+      Secondary Licenses under the terms of this version of the License, the
+      notice described in Exhibit B of this License must be attached.
+
+Exhibit A - Source Code Form License Notice
+
+      This Source Code Form is subject to the
+      terms of the Mozilla Public License, v.
+      2.0. If a copy of the MPL was not
+      distributed with this file, You can
+      obtain one at
+      http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular file, then
+You may include the notice in a location (such as a LICENSE file in a relevant
+directory) where a recipient would be likely to look for such a notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - “Incompatible With Secondary Licenses” Notice
+
+      This Source Code Form is “Incompatible
+      With Secondary Licenses”, as defined by
+      the Mozilla Public License, v. 2.0.
diff -pruN 0.19.3+ds1-4/bake/hclparser/gohcl/decode.go 0.21.3-0ubuntu1/bake/hclparser/gohcl/decode.go
--- 0.19.3+ds1-4/bake/hclparser/gohcl/decode.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/bake/hclparser/gohcl/decode.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,348 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package gohcl
+
+import (
+	"fmt"
+	"reflect"
+
+	"github.com/hashicorp/hcl/v2"
+	"github.com/zclconf/go-cty/cty"
+	"github.com/zclconf/go-cty/cty/convert"
+	"github.com/zclconf/go-cty/cty/gocty"
+)
+
+// DecodeOptions allows customizing sections of the decoding process.
+type DecodeOptions struct {
+	ImpliedType func(gv interface{}) (cty.Type, error)
+	Convert     func(in cty.Value, want cty.Type) (cty.Value, error)
+}
+
+func (o DecodeOptions) DecodeBody(body hcl.Body, ctx *hcl.EvalContext, val interface{}) hcl.Diagnostics {
+	o = o.withDefaults()
+
+	rv := reflect.ValueOf(val)
+	if rv.Kind() != reflect.Ptr {
+		panic(fmt.Sprintf("target value must be a pointer, not %s", rv.Type().String()))
+	}
+
+	return o.decodeBodyToValue(body, ctx, rv.Elem())
+}
+
+// DecodeBody extracts the configuration within the given body into the given
+// value. This value must be a non-nil pointer to either a struct or
+// a map, where in the former case the configuration will be decoded using
+// struct tags and in the latter case only attributes are allowed and their
+// values are decoded into the map.
+//
+// The given EvalContext is used to resolve any variables or functions in
+// expressions encountered while decoding. This may be nil to require only
+// constant values, for simple applications that do not support variables or
+// functions.
+//
+// The returned diagnostics should be inspected with its HasErrors method to
+// determine if the populated value is valid and complete. If error diagnostics
+// are returned then the given value may have been partially-populated but
+// may still be accessed by a careful caller for static analysis and editor
+// integration use-cases.
+func DecodeBody(body hcl.Body, ctx *hcl.EvalContext, val interface{}) hcl.Diagnostics {
+	return DecodeOptions{}.DecodeBody(body, ctx, val)
+}
+
+func (o DecodeOptions) decodeBodyToValue(body hcl.Body, ctx *hcl.EvalContext, val reflect.Value) hcl.Diagnostics {
+	et := val.Type()
+	switch et.Kind() {
+	case reflect.Struct:
+		return o.decodeBodyToStruct(body, ctx, val)
+	case reflect.Map:
+		return o.decodeBodyToMap(body, ctx, val)
+	default:
+		panic(fmt.Sprintf("target value must be pointer to struct or map, not %s", et.String()))
+	}
+}
+
+func (o DecodeOptions) decodeBodyToStruct(body hcl.Body, ctx *hcl.EvalContext, val reflect.Value) hcl.Diagnostics {
+	schema, partial := ImpliedBodySchema(val.Interface())
+
+	var content *hcl.BodyContent
+	var leftovers hcl.Body
+	var diags hcl.Diagnostics
+	if partial {
+		content, leftovers, diags = body.PartialContent(schema)
+	} else {
+		content, diags = body.Content(schema)
+	}
+	if content == nil {
+		return diags
+	}
+
+	tags := getFieldTags(val.Type())
+
+	if tags.Body != nil {
+		fieldIdx := *tags.Body
+		field := val.Type().Field(fieldIdx)
+		fieldV := val.Field(fieldIdx)
+		switch {
+		case bodyType.AssignableTo(field.Type):
+			fieldV.Set(reflect.ValueOf(body))
+
+		default:
+			diags = append(diags, o.decodeBodyToValue(body, ctx, fieldV)...)
+		}
+	}
+
+	if tags.Remain != nil {
+		fieldIdx := *tags.Remain
+		field := val.Type().Field(fieldIdx)
+		fieldV := val.Field(fieldIdx)
+		switch {
+		case bodyType.AssignableTo(field.Type):
+			fieldV.Set(reflect.ValueOf(leftovers))
+		case attrsType.AssignableTo(field.Type):
+			attrs, attrsDiags := leftovers.JustAttributes()
+			if len(attrsDiags) > 0 {
+				diags = append(diags, attrsDiags...)
+			}
+			fieldV.Set(reflect.ValueOf(attrs))
+		default:
+			diags = append(diags, o.decodeBodyToValue(leftovers, ctx, fieldV)...)
+		}
+	}
+
+	for name, fieldIdx := range tags.Attributes {
+		attr := content.Attributes[name]
+		field := val.Type().Field(fieldIdx)
+		fieldV := val.Field(fieldIdx)
+
+		if attr == nil {
+			if !exprType.AssignableTo(field.Type) {
+				continue
+			}
+
+			// As a special case, if the target is of type hcl.Expression then
+			// we'll assign an actual expression that evalues to a cty null,
+			// so the caller can deal with it within the cty realm rather
+			// than within the Go realm.
+			synthExpr := hcl.StaticExpr(cty.NullVal(cty.DynamicPseudoType), body.MissingItemRange())
+			fieldV.Set(reflect.ValueOf(synthExpr))
+			continue
+		}
+
+		switch {
+		case attrType.AssignableTo(field.Type):
+			fieldV.Set(reflect.ValueOf(attr))
+		case exprType.AssignableTo(field.Type):
+			fieldV.Set(reflect.ValueOf(attr.Expr))
+		default:
+			diags = append(diags, o.DecodeExpression(
+				attr.Expr, ctx, fieldV.Addr().Interface(),
+			)...)
+		}
+	}
+
+	blocksByType := content.Blocks.ByType()
+
+	for typeName, fieldIdx := range tags.Blocks {
+		blocks := blocksByType[typeName]
+		field := val.Type().Field(fieldIdx)
+
+		ty := field.Type
+		isSlice := false
+		isPtr := false
+		if ty.Kind() == reflect.Slice {
+			isSlice = true
+			ty = ty.Elem()
+		}
+		if ty.Kind() == reflect.Ptr {
+			isPtr = true
+			ty = ty.Elem()
+		}
+
+		if len(blocks) > 1 && !isSlice {
+			diags = append(diags, &hcl.Diagnostic{
+				Severity: hcl.DiagError,
+				Summary:  fmt.Sprintf("Duplicate %s block", typeName),
+				Detail: fmt.Sprintf(
+					"Only one %s block is allowed. Another was defined at %s.",
+					typeName, blocks[0].DefRange.String(),
+				),
+				Subject: &blocks[1].DefRange,
+			})
+			continue
+		}
+
+		if len(blocks) == 0 {
+			if isSlice || isPtr {
+				if val.Field(fieldIdx).IsNil() {
+					val.Field(fieldIdx).Set(reflect.Zero(field.Type))
+				}
+			} else {
+				diags = append(diags, &hcl.Diagnostic{
+					Severity: hcl.DiagError,
+					Summary:  fmt.Sprintf("Missing %s block", typeName),
+					Detail:   fmt.Sprintf("A %s block is required.", typeName),
+					Subject:  body.MissingItemRange().Ptr(),
+				})
+			}
+			continue
+		}
+
+		switch {
+		case isSlice:
+			elemType := ty
+			if isPtr {
+				elemType = reflect.PointerTo(ty)
+			}
+			sli := val.Field(fieldIdx)
+			if sli.IsNil() {
+				sli = reflect.MakeSlice(reflect.SliceOf(elemType), len(blocks), len(blocks))
+			}
+
+			for i, block := range blocks {
+				if isPtr {
+					if i >= sli.Len() {
+						sli = reflect.Append(sli, reflect.New(ty))
+					}
+					v := sli.Index(i)
+					if v.IsNil() {
+						v = reflect.New(ty)
+					}
+					diags = append(diags, o.decodeBlockToValue(block, ctx, v.Elem())...)
+					sli.Index(i).Set(v)
+				} else {
+					if i >= sli.Len() {
+						sli = reflect.Append(sli, reflect.Indirect(reflect.New(ty)))
+					}
+					diags = append(diags, o.decodeBlockToValue(block, ctx, sli.Index(i))...)
+				}
+			}
+
+			if sli.Len() > len(blocks) {
+				sli.SetLen(len(blocks))
+			}
+
+			val.Field(fieldIdx).Set(sli)
+
+		default:
+			block := blocks[0]
+			if isPtr {
+				v := val.Field(fieldIdx)
+				if v.IsNil() {
+					v = reflect.New(ty)
+				}
+				diags = append(diags, o.decodeBlockToValue(block, ctx, v.Elem())...)
+				val.Field(fieldIdx).Set(v)
+			} else {
+				diags = append(diags, o.decodeBlockToValue(block, ctx, val.Field(fieldIdx))...)
+			}
+		}
+	}
+
+	return diags
+}
+
+func (o DecodeOptions) decodeBodyToMap(body hcl.Body, ctx *hcl.EvalContext, v reflect.Value) hcl.Diagnostics {
+	attrs, diags := body.JustAttributes()
+	if attrs == nil {
+		return diags
+	}
+
+	mv := reflect.MakeMap(v.Type())
+
+	for k, attr := range attrs {
+		switch {
+		case attrType.AssignableTo(v.Type().Elem()):
+			mv.SetMapIndex(reflect.ValueOf(k), reflect.ValueOf(attr))
+		case exprType.AssignableTo(v.Type().Elem()):
+			mv.SetMapIndex(reflect.ValueOf(k), reflect.ValueOf(attr.Expr))
+		default:
+			ev := reflect.New(v.Type().Elem())
+			diags = append(diags, o.DecodeExpression(attr.Expr, ctx, ev.Interface())...)
+			mv.SetMapIndex(reflect.ValueOf(k), ev.Elem())
+		}
+	}
+
+	v.Set(mv)
+
+	return diags
+}
+
+func (o DecodeOptions) decodeBlockToValue(block *hcl.Block, ctx *hcl.EvalContext, v reflect.Value) hcl.Diagnostics {
+	diags := o.decodeBodyToValue(block.Body, ctx, v)
+
+	if len(block.Labels) > 0 {
+		blockTags := getFieldTags(v.Type())
+		for li, lv := range block.Labels {
+			lfieldIdx := blockTags.Labels[li].FieldIndex
+			v.Field(lfieldIdx).Set(reflect.ValueOf(lv))
+		}
+	}
+
+	return diags
+}
+
+func (o DecodeOptions) DecodeExpression(expr hcl.Expression, ctx *hcl.EvalContext, val interface{}) hcl.Diagnostics {
+	o = o.withDefaults()
+
+	srcVal, diags := expr.Value(ctx)
+
+	convTy, err := o.ImpliedType(val)
+	if err != nil {
+		panic(fmt.Sprintf("unsuitable DecodeExpression target: %s", err))
+	}
+
+	srcVal, err = o.Convert(srcVal, convTy)
+	if err != nil {
+		diags = append(diags, &hcl.Diagnostic{
+			Severity: hcl.DiagError,
+			Summary:  "Unsuitable value type",
+			Detail:   fmt.Sprintf("Unsuitable value: %s", err.Error()),
+			Subject:  expr.StartRange().Ptr(),
+			Context:  expr.Range().Ptr(),
+		})
+		return diags
+	}
+
+	err = gocty.FromCtyValue(srcVal, val)
+	if err != nil {
+		diags = append(diags, &hcl.Diagnostic{
+			Severity: hcl.DiagError,
+			Summary:  "Unsuitable value type",
+			Detail:   fmt.Sprintf("Unsuitable value: %s", err.Error()),
+			Subject:  expr.StartRange().Ptr(),
+			Context:  expr.Range().Ptr(),
+		})
+	}
+
+	return diags
+}
+
+// DecodeExpression extracts the value of the given expression into the given
+// value. This value must be something that gocty is able to decode into,
+// since the final decoding is delegated to that package.
+//
+// The given EvalContext is used to resolve any variables or functions in
+// expressions encountered while decoding. This may be nil to require only
+// constant values, for simple applications that do not support variables or
+// functions.
+//
+// The returned diagnostics should be inspected with its HasErrors method to
+// determine if the populated value is valid and complete. If error diagnostics
+// are returned then the given value may have been partially-populated but
+// may still be accessed by a careful caller for static analysis and editor
+// integration use-cases.
+func DecodeExpression(expr hcl.Expression, ctx *hcl.EvalContext, val interface{}) hcl.Diagnostics {
+	return DecodeOptions{}.DecodeExpression(expr, ctx, val)
+}
+
+func (o DecodeOptions) withDefaults() DecodeOptions {
+	if o.ImpliedType == nil {
+		o.ImpliedType = gocty.ImpliedType
+	}
+
+	if o.Convert == nil {
+		o.Convert = convert.Convert
+	}
+	return o
+}
diff -pruN 0.19.3+ds1-4/bake/hclparser/gohcl/decode_test.go 0.21.3-0ubuntu1/bake/hclparser/gohcl/decode_test.go
--- 0.19.3+ds1-4/bake/hclparser/gohcl/decode_test.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/bake/hclparser/gohcl/decode_test.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,806 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package gohcl
+
+import (
+	"encoding/json"
+	"fmt"
+	"reflect"
+	"testing"
+
+	"github.com/davecgh/go-spew/spew"
+	"github.com/hashicorp/hcl/v2"
+	hclJSON "github.com/hashicorp/hcl/v2/json"
+	"github.com/zclconf/go-cty/cty"
+)
+
+func TestDecodeBody(t *testing.T) {
+	deepEquals := func(other interface{}) func(v interface{}) bool {
+		return func(v interface{}) bool {
+			return reflect.DeepEqual(v, other)
+		}
+	}
+
+	type withNameExpression struct {
+		Name hcl.Expression `hcl:"name"`
+	}
+
+	type withTwoAttributes struct {
+		A string `hcl:"a,optional"`
+		B string `hcl:"b,optional"`
+	}
+
+	type withNestedBlock struct {
+		Plain  string             `hcl:"plain,optional"`
+		Nested *withTwoAttributes `hcl:"nested,block"`
+	}
+
+	type withListofNestedBlocks struct {
+		Nested []*withTwoAttributes `hcl:"nested,block"`
+	}
+
+	type withListofNestedBlocksNoPointers struct {
+		Nested []withTwoAttributes `hcl:"nested,block"`
+	}
+
+	tests := []struct {
+		Body      map[string]interface{}
+		Target    func() interface{}
+		Check     func(v interface{}) bool
+		DiagCount int
+	}{
+		{
+			map[string]interface{}{},
+			makeInstantiateType(struct{}{}),
+			deepEquals(struct{}{}),
+			0,
+		},
+		{
+			map[string]interface{}{},
+			makeInstantiateType(struct {
+				Name string `hcl:"name"`
+			}{}),
+			deepEquals(struct {
+				Name string `hcl:"name"`
+			}{}),
+			1, // name is required
+		},
+		{
+			map[string]interface{}{},
+			makeInstantiateType(struct {
+				Name *string `hcl:"name"`
+			}{}),
+			deepEquals(struct {
+				Name *string `hcl:"name"`
+			}{}),
+			0,
+		}, // name nil
+		{
+			map[string]interface{}{},
+			makeInstantiateType(struct {
+				Name string `hcl:"name,optional"`
+			}{}),
+			deepEquals(struct {
+				Name string `hcl:"name,optional"`
+			}{}),
+			0,
+		}, // name optional
+		{
+			map[string]interface{}{},
+			makeInstantiateType(withNameExpression{}),
+			func(v interface{}) bool {
+				if v == nil {
+					return false
+				}
+
+				wne, valid := v.(withNameExpression)
+				if !valid {
+					return false
+				}
+
+				if wne.Name == nil {
+					return false
+				}
+
+				nameVal, _ := wne.Name.Value(nil)
+				return nameVal.IsNull()
+			},
+			0,
+		},
+		{
+			map[string]interface{}{
+				"name": "Ermintrude",
+			},
+			makeInstantiateType(withNameExpression{}),
+			func(v interface{}) bool {
+				if v == nil {
+					return false
+				}
+
+				wne, valid := v.(withNameExpression)
+				if !valid {
+					return false
+				}
+
+				if wne.Name == nil {
+					return false
+				}
+
+				nameVal, _ := wne.Name.Value(nil)
+				return nameVal.Equals(cty.StringVal("Ermintrude")).True()
+			},
+			0,
+		},
+		{
+			map[string]interface{}{
+				"name": "Ermintrude",
+			},
+			makeInstantiateType(struct {
+				Name string `hcl:"name"`
+			}{}),
+			deepEquals(struct {
+				Name string `hcl:"name"`
+			}{"Ermintrude"}),
+			0,
+		},
+		{
+			map[string]interface{}{
+				"name": "Ermintrude",
+				"age":  23,
+			},
+			makeInstantiateType(struct {
+				Name string `hcl:"name"`
+			}{}),
+			deepEquals(struct {
+				Name string `hcl:"name"`
+			}{"Ermintrude"}),
+			1, // Extraneous "age" property
+		},
+		{
+			map[string]interface{}{
+				"name": "Ermintrude",
+				"age":  50,
+			},
+			makeInstantiateType(struct {
+				Name  string         `hcl:"name"`
+				Attrs hcl.Attributes `hcl:",remain"`
+			}{}),
+			func(gotI interface{}) bool {
+				got := gotI.(struct {
+					Name  string         `hcl:"name"`
+					Attrs hcl.Attributes `hcl:",remain"`
+				})
+				return got.Name == "Ermintrude" && len(got.Attrs) == 1 && got.Attrs["age"] != nil
+			},
+			0,
+		},
+		{
+			map[string]interface{}{
+				"name": "Ermintrude",
+				"age":  50,
+			},
+			makeInstantiateType(struct {
+				Name   string   `hcl:"name"`
+				Remain hcl.Body `hcl:",remain"`
+			}{}),
+			func(gotI interface{}) bool {
+				got := gotI.(struct {
+					Name   string   `hcl:"name"`
+					Remain hcl.Body `hcl:",remain"`
+				})
+
+				attrs, _ := got.Remain.JustAttributes()
+
+				return got.Name == "Ermintrude" && len(attrs) == 1 && attrs["age"] != nil
+			},
+			0,
+		},
+		{
+			map[string]interface{}{
+				"name":   "Ermintrude",
+				"living": true,
+			},
+			makeInstantiateType(struct {
+				Name   string               `hcl:"name"`
+				Remain map[string]cty.Value `hcl:",remain"`
+			}{}),
+			deepEquals(struct {
+				Name   string               `hcl:"name"`
+				Remain map[string]cty.Value `hcl:",remain"`
+			}{
+				Name: "Ermintrude",
+				Remain: map[string]cty.Value{
+					"living": cty.True,
+				},
+			}),
+			0,
+		},
+		{
+			map[string]interface{}{
+				"name": "Ermintrude",
+				"age":  50,
+			},
+			makeInstantiateType(struct {
+				Name   string   `hcl:"name"`
+				Body   hcl.Body `hcl:",body"`
+				Remain hcl.Body `hcl:",remain"`
+			}{}),
+			func(gotI interface{}) bool {
+				got := gotI.(struct {
+					Name   string   `hcl:"name"`
+					Body   hcl.Body `hcl:",body"`
+					Remain hcl.Body `hcl:",remain"`
+				})
+
+				attrs, _ := got.Body.JustAttributes()
+
+				return got.Name == "Ermintrude" && len(attrs) == 2 &&
+					attrs["name"] != nil && attrs["age"] != nil
+			},
+			0,
+		},
+		{
+			map[string]interface{}{
+				"noodle": map[string]interface{}{},
+			},
+			makeInstantiateType(struct {
+				Noodle struct{} `hcl:"noodle,block"`
+			}{}),
+			func(gotI interface{}) bool {
+				// Generating no diagnostics is good enough for this one.
+				return true
+			},
+			0,
+		},
+		{
+			map[string]interface{}{
+				"noodle": []map[string]interface{}{{}},
+			},
+			makeInstantiateType(struct {
+				Noodle struct{} `hcl:"noodle,block"`
+			}{}),
+			func(gotI interface{}) bool {
+				// Generating no diagnostics is good enough for this one.
+				return true
+			},
+			0,
+		},
+		{
+			map[string]interface{}{
+				"noodle": []map[string]interface{}{{}, {}},
+			},
+			makeInstantiateType(struct {
+				Noodle struct{} `hcl:"noodle,block"`
+			}{}),
+			func(gotI interface{}) bool {
+				// Generating one diagnostic is good enough for this one.
+				return true
+			},
+			1,
+		},
+		{
+			map[string]interface{}{},
+			makeInstantiateType(struct {
+				Noodle struct{} `hcl:"noodle,block"`
+			}{}),
+			func(gotI interface{}) bool {
+				// Generating one diagnostic is good enough for this one.
+				return true
+			},
+			1,
+		},
+		{
+			map[string]interface{}{
+				"noodle": []map[string]interface{}{},
+			},
+			makeInstantiateType(struct {
+				Noodle struct{} `hcl:"noodle,block"`
+			}{}),
+			func(gotI interface{}) bool {
+				// Generating one diagnostic is good enough for this one.
+				return true
+			},
+			1,
+		},
+		{
+			map[string]interface{}{
+				"noodle": map[string]interface{}{},
+			},
+			makeInstantiateType(struct {
+				Noodle *struct{} `hcl:"noodle,block"`
+			}{}),
+			func(gotI interface{}) bool {
+				return gotI.(struct {
+					Noodle *struct{} `hcl:"noodle,block"`
+				}).Noodle != nil
+			},
+			0,
+		},
+		{
+			map[string]interface{}{
+				"noodle": []map[string]interface{}{{}},
+			},
+			makeInstantiateType(struct {
+				Noodle *struct{} `hcl:"noodle,block"`
+			}{}),
+			func(gotI interface{}) bool {
+				return gotI.(struct {
+					Noodle *struct{} `hcl:"noodle,block"`
+				}).Noodle != nil
+			},
+			0,
+		},
+		{
+			map[string]interface{}{
+				"noodle": []map[string]interface{}{},
+			},
+			makeInstantiateType(struct {
+				Noodle *struct{} `hcl:"noodle,block"`
+			}{}),
+			func(gotI interface{}) bool {
+				return gotI.(struct {
+					Noodle *struct{} `hcl:"noodle,block"`
+				}).Noodle == nil
+			},
+			0,
+		},
+		{
+			map[string]interface{}{
+				"noodle": []map[string]interface{}{{}, {}},
+			},
+			makeInstantiateType(struct {
+				Noodle *struct{} `hcl:"noodle,block"`
+			}{}),
+			func(gotI interface{}) bool {
+				// Generating one diagnostic is good enough for this one.
+				return true
+			},
+			1,
+		},
+		{
+			map[string]interface{}{
+				"noodle": []map[string]interface{}{},
+			},
+			makeInstantiateType(struct {
+				Noodle []struct{} `hcl:"noodle,block"`
+			}{}),
+			func(gotI interface{}) bool {
+				noodle := gotI.(struct {
+					Noodle []struct{} `hcl:"noodle,block"`
+				}).Noodle
+				return len(noodle) == 0
+			},
+			0,
+		},
+		{
+			map[string]interface{}{
+				"noodle": []map[string]interface{}{{}},
+			},
+			makeInstantiateType(struct {
+				Noodle []struct{} `hcl:"noodle,block"`
+			}{}),
+			func(gotI interface{}) bool {
+				noodle := gotI.(struct {
+					Noodle []struct{} `hcl:"noodle,block"`
+				}).Noodle
+				return len(noodle) == 1
+			},
+			0,
+		},
+		{
+			map[string]interface{}{
+				"noodle": []map[string]interface{}{{}, {}},
+			},
+			makeInstantiateType(struct {
+				Noodle []struct{} `hcl:"noodle,block"`
+			}{}),
+			func(gotI interface{}) bool {
+				noodle := gotI.(struct {
+					Noodle []struct{} `hcl:"noodle,block"`
+				}).Noodle
+				return len(noodle) == 2
+			},
+			0,
+		},
+		{
+			map[string]interface{}{
+				"noodle": map[string]interface{}{},
+			},
+			makeInstantiateType(struct {
+				Noodle struct {
+					Name string `hcl:"name,label"`
+				} `hcl:"noodle,block"`
+			}{}),
+			func(gotI interface{}) bool {
+				//nolint:misspell
+				// Generating two diagnostics is good enough for this one.
+				// (one for the missing noodle block and the other for
+				// the JSON serialization detecting the missing level of
+				// heirarchy for the label.)
+				return true
+			},
+			2,
+		},
+		{
+			map[string]interface{}{
+				"noodle": map[string]interface{}{
+					"foo_foo": map[string]interface{}{},
+				},
+			},
+			makeInstantiateType(struct {
+				Noodle struct {
+					Name string `hcl:"name,label"`
+				} `hcl:"noodle,block"`
+			}{}),
+			func(gotI interface{}) bool {
+				noodle := gotI.(struct {
+					Noodle struct {
+						Name string `hcl:"name,label"`
+					} `hcl:"noodle,block"`
+				}).Noodle
+				return noodle.Name == "foo_foo"
+			},
+			0,
+		},
+		{
+			map[string]interface{}{
+				"noodle": map[string]interface{}{
+					"foo_foo": map[string]interface{}{},
+					"bar_baz": map[string]interface{}{},
+				},
+			},
+			makeInstantiateType(struct {
+				Noodle struct {
+					Name string `hcl:"name,label"`
+				} `hcl:"noodle,block"`
+			}{}),
+			func(gotI interface{}) bool {
+				// One diagnostic is enough for this one.
+				return true
+			},
+			1,
+		},
+		{
+			map[string]interface{}{
+				"noodle": map[string]interface{}{
+					"foo_foo": map[string]interface{}{},
+					"bar_baz": map[string]interface{}{},
+				},
+			},
+			makeInstantiateType(struct {
+				Noodles []struct {
+					Name string `hcl:"name,label"`
+				} `hcl:"noodle,block"`
+			}{}),
+			func(gotI interface{}) bool {
+				noodles := gotI.(struct {
+					Noodles []struct {
+						Name string `hcl:"name,label"`
+					} `hcl:"noodle,block"`
+				}).Noodles
+				return len(noodles) == 2 && (noodles[0].Name == "foo_foo" || noodles[0].Name == "bar_baz") && (noodles[1].Name == "foo_foo" || noodles[1].Name == "bar_baz") && noodles[0].Name != noodles[1].Name
+			},
+			0,
+		},
+		{
+			map[string]interface{}{
+				"noodle": map[string]interface{}{
+					"foo_foo": map[string]interface{}{
+						"type": "rice",
+					},
+				},
+			},
+			makeInstantiateType(struct {
+				Noodle struct {
+					Name string `hcl:"name,label"`
+					Type string `hcl:"type"`
+				} `hcl:"noodle,block"`
+			}{}),
+			func(gotI interface{}) bool {
+				noodle := gotI.(struct {
+					Noodle struct {
+						Name string `hcl:"name,label"`
+						Type string `hcl:"type"`
+					} `hcl:"noodle,block"`
+				}).Noodle
+				return noodle.Name == "foo_foo" && noodle.Type == "rice"
+			},
+			0,
+		},
+
+		{
+			map[string]interface{}{
+				"name": "Ermintrude",
+				"age":  34,
+			},
+			makeInstantiateType(map[string]string(nil)),
+			deepEquals(map[string]string{
+				"name": "Ermintrude",
+				"age":  "34",
+			}),
+			0,
+		},
+		{
+			map[string]interface{}{
+				"name": "Ermintrude",
+				"age":  89,
+			},
+			makeInstantiateType(map[string]*hcl.Attribute(nil)),
+			func(gotI interface{}) bool {
+				got := gotI.(map[string]*hcl.Attribute)
+				return len(got) == 2 && got["name"] != nil && got["age"] != nil
+			},
+			0,
+		},
+		{
+			map[string]interface{}{
+				"name": "Ermintrude",
+				"age":  13,
+			},
+			makeInstantiateType(map[string]hcl.Expression(nil)),
+			func(gotI interface{}) bool {
+				got := gotI.(map[string]hcl.Expression)
+				return len(got) == 2 && got["name"] != nil && got["age"] != nil
+			},
+			0,
+		},
+		{
+			map[string]interface{}{
+				"name":   "Ermintrude",
+				"living": true,
+			},
+			makeInstantiateType(map[string]cty.Value(nil)),
+			deepEquals(map[string]cty.Value{
+				"name":   cty.StringVal("Ermintrude"),
+				"living": cty.True,
+			}),
+			0,
+		},
+		{
+			// Retain "nested" block while decoding
+			map[string]interface{}{
+				"plain": "foo",
+			},
+			func() interface{} {
+				return &withNestedBlock{
+					Plain: "bar",
+					Nested: &withTwoAttributes{
+						A: "bar",
+					},
+				}
+			},
+			func(gotI interface{}) bool {
+				foo := gotI.(withNestedBlock)
+				return foo.Plain == "foo" && foo.Nested != nil && foo.Nested.A == "bar"
+			},
+			0,
+		},
+		{
+			// Retain values in "nested" block while decoding
+			map[string]interface{}{
+				"nested": map[string]interface{}{
+					"a": "foo",
+				},
+			},
+			func() interface{} {
+				return &withNestedBlock{
+					Nested: &withTwoAttributes{
+						B: "bar",
+					},
+				}
+			},
+			func(gotI interface{}) bool {
+				foo := gotI.(withNestedBlock)
+				return foo.Nested.A == "foo" && foo.Nested.B == "bar"
+			},
+			0,
+		},
+		{
+			// Retain values in "nested" block list while decoding
+			map[string]interface{}{
+				"nested": []map[string]interface{}{
+					{
+						"a": "foo",
+					},
+				},
+			},
+			func() interface{} {
+				return &withListofNestedBlocks{
+					Nested: []*withTwoAttributes{
+						{
+							B: "bar",
+						},
+					},
+				}
+			},
+			func(gotI interface{}) bool {
+				n := gotI.(withListofNestedBlocks)
+				return n.Nested[0].A == "foo" && n.Nested[0].B == "bar"
+			},
+			0,
+		},
+		{
+			// Remove additional elements from the list while decoding nested blocks
+			map[string]interface{}{
+				"nested": []map[string]interface{}{
+					{
+						"a": "foo",
+					},
+				},
+			},
+			func() interface{} {
+				return &withListofNestedBlocks{
+					Nested: []*withTwoAttributes{
+						{
+							B: "bar",
+						},
+						{
+							B: "bar",
+						},
+					},
+				}
+			},
+			func(gotI interface{}) bool {
+				n := gotI.(withListofNestedBlocks)
+				return len(n.Nested) == 1
+			},
+			0,
+		},
+		{
+			// Make sure decoding value slices works the same as pointer slices.
+			map[string]interface{}{
+				"nested": []map[string]interface{}{
+					{
+						"b": "bar",
+					},
+					{
+						"b": "baz",
+					},
+				},
+			},
+			func() interface{} {
+				return &withListofNestedBlocksNoPointers{
+					Nested: []withTwoAttributes{
+						{
+							B: "foo",
+						},
+					},
+				}
+			},
+			func(gotI interface{}) bool {
+				n := gotI.(withListofNestedBlocksNoPointers)
+				return n.Nested[0].B == "bar" && len(n.Nested) == 2
+			},
+			0,
+		},
+	}
+
+	for i, test := range tests {
+		// For convenience here we're going to use the JSON parser
+		// to process the given body.
+		buf, err := json.Marshal(test.Body)
+		if err != nil {
+			t.Fatalf("error JSON-encoding body for test %d: %s", i, err)
+		}
+
+		t.Run(string(buf), func(t *testing.T) {
+			file, diags := hclJSON.Parse(buf, "test.json")
+			if len(diags) != 0 {
+				t.Fatalf("diagnostics while parsing: %s", diags.Error())
+			}
+
+			targetVal := reflect.ValueOf(test.Target())
+
+			diags = DecodeBody(file.Body, nil, targetVal.Interface())
+			if len(diags) != test.DiagCount {
+				t.Errorf("wrong number of diagnostics %d; want %d", len(diags), test.DiagCount)
+				for _, diag := range diags {
+					t.Logf(" - %s", diag.Error())
+				}
+			}
+			got := targetVal.Elem().Interface()
+			if !test.Check(got) {
+				t.Errorf("wrong result\ngot:  %s", spew.Sdump(got))
+			}
+		})
+	}
+}
+
+func TestDecodeExpression(t *testing.T) {
+	tests := []struct {
+		Value     cty.Value
+		Target    interface{}
+		Want      interface{}
+		DiagCount int
+	}{
+		{
+			cty.StringVal("hello"),
+			"",
+			"hello",
+			0,
+		},
+		{
+			cty.StringVal("hello"),
+			cty.NilVal,
+			cty.StringVal("hello"),
+			0,
+		},
+		{
+			cty.NumberIntVal(2),
+			"",
+			"2",
+			0,
+		},
+		{
+			cty.StringVal("true"),
+			false,
+			true,
+			0,
+		},
+		{
+			cty.NullVal(cty.String),
+			"",
+			"",
+			1, // null value is not allowed
+		},
+		{
+			cty.UnknownVal(cty.String),
+			"",
+			"",
+			1, // value must be known
+		},
+		{
+			cty.ListVal([]cty.Value{cty.True}),
+			false,
+			false,
+			1, // bool required
+		},
+	}
+
+	for i, test := range tests {
+		t.Run(fmt.Sprintf("%02d", i), func(t *testing.T) {
+			expr := &fixedExpression{test.Value}
+
+			targetVal := reflect.New(reflect.TypeOf(test.Target))
+
+			diags := DecodeExpression(expr, nil, targetVal.Interface())
+			if len(diags) != test.DiagCount {
+				t.Errorf("wrong number of diagnostics %d; want %d", len(diags), test.DiagCount)
+				for _, diag := range diags {
+					t.Logf(" - %s", diag.Error())
+				}
+			}
+			got := targetVal.Elem().Interface()
+			if !reflect.DeepEqual(got, test.Want) {
+				t.Errorf("wrong result\ngot:  %#v\nwant: %#v", got, test.Want)
+			}
+		})
+	}
+}
+
+type fixedExpression struct {
+	val cty.Value
+}
+
+func (e *fixedExpression) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
+	return e.val, nil
+}
+
+func (e *fixedExpression) Range() (r hcl.Range) {
+	return
+}
+
+func (e *fixedExpression) StartRange() (r hcl.Range) {
+	return
+}
+
+func (e *fixedExpression) Variables() []hcl.Traversal {
+	return nil
+}
+
+func makeInstantiateType(target interface{}) func() interface{} {
+	return func() interface{} {
+		return reflect.New(reflect.TypeOf(target)).Interface()
+	}
+}
diff -pruN 0.19.3+ds1-4/bake/hclparser/gohcl/doc.go 0.21.3-0ubuntu1/bake/hclparser/gohcl/doc.go
--- 0.19.3+ds1-4/bake/hclparser/gohcl/doc.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/bake/hclparser/gohcl/doc.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,65 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+// Package gohcl allows decoding HCL configurations into Go data structures.
+//
+// It provides a convenient and concise way of describing the schema for
+// configuration and then accessing the resulting data via native Go
+// types.
+//
+// A struct field tag scheme is used, similar to other decoding and
+// unmarshalling libraries. The tags are formatted as in the following example:
+//
+//	ThingType string `hcl:"thing_type,attr"`
+//
+// Within each tag there are two comma-separated tokens. The first is the
+// name of the corresponding construct in configuration, while the second
+// is a keyword giving the kind of construct expected. The following
+// kind keywords are supported:
+//
+//	attr (the default) indicates that the value is to be populated from an attribute
+//	block indicates that the value is to populated from a block
+//	label indicates that the value is to populated from a block label
+//	optional is the same as attr, but the field is optional
+//	remain indicates that the value is to be populated from the remaining body after populating other fields
+//
+// "attr" fields may either be of type *hcl.Expression, in which case the raw
+// expression is assigned, or of any type accepted by gocty, in which case
+// gocty will be used to assign the value to a native Go type.
+//
+// "block" fields may be a struct that recursively uses the same tags, or a
+// slice of such structs, in which case multiple blocks of the corresponding
+// type are decoded into the slice.
+//
+// "body" can be placed on a single field of type hcl.Body to capture
+// the full hcl.Body that was decoded for a block. This does not allow leftover
+// values like "remain", so a decoding error will still be returned if leftover
+// fields are given. If you want to capture the decoding body PLUS leftover
+// fields, you must specify a "remain" field as well to prevent errors. The
+// body field and the remain field will both contain the leftover fields.
+//
+// "label" fields are considered only in a struct used as the type of a field
+// marked as "block", and are used sequentially to capture the labels of
+// the blocks being decoded. In this case, the name token is used only as
+// an identifier for the label in diagnostic messages.
+//
+// "optional" fields behave like "attr" fields, but they are optional
+// and will not give parsing errors if they are missing.
+//
+// "remain" can be placed on a single field that may be either of type
+// hcl.Body or hcl.Attributes, in which case any remaining body content is
+// placed into this field for delayed processing. If no "remain" field is
+// present then any attributes or blocks not matched by another valid tag
+// will cause an error diagnostic.
+//
+// Only a subset of this tagging/typing vocabulary is supported for the
+// "Encode" family of functions. See the EncodeIntoBody docs for full details
+// on the constraints there.
+//
+// Broadly-speaking this package deals with two types of error. The first is
+// errors in the configuration itself, which are returned as diagnostics
+// written with the configuration author as the target audience. The second
+// is bugs in the calling program, such as invalid struct tags, which are
+// surfaced via panics since there can be no useful runtime handling of such
+// errors and they should certainly not be returned to the user as diagnostics.
+package gohcl
diff -pruN 0.19.3+ds1-4/bake/hclparser/gohcl/encode.go 0.21.3-0ubuntu1/bake/hclparser/gohcl/encode.go
--- 0.19.3+ds1-4/bake/hclparser/gohcl/encode.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/bake/hclparser/gohcl/encode.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,192 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package gohcl
+
+import (
+	"fmt"
+	"reflect"
+	"sort"
+
+	"github.com/hashicorp/hcl/v2/hclwrite"
+	"github.com/zclconf/go-cty/cty/gocty"
+)
+
+// EncodeIntoBody replaces the contents of the given hclwrite Body with
+// attributes and blocks derived from the given value, which must be a
+// struct value or a pointer to a struct value with the struct tags defined
+// in this package.
+//
+// This function can work only with fully-decoded data. It will ignore any
+// fields tagged as "remain", any fields that decode attributes into either
+// hcl.Attribute or hcl.Expression values, and any fields that decode blocks
+// into hcl.Attributes values. This function does not have enough information
+// to complete the decoding of these types.
+//
+// Any fields tagged as "label" are ignored by this function. Use EncodeAsBlock
+// to produce a whole hclwrite.Block including block labels.
+//
+// As long as a suitable value is given to encode and the destination body
+// is non-nil, this function will always complete. It will panic in case of
+// any errors in the calling program, such as passing an inappropriate type
+// or a nil body.
+//
+// The layout of the resulting HCL source is derived from the ordering of
+// the struct fields, with blank lines around nested blocks of different types.
+// Fields representing attributes should usually precede those representing
+// blocks so that the attributes can group togather in the result. For more
+// control, use the hclwrite API directly.
+func EncodeIntoBody(val interface{}, dst *hclwrite.Body) {
+	rv := reflect.ValueOf(val)
+	ty := rv.Type()
+	if ty.Kind() == reflect.Ptr {
+		rv = rv.Elem()
+		ty = rv.Type()
+	}
+	if ty.Kind() != reflect.Struct {
+		panic(fmt.Sprintf("value is %s, not struct", ty.Kind()))
+	}
+
+	tags := getFieldTags(ty)
+	populateBody(rv, ty, tags, dst)
+}
+
+// EncodeAsBlock creates a new hclwrite.Block populated with the data from
+// the given value, which must be a struct or pointer to struct with the
+// struct tags defined in this package.
+//
+// If the given struct type has fields tagged with "label" tags then they
+// will be used in order to annotate the created block with labels.
+//
+// This function has the same constraints as EncodeIntoBody and will panic
+// if they are violated.
+func EncodeAsBlock(val interface{}, blockType string) *hclwrite.Block {
+	rv := reflect.ValueOf(val)
+	ty := rv.Type()
+	if ty.Kind() == reflect.Ptr {
+		rv = rv.Elem()
+		ty = rv.Type()
+	}
+	if ty.Kind() != reflect.Struct {
+		panic(fmt.Sprintf("value is %s, not struct", ty.Kind()))
+	}
+
+	tags := getFieldTags(ty)
+	labels := make([]string, len(tags.Labels))
+	for i, lf := range tags.Labels {
+		lv := rv.Field(lf.FieldIndex)
+		// We just stringify whatever we find. It should always be a string
+		// but if not then we'll still do something reasonable.
+		labels[i] = fmt.Sprintf("%s", lv.Interface())
+	}
+
+	block := hclwrite.NewBlock(blockType, labels)
+	populateBody(rv, ty, tags, block.Body())
+	return block
+}
+
+func populateBody(rv reflect.Value, ty reflect.Type, tags *fieldTags, dst *hclwrite.Body) {
+	nameIdxs := make(map[string]int, len(tags.Attributes)+len(tags.Blocks))
+	namesOrder := make([]string, 0, len(tags.Attributes)+len(tags.Blocks))
+	for n, i := range tags.Attributes {
+		nameIdxs[n] = i
+		namesOrder = append(namesOrder, n)
+	}
+	for n, i := range tags.Blocks {
+		nameIdxs[n] = i
+		namesOrder = append(namesOrder, n)
+	}
+	sort.SliceStable(namesOrder, func(i, j int) bool {
+		ni, nj := namesOrder[i], namesOrder[j]
+		return nameIdxs[ni] < nameIdxs[nj]
+	})
+
+	dst.Clear()
+
+	prevWasBlock := false
+	for _, name := range namesOrder {
+		fieldIdx := nameIdxs[name]
+		field := ty.Field(fieldIdx)
+		fieldTy := field.Type
+		fieldVal := rv.Field(fieldIdx)
+
+		if fieldTy.Kind() == reflect.Ptr {
+			fieldTy = fieldTy.Elem()
+			fieldVal = fieldVal.Elem()
+		}
+
+		if _, isAttr := tags.Attributes[name]; isAttr {
+			if exprType.AssignableTo(fieldTy) || attrType.AssignableTo(fieldTy) {
+				continue // ignore undecoded fields
+			}
+			if !fieldVal.IsValid() {
+				continue // ignore (field value is nil pointer)
+			}
+			if fieldTy.Kind() == reflect.Ptr && fieldVal.IsNil() {
+				continue // ignore
+			}
+			if prevWasBlock {
+				dst.AppendNewline()
+				prevWasBlock = false
+			}
+
+			valTy, err := gocty.ImpliedType(fieldVal.Interface())
+			if err != nil {
+				panic(fmt.Sprintf("cannot encode %T as HCL expression: %s", fieldVal.Interface(), err))
+			}
+
+			val, err := gocty.ToCtyValue(fieldVal.Interface(), valTy)
+			if err != nil {
+				// This should never happen, since we should always be able
+				// to decode into the implied type.
+				panic(fmt.Sprintf("failed to encode %T as %#v: %s", fieldVal.Interface(), valTy, err))
+			}
+
+			dst.SetAttributeValue(name, val)
+		} else { // must be a block, then
+			elemTy := fieldTy
+			isSeq := false
+			if elemTy.Kind() == reflect.Slice || elemTy.Kind() == reflect.Array {
+				isSeq = true
+				elemTy = elemTy.Elem()
+			}
+
+			if bodyType.AssignableTo(elemTy) || attrsType.AssignableTo(elemTy) {
+				continue // ignore undecoded fields
+			}
+			prevWasBlock = false
+
+			if isSeq {
+				l := fieldVal.Len()
+				for i := 0; i < l; i++ {
+					elemVal := fieldVal.Index(i)
+					if !elemVal.IsValid() {
+						continue // ignore (elem value is nil pointer)
+					}
+					if elemTy.Kind() == reflect.Ptr && elemVal.IsNil() {
+						continue // ignore
+					}
+					block := EncodeAsBlock(elemVal.Interface(), name)
+					if !prevWasBlock {
+						dst.AppendNewline()
+						prevWasBlock = true
+					}
+					dst.AppendBlock(block)
+				}
+			} else {
+				if !fieldVal.IsValid() {
+					continue // ignore (field value is nil pointer)
+				}
+				if elemTy.Kind() == reflect.Ptr && fieldVal.IsNil() {
+					continue // ignore
+				}
+				block := EncodeAsBlock(fieldVal.Interface(), name)
+				if !prevWasBlock {
+					dst.AppendNewline()
+					prevWasBlock = true
+				}
+				dst.AppendBlock(block)
+			}
+		}
+	}
+}
diff -pruN 0.19.3+ds1-4/bake/hclparser/gohcl/encode_test.go 0.21.3-0ubuntu1/bake/hclparser/gohcl/encode_test.go
--- 0.19.3+ds1-4/bake/hclparser/gohcl/encode_test.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/bake/hclparser/gohcl/encode_test.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,67 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package gohcl_test
+
+import (
+	"fmt"
+
+	"github.com/hashicorp/hcl/v2/gohcl"
+	"github.com/hashicorp/hcl/v2/hclwrite"
+)
+
+func ExampleEncodeIntoBody() {
+	type Service struct {
+		Name string   `hcl:"name,label"`
+		Exe  []string `hcl:"executable"`
+	}
+	type Constraints struct {
+		OS   string `hcl:"os"`
+		Arch string `hcl:"arch"`
+	}
+	type App struct {
+		Name        string       `hcl:"name"`
+		Desc        string       `hcl:"description"`
+		Constraints *Constraints `hcl:"constraints,block"`
+		Services    []Service    `hcl:"service,block"`
+	}
+
+	app := App{
+		Name: "awesome-app",
+		Desc: "Such an awesome application",
+		Constraints: &Constraints{
+			OS:   "linux",
+			Arch: "amd64",
+		},
+		Services: []Service{
+			{
+				Name: "web",
+				Exe:  []string{"./web", "--listen=:8080"},
+			},
+			{
+				Name: "worker",
+				Exe:  []string{"./worker"},
+			},
+		},
+	}
+
+	f := hclwrite.NewEmptyFile()
+	gohcl.EncodeIntoBody(&app, f.Body())
+	fmt.Printf("%s", f.Bytes())
+
+	// Output:
+	// name        = "awesome-app"
+	// description = "Such an awesome application"
+	//
+	// constraints {
+	//   os   = "linux"
+	//   arch = "amd64"
+	// }
+	//
+	// service "web" {
+	//   executable = ["./web", "--listen=:8080"]
+	// }
+	// service "worker" {
+	//   executable = ["./worker"]
+	// }
+}
diff -pruN 0.19.3+ds1-4/bake/hclparser/gohcl/schema.go 0.21.3-0ubuntu1/bake/hclparser/gohcl/schema.go
--- 0.19.3+ds1-4/bake/hclparser/gohcl/schema.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/bake/hclparser/gohcl/schema.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,185 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package gohcl
+
+import (
+	"fmt"
+	"reflect"
+	"sort"
+	"strings"
+
+	"github.com/hashicorp/hcl/v2"
+)
+
+// ImpliedBodySchema produces a hcl.BodySchema derived from the type of the
+// given value, which must be a struct value or a pointer to one. If an
+// inappropriate value is passed, this function will panic.
+//
+// The second return argument indicates whether the given struct includes
+// a "remain" field, and thus the returned schema is non-exhaustive.
+//
+// This uses the tags on the fields of the struct to discover how each
+// field's value should be expressed within configuration. If an invalid
+// mapping is attempted, this function will panic.
+func ImpliedBodySchema(val interface{}) (schema *hcl.BodySchema, partial bool) {
+	ty := reflect.TypeOf(val)
+
+	if ty.Kind() == reflect.Ptr {
+		ty = ty.Elem()
+	}
+
+	if ty.Kind() != reflect.Struct {
+		panic(fmt.Sprintf("given value must be struct, not %T", val))
+	}
+
+	var attrSchemas []hcl.AttributeSchema
+	var blockSchemas []hcl.BlockHeaderSchema
+
+	tags := getFieldTags(ty)
+
+	attrNames := make([]string, 0, len(tags.Attributes))
+	for n := range tags.Attributes {
+		attrNames = append(attrNames, n)
+	}
+	sort.Strings(attrNames)
+	for _, n := range attrNames {
+		idx := tags.Attributes[n]
+		optional := tags.Optional[n]
+		field := ty.Field(idx)
+
+		var required bool
+
+		switch {
+		case field.Type.AssignableTo(exprType):
+			//nolint:misspell
+			// If we're decoding to hcl.Expression then absense can be
+			// indicated via a null value, so we don't specify that
+			// the field is required during decoding.
+			required = false
+		case field.Type.Kind() != reflect.Ptr && !optional:
+			required = true
+		default:
+			required = false
+		}
+
+		attrSchemas = append(attrSchemas, hcl.AttributeSchema{
+			Name:     n,
+			Required: required,
+		})
+	}
+
+	blockNames := make([]string, 0, len(tags.Blocks))
+	for n := range tags.Blocks {
+		blockNames = append(blockNames, n)
+	}
+	sort.Strings(blockNames)
+	for _, n := range blockNames {
+		idx := tags.Blocks[n]
+		field := ty.Field(idx)
+		fty := field.Type
+		if fty.Kind() == reflect.Slice {
+			fty = fty.Elem()
+		}
+		if fty.Kind() == reflect.Ptr {
+			fty = fty.Elem()
+		}
+		if fty.Kind() != reflect.Struct {
+			panic(fmt.Sprintf(
+				"hcl 'block' tag kind cannot be applied to %s field %s: struct required", field.Type.String(), field.Name,
+			))
+		}
+		ftags := getFieldTags(fty)
+		var labelNames []string
+		if len(ftags.Labels) > 0 {
+			labelNames = make([]string, len(ftags.Labels))
+			for i, l := range ftags.Labels {
+				labelNames[i] = l.Name
+			}
+		}
+
+		blockSchemas = append(blockSchemas, hcl.BlockHeaderSchema{
+			Type:       n,
+			LabelNames: labelNames,
+		})
+	}
+
+	partial = tags.Remain != nil
+	schema = &hcl.BodySchema{
+		Attributes: attrSchemas,
+		Blocks:     blockSchemas,
+	}
+	return schema, partial
+}
+
+type fieldTags struct {
+	Attributes map[string]int
+	Blocks     map[string]int
+	Labels     []labelField
+	Remain     *int
+	Body       *int
+	Optional   map[string]bool
+}
+
+type labelField struct {
+	FieldIndex int
+	Name       string
+}
+
+func getFieldTags(ty reflect.Type) *fieldTags {
+	ret := &fieldTags{
+		Attributes: map[string]int{},
+		Blocks:     map[string]int{},
+		Optional:   map[string]bool{},
+	}
+
+	ct := ty.NumField()
+	for i := 0; i < ct; i++ {
+		field := ty.Field(i)
+		tag := field.Tag.Get("hcl")
+		if tag == "" {
+			continue
+		}
+
+		comma := strings.Index(tag, ",")
+		var name, kind string
+		if comma != -1 {
+			name = tag[:comma]
+			kind = tag[comma+1:]
+		} else {
+			name = tag
+			kind = "attr"
+		}
+
+		switch kind {
+		case "attr":
+			ret.Attributes[name] = i
+		case "block":
+			ret.Blocks[name] = i
+		case "label":
+			ret.Labels = append(ret.Labels, labelField{
+				FieldIndex: i,
+				Name:       name,
+			})
+		case "remain":
+			if ret.Remain != nil {
+				panic("only one 'remain' tag is permitted")
+			}
+			idx := i // copy, because this loop will continue assigning to i
+			ret.Remain = &idx
+		case "body":
+			if ret.Body != nil {
+				panic("only one 'body' tag is permitted")
+			}
+			idx := i // copy, because this loop will continue assigning to i
+			ret.Body = &idx
+		case "optional":
+			ret.Attributes[name] = i
+			ret.Optional[name] = true
+		default:
+			panic(fmt.Sprintf("invalid hcl field tag kind %q on %s %q", kind, field.Type.String(), field.Name))
+		}
+	}
+
+	return ret
+}
diff -pruN 0.19.3+ds1-4/bake/hclparser/gohcl/schema_test.go 0.21.3-0ubuntu1/bake/hclparser/gohcl/schema_test.go
--- 0.19.3+ds1-4/bake/hclparser/gohcl/schema_test.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/bake/hclparser/gohcl/schema_test.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,233 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package gohcl
+
+import (
+	"fmt"
+	"reflect"
+	"testing"
+
+	"github.com/davecgh/go-spew/spew"
+	"github.com/hashicorp/hcl/v2"
+)
+
+func TestImpliedBodySchema(t *testing.T) {
+	tests := []struct {
+		val         interface{}
+		wantSchema  *hcl.BodySchema
+		wantPartial bool
+	}{
+		{
+			struct{}{},
+			&hcl.BodySchema{},
+			false,
+		},
+		{
+			struct {
+				Ignored bool
+			}{},
+			&hcl.BodySchema{},
+			false,
+		},
+		{
+			struct {
+				Attr1 bool `hcl:"attr1"`
+				Attr2 bool `hcl:"attr2"`
+			}{},
+			&hcl.BodySchema{
+				Attributes: []hcl.AttributeSchema{
+					{
+						Name:     "attr1",
+						Required: true,
+					},
+					{
+						Name:     "attr2",
+						Required: true,
+					},
+				},
+			},
+			false,
+		},
+		{
+			struct {
+				Attr *bool `hcl:"attr,attr"`
+			}{},
+			&hcl.BodySchema{
+				Attributes: []hcl.AttributeSchema{
+					{
+						Name:     "attr",
+						Required: false,
+					},
+				},
+			},
+			false,
+		},
+		{
+			struct {
+				Thing struct{} `hcl:"thing,block"`
+			}{},
+			&hcl.BodySchema{
+				Blocks: []hcl.BlockHeaderSchema{
+					{
+						Type: "thing",
+					},
+				},
+			},
+			false,
+		},
+		{
+			struct {
+				Thing struct {
+					Type string `hcl:"type,label"`
+					Name string `hcl:"name,label"`
+				} `hcl:"thing,block"`
+			}{},
+			&hcl.BodySchema{
+				Blocks: []hcl.BlockHeaderSchema{
+					{
+						Type:       "thing",
+						LabelNames: []string{"type", "name"},
+					},
+				},
+			},
+			false,
+		},
+		{
+			struct {
+				Thing []struct {
+					Type string `hcl:"type,label"`
+					Name string `hcl:"name,label"`
+				} `hcl:"thing,block"`
+			}{},
+			&hcl.BodySchema{
+				Blocks: []hcl.BlockHeaderSchema{
+					{
+						Type:       "thing",
+						LabelNames: []string{"type", "name"},
+					},
+				},
+			},
+			false,
+		},
+		{
+			struct {
+				Thing *struct {
+					Type string `hcl:"type,label"`
+					Name string `hcl:"name,label"`
+				} `hcl:"thing,block"`
+			}{},
+			&hcl.BodySchema{
+				Blocks: []hcl.BlockHeaderSchema{
+					{
+						Type:       "thing",
+						LabelNames: []string{"type", "name"},
+					},
+				},
+			},
+			false,
+		},
+		{
+			struct {
+				Thing struct {
+					Name      string `hcl:"name,label"`
+					Something string `hcl:"something"`
+				} `hcl:"thing,block"`
+			}{},
+			&hcl.BodySchema{
+				Blocks: []hcl.BlockHeaderSchema{
+					{
+						Type:       "thing",
+						LabelNames: []string{"name"},
+					},
+				},
+			},
+			false,
+		},
+		{
+			struct {
+				Doodad string `hcl:"doodad"`
+				Thing  struct {
+					Name string `hcl:"name,label"`
+				} `hcl:"thing,block"`
+			}{},
+			&hcl.BodySchema{
+				Attributes: []hcl.AttributeSchema{
+					{
+						Name:     "doodad",
+						Required: true,
+					},
+				},
+				Blocks: []hcl.BlockHeaderSchema{
+					{
+						Type:       "thing",
+						LabelNames: []string{"name"},
+					},
+				},
+			},
+			false,
+		},
+		{
+			struct {
+				Doodad string `hcl:"doodad"`
+				Config string `hcl:",remain"`
+			}{},
+			&hcl.BodySchema{
+				Attributes: []hcl.AttributeSchema{
+					{
+						Name:     "doodad",
+						Required: true,
+					},
+				},
+			},
+			true,
+		},
+		{
+			struct {
+				Expr hcl.Expression `hcl:"expr"`
+			}{},
+			&hcl.BodySchema{
+				Attributes: []hcl.AttributeSchema{
+					{
+						Name:     "expr",
+						Required: false,
+					},
+				},
+			},
+			false,
+		},
+		{
+			struct {
+				Meh string `hcl:"meh,optional"`
+			}{},
+			&hcl.BodySchema{
+				Attributes: []hcl.AttributeSchema{
+					{
+						Name:     "meh",
+						Required: false,
+					},
+				},
+			},
+			false,
+		},
+	}
+
+	for _, test := range tests {
+		t.Run(fmt.Sprintf("%#v", test.val), func(t *testing.T) {
+			schema, partial := ImpliedBodySchema(test.val)
+			if !reflect.DeepEqual(schema, test.wantSchema) {
+				t.Errorf(
+					"wrong schema\ngot:  %s\nwant: %s",
+					spew.Sdump(schema), spew.Sdump(test.wantSchema),
+				)
+			}
+
+			if partial != test.wantPartial {
+				t.Errorf(
+					"wrong partial flag\ngot:  %#v\nwant: %#v",
+					partial, test.wantPartial,
+				)
+			}
+		})
+	}
+}
diff -pruN 0.19.3+ds1-4/bake/hclparser/gohcl/types.go 0.21.3-0ubuntu1/bake/hclparser/gohcl/types.go
--- 0.19.3+ds1-4/bake/hclparser/gohcl/types.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/bake/hclparser/gohcl/types.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,19 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package gohcl
+
+import (
+	"reflect"
+
+	"github.com/hashicorp/hcl/v2"
+)
+
+var victimExpr hcl.Expression
+var victimBody hcl.Body
+
+var exprType = reflect.TypeOf(&victimExpr).Elem()
+var bodyType = reflect.TypeOf(&victimBody).Elem()
+var blockType = reflect.TypeOf((*hcl.Block)(nil)) //nolint:unused
+var attrType = reflect.TypeOf((*hcl.Attribute)(nil))
+var attrsType = reflect.TypeOf(hcl.Attributes(nil))
diff -pruN 0.19.3+ds1-4/bake/hclparser/hclparser.go 0.21.3-0ubuntu1/bake/hclparser/hclparser.go
--- 0.19.3+ds1-4/bake/hclparser/hclparser.go	2024-12-17 11:25:19.000000000 +0000
+++ 0.21.3-0ubuntu1/bake/hclparser/hclparser.go	2025-03-17 16:14:25.000000000 +0000
@@ -10,12 +10,11 @@ import (
 	"strconv"
 	"strings"
 
+	"github.com/docker/buildx/bake/hclparser/gohcl"
 	"github.com/docker/buildx/util/userfunc"
 	"github.com/hashicorp/hcl/v2"
-	"github.com/hashicorp/hcl/v2/gohcl"
 	"github.com/pkg/errors"
 	"github.com/zclconf/go-cty/cty"
-	"github.com/zclconf/go-cty/cty/gocty"
 )
 
 type Opt struct {
@@ -454,7 +453,7 @@ func (p *parser) resolveBlock(block *hcl
 		}
 
 		// decode!
-		diag = gohcl.DecodeBody(body(), ectx, output.Interface())
+		diag = decodeBody(body(), ectx, output.Interface())
 		if diag.HasErrors() {
 			return diag
 		}
@@ -476,11 +475,11 @@ func (p *parser) resolveBlock(block *hcl
 		}
 
 		// store the result into the evaluation context (so it can be referenced)
-		outputType, err := gocty.ImpliedType(output.Interface())
+		outputType, err := ImpliedType(output.Interface())
 		if err != nil {
 			return err
 		}
-		outputValue, err := gocty.ToCtyValue(output.Interface(), outputType)
+		outputValue, err := ToCtyValue(output.Interface(), outputType)
 		if err != nil {
 			return err
 		}
@@ -492,7 +491,12 @@ func (p *parser) resolveBlock(block *hcl
 			m = map[string]cty.Value{}
 		}
 		m[name] = outputValue
-		p.ectx.Variables[block.Type] = cty.MapVal(m)
+
+		// The logical contents of this structure is similar to a map,
+		// but it's possible for some attributes to be different in a way that's
+		// illegal for a map so we use an object here instead which is structurally
+		// equivalent but allows disparate types for different keys.
+		p.ectx.Variables[block.Type] = cty.ObjectVal(m)
 	}
 
 	return nil
@@ -575,9 +579,9 @@ func (p *parser) validateVariables(vars
 }
 
 type Variable struct {
-	Name        string
-	Description string
-	Value       *string
+	Name        string  `json:"name"`
+	Description string  `json:"description,omitempty"`
+	Value       *string `json:"value,omitempty"`
 }
 
 type ParseMeta struct {
@@ -983,3 +987,8 @@ func key(ks ...any) uint64 {
 	}
 	return hash.Sum64()
 }
+
+func decodeBody(body hcl.Body, ctx *hcl.EvalContext, val interface{}) hcl.Diagnostics {
+	dec := gohcl.DecodeOptions{ImpliedType: ImpliedType}
+	return dec.DecodeBody(body, ctx, val)
+}
diff -pruN 0.19.3+ds1-4/bake/hclparser/type_implied.go 0.21.3-0ubuntu1/bake/hclparser/type_implied.go
--- 0.19.3+ds1-4/bake/hclparser/type_implied.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/bake/hclparser/type_implied.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,160 @@
+// MIT License
+//
+// Copyright (c) 2017-2018 Martin Atkins
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+
+package hclparser
+
+import (
+	"reflect"
+
+	"github.com/zclconf/go-cty/cty"
+)
+
+// ImpliedType takes an arbitrary Go value (as an interface{}) and attempts
+// to find a suitable cty.Type instance that could be used for a conversion
+// with ToCtyValue.
+//
+// This allows -- for simple situations at least -- types to be defined just
+// once in Go and the cty types derived from the Go types, but in the process
+// it makes some assumptions that may be undesirable so applications are
+// encouraged to build their cty types directly if exacting control is
+// required.
+//
+// Not all Go types can be represented as cty types, so an error may be
+// returned which is usually considered to be a bug in the calling program.
+// In particular, ImpliedType will never use capsule types in its returned
+// type, because it cannot know the capsule types supported by the calling
+// program.
+func ImpliedType(gv interface{}) (cty.Type, error) {
+	rt := reflect.TypeOf(gv)
+	var path cty.Path
+	return impliedType(rt, path)
+}
+
+func impliedType(rt reflect.Type, path cty.Path) (cty.Type, error) {
+	if ety, err := impliedTypeExt(rt, path); err == nil {
+		return ety, nil
+	}
+
+	switch rt.Kind() {
+	case reflect.Ptr:
+		return impliedType(rt.Elem(), path)
+
+	// Primitive types
+	case reflect.Bool:
+		return cty.Bool, nil
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		return cty.Number, nil
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+		return cty.Number, nil
+	case reflect.Float32, reflect.Float64:
+		return cty.Number, nil
+	case reflect.String:
+		return cty.String, nil
+
+	// Collection types
+	case reflect.Slice:
+		path := append(path, cty.IndexStep{Key: cty.UnknownVal(cty.Number)})
+		ety, err := impliedType(rt.Elem(), path)
+		if err != nil {
+			return cty.NilType, err
+		}
+		return cty.List(ety), nil
+	case reflect.Map:
+		if !stringType.AssignableTo(rt.Key()) {
+			return cty.NilType, path.NewErrorf("no cty.Type for %s (must have string keys)", rt)
+		}
+		path := append(path, cty.IndexStep{Key: cty.UnknownVal(cty.String)})
+		ety, err := impliedType(rt.Elem(), path)
+		if err != nil {
+			return cty.NilType, err
+		}
+		return cty.Map(ety), nil
+
+	// Structural types
+	case reflect.Struct:
+		return impliedStructType(rt, path)
+
+	default:
+		return cty.NilType, path.NewErrorf("no cty.Type for %s", rt)
+	}
+}
+
+func impliedStructType(rt reflect.Type, path cty.Path) (cty.Type, error) {
+	if valueType.AssignableTo(rt) {
+		// Special case: cty.Value represents cty.DynamicPseudoType, for
+		// type conformance checking.
+		return cty.DynamicPseudoType, nil
+	}
+
+	fieldIdxs := structTagIndices(rt)
+	if len(fieldIdxs) == 0 {
+		return cty.NilType, path.NewErrorf("no cty.Type for %s (no cty field tags)", rt)
+	}
+
+	atys := make(map[string]cty.Type, len(fieldIdxs))
+
+	{
+		// Temporary extension of path for attributes
+		path := append(path, nil)
+
+		for k, fi := range fieldIdxs {
+			path[len(path)-1] = cty.GetAttrStep{Name: k}
+
+			ft := rt.Field(fi).Type
+			aty, err := impliedType(ft, path)
+			if err != nil {
+				return cty.NilType, err
+			}
+
+			atys[k] = aty
+		}
+	}
+
+	return cty.Object(atys), nil
+}
+
+var (
+	valueType  = reflect.TypeOf(cty.Value{})
+	stringType = reflect.TypeOf("")
+)
+
+// structTagIndices interrogates the fields of the given type (which must
+// be a struct type, or we'll panic) and returns a map from the cty
+// attribute names declared via struct tags to the indices of the
+// fields holding those tags.
+//
+// This function will panic if two fields within the struct are tagged with
+// the same cty attribute name.
+func structTagIndices(st reflect.Type) map[string]int {
+	ct := st.NumField()
+	ret := make(map[string]int, ct)
+
+	for i := 0; i < ct; i++ {
+		field := st.Field(i)
+		attrName := field.Tag.Get("cty")
+		if attrName != "" {
+			ret[attrName] = i
+		}
+	}
+
+	return ret
+}
diff -pruN 0.19.3+ds1-4/bake/hclparser/type_implied_ext.go 0.21.3-0ubuntu1/bake/hclparser/type_implied_ext.go
--- 0.19.3+ds1-4/bake/hclparser/type_implied_ext.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/bake/hclparser/type_implied_ext.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,166 @@
+package hclparser
+
+import (
+	"reflect"
+	"sync"
+
+	"github.com/containerd/errdefs"
+	"github.com/zclconf/go-cty/cty"
+	"github.com/zclconf/go-cty/cty/convert"
+	"github.com/zclconf/go-cty/cty/gocty"
+)
+
+type ToCtyValueConverter interface {
+	// ToCtyValue will convert this capsule value into a native
+	// cty.Value. This should not return a capsule type.
+	ToCtyValue() cty.Value
+}
+
+type FromCtyValueConverter interface {
+	// FromCtyValue will initialize this value using a cty.Value.
+	FromCtyValue(in cty.Value, path cty.Path) error
+}
+
+type extensionType int
+
+const (
+	unwrapCapsuleValueExtension extensionType = iota
+)
+
+func impliedTypeExt(rt reflect.Type, _ cty.Path) (cty.Type, error) {
+	if rt.Kind() != reflect.Pointer {
+		rt = reflect.PointerTo(rt)
+	}
+
+	if isCapsuleType(rt) {
+		return capsuleValueCapsuleType(rt), nil
+	}
+	return cty.NilType, errdefs.ErrNotImplemented
+}
+
+func isCapsuleType(rt reflect.Type) bool {
+	fromCtyValueType := reflect.TypeFor[FromCtyValueConverter]()
+	toCtyValueType := reflect.TypeFor[ToCtyValueConverter]()
+	return rt.Implements(fromCtyValueType) && rt.Implements(toCtyValueType)
+}
+
+var capsuleValueTypes sync.Map
+
+func capsuleValueCapsuleType(rt reflect.Type) cty.Type {
+	if rt.Kind() != reflect.Pointer {
+		panic("capsule value must be a pointer")
+	}
+
+	elem := rt.Elem()
+	if val, loaded := capsuleValueTypes.Load(elem); loaded {
+		return val.(cty.Type)
+	}
+
+	toCtyValueType := reflect.TypeFor[ToCtyValueConverter]()
+
+	// First time used. Initialize new capsule ops.
+	ops := &cty.CapsuleOps{
+		ConversionTo: func(_ cty.Type) func(cty.Value, cty.Path) (any, error) {
+			return func(in cty.Value, p cty.Path) (any, error) {
+				rv := reflect.New(elem).Interface()
+				if err := rv.(FromCtyValueConverter).FromCtyValue(in, p); err != nil {
+					return nil, err
+				}
+				return rv, nil
+			}
+		},
+		ConversionFrom: func(want cty.Type) func(any, cty.Path) (cty.Value, error) {
+			return func(in any, _ cty.Path) (cty.Value, error) {
+				rv := reflect.ValueOf(in).Convert(toCtyValueType)
+				v := rv.Interface().(ToCtyValueConverter).ToCtyValue()
+				return convert.Convert(v, want)
+			}
+		},
+		ExtensionData: func(key any) any {
+			switch key {
+			case unwrapCapsuleValueExtension:
+				zero := reflect.Zero(elem).Interface()
+				if conv, ok := zero.(ToCtyValueConverter); ok {
+					return conv.ToCtyValue().Type()
+				}
+
+				zero = reflect.Zero(rt).Interface()
+				if conv, ok := zero.(ToCtyValueConverter); ok {
+					return conv.ToCtyValue().Type()
+				}
+			}
+			return nil
+		},
+	}
+
+	// Attempt to store the new type. Use whichever was loaded first in the case
+	// of a race condition.
+	ety := cty.CapsuleWithOps(elem.Name(), elem, ops)
+	val, _ := capsuleValueTypes.LoadOrStore(elem, ety)
+	return val.(cty.Type)
+}
+
+// UnwrapCtyValue will unwrap capsule type values into their native cty value
+// equivalents if possible.
+func UnwrapCtyValue(in cty.Value) cty.Value {
+	want := toCtyValueType(in.Type())
+	if in.Type().Equals(want) {
+		return in
+	} else if out, err := convert.Convert(in, want); err == nil {
+		return out
+	}
+	return cty.NullVal(want)
+}
+
+func toCtyValueType(in cty.Type) cty.Type {
+	if et := in.MapElementType(); et != nil {
+		return cty.Map(toCtyValueType(*et))
+	}
+
+	if et := in.SetElementType(); et != nil {
+		return cty.Set(toCtyValueType(*et))
+	}
+
+	if et := in.ListElementType(); et != nil {
+		return cty.List(toCtyValueType(*et))
+	}
+
+	if in.IsObjectType() {
+		var optional []string
+		inAttrTypes := in.AttributeTypes()
+		outAttrTypes := make(map[string]cty.Type, len(inAttrTypes))
+		for name, typ := range inAttrTypes {
+			outAttrTypes[name] = toCtyValueType(typ)
+			if in.AttributeOptional(name) {
+				optional = append(optional, name)
+			}
+		}
+		return cty.ObjectWithOptionalAttrs(outAttrTypes, optional)
+	}
+
+	if in.IsTupleType() {
+		inTypes := in.TupleElementTypes()
+		outTypes := make([]cty.Type, len(inTypes))
+		for i, typ := range inTypes {
+			outTypes[i] = toCtyValueType(typ)
+		}
+		return cty.Tuple(outTypes)
+	}
+
+	if in.IsCapsuleType() {
+		if out := in.CapsuleExtensionData(unwrapCapsuleValueExtension); out != nil {
+			return out.(cty.Type)
+		}
+		return cty.DynamicPseudoType
+	}
+
+	return in
+}
+
+func ToCtyValue(val any, ty cty.Type) (cty.Value, error) {
+	out, err := gocty.ToCtyValue(val, ty)
+	if err != nil {
+		return out, err
+	}
+	return UnwrapCtyValue(out), nil
+}
diff -pruN 0.19.3+ds1-4/build/build.go 0.21.3-0ubuntu1/build/build.go
--- 0.19.3+ds1-4/build/build.go	2024-12-17 11:25:19.000000000 +0000
+++ 0.21.3-0ubuntu1/build/build.go	2025-03-17 16:14:25.000000000 +0000
@@ -15,7 +15,7 @@ import (
 	"sync"
 	"time"
 
-	"github.com/containerd/containerd/images"
+	"github.com/containerd/containerd/v2/core/images"
 	"github.com/distribution/reference"
 	"github.com/docker/buildx/builder"
 	controllerapi "github.com/docker/buildx/controller/pb"
@@ -40,7 +40,6 @@ import (
 	"github.com/moby/buildkit/solver/errdefs"
 	"github.com/moby/buildkit/solver/pb"
 	spb "github.com/moby/buildkit/sourcepolicy/pb"
-	"github.com/moby/buildkit/util/entitlements"
 	"github.com/moby/buildkit/util/progress/progresswriter"
 	"github.com/moby/buildkit/util/tracing"
 	"github.com/opencontainers/go-digest"
@@ -63,7 +62,7 @@ type Options struct {
 	Inputs Inputs
 
 	Ref                        string
-	Allow                      []entitlements.Entitlement
+	Allow                      []string
 	Attests                    map[string]*string
 	BuildArgs                  map[string]string
 	CacheFrom                  []client.CacheOptionsEntry
@@ -835,7 +834,7 @@ func remoteDigestWithMoby(ctx context.Co
 	if err != nil {
 		return "", err
 	}
-	img, _, err := api.ImageInspectWithRaw(ctx, name)
+	img, err := api.ImageInspect(ctx, name)
 	if err != nil {
 		return "", err
 	}
diff -pruN 0.19.3+ds1-4/build/opt.go 0.21.3-0ubuntu1/build/opt.go
--- 0.19.3+ds1-4/build/opt.go	2024-12-17 11:25:19.000000000 +0000
+++ 0.21.3-0ubuntu1/build/opt.go	2025-03-17 16:14:25.000000000 +0000
@@ -11,8 +11,8 @@ import (
 	"strings"
 	"syscall"
 
-	"github.com/containerd/containerd/content"
-	"github.com/containerd/containerd/content/local"
+	"github.com/containerd/containerd/v2/core/content"
+	"github.com/containerd/containerd/v2/plugins/content/local"
 	"github.com/containerd/platforms"
 	"github.com/distribution/reference"
 	"github.com/docker/buildx/builder"
@@ -318,7 +318,7 @@ func toSolveOpt(ctx context.Context, nod
 	switch opt.NetworkMode {
 	case "host":
 		so.FrontendAttrs["force-network-mode"] = opt.NetworkMode
-		so.AllowedEntitlements = append(so.AllowedEntitlements, entitlements.EntitlementNetworkHost)
+		so.AllowedEntitlements = append(so.AllowedEntitlements, entitlements.EntitlementNetworkHost.String())
 	case "none":
 		so.FrontendAttrs["force-network-mode"] = opt.NetworkMode
 	case "", "default":
diff -pruN 0.19.3+ds1-4/build/provenance.go 0.21.3-0ubuntu1/build/provenance.go
--- 0.19.3+ds1-4/build/provenance.go	2024-12-17 11:25:19.000000000 +0000
+++ 0.21.3-0ubuntu1/build/provenance.go	2025-03-17 16:14:25.000000000 +0000
@@ -8,8 +8,8 @@ import (
 	"strings"
 	"sync"
 
-	"github.com/containerd/containerd/content"
-	"github.com/containerd/containerd/content/proxy"
+	"github.com/containerd/containerd/v2/core/content"
+	"github.com/containerd/containerd/v2/core/content/proxy"
 	"github.com/docker/buildx/util/confutil"
 	"github.com/docker/buildx/util/progress"
 	controlapi "github.com/moby/buildkit/api/services/control"
diff -pruN 0.19.3+ds1-4/builder/builder_test.go 0.21.3-0ubuntu1/builder/builder_test.go
--- 0.19.3+ds1-4/builder/builder_test.go	2024-12-17 11:25:19.000000000 +0000
+++ 0.21.3-0ubuntu1/builder/builder_test.go	2025-03-17 16:14:25.000000000 +0000
@@ -29,7 +29,10 @@ func TestCsvToMap(t *testing.T) {
 }
 
 func TestParseBuildkitdFlags(t *testing.T) {
-	buildkitdConf := `
+	dirConf := t.TempDir()
+
+	buildkitdConfPath := path.Join(dirConf, "buildkitd-conf.toml")
+	require.NoError(t, os.WriteFile(buildkitdConfPath, []byte(`
 # debug enables additional debug logging
 debug = true
 # insecure-entitlements allows insecure entitlements, disabled by default.
@@ -37,10 +40,18 @@ insecure-entitlements = [ "network.host"
 [log]
   # log formatter: json or text
   format = "text"
-`
-	dirConf := t.TempDir()
-	buildkitdConfPath := path.Join(dirConf, "buildkitd-conf.toml")
-	require.NoError(t, os.WriteFile(buildkitdConfPath, []byte(buildkitdConf), 0644))
+`), 0644))
+
+	buildkitdConfBrokenPath := path.Join(dirConf, "buildkitd-conf-broken.toml")
+	require.NoError(t, os.WriteFile(buildkitdConfBrokenPath, []byte(`
+[worker.oci]
+  gc = "maybe"
+`), 0644))
+
+	buildkitdConfUnknownFieldPath := path.Join(dirConf, "buildkitd-unknown-field.toml")
+	require.NoError(t, os.WriteFile(buildkitdConfUnknownFieldPath, []byte(`
+foo = "bar"
+`), 0644))
 
 	testCases := []struct {
 		name                string
@@ -157,6 +168,26 @@ insecure-entitlements = [ "network.host"
 			nil,
 			true,
 		},
+		{
+			"error parsing buildkit config",
+			"",
+			"docker-container",
+			nil,
+			buildkitdConfBrokenPath,
+			nil,
+			true,
+		},
+		{
+			"unknown field in buildkit config",
+			"",
+			"docker-container",
+			nil,
+			buildkitdConfUnknownFieldPath,
+			[]string{
+				"--allow-insecure-entitlement=network.host",
+			},
+			false,
+		},
 	}
 	for _, tt := range testCases {
 		tt := tt
diff -pruN 0.19.3+ds1-4/builder/node.go 0.21.3-0ubuntu1/builder/node.go
--- 0.19.3+ds1-4/builder/node.go	2024-12-17 11:25:19.000000000 +0000
+++ 0.21.3-0ubuntu1/builder/node.go	2025-03-17 16:14:25.000000000 +0000
@@ -32,10 +32,11 @@ type Node struct {
 	Err         error
 
 	// worker settings
-	IDs       []string
-	Platforms []ocispecs.Platform
-	GCPolicy  []client.PruneInfo
-	Labels    map[string]string
+	IDs        []string
+	Platforms  []ocispecs.Platform
+	GCPolicy   []client.PruneInfo
+	Labels     map[string]string
+	CDIDevices []client.CDIDevice
 }
 
 // Nodes returns nodes for this builder.
@@ -259,6 +260,7 @@ func (n *Node) loadData(ctx context.Cont
 				n.GCPolicy = w.GCPolicy
 				n.Labels = w.Labels
 			}
+			n.CDIDevices = w.CDIDevices
 		}
 		sort.Strings(n.IDs)
 		n.Platforms = platformutil.Dedupe(n.Platforms)
diff -pruN 0.19.3+ds1-4/cmd/buildx/main.go 0.21.3-0ubuntu1/cmd/buildx/main.go
--- 0.19.3+ds1-4/cmd/buildx/main.go	2024-12-17 11:25:19.000000000 +0000
+++ 0.21.3-0ubuntu1/cmd/buildx/main.go	2025-03-17 16:14:25.000000000 +0000
@@ -4,6 +4,7 @@ import (
 	"context"
 	"fmt"
 	"os"
+	"path/filepath"
 
 	"github.com/docker/buildx/commands"
 	controllererrors "github.com/docker/buildx/controller/errdefs"
@@ -20,9 +21,6 @@ import (
 	"github.com/pkg/errors"
 	"go.opentelemetry.io/otel"
 
-	//nolint:staticcheck // vendored dependencies may still use this
-	"github.com/containerd/containerd/pkg/seed"
-
 	_ "k8s.io/client-go/plugin/pkg/client/auth/oidc"
 
 	_ "github.com/docker/buildx/driver/docker"
@@ -35,9 +33,6 @@ import (
 )
 
 func init() {
-	//nolint:staticcheck
-	seed.WithTimeAndRand()
-
 	stack.SetVersionInfo(version.Version, version.Revision)
 }
 
@@ -47,7 +42,8 @@ func runStandalone(cmd *command.DockerCl
 	}
 	defer flushMetrics(cmd)
 
-	rootCmd := commands.NewRootCmd(os.Args[0], false, cmd)
+	executable := os.Args[0]
+	rootCmd := commands.NewRootCmd(filepath.Base(executable), false, cmd)
 	return rootCmd.Execute()
 }
 
diff -pruN 0.19.3+ds1-4/commands/bake.go 0.21.3-0ubuntu1/commands/bake.go
--- 0.19.3+ds1-4/commands/bake.go	2024-12-17 11:25:19.000000000 +0000
+++ 0.21.3-0ubuntu1/commands/bake.go	2025-03-17 16:14:25.000000000 +0000
@@ -25,7 +25,6 @@ import (
 	"github.com/docker/buildx/controller/pb"
 	"github.com/docker/buildx/localstate"
 	"github.com/docker/buildx/util/buildflags"
-	"github.com/docker/buildx/util/cobrautil"
 	"github.com/docker/buildx/util/cobrautil/completion"
 	"github.com/docker/buildx/util/confutil"
 	"github.com/docker/buildx/util/desktop"
@@ -38,30 +37,40 @@ import (
 	"github.com/moby/buildkit/util/progress/progressui"
 	"github.com/pkg/errors"
 	"github.com/spf13/cobra"
+	"github.com/tonistiigi/go-csvvalue"
 	"go.opentelemetry.io/otel/attribute"
 )
 
 type bakeOptions struct {
-	files       []string
-	overrides   []string
-	printOnly   bool
-	listTargets bool
-	listVars    bool
-	sbom        string
-	provenance  string
-	allow       []string
+	files     []string
+	overrides []string
+
+	sbom       string
+	provenance string
+	allow      []string
 
 	builder      string
 	metadataFile string
 	exportPush   bool
 	exportLoad   bool
 	callFunc     string
+
+	print bool
+	list  string
+
+	// TODO: remove deprecated flags
+	listTargets bool
+	listVars    bool
 }
 
 func runBake(ctx context.Context, dockerCli command.Cli, targets []string, in bakeOptions, cFlags commonFlags) (err error) {
 	mp := dockerCli.MeterProvider()
 
-	ctx, end, err := tracing.TraceCurrentCommand(ctx, "bake")
+	ctx, end, err := tracing.TraceCurrentCommand(ctx, append([]string{"bake"}, targets...),
+		attribute.String("builder", in.builder),
+		attribute.StringSlice("targets", targets),
+		attribute.StringSlice("files", in.files),
+	)
 	if err != nil {
 		return err
 	}
@@ -121,9 +130,13 @@ func runBake(ctx context.Context, docker
 	var nodes []builder.Node
 	var progressConsoleDesc, progressTextDesc string
 
+	if in.print && in.list != "" {
+		return errors.New("--print and --list are mutually exclusive")
+	}
+
 	// instance only needed for reading remote bake files or building
 	var driverType string
-	if url != "" || !(in.printOnly || in.listTargets || in.listVars) {
+	if url != "" || !(in.print || in.list != "") {
 		b, err := builder.New(dockerCli,
 			builder.WithName(in.builder),
 			builder.WithContextPathHash(contextPathHash),
@@ -184,7 +197,7 @@ func runBake(ctx context.Context, docker
 		"BAKE_LOCAL_PLATFORM": platforms.Format(platforms.DefaultSpec()),
 	}
 
-	if in.listTargets || in.listVars {
+	if in.list != "" {
 		cfg, pm, err := bake.ParseFiles(files, defaults)
 		if err != nil {
 			return err
@@ -192,10 +205,15 @@ func runBake(ctx context.Context, docker
 		if err = printer.Wait(); err != nil {
 			return err
 		}
-		if in.listTargets {
-			return printTargetList(dockerCli.Out(), cfg)
-		} else if in.listVars {
-			return printVars(dockerCli.Out(), pm.AllVariables)
+		list, err := parseList(in.list)
+		if err != nil {
+			return err
+		}
+		switch list.Type {
+		case "targets":
+			return printTargetList(dockerCli.Out(), list.Format, cfg)
+		case "variables":
+			return printVars(dockerCli.Out(), list.Format, pm.AllVariables)
 		}
 	}
 
@@ -231,7 +249,7 @@ func runBake(ctx context.Context, docker
 		Target: tgts,
 	}
 
-	if in.printOnly {
+	if in.print {
 		if err = printer.Wait(); err != nil {
 			return err
 		}
@@ -257,8 +275,10 @@ func runBake(ctx context.Context, docker
 	if err != nil {
 		return err
 	}
-	if err := exp.Prompt(ctx, url != "", &syncWriter{w: dockerCli.Err(), wait: printer.Wait}); err != nil {
-		return err
+	if progressMode != progressui.RawJSONMode {
+		if err := exp.Prompt(ctx, url != "", &syncWriter{w: dockerCli.Err(), wait: printer.Wait}); err != nil {
+			return err
+		}
 	}
 	if printer.IsDone() {
 		// init new printer as old one was stopped to show the prompt
@@ -267,7 +287,7 @@ func runBake(ctx context.Context, docker
 		}
 	}
 
-	if err := saveLocalStateGroup(dockerCli, in, targets, bo, overrides, def); err != nil {
+	if err := saveLocalStateGroup(dockerCli, in, targets, bo); err != nil {
 		return err
 	}
 
@@ -427,6 +447,13 @@ func bakeCmd(dockerCli command.Cli, root
 			if !cmd.Flags().Lookup("pull").Changed {
 				cFlags.pull = nil
 			}
+			if options.list == "" {
+				if options.listTargets {
+					options.list = "targets"
+				} else if options.listVars {
+					options.list = "variables"
+				}
+			}
 			options.builder = rootOpts.builder
 			options.metadataFile = cFlags.metadataFile
 			// Other common flags (noCache, pull and progress) are processed in runBake function.
@@ -439,7 +466,6 @@ func bakeCmd(dockerCli command.Cli, root
 
 	flags.StringArrayVarP(&options.files, "file", "f", []string{}, "Build definition file")
 	flags.BoolVar(&options.exportLoad, "load", false, `Shorthand for "--set=*.output=type=docker"`)
-	flags.BoolVar(&options.printOnly, "print", false, "Print the options without building")
 	flags.BoolVar(&options.exportPush, "push", false, `Shorthand for "--set=*.output=type=registry"`)
 	flags.StringVar(&options.sbom, "sbom", "", `Shorthand for "--set=*.attest=type=sbom"`)
 	flags.StringVar(&options.provenance, "provenance", "", `Shorthand for "--set=*.attest=type=provenance"`)
@@ -450,20 +476,30 @@ func bakeCmd(dockerCli command.Cli, root
 	flags.VarPF(callAlias(&options.callFunc, "check"), "check", "", `Shorthand for "--call=check"`)
 	flags.Lookup("check").NoOptDefVal = "true"
 
+	flags.BoolVar(&options.print, "print", false, "Print the options without building")
+	flags.StringVar(&options.list, "list", "", "List targets or variables")
+
+	// TODO: remove deprecated flags
 	flags.BoolVar(&options.listTargets, "list-targets", false, "List available targets")
-	cobrautil.MarkFlagsExperimental(flags, "list-targets")
 	flags.MarkHidden("list-targets")
-
+	flags.MarkDeprecated("list-targets", "list-targets is deprecated, use list=targets instead")
 	flags.BoolVar(&options.listVars, "list-variables", false, "List defined variables")
-	cobrautil.MarkFlagsExperimental(flags, "list-variables")
 	flags.MarkHidden("list-variables")
+	flags.MarkDeprecated("list-variables", "list-variables is deprecated, use list=variables instead")
 
 	commonBuildFlags(&cFlags, flags)
 
 	return cmd
 }
 
-func saveLocalStateGroup(dockerCli command.Cli, in bakeOptions, targets []string, bo map[string]build.Options, overrides []string, def any) error {
+func saveLocalStateGroup(dockerCli command.Cli, in bakeOptions, targets []string, bo map[string]build.Options) error {
+	l, err := localstate.New(confutil.NewConfig(dockerCli))
+	if err != nil {
+		return err
+	}
+
+	defer l.MigrateIfNeeded()
+
 	prm := confutil.MetadataProvenance()
 	if len(in.metadataFile) == 0 {
 		prm = confutil.MetadataProvenanceModeDisabled
@@ -483,19 +519,10 @@ func saveLocalStateGroup(dockerCli comma
 	if len(refs) == 0 {
 		return nil
 	}
-	l, err := localstate.New(confutil.NewConfig(dockerCli))
-	if err != nil {
-		return err
-	}
-	dtdef, err := json.MarshalIndent(def, "", "  ")
-	if err != nil {
-		return err
-	}
+
 	return l.SaveGroup(groupRef, localstate.StateGroup{
-		Definition: dtdef,
-		Targets:    targets,
-		Inputs:     overrides,
-		Refs:       refs,
+		Refs:    refs,
+		Targets: targets,
 	})
 }
 
@@ -557,10 +584,70 @@ func readBakeFiles(ctx context.Context,
 	return
 }
 
-func printVars(w io.Writer, vars []*hclparser.Variable) error {
+type listEntry struct {
+	Type   string
+	Format string
+}
+
+func parseList(input string) (listEntry, error) {
+	res := listEntry{}
+
+	fields, err := csvvalue.Fields(input, nil)
+	if err != nil {
+		return res, err
+	}
+
+	if len(fields) == 1 && fields[0] == input && !strings.HasPrefix(input, "type=") {
+		res.Type = input
+	}
+
+	if res.Type == "" {
+		for _, field := range fields {
+			key, value, ok := strings.Cut(field, "=")
+			if !ok {
+				return res, errors.Errorf("invalid value %s", field)
+			}
+			key = strings.TrimSpace(strings.ToLower(key))
+			switch key {
+			case "type":
+				res.Type = value
+			case "format":
+				res.Format = value
+			default:
+				return res, errors.Errorf("unexpected key '%s' in '%s'", key, field)
+			}
+		}
+	}
+	if res.Format == "" {
+		res.Format = "table"
+	}
+
+	switch res.Type {
+	case "targets", "variables":
+	default:
+		return res, errors.Errorf("invalid list type %q", res.Type)
+	}
+
+	switch res.Format {
+	case "table", "json":
+	default:
+		return res, errors.Errorf("invalid list format %q", res.Format)
+	}
+
+	return res, nil
+}
+
+func printVars(w io.Writer, format string, vars []*hclparser.Variable) error {
 	slices.SortFunc(vars, func(a, b *hclparser.Variable) int {
 		return cmp.Compare(a.Name, b.Name)
 	})
+
+	if format == "json" {
+		enc := json.NewEncoder(w)
+		enc.SetIndent("", "  ")
+		return enc.Encode(vars)
+	}
+
 	tw := tabwriter.NewWriter(w, 1, 8, 1, '\t', 0)
 	defer tw.Flush()
 
@@ -578,12 +665,7 @@ func printVars(w io.Writer, vars []*hclp
 	return nil
 }
 
-func printTargetList(w io.Writer, cfg *bake.Config) error {
-	tw := tabwriter.NewWriter(w, 1, 8, 1, '\t', 0)
-	defer tw.Flush()
-
-	tw.Write([]byte("TARGET\tDESCRIPTION\n"))
-
+func printTargetList(w io.Writer, format string, cfg *bake.Config) error {
 	type targetOrGroup struct {
 		name   string
 		target *bake.Target
@@ -602,6 +684,20 @@ func printTargetList(w io.Writer, cfg *b
 		return cmp.Compare(a.name, b.name)
 	})
 
+	var tw *tabwriter.Writer
+	if format == "table" {
+		tw = tabwriter.NewWriter(w, 1, 8, 1, '\t', 0)
+		defer tw.Flush()
+		tw.Write([]byte("TARGET\tDESCRIPTION\n"))
+	}
+
+	type targetList struct {
+		Name        string `json:"name"`
+		Description string `json:"description,omitempty"`
+		Group       bool   `json:"group,omitempty"`
+	}
+	var targetsList []targetList
+
 	for _, tgt := range list {
 		if strings.HasPrefix(tgt.name, "_") {
 			// convention for a private target
@@ -610,9 +706,9 @@ func printTargetList(w io.Writer, cfg *b
 		var descr string
 		if tgt.target != nil {
 			descr = tgt.target.Description
+			targetsList = append(targetsList, targetList{Name: tgt.name, Description: descr})
 		} else if tgt.group != nil {
 			descr = tgt.group.Description
-
 			if len(tgt.group.Targets) > 0 {
 				slices.Sort(tgt.group.Targets)
 				names := strings.Join(tgt.group.Targets, ", ")
@@ -622,8 +718,17 @@ func printTargetList(w io.Writer, cfg *b
 					descr = names
 				}
 			}
+			targetsList = append(targetsList, targetList{Name: tgt.name, Description: descr, Group: true})
+		}
+		if format == "table" {
+			fmt.Fprintf(tw, "%s\t%s\n", tgt.name, descr)
 		}
-		fmt.Fprintf(tw, "%s\t%s\n", tgt.name, descr)
+	}
+
+	if format == "json" {
+		enc := json.NewEncoder(w)
+		enc.SetIndent("", "  ")
+		return enc.Encode(targetsList)
 	}
 
 	return nil
diff -pruN 0.19.3+ds1-4/commands/build.go 0.21.3-0ubuntu1/commands/build.go
--- 0.19.3+ds1-4/commands/build.go	2024-12-17 11:25:19.000000000 +0000
+++ 0.21.3-0ubuntu1/commands/build.go	2025-03-17 16:14:25.000000000 +0000
@@ -41,7 +41,7 @@ import (
 	"github.com/docker/cli/cli/command"
 	dockeropts "github.com/docker/cli/opts"
 	"github.com/docker/docker/api/types/versions"
-	"github.com/docker/docker/pkg/ioutils"
+	"github.com/docker/docker/pkg/atomicwriter"
 	"github.com/moby/buildkit/client"
 	"github.com/moby/buildkit/exporter/containerimage/exptypes"
 	"github.com/moby/buildkit/frontend/subrequests"
@@ -183,14 +183,17 @@ func (o *buildOptions) toControllerOptio
 		}
 	}
 
-	opts.CacheFrom, err = buildflags.ParseCacheEntry(o.cacheFrom)
+	cacheFrom, err := buildflags.ParseCacheEntry(o.cacheFrom)
 	if err != nil {
 		return nil, err
 	}
-	opts.CacheTo, err = buildflags.ParseCacheEntry(o.cacheTo)
+	opts.CacheFrom = cacheFrom.ToPB()
+
+	cacheTo, err := buildflags.ParseCacheEntry(o.cacheTo)
 	if err != nil {
 		return nil, err
 	}
+	opts.CacheTo = cacheTo.ToPB()
 
 	opts.Secrets, err = buildflags.ParseSecretSpecs(o.secrets)
 	if err != nil {
@@ -282,7 +285,11 @@ func (o *buildOptionsHash) String() stri
 func runBuild(ctx context.Context, dockerCli command.Cli, options buildOptions) (err error) {
 	mp := dockerCli.MeterProvider()
 
-	ctx, end, err := tracing.TraceCurrentCommand(ctx, "build")
+	ctx, end, err := tracing.TraceCurrentCommand(ctx, []string{"build", options.contextPath},
+		attribute.String("builder", options.builder),
+		attribute.String("context", options.contextPath),
+		attribute.String("dockerfile", options.dockerfileName),
+	)
 	if err != nil {
 		return err
 	}
@@ -463,7 +470,7 @@ func runControllerBuild(ctx context.Cont
 	if err != nil {
 		var be *controllererrors.BuildError
 		if errors.As(err, &be) {
-			ref = be.Ref
+			ref = be.SessionID
 			retErr = err
 			// We can proceed to monitor
 		} else {
@@ -590,7 +597,7 @@ func buildCmd(dockerCli command.Cli, roo
 
 	flags.StringSliceVar(&options.extraHosts, "add-host", []string{}, `Add a custom host-to-IP mapping (format: "host:ip")`)
 
-	flags.StringSliceVar(&options.allow, "allow", []string{}, `Allow extra privileged entitlement (e.g., "network.host", "security.insecure")`)
+	flags.StringArrayVar(&options.allow, "allow", []string{}, `Allow extra privileged entitlement (e.g., "network.host", "security.insecure")`)
 
 	flags.StringArrayVarP(&options.annotations, "annotation", "", []string{}, "Add annotation to the image")
 
@@ -720,7 +727,7 @@ type commonFlags struct {
 
 func commonBuildFlags(options *commonFlags, flags *pflag.FlagSet) {
 	options.noCache = flags.Bool("no-cache", false, "Do not use cache when building the image")
-	flags.StringVar(&options.progress, "progress", "auto", `Set type of progress output ("auto", "plain", "tty", "rawjson"). Use plain to show container output`)
+	flags.StringVar(&options.progress, "progress", "auto", `Set type of progress output ("auto", "quiet", "plain", "tty", "rawjson"). Use plain to show container output`)
 	options.pull = flags.Bool("pull", false, "Always attempt to pull all referenced images")
 	flags.StringVar(&options.metadataFile, "metadata-file", "", "Write build result metadata to a file")
 }
@@ -742,7 +749,7 @@ func writeMetadataFile(filename string,
 	if err != nil {
 		return err
 	}
-	return ioutils.AtomicWriteFile(filename, b, 0644)
+	return atomicwriter.WriteFile(filename, b, 0644)
 }
 
 func decodeExporterResponse(exporterResponse map[string]string) map[string]interface{} {
diff -pruN 0.19.3+ds1-4/commands/history/inspect.go 0.21.3-0ubuntu1/commands/history/inspect.go
--- 0.19.3+ds1-4/commands/history/inspect.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/commands/history/inspect.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,900 @@
+package history
+
+import (
+	"bytes"
+	"cmp"
+	"context"
+	"encoding/json"
+	"fmt"
+	"io"
+	"os"
+	"path/filepath"
+	"slices"
+	"strconv"
+	"strings"
+	"text/tabwriter"
+	"text/template"
+	"time"
+
+	"github.com/containerd/containerd/v2/core/content"
+	"github.com/containerd/containerd/v2/core/content/proxy"
+	"github.com/containerd/containerd/v2/core/images"
+	"github.com/containerd/platforms"
+	"github.com/docker/buildx/builder"
+	"github.com/docker/buildx/localstate"
+	"github.com/docker/buildx/util/cobrautil/completion"
+	"github.com/docker/buildx/util/confutil"
+	"github.com/docker/buildx/util/desktop"
+	"github.com/docker/cli/cli/command"
+	"github.com/docker/cli/cli/command/formatter"
+	"github.com/docker/cli/cli/debug"
+	slsa "github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/common"
+	slsa02 "github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/v0.2"
+	controlapi "github.com/moby/buildkit/api/services/control"
+	"github.com/moby/buildkit/client"
+	"github.com/moby/buildkit/solver/errdefs"
+	provenancetypes "github.com/moby/buildkit/solver/llbsolver/provenance/types"
+	"github.com/moby/buildkit/util/grpcerrors"
+	"github.com/moby/buildkit/util/stack"
+	"github.com/opencontainers/go-digest"
+	ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
+	"github.com/pkg/errors"
+	"github.com/spf13/cobra"
+	"github.com/tonistiigi/go-csvvalue"
+	spb "google.golang.org/genproto/googleapis/rpc/status"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/status"
+	proto "google.golang.org/protobuf/proto"
+)
+
+type statusT string
+
+const (
+	statusComplete statusT = "completed"
+	statusRunning  statusT = "running"
+	statusError    statusT = "failed"
+	statusCanceled statusT = "canceled"
+)
+
+type inspectOptions struct {
+	builder string
+	ref     string
+	format  string
+}
+
+type inspectOutput struct {
+	Name string `json:",omitempty"`
+	Ref  string
+
+	Context       string   `json:",omitempty"`
+	Dockerfile    string   `json:",omitempty"`
+	VCSRepository string   `json:",omitempty"`
+	VCSRevision   string   `json:",omitempty"`
+	Target        string   `json:",omitempty"`
+	Platform      []string `json:",omitempty"`
+	KeepGitDir    bool     `json:",omitempty"`
+
+	NamedContexts []keyValueOutput `json:",omitempty"`
+
+	StartedAt   *time.Time    `json:",omitempty"`
+	CompletedAt *time.Time    `json:",omitempty"`
+	Duration    time.Duration `json:",omitempty"`
+	Status      statusT       `json:",omitempty"`
+	Error       *errorOutput  `json:",omitempty"`
+
+	NumCompletedSteps int32
+	NumTotalSteps     int32
+	NumCachedSteps    int32
+
+	BuildArgs []keyValueOutput `json:",omitempty"`
+	Labels    []keyValueOutput `json:",omitempty"`
+
+	Config configOutput `json:",omitempty"`
+
+	Materials   []materialOutput   `json:",omitempty"`
+	Attachments []attachmentOutput `json:",omitempty"`
+
+	Errors []string `json:",omitempty"`
+}
+
+type configOutput struct {
+	Network          string   `json:",omitempty"`
+	ExtraHosts       []string `json:",omitempty"`
+	Hostname         string   `json:",omitempty"`
+	CgroupParent     string   `json:",omitempty"`
+	ImageResolveMode string   `json:",omitempty"`
+	MultiPlatform    bool     `json:",omitempty"`
+	NoCache          bool     `json:",omitempty"`
+	NoCacheFilter    []string `json:",omitempty"`
+
+	ShmSize               string `json:",omitempty"`
+	Ulimit                string `json:",omitempty"`
+	CacheMountNS          string `json:",omitempty"`
+	DockerfileCheckConfig string `json:",omitempty"`
+	SourceDateEpoch       string `json:",omitempty"`
+	SandboxHostname       string `json:",omitempty"`
+
+	RestRaw []keyValueOutput `json:",omitempty"`
+}
+
+type materialOutput struct {
+	URI     string   `json:",omitempty"`
+	Digests []string `json:",omitempty"`
+}
+
+type attachmentOutput struct {
+	Digest   string `json:",omitempty"`
+	Platform string `json:",omitempty"`
+	Type     string `json:",omitempty"`
+}
+
+type errorOutput struct {
+	Code    int      `json:",omitempty"`
+	Message string   `json:",omitempty"`
+	Name    string   `json:",omitempty"`
+	Logs    []string `json:",omitempty"`
+	Sources []byte   `json:",omitempty"`
+	Stack   []byte   `json:",omitempty"`
+}
+
+type keyValueOutput struct {
+	Name  string `json:",omitempty"`
+	Value string `json:",omitempty"`
+}
+
+func readAttr[T any](attrs map[string]string, k string, dest *T, f func(v string) (T, bool)) {
+	if sv, ok := attrs[k]; ok {
+		if f != nil {
+			v, ok := f(sv)
+			if ok {
+				*dest = v
+			}
+		}
+		if d, ok := any(dest).(*string); ok {
+			*d = sv
+		}
+	}
+	delete(attrs, k)
+}
+
+func runInspect(ctx context.Context, dockerCli command.Cli, opts inspectOptions) error {
+	b, err := builder.New(dockerCli, builder.WithName(opts.builder))
+	if err != nil {
+		return err
+	}
+
+	nodes, err := b.LoadNodes(ctx)
+	if err != nil {
+		return err
+	}
+	for _, node := range nodes {
+		if node.Err != nil {
+			return node.Err
+		}
+	}
+
+	recs, err := queryRecords(ctx, opts.ref, nodes)
+	if err != nil {
+		return err
+	}
+
+	if len(recs) == 0 {
+		if opts.ref == "" {
+			return errors.New("no records found")
+		}
+		return errors.Errorf("no record found for ref %q", opts.ref)
+	}
+
+	if opts.ref == "" {
+		slices.SortFunc(recs, func(a, b historyRecord) int {
+			return b.CreatedAt.AsTime().Compare(a.CreatedAt.AsTime())
+		})
+	}
+
+	rec := &recs[0]
+
+	c, err := rec.node.Driver.Client(ctx)
+	if err != nil {
+		return err
+	}
+
+	store := proxy.NewContentStore(c.ContentClient())
+
+	var defaultPlatform string
+	workers, err := c.ListWorkers(ctx)
+	if err != nil {
+		return errors.Wrap(err, "failed to list workers")
+	}
+workers0:
+	for _, w := range workers {
+		for _, p := range w.Platforms {
+			defaultPlatform = platforms.FormatAll(platforms.Normalize(p))
+			break workers0
+		}
+	}
+
+	ls, err := localstate.New(confutil.NewConfig(dockerCli))
+	if err != nil {
+		return err
+	}
+	st, _ := ls.ReadRef(rec.node.Builder, rec.node.Name, rec.Ref)
+
+	attrs := rec.FrontendAttrs
+	delete(attrs, "frontend.caps")
+
+	var out inspectOutput
+
+	var context string
+	var dockerfile string
+	if st != nil {
+		context = st.LocalPath
+		dockerfile = st.DockerfilePath
+		wd, _ := os.Getwd()
+
+		if dockerfile != "" && dockerfile != "-" {
+			if rel, err := filepath.Rel(context, dockerfile); err == nil {
+				if !strings.HasPrefix(rel, ".."+string(filepath.Separator)) {
+					dockerfile = rel
+				}
+			}
+		}
+		if context != "" {
+			if rel, err := filepath.Rel(wd, context); err == nil {
+				if !strings.HasPrefix(rel, ".."+string(filepath.Separator)) {
+					context = rel
+				}
+			}
+		}
+	}
+
+	if v, ok := attrs["context"]; ok && context == "" {
+		delete(attrs, "context")
+		context = v
+	}
+	if dockerfile == "" {
+		if v, ok := attrs["filename"]; ok {
+			dockerfile = v
+			if dfdir, ok := attrs["vcs:localdir:dockerfile"]; ok {
+				dockerfile = filepath.Join(dfdir, dockerfile)
+			}
+		}
+	}
+	delete(attrs, "filename")
+
+	out.Name = buildName(rec.FrontendAttrs, st)
+	out.Ref = rec.Ref
+
+	out.Context = context
+	out.Dockerfile = dockerfile
+
+	if _, ok := attrs["context"]; !ok {
+		if src, ok := attrs["vcs:source"]; ok {
+			out.VCSRepository = src
+		}
+		if rev, ok := attrs["vcs:revision"]; ok {
+			out.VCSRevision = rev
+		}
+	}
+
+	readAttr(attrs, "target", &out.Target, nil)
+
+	readAttr(attrs, "platform", &out.Platform, func(v string) ([]string, bool) {
+		return tryParseValue(v, &out.Errors, func(v string) ([]string, error) {
+			var pp []string
+			for _, v := range strings.Split(v, ",") {
+				p, err := platforms.Parse(v)
+				if err != nil {
+					return nil, err
+				}
+				pp = append(pp, platforms.FormatAll(platforms.Normalize(p)))
+			}
+			if len(pp) == 0 {
+				pp = append(pp, defaultPlatform)
+			}
+			return pp, nil
+		})
+	})
+
+	readAttr(attrs, "build-arg:BUILDKIT_CONTEXT_KEEP_GIT_DIR", &out.KeepGitDir, func(v string) (bool, bool) {
+		return tryParseValue(v, &out.Errors, strconv.ParseBool)
+	})
+
+	out.NamedContexts = readKeyValues(attrs, "context:")
+
+	if rec.CreatedAt != nil {
+		tm := rec.CreatedAt.AsTime().Local()
+		out.StartedAt = &tm
+	}
+	out.Status = statusRunning
+
+	if rec.CompletedAt != nil {
+		tm := rec.CompletedAt.AsTime().Local()
+		out.CompletedAt = &tm
+		out.Status = statusComplete
+	}
+
+	if rec.Error != nil || rec.ExternalError != nil {
+		out.Error = &errorOutput{}
+		if rec.Error != nil {
+			if codes.Code(rec.Error.Code) == codes.Canceled {
+				out.Status = statusCanceled
+			} else {
+				out.Status = statusError
+			}
+			out.Error.Code = int(codes.Code(rec.Error.Code))
+			out.Error.Message = rec.Error.Message
+		}
+		if rec.ExternalError != nil {
+			dt, err := content.ReadBlob(ctx, store, ociDesc(rec.ExternalError))
+			if err != nil {
+				return errors.Wrapf(err, "failed to read external error %s", rec.ExternalError.Digest)
+			}
+			var st spb.Status
+			if err := proto.Unmarshal(dt, &st); err != nil {
+				return errors.Wrapf(err, "failed to unmarshal external error %s", rec.ExternalError.Digest)
+			}
+			retErr := grpcerrors.FromGRPC(status.ErrorProto(&st))
+			var errsources bytes.Buffer
+			for _, s := range errdefs.Sources(retErr) {
+				s.Print(&errsources)
+				errsources.WriteString("\n")
+			}
+			out.Error.Sources = errsources.Bytes()
+			var ve *errdefs.VertexError
+			if errors.As(retErr, &ve) {
+				dgst, err := digest.Parse(ve.Vertex.Digest)
+				if err != nil {
+					return errors.Wrapf(err, "failed to parse vertex digest %s", ve.Vertex.Digest)
+				}
+				name, logs, err := loadVertexLogs(ctx, c, rec.Ref, dgst, 16)
+				if err != nil {
+					return errors.Wrapf(err, "failed to load vertex logs %s", dgst)
+				}
+				out.Error.Name = name
+				out.Error.Logs = logs
+			}
+			out.Error.Stack = []byte(fmt.Sprintf("%+v", stack.Formatter(retErr)))
+		}
+	}
+
+	if out.StartedAt != nil {
+		if out.CompletedAt != nil {
+			out.Duration = out.CompletedAt.Sub(*out.StartedAt)
+		} else {
+			out.Duration = rec.currentTimestamp.Sub(*out.StartedAt)
+		}
+	}
+
+	out.NumCompletedSteps = rec.NumCompletedSteps
+	out.NumTotalSteps = rec.NumTotalSteps
+	out.NumCachedSteps = rec.NumCachedSteps
+
+	out.BuildArgs = readKeyValues(attrs, "build-arg:")
+	out.Labels = readKeyValues(attrs, "label:")
+
+	readAttr(attrs, "force-network-mode", &out.Config.Network, nil)
+	readAttr(attrs, "hostname", &out.Config.Hostname, nil)
+	readAttr(attrs, "cgroup-parent", &out.Config.CgroupParent, nil)
+	readAttr(attrs, "image-resolve-mode", &out.Config.ImageResolveMode, nil)
+	readAttr(attrs, "build-arg:BUILDKIT_MULTI_PLATFORM", &out.Config.MultiPlatform, func(v string) (bool, bool) {
+		return tryParseValue(v, &out.Errors, strconv.ParseBool)
+	})
+	readAttr(attrs, "multi-platform", &out.Config.MultiPlatform, func(v string) (bool, bool) {
+		return tryParseValue(v, &out.Errors, strconv.ParseBool)
+	})
+	readAttr(attrs, "no-cache", &out.Config.NoCache, func(v string) (bool, bool) {
+		if v == "" {
+			return true, true
+		}
+		return false, false
+	})
+	readAttr(attrs, "no-cache", &out.Config.NoCacheFilter, func(v string) ([]string, bool) {
+		if v == "" {
+			return nil, false
+		}
+		return strings.Split(v, ","), true
+	})
+
+	readAttr(attrs, "add-hosts", &out.Config.ExtraHosts, func(v string) ([]string, bool) {
+		return tryParseValue(v, &out.Errors, func(v string) ([]string, error) {
+			fields, err := csvvalue.Fields(v, nil)
+			if err != nil {
+				return nil, err
+			}
+			return fields, nil
+		})
+	})
+
+	readAttr(attrs, "shm-size", &out.Config.ShmSize, nil)
+	readAttr(attrs, "ulimit", &out.Config.Ulimit, nil)
+	readAttr(attrs, "build-arg:BUILDKIT_CACHE_MOUNT_NS", &out.Config.CacheMountNS, nil)
+	readAttr(attrs, "build-arg:BUILDKIT_DOCKERFILE_CHECK", &out.Config.DockerfileCheckConfig, nil)
+	readAttr(attrs, "build-arg:SOURCE_DATE_EPOCH", &out.Config.SourceDateEpoch, nil)
+	readAttr(attrs, "build-arg:SANDBOX_HOSTNAME", &out.Config.SandboxHostname, nil)
+
+	var unusedAttrs []keyValueOutput
+	for k := range attrs {
+		if strings.HasPrefix(k, "vcs:") || strings.HasPrefix(k, "build-arg:") || strings.HasPrefix(k, "label:") || strings.HasPrefix(k, "context:") || strings.HasPrefix(k, "attest:") {
+			continue
+		}
+		unusedAttrs = append(unusedAttrs, keyValueOutput{
+			Name:  k,
+			Value: attrs[k],
+		})
+	}
+	slices.SortFunc(unusedAttrs, func(a, b keyValueOutput) int {
+		return cmp.Compare(a.Name, b.Name)
+	})
+	out.Config.RestRaw = unusedAttrs
+
+	attachments, err := allAttachments(ctx, store, *rec)
+	if err != nil {
+		return err
+	}
+
+	provIndex := slices.IndexFunc(attachments, func(a attachment) bool {
+		return descrType(a.descr) == slsa02.PredicateSLSAProvenance
+	})
+	if provIndex != -1 {
+		prov := attachments[provIndex]
+		dt, err := content.ReadBlob(ctx, store, prov.descr)
+		if err != nil {
+			return errors.Errorf("failed to read provenance %s: %v", prov.descr.Digest, err)
+		}
+		var pred provenancetypes.ProvenancePredicate
+		if err := json.Unmarshal(dt, &pred); err != nil {
+			return errors.Errorf("failed to unmarshal provenance %s: %v", prov.descr.Digest, err)
+		}
+		for _, m := range pred.Materials {
+			out.Materials = append(out.Materials, materialOutput{
+				URI:     m.URI,
+				Digests: digestSetToDigests(m.Digest),
+			})
+		}
+	}
+
+	if len(attachments) > 0 {
+		for _, a := range attachments {
+			p := ""
+			if a.platform != nil {
+				p = platforms.FormatAll(*a.platform)
+			}
+			out.Attachments = append(out.Attachments, attachmentOutput{
+				Digest:   a.descr.Digest.String(),
+				Platform: p,
+				Type:     descrType(a.descr),
+			})
+		}
+	}
+
+	if opts.format == formatter.JSONFormatKey {
+		enc := json.NewEncoder(dockerCli.Out())
+		enc.SetIndent("", "  ")
+		return enc.Encode(out)
+	} else if opts.format != formatter.PrettyFormatKey {
+		tmpl, err := template.New("inspect").Parse(opts.format)
+		if err != nil {
+			return errors.Wrapf(err, "failed to parse format template")
+		}
+		var buf bytes.Buffer
+		if err := tmpl.Execute(&buf, out); err != nil {
+			return errors.Wrapf(err, "failed to execute format template")
+		}
+		fmt.Fprintln(dockerCli.Out(), buf.String())
+		return nil
+	}
+
+	tw := tabwriter.NewWriter(dockerCli.Out(), 1, 8, 1, '\t', 0)
+
+	if out.Name != "" {
+		fmt.Fprintf(tw, "Name:\t%s\n", out.Name)
+	}
+	if opts.ref == "" && out.Ref != "" {
+		fmt.Fprintf(tw, "Ref:\t%s\n", out.Ref)
+	}
+	if out.Context != "" {
+		fmt.Fprintf(tw, "Context:\t%s\n", out.Context)
+	}
+	if out.Dockerfile != "" {
+		fmt.Fprintf(tw, "Dockerfile:\t%s\n", out.Dockerfile)
+	}
+	if out.VCSRepository != "" {
+		fmt.Fprintf(tw, "VCS Repository:\t%s\n", out.VCSRepository)
+	}
+	if out.VCSRevision != "" {
+		fmt.Fprintf(tw, "VCS Revision:\t%s\n", out.VCSRevision)
+	}
+
+	if out.Target != "" {
+		fmt.Fprintf(tw, "Target:\t%s\n", out.Target)
+	}
+
+	if len(out.Platform) > 0 {
+		fmt.Fprintf(tw, "Platforms:\t%s\n", strings.Join(out.Platform, ", "))
+	}
+
+	if out.KeepGitDir {
+		fmt.Fprintf(tw, "Keep Git Dir:\t%s\n", strconv.FormatBool(out.KeepGitDir))
+	}
+
+	tw.Flush()
+
+	fmt.Fprintln(dockerCli.Out())
+
+	printTable(dockerCli.Out(), out.NamedContexts, "Named Context")
+
+	tw = tabwriter.NewWriter(dockerCli.Out(), 1, 8, 1, '\t', 0)
+
+	fmt.Fprintf(tw, "Started:\t%s\n", out.StartedAt.Format("2006-01-02 15:04:05"))
+	var statusStr string
+	if out.Status == statusRunning {
+		statusStr = " (running)"
+	}
+	fmt.Fprintf(tw, "Duration:\t%s%s\n", formatDuration(out.Duration), statusStr)
+
+	if out.Status == statusError {
+		fmt.Fprintf(tw, "Error:\t%s %s\n", codes.Code(rec.Error.Code).String(), rec.Error.Message)
+	} else if out.Status == statusCanceled {
+		fmt.Fprintf(tw, "Status:\tCanceled\n")
+	}
+
+	fmt.Fprintf(tw, "Build Steps:\t%d/%d (%.0f%% cached)\n", out.NumCompletedSteps, out.NumTotalSteps, float64(out.NumCachedSteps)/float64(out.NumTotalSteps)*100)
+	tw.Flush()
+
+	fmt.Fprintln(dockerCli.Out())
+
+	tw = tabwriter.NewWriter(dockerCli.Out(), 1, 8, 1, '\t', 0)
+
+	if out.Config.Network != "" {
+		fmt.Fprintf(tw, "Network:\t%s\n", out.Config.Network)
+	}
+	if out.Config.Hostname != "" {
+		fmt.Fprintf(tw, "Hostname:\t%s\n", out.Config.Hostname)
+	}
+	if len(out.Config.ExtraHosts) > 0 {
+		fmt.Fprintf(tw, "Extra Hosts:\t%s\n", strings.Join(out.Config.ExtraHosts, ", "))
+	}
+	if out.Config.CgroupParent != "" {
+		fmt.Fprintf(tw, "Cgroup Parent:\t%s\n", out.Config.CgroupParent)
+	}
+	if out.Config.ImageResolveMode != "" {
+		fmt.Fprintf(tw, "Image Resolve Mode:\t%s\n", out.Config.ImageResolveMode)
+	}
+	if out.Config.MultiPlatform {
+		fmt.Fprintf(tw, "Multi-Platform:\t%s\n", strconv.FormatBool(out.Config.MultiPlatform))
+	}
+	if out.Config.NoCache {
+		fmt.Fprintf(tw, "No Cache:\t%s\n", strconv.FormatBool(out.Config.NoCache))
+	}
+	if len(out.Config.NoCacheFilter) > 0 {
+		fmt.Fprintf(tw, "No Cache Filter:\t%s\n", strings.Join(out.Config.NoCacheFilter, ", "))
+	}
+
+	if out.Config.ShmSize != "" {
+		fmt.Fprintf(tw, "Shm Size:\t%s\n", out.Config.ShmSize)
+	}
+	if out.Config.Ulimit != "" {
+		fmt.Fprintf(tw, "Resource Limits:\t%s\n", out.Config.Ulimit)
+	}
+	if out.Config.CacheMountNS != "" {
+		fmt.Fprintf(tw, "Cache Mount Namespace:\t%s\n", out.Config.CacheMountNS)
+	}
+	if out.Config.DockerfileCheckConfig != "" {
+		fmt.Fprintf(tw, "Dockerfile Check Config:\t%s\n", out.Config.DockerfileCheckConfig)
+	}
+	if out.Config.SourceDateEpoch != "" {
+		fmt.Fprintf(tw, "Source Date Epoch:\t%s\n", out.Config.SourceDateEpoch)
+	}
+	if out.Config.SandboxHostname != "" {
+		fmt.Fprintf(tw, "Sandbox Hostname:\t%s\n", out.Config.SandboxHostname)
+	}
+
+	for _, kv := range out.Config.RestRaw {
+		fmt.Fprintf(tw, "%s:\t%s\n", kv.Name, kv.Value)
+	}
+
+	tw.Flush()
+
+	fmt.Fprintln(dockerCli.Out())
+
+	printTable(dockerCli.Out(), out.BuildArgs, "Build Arg")
+	printTable(dockerCli.Out(), out.Labels, "Label")
+
+	if len(out.Materials) > 0 {
+		fmt.Fprintln(dockerCli.Out(), "Materials:")
+		tw = tabwriter.NewWriter(dockerCli.Out(), 1, 8, 1, '\t', 0)
+		fmt.Fprintf(tw, "URI\tDIGEST\n")
+		for _, m := range out.Materials {
+			fmt.Fprintf(tw, "%s\t%s\n", m.URI, strings.Join(m.Digests, ", "))
+		}
+		tw.Flush()
+		fmt.Fprintln(dockerCli.Out())
+	}
+
+	if len(out.Attachments) > 0 {
+		fmt.Fprintf(tw, "Attachments:\n")
+		tw = tabwriter.NewWriter(dockerCli.Out(), 1, 8, 1, '\t', 0)
+		fmt.Fprintf(tw, "DIGEST\tPLATFORM\tTYPE\n")
+		for _, a := range out.Attachments {
+			fmt.Fprintf(tw, "%s\t%s\t%s\n", a.Digest, a.Platform, a.Type)
+		}
+		tw.Flush()
+		fmt.Fprintln(dockerCli.Out())
+	}
+
+	if out.Error != nil {
+		if out.Error.Sources != nil {
+			fmt.Fprint(dockerCli.Out(), string(out.Error.Sources))
+		}
+		if len(out.Error.Logs) > 0 {
+			fmt.Fprintln(dockerCli.Out(), "Logs:")
+			fmt.Fprintf(dockerCli.Out(), "> => %s:\n", out.Error.Name)
+			for _, l := range out.Error.Logs {
+				fmt.Fprintln(dockerCli.Out(), "> "+l)
+			}
+			fmt.Fprintln(dockerCli.Out())
+		}
+		if len(out.Error.Stack) > 0 {
+			if debug.IsEnabled() {
+				fmt.Fprintf(dockerCli.Out(), "\n%s\n", out.Error.Stack)
+			} else {
+				fmt.Fprintf(dockerCli.Out(), "Enable --debug to see stack traces for error\n")
+			}
+		}
+	}
+
+	fmt.Fprintf(dockerCli.Out(), "Print build logs: docker buildx history logs %s\n", rec.Ref)
+
+	fmt.Fprintf(dockerCli.Out(), "View build in Docker Desktop: %s\n", desktop.BuildURL(fmt.Sprintf("%s/%s/%s", rec.node.Builder, rec.node.Name, rec.Ref)))
+
+	return nil
+}
+
+func inspectCmd(dockerCli command.Cli, rootOpts RootOptions) *cobra.Command {
+	var options inspectOptions
+
+	cmd := &cobra.Command{
+		Use:   "inspect [OPTIONS] [REF]",
+		Short: "Inspect a build",
+		Args:  cobra.MaximumNArgs(1),
+		RunE: func(cmd *cobra.Command, args []string) error {
+			if len(args) > 0 {
+				options.ref = args[0]
+			}
+			options.builder = *rootOpts.Builder
+			return runInspect(cmd.Context(), dockerCli, options)
+		},
+		ValidArgsFunction: completion.Disable,
+	}
+
+	cmd.AddCommand(
+		attachmentCmd(dockerCli, rootOpts),
+	)
+
+	flags := cmd.Flags()
+	flags.StringVar(&options.format, "format", formatter.PrettyFormatKey, "Format the output")
+
+	return cmd
+}
+
+func loadVertexLogs(ctx context.Context, c *client.Client, ref string, dgst digest.Digest, limit int) (string, []string, error) {
+	st, err := c.ControlClient().Status(ctx, &controlapi.StatusRequest{
+		Ref: ref,
+	})
+	if err != nil {
+		return "", nil, err
+	}
+
+	var name string
+	var logs []string
+	lastState := map[int]int{}
+
+loop0:
+	for {
+		select {
+		case <-ctx.Done():
+			st.CloseSend()
+			return "", nil, context.Cause(ctx)
+		default:
+			ev, err := st.Recv()
+			if err != nil {
+				if errors.Is(err, io.EOF) {
+					break loop0
+				}
+				return "", nil, err
+			}
+			ss := client.NewSolveStatus(ev)
+			for _, v := range ss.Vertexes {
+				if v.Digest == dgst {
+					name = v.Name
+					break
+				}
+			}
+			for _, l := range ss.Logs {
+				if l.Vertex == dgst {
+					parts := bytes.Split(l.Data, []byte("\n"))
+					for i, p := range parts {
+						var wrote bool
+						if i == 0 {
+							idx, ok := lastState[l.Stream]
+							if ok && idx != -1 {
+								logs[idx] = logs[idx] + string(p)
+								wrote = true
+							}
+						}
+						if !wrote {
+							if len(p) > 0 {
+								logs = append(logs, string(p))
+							}
+							lastState[l.Stream] = len(logs) - 1
+						}
+						if i == len(parts)-1 && len(p) == 0 {
+							lastState[l.Stream] = -1
+						}
+					}
+				}
+			}
+		}
+	}
+
+	if limit > 0 && len(logs) > limit {
+		logs = logs[len(logs)-limit:]
+	}
+
+	return name, logs, nil
+}
+
+type attachment struct {
+	platform *ocispecs.Platform
+	descr    ocispecs.Descriptor
+}
+
+func allAttachments(ctx context.Context, store content.Store, rec historyRecord) ([]attachment, error) {
+	var attachments []attachment
+
+	if rec.Result != nil {
+		for _, a := range rec.Result.Attestations {
+			attachments = append(attachments, attachment{
+				descr: ociDesc(a),
+			})
+		}
+		for _, r := range rec.Result.Results {
+			attachments = append(attachments, walkAttachments(ctx, store, ociDesc(r), nil)...)
+		}
+	}
+
+	for key, ri := range rec.Results {
+		p, err := platforms.Parse(key)
+		if err != nil {
+			return nil, err
+		}
+		for _, a := range ri.Attestations {
+			attachments = append(attachments, attachment{
+				platform: &p,
+				descr:    ociDesc(a),
+			})
+		}
+		for _, r := range ri.Results {
+			attachments = append(attachments, walkAttachments(ctx, store, ociDesc(r), &p)...)
+		}
+	}
+
+	slices.SortFunc(attachments, func(a, b attachment) int {
+		pCmp := 0
+		if a.platform == nil && b.platform != nil {
+			return -1
+		} else if a.platform != nil && b.platform == nil {
+			return 1
+		} else if a.platform != nil && b.platform != nil {
+			pCmp = cmp.Compare(platforms.FormatAll(*a.platform), platforms.FormatAll(*b.platform))
+		}
+		return cmp.Or(
+			pCmp,
+			cmp.Compare(descrType(a.descr), descrType(b.descr)),
+		)
+	})
+
+	return attachments, nil
+}
+
+func walkAttachments(ctx context.Context, store content.Store, desc ocispecs.Descriptor, platform *ocispecs.Platform) []attachment {
+	_, err := store.Info(ctx, desc.Digest)
+	if err != nil {
+		return nil
+	}
+
+	var out []attachment
+
+	if desc.Annotations["vnd.docker.reference.type"] != "attestation-manifest" {
+		out = append(out, attachment{platform: platform, descr: desc})
+	}
+
+	if desc.MediaType != ocispecs.MediaTypeImageIndex && desc.MediaType != images.MediaTypeDockerSchema2ManifestList {
+		return out
+	}
+
+	dt, err := content.ReadBlob(ctx, store, desc)
+	if err != nil {
+		return out
+	}
+
+	var idx ocispecs.Index
+	if err := json.Unmarshal(dt, &idx); err != nil {
+		return out
+	}
+
+	for _, d := range idx.Manifests {
+		p := platform
+		if d.Platform != nil {
+			p = d.Platform
+		}
+		out = append(out, walkAttachments(ctx, store, d, p)...)
+	}
+
+	return out
+}
+
+func ociDesc(in *controlapi.Descriptor) ocispecs.Descriptor {
+	return ocispecs.Descriptor{
+		MediaType:   in.MediaType,
+		Digest:      digest.Digest(in.Digest),
+		Size:        in.Size,
+		Annotations: in.Annotations,
+	}
+}
+func descrType(desc ocispecs.Descriptor) string {
+	if typ, ok := desc.Annotations["in-toto.io/predicate-type"]; ok {
+		return typ
+	}
+	return desc.MediaType
+}
+
+func tryParseValue[T any](s string, errs *[]string, f func(string) (T, error)) (T, bool) {
+	v, err := f(s)
+	if err != nil {
+		errStr := fmt.Sprintf("failed to parse %s: (%v)", s, err)
+		*errs = append(*errs, errStr)
+	}
+	return v, true
+}
+
+func printTable(w io.Writer, kvs []keyValueOutput, title string) {
+	if len(kvs) == 0 {
+		return
+	}
+
+	tw := tabwriter.NewWriter(w, 1, 8, 1, '\t', 0)
+	fmt.Fprintf(tw, "%s\tVALUE\n", strings.ToUpper(title))
+	for _, k := range kvs {
+		fmt.Fprintf(tw, "%s\t%s\n", k.Name, k.Value)
+	}
+	tw.Flush()
+	fmt.Fprintln(w)
+}
+
+func readKeyValues(attrs map[string]string, prefix string) []keyValueOutput {
+	var out []keyValueOutput
+	for k, v := range attrs {
+		if strings.HasPrefix(k, prefix) {
+			out = append(out, keyValueOutput{
+				Name:  strings.TrimPrefix(k, prefix),
+				Value: v,
+			})
+		}
+	}
+	if len(out) == 0 {
+		return nil
+	}
+	slices.SortFunc(out, func(a, b keyValueOutput) int {
+		return cmp.Compare(a.Name, b.Name)
+	})
+	return out
+}
+
+func digestSetToDigests(ds slsa.DigestSet) []string {
+	var out []string
+	for k, v := range ds {
+		out = append(out, fmt.Sprintf("%s:%s", k, v))
+	}
+	return out
+}
diff -pruN 0.19.3+ds1-4/commands/history/inspect_attachment.go 0.21.3-0ubuntu1/commands/history/inspect_attachment.go
--- 0.19.3+ds1-4/commands/history/inspect_attachment.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/commands/history/inspect_attachment.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,152 @@
+package history
+
+import (
+	"context"
+	"io"
+	"slices"
+
+	"github.com/containerd/containerd/v2/core/content/proxy"
+	"github.com/containerd/platforms"
+	"github.com/docker/buildx/builder"
+	"github.com/docker/buildx/util/cobrautil/completion"
+	"github.com/docker/cli/cli/command"
+	intoto "github.com/in-toto/in-toto-golang/in_toto"
+	slsa02 "github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/v0.2"
+	"github.com/opencontainers/go-digest"
+	ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
+	"github.com/pkg/errors"
+	"github.com/spf13/cobra"
+)
+
+type attachmentOptions struct {
+	builder  string
+	typ      string
+	platform string
+	ref      string
+	digest   digest.Digest
+}
+
+func runAttachment(ctx context.Context, dockerCli command.Cli, opts attachmentOptions) error {
+	b, err := builder.New(dockerCli, builder.WithName(opts.builder))
+	if err != nil {
+		return err
+	}
+
+	nodes, err := b.LoadNodes(ctx)
+	if err != nil {
+		return err
+	}
+	for _, node := range nodes {
+		if node.Err != nil {
+			return node.Err
+		}
+	}
+
+	recs, err := queryRecords(ctx, opts.ref, nodes)
+	if err != nil {
+		return err
+	}
+
+	if len(recs) == 0 {
+		if opts.ref == "" {
+			return errors.New("no records found")
+		}
+		return errors.Errorf("no record found for ref %q", opts.ref)
+	}
+
+	if opts.ref == "" {
+		slices.SortFunc(recs, func(a, b historyRecord) int {
+			return b.CreatedAt.AsTime().Compare(a.CreatedAt.AsTime())
+		})
+	}
+
+	rec := &recs[0]
+
+	c, err := rec.node.Driver.Client(ctx)
+	if err != nil {
+		return err
+	}
+
+	store := proxy.NewContentStore(c.ContentClient())
+
+	if opts.digest != "" {
+		ra, err := store.ReaderAt(ctx, ocispecs.Descriptor{Digest: opts.digest})
+		if err != nil {
+			return err
+		}
+		_, err = io.Copy(dockerCli.Out(), io.NewSectionReader(ra, 0, ra.Size()))
+		return err
+	}
+
+	attachments, err := allAttachments(ctx, store, *rec)
+	if err != nil {
+		return err
+	}
+
+	typ := opts.typ
+	switch typ {
+	case "index":
+		typ = ocispecs.MediaTypeImageIndex
+	case "manifest":
+		typ = ocispecs.MediaTypeImageManifest
+	case "image":
+		typ = ocispecs.MediaTypeImageConfig
+	case "provenance":
+		typ = slsa02.PredicateSLSAProvenance
+	case "sbom":
+		typ = intoto.PredicateSPDX
+	}
+
+	for _, a := range attachments {
+		if opts.platform != "" && (a.platform == nil || platforms.FormatAll(*a.platform) != opts.platform) {
+			continue
+		}
+		if typ != "" && descrType(a.descr) != typ {
+			continue
+		}
+		ra, err := store.ReaderAt(ctx, a.descr)
+		if err != nil {
+			return err
+		}
+		_, err = io.Copy(dockerCli.Out(), io.NewSectionReader(ra, 0, ra.Size()))
+		return err
+	}
+
+	return errors.Errorf("no matching attachment found for ref %q", opts.ref)
+}
+
+func attachmentCmd(dockerCli command.Cli, rootOpts RootOptions) *cobra.Command {
+	var options attachmentOptions
+
+	cmd := &cobra.Command{
+		Use:   "attachment [OPTIONS] REF [DIGEST]",
+		Short: "Inspect a build attachment",
+		Args:  cobra.RangeArgs(1, 2),
+		RunE: func(cmd *cobra.Command, args []string) error {
+			if len(args) > 0 {
+				options.ref = args[0]
+			}
+			if len(args) > 1 {
+				dgst, err := digest.Parse(args[1])
+				if err != nil {
+					return errors.Wrapf(err, "invalid digest %q", args[1])
+				}
+				options.digest = dgst
+			}
+
+			if options.digest == "" && options.platform == "" && options.typ == "" {
+				return errors.New("at least one of --type, --platform or DIGEST must be specified")
+			}
+
+			options.builder = *rootOpts.Builder
+			return runAttachment(cmd.Context(), dockerCli, options)
+		},
+		ValidArgsFunction: completion.Disable,
+	}
+
+	flags := cmd.Flags()
+	flags.StringVar(&options.typ, "type", "", "Type of attachment")
+	flags.StringVar(&options.platform, "platform", "", "Platform of attachment")
+
+	return cmd
+}
diff -pruN 0.19.3+ds1-4/commands/history/logs.go 0.21.3-0ubuntu1/commands/history/logs.go
--- 0.19.3+ds1-4/commands/history/logs.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/commands/history/logs.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,124 @@
+package history
+
+import (
+	"context"
+	"io"
+	"os"
+	"slices"
+
+	"github.com/docker/buildx/builder"
+	"github.com/docker/buildx/util/cobrautil/completion"
+	"github.com/docker/buildx/util/progress"
+	"github.com/docker/cli/cli/command"
+	controlapi "github.com/moby/buildkit/api/services/control"
+	"github.com/moby/buildkit/client"
+	"github.com/moby/buildkit/util/progress/progressui"
+	"github.com/pkg/errors"
+	"github.com/spf13/cobra"
+)
+
+type logsOptions struct {
+	builder  string
+	ref      string
+	progress string
+}
+
+func runLogs(ctx context.Context, dockerCli command.Cli, opts logsOptions) error {
+	b, err := builder.New(dockerCli, builder.WithName(opts.builder))
+	if err != nil {
+		return err
+	}
+
+	nodes, err := b.LoadNodes(ctx)
+	if err != nil {
+		return err
+	}
+	for _, node := range nodes {
+		if node.Err != nil {
+			return node.Err
+		}
+	}
+
+	recs, err := queryRecords(ctx, opts.ref, nodes)
+	if err != nil {
+		return err
+	}
+
+	if len(recs) == 0 {
+		if opts.ref == "" {
+			return errors.New("no records found")
+		}
+		return errors.Errorf("no record found for ref %q", opts.ref)
+	}
+
+	if opts.ref == "" {
+		slices.SortFunc(recs, func(a, b historyRecord) int {
+			return b.CreatedAt.AsTime().Compare(a.CreatedAt.AsTime())
+		})
+	}
+
+	rec := &recs[0]
+	c, err := rec.node.Driver.Client(ctx)
+	if err != nil {
+		return err
+	}
+
+	cl, err := c.ControlClient().Status(ctx, &controlapi.StatusRequest{
+		Ref: rec.Ref,
+	})
+	if err != nil {
+		return err
+	}
+
+	var mode progressui.DisplayMode = progressui.DisplayMode(opts.progress)
+	if mode == progressui.AutoMode {
+		mode = progressui.PlainMode
+	}
+	printer, err := progress.NewPrinter(context.TODO(), os.Stderr, mode)
+	if err != nil {
+		return err
+	}
+
+loop0:
+	for {
+		select {
+		case <-ctx.Done():
+			cl.CloseSend()
+			return context.Cause(ctx)
+		default:
+			ev, err := cl.Recv()
+			if err != nil {
+				if errors.Is(err, io.EOF) {
+					break loop0
+				}
+				return err
+			}
+			printer.Write(client.NewSolveStatus(ev))
+		}
+	}
+
+	return printer.Wait()
+}
+
+func logsCmd(dockerCli command.Cli, rootOpts RootOptions) *cobra.Command {
+	var options logsOptions
+
+	cmd := &cobra.Command{
+		Use:   "logs [OPTIONS] [REF]",
+		Short: "Print the logs of a build",
+		Args:  cobra.MaximumNArgs(1),
+		RunE: func(cmd *cobra.Command, args []string) error {
+			if len(args) > 0 {
+				options.ref = args[0]
+			}
+			options.builder = *rootOpts.Builder
+			return runLogs(cmd.Context(), dockerCli, options)
+		},
+		ValidArgsFunction: completion.Disable,
+	}
+
+	flags := cmd.Flags()
+	flags.StringVar(&options.progress, "progress", "plain", "Set type of progress output (plain, rawjson, tty)")
+
+	return cmd
+}
diff -pruN 0.19.3+ds1-4/commands/history/ls.go 0.21.3-0ubuntu1/commands/history/ls.go
--- 0.19.3+ds1-4/commands/history/ls.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/commands/history/ls.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,234 @@
+package history
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"os"
+	"slices"
+	"time"
+
+	"github.com/containerd/console"
+	"github.com/docker/buildx/builder"
+	"github.com/docker/buildx/localstate"
+	"github.com/docker/buildx/util/cobrautil/completion"
+	"github.com/docker/buildx/util/confutil"
+	"github.com/docker/buildx/util/desktop"
+	"github.com/docker/cli/cli"
+	"github.com/docker/cli/cli/command"
+	"github.com/docker/cli/cli/command/formatter"
+	"github.com/docker/go-units"
+	"github.com/spf13/cobra"
+)
+
+const (
+	lsHeaderBuildID  = "BUILD ID"
+	lsHeaderName     = "NAME"
+	lsHeaderStatus   = "STATUS"
+	lsHeaderCreated  = "CREATED AT"
+	lsHeaderDuration = "DURATION"
+	lsHeaderLink     = ""
+
+	lsDefaultTableFormat = "table {{.Ref}}\t{{.Name}}\t{{.Status}}\t{{.CreatedAt}}\t{{.Duration}}\t{{.Link}}"
+
+	headerKeyTimestamp = "buildkit-current-timestamp"
+)
+
+type lsOptions struct {
+	builder string
+	format  string
+	noTrunc bool
+}
+
+func runLs(ctx context.Context, dockerCli command.Cli, opts lsOptions) error {
+	b, err := builder.New(dockerCli, builder.WithName(opts.builder))
+	if err != nil {
+		return err
+	}
+
+	nodes, err := b.LoadNodes(ctx)
+	if err != nil {
+		return err
+	}
+	for _, node := range nodes {
+		if node.Err != nil {
+			return node.Err
+		}
+	}
+
+	out, err := queryRecords(ctx, "", nodes)
+	if err != nil {
+		return err
+	}
+
+	ls, err := localstate.New(confutil.NewConfig(dockerCli))
+	if err != nil {
+		return err
+	}
+
+	for i, rec := range out {
+		st, _ := ls.ReadRef(rec.node.Builder, rec.node.Name, rec.Ref)
+		rec.name = buildName(rec.FrontendAttrs, st)
+		out[i] = rec
+	}
+
+	return lsPrint(dockerCli, out, opts)
+}
+
+func lsCmd(dockerCli command.Cli, rootOpts RootOptions) *cobra.Command {
+	var options lsOptions
+
+	cmd := &cobra.Command{
+		Use:   "ls",
+		Short: "List build records",
+		Args:  cli.NoArgs,
+		RunE: func(cmd *cobra.Command, args []string) error {
+			options.builder = *rootOpts.Builder
+			return runLs(cmd.Context(), dockerCli, options)
+		},
+		ValidArgsFunction: completion.Disable,
+	}
+
+	flags := cmd.Flags()
+	flags.StringVar(&options.format, "format", formatter.TableFormatKey, "Format the output")
+	flags.BoolVar(&options.noTrunc, "no-trunc", false, "Don't truncate output")
+
+	return cmd
+}
+
+func lsPrint(dockerCli command.Cli, records []historyRecord, in lsOptions) error {
+	if in.format == formatter.TableFormatKey {
+		in.format = lsDefaultTableFormat
+	}
+
+	ctx := formatter.Context{
+		Output: dockerCli.Out(),
+		Format: formatter.Format(in.format),
+		Trunc:  !in.noTrunc,
+	}
+
+	slices.SortFunc(records, func(a, b historyRecord) int {
+		if a.CompletedAt == nil && b.CompletedAt != nil {
+			return -1
+		}
+		if a.CompletedAt != nil && b.CompletedAt == nil {
+			return 1
+		}
+		return b.CreatedAt.AsTime().Compare(a.CreatedAt.AsTime())
+	})
+
+	var term bool
+	if _, err := console.ConsoleFromFile(os.Stdout); err == nil {
+		term = true
+	}
+	render := func(format func(subContext formatter.SubContext) error) error {
+		for _, r := range records {
+			if err := format(&lsContext{
+				format: formatter.Format(in.format),
+				isTerm: term,
+				trunc:  !in.noTrunc,
+				record: &r,
+			}); err != nil {
+				return err
+			}
+		}
+		return nil
+	}
+
+	lsCtx := lsContext{
+		isTerm: term,
+		trunc:  !in.noTrunc,
+	}
+	lsCtx.Header = formatter.SubHeaderContext{
+		"Ref":       lsHeaderBuildID,
+		"Name":      lsHeaderName,
+		"Status":    lsHeaderStatus,
+		"CreatedAt": lsHeaderCreated,
+		"Duration":  lsHeaderDuration,
+		"Link":      lsHeaderLink,
+	}
+
+	return ctx.Write(&lsCtx, render)
+}
+
+type lsContext struct {
+	formatter.HeaderContext
+
+	isTerm bool
+	trunc  bool
+	format formatter.Format
+	record *historyRecord
+}
+
+func (c *lsContext) MarshalJSON() ([]byte, error) {
+	m := map[string]interface{}{
+		"ref":             c.FullRef(),
+		"name":            c.Name(),
+		"status":          c.Status(),
+		"created_at":      c.record.CreatedAt.AsTime().Format(time.RFC3339Nano),
+		"total_steps":     c.record.NumTotalSteps,
+		"completed_steps": c.record.NumCompletedSteps,
+		"cached_steps":    c.record.NumCachedSteps,
+	}
+	if c.record.CompletedAt != nil {
+		m["completed_at"] = c.record.CompletedAt.AsTime().Format(time.RFC3339Nano)
+	}
+	return json.Marshal(m)
+}
+
+func (c *lsContext) Ref() string {
+	return c.record.Ref
+}
+
+func (c *lsContext) FullRef() string {
+	return fmt.Sprintf("%s/%s/%s", c.record.node.Builder, c.record.node.Name, c.record.Ref)
+}
+
+func (c *lsContext) Name() string {
+	name := c.record.name
+	if c.trunc && c.format.IsTable() {
+		return trimBeginning(name, 36)
+	}
+	return name
+}
+
+func (c *lsContext) Status() string {
+	if c.record.CompletedAt != nil {
+		if c.record.Error != nil {
+			return "Error"
+		}
+		return "Completed"
+	}
+	return "Running"
+}
+
+func (c *lsContext) CreatedAt() string {
+	return units.HumanDuration(time.Since(c.record.CreatedAt.AsTime())) + " ago"
+}
+
+func (c *lsContext) Duration() string {
+	lastTime := c.record.currentTimestamp
+	if c.record.CompletedAt != nil {
+		tm := c.record.CompletedAt.AsTime()
+		lastTime = &tm
+	}
+	if lastTime == nil {
+		return ""
+	}
+	v := formatDuration(lastTime.Sub(c.record.CreatedAt.AsTime()))
+	if c.record.CompletedAt == nil {
+		v += "+"
+	}
+	return v
+}
+
+func (c *lsContext) Link() string {
+	url := desktop.BuildURL(c.FullRef())
+	if c.format.IsTable() {
+		if c.isTerm {
+			return desktop.ANSIHyperlink(url, "Open")
+		}
+		return ""
+	}
+	return url
+}
diff -pruN 0.19.3+ds1-4/commands/history/open.go 0.21.3-0ubuntu1/commands/history/open.go
--- 0.19.3+ds1-4/commands/history/open.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/commands/history/open.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,80 @@
+package history
+
+import (
+	"context"
+	"fmt"
+	"slices"
+
+	"github.com/docker/buildx/builder"
+	"github.com/docker/buildx/util/cobrautil/completion"
+	"github.com/docker/buildx/util/desktop"
+	"github.com/docker/cli/cli/command"
+	"github.com/pkg/browser"
+	"github.com/pkg/errors"
+	"github.com/spf13/cobra"
+)
+
+type openOptions struct {
+	builder string
+	ref     string
+}
+
+func runOpen(ctx context.Context, dockerCli command.Cli, opts openOptions) error {
+	b, err := builder.New(dockerCli, builder.WithName(opts.builder))
+	if err != nil {
+		return err
+	}
+
+	nodes, err := b.LoadNodes(ctx)
+	if err != nil {
+		return err
+	}
+	for _, node := range nodes {
+		if node.Err != nil {
+			return node.Err
+		}
+	}
+
+	recs, err := queryRecords(ctx, opts.ref, nodes)
+	if err != nil {
+		return err
+	}
+
+	if len(recs) == 0 {
+		if opts.ref == "" {
+			return errors.New("no records found")
+		}
+		return errors.Errorf("no record found for ref %q", opts.ref)
+	}
+
+	if opts.ref == "" {
+		slices.SortFunc(recs, func(a, b historyRecord) int {
+			return b.CreatedAt.AsTime().Compare(a.CreatedAt.AsTime())
+		})
+	}
+
+	rec := &recs[0]
+
+	url := desktop.BuildURL(fmt.Sprintf("%s/%s/%s", rec.node.Builder, rec.node.Name, rec.Ref))
+	return browser.OpenURL(url)
+}
+
+func openCmd(dockerCli command.Cli, rootOpts RootOptions) *cobra.Command {
+	var options openOptions
+
+	cmd := &cobra.Command{
+		Use:   "open [OPTIONS] [REF]",
+		Short: "Open a build in Docker Desktop",
+		Args:  cobra.MaximumNArgs(1),
+		RunE: func(cmd *cobra.Command, args []string) error {
+			if len(args) > 0 {
+				options.ref = args[0]
+			}
+			options.builder = *rootOpts.Builder
+			return runOpen(cmd.Context(), dockerCli, options)
+		},
+		ValidArgsFunction: completion.Disable,
+	}
+
+	return cmd
+}
diff -pruN 0.19.3+ds1-4/commands/history/rm.go 0.21.3-0ubuntu1/commands/history/rm.go
--- 0.19.3+ds1-4/commands/history/rm.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/commands/history/rm.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,151 @@
+package history
+
+import (
+	"context"
+	"io"
+
+	"github.com/docker/buildx/builder"
+	"github.com/docker/buildx/util/cobrautil/completion"
+	"github.com/docker/cli/cli/command"
+	"github.com/hashicorp/go-multierror"
+	controlapi "github.com/moby/buildkit/api/services/control"
+	"github.com/pkg/errors"
+	"github.com/spf13/cobra"
+	"golang.org/x/sync/errgroup"
+)
+
+type rmOptions struct {
+	builder string
+	refs    []string
+	all     bool
+}
+
+func runRm(ctx context.Context, dockerCli command.Cli, opts rmOptions) error {
+	b, err := builder.New(dockerCli, builder.WithName(opts.builder))
+	if err != nil {
+		return err
+	}
+
+	nodes, err := b.LoadNodes(ctx)
+	if err != nil {
+		return err
+	}
+	for _, node := range nodes {
+		if node.Err != nil {
+			return node.Err
+		}
+	}
+
+	errs := make([][]error, len(opts.refs))
+	for i := range errs {
+		errs[i] = make([]error, len(nodes))
+	}
+
+	eg, ctx := errgroup.WithContext(ctx)
+	for i, node := range nodes {
+		node := node
+		eg.Go(func() error {
+			if node.Driver == nil {
+				return nil
+			}
+			c, err := node.Driver.Client(ctx)
+			if err != nil {
+				return err
+			}
+
+			refs := opts.refs
+
+			if opts.all {
+				serv, err := c.ControlClient().ListenBuildHistory(ctx, &controlapi.BuildHistoryRequest{
+					EarlyExit: true,
+				})
+				if err != nil {
+					return err
+				}
+				defer serv.CloseSend()
+
+				for {
+					resp, err := serv.Recv()
+					if err != nil {
+						if errors.Is(err, io.EOF) {
+							break
+						}
+						return err
+					}
+					if resp.Type == controlapi.BuildHistoryEventType_COMPLETE {
+						refs = append(refs, resp.Record.Ref)
+					}
+				}
+			}
+
+			for j, ref := range refs {
+				_, err = c.ControlClient().UpdateBuildHistory(ctx, &controlapi.UpdateBuildHistoryRequest{
+					Ref:    ref,
+					Delete: true,
+				})
+				if opts.all {
+					if err != nil {
+						return err
+					}
+				} else {
+					errs[j][i] = err
+				}
+			}
+			return nil
+		})
+	}
+
+	if err := eg.Wait(); err != nil {
+		return err
+	}
+
+	var out []error
+loop0:
+	for _, nodeErrs := range errs {
+		var nodeErr error
+		for _, err1 := range nodeErrs {
+			if err1 == nil {
+				continue loop0
+			}
+			if nodeErr == nil {
+				nodeErr = err1
+			} else {
+				nodeErr = multierror.Append(nodeErr, err1)
+			}
+		}
+		out = append(out, nodeErr)
+	}
+	if len(out) == 0 {
+		return nil
+	}
+	if len(out) == 1 {
+		return out[0]
+	}
+	return multierror.Append(out[0], out[1:]...)
+}
+
+func rmCmd(dockerCli command.Cli, rootOpts RootOptions) *cobra.Command {
+	var options rmOptions
+
+	cmd := &cobra.Command{
+		Use:   "rm [OPTIONS] [REF...]",
+		Short: "Remove build records",
+		RunE: func(cmd *cobra.Command, args []string) error {
+			if len(args) == 0 && !options.all {
+				return errors.New("rm requires at least one argument")
+			}
+			if len(args) > 0 && options.all {
+				return errors.New("rm requires either --all or at least one argument")
+			}
+			options.refs = args
+			options.builder = *rootOpts.Builder
+			return runRm(cmd.Context(), dockerCli, options)
+		},
+		ValidArgsFunction: completion.Disable,
+	}
+
+	flags := cmd.Flags()
+	flags.BoolVar(&options.all, "all", false, "Remove all build records")
+
+	return cmd
+}
diff -pruN 0.19.3+ds1-4/commands/history/root.go 0.21.3-0ubuntu1/commands/history/root.go
--- 0.19.3+ds1-4/commands/history/root.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/commands/history/root.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,31 @@
+package history
+
+import (
+	"github.com/docker/buildx/util/cobrautil/completion"
+	"github.com/docker/cli/cli/command"
+	"github.com/spf13/cobra"
+)
+
+type RootOptions struct {
+	Builder *string
+}
+
+func RootCmd(rootcmd *cobra.Command, dockerCli command.Cli, opts RootOptions) *cobra.Command {
+	cmd := &cobra.Command{
+		Use:               "history",
+		Short:             "Commands to work on build records",
+		ValidArgsFunction: completion.Disable,
+		RunE:              rootcmd.RunE,
+	}
+
+	cmd.AddCommand(
+		lsCmd(dockerCli, opts),
+		rmCmd(dockerCli, opts),
+		logsCmd(dockerCli, opts),
+		inspectCmd(dockerCli, opts),
+		openCmd(dockerCli, opts),
+		traceCmd(dockerCli, opts),
+	)
+
+	return cmd
+}
diff -pruN 0.19.3+ds1-4/commands/history/trace.go 0.21.3-0ubuntu1/commands/history/trace.go
--- 0.19.3+ds1-4/commands/history/trace.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/commands/history/trace.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,260 @@
+package history
+
+import (
+	"bytes"
+	"context"
+	"encoding/json"
+	"fmt"
+	"io"
+	"net"
+	"os"
+	"slices"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/containerd/console"
+	"github.com/containerd/containerd/v2/core/content/proxy"
+	"github.com/docker/buildx/builder"
+	"github.com/docker/buildx/util/cobrautil/completion"
+	"github.com/docker/buildx/util/otelutil"
+	"github.com/docker/buildx/util/otelutil/jaeger"
+	"github.com/docker/cli/cli/command"
+	controlapi "github.com/moby/buildkit/api/services/control"
+	"github.com/opencontainers/go-digest"
+	ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
+	"github.com/pkg/browser"
+	"github.com/pkg/errors"
+	"github.com/spf13/cobra"
+	jaegerui "github.com/tonistiigi/jaeger-ui-rest"
+)
+
+type traceOptions struct {
+	builder string
+	ref     string
+	addr    string
+	compare string
+}
+
+func loadTrace(ctx context.Context, ref string, nodes []builder.Node) (string, []byte, error) {
+	var offset *int
+	if strings.HasPrefix(ref, "^") {
+		off, err := strconv.Atoi(ref[1:])
+		if err != nil {
+			return "", nil, errors.Wrapf(err, "invalid offset %q", ref)
+		}
+		offset = &off
+		ref = ""
+	}
+
+	recs, err := queryRecords(ctx, ref, nodes)
+	if err != nil {
+		return "", nil, err
+	}
+
+	var rec *historyRecord
+
+	if ref == "" {
+		slices.SortFunc(recs, func(a, b historyRecord) int {
+			return b.CreatedAt.AsTime().Compare(a.CreatedAt.AsTime())
+		})
+		for _, r := range recs {
+			if r.CompletedAt != nil {
+				if offset != nil {
+					if *offset > 0 {
+						*offset--
+						continue
+					}
+				}
+				rec = &r
+				break
+			}
+		}
+		if offset != nil && *offset > 0 {
+			return "", nil, errors.Errorf("no completed build found with offset %d", *offset)
+		}
+	} else {
+		rec = &recs[0]
+	}
+	if rec == nil {
+		if ref == "" {
+			return "", nil, errors.New("no records found")
+		}
+		return "", nil, errors.Errorf("no record found for ref %q", ref)
+	}
+
+	if rec.CompletedAt == nil {
+		return "", nil, errors.Errorf("build %q is not completed, only completed builds can be traced", rec.Ref)
+	}
+
+	if rec.Trace == nil {
+		// build is complete but no trace yet. try to finalize the trace
+		time.Sleep(1 * time.Second) // give some extra time for last parts of trace to be written
+
+		c, err := rec.node.Driver.Client(ctx)
+		if err != nil {
+			return "", nil, err
+		}
+		_, err = c.ControlClient().UpdateBuildHistory(ctx, &controlapi.UpdateBuildHistoryRequest{
+			Ref:      rec.Ref,
+			Finalize: true,
+		})
+		if err != nil {
+			return "", nil, err
+		}
+
+		recs, err := queryRecords(ctx, rec.Ref, []builder.Node{*rec.node})
+		if err != nil {
+			return "", nil, err
+		}
+
+		if len(recs) == 0 {
+			return "", nil, errors.Errorf("build record %q was deleted", rec.Ref)
+		}
+
+		rec = &recs[0]
+		if rec.Trace == nil {
+			return "", nil, errors.Errorf("build record %q is missing a trace", rec.Ref)
+		}
+	}
+
+	c, err := rec.node.Driver.Client(ctx)
+	if err != nil {
+		return "", nil, err
+	}
+
+	store := proxy.NewContentStore(c.ContentClient())
+
+	ra, err := store.ReaderAt(ctx, ocispecs.Descriptor{
+		Digest:    digest.Digest(rec.Trace.Digest),
+		MediaType: rec.Trace.MediaType,
+		Size:      rec.Trace.Size,
+	})
+	if err != nil {
+		return "", nil, err
+	}
+
+	spans, err := otelutil.ParseSpanStubs(io.NewSectionReader(ra, 0, ra.Size()))
+	if err != nil {
+		return "", nil, err
+	}
+
+	wrapper := struct {
+		Data []jaeger.Trace `json:"data"`
+	}{
+		Data: spans.JaegerData().Data,
+	}
+
+	if len(wrapper.Data) == 0 {
+		return "", nil, errors.New("no trace data")
+	}
+
+	buf := &bytes.Buffer{}
+	enc := json.NewEncoder(buf)
+	enc.SetIndent("", "  ")
+	if err := enc.Encode(wrapper); err != nil {
+		return "", nil, err
+	}
+
+	return string(wrapper.Data[0].TraceID), buf.Bytes(), nil
+}
+
+func runTrace(ctx context.Context, dockerCli command.Cli, opts traceOptions) error {
+	b, err := builder.New(dockerCli, builder.WithName(opts.builder))
+	if err != nil {
+		return err
+	}
+
+	nodes, err := b.LoadNodes(ctx)
+	if err != nil {
+		return err
+	}
+	for _, node := range nodes {
+		if node.Err != nil {
+			return node.Err
+		}
+	}
+
+	traceID, data, err := loadTrace(ctx, opts.ref, nodes)
+	if err != nil {
+		return err
+	}
+	srv := jaegerui.NewServer(jaegerui.Config{})
+	if err := srv.AddTrace(traceID, bytes.NewReader(data)); err != nil {
+		return err
+	}
+	url := "/trace/" + traceID
+
+	if opts.compare != "" {
+		traceIDcomp, data, err := loadTrace(ctx, opts.compare, nodes)
+		if err != nil {
+			return errors.Wrapf(err, "failed to load trace for %s", opts.compare)
+		}
+		if err := srv.AddTrace(traceIDcomp, bytes.NewReader(data)); err != nil {
+			return err
+		}
+		url = "/trace/" + traceIDcomp + "..." + traceID
+	}
+
+	var term bool
+	if _, err := console.ConsoleFromFile(os.Stdout); err == nil {
+		term = true
+	}
+
+	if !term && opts.compare == "" {
+		fmt.Fprintln(dockerCli.Out(), string(data))
+		return nil
+	}
+
+	ln, err := net.Listen("tcp", opts.addr)
+	if err != nil {
+		return err
+	}
+
+	go func() {
+		time.Sleep(100 * time.Millisecond)
+		browser.OpenURL(url)
+	}()
+
+	url = "http://" + ln.Addr().String() + url
+	fmt.Fprintf(dockerCli.Err(), "Trace available at %s\n", url)
+
+	go func() {
+		<-ctx.Done()
+		ln.Close()
+	}()
+
+	err = srv.Serve(ln)
+	if err != nil {
+		select {
+		case <-ctx.Done():
+			return nil
+		default:
+		}
+	}
+	return err
+}
+
+func traceCmd(dockerCli command.Cli, rootOpts RootOptions) *cobra.Command {
+	var options traceOptions
+
+	cmd := &cobra.Command{
+		Use:   "trace [OPTIONS] [REF]",
+		Short: "Show the OpenTelemetry trace of a build record",
+		Args:  cobra.MaximumNArgs(1),
+		RunE: func(cmd *cobra.Command, args []string) error {
+			if len(args) > 0 {
+				options.ref = args[0]
+			}
+			options.builder = *rootOpts.Builder
+			return runTrace(cmd.Context(), dockerCli, options)
+		},
+		ValidArgsFunction: completion.Disable,
+	}
+
+	flags := cmd.Flags()
+	flags.StringVar(&options.addr, "addr", "127.0.0.1:0", "Address to bind the UI server")
+	flags.StringVar(&options.compare, "compare", "", "Compare with another build reference")
+
+	return cmd
+}
diff -pruN 0.19.3+ds1-4/commands/history/utils.go 0.21.3-0ubuntu1/commands/history/utils.go
--- 0.19.3+ds1-4/commands/history/utils.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/commands/history/utils.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,180 @@
+package history
+
+import (
+	"context"
+	"fmt"
+	"io"
+	"path/filepath"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/docker/buildx/build"
+	"github.com/docker/buildx/builder"
+	"github.com/docker/buildx/localstate"
+	controlapi "github.com/moby/buildkit/api/services/control"
+	"github.com/pkg/errors"
+	"golang.org/x/sync/errgroup"
+)
+
+func buildName(fattrs map[string]string, ls *localstate.State) string {
+	var res string
+
+	var target, contextPath, dockerfilePath, vcsSource string
+	if v, ok := fattrs["target"]; ok {
+		target = v
+	}
+	if v, ok := fattrs["context"]; ok {
+		contextPath = filepath.ToSlash(v)
+	} else if v, ok := fattrs["vcs:localdir:context"]; ok && v != "." {
+		contextPath = filepath.ToSlash(v)
+	}
+	if v, ok := fattrs["vcs:source"]; ok {
+		vcsSource = v
+	}
+	if v, ok := fattrs["filename"]; ok && v != "Dockerfile" {
+		dockerfilePath = filepath.ToSlash(v)
+	}
+	if v, ok := fattrs["vcs:localdir:dockerfile"]; ok && v != "." {
+		dockerfilePath = filepath.ToSlash(filepath.Join(v, dockerfilePath))
+	}
+
+	var localPath string
+	if ls != nil && !build.IsRemoteURL(ls.LocalPath) {
+		if ls.LocalPath != "" && ls.LocalPath != "-" {
+			localPath = filepath.ToSlash(ls.LocalPath)
+		}
+		if ls.DockerfilePath != "" && ls.DockerfilePath != "-" && ls.DockerfilePath != "Dockerfile" {
+			dockerfilePath = filepath.ToSlash(ls.DockerfilePath)
+		}
+	}
+
+	// remove default dockerfile name
+	const defaultFilename = "/Dockerfile"
+	hasDefaultFileName := strings.HasSuffix(dockerfilePath, defaultFilename) || dockerfilePath == ""
+	dockerfilePath = strings.TrimSuffix(dockerfilePath, defaultFilename)
+
+	// dockerfile is a subpath of context
+	if strings.HasPrefix(dockerfilePath, localPath) && len(dockerfilePath) > len(localPath) {
+		res = dockerfilePath[strings.LastIndex(localPath, "/")+1:]
+	} else {
+		// Otherwise, use basename
+		bpath := localPath
+		if len(dockerfilePath) > 0 {
+			bpath = dockerfilePath
+		}
+		if len(bpath) > 0 {
+			lidx := strings.LastIndex(bpath, "/")
+			res = bpath[lidx+1:]
+			if !hasDefaultFileName {
+				if lidx != -1 {
+					res = filepath.ToSlash(filepath.Join(filepath.Base(bpath[:lidx]), res))
+				} else {
+					res = filepath.ToSlash(filepath.Join(filepath.Base(bpath), res))
+				}
+			}
+		}
+	}
+
+	if len(contextPath) > 0 {
+		res = contextPath
+	}
+	if len(target) > 0 {
+		if len(res) > 0 {
+			res = res + " (" + target + ")"
+		} else {
+			res = target
+		}
+	}
+	if res == "" && vcsSource != "" {
+		return vcsSource
+	}
+	return res
+}
+
+func trimBeginning(s string, n int) string {
+	if len(s) <= n {
+		return s
+	}
+	return ".." + s[len(s)-n+2:]
+}
+
+type historyRecord struct {
+	*controlapi.BuildHistoryRecord
+	currentTimestamp *time.Time
+	node             *builder.Node
+	name             string
+}
+
+func queryRecords(ctx context.Context, ref string, nodes []builder.Node) ([]historyRecord, error) {
+	var mu sync.Mutex
+	var out []historyRecord
+
+	eg, ctx := errgroup.WithContext(ctx)
+	for _, node := range nodes {
+		node := node
+		eg.Go(func() error {
+			if node.Driver == nil {
+				return nil
+			}
+			var records []historyRecord
+			c, err := node.Driver.Client(ctx)
+			if err != nil {
+				return err
+			}
+			serv, err := c.ControlClient().ListenBuildHistory(ctx, &controlapi.BuildHistoryRequest{
+				EarlyExit: true,
+				Ref:       ref,
+			})
+			if err != nil {
+				return err
+			}
+			md, err := serv.Header()
+			if err != nil {
+				return err
+			}
+			var ts *time.Time
+			if v, ok := md[headerKeyTimestamp]; ok {
+				t, err := time.Parse(time.RFC3339Nano, v[0])
+				if err != nil {
+					return err
+				}
+				ts = &t
+			}
+			defer serv.CloseSend()
+			for {
+				he, err := serv.Recv()
+				if err != nil {
+					if errors.Is(err, io.EOF) {
+						break
+					}
+					return err
+				}
+				if he.Type == controlapi.BuildHistoryEventType_DELETED || he.Record == nil {
+					continue
+				}
+				records = append(records, historyRecord{
+					BuildHistoryRecord: he.Record,
+					currentTimestamp:   ts,
+					node:               &node,
+				})
+			}
+			mu.Lock()
+			out = append(out, records...)
+			mu.Unlock()
+			return nil
+		})
+	}
+
+	if err := eg.Wait(); err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func formatDuration(d time.Duration) string {
+	if d < time.Minute {
+		return fmt.Sprintf("%.1fs", d.Seconds())
+	}
+	return fmt.Sprintf("%dm %2ds", int(d.Minutes()), int(d.Seconds())%60)
+}
diff -pruN 0.19.3+ds1-4/commands/inspect.go 0.21.3-0ubuntu1/commands/inspect.go
--- 0.19.3+ds1-4/commands/inspect.go	2024-12-17 11:25:19.000000000 +0000
+++ 0.21.3-0ubuntu1/commands/inspect.go	2025-03-17 16:14:25.000000000 +0000
@@ -115,6 +115,25 @@ func runInspect(ctx context.Context, doc
 						fmt.Fprintf(w, "\t%s:\t%s\n", k, v)
 					}
 				}
+
+				if len(nodes[i].CDIDevices) > 0 {
+					fmt.Fprintf(w, "Devices:\n")
+					for _, dev := range nodes[i].CDIDevices {
+						fmt.Fprintf(w, "\tName:\t%s\n", dev.Name)
+						if dev.OnDemand {
+							fmt.Fprintf(w, "\tOn-Demand:\t%v\n", dev.OnDemand)
+						} else {
+							fmt.Fprintf(w, "\tAutomatically allowed:\t%v\n", dev.AutoAllow)
+						}
+						if len(dev.Annotations) > 0 {
+							fmt.Fprintf(w, "\tAnnotations:\n")
+							for k, v := range dev.Annotations {
+								fmt.Fprintf(w, "\t\t%s:\t%s\n", k, v)
+							}
+						}
+					}
+				}
+
 				for ri, rule := range nodes[i].GCPolicy {
 					fmt.Fprintf(w, "GC Policy rule#%d:\n", ri)
 					fmt.Fprintf(w, "\tAll:\t%v\n", rule.All)
diff -pruN 0.19.3+ds1-4/commands/ls.go 0.21.3-0ubuntu1/commands/ls.go
--- 0.19.3+ds1-4/commands/ls.go	2024-12-17 11:25:19.000000000 +0000
+++ 0.21.3-0ubuntu1/commands/ls.go	2025-03-17 16:14:25.000000000 +0000
@@ -159,6 +159,9 @@ func lsPrint(dockerCli command.Cli, curr
 				}
 				continue
 			}
+			if ctx.Format.IsJSON() {
+				continue
+			}
 			for _, n := range b.Nodes() {
 				if n.Err != nil {
 					if ctx.Format.IsTable() {
diff -pruN 0.19.3+ds1-4/commands/root.go 0.21.3-0ubuntu1/commands/root.go
--- 0.19.3+ds1-4/commands/root.go	2024-12-17 11:25:19.000000000 +0000
+++ 0.21.3-0ubuntu1/commands/root.go	2025-03-17 16:14:25.000000000 +0000
@@ -5,6 +5,7 @@ import (
 	"os"
 
 	debugcmd "github.com/docker/buildx/commands/debug"
+	historycmd "github.com/docker/buildx/commands/history"
 	imagetoolscmd "github.com/docker/buildx/commands/imagetools"
 	"github.com/docker/buildx/controller/remote"
 	"github.com/docker/buildx/util/cobrautil/completion"
@@ -106,6 +107,7 @@ func addCommands(cmd *cobra.Command, opt
 		pruneCmd(dockerCli, opts),
 		duCmd(dockerCli, opts),
 		imagetoolscmd.RootCmd(cmd, dockerCli, imagetoolscmd.RootOptions{Builder: &opts.builder}),
+		historycmd.RootCmd(cmd, dockerCli, historycmd.RootOptions{Builder: &opts.builder}),
 	)
 	if confutil.IsExperimental() {
 		cmd.AddCommand(debugcmd.RootCmd(dockerCli,
diff -pruN 0.19.3+ds1-4/controller/build/build.go 0.21.3-0ubuntu1/controller/build/build.go
--- 0.19.3+ds1-4/controller/build/build.go	2024-12-17 11:25:19.000000000 +0000
+++ 0.21.3-0ubuntu1/controller/build/build.go	2025-03-17 16:14:25.000000000 +0000
@@ -75,7 +75,9 @@ func RunBuild(ctx context.Context, docke
 	opts.Platforms = platforms
 
 	dockerConfig := dockerCli.ConfigFile()
-	opts.Session = append(opts.Session, authprovider.NewDockerAuthProvider(dockerConfig, nil))
+	opts.Session = append(opts.Session, authprovider.NewDockerAuthProvider(authprovider.DockerAuthProviderConfig{
+		ConfigFile: dockerConfig,
+	}))
 
 	secrets, err := controllerapi.CreateSecrets(in.Secrets)
 	if err != nil {
diff -pruN 0.19.3+ds1-4/controller/pb/export.go 0.21.3-0ubuntu1/controller/pb/export.go
--- 0.19.3+ds1-4/controller/pb/export.go	2024-12-17 11:25:19.000000000 +0000
+++ 0.21.3-0ubuntu1/controller/pb/export.go	2025-03-17 16:14:25.000000000 +0000
@@ -46,6 +46,7 @@ func CreateExports(entries []*ExportEntr
 			supportDir = !tar
 		case "registry":
 			out.Type = client.ExporterImage
+			out.Attrs["push"] = "true"
 		}
 
 		if supportDir {
diff -pruN 0.19.3+ds1-4/controller/remote/client.go 0.21.3-0ubuntu1/controller/remote/client.go
--- 0.19.3+ds1-4/controller/remote/client.go	2024-12-17 11:25:19.000000000 +0000
+++ 0.21.3-0ubuntu1/controller/remote/client.go	2025-03-17 16:14:25.000000000 +0000
@@ -6,8 +6,8 @@ import (
 	"sync"
 	"time"
 
-	"github.com/containerd/containerd/defaults"
-	"github.com/containerd/containerd/pkg/dialer"
+	"github.com/containerd/containerd/v2/defaults"
+	"github.com/containerd/containerd/v2/pkg/dialer"
 	"github.com/docker/buildx/build"
 	"github.com/docker/buildx/controller/pb"
 	"github.com/docker/buildx/util/progress"
diff -pruN 0.19.3+ds1-4/debian/.gitignore 0.21.3-0ubuntu1/debian/.gitignore
--- 0.19.3+ds1-4/debian/.gitignore	2025-10-29 17:50:55.000000000 +0000
+++ 0.21.3-0ubuntu1/debian/.gitignore	1970-01-01 00:00:00.000000000 +0000
@@ -1,7 +0,0 @@
-*.debhelper
-*.log
-*.substvars
-/.debhelper/
-/debhelper-build-stamp
-/files
-/docker-buildx/
diff -pruN 0.19.3+ds1-4/debian/NEWS 0.21.3-0ubuntu1/debian/NEWS
--- 0.19.3+ds1-4/debian/NEWS	2025-10-29 17:50:55.000000000 +0000
+++ 0.21.3-0ubuntu1/debian/NEWS	1970-01-01 00:00:00.000000000 +0000
@@ -1,28 +0,0 @@
-golang-github-docker-buildx (0.19.3+ds1-1) experimental; urgency=medium
-
-  * Added new `--call` option replacing experimental `--print`, enabling
-    configuration validation (`--check`), target inspection, and outline
-    display of build parameters.
-  * Introduced new Bake features:
-    - Support for filesystem entitlements via `--allow` (currently warns,
-      will error in v0.20).
-    - Support for HCL/JSON fields using object notation instead of CSV
-      strings (output, cache-from/to, secret, ssh).
-    - New functions (`basename`, `dirname`, `sanitize`) and improved
-      listing via `--list-targets` and `--list-variables`.
-    - Added metrics and warning reporting support for Bake builds.
-  * CLI and UX improvements:
-    - `ls` output more compact; `--no-trunc` restores full output.
-    - `inspect` now shows BuildKit daemon TOML configuration.
-    - Improved and more accurate error and warning messages throughout
-      build and bake commands.
-    - `prune` supports new filters `--max-used-space` and `--min-free-space`.
-  * Compose compatibility updated incrementally to v2.4.4.
-  * Various bug fixes:
-    - Fixed permission issues after running with `sudo`.
-    - Fixed provenance metadata for remote contexts.
-    - Fixed duplicated delegated traces and incorrect build target linking.
-    - Corrected JSON formatting and COMPOSE_PROJECT_NAME handling.
-    - Fixed support for multi-type annotations and local state warnings.
-
- -- Reinhard Tartler <siretart@tauware.de>  Sun, 19 Oct 2025 05:50:46 -0400
diff -pruN 0.19.3+ds1-4/debian/changelog 0.21.3-0ubuntu1/debian/changelog
--- 0.19.3+ds1-4/debian/changelog	2025-10-29 17:50:55.000000000 +0000
+++ 0.21.3-0ubuntu1/debian/changelog	2025-07-03 13:30:29.000000000 +0000
@@ -1,54 +1,52 @@
-docker-buildx (0.19.3+ds1-4) unstable; urgency=medium
+docker-buildx (0.21.3-0ubuntu1) questing; urgency=medium
 
-  * Fix running build and tests in autopkgtest
+  * d/watch: fetch releases data from the Github API. This allows us to fetch
+    more available versions
+  * New upstream version 0.21.3:
+    - CVE-2025-0495
 
- -- Nicolas Peugnet <nicolas@club1.fr>  Wed, 29 Oct 2025 18:50:55 +0100
+ -- Athos Ribeiro <athos.ribeiro@canonical.com>  Thu, 03 Jul 2025 10:30:29 -0300
 
-docker-buildx (0.19.3+ds1-3) unstable; urgency=medium
+docker-buildx (0.20.1-0ubuntu1) plucky; urgency=medium
 
-  * Upload to unstable
+  * d/watch: do not watch pre releases
+  * New upstream version 0.20.1
+  * d/control: build with golang 1.24 (LP: #2098662)
+  * d/rules: add golang 1.24 to PATH
+  * d/s/lintian-overrides: remove unused overrides
+  * d/docker-buildx.docs: update NOTICE file list
+  * d/copyright: update copyright data
+  * d/t/control: add missing B-D on file
 
- -- Reinhard Tartler <siretart@tauware.de>  Sat, 25 Oct 2025 14:32:50 -0400
+ -- Athos Ribeiro <athos.ribeiro@canonical.com>  Mon, 17 Feb 2025 14:34:35 -0300
 
-docker-buildx (0.19.3+ds1-2) experimental; urgency=medium
+docker-buildx (0.14.1-0ubuntu1) oracular; urgency=medium
 
-  [ Nicolas Peugnet ]
-  * Update the list of dependencies
-  * Update dependencies version constraints
-  * Port patch for CVE-2025-0495 to v0.19.3
+  * New upstream release.
+  * d/docker-buildx.docs: remove NOTICE file of a removed dependency.
 
- -- Reinhard Tartler <siretart@tauware.de>  Sun, 19 Oct 2025 06:47:42 -0400
+ -- Lucas Kanashiro <kanashiro@ubuntu.com>  Tue, 04 Jun 2024 18:24:40 -0300
 
-docker-buildx (0.19.3+ds1-1) experimental; urgency=medium
+docker-buildx (0.12.1-0ubuntu2) noble; urgency=medium
 
-  * New upstream version: 0.19.3, see NEWS for summary of changes
-  * Build against docker 27.5.1
-  * Bump Standards-Version, no changes needed
+  * Build with Go 1.22.
+    - d/control: b-d on golang-1.22-go instead of golang-1.21-go
+    - d/rules: add Go 1.22 to $PATH
 
- -- Reinhard Tartler <siretart@tauware.de>  Sun, 19 Oct 2025 05:50:43 -0400
+ -- Lucas Kanashiro <kanashiro@ubuntu.com>  Wed, 17 Apr 2024 17:12:00 -0300
 
-docker-buildx (0.13.1+ds1-3) unstable; urgency=medium
+docker-buildx (0.12.1-0ubuntu1) noble; urgency=medium
 
-  * Fix CVE-2025-0495: possible credential leakage to telemetry endpoint
-    (Closes: #1100991)
+  * New upstream release.
+  * d/copyright: remove superfluous file pattern
+  * Build with Go 1.21, recommended by upstream
+    - d/control: b-d on golang-1.21-go instead of golang-go.
+    - d/rules: add Go 1.21 binary to the $PATH.
 
- -- Nicolas Peugnet <nicolas@club1.fr>  Fri, 11 Apr 2025 11:35:58 +0200
+ -- Lucas Kanashiro <kanashiro@ubuntu.com>  Mon, 19 Feb 2024 17:35:26 -0300
 
-docker-buildx (0.13.1+ds1-2) unstable; urgency=medium
+docker-buildx (0.11.2-0ubuntu1) mantic; urgency=medium
 
-  [ Nicolas Peugnet ]
-  * Generate and install docker-buildx manual pages
-  * Install the Markdown files as docs
-  * Fix clean target by removing generated manual pages
-  * Recommend docker-cli package
+  * Initial release.
 
-  [ Reinhard Tartler]
-  * Add myself to Uploaders
-
- -- Reinhard Tartler <siretart@tauware.de>  Thu, 20 Feb 2025 17:49:01 -0500
-
-docker-buildx (0.13.1+ds1-1) experimental; urgency=medium
-
-  * Initial release (Closes: #989917)
-
- -- Nicolas Peugnet <nicolas@club1.fr>  Sat, 15 Feb 2025 17:30:38 +0100
+ -- Lucas Kanashiro <kanashiro@ubuntu.com>  Wed, 17 May 2023 16:17:52 -0300
diff -pruN 0.19.3+ds1-4/debian/clean 0.21.3-0ubuntu1/debian/clean
--- 0.19.3+ds1-4/debian/clean	2025-10-29 17:50:55.000000000 +0000
+++ 0.21.3-0ubuntu1/debian/clean	1970-01-01 00:00:00.000000000 +0000
@@ -1 +0,0 @@
-docs/reference/docker-buildx*.1
diff -pruN 0.19.3+ds1-4/debian/control 0.21.3-0ubuntu1/debian/control
--- 0.19.3+ds1-4/debian/control	2025-10-29 17:50:55.000000000 +0000
+++ 0.21.3-0ubuntu1/debian/control	2025-06-09 13:52:48.000000000 +0000
@@ -1,137 +1,24 @@
 Source: docker-buildx
-Section: golang
+Section: admin
 Priority: optional
-Maintainer: Debian Go Packaging Team <team+pkg-go@tracker.debian.org>
-Uploaders: Nicolas Peugnet <nicolas@club1.fr>,
-           Reinhard Tartler <siretart@tauware.de>,
-Rules-Requires-Root: no
-Build-Depends: debhelper-compat (= 13),
-               dh-exec,
-               dh-sequence-golang,
-               git <!nocheck>,
-               golang-any,
-               golang-github-aws-aws-sdk-go-v2-dev,
-               golang-github-compose-spec-compose-go-dev,
-               golang-github-containerd-console-dev,
-               golang-github-containerd-containerd-dev,
-               golang-github-containerd-continuity-dev,
-               golang-github-containerd-errdefs-dev,
-               golang-github-containerd-log-dev,
-               golang-github-containerd-platforms-dev,
-               golang-github-containerd-typeurl-dev,
-               golang-github-creack-pty-dev,
-               golang-github-distribution-reference-dev,
-               golang-github-docker-cli-docs-tool-dev,
-               golang-github-docker-docker-dev (>= 27.5.1+dfsg3-4),
-               golang-github-docker-go-units-dev,
-               golang-github-gofrs-flock-dev,
-               golang-github-google-shlex-dev,
-               golang-github-google-uuid-dev,
-               golang-github-hashicorp-go-cty-funcs-dev,
-               golang-github-hashicorp-hcl-v2-dev,
-               golang-github-in-toto-in-toto-golang-dev,
-               golang-github-masterminds-semver-dev,
-               golang-github-mitchellh-hashstructure-dev,
-               golang-github-moby-sys-dev,
-               golang-github-morikuni-aec-dev,
-               golang-github-opencontainers-go-digest-dev,
-               golang-github-opencontainers-image-spec-dev,
-               golang-github-pelletier-go-toml-dev,
-               golang-github-pkg-errors-dev,
-               golang-github-serialx-hashring-dev,
-               golang-github-spf13-cobra-dev,
-               golang-github-spf13-pflag-dev,
-               golang-github-stretchr-testify-dev,
-               golang-github-tonistiigi-fsutil-dev (>= 0.0~git20240925.a340068),
-               golang-github-zclconf-go-cty-dev,
-               golang-golang-x-mod-dev,
-               golang-golang-x-sync-dev,
-               golang-golang-x-sys-dev,
-               golang-golang-x-term-dev,
-               golang-golang-x-text-dev,
-               golang-google-grpc-dev,
-               golang-google-protobuf-dev,
-               golang-gopkg-yaml.v3-dev,
-               golang-k8s-api-dev,
-               golang-k8s-apimachinery-dev,
-               golang-k8s-apiserver-dev,
-               golang-k8s-client-go-dev,
-               golang-logrus-dev,
-               golang-opentelemetry-otel-dev,
-Build-Conflicts: golang-github-cenkalti-backoff-dev (>> 5)
-Testsuite: autopkgtest-pkg-go
-Standards-Version: 4.7.2
-Vcs-Browser: https://salsa.debian.org/go-team/packages/docker-buildx
-Vcs-Git: https://salsa.debian.org/go-team/packages/docker-buildx.git
+Maintainer: Ubuntu Developers <ubuntu-devel-discuss@lists.ubuntu.com>
+Build-Depends: debhelper-compat (= 12),
+               dh-golang,
+               golang-1.24-go
+Standards-Version: 4.6.2
 Homepage: https://github.com/docker/buildx
-XS-Go-Import-Path: github.com/docker/buildx
-
-Package: golang-github-docker-buildx-dev
-Architecture: all
-Multi-Arch: foreign
-Depends: golang-github-aws-aws-sdk-go-v2-dev,
-         golang-github-compose-spec-compose-go-dev,
-         golang-github-containerd-console-dev,
-         golang-github-containerd-containerd-dev,
-         golang-github-containerd-continuity-dev,
-         golang-github-containerd-errdefs-dev,
-         golang-github-containerd-log-dev,
-         golang-github-containerd-platforms-dev,
-         golang-github-containerd-typeurl-dev,
-         golang-github-creack-pty-dev,
-         golang-github-distribution-reference-dev,
-         golang-github-docker-cli-docs-tool-dev,
-         golang-github-docker-docker-dev (>= 27.5.1+dfsg2),
-         golang-github-docker-go-units-dev,
-         golang-github-gofrs-flock-dev,
-         golang-github-google-shlex-dev,
-         golang-github-google-uuid-dev,
-         golang-github-hashicorp-go-cty-funcs-dev,
-         golang-github-hashicorp-hcl-v2-dev,
-         golang-github-in-toto-in-toto-golang-dev,
-         golang-github-masterminds-semver-dev,
-         golang-github-mitchellh-hashstructure-dev,
-         golang-github-moby-sys-dev,
-         golang-github-morikuni-aec-dev,
-         golang-github-opencontainers-go-digest-dev,
-         golang-github-opencontainers-image-spec-dev,
-         golang-github-pelletier-go-toml-dev,
-         golang-github-pkg-errors-dev,
-         golang-github-serialx-hashring-dev,
-         golang-github-spf13-cobra-dev,
-         golang-github-spf13-pflag-dev,
-         golang-github-stretchr-testify-dev,
-         golang-github-tonistiigi-fsutil-dev (>= 0.0~git20240925.a340068),
-         golang-github-zclconf-go-cty-dev,
-         golang-golang-x-mod-dev,
-         golang-golang-x-sync-dev,
-         golang-golang-x-sys-dev,
-         golang-golang-x-term-dev,
-         golang-golang-x-text-dev,
-         golang-google-grpc-dev,
-         golang-google-protobuf-dev,
-         golang-gopkg-yaml.v3-dev,
-         golang-k8s-api-dev,
-         golang-k8s-apimachinery-dev,
-         golang-k8s-apiserver-dev,
-         golang-k8s-client-go-dev,
-         golang-logrus-dev,
-         golang-opentelemetry-otel-dev,
-         ${misc:Depends},
-Description: Docker CLI plugin for extended build capabilities with BuildKit (library)
- This package contains the sources of docker-buildx, to be used with
- packages that depend on it.
+Vcs-Git: https://github.com/canonical/docker-buildx.git
+Vcs-Browser: https://github.com/canonical/docker-buildx
+Rules-Requires-Root: no
 
 Package: docker-buildx
-Section: admin
 Architecture: any
-Depends: ${misc:Depends},
-         ${shlibs:Depends},
-Recommends: docker-cli,
-Static-Built-Using: ${misc:Static-Built-Using}
-Description: Docker CLI plugin for extended build capabilities with BuildKit (program)
- buildx is a Docker CLI plugin for extended build capabilities with
- BuildKit (https://github.com/moby/buildkit).
+Depends: docker.io,
+         ${misc:Depends},
+         ${shlibs:Depends}
+Built-Using: ${libc:Built-Using}, ${misc:Built-Using}
+Description: Docker CLI plugin for extended build capabilities with BuildKit
+ buildx is a Docker CLI plugin for extended build capabilities with BuildKit.
  .
  Key features:
  .
diff -pruN 0.19.3+ds1-4/debian/copyright 0.21.3-0ubuntu1/debian/copyright
--- 0.19.3+ds1-4/debian/copyright	2025-10-29 17:50:55.000000000 +0000
+++ 0.21.3-0ubuntu1/debian/copyright	2025-06-09 13:52:48.000000000 +0000
@@ -1,19 +1,13 @@
 Format: https://www.debian.org/doc/packaging-manuals/copyright-format/1.0/
-Source: https://github.com/docker/buildx
 Upstream-Name: buildx
-Upstream-Contact: Docker, Inc. <github@docker.com>
-Files-Excluded:
-  vendor
+Upstream-Contact: The Buildx Authors
+Source: https://github.com/docker/buildx
 Comment: The Buildx Authors refer to all contributors in the AUTHORS file.
 
 Files: *
 Copyright: The Buildx Authors
 License: Apache-2.0
 
-Files: bake/hclparser/merged.go
-Copyright: HashiCorp, Inc.
-License: MPL-2.0
-
 Files: hack/dockerfiles/generated-files.Dockerfile
 Copyright: The BuildKit Authors
            The Buildx Authors
@@ -21,30 +15,1004 @@ License: Apache-2.0
 
 Files: debian/*
 Copyright: 2023 Canonical Ltd.
-           2025 Nicolas Peugnet <nicolas@club1.fr>
 License: Apache-2.0
-Comment: Debian packaging is licensed under the same terms as upstream
+
+Files: vendor/go.opentelemetry.io/*
+Copyright: The OpenTelemetry Authors
+License: Apache-2.0
+
+Files: vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service.pb.go
+       vendor/go.opentelemetry.io/proto/otlp/trace/v1/trace.pb.go
+       vendor/go.opentelemetry.io/proto/otlp/resource/v1/resource.pb.go
+       vendor/go.opentelemetry.io/proto/otlp/common/v1/common.pb.go
+Copyright: 2019 OpenTelemetry Authors
+License: Apache-2.0
+
+Files: vendor/golang.org/x/*
+Copyright: 2009-2023 The Go Authors
+License: BSD-3-clause-Google
+
+Files: vendor/k8s.io/*
+Copyright: 2014-2022 The Kubernetes Authors
+License: Apache-2.0
+
+Files: vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/*
+Copyright: 2020-2022 The Go Authors
+License: BSD-3-clause-Google
+
+Files: vendor/k8s.io/kube-openapi/pkg/validation/spec/*
+Copyright: 2015 go-swagger maintainers
+License: Apache-2.0
+
+Files: vendor/k8s.io/kube-openapi/pkg/validation/spec/gnostic.go
+Copyright: 2022 The Kubernetes Authors
+License: Apache-2.0
+
+Files: vendor/k8s.io/utils/internal/third_party/forked/golang/*
+Copyright: 2009-2012 The Go Authors
+License: BSD-3-clause-Google
+
+Files: vendor/k8s.io/klog/v2/klog_file.go
+       vendor/k8s.io/klog/v2/klog.go
+       vendor/k8s.io/klog/v2/internal/dbg/dbg.go
+Copyright: 2013 Google Inc
+License: Apache-2.0
+
+Files: vendor/k8s.io/klog/v2/exit.go
+       vendor/k8s.io/klog/v2/internal/buffer/buffer.go
+       vendor/k8s.io/klog/v2/internal/severity/severity.go
+Copyright: 2013 Google Inc
+           2022 The Kubernetes Authors
+License: Apache-2.0
+
+Files: vendor/google.golang.org/grpc/*
+Copyright: 2014-2022 gRPC authors
+License: Apache-2.0
+
+Files: vendor/google.golang.org/protobuf/*
+Copyright: 2018-2020 The Go Authors
+License: BSD-3-clause-Google
+
+Files: vendor/google.golang.org/protobuf/types/*/*.pb.go
+Copyright: 2008 Google Inc
+License: BSD-3-clause-Google
+
+Files: vendor/sigs.k8s.io/*
+Copyright: 2018-2021 The Kubernetes Authors
+License: Apache-2.0
+
+Files: vendor/sigs.k8s.io/json/internal/golang/*
+Copyright: 2010-2019 The Go Authors
+License: BSD-3-clause-Google
+
+Files: vendor/sigs.k8s.io/yaml/*
+Copyright: 2014 Sam Ghods
+           2012 The Go Authors
+License: Expat and BSD-3-clause-Google
+
+Files: vendor/sigs.k8s.io/yaml/fields.go
+Copyright: 2013 The Go Authors
+License: BSD-3-clause-Google
+
+Files: vendor/gopkg.in/yaml.v2/*
+Copyright: 2011-2016 Canonical Ltd
+License: Apache-2.0
+
+Files: vendor/gopkg.in/yaml.v2/apic.go
+       vendor/gopkg.in/yaml.v2/emitterc.go
+       vendor/gopkg.in/yaml.v2/parserc.go
+       vendor/gopkg.in/yaml.v2/readerc.go
+       vendor/gopkg.in/yaml.v2/scannerc.go
+       vendor/gopkg.in/yaml.v2/writerc.go
+       vendor/gopkg.in/yaml.v2/yamlh.go
+       vendor/gopkg.in/yaml.v2/yamlprivateh.go
+Copyright: 2006 Kirill Simonov
+License: Expat
+
+Files: vendor/gopkg.in/yaml.v3/*
+Copyright: 2011-2019 Canonical Ltd
+License: Apache-2.0
+
+Files: vendor/gopkg.in/yaml.v3/writerc.go
+       vendor/gopkg.in/yaml.v3/scannerc.go
+       vendor/gopkg.in/yaml.v3/emitterc.go
+       vendor/gopkg.in/yaml.v3/apic.go
+       vendor/gopkg.in/yaml.v3/parserc.go
+       vendor/gopkg.in/yaml.v3/readerc.go
+       vendor/gopkg.in/yaml.v3/yamlh.go
+       vendor/gopkg.in/yaml.v3/yamlprivateh.go
+Copyright: 2011-2019 Canonical Ltd
+           2006-2010 Kirill Simonov
+License: Expat
+
+Files: vendor/gopkg.in/inf.v0/*
+Copyright: 2012 Péter Surányi
+           2009 The Go Authors
+License: BSD-3-clause-Google
+
+Files: vendor/github.com/apparentlymart/*
+Copyright: 2015-2017 Martin Atkins
+License: Expat
+
+Files: vendor/github.com/cespare/xxhash/v2/*
+Copyright: 2016 Caleb Spare
+License: Expat
+
+Files: vendor/github.com/compose-spec/compose-go/*
+Copyright: 2020 The Compose Specification Authors
+           2013-2017 Docker, Inc
+License: Apache-2.0
+
+Files: vendor/github.com/compose-spec/compose-go/v2/dotenv/*
+Copyright: 2013 John Barton
+License: Expat
+
+Files: vendor/github.com/compose-spec/compose-go/v2/paths/windows_path.go
+Copyright: 2020 The Compose Specification Authors
+           2010 The Go Authors
+License: Apache-2.0 and BSD-3-clause-Google
+
+Files: vendor/github.com/aws/smithy-go/*
+Copyright: Amazon.com, Inc. or its affiliates
+License: Apache-2.0
+
+Files: vendor/github.com/aws/smithy-go/internal/sync/singleflight/*
+       vendor/github.com/aws/smithy-go/encoding/xml/escape.go
+       vendor/github.com/aws/smithy-go/encoding/xml/element.go
+       vendor/github.com/aws/smithy-go/encoding/json/escape.go
+Copyright: 2009-2016 The Go Authors
+License: BSD-3-clause-Google
+
+Files: vendor/github.com/aws/aws-sdk-go-v2/*
+Copyright: 2015 Amazon.com, Inc. or its affiliates
+           2014-2015 Stripe, Inc
+License: Apache-2.0
+
+Files: vendor/github.com/Microsoft/go-winio/*
+Copyright: 2015 Microsoft
+License: Expat
+
+Files: vendor/github.com/klauspost/compress/*
+Copyright: 2012 The Go Authors
+           2018-2019 Klaus Post
+           2013 Yann Collet
+           2016-2017 The New York Times Company
+License: BSD-3-clause-Google and Apache-2.0
+
+Files: vendor/github.com/klauspost/compress/internal/snapref/*
+Copyright: 2011-2016 The Snappy-Go Authors
+License: BSD-3-clause-Google
+
+Files: vendor/github.com/klauspost/compress/zstd/internal/xxhash/*
+Copyright: 2016 Caleb Spare
+License: Expat
+
+Files: vendor/github.com/moby/*
+Copyright: 2012-2021 Docker Inc
+License: Apache-2.0
+
+Files: vendor/github.com/moby/spdystream/spdy/read.go
+       vendor/github.com/moby/spdystream/spdy/write.go
+       vendor/github.com/moby/spdystream/spdy/types.go
+       vendor/github.com/moby/spdystream/spdy/dictionary.go
+Copyright: 2014-2021 Docker Inc
+           2011-2013 The Go Authors
+License: Apache-2.0 and BSD-3-clause-Google
+
+Files: vendor/github.com/moby/buildkit/version/version.go
+Copyright: The BuildKit Authors
+           The containerd Authors
+License: Apache-2.0
+
+Files: vendor/github.com/moby/buildkit/util/tracing/otlptracegrpc/*
+Copyright: The OpenTelemetry Authors
+License: Apache-2.0
+
+Files: vendor/github.com/gogo/protobuf/*
+Copyright: 2010-2017 The Go Authors
+           2013 The GoGo Authors
+License: BSD-3-clause-Google
+
+Files: vendor/github.com/gogo/protobuf/*/*_gogo.go
+       vendor/github.com/gogo/protobuf/proto/wrappers.go
+       vendor/github.com/gogo/protobuf/sortkeys/sortkeys.go
+Copyright: 2013-2018 The GoGo Authors
+License: BSD-2-clause 
+
+Files: vendor/google.golang.org/genproto/googleapis/*
+Copyright: 2024 Google LLC
+License: Apache-2.0
+
+Files: vendor/google.golang.org/genproto/googleapis/*/*/*.pb.go
+Copyright: 2015-2022 Google LLC
+License: Apache-2.0
+
+Files: vendor/github.com/pkg/errors/*
+Copyright: 2015 Dave Cheney <dave@cheney.net>
+License: BSD-2-clause
+
+Files: vendor/github.com/fvbommel/sortorder/*
+Copyright: 2015 Frits van Bommel
+License: Expat
+
+Files: vendor/github.com/munnerz/goautoneg/*
+Copyright: 2011 Open Knowledge Foundation Ltd
+License: BSD-3-clause-Generic
+
+Files: vendor/github.com/inconshreveable/mousetrap/*
+Copyright: 2022 Alan Shreve <@inconshreveable>
+License: Apache-2.0
+
+Files: vendor/github.com/google/gofuzz/*
+Copyright: 2014 Google Inc
+License: Apache-2.0
+
+Files: vendor/github.com/google/go-cmp/*
+Copyright: 2017-2020 The Go Authors
+License: BSD-3-clause-Google
+
+Files: vendor/github.com/google/gnostic-models/*
+Copyright: 2017-2020 Google LLC
+License: Apache-2.0
+
+Files: vendor/github.com/google/shlex/*
+Copyright: 2012 Google Inc
+License: Apache-2.0
+
+Files: vendor/github.com/google/uuid/*
+Copyright: 2009-2021 Google Inc
+License: BSD-3-clause-Google
+
+Files: vendor/github.com/serialx/hashring/*
+Copyright: 2016 Sung-jin Hong
+License: Expat
+
+Files: vendor/github.com/gofrs/flock/*
+Copyright: 2015-2020 Tim Heckman
+License: BSD-3-clause-Generic
+
+Files: vendor/github.com/zclconf/go-cty/*
+Copyright: 2017-2018 Martin Atkins
+License: Expat
+
+Files: vendor/github.com/morikuni/aec/*
+Copyright: 2016 Taihei Morikuni
+License: Expat
+
+Files: vendor/github.com/secure-systems-lab/go-securesystemslib/*
+Copyright: 2021 NYU Secure Systems Lab
+License: Expat
+
+Files: vendor/github.com/shibumi/go-pathspec/*
+Copyright: 2014 Sander van Harmelen
+           2020 Christian Rebischke
+           2012 The Go Authors
+License: Apache-2.0 and BSD-3-clause-Google
+
+Files: vendor/github.com/felixge/httpsnoop/*
+Copyright: 2016 Felix Geisendörfer <felix@debuggable.com>
+License: Expat
+
+Files: vendor/github.com/davecgh/go-spew/*
+Copyright: 2012-2016 Dave Collins <dave@davec.name>
+License: ISC
+
+Files: vendor/github.com/in-toto/in-toto-golang/*
+Copyright: 2018 New York University
+License: Apache-2.0
+
+Files: vendor/github.com/in-toto/in-toto-golang/in_toto/match.go
+Copyright: 2010 The Go Authors
+License: BSD-3-clause-Google
+
+Files: vendor/github.com/emicklei/go-restful/v3/*
+Copyright: 2012-2021 Ernest Micklei
+License: Expat
+
+Files: vendor/github.com/theupdateframework/notary/*
+Copyright: 2015 Docker, Inc
+License: Apache-2.0
+
+Files: vendor/github.com/theupdateframework/notary/tuf/*
+Copyright: 2015 Docker Inc
+           2014-2015 Prime Directive, Inc
+License: BSD-3-clause-Prime
+
+Files: vendor/github.com/theupdateframework/notary/tuf/utils/pkcs8.go
+Copyright: 2014 youmark
+License: Expat
+
+Files: vendor/github.com/miekg/pkcs11/*
+Copyright: 2013 Miek Gieben
+License: BSD-3-clause-Generic
+
+Files: vendor/github.com/miekg/pkcs11/pkcs11.h
+       vendor/github.com/miekg/pkcs11/pkcs11t.h
+       vendor/github.com/miekg/pkcs11/pkcs11f.h
+Copyright: 2016 OASIS Open
+License: OASIS-IPR-Policy
+
+Files: vendor/github.com/prometheus/*
+Copyright: 2013-2022 The Prometheus Authors
+License: Apache-2.0
+
+Files: vendor/github.com/prometheus/client_golang/*
+Copyright: 2012-2021 The Prometheus Authors
+           2013-2015 Blake Mizerany, Björn Rabenstein
+           2010 The Go Authors
+           2013 Matt T. Proud
+License: Apache-2.0
+
+Files: vendor/github.com/mailru/easyjson/*
+Copyright: 2016 Mail.Ru Group
+License: Expat
+
+Files: vendor/github.com/hashicorp/*
+Copyright: Hashicorp
+License: MPL-2.0
+
+Files: vendor/github.com/beorn7/perks/*
+Copyright: 2013 Blake Mizerany
+License: Expat
+
+Files: vendor/github.com/AdaLogics/go-fuzz-headers/*
+Copyright: 2023 The go-fuzz-headers Authors
+License: Apache-2.0
+
+Files: vendor/github.com/xeipuuv/*
+Copyright: 2015 xeipuuv <https://github.com/xeipuuv>
+License: Apache-2.0
+
+Files: vendor/github.com/xeipuuv/gojsonschema/schemaLoader.go
+       vendor/github.com/xeipuuv/gojsonschema/draft.go
+Copyright: 2018 johandorland <https://github.com/johandorland>
+License: Apache-2.0
+
+Files: vendor/github.com/xeipuuv/gojsonschema/jsonContext.go
+Copyright: 2013 MongoDB, Inc
+License: Apache-2.0
+
+Files: vendor/github.com/mattn/go-shellwords/*
+Copyright: 2017 Yasuhiro Matsumoto
+License: Expat
+
+Files: vendor/github.com/cenkalti/backoff/v4/*
+Copyright: 2014 Cenk Altı
+License: Expat
+
+Files: vendor/github.com/go-logr/*
+Copyright: 2019-2021 The logr Authors
+License: Apache-2.0
+
+Files: vendor/github.com/sirupsen/logrus/*
+Copyright: 2014 Simon Eskildsen
+License: Expat
+
+Files: vendor/github.com/sirupsen/logrus/alt_exit.go
+Copyright: 2012 Miki Tebeka <miki.tebeka@gmail.com>
+License: Expat
+
+Files: vendor/github.com/spf13/cobra/*
+Copyright: 2013-2023 The Cobra Authors
+License: Apache-2.0
+
+Files: vendor/github.com/spf13/pflag/*
+Copyright: 2012 Alex Ogier
+           2009-2012 The Go Authors
+License: BSD-3-clause-Google
+
+Files: vendor/github.com/json-iterator/go/*
+Copyright: 2016 json-iterator
+License: Expat
+
+Files: vendor/github.com/pelletier/go-toml/*
+Copyright: 2013-2021 Thomas Pelletier, Eric Anderton
+License: Expat
+
+Files: vendor/github.com/pelletier/go-toml/localtime.go
+Copyright: 2016 Google LLC
+License: Apache-2.0
+
+Files: vendor/github.com/Masterminds/semver/v3/*
+Copyright: 2014-2019 Matt Butcher and Matt Farina
+License: Expat
+
+Files: vendor/github.com/tonistiigi/*
+Copyright: 2017 Tõnis Tiigi
+License: Expat
+
+Files: vendor/github.com/tonistiigi/vt100/*
+Copyright: 2015 James Aguilar
+License: Expat
+
+Files: vendor/github.com/josharian/intern/*
+Copyright: 2019 Josh Bleecher Snyder
+License: Expat
+
+Files: vendor/github.com/go-openapi/*
+Copyright: 2013 sigu-399 <https://github.com/sigu-399>
+License: Apache-2.0
+
+Files: vendor/github.com/go-openapi/swag/*
+Copyright: 2015 go-swagger maintainers
+License: Apache-2.0
+
+Files: vendor/github.com/docker/*
+Copyright: 2012-2017 Docker, Inc
+License: Apache-2.0
+
+Files: vendor/github.com/docker/docker-credential-helpers/*
+Copyright: 2016 David Calavera
+License: Expat
+
+Files: vendor/github.com/docker/go/*
+Copyright: 2010-2013 The Go Authors
+License: BSD-3-clause-Google
+
+Files: vendor/github.com/docker/cli-docs-tool/*
+Copyright: 2017-2021 cli-docs-tool authors
+License: Apache-2.0
+
+Files: vendor/github.com/Azure/go-ansiterm/*
+Copyright: 2015 Microsoft Corporation
+License: Expat
+
+Files: vendor/github.com/grpc-ecosystem/grpc-gateway/v2/*
+Copyright: 2015, Gengo, Inc
+License: BSD-3-clause-Generic
+
+Files: vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/pattern.go
+Copyright: 2009 The Go Authors
+           2015, Gengo, Inc
+License: BSD-3-clause-Generic
+
+Files: vendor/github.com/golang/protobuf/*
+Copyright: 2010-2019 The Go Authors
+License: BSD-3-clause-Google
+
+Files: vendor/github.com/mitchellh/*
+Copyright: 2013-2014 Mitchell Hashimoto
+License: Expat
+
+Files: vendor/github.com/gorilla/mux/*
+Copyright: 2012-2018 The Gorilla Authors
+License: BSD-3-clause-Google
+
+Files: vendor/github.com/opencontainers/go-digest/*
+Copyright: 2019-2020 OCI Contributors
+           2016-2017 Docker, Inc
+License: Apache-2.0
+
+Files: vendor/github.com/opencontainers/image-spec/*
+Copyright: 2016-2022 The Linux Foundation
+License: Apache-2.0
+
+Files: vendor/github.com/imdario/mergo/*
+Copyright: 2013-2014 Dario Castañé
+           2009-2012 The Go Authors
+License: BSD-3-clause-Google
+
+Files: vendor/github.com/pmezard/go-difflib/*
+Copyright: 2013 Patrick Mezard
+License: BSD-3-clause-Generic
+
+Files: vendor/github.com/containerd/*
+Copyright: The containerd Authors
+License: Apache-2.0
+
+Files: vendor/github.com/agext/levenshtein/*
+Copyright: 2016 ALRUX Inc
+License: Apache-2.0
+
+Files: vendor/github.com/stretchr/testify/*
+Copyright: 2012-2020 Mat Ryer, Tyler Bunnell and contributors
+License: Expat
 
 License: Apache-2.0
  Licensed under the Apache License, Version 2.0 (the "License");
  you may not use this file except in compliance with the License.
  You may obtain a copy of the License at
  .
- https://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
  .
  Unless required by applicable law or agreed to in writing, software
  distributed under the License is distributed on an "AS IS" BASIS,
  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  See the License for the specific language governing permissions and
  limitations under the License.
-Comment:
+ .
  On Debian systems, the complete text of the Apache version 2.0 license
  can be found in "/usr/share/common-licenses/Apache-2.0".
 
+License: BSD-2-clause
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+ .
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ .
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+License: BSD-3-clause-Google
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+ .
+    * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following disclaimer
+ in the documentation and/or other materials provided with the
+ distribution.
+    * Neither the name of Google Inc. nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+ .
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+License: BSD-3-clause-Prime
+ All rights reserved.
+ .
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+ .
+    * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following disclaimer
+ in the documentation and/or other materials provided with the
+ distribution.
+    * Neither the name of Prime Directive, Inc. nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+ .
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+License: BSD-3-clause-Generic
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+     * Redistributions of source code must retain the above copyright
+       notice, this list of conditions and the following disclaimer.
+     * Redistributions in binary form must reproduce the above copyright
+       notice, this list of conditions and the following disclaimer in the
+       documentation and/or other materials provided with the distribution.
+     * Neither the name of the <organization> nor the
+       names of its contributors may be used to endorse or promote products
+       derived from this software without specific prior written permission.
+ .
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
+ DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+License: Expat
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+ .
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+ .
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
+
 License: MPL-2.0
- This Source Code Form is subject to the terms of the Mozilla Public
- License, v. 2.0. If a copy of the MPL was not distributed with this
- file, You can obtain one at http://mozilla.org/MPL/2.0/.
-Comment:
- On Debian systems, the complete text of the MPL-2.0 license can be found
- in "/usr/share/common-licenses/MPL-2.0".
+ Mozilla Public License, version 2.0
+ .
+ 1. Definitions
+ .
+ 1.1. “Contributor”
+ .
+      means each individual or legal entity that creates, contributes to the
+      creation of, or owns Covered Software.
+ .
+ 1.2. “Contributor Version”
+ .
+      means the combination of the Contributions of others (if any) used by a
+      Contributor and that particular Contributor’s Contribution.
+ .
+ 1.3. “Contribution”
+ .
+      means Covered Software of a particular Contributor.
+ .
+ 1.4. “Covered Software”
+ .
+      means Source Code Form to which the initial Contributor has attached the
+      notice in Exhibit A, the Executable Form of such Source Code Form, and
+      Modifications of such Source Code Form, in each case including portions
+      thereof.
+ .
+ 1.5. “Incompatible With Secondary Licenses”
+      means
+ .
+      a. that the initial Contributor has attached the notice described in
+         Exhibit B to the Covered Software; or
+ .
+      b. that the Covered Software was made available under the terms of version
+         1.1 or earlier of the License, but not also under the terms of a
+         Secondary License.
+ .
+ 1.6. “Executable Form”
+ .
+      means any form of the work other than Source Code Form.
+ .
+ 1.7. “Larger Work”
+ .
+      means a work that combines Covered Software with other material, in a separate
+      file or files, that is not Covered Software.
+ .
+ 1.8. “License”
+ .
+      means this document.
+ .
+ 1.9. “Licensable”
+ .
+      means having the right to grant, to the maximum extent possible, whether at the
+      time of the initial grant or subsequently, any and all of the rights conveyed by
+      this License.
+ .
+ 1.10. “Modifications”
+ .
+      means any of the following:
+ .
+      a. any file in Source Code Form that results from an addition to, deletion
+         from, or modification of the contents of Covered Software; or
+ .
+      b. any new file in Source Code Form that contains any Covered Software.
+ .
+ 1.11. “Patent Claims” of a Contributor
+ .
+       means any patent claim(s), including without limitation, method, process,
+       and apparatus claims, in any patent Licensable by such Contributor that
+       would be infringed, but for the grant of the License, by the making,
+       using, selling, offering for sale, having made, import, or transfer of
+       either its Contributions or its Contributor Version.
+ .
+ 1.12. “Secondary License”
+ .
+       means either the GNU General Public License, Version 2.0, the GNU Lesser
+       General Public License, Version 2.1, the GNU Affero General Public
+       License, Version 3.0, or any later versions of those licenses.
+ .
+ 1.13. “Source Code Form”
+ .
+       means the form of the work preferred for making modifications.
+ .
+ 1.14. “You” (or “Your”)
+ .
+       means an individual or a legal entity exercising rights under this
+       License. For legal entities, “You” includes any entity that controls, is
+       controlled by, or is under common control with You. For purposes of this
+       definition, “control” means (a) the power, direct or indirect, to cause
+       the direction or management of such entity, whether by contract or
+       otherwise, or (b) ownership of more than fifty percent (50%) of the
+       outstanding shares or beneficial ownership of such entity.
+ .
+ .
+ 2. License Grants and Conditions
+ .
+ 2.1. Grants
+ .
+      Each Contributor hereby grants You a world-wide, royalty-free,
+      non-exclusive license:
+ .
+      a. under intellectual property rights (other than patent or trademark)
+         Licensable by such Contributor to use, reproduce, make available,
+         modify, display, perform, distribute, and otherwise exploit its
+         Contributions, either on an unmodified basis, with Modifications, or as
+         part of a Larger Work; and
+ .
+      b. under Patent Claims of such Contributor to make, use, sell, offer for
+         sale, have made, import, and otherwise transfer either its Contributions
+         or its Contributor Version.
+ .
+ 2.2. Effective Date
+ .
+      The licenses granted in Section 2.1 with respect to any Contribution become
+      effective for each Contribution on the date the Contributor first distributes
+      such Contribution.
+ .
+ 2.3. Limitations on Grant Scope
+ .
+      The licenses granted in this Section 2 are the only rights granted under this
+      License. No additional rights or licenses will be implied from the distribution
+      or licensing of Covered Software under this License. Notwithstanding Section
+      2.1(b) above, no patent license is granted by a Contributor:
+ .
+      a. for any code that a Contributor has removed from Covered Software; or
+ .
+      b. for infringements caused by: (i) Your and any other third party’s
+         modifications of Covered Software, or (ii) the combination of its
+         Contributions with other software (except as part of its Contributor
+         Version); or
+ .
+      c. under Patent Claims infringed by Covered Software in the absence of its
+         Contributions.
+ .
+      This License does not grant any rights in the trademarks, service marks, or
+      logos of any Contributor (except as may be necessary to comply with the
+      notice requirements in Section 3.4).
+ .
+ 2.4. Subsequent Licenses
+ .
+      No Contributor makes additional grants as a result of Your choice to
+      distribute the Covered Software under a subsequent version of this License
+      (see Section 10.2) or under the terms of a Secondary License (if permitted
+      under the terms of Section 3.3).
+ .
+ 2.5. Representation
+ .
+      Each Contributor represents that the Contributor believes its Contributions
+      are its original creation(s) or it has sufficient rights to grant the
+      rights to its Contributions conveyed by this License.
+ .
+ 2.6. Fair Use
+ .
+      This License is not intended to limit any rights You have under applicable
+      copyright doctrines of fair use, fair dealing, or other equivalents.
+ .
+ 2.7. Conditions
+ .
+      Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
+      Section 2.1.
+ .
+ .
+ 3. Responsibilities
+ .
+ 3.1. Distribution of Source Form
+ .
+      All distribution of Covered Software in Source Code Form, including any
+      Modifications that You create or to which You contribute, must be under the
+      terms of this License. You must inform recipients that the Source Code Form
+      of the Covered Software is governed by the terms of this License, and how
+      they can obtain a copy of this License. You may not attempt to alter or
+      restrict the recipients’ rights in the Source Code Form.
+ .
+ 3.2. Distribution of Executable Form
+ .
+      If You distribute Covered Software in Executable Form then:
+ .
+      a. such Covered Software must also be made available in Source Code Form,
+         as described in Section 3.1, and You must inform recipients of the
+         Executable Form how they can obtain a copy of such Source Code Form by
+         reasonable means in a timely manner, at a charge no more than the cost
+         of distribution to the recipient; and
+ .
+      b. You may distribute such Executable Form under the terms of this License,
+         or sublicense it under different terms, provided that the license for
+         the Executable Form does not attempt to limit or alter the recipients’
+         rights in the Source Code Form under this License.
+ .
+ 3.3. Distribution of a Larger Work
+ .
+      You may create and distribute a Larger Work under terms of Your choice,
+      provided that You also comply with the requirements of this License for the
+      Covered Software. If the Larger Work is a combination of Covered Software
+      with a work governed by one or more Secondary Licenses, and the Covered
+      Software is not Incompatible With Secondary Licenses, this License permits
+      You to additionally distribute such Covered Software under the terms of
+      such Secondary License(s), so that the recipient of the Larger Work may, at
+      their option, further distribute the Covered Software under the terms of
+      either this License or such Secondary License(s).
+ .
+ 3.4. Notices
+ .
+      You may not remove or alter the substance of any license notices (including
+      copyright notices, patent notices, disclaimers of warranty, or limitations
+      of liability) contained within the Source Code Form of the Covered
+      Software, except that You may alter any license notices to the extent
+      required to remedy known factual inaccuracies.
+ .
+ 3.5. Application of Additional Terms
+ .
+      You may choose to offer, and to charge a fee for, warranty, support,
+      indemnity or liability obligations to one or more recipients of Covered
+      Software. However, You may do so only on Your own behalf, and not on behalf
+      of any Contributor. You must make it absolutely clear that any such
+      warranty, support, indemnity, or liability obligation is offered by You
+      alone, and You hereby agree to indemnify every Contributor for any
+      liability incurred by such Contributor as a result of warranty, support,
+      indemnity or liability terms You offer. You may include additional
+      disclaimers of warranty and limitations of liability specific to any
+      jurisdiction.
+ .
+ 4. Inability to Comply Due to Statute or Regulation
+ .
+    If it is impossible for You to comply with any of the terms of this License
+    with respect to some or all of the Covered Software due to statute, judicial
+    order, or regulation then You must: (a) comply with the terms of this License
+    to the maximum extent possible; and (b) describe the limitations and the code
+    they affect. Such description must be placed in a text file included with all
+    distributions of the Covered Software under this License. Except to the
+    extent prohibited by statute or regulation, such description must be
+    sufficiently detailed for a recipient of ordinary skill to be able to
+    understand it.
+ .
+ 5. Termination
+ .
+ 5.1. The rights granted under this License will terminate automatically if You
+      fail to comply with any of its terms. However, if You become compliant,
+      then the rights granted under this License from a particular Contributor
+      are reinstated (a) provisionally, unless and until such Contributor
+      explicitly and finally terminates Your grants, and (b) on an ongoing basis,
+      if such Contributor fails to notify You of the non-compliance by some
+      reasonable means prior to 60 days after You have come back into compliance.
+      Moreover, Your grants from a particular Contributor are reinstated on an
+      ongoing basis if such Contributor notifies You of the non-compliance by
+      some reasonable means, this is the first time You have received notice of
+      non-compliance with this License from such Contributor, and You become
+      compliant prior to 30 days after Your receipt of the notice.
+ .
+ 5.2. If You initiate litigation against any entity by asserting a patent
+      infringement claim (excluding declaratory judgment actions, counter-claims,
+      and cross-claims) alleging that a Contributor Version directly or
+      indirectly infringes any patent, then the rights granted to You by any and
+      all Contributors for the Covered Software under Section 2.1 of this License
+      shall terminate.
+ .
+ 5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
+      license agreements (excluding distributors and resellers) which have been
+      validly granted by You or Your distributors under this License prior to
+      termination shall survive termination.
+ .
+ 6. Disclaimer of Warranty
+ .
+    Covered Software is provided under this License on an “as is” basis, without
+    warranty of any kind, either expressed, implied, or statutory, including,
+    without limitation, warranties that the Covered Software is free of defects,
+    merchantable, fit for a particular purpose or non-infringing. The entire
+    risk as to the quality and performance of the Covered Software is with You.
+    Should any Covered Software prove defective in any respect, You (not any
+    Contributor) assume the cost of any necessary servicing, repair, or
+    correction. This disclaimer of warranty constitutes an essential part of this
+    License. No use of any Covered Software is authorized under this License
+    except under this disclaimer.
+ .
+ 7. Limitation of Liability
+ .
+    Under no circumstances and under no legal theory, whether tort (including
+    negligence), contract, or otherwise, shall any Contributor, or anyone who
+    distributes Covered Software as permitted above, be liable to You for any
+    direct, indirect, special, incidental, or consequential damages of any
+    character including, without limitation, damages for lost profits, loss of
+    goodwill, work stoppage, computer failure or malfunction, or any and all
+    other commercial damages or losses, even if such party shall have been
+    informed of the possibility of such damages. This limitation of liability
+    shall not apply to liability for death or personal injury resulting from such
+    party’s negligence to the extent applicable law prohibits such limitation.
+    Some jurisdictions do not allow the exclusion or limitation of incidental or
+    consequential damages, so this exclusion and limitation may not apply to You.
+ .
+ 8. Litigation
+ .
+    Any litigation relating to this License may be brought only in the courts of
+    a jurisdiction where the defendant maintains its principal place of business
+    and such litigation shall be governed by laws of that jurisdiction, without
+    reference to its conflict-of-law provisions. Nothing in this Section shall
+    prevent a party’s ability to bring cross-claims or counter-claims.
+ .
+ 9. Miscellaneous
+ .
+    This License represents the complete agreement concerning the subject matter
+    hereof. If any provision of this License is held to be unenforceable, such
+    provision shall be reformed only to the extent necessary to make it
+    enforceable. Any law or regulation which provides that the language of a
+    contract shall be construed against the drafter shall not be used to construe
+    this License against a Contributor.
+ .
+ .
+ 10. Versions of the License
+ .
+ 10.1. New Versions
+ .
+       Mozilla Foundation is the license steward. Except as provided in Section
+       10.3, no one other than the license steward has the right to modify or
+       publish new versions of this License. Each version will be given a
+       distinguishing version number.
+ .
+ 10.2. Effect of New Versions
+ .
+       You may distribute the Covered Software under the terms of the version of
+       the License under which You originally received the Covered Software, or
+       under the terms of any subsequent version published by the license
+       steward.
+ .
+ 10.3. Modified Versions
+ .
+       If you create software not governed by this License, and you want to
+       create a new license for such software, you may create and use a modified
+       version of this License if you rename the license and remove any
+       references to the name of the license steward (except to note that such
+       modified license differs from this License).
+ .
+ 10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses
+       If You choose to distribute Source Code Form that is Incompatible With
+       Secondary Licenses under the terms of this version of the License, the
+       notice described in Exhibit B of this License must be attached.
+ .
+ Exhibit A - Source Code Form License Notice
+ .
+       This Source Code Form is subject to the
+       terms of the Mozilla Public License, v.
+       2.0. If a copy of the MPL was not
+       distributed with this file, You can
+       obtain one at
+       http://mozilla.org/MPL/2.0/.
+ .
+ If it is not possible or desirable to put the notice in a particular file, then
+ You may include the notice in a location (such as a LICENSE file in a relevant
+ directory) where a recipient would be likely to look for such a notice.
+ .
+ You may add additional accurate notices of copyright ownership.
+ .
+ Exhibit B - “Incompatible With Secondary Licenses” Notice
+ .
+       This Source Code Form is “Incompatible
+       With Secondary Licenses”, as defined by
+       the Mozilla Public License, v. 2.0.
+
+License: ISC
+ Permission to use, copy, modify, and distribute this software for any
+ purpose with or without fee is hereby granted, provided that the above
+ copyright notice and this permission notice appear in all copies.
+ .
+ THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+License: OASIS-IPR-Policy
+ Distributed under the terms of the OASIS IPR Policy,
+ [http://www.oasis-open.org/policies-guidelines/ipr], AS-IS, WITHOUT ANY
+ IMPLIED OR EXPRESS WARRANTY; there is no warranty of MERCHANTABILITY, FITNESS
+ FOR A PARTICULAR PURPOSE or NONINFRINGEMENT of the rights of others.
diff -pruN 0.19.3+ds1-4/debian/docker-buildx.docs 0.21.3-0ubuntu1/debian/docker-buildx.docs
--- 0.19.3+ds1-4/debian/docker-buildx.docs	2025-10-29 17:50:55.000000000 +0000
+++ 0.21.3-0ubuntu1/debian/docker-buildx.docs	2025-06-09 13:52:48.000000000 +0000
@@ -1,2 +1,19 @@
-README.md
-docs/reference/*.md
+# Add NOTICE files because of Apache-2.0 license requirement
+vendor/github.com/docker/docker/NOTICE
+vendor/github.com/docker/cli/NOTICE
+vendor/github.com/docker/go-metrics/NOTICE
+vendor/github.com/containerd/containerd/v2/NOTICE
+vendor/github.com/compose-spec/compose-go/v2/NOTICE
+vendor/github.com/moby/patternmatcher/NOTICE
+vendor/github.com/moby/spdystream/NOTICE
+vendor/github.com/agext/levenshtein/NOTICE
+vendor/github.com/aws/aws-sdk-go-v2/NOTICE.txt
+vendor/github.com/aws/smithy-go/NOTICE
+vendor/github.com/prometheus/procfs/NOTICE
+vendor/github.com/prometheus/client_model/NOTICE
+vendor/github.com/prometheus/client_golang/NOTICE
+vendor/github.com/prometheus/common/NOTICE
+vendor/sigs.k8s.io/yaml/goyaml.v2/NOTICE
+vendor/gopkg.in/yaml.v3/NOTICE
+vendor/gopkg.in/yaml.v2/NOTICE
+vendor/google.golang.org/grpc/NOTICE.txt
diff -pruN 0.19.3+ds1-4/debian/docker-buildx.install 0.21.3-0ubuntu1/debian/docker-buildx.install
--- 0.19.3+ds1-4/debian/docker-buildx.install	2025-10-29 17:50:55.000000000 +0000
+++ 0.21.3-0ubuntu1/debian/docker-buildx.install	2025-06-09 13:52:48.000000000 +0000
@@ -1,2 +1 @@
-#!/usr/bin/dh-exec
-_build/bin/buildx => usr/libexec/docker/cli-plugins/docker-buildx
+bin/build/docker-buildx usr/libexec/docker/cli-plugins/
diff -pruN 0.19.3+ds1-4/debian/docker-buildx.lintian-overrides 0.21.3-0ubuntu1/debian/docker-buildx.lintian-overrides
--- 0.19.3+ds1-4/debian/docker-buildx.lintian-overrides	2025-10-29 17:50:55.000000000 +0000
+++ 0.21.3-0ubuntu1/debian/docker-buildx.lintian-overrides	1970-01-01 00:00:00.000000000 +0000
@@ -1,2 +0,0 @@
-# These manual pages are for the docker buildx subcommand
-docker-buildx: spare-manual-page [usr/share/man/man1/docker-buildx*]
diff -pruN 0.19.3+ds1-4/debian/docker-buildx.manpages 0.21.3-0ubuntu1/debian/docker-buildx.manpages
--- 0.19.3+ds1-4/debian/docker-buildx.manpages	2025-10-29 17:50:55.000000000 +0000
+++ 0.21.3-0ubuntu1/debian/docker-buildx.manpages	1970-01-01 00:00:00.000000000 +0000
@@ -1 +0,0 @@
-docs/reference/docker-buildx*.1
diff -pruN 0.19.3+ds1-4/debian/gbp.conf 0.21.3-0ubuntu1/debian/gbp.conf
--- 0.19.3+ds1-4/debian/gbp.conf	2025-10-29 17:50:55.000000000 +0000
+++ 0.21.3-0ubuntu1/debian/gbp.conf	1970-01-01 00:00:00.000000000 +0000
@@ -1,5 +0,0 @@
-[DEFAULT]
-debian-branch = debian/sid
-dist = DEP14
-# make gbp import-orig automatically link to upstream's tags
-upstream-vcs-tag = v%(version%~%-)s
diff -pruN 0.19.3+ds1-4/debian/gitlab-ci.yml 0.21.3-0ubuntu1/debian/gitlab-ci.yml
--- 0.19.3+ds1-4/debian/gitlab-ci.yml	2025-10-29 17:50:55.000000000 +0000
+++ 0.21.3-0ubuntu1/debian/gitlab-ci.yml	1970-01-01 00:00:00.000000000 +0000
@@ -1,5 +0,0 @@
----
-include:
-  - https://salsa.debian.org/salsa-ci-team/pipeline/raw/master/salsa-ci.yml
-  - https://salsa.debian.org/salsa-ci-team/pipeline/raw/master/pipeline-jobs.yml
-  - https://salsa.debian.org/go-team/infra/pkg-go-tools/-/raw/master/pipeline/test-archive.yml
diff -pruN 0.19.3+ds1-4/debian/golang-github-docker-buildx-dev.install 0.21.3-0ubuntu1/debian/golang-github-docker-buildx-dev.install
--- 0.19.3+ds1-4/debian/golang-github-docker-buildx-dev.install	2025-10-29 17:50:55.000000000 +0000
+++ 0.21.3-0ubuntu1/debian/golang-github-docker-buildx-dev.install	1970-01-01 00:00:00.000000000 +0000
@@ -1 +0,0 @@
-usr/share
diff -pruN 0.19.3+ds1-4/debian/patches/0001-Skip-tests-that-assume-a-git-repo-or-network-access.patch 0.21.3-0ubuntu1/debian/patches/0001-Skip-tests-that-assume-a-git-repo-or-network-access.patch
--- 0.19.3+ds1-4/debian/patches/0001-Skip-tests-that-assume-a-git-repo-or-network-access.patch	2025-10-29 17:50:55.000000000 +0000
+++ 0.21.3-0ubuntu1/debian/patches/0001-Skip-tests-that-assume-a-git-repo-or-network-access.patch	1970-01-01 00:00:00.000000000 +0000
@@ -1,23 +0,0 @@
-From: Nicolas Peugnet <nicolas@club1.fr>
-Date: Sun, 2 Feb 2025 17:45:20 +0100
-Subject: Skip tests that assume a git repo or network access
-
-The TestGit test assumes that we are in a git repository, which is not
-always the case when building Debian packages.
-The two test cases removed from TestGitRemoteURL assumes network access,
-which is not the case on Debian's build infrastructure.
----
- util/gitutil/gitutil_test.go | 3 +++
- 1 file changed, 3 insertions(+)
-
-diff --git a/util/gitutil/gitutil_test.go b/util/gitutil/gitutil_test.go
-index 06c6fa4..679c7d6 100644
---- a/util/gitutil/gitutil_test.go
-+++ b/util/gitutil/gitutil_test.go
-@@ -1,3 +1,6 @@
-+// Debian-local: assumes being in a Git repository
-+//go:build debian_disabled
-+
- package gitutil
- 
- import (
diff -pruN 0.19.3+ds1-4/debian/patches/0003-Add-an-option-to-generate-the-manual-pages.patch 0.21.3-0ubuntu1/debian/patches/0003-Add-an-option-to-generate-the-manual-pages.patch
--- 0.19.3+ds1-4/debian/patches/0003-Add-an-option-to-generate-the-manual-pages.patch	2025-10-29 17:50:55.000000000 +0000
+++ 0.21.3-0ubuntu1/debian/patches/0003-Add-an-option-to-generate-the-manual-pages.patch	1970-01-01 00:00:00.000000000 +0000
@@ -1,27 +0,0 @@
-From: Nicolas Peugnet <nicolas.peugnet@lip6.fr>
-Date: Tue, 18 Feb 2025 18:41:00 +0100
-Subject: Add an option to generate the manual pages
-
-For an unknown reason, Docker authors disabled the manual pages
-generation from their docs generator. This patch allows to use it.
-
-Forwarded: not-needed
----
- docs/generate.go | 4 ++++
- 1 file changed, 4 insertions(+)
-
-diff --git a/docs/generate.go b/docs/generate.go
-index 0cd0033..343f521 100644
---- a/docs/generate.go
-+++ b/docs/generate.go
-@@ -75,6 +75,10 @@ func gen(opts *options) error {
- 
- 	for _, format := range opts.formats {
- 		switch format {
-+		case "man":
-+			if err = c.GenManTree(cmd); err != nil {
-+				return err
-+			}
- 		case "md":
- 			if err = c.GenMarkdownTree(cmd); err != nil {
- 				return err
diff -pruN 0.19.3+ds1-4/debian/patches/0003-Revert-vendor-update-buildkit-to-v0.17.0-rc2.patch 0.21.3-0ubuntu1/debian/patches/0003-Revert-vendor-update-buildkit-to-v0.17.0-rc2.patch
--- 0.19.3+ds1-4/debian/patches/0003-Revert-vendor-update-buildkit-to-v0.17.0-rc2.patch	2025-10-29 17:50:55.000000000 +0000
+++ 0.21.3-0ubuntu1/debian/patches/0003-Revert-vendor-update-buildkit-to-v0.17.0-rc2.patch	1970-01-01 00:00:00.000000000 +0000
@@ -1,23 +0,0 @@
-From: Reinhard Tartler <siretart@tauware.de>
-Date: Sun, 19 Oct 2025 05:40:43 -0400
-Subject: Revert "vendor: update buildkit to v0.17.0-rc2"
-
-This reverts commit 6fcc6853d9c487d90aa65fc1b73844953b3077c9.
----
- util/confutil/config.go | 3 +--
- 1 file changed, 1 insertion(+), 2 deletions(-)
-
-diff --git a/util/confutil/config.go b/util/confutil/config.go
-index c3814c2..58036ac 100644
---- a/util/confutil/config.go
-+++ b/util/confutil/config.go
-@@ -86,8 +86,7 @@ func (c *Config) MkdirAll(dir string, perm os.FileMode) error {
- 	st, err := os.Stat(d)
- 	if err != nil {
- 		if os.IsNotExist(err) {
--			_, err := fs.MkdirAll(d, perm, chown, nil)
--			return err
-+			return fs.MkdirAll(d, perm, chown, nil)
- 		}
- 		return err
- 	}
diff -pruN 0.19.3+ds1-4/debian/patches/0004-otel-avoid-tracing-raw-os-arguments.patch 0.21.3-0ubuntu1/debian/patches/0004-otel-avoid-tracing-raw-os-arguments.patch
--- 0.19.3+ds1-4/debian/patches/0004-otel-avoid-tracing-raw-os-arguments.patch	2025-10-29 17:50:55.000000000 +0000
+++ 0.21.3-0ubuntu1/debian/patches/0004-otel-avoid-tracing-raw-os-arguments.patch	1970-01-01 00:00:00.000000000 +0000
@@ -1,90 +0,0 @@
-From: Tonis Tiigi <tonistiigi@gmail.com>
-Date: Mon, 3 Feb 2025 22:14:55 -0800
-Subject: [PATCH] otel: avoid tracing raw os arguments
-
-User might pass a value that they don't expect to
-be kept in trace storage. For example some cache backends
-allow passing authentication tokens with a flag.
-
-Instead use known primary config values as attributes
-of the root span.
-
-Signed-off-by: Tonis Tiigi <tonistiigi@gmail.com>
-
-Origin: upstream, https://github.com/docker/buildx/commit/0982070af84d476b232d2d75ab551c3222592db1
-Bug-Debian: http://bugs.debian.org/1100991
-Backported-by: Nicolas Peugnet <nicolas@club1.fr>
- * Added missing import in commands/bake.go
- * Fixed conflicts in other hunks
----
- commands/bake.go      | 6 +++++-
- commands/build.go     | 6 +++++-
- util/tracing/trace.go | 7 +++----
- 3 files changed, 13 insertions(+), 6 deletions(-)
-
-diff --git a/commands/bake.go b/commands/bake.go
-index 12befc8..3405f0c 100644
---- a/commands/bake.go
-+++ b/commands/bake.go
-@@ -61,7 +61,11 @@ type bakeOptions struct {
- func runBake(ctx context.Context, dockerCli command.Cli, targets []string, in bakeOptions, cFlags commonFlags) (err error) {
- 	mp := dockerCli.MeterProvider()
- 
--	ctx, end, err := tracing.TraceCurrentCommand(ctx, "bake")
-+	ctx, end, err := tracing.TraceCurrentCommand(ctx, append([]string{"bake"}, targets...),
-+		attribute.String("builder", in.builder),
-+		attribute.StringSlice("targets", targets),
-+		attribute.StringSlice("files", in.files),
-+	)
- 	if err != nil {
- 		return err
- 	}
-diff --git a/commands/build.go b/commands/build.go
-index 7850c1b..3bbdb29 100644
---- a/commands/build.go
-+++ b/commands/build.go
-@@ -282,7 +282,11 @@ func (o *buildOptionsHash) String() string {
- func runBuild(ctx context.Context, dockerCli command.Cli, options buildOptions) (err error) {
- 	mp := dockerCli.MeterProvider()
- 
--	ctx, end, err := tracing.TraceCurrentCommand(ctx, "build")
-+	ctx, end, err := tracing.TraceCurrentCommand(ctx, []string{"build", options.contextPath},
-+		attribute.String("builder", options.builder),
-+		attribute.String("context", options.contextPath),
-+		attribute.String("dockerfile", options.dockerfileName),
-+	)
- 	if err != nil {
- 		return err
- 	}
-diff --git a/util/tracing/trace.go b/util/tracing/trace.go
-index d16ca9f..4e22013 100644
---- a/util/tracing/trace.go
-+++ b/util/tracing/trace.go
-@@ -2,7 +2,6 @@ package tracing
- 
- import (
- 	"context"
--	"os"
- 	"strings"
- 
- 	"github.com/moby/buildkit/util/tracing/delegated"
-@@ -13,7 +12,7 @@ import (
- 	"go.opentelemetry.io/otel/trace"
- )
- 
--func TraceCurrentCommand(ctx context.Context, name string) (context.Context, func(error), error) {
-+func TraceCurrentCommand(ctx context.Context, args []string, attrs ...attribute.KeyValue) (context.Context, func(error), error) {
- 	opts := []sdktrace.TracerProviderOption{
- 		sdktrace.WithResource(detect.Resource()),
- 		sdktrace.WithBatcher(delegated.DefaultExporter),
-@@ -25,8 +24,8 @@ func TraceCurrentCommand(ctx context.Context, name string) (context.Context, fun
- 	}
- 
- 	tp := sdktrace.NewTracerProvider(opts...)
--	ctx, span := tp.Tracer("").Start(ctx, name, trace.WithAttributes(
--		attribute.String("command", strings.Join(os.Args, " ")),
-+	ctx, span := tp.Tracer("").Start(ctx, strings.Join(args, " "), trace.WithAttributes(
-+		attrs...,
- 	))
- 
- 	return ctx, func(err error) {
diff -pruN 0.19.3+ds1-4/debian/patches/series 0.21.3-0ubuntu1/debian/patches/series
--- 0.19.3+ds1-4/debian/patches/series	2025-10-29 17:50:55.000000000 +0000
+++ 0.21.3-0ubuntu1/debian/patches/series	1970-01-01 00:00:00.000000000 +0000
@@ -1,4 +0,0 @@
-0001-Skip-tests-that-assume-a-git-repo-or-network-access.patch
-0003-Add-an-option-to-generate-the-manual-pages.patch
-0003-Revert-vendor-update-buildkit-to-v0.17.0-rc2.patch
-0004-otel-avoid-tracing-raw-os-arguments.patch
diff -pruN 0.19.3+ds1-4/debian/rules 0.21.3-0ubuntu1/debian/rules
--- 0.19.3+ds1-4/debian/rules	2025-10-29 17:50:55.000000000 +0000
+++ 0.21.3-0ubuntu1/debian/rules	2025-06-09 13:52:48.000000000 +0000
@@ -1,33 +1,54 @@
 #!/usr/bin/make -f
+# -*- makefile -*-
 
 include /usr/share/dpkg/pkg-info.mk
 
-export DH_GOPKG := github.com/docker/buildx
-LDFLAGS := -X $(DH_GOPKG)/version.Version=$(DEB_VERSION_UPSTREAM)
-LDFLAGS += -X $(DH_GOPKG)/version.Revision=$(DEB_VERSION)
-LDFLAGS += -X $(DH_GOPKG)/version.Package=$(DH_GOPKG)
+export VERSION := $(DEB_VERSION_UPSTREAM)
 
-export DH_GOLANG_INSTALL_EXTRA := driver/kubernetes/context/fixtures
+export BUILDX_GITCOMMIT := $(DEB_VERSION)
 
-# The integration tests depend on a "registry" binary, unavailable in Debian
-export SKIP_INTEGRATION_TESTS=1
-
-%:
-	dh $@ --builddirectory=_build --buildsystem=golang
+# temporary build path (see http://golang.org/doc/code.html#GOPATH)
+OUR_GOPATH := $(CURDIR)/.gopath
+export GOPATH := $(OUR_GOPATH)
+export GOCACHE := $(CURDIR)/.gocache
+
+# https://blog.golang.org/go116-module-changes (TODO figure out a new solution for Go 1.17+)
+export GO111MODULE := auto
+
+# Build with Golang 1.24
+export PATH := /usr/lib/go-1.24/bin:$(PATH)
+
+override_dh_gencontrol:
+	echo 'libc:Built-Using=$(shell dpkg-query -f '$${source:Package} (= $${source:Version})' -W libc-dev-bin)' >> debian/docker-buildx.substvars
+	# use "dh_golang" to generate "misc:Built-Using" (via "go list")
+	DH_GOLANG_BUILDPKG=' \
+		github.com/docker/buildx/cmd/buildx \
+	' dh_golang --builddirectory='$(OUR_GOPATH:$(CURDIR)/%=%)'
+	dh_gencontrol
 
 override_dh_auto_build:
-	dh_auto_build --builddirectory=_build --buildsystem=golang -- -ldflags '$(LDFLAGS)'
-	# Generate the manual pages
-	_build/bin/docs --formats=man --source=docs/reference
+	make \
+		VERSION='$(VERSION)' \
+		REVISION='$(BUILDX_GITCOMMIT)' \
+		build
+
+# Basic smoke test to ensure binary built successfully
+# (especially since its actual tests, even unit tests, are *really* involved.
+# They depend on docker itself which requires user's permission adjustement or
+# running with root. That's just the beginning :)
+# Let's smoke test the plugin via autopkgtest. FWIW this is the same thing we
+# do with src:docker.io-app.
+override_dh_auto_test:
+ifeq (,$(filter nocheck,$(DEB_BUILD_OPTIONS)))
+	./bin/build/docker-buildx version
+endif
 
-# Install binaries manually, using the install file, as the executable need to
-# be in a specific directory.
 override_dh_auto_install:
-	dh_auto_install -- --no-binaries
+	# we use dh_install / dh-exec for installing files where they need to be
 
-# Required for dh-golang-autopkgtest as a directory named "build" exists in
-# the sources.
-build: .FORCE
+override_dh_dwz:
+	# do not call dh_dwz to avoid "dwz: Too few files for multifile optimization"
+	# dh_dwz --no-dwz-multifile also does not work :)
 
-.PHONY: .FORCE
-.FORCE:
+%:
+	dh $@
diff -pruN 0.19.3+ds1-4/debian/tests/basic-smoke 0.21.3-0ubuntu1/debian/tests/basic-smoke
--- 0.19.3+ds1-4/debian/tests/basic-smoke	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/debian/tests/basic-smoke	2025-06-09 13:52:48.000000000 +0000
@@ -0,0 +1,39 @@
+#!/bin/bash
+
+set -eux -o pipefail
+
+# Create a new builder
+docker buildx create --name mybuilder --use --bootstrap
+
+# Check the status of the builder
+docker buildx inspect mybuilder | grep Status | grep running
+
+# Write a very basic Dockerfile
+cat >Dockerfile <<EOF
+FROM ubuntu:22.04
+RUN apt-get update
+CMD cat /etc/os-release | grep VERSION
+EOF
+
+# There is still an issue related to loading multi-arch images. More info see
+# https://github.com/docker/buildx/issues/59 and
+# https://github.com/docker/roadmap/issues/371 .
+# Users can pass --push and publish the manifest list to a registry directly.
+# For our local test purpose let's just create a tarball with the content of
+# the image for now.
+docker buildx --builder mybuilder build --output=type=oci,dest=hello-buildx.tar --platform linux/amd64,linux/arm64,linux/s390x -t hello-buildx . 
+
+file hello-buildx.tar | grep 'tar archive'
+
+# Build image in a single architecture and load it
+docker buildx --builder mybuilder build --platform linux/amd64 -t hello-buildx:buildx-latest --load . 
+
+# Check if the image was loaded
+docker image ls | grep hello-buildx | grep buildx-latest
+
+# Run a container based on the built image
+docker run hello-buildx:buildx-latest | grep Jammy
+
+# Stop and remove the builder
+docker buildx stop mybuilder
+docker buildx rm mybuilder
diff -pruN 0.19.3+ds1-4/debian/tests/control 0.21.3-0ubuntu1/debian/tests/control
--- 0.19.3+ds1-4/debian/tests/control	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/debian/tests/control	2025-06-09 13:52:48.000000000 +0000
@@ -0,0 +1,3 @@
+Tests: basic-smoke
+Depends: @, docker.io, file
+Restrictions: isolation-machine needs-root allow-stderr
diff -pruN 0.19.3+ds1-4/debian/upstream/metadata 0.21.3-0ubuntu1/debian/upstream/metadata
--- 0.19.3+ds1-4/debian/upstream/metadata	2025-10-29 17:50:55.000000000 +0000
+++ 0.21.3-0ubuntu1/debian/upstream/metadata	1970-01-01 00:00:00.000000000 +0000
@@ -1,5 +0,0 @@
----
-Bug-Database: https://github.com/docker/buildx/issues
-Bug-Submit: https://github.com/docker/buildx/issues/new
-Repository: https://github.com/docker/buildx.git
-Repository-Browse: https://github.com/docker/buildx
diff -pruN 0.19.3+ds1-4/debian/watch 0.21.3-0ubuntu1/debian/watch
--- 0.19.3+ds1-4/debian/watch	2025-10-29 17:50:55.000000000 +0000
+++ 0.21.3-0ubuntu1/debian/watch	2025-07-03 13:26:42.000000000 +0000
@@ -1,8 +1,8 @@
-Version: 5
+version=4
 
-Template: Github
-Owner: docker
-Project: buildx
-Dversion-Mangle: auto
-Uversion-Mangle: auto
-Repacksuffix: +ds1
+opts=\
+searchmode=plain,\
+dversionmangle=s/[+~](debian|dfsg|ds|deb)\d*$//,\
+uversionmangle=s/(\d)[_\.\-\+]?((RC|rc|pre|dev|beta|alpha)\d*)$/$1~$2/ \
+  https://api.github.com/repos/docker/buildx/tags?per_page=50 \
+  https://api.github.com/repos/docker/buildx/tarball/refs/tags/[vV]?((?:\d+\.)*\d+)
diff -pruN 0.19.3+ds1-4/docs/bake-reference.md 0.21.3-0ubuntu1/docs/bake-reference.md
--- 0.19.3+ds1-4/docs/bake-reference.md	2024-12-17 11:25:19.000000000 +0000
+++ 0.21.3-0ubuntu1/docs/bake-reference.md	2025-03-17 16:14:25.000000000 +0000
@@ -19,8 +19,8 @@ By default, Bake uses the following look
 3. `docker-compose.yml`
 4. `docker-compose.yaml`
 5. `docker-bake.json`
-6. `docker-bake.override.json`
-7. `docker-bake.hcl`
+6. `docker-bake.hcl`
+7. `docker-bake.override.json`
 8. `docker-bake.override.hcl`
 
 You can specify the file location explicitly using the `--file` flag:
@@ -221,8 +221,10 @@ The following table shows the complete l
 | [`attest`](#targetattest)                       | List    | Build attestations                                                   |
 | [`cache-from`](#targetcache-from)               | List    | External cache sources                                               |
 | [`cache-to`](#targetcache-to)                   | List    | External cache destinations                                          |
+| [`call`](#targetcall)                           | String  | Specify the frontend method to call for the target.                  |
 | [`context`](#targetcontext)                     | String  | Set of files located in the specified path or URL                    |
 | [`contexts`](#targetcontexts)                   | Map     | Additional build contexts                                            |
+| [`description`](#targetdescription)             | String  | Description of a target                                              |
 | [`dockerfile-inline`](#targetdockerfile-inline) | String  | Inline Dockerfile string                                             |
 | [`dockerfile`](#targetdockerfile)               | String  | Dockerfile location                                                  |
 | [`inherits`](#targetinherits)                   | List    | Inherit attributes from other targets                                |
@@ -283,19 +285,11 @@ The key takes a list of annotations, in
 
 ```hcl
 target "default" {
-  output = ["type=image,name=foo"]
+  output = [{ type = "image", name = "foo" }]
   annotations = ["org.opencontainers.image.authors=dvdksn"]
 }
 ```
 
-is the same as
-
-```hcl
-target "default" {
-  output = ["type=image,name=foo,annotation.org.opencontainers.image.authors=dvdksn"]
-}
-```
-
 By default, the annotation is added to image manifests. You can configure the
 level of the annotations by adding a prefix to the annotation, containing a
 comma-separated list of all the levels that you want to annotate. The following
@@ -303,7 +297,7 @@ example adds annotations to both the ima
 
 ```hcl
 target "default" {
-  output = ["type=image,name=foo"]
+  output = [{ type = "image", name = "foo" }]
   annotations = ["index,manifest:org.opencontainers.image.authors=dvdksn"]
 }
 ```
@@ -319,8 +313,13 @@ This attribute accepts the long-form CSV
 ```hcl
 target "default" {
   attest = [
-    "type=provenance,mode=min",
-    "type=sbom"
+    {
+      type = "provenance",
+      mode = "max",
+    },
+    {
+      type = "sbom",
+    }
   ]
 }
 ```
@@ -336,8 +335,15 @@ This takes a list value, so you can spec
 ```hcl
 target "app" {
   cache-from = [
-    "type=s3,region=eu-west-1,bucket=mybucket",
-    "user/repo:cache",
+    {
+      type = "s3",
+      region = "eu-west-1",
+      bucket = "mybucket"
+    },
+    {
+      type = "registry",
+      ref = "user/repo:cache"
+    }
   ]
 }
 ```
@@ -353,8 +359,14 @@ This takes a list value, so you can spec
 ```hcl
 target "app" {
   cache-to = [
-    "type=s3,region=eu-west-1,bucket=mybucket",
-    "type=inline"
+    {
+      type = "s3",
+      region = "eu-west-1",
+      bucket = "mybucket"
+    },
+    {
+      type = "inline",
+    }
   ]
 }
 ```
@@ -371,6 +383,13 @@ target "app" {
 }
 ```
 
+Supported values are:
+
+- `build`: builds the target (default)
+- `check`: evaluates [build checks](https://docs.docker.com/build/checks/) for the target
+- `outline`: displays the target's build arguments and their default values if available
+- `targets`: lists all Bake targets in the loaded definition, along with their [description](#targetdescription).
+
 For more information about frontend methods, refer to the CLI reference for
 [`docker buildx build --call`](https://docs.docker.com/reference/cli/docker/buildx/build/#call).
 
@@ -481,6 +500,25 @@ FROM baseapp
 RUN echo "Hello world"
 ```
 
+### `target.description`
+
+Defines a human-readable description for the target, clarifying its purpose or
+functionality.
+
+```hcl
+target "lint" {
+    description = "Runs golangci-lint to detect style errors"
+    args = {
+        GOLANGCI_LINT_VERSION = null
+    }
+    dockerfile = "lint.Dockerfile"
+}
+```
+
+This attribute is useful when combined with the `docker buildx bake --list=targets`
+option, providing a more informative output when listing the available build
+targets in a Bake file.
+
 ### `target.dockerfile-inline`
 
 Uses the string value as an inline Dockerfile for the build target.
@@ -835,7 +873,7 @@ The following example configures the tar
 
 ```hcl
 target "default" {
-  output = ["type=cacheonly"]
+  output = [{ type = "cacheonly" }]
 }
 ```
 
@@ -875,8 +913,8 @@ variable "HOME" {
 
 target "default" {
   secret = [
-    "type=env,id=KUBECONFIG",
-    "type=file,id=aws,src=${HOME}/.aws/credentials"
+    { type = "env", id = "KUBECONFIG" },
+    { type = "file", id = "aws", src = "${HOME}/.aws/credentials" },
   ]
 }
 ```
@@ -920,7 +958,7 @@ This can be useful if you need to access
 
 ```hcl
 target "default" {
-  ssh = ["default"]
+  ssh = [{ id = "default" }]
 }
 ```
 
diff -pruN 0.19.3+ds1-4/docs/reference/buildx.md 0.21.3-0ubuntu1/docs/reference/buildx.md
--- 0.19.3+ds1-4/docs/reference/buildx.md	2024-12-17 11:25:19.000000000 +0000
+++ 0.21.3-0ubuntu1/docs/reference/buildx.md	2025-03-17 16:14:25.000000000 +0000
@@ -17,6 +17,7 @@ Extended build capabilities with BuildKi
 | [`debug`](buildx_debug.md)           | Start debugger (EXPERIMENTAL)                   |
 | [`dial-stdio`](buildx_dial-stdio.md) | Proxy current stdio streams to builder instance |
 | [`du`](buildx_du.md)                 | Disk usage                                      |
+| [`history`](buildx_history.md)       | Commands to work on build records               |
 | [`imagetools`](buildx_imagetools.md) | Commands to work on images in registry          |
 | [`inspect`](buildx_inspect.md)       | Inspect current builder instance                |
 | [`ls`](buildx_ls.md)                 | List builder instances                          |
diff -pruN 0.19.3+ds1-4/docs/reference/buildx_bake.md 0.21.3-0ubuntu1/docs/reference/buildx_bake.md
--- 0.19.3+ds1-4/docs/reference/buildx_bake.md	2024-12-17 11:25:19.000000000 +0000
+++ 0.21.3-0ubuntu1/docs/reference/buildx_bake.md	2025-03-17 16:14:25.000000000 +0000
@@ -13,24 +13,25 @@ Build from a file
 
 ### Options
 
-| Name                                | Type          | Default | Description                                                                                         |
-|:------------------------------------|:--------------|:--------|:----------------------------------------------------------------------------------------------------|
-| `--allow`                           | `stringArray` |         | Allow build to access specified resources                                                           |
-| [`--builder`](#builder)             | `string`      |         | Override the configured builder instance                                                            |
-| [`--call`](#call)                   | `string`      | `build` | Set method for evaluating build (`check`, `outline`, `targets`)                                     |
-| [`--check`](#check)                 | `bool`        |         | Shorthand for `--call=check`                                                                        |
-| `-D`, `--debug`                     | `bool`        |         | Enable debug logging                                                                                |
-| [`-f`](#file), [`--file`](#file)    | `stringArray` |         | Build definition file                                                                               |
-| `--load`                            | `bool`        |         | Shorthand for `--set=*.output=type=docker`                                                          |
-| [`--metadata-file`](#metadata-file) | `string`      |         | Write build result metadata to a file                                                               |
-| [`--no-cache`](#no-cache)           | `bool`        |         | Do not use cache when building the image                                                            |
-| [`--print`](#print)                 | `bool`        |         | Print the options without building                                                                  |
-| [`--progress`](#progress)           | `string`      | `auto`  | Set type of progress output (`auto`, `plain`, `tty`, `rawjson`). Use plain to show container output |
-| [`--provenance`](#provenance)       | `string`      |         | Shorthand for `--set=*.attest=type=provenance`                                                      |
-| [`--pull`](#pull)                   | `bool`        |         | Always attempt to pull all referenced images                                                        |
-| `--push`                            | `bool`        |         | Shorthand for `--set=*.output=type=registry`                                                        |
-| [`--sbom`](#sbom)                   | `string`      |         | Shorthand for `--set=*.attest=type=sbom`                                                            |
-| [`--set`](#set)                     | `stringArray` |         | Override target value (e.g., `targetpattern.key=value`)                                             |
+| Name                                | Type          | Default | Description                                                                                                  |
+|:------------------------------------|:--------------|:--------|:-------------------------------------------------------------------------------------------------------------|
+| [`--allow`](#allow)                 | `stringArray` |         | Allow build to access specified resources                                                                    |
+| [`--builder`](#builder)             | `string`      |         | Override the configured builder instance                                                                     |
+| [`--call`](#call)                   | `string`      | `build` | Set method for evaluating build (`check`, `outline`, `targets`)                                              |
+| [`--check`](#check)                 | `bool`        |         | Shorthand for `--call=check`                                                                                 |
+| `-D`, `--debug`                     | `bool`        |         | Enable debug logging                                                                                         |
+| [`-f`](#file), [`--file`](#file)    | `stringArray` |         | Build definition file                                                                                        |
+| [`--list`](#list)                   | `string`      |         | List targets or variables                                                                                    |
+| `--load`                            | `bool`        |         | Shorthand for `--set=*.output=type=docker`                                                                   |
+| [`--metadata-file`](#metadata-file) | `string`      |         | Write build result metadata to a file                                                                        |
+| [`--no-cache`](#no-cache)           | `bool`        |         | Do not use cache when building the image                                                                     |
+| [`--print`](#print)                 | `bool`        |         | Print the options without building                                                                           |
+| [`--progress`](#progress)           | `string`      | `auto`  | Set type of progress output (`auto`, `quiet`, `plain`, `tty`, `rawjson`). Use plain to show container output |
+| [`--provenance`](#provenance)       | `string`      |         | Shorthand for `--set=*.attest=type=provenance`                                                               |
+| [`--pull`](#pull)                   | `bool`        |         | Always attempt to pull all referenced images                                                                 |
+| `--push`                            | `bool`        |         | Shorthand for `--set=*.output=type=registry`                                                                 |
+| [`--sbom`](#sbom)                   | `string`      |         | Shorthand for `--set=*.attest=type=sbom`                                                                     |
+| [`--set`](#set)                     | `stringArray` |         | Override target value (e.g., `targetpattern.key=value`)                                                      |
 
 
 <!---MARKER_GEN_END-->
@@ -50,6 +51,80 @@ guide for introduction to writing bake f
 
 ## Examples
 
+### <a name="allow"></a> Allow extra privileged entitlement (--allow)
+
+```text
+--allow=ENTITLEMENT[=VALUE]
+```
+
+Entitlements are designed to provide controlled access to privileged
+operations. By default, Buildx and BuildKit operate with restricted
+permissions to protect users and their systems from unintended side effects or
+security risks. The `--allow` flag explicitly grants access to additional
+entitlements, making it clear when a build or bake operation requires elevated
+privileges.
+
+In addition to BuildKit's `network.host` and `security.insecure` entitlements
+(see [`docker buildx build --allow`](https://docs.docker.com/reference/cli/docker/buildx/build/#allow)),
+Bake supports file system entitlements that grant granular control over file
+system access. These are particularly useful when working with builds that need
+access to files outside the default working directory.
+
+Bake supports the following filesystem entitlements:
+
+- `--allow fs=<path|*>` - Grant read and write access to files outside of the
+  working directory.
+- `--allow fs.read=<path|*>` - Grant read access to files outside of the
+  working directory.
+- `--allow fs.write=<path|*>` - Grant write access to files outside of the
+  working directory.
+
+The `fs` entitlements take a path value (relative or absolute) to a directory
+on the filesystem. Alternatively, you can pass a wildcard (`*`) to allow Bake
+to access the entire filesystem.
+
+### Example: fs.read
+
+Given the following Bake configuration, Bake would need to access the parent
+directory, relative to the Bake file.
+
+```hcl
+target "app" {
+  context = "../src"
+}
+```
+
+Assuming `docker buildx bake app` is executed in the same directory as the
+`docker-bake.hcl` file, you would need to explicitly allow Bake to read from
+the `../src` directory. In this case, the following invocations all work:
+
+```console
+$ docker buildx bake --allow fs.read=* app
+$ docker buildx bake --allow fs.read=../src app
+$ docker buildx bake --allow fs=* app
+```
+
+### Example: fs.write
+
+The following `docker-bake.hcl` file requires write access to the `/tmp`
+directory.
+
+```hcl
+target "app" {
+  output = "/tmp"
+}
+```
+
+Assuming `docker buildx bake app` is executed outside of the `/tmp` directory,
+you would need to allow the `fs.write` entitlement, either by specifying the
+path or using a wildcard:
+
+```console
+$ docker buildx bake --allow fs=/tmp app
+$ docker buildx bake --allow fs.write=/tmp app
+$ docker buildx bake --allow fs.write=* app
+```
+
 ### <a name="builder"></a> Override the configured builder instance (--builder)
 
 Same as [`buildx --builder`](buildx.md#builder).
@@ -101,6 +176,42 @@ $ docker buildx bake -f docker-bake.dev.
 See the [Bake file reference](https://docs.docker.com/build/bake/reference/)
 for more details.
 
+### <a name="list"></a> List targets and variables (--list)
+
+The `--list` flag displays all available targets or variables in the Bake
+configuration, along with a description (if set using the `description`
+property in the Bake file).
+
+To list all targets:
+
+```console {title="List targets"}
+$ docker buildx bake --list=targets
+TARGET              DESCRIPTION
+binaries
+default             binaries
+update-docs
+validate
+validate-golangci   Validate .golangci.yml schema (does not run Go linter)
+```
+
+To list variables:
+
+```console
+$ docker buildx bake --list=variables
+VARIABLE      VALUE                DESCRIPTION
+REGISTRY      docker.io/username   Registry and namespace
+IMAGE_NAME    my-app               Image name
+GO_VERSION    <null>
+```
+
+By default, the output of `docker buildx bake --list` is presented in a table
+format. Alternatively, you can use a long-form CSV syntax and specify a
+`format` attribute to output the list in JSON.
+
+```console
+$ docker buildx bake --list=type=targets,format=json
+```
+
 ### <a name="metadata-file"></a> Write build results metadata to a file (--metadata-file)
 
 Similar to [`buildx build --metadata-file`](buildx_build.md#metadata-file) but
diff -pruN 0.19.3+ds1-4/docs/reference/buildx_build.md 0.21.3-0ubuntu1/docs/reference/buildx_build.md
--- 0.19.3+ds1-4/docs/reference/buildx_build.md	2024-12-17 11:25:19.000000000 +0000
+++ 0.21.3-0ubuntu1/docs/reference/buildx_build.md	2025-03-17 16:14:25.000000000 +0000
@@ -13,46 +13,46 @@ Start a build
 
 ### Options
 
-| Name                                    | Type          | Default   | Description                                                                                         |
-|:----------------------------------------|:--------------|:----------|:----------------------------------------------------------------------------------------------------|
-| [`--add-host`](#add-host)               | `stringSlice` |           | Add a custom host-to-IP mapping (format: `host:ip`)                                                 |
-| [`--allow`](#allow)                     | `stringSlice` |           | Allow extra privileged entitlement (e.g., `network.host`, `security.insecure`)                      |
-| [`--annotation`](#annotation)           | `stringArray` |           | Add annotation to the image                                                                         |
-| [`--attest`](#attest)                   | `stringArray` |           | Attestation parameters (format: `type=sbom,generator=image`)                                        |
-| [`--build-arg`](#build-arg)             | `stringArray` |           | Set build-time variables                                                                            |
-| [`--build-context`](#build-context)     | `stringArray` |           | Additional build contexts (e.g., name=path)                                                         |
-| [`--builder`](#builder)                 | `string`      |           | Override the configured builder instance                                                            |
-| [`--cache-from`](#cache-from)           | `stringArray` |           | External cache sources (e.g., `user/app:cache`, `type=local,src=path/to/dir`)                       |
-| [`--cache-to`](#cache-to)               | `stringArray` |           | Cache export destinations (e.g., `user/app:cache`, `type=local,dest=path/to/dir`)                   |
-| [`--call`](#call)                       | `string`      | `build`   | Set method for evaluating build (`check`, `outline`, `targets`)                                     |
-| [`--cgroup-parent`](#cgroup-parent)     | `string`      |           | Set the parent cgroup for the `RUN` instructions during build                                       |
-| [`--check`](#check)                     | `bool`        |           | Shorthand for `--call=check`                                                                        |
-| `-D`, `--debug`                         | `bool`        |           | Enable debug logging                                                                                |
-| `--detach`                              | `bool`        |           | Detach buildx server (supported only on linux) (EXPERIMENTAL)                                       |
-| [`-f`](#file), [`--file`](#file)        | `string`      |           | Name of the Dockerfile (default: `PATH/Dockerfile`)                                                 |
-| `--iidfile`                             | `string`      |           | Write the image ID to a file                                                                        |
-| `--label`                               | `stringArray` |           | Set metadata for an image                                                                           |
-| [`--load`](#load)                       | `bool`        |           | Shorthand for `--output=type=docker`                                                                |
-| [`--metadata-file`](#metadata-file)     | `string`      |           | Write build result metadata to a file                                                               |
-| [`--network`](#network)                 | `string`      | `default` | Set the networking mode for the `RUN` instructions during build                                     |
-| `--no-cache`                            | `bool`        |           | Do not use cache when building the image                                                            |
-| [`--no-cache-filter`](#no-cache-filter) | `stringArray` |           | Do not cache specified stages                                                                       |
-| [`-o`](#output), [`--output`](#output)  | `stringArray` |           | Output destination (format: `type=local,dest=path`)                                                 |
-| [`--platform`](#platform)               | `stringArray` |           | Set target platform for build                                                                       |
-| [`--progress`](#progress)               | `string`      | `auto`    | Set type of progress output (`auto`, `plain`, `tty`, `rawjson`). Use plain to show container output |
-| [`--provenance`](#provenance)           | `string`      |           | Shorthand for `--attest=type=provenance`                                                            |
-| `--pull`                                | `bool`        |           | Always attempt to pull all referenced images                                                        |
-| [`--push`](#push)                       | `bool`        |           | Shorthand for `--output=type=registry`                                                              |
-| `-q`, `--quiet`                         | `bool`        |           | Suppress the build output and print image ID on success                                             |
-| `--root`                                | `string`      |           | Specify root directory of server to connect (EXPERIMENTAL)                                          |
-| [`--sbom`](#sbom)                       | `string`      |           | Shorthand for `--attest=type=sbom`                                                                  |
-| [`--secret`](#secret)                   | `stringArray` |           | Secret to expose to the build (format: `id=mysecret[,src=/local/secret]`)                           |
-| `--server-config`                       | `string`      |           | Specify buildx server config file (used only when launching new server) (EXPERIMENTAL)              |
-| [`--shm-size`](#shm-size)               | `bytes`       | `0`       | Shared memory size for build containers                                                             |
-| [`--ssh`](#ssh)                         | `stringArray` |           | SSH agent socket or keys to expose to the build (format: `default\|<id>[=<socket>\|<key>[,<key>]]`) |
-| [`-t`](#tag), [`--tag`](#tag)           | `stringArray` |           | Name and optionally a tag (format: `name:tag`)                                                      |
-| [`--target`](#target)                   | `string`      |           | Set the target build stage to build                                                                 |
-| [`--ulimit`](#ulimit)                   | `ulimit`      |           | Ulimit options                                                                                      |
+| Name                                    | Type          | Default   | Description                                                                                                  |
+|:----------------------------------------|:--------------|:----------|:-------------------------------------------------------------------------------------------------------------|
+| [`--add-host`](#add-host)               | `stringSlice` |           | Add a custom host-to-IP mapping (format: `host:ip`)                                                          |
+| [`--allow`](#allow)                     | `stringArray` |           | Allow extra privileged entitlement (e.g., `network.host`, `security.insecure`)                               |
+| [`--annotation`](#annotation)           | `stringArray` |           | Add annotation to the image                                                                                  |
+| [`--attest`](#attest)                   | `stringArray` |           | Attestation parameters (format: `type=sbom,generator=image`)                                                 |
+| [`--build-arg`](#build-arg)             | `stringArray` |           | Set build-time variables                                                                                     |
+| [`--build-context`](#build-context)     | `stringArray` |           | Additional build contexts (e.g., name=path)                                                                  |
+| [`--builder`](#builder)                 | `string`      |           | Override the configured builder instance                                                                     |
+| [`--cache-from`](#cache-from)           | `stringArray` |           | External cache sources (e.g., `user/app:cache`, `type=local,src=path/to/dir`)                                |
+| [`--cache-to`](#cache-to)               | `stringArray` |           | Cache export destinations (e.g., `user/app:cache`, `type=local,dest=path/to/dir`)                            |
+| [`--call`](#call)                       | `string`      | `build`   | Set method for evaluating build (`check`, `outline`, `targets`)                                              |
+| [`--cgroup-parent`](#cgroup-parent)     | `string`      |           | Set the parent cgroup for the `RUN` instructions during build                                                |
+| [`--check`](#check)                     | `bool`        |           | Shorthand for `--call=check`                                                                                 |
+| `-D`, `--debug`                         | `bool`        |           | Enable debug logging                                                                                         |
+| `--detach`                              | `bool`        |           | Detach buildx server (supported only on linux) (EXPERIMENTAL)                                                |
+| [`-f`](#file), [`--file`](#file)        | `string`      |           | Name of the Dockerfile (default: `PATH/Dockerfile`)                                                          |
+| `--iidfile`                             | `string`      |           | Write the image ID to a file                                                                                 |
+| `--label`                               | `stringArray` |           | Set metadata for an image                                                                                    |
+| [`--load`](#load)                       | `bool`        |           | Shorthand for `--output=type=docker`                                                                         |
+| [`--metadata-file`](#metadata-file)     | `string`      |           | Write build result metadata to a file                                                                        |
+| [`--network`](#network)                 | `string`      | `default` | Set the networking mode for the `RUN` instructions during build                                              |
+| `--no-cache`                            | `bool`        |           | Do not use cache when building the image                                                                     |
+| [`--no-cache-filter`](#no-cache-filter) | `stringArray` |           | Do not cache specified stages                                                                                |
+| [`-o`](#output), [`--output`](#output)  | `stringArray` |           | Output destination (format: `type=local,dest=path`)                                                          |
+| [`--platform`](#platform)               | `stringArray` |           | Set target platform for build                                                                                |
+| [`--progress`](#progress)               | `string`      | `auto`    | Set type of progress output (`auto`, `quiet`, `plain`, `tty`, `rawjson`). Use plain to show container output |
+| [`--provenance`](#provenance)           | `string`      |           | Shorthand for `--attest=type=provenance`                                                                     |
+| `--pull`                                | `bool`        |           | Always attempt to pull all referenced images                                                                 |
+| [`--push`](#push)                       | `bool`        |           | Shorthand for `--output=type=registry`                                                                       |
+| `-q`, `--quiet`                         | `bool`        |           | Suppress the build output and print image ID on success                                                      |
+| `--root`                                | `string`      |           | Specify root directory of server to connect (EXPERIMENTAL)                                                   |
+| [`--sbom`](#sbom)                       | `string`      |           | Shorthand for `--attest=type=sbom`                                                                           |
+| [`--secret`](#secret)                   | `stringArray` |           | Secret to expose to the build (format: `id=mysecret[,src=/local/secret]`)                                    |
+| `--server-config`                       | `string`      |           | Specify buildx server config file (used only when launching new server) (EXPERIMENTAL)                       |
+| [`--shm-size`](#shm-size)               | `bytes`       | `0`       | Shared memory size for build containers                                                                      |
+| [`--ssh`](#ssh)                         | `stringArray` |           | SSH agent socket or keys to expose to the build (format: `default\|<id>[=<socket>\|<key>[,<key>]]`)          |
+| [`-t`](#tag), [`--tag`](#tag)           | `stringArray` |           | Name and optionally a tag (format: `name:tag`)                                                               |
+| [`--target`](#target)                   | `string`      |           | Set the target build stage to build                                                                          |
+| [`--ulimit`](#ulimit)                   | `ulimit`      |           | Ulimit options                                                                                               |
 
 
 <!---MARKER_GEN_END-->
@@ -828,8 +828,12 @@ $ docker buildx build --platform=darwin
 --progress=VALUE
 ```
 
-Set type of progress output (`auto`, `plain`, `tty`, `rawjson`). Use `plain` to show container
-output (default `auto`).
+Set type of progress output. Supported values are:
+- `auto` (default): Uses the `tty` mode if the client is a TTY, or `plain` otherwise
+- `tty`: An interactive stream of the output with color and redrawing
+- `plain`: Prints the raw build progress in a plaintext format
+- `quiet`: Suppress the build output and print image ID on success (same as `--quiet`)
+- `rawjson`: Prints the raw build progress as JSON lines
 
 > [!NOTE]
 > You can also use the `BUILDKIT_PROGRESS` environment variable to set its value.
diff -pruN 0.19.3+ds1-4/docs/reference/buildx_debug_build.md 0.21.3-0ubuntu1/docs/reference/buildx_debug_build.md
--- 0.19.3+ds1-4/docs/reference/buildx_debug_build.md	2024-12-17 11:25:19.000000000 +0000
+++ 0.21.3-0ubuntu1/docs/reference/buildx_debug_build.md	2025-03-17 16:14:25.000000000 +0000
@@ -9,46 +9,46 @@ Start a build
 
 ### Options
 
-| Name                | Type          | Default   | Description                                                                                         |
-|:--------------------|:--------------|:----------|:----------------------------------------------------------------------------------------------------|
-| `--add-host`        | `stringSlice` |           | Add a custom host-to-IP mapping (format: `host:ip`)                                                 |
-| `--allow`           | `stringSlice` |           | Allow extra privileged entitlement (e.g., `network.host`, `security.insecure`)                      |
-| `--annotation`      | `stringArray` |           | Add annotation to the image                                                                         |
-| `--attest`          | `stringArray` |           | Attestation parameters (format: `type=sbom,generator=image`)                                        |
-| `--build-arg`       | `stringArray` |           | Set build-time variables                                                                            |
-| `--build-context`   | `stringArray` |           | Additional build contexts (e.g., name=path)                                                         |
-| `--builder`         | `string`      |           | Override the configured builder instance                                                            |
-| `--cache-from`      | `stringArray` |           | External cache sources (e.g., `user/app:cache`, `type=local,src=path/to/dir`)                       |
-| `--cache-to`        | `stringArray` |           | Cache export destinations (e.g., `user/app:cache`, `type=local,dest=path/to/dir`)                   |
-| `--call`            | `string`      | `build`   | Set method for evaluating build (`check`, `outline`, `targets`)                                     |
-| `--cgroup-parent`   | `string`      |           | Set the parent cgroup for the `RUN` instructions during build                                       |
-| `--check`           | `bool`        |           | Shorthand for `--call=check`                                                                        |
-| `-D`, `--debug`     | `bool`        |           | Enable debug logging                                                                                |
-| `--detach`          | `bool`        |           | Detach buildx server (supported only on linux) (EXPERIMENTAL)                                       |
-| `-f`, `--file`      | `string`      |           | Name of the Dockerfile (default: `PATH/Dockerfile`)                                                 |
-| `--iidfile`         | `string`      |           | Write the image ID to a file                                                                        |
-| `--label`           | `stringArray` |           | Set metadata for an image                                                                           |
-| `--load`            | `bool`        |           | Shorthand for `--output=type=docker`                                                                |
-| `--metadata-file`   | `string`      |           | Write build result metadata to a file                                                               |
-| `--network`         | `string`      | `default` | Set the networking mode for the `RUN` instructions during build                                     |
-| `--no-cache`        | `bool`        |           | Do not use cache when building the image                                                            |
-| `--no-cache-filter` | `stringArray` |           | Do not cache specified stages                                                                       |
-| `-o`, `--output`    | `stringArray` |           | Output destination (format: `type=local,dest=path`)                                                 |
-| `--platform`        | `stringArray` |           | Set target platform for build                                                                       |
-| `--progress`        | `string`      | `auto`    | Set type of progress output (`auto`, `plain`, `tty`, `rawjson`). Use plain to show container output |
-| `--provenance`      | `string`      |           | Shorthand for `--attest=type=provenance`                                                            |
-| `--pull`            | `bool`        |           | Always attempt to pull all referenced images                                                        |
-| `--push`            | `bool`        |           | Shorthand for `--output=type=registry`                                                              |
-| `-q`, `--quiet`     | `bool`        |           | Suppress the build output and print image ID on success                                             |
-| `--root`            | `string`      |           | Specify root directory of server to connect (EXPERIMENTAL)                                          |
-| `--sbom`            | `string`      |           | Shorthand for `--attest=type=sbom`                                                                  |
-| `--secret`          | `stringArray` |           | Secret to expose to the build (format: `id=mysecret[,src=/local/secret]`)                           |
-| `--server-config`   | `string`      |           | Specify buildx server config file (used only when launching new server) (EXPERIMENTAL)              |
-| `--shm-size`        | `bytes`       | `0`       | Shared memory size for build containers                                                             |
-| `--ssh`             | `stringArray` |           | SSH agent socket or keys to expose to the build (format: `default\|<id>[=<socket>\|<key>[,<key>]]`) |
-| `-t`, `--tag`       | `stringArray` |           | Name and optionally a tag (format: `name:tag`)                                                      |
-| `--target`          | `string`      |           | Set the target build stage to build                                                                 |
-| `--ulimit`          | `ulimit`      |           | Ulimit options                                                                                      |
+| Name                | Type          | Default   | Description                                                                                                  |
+|:--------------------|:--------------|:----------|:-------------------------------------------------------------------------------------------------------------|
+| `--add-host`        | `stringSlice` |           | Add a custom host-to-IP mapping (format: `host:ip`)                                                          |
+| `--allow`           | `stringArray` |           | Allow extra privileged entitlement (e.g., `network.host`, `security.insecure`)                               |
+| `--annotation`      | `stringArray` |           | Add annotation to the image                                                                                  |
+| `--attest`          | `stringArray` |           | Attestation parameters (format: `type=sbom,generator=image`)                                                 |
+| `--build-arg`       | `stringArray` |           | Set build-time variables                                                                                     |
+| `--build-context`   | `stringArray` |           | Additional build contexts (e.g., name=path)                                                                  |
+| `--builder`         | `string`      |           | Override the configured builder instance                                                                     |
+| `--cache-from`      | `stringArray` |           | External cache sources (e.g., `user/app:cache`, `type=local,src=path/to/dir`)                                |
+| `--cache-to`        | `stringArray` |           | Cache export destinations (e.g., `user/app:cache`, `type=local,dest=path/to/dir`)                            |
+| `--call`            | `string`      | `build`   | Set method for evaluating build (`check`, `outline`, `targets`)                                              |
+| `--cgroup-parent`   | `string`      |           | Set the parent cgroup for the `RUN` instructions during build                                                |
+| `--check`           | `bool`        |           | Shorthand for `--call=check`                                                                                 |
+| `-D`, `--debug`     | `bool`        |           | Enable debug logging                                                                                         |
+| `--detach`          | `bool`        |           | Detach buildx server (supported only on linux) (EXPERIMENTAL)                                                |
+| `-f`, `--file`      | `string`      |           | Name of the Dockerfile (default: `PATH/Dockerfile`)                                                          |
+| `--iidfile`         | `string`      |           | Write the image ID to a file                                                                                 |
+| `--label`           | `stringArray` |           | Set metadata for an image                                                                                    |
+| `--load`            | `bool`        |           | Shorthand for `--output=type=docker`                                                                         |
+| `--metadata-file`   | `string`      |           | Write build result metadata to a file                                                                        |
+| `--network`         | `string`      | `default` | Set the networking mode for the `RUN` instructions during build                                              |
+| `--no-cache`        | `bool`        |           | Do not use cache when building the image                                                                     |
+| `--no-cache-filter` | `stringArray` |           | Do not cache specified stages                                                                                |
+| `-o`, `--output`    | `stringArray` |           | Output destination (format: `type=local,dest=path`)                                                          |
+| `--platform`        | `stringArray` |           | Set target platform for build                                                                                |
+| `--progress`        | `string`      | `auto`    | Set type of progress output (`auto`, `quiet`, `plain`, `tty`, `rawjson`). Use plain to show container output |
+| `--provenance`      | `string`      |           | Shorthand for `--attest=type=provenance`                                                                     |
+| `--pull`            | `bool`        |           | Always attempt to pull all referenced images                                                                 |
+| `--push`            | `bool`        |           | Shorthand for `--output=type=registry`                                                                       |
+| `-q`, `--quiet`     | `bool`        |           | Suppress the build output and print image ID on success                                                      |
+| `--root`            | `string`      |           | Specify root directory of server to connect (EXPERIMENTAL)                                                   |
+| `--sbom`            | `string`      |           | Shorthand for `--attest=type=sbom`                                                                           |
+| `--secret`          | `stringArray` |           | Secret to expose to the build (format: `id=mysecret[,src=/local/secret]`)                                    |
+| `--server-config`   | `string`      |           | Specify buildx server config file (used only when launching new server) (EXPERIMENTAL)                       |
+| `--shm-size`        | `bytes`       | `0`       | Shared memory size for build containers                                                                      |
+| `--ssh`             | `stringArray` |           | SSH agent socket or keys to expose to the build (format: `default\|<id>[=<socket>\|<key>[,<key>]]`)          |
+| `-t`, `--tag`       | `stringArray` |           | Name and optionally a tag (format: `name:tag`)                                                               |
+| `--target`          | `string`      |           | Set the target build stage to build                                                                          |
+| `--ulimit`          | `ulimit`      |           | Ulimit options                                                                                               |
 
 
 <!---MARKER_GEN_END-->
diff -pruN 0.19.3+ds1-4/docs/reference/buildx_history.md 0.21.3-0ubuntu1/docs/reference/buildx_history.md
--- 0.19.3+ds1-4/docs/reference/buildx_history.md	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/docs/reference/buildx_history.md	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,27 @@
+# docker buildx history
+
+<!---MARKER_GEN_START-->
+Commands to work on build records
+
+### Subcommands
+
+| Name                                   | Description                                    |
+|:---------------------------------------|:-----------------------------------------------|
+| [`inspect`](buildx_history_inspect.md) | Inspect a build                                |
+| [`logs`](buildx_history_logs.md)       | Print the logs of a build                      |
+| [`ls`](buildx_history_ls.md)           | List build records                             |
+| [`open`](buildx_history_open.md)       | Open a build in Docker Desktop                 |
+| [`rm`](buildx_history_rm.md)           | Remove build records                           |
+| [`trace`](buildx_history_trace.md)     | Show the OpenTelemetry trace of a build record |
+
+
+### Options
+
+| Name            | Type     | Default | Description                              |
+|:----------------|:---------|:--------|:-----------------------------------------|
+| `--builder`     | `string` |         | Override the configured builder instance |
+| `-D`, `--debug` | `bool`   |         | Enable debug logging                     |
+
+
+<!---MARKER_GEN_END-->
+
diff -pruN 0.19.3+ds1-4/docs/reference/buildx_history_inspect.md 0.21.3-0ubuntu1/docs/reference/buildx_history_inspect.md
--- 0.19.3+ds1-4/docs/reference/buildx_history_inspect.md	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/docs/reference/buildx_history_inspect.md	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,117 @@
+# docker buildx history inspect
+
+<!---MARKER_GEN_START-->
+Inspect a build
+
+### Subcommands
+
+| Name                                                 | Description                |
+|:-----------------------------------------------------|:---------------------------|
+| [`attachment`](buildx_history_inspect_attachment.md) | Inspect a build attachment |
+
+
+### Options
+
+| Name                  | Type     | Default  | Description                              |
+|:----------------------|:---------|:---------|:-----------------------------------------|
+| `--builder`           | `string` |          | Override the configured builder instance |
+| `-D`, `--debug`       | `bool`   |          | Enable debug logging                     |
+| [`--format`](#format) | `string` | `pretty` | Format the output                        |
+
+
+<!---MARKER_GEN_END-->
+
+## Examples
+
+### <a name="format"></a> Format the output (--format)
+
+The formatting option (`--format`) pretty-prints the output as `pretty` (default),
+as `json`, or using a Go template.
+
+```console
+$ docker buildx history inspect
+Name:           buildx (binaries)
+Context:        .
+Dockerfile:     Dockerfile
+VCS Repository: https://github.com/crazy-max/buildx.git
+VCS Revision:   f15eaa1ee324ffbbab29605600d27a84cab86361
+Target:         binaries
+Platforms:      linux/amd64
+Keep Git Dir:   true
+
+Started:        2025-02-07 11:56:24
+Duration:       1m  1s
+Build Steps:    16/16 (25% cached)
+
+Image Resolve Mode:     local
+
+Materials:
+URI                                                             DIGEST
+pkg:docker/docker/dockerfile@1                                  sha256:93bfd3b68c109427185cd78b4779fc82b484b0b7618e36d0f104d4d801e66d25
+pkg:docker/golang@1.23-alpine3.21?platform=linux%2Famd64        sha256:2c49857f2295e89b23b28386e57e018a86620a8fede5003900f2d138ba9c4037
+pkg:docker/tonistiigi/xx@1.6.1?platform=linux%2Famd64           sha256:923441d7c25f1e2eb5789f82d987693c47b8ed987c4ab3b075d6ed2b5d6779a3
+
+Attachments:
+DIGEST                                                                  PLATFORM        TYPE
+sha256:217329d2af959d4f02e3a96dcbe62bf100cab1feb8006a047ddfe51a5397f7e3                 https://slsa.dev/provenance/v0.2
+
+Print build logs: docker buildx history logs g9808bwrjrlkbhdamxklx660b
+```
+
+```console
+$ docker buildx history inspect --format json
+{
+  "Name": "buildx (binaries)",
+  "Ref": "5w7vkqfi0rf59hw4hnmn627r9",
+  "Context": ".",
+  "Dockerfile": "Dockerfile",
+  "VCSRepository": "https://github.com/crazy-max/buildx.git",
+  "VCSRevision": "f15eaa1ee324ffbbab29605600d27a84cab86361",
+  "Target": "binaries",
+  "Platform": [
+    "linux/amd64"
+  ],
+  "KeepGitDir": true,
+  "StartedAt": "2025-02-07T12:01:05.75807272+01:00",
+  "CompletedAt": "2025-02-07T12:02:07.991778875+01:00",
+  "Duration": 62233706155,
+  "Status": "completed",
+  "NumCompletedSteps": 16,
+  "NumTotalSteps": 16,
+  "NumCachedSteps": 4,
+  "Config": {
+    "ImageResolveMode": "local"
+  },
+  "Materials": [
+    {
+      "URI": "pkg:docker/docker/dockerfile@1",
+      "Digests": [
+        "sha256:93bfd3b68c109427185cd78b4779fc82b484b0b7618e36d0f104d4d801e66d25"
+      ]
+    },
+    {
+      "URI": "pkg:docker/golang@1.23-alpine3.21?platform=linux%2Famd64",
+      "Digests": [
+        "sha256:2c49857f2295e89b23b28386e57e018a86620a8fede5003900f2d138ba9c4037"
+      ]
+    },
+    {
+      "URI": "pkg:docker/tonistiigi/xx@1.6.1?platform=linux%2Famd64",
+      "Digests": [
+        "sha256:923441d7c25f1e2eb5789f82d987693c47b8ed987c4ab3b075d6ed2b5d6779a3"
+      ]
+    }
+  ],
+  "Attachments": [
+    {
+      "Digest": "sha256:450fdd2e6b868fecd69e9891c2c404ba461aa38a47663b4805edeb8d2baf80b1",
+      "Type": "https://slsa.dev/provenance/v0.2"
+    }
+  ]
+}
+```
+
+```console
+$ docker buildx history inspect --format "{{.Name}}: {{.VCSRepository}} ({{.VCSRevision}})"
+buildx (binaries): https://github.com/crazy-max/buildx.git (f15eaa1ee324ffbbab29605600d27a84cab86361)
+```
diff -pruN 0.19.3+ds1-4/docs/reference/buildx_history_inspect_attachment.md 0.21.3-0ubuntu1/docs/reference/buildx_history_inspect_attachment.md
--- 0.19.3+ds1-4/docs/reference/buildx_history_inspect_attachment.md	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/docs/reference/buildx_history_inspect_attachment.md	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,17 @@
+# docker buildx history inspect attachment
+
+<!---MARKER_GEN_START-->
+Inspect a build attachment
+
+### Options
+
+| Name            | Type     | Default | Description                              |
+|:----------------|:---------|:--------|:-----------------------------------------|
+| `--builder`     | `string` |         | Override the configured builder instance |
+| `-D`, `--debug` | `bool`   |         | Enable debug logging                     |
+| `--platform`    | `string` |         | Platform of attachment                   |
+| `--type`        | `string` |         | Type of attachment                       |
+
+
+<!---MARKER_GEN_END-->
+
diff -pruN 0.19.3+ds1-4/docs/reference/buildx_history_logs.md 0.21.3-0ubuntu1/docs/reference/buildx_history_logs.md
--- 0.19.3+ds1-4/docs/reference/buildx_history_logs.md	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/docs/reference/buildx_history_logs.md	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,16 @@
+# docker buildx history logs
+
+<!---MARKER_GEN_START-->
+Print the logs of a build
+
+### Options
+
+| Name            | Type     | Default | Description                                       |
+|:----------------|:---------|:--------|:--------------------------------------------------|
+| `--builder`     | `string` |         | Override the configured builder instance          |
+| `-D`, `--debug` | `bool`   |         | Enable debug logging                              |
+| `--progress`    | `string` | `plain` | Set type of progress output (plain, rawjson, tty) |
+
+
+<!---MARKER_GEN_END-->
+
diff -pruN 0.19.3+ds1-4/docs/reference/buildx_history_ls.md 0.21.3-0ubuntu1/docs/reference/buildx_history_ls.md
--- 0.19.3+ds1-4/docs/reference/buildx_history_ls.md	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/docs/reference/buildx_history_ls.md	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,17 @@
+# docker buildx history ls
+
+<!---MARKER_GEN_START-->
+List build records
+
+### Options
+
+| Name            | Type     | Default | Description                              |
+|:----------------|:---------|:--------|:-----------------------------------------|
+| `--builder`     | `string` |         | Override the configured builder instance |
+| `-D`, `--debug` | `bool`   |         | Enable debug logging                     |
+| `--format`      | `string` | `table` | Format the output                        |
+| `--no-trunc`    | `bool`   |         | Don't truncate output                    |
+
+
+<!---MARKER_GEN_END-->
+
diff -pruN 0.19.3+ds1-4/docs/reference/buildx_history_open.md 0.21.3-0ubuntu1/docs/reference/buildx_history_open.md
--- 0.19.3+ds1-4/docs/reference/buildx_history_open.md	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/docs/reference/buildx_history_open.md	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,15 @@
+# docker buildx history open
+
+<!---MARKER_GEN_START-->
+Open a build in Docker Desktop
+
+### Options
+
+| Name            | Type     | Default | Description                              |
+|:----------------|:---------|:--------|:-----------------------------------------|
+| `--builder`     | `string` |         | Override the configured builder instance |
+| `-D`, `--debug` | `bool`   |         | Enable debug logging                     |
+
+
+<!---MARKER_GEN_END-->
+
diff -pruN 0.19.3+ds1-4/docs/reference/buildx_history_rm.md 0.21.3-0ubuntu1/docs/reference/buildx_history_rm.md
--- 0.19.3+ds1-4/docs/reference/buildx_history_rm.md	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/docs/reference/buildx_history_rm.md	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,16 @@
+# docker buildx history rm
+
+<!---MARKER_GEN_START-->
+Remove build records
+
+### Options
+
+| Name            | Type     | Default | Description                              |
+|:----------------|:---------|:--------|:-----------------------------------------|
+| `--all`         | `bool`   |         | Remove all build records                 |
+| `--builder`     | `string` |         | Override the configured builder instance |
+| `-D`, `--debug` | `bool`   |         | Enable debug logging                     |
+
+
+<!---MARKER_GEN_END-->
+
diff -pruN 0.19.3+ds1-4/docs/reference/buildx_history_trace.md 0.21.3-0ubuntu1/docs/reference/buildx_history_trace.md
--- 0.19.3+ds1-4/docs/reference/buildx_history_trace.md	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/docs/reference/buildx_history_trace.md	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,17 @@
+# docker buildx history trace
+
+<!---MARKER_GEN_START-->
+Show the OpenTelemetry trace of a build record
+
+### Options
+
+| Name            | Type     | Default       | Description                              |
+|:----------------|:---------|:--------------|:-----------------------------------------|
+| `--addr`        | `string` | `127.0.0.1:0` | Address to bind the UI server            |
+| `--builder`     | `string` |               | Override the configured builder instance |
+| `--compare`     | `string` |               | Compare with another build reference     |
+| `-D`, `--debug` | `bool`   |               | Enable debug logging                     |
+
+
+<!---MARKER_GEN_END-->
+
diff -pruN 0.19.3+ds1-4/driver/docker-container/driver.go 0.21.3-0ubuntu1/driver/docker-container/driver.go
--- 0.19.3+ds1-4/driver/docker-container/driver.go	2024-12-17 11:25:19.000000000 +0000
+++ 0.21.3-0ubuntu1/driver/docker-container/driver.go	2025-03-17 16:14:25.000000000 +0000
@@ -23,10 +23,10 @@ import (
 	"github.com/docker/docker/api/types/mount"
 	"github.com/docker/docker/api/types/network"
 	"github.com/docker/docker/api/types/system"
-	dockerclient "github.com/docker/docker/client"
 	"github.com/docker/docker/errdefs"
 	dockerarchive "github.com/docker/docker/pkg/archive"
 	"github.com/docker/docker/pkg/idtools"
+	"github.com/docker/docker/pkg/jsonmessage"
 	"github.com/docker/docker/pkg/stdcopy"
 	"github.com/moby/buildkit/client"
 	"github.com/pkg/errors"
@@ -70,7 +70,7 @@ func (d *Driver) Bootstrap(ctx context.C
 	return progress.Wrap("[internal] booting buildkit", l, func(sub progress.SubLogger) error {
 		_, err := d.DockerAPI.ContainerInspect(ctx, d.Name)
 		if err != nil {
-			if dockerclient.IsErrNotFound(err) {
+			if errdefs.IsNotFound(err) {
 				return d.create(ctx, sub)
 			}
 			return err
@@ -95,19 +95,20 @@ func (d *Driver) create(ctx context.Cont
 		if err != nil {
 			return err
 		}
-		rc, err := d.DockerAPI.ImageCreate(ctx, imageName, image.CreateOptions{
+		resp, err := d.DockerAPI.ImageCreate(ctx, imageName, image.CreateOptions{
 			RegistryAuth: ra,
 		})
 		if err != nil {
 			return err
 		}
-		_, err = io.Copy(io.Discard, rc)
-		return err
+		defer resp.Close()
+		return jsonmessage.DisplayJSONMessagesStream(resp, io.Discard, 0, false, nil)
 	}); err != nil {
 		// image pulling failed, check if it exists in local image store.
 		// if not, return pulling error. otherwise log it.
-		_, _, errInspect := d.DockerAPI.ImageInspectWithRaw(ctx, imageName)
-		if errInspect != nil {
+		_, errInspect := d.DockerAPI.ImageInspect(ctx, imageName)
+		found := errInspect == nil
+		if !found {
 			return err
 		}
 		l.Wrap("pulling failed, using local image "+imageName, func() error { return nil })
@@ -306,7 +307,7 @@ func (d *Driver) start(ctx context.Conte
 func (d *Driver) Info(ctx context.Context) (*driver.Info, error) {
 	ctn, err := d.DockerAPI.ContainerInspect(ctx, d.Name)
 	if err != nil {
-		if dockerclient.IsErrNotFound(err) {
+		if errdefs.IsNotFound(err) {
 			return &driver.Info{
 				Status: driver.Inactive,
 			}, nil
diff -pruN 0.19.3+ds1-4/go.mod 0.21.3-0ubuntu1/go.mod
--- 0.19.3+ds1-4/go.mod	2024-12-17 11:25:19.000000000 +0000
+++ 0.21.3-0ubuntu1/go.mod	2025-03-17 16:14:25.000000000 +0000
@@ -5,89 +5,95 @@ go 1.22.0
 require (
 	github.com/Masterminds/semver/v3 v3.2.1
 	github.com/Microsoft/go-winio v0.6.2
-	github.com/aws/aws-sdk-go-v2/config v1.26.6
-	github.com/compose-spec/compose-go/v2 v2.4.4
+	github.com/aws/aws-sdk-go-v2/config v1.27.27
+	github.com/compose-spec/compose-go/v2 v2.4.7
 	github.com/containerd/console v1.0.4
-	github.com/containerd/containerd v1.7.24
+	github.com/containerd/containerd/v2 v2.0.2
 	github.com/containerd/continuity v0.4.5
-	github.com/containerd/errdefs v0.3.0
+	github.com/containerd/errdefs v1.0.0
 	github.com/containerd/log v0.1.0
-	github.com/containerd/platforms v0.2.1
+	github.com/containerd/platforms v1.0.0-rc.1
 	github.com/containerd/typeurl/v2 v2.2.3
-	github.com/creack/pty v1.1.21
+	github.com/creack/pty v1.1.24
+	github.com/davecgh/go-spew v1.1.1
 	github.com/distribution/reference v0.6.0
-	github.com/docker/cli v27.4.0-rc.2+incompatible
-	github.com/docker/cli-docs-tool v0.8.0
-	github.com/docker/docker v27.4.0-rc.2+incompatible
+	github.com/docker/cli v28.0.0-rc.2+incompatible
+	github.com/docker/cli-docs-tool v0.9.0
+	github.com/docker/docker v28.0.0-rc.2+incompatible
 	github.com/docker/go-units v0.5.0
 	github.com/gofrs/flock v0.12.1
 	github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510
 	github.com/google/uuid v1.6.0
-	github.com/hashicorp/go-cty-funcs v0.0.0-20230405223818-a090f58aa992
-	github.com/hashicorp/hcl/v2 v2.20.1
+	github.com/hashicorp/go-cty-funcs v0.0.0-20241120183456-c51673e0b3dd
+	github.com/hashicorp/go-multierror v1.1.1
+	github.com/hashicorp/hcl/v2 v2.23.0
 	github.com/in-toto/in-toto-golang v0.5.0
 	github.com/mitchellh/hashstructure/v2 v2.0.2
-	github.com/moby/buildkit v0.18.0
+	github.com/moby/buildkit v0.20.0
 	github.com/moby/sys/mountinfo v0.7.2
 	github.com/moby/sys/signal v0.7.1
 	github.com/morikuni/aec v1.0.0
 	github.com/opencontainers/go-digest v1.0.0
 	github.com/opencontainers/image-spec v1.1.0
 	github.com/pelletier/go-toml v1.9.5
+	github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c
 	github.com/pkg/errors v0.9.1
 	github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10
 	github.com/serialx/hashring v0.0.0-20200727003509-22c0c7ab6b1b
 	github.com/sirupsen/logrus v1.9.3
 	github.com/spf13/cobra v1.8.1
 	github.com/spf13/pflag v1.0.5
-	github.com/stretchr/testify v1.9.0
-	github.com/tonistiigi/fsutil v0.0.0-20241121093142-31cf1f437184
+	github.com/stretchr/testify v1.10.0
+	github.com/tonistiigi/fsutil v0.0.0-20250113203817-b14e27f4135a
 	github.com/tonistiigi/go-csvvalue v0.0.0-20240710180619-ddb21b71c0b4
-	github.com/zclconf/go-cty v1.14.4
-	go.opentelemetry.io/otel v1.28.0
-	go.opentelemetry.io/otel/metric v1.28.0
-	go.opentelemetry.io/otel/sdk v1.28.0
-	go.opentelemetry.io/otel/trace v1.28.0
-	golang.org/x/mod v0.21.0
-	golang.org/x/sync v0.8.0
-	golang.org/x/sys v0.26.0
-	golang.org/x/term v0.24.0
-	golang.org/x/text v0.18.0
-	google.golang.org/grpc v1.66.3
+	github.com/tonistiigi/jaeger-ui-rest v0.0.0-20250211190051-7d4944a45bb6
+	github.com/zclconf/go-cty v1.16.0
+	go.opentelemetry.io/otel v1.31.0
+	go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.31.0
+	go.opentelemetry.io/otel/metric v1.31.0
+	go.opentelemetry.io/otel/sdk v1.31.0
+	go.opentelemetry.io/otel/trace v1.31.0
+	golang.org/x/mod v0.22.0
+	golang.org/x/sync v0.10.0
+	golang.org/x/sys v0.29.0
+	golang.org/x/term v0.27.0
+	golang.org/x/text v0.21.0
+	google.golang.org/genproto/googleapis/rpc v0.0.0-20241021214115-324edc3d5d38
+	google.golang.org/grpc v1.69.4
 	google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.5.1
-	google.golang.org/protobuf v1.35.1
+	google.golang.org/protobuf v1.35.2
 	gopkg.in/yaml.v3 v3.0.1
-	k8s.io/api v0.29.2
-	k8s.io/apimachinery v0.29.2
-	k8s.io/client-go v0.29.2
+	k8s.io/api v0.31.2
+	k8s.io/apimachinery v0.31.2
+	k8s.io/client-go v0.31.2
 )
 
 require (
-	github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 // indirect
-	github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect
+	github.com/AdaLogics/go-fuzz-headers v0.0.0-20240806141605-e8a1dd7889d6 // indirect
+	github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c // indirect
 	github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d // indirect
 	github.com/agext/levenshtein v1.2.3 // indirect
 	github.com/apparentlymart/go-cidr v1.0.1 // indirect
 	github.com/apparentlymart/go-textseg/v15 v15.0.0 // indirect
-	github.com/aws/aws-sdk-go-v2 v1.24.1 // indirect
-	github.com/aws/aws-sdk-go-v2/credentials v1.16.16 // indirect
-	github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.11 // indirect
-	github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.10 // indirect
-	github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.10 // indirect
-	github.com/aws/aws-sdk-go-v2/internal/ini v1.7.3 // indirect
-	github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.4 // indirect
-	github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.10 // indirect
-	github.com/aws/aws-sdk-go-v2/service/sso v1.18.7 // indirect
-	github.com/aws/aws-sdk-go-v2/service/ssooidc v1.21.7 // indirect
-	github.com/aws/aws-sdk-go-v2/service/sts v1.26.7 // indirect
-	github.com/aws/smithy-go v1.19.0 // indirect
+	github.com/aws/aws-sdk-go-v2 v1.30.3 // indirect
+	github.com/aws/aws-sdk-go-v2/credentials v1.17.27 // indirect
+	github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.11 // indirect
+	github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.15 // indirect
+	github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.15 // indirect
+	github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0 // indirect
+	github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.3 // indirect
+	github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.17 // indirect
+	github.com/aws/aws-sdk-go-v2/service/sso v1.22.4 // indirect
+	github.com/aws/aws-sdk-go-v2/service/ssooidc v1.26.4 // indirect
+	github.com/aws/aws-sdk-go-v2/service/sts v1.30.3 // indirect
+	github.com/aws/smithy-go v1.20.3 // indirect
 	github.com/beorn7/perks v1.0.1 // indirect
 	github.com/cenkalti/backoff/v4 v4.3.0 // indirect
 	github.com/cespare/xxhash/v2 v2.3.0 // indirect
-	github.com/containerd/containerd/api v1.7.19 // indirect
-	github.com/containerd/ttrpc v1.2.5 // indirect
-	github.com/cpuguy83/go-md2man/v2 v2.0.5 // indirect
-	github.com/davecgh/go-spew v1.1.1 // indirect
+	github.com/containerd/containerd/api v1.8.0 // indirect
+	github.com/containerd/errdefs/pkg v0.3.0 // indirect
+	github.com/containerd/ttrpc v1.2.7 // indirect
+	github.com/cpuguy83/go-md2man/v2 v2.0.6 // indirect
 	github.com/docker/distribution v2.8.3+incompatible // indirect
 	github.com/docker/docker-credential-helpers v0.8.2 // indirect
 	github.com/docker/go v1.5.1-1.0.20160303222718-d30aec9fd63c // indirect
@@ -96,11 +102,12 @@ require (
 	github.com/emicklei/go-restful/v3 v3.11.0 // indirect
 	github.com/felixge/httpsnoop v1.0.4 // indirect
 	github.com/fvbommel/sortorder v1.0.1 // indirect
+	github.com/fxamacker/cbor/v2 v2.7.0 // indirect
 	github.com/go-logr/logr v1.4.2 // indirect
 	github.com/go-logr/stdr v1.2.2 // indirect
 	github.com/go-openapi/jsonpointer v0.19.6 // indirect
 	github.com/go-openapi/jsonreference v0.20.2 // indirect
-	github.com/go-openapi/swag v0.22.3 // indirect
+	github.com/go-openapi/swag v0.22.4 // indirect
 	github.com/go-viper/mapstructure/v2 v2.0.0 // indirect
 	github.com/gogo/protobuf v1.3.2 // indirect
 	github.com/golang/protobuf v1.5.4 // indirect
@@ -109,10 +116,9 @@ require (
 	github.com/google/gofuzz v1.2.0 // indirect
 	github.com/gorilla/mux v1.8.1 // indirect
 	github.com/gorilla/websocket v1.5.0 // indirect
-	github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect
+	github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0 // indirect
 	github.com/hashicorp/errwrap v1.1.0 // indirect
 	github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
-	github.com/hashicorp/go-multierror v1.1.1 // indirect
 	github.com/imdario/mergo v0.3.16 // indirect
 	github.com/inconshreveable/mousetrap v1.1.0 // indirect
 	github.com/josharian/intern v1.0.0 // indirect
@@ -127,17 +133,17 @@ require (
 	github.com/moby/docker-image-spec v1.3.1 // indirect
 	github.com/moby/locker v1.0.1 // indirect
 	github.com/moby/patternmatcher v0.6.0 // indirect
-	github.com/moby/spdystream v0.2.0 // indirect
+	github.com/moby/spdystream v0.4.0 // indirect
 	github.com/moby/sys/sequential v0.6.0 // indirect
 	github.com/moby/sys/user v0.3.0 // indirect
 	github.com/moby/sys/userns v0.1.0 // indirect
-	github.com/moby/term v0.5.0 // indirect
+	github.com/moby/term v0.5.2 // indirect
 	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
 	github.com/modern-go/reflect2 v1.0.2 // indirect
 	github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
 	github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect
 	github.com/pmezard/go-difflib v1.0.0 // indirect
-	github.com/prometheus/client_golang v1.20.2 // indirect
+	github.com/prometheus/client_golang v1.20.5 // indirect
 	github.com/prometheus/client_model v0.6.1 // indirect
 	github.com/prometheus/common v0.55.0 // indirect
 	github.com/prometheus/procfs v0.15.1 // indirect
@@ -149,34 +155,42 @@ require (
 	github.com/tonistiigi/dchapes-mode v0.0.0-20241001053921-ca0759fec205 // indirect
 	github.com/tonistiigi/units v0.0.0-20180711220420-6950e57a87ea // indirect
 	github.com/tonistiigi/vt100 v0.0.0-20240514184818-90bafcd6abab // indirect
+	github.com/x448/float16 v0.8.4 // indirect
 	github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect
 	github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
 	github.com/xeipuuv/gojsonschema v1.2.0 // indirect
-	go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1 // indirect
-	go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.46.1 // indirect
-	go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 // indirect
-	go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v0.44.0 // indirect
-	go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v0.44.0 // indirect
-	go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 // indirect
-	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0 // indirect
-	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.28.0 // indirect
-	go.opentelemetry.io/otel/sdk/metric v1.28.0 // indirect
+	go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.56.0 // indirect
+	go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.56.0 // indirect
+	go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.56.0 // indirect
+	go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.31.0 // indirect
+	go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.31.0 // indirect
+	go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.31.0 // indirect
+	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.31.0 // indirect
+	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.31.0 // indirect
+	go.opentelemetry.io/otel/sdk/metric v1.31.0 // indirect
 	go.opentelemetry.io/proto/otlp v1.3.1 // indirect
-	golang.org/x/crypto v0.27.0 // indirect
-	golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 // indirect
-	golang.org/x/net v0.29.0 // indirect
-	golang.org/x/oauth2 v0.21.0 // indirect
+	golang.org/x/crypto v0.31.0 // indirect
+	golang.org/x/exp v0.0.0-20241108190413-2d47ceb2692f // indirect
+	golang.org/x/net v0.33.0 // indirect
+	golang.org/x/oauth2 v0.23.0 // indirect
 	golang.org/x/time v0.6.0 // indirect
-	golang.org/x/tools v0.25.0 // indirect
-	google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80 // indirect
-	google.golang.org/genproto/googleapis/api v0.0.0-20240701130421-f6361c86f094 // indirect
-	google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094 // indirect
+	golang.org/x/tools v0.27.0 // indirect
+	google.golang.org/genproto/googleapis/api v0.0.0-20241021214115-324edc3d5d38 // indirect
 	gopkg.in/inf.v0 v0.9.1 // indirect
 	gopkg.in/yaml.v2 v2.4.0 // indirect
-	k8s.io/klog/v2 v2.110.1 // indirect
-	k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 // indirect
-	k8s.io/utils v0.0.0-20230726121419-3b25d923346b // indirect
+	k8s.io/klog/v2 v2.130.1 // indirect
+	k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 // indirect
+	k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 // indirect
 	sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
 	sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect
-	sigs.k8s.io/yaml v1.3.0 // indirect
+	sigs.k8s.io/yaml v1.4.0 // indirect
+)
+
+exclude (
+	// FIXME(thaJeztah): remove this once kubernetes has updated their dependencies to no longer need this.
+	//
+	// For additional details, see this PR and links mentioned in that PR:
+	// https://github.com/kubernetes-sigs/kustomize/pull/5830#issuecomment-2569960859
+	github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc
+	github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2
 )
diff -pruN 0.19.3+ds1-4/go.sum 0.21.3-0ubuntu1/go.sum
--- 0.19.3+ds1-4/go.sum	2024-12-17 11:25:19.000000000 +0000
+++ 0.21.3-0ubuntu1/go.sum	2025-03-17 16:14:25.000000000 +0000
@@ -1,21 +1,17 @@
-cloud.google.com/go v0.112.0 h1:tpFCD7hpHFlQ8yPwT3x+QeXqc2T6+n6T+hmABHfDUSM=
-cloud.google.com/go/compute v1.23.3 h1:6sVlXXBmbd7jNX0Ipq0trII3e4n1/MsADLK6a+aiVlk=
-cloud.google.com/go/compute/metadata v0.3.0 h1:Tz+eQXMEqDIKRsmY3cHTL6FVaynIjX2QxYC4trgAKZc=
-cloud.google.com/go/compute/metadata v0.3.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k=
-github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 h1:bvDV9vkmnHYOMsOr4WLk+Vo07yKIzd94sVoIqshQ4bU=
-github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24/go.mod h1:8o94RPi1/7XTJvwPpRSzSUedZrtlirdB3r9Z20bi2f8=
-github.com/AdamKorcz/go-118-fuzz-build v0.0.0-20230306123547-8075edf89bb0 h1:59MxjQVfjXsBpLy+dbd2/ELV5ofnUkUZBvWSC85sheA=
-github.com/AdamKorcz/go-118-fuzz-build v0.0.0-20230306123547-8075edf89bb0/go.mod h1:OahwfttHWG6eJ0clwcfBAHoDI6X/LV/15hx/wlMZSrU=
-github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8=
-github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
+github.com/AdaLogics/go-fuzz-headers v0.0.0-20240806141605-e8a1dd7889d6 h1:He8afgbRMd7mFxO99hRNu+6tazq8nFF9lIwo9JFroBk=
+github.com/AdaLogics/go-fuzz-headers v0.0.0-20240806141605-e8a1dd7889d6/go.mod h1:8o94RPi1/7XTJvwPpRSzSUedZrtlirdB3r9Z20bi2f8=
+github.com/AdamKorcz/go-118-fuzz-build v0.0.0-20231105174938-2b5cbb29f3e2 h1:dIScnXFlF784X79oi7MzVT6GWqr/W1uUt0pB5CsDs9M=
+github.com/AdamKorcz/go-118-fuzz-build v0.0.0-20231105174938-2b5cbb29f3e2/go.mod h1:gCLVsLfv1egrcZu+GoJATN5ts75F2s62ih/457eWzOw=
+github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c h1:udKWzYgxTojEKWjV8V+WSxDXJ4NFATAsZjh8iIbsQIg=
+github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
 github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
 github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
 github.com/Masterminds/semver/v3 v3.2.1 h1:RN9w6+7QoMeJVGyfmbcgs28Br8cvmnucEXnY0rYXWg0=
 github.com/Masterminds/semver/v3 v3.2.1/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ=
 github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY=
 github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU=
-github.com/Microsoft/hcsshim v0.12.8 h1:BtDWYlFMcWhorrvSSo2M7z0csPdw6t7no/C3FsSvqiI=
-github.com/Microsoft/hcsshim v0.12.8/go.mod h1:cibQ4BqhJ32FXDwPdQhKhwrwophnh3FuT4nwQZF907w=
+github.com/Microsoft/hcsshim v0.12.9 h1:2zJy5KA+l0loz1HzEGqyNnjd3fyZA31ZBCGKacp6lLg=
+github.com/Microsoft/hcsshim v0.12.9/go.mod h1:fJ0gkFAna6ukt0bLdKB8djt4XIJhF/vEPuoIWYVvZ8Y=
 github.com/Shopify/logrus-bugsnag v0.0.0-20170309145241-6dbc35f2c30d/go.mod h1:HI8ITrYtUY+O+ZhtlqUnD8+KwNPOyugEhfP9fdUIaEQ=
 github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d h1:UrqY+r/OJnIp5u0s1SbQ8dVfLCZJsnvazdBP5hS4iRs=
 github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d/go.mod h1:HI8ITrYtUY+O+ZhtlqUnD8+KwNPOyugEhfP9fdUIaEQ=
@@ -32,32 +28,32 @@ github.com/apparentlymart/go-textseg/v15
 github.com/apparentlymart/go-textseg/v15 v15.0.0/go.mod h1:K8XmNZdhEBkdlyDdvbmmsvpAG721bKi0joRfFdHIWJ4=
 github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio=
 github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
-github.com/aws/aws-sdk-go-v2 v1.24.1 h1:xAojnj+ktS95YZlDf0zxWBkbFtymPeDP+rvUQIH3uAU=
-github.com/aws/aws-sdk-go-v2 v1.24.1/go.mod h1:LNh45Br1YAkEKaAqvmE1m8FUx6a5b/V0oAKV7of29b4=
-github.com/aws/aws-sdk-go-v2/config v1.26.6 h1:Z/7w9bUqlRI0FFQpetVuFYEsjzE3h7fpU6HuGmfPL/o=
-github.com/aws/aws-sdk-go-v2/config v1.26.6/go.mod h1:uKU6cnDmYCvJ+pxO9S4cWDb2yWWIH5hra+32hVh1MI4=
-github.com/aws/aws-sdk-go-v2/credentials v1.16.16 h1:8q6Rliyv0aUFAVtzaldUEcS+T5gbadPbWdV1WcAddK8=
-github.com/aws/aws-sdk-go-v2/credentials v1.16.16/go.mod h1:UHVZrdUsv63hPXFo1H7c5fEneoVo9UXiz36QG1GEPi0=
-github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.11 h1:c5I5iH+DZcH3xOIMlz3/tCKJDaHFwYEmxvlh2fAcFo8=
-github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.11/go.mod h1:cRrYDYAMUohBJUtUnOhydaMHtiK/1NZ0Otc9lIb6O0Y=
-github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.10 h1:vF+Zgd9s+H4vOXd5BMaPWykta2a6Ih0AKLq/X6NYKn4=
-github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.10/go.mod h1:6BkRjejp/GR4411UGqkX8+wFMbFbqsUIimfK4XjOKR4=
-github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.10 h1:nYPe006ktcqUji8S2mqXf9c/7NdiKriOwMvWQHgYztw=
-github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.10/go.mod h1:6UV4SZkVvmODfXKql4LCbaZUpF7HO2BX38FgBf9ZOLw=
-github.com/aws/aws-sdk-go-v2/internal/ini v1.7.3 h1:n3GDfwqF2tzEkXlv5cuy4iy7LpKDtqDMcNLfZDu9rls=
-github.com/aws/aws-sdk-go-v2/internal/ini v1.7.3/go.mod h1:6fQQgfuGmw8Al/3M2IgIllycxV7ZW7WCdVSqfBeUiCY=
-github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.4 h1:/b31bi3YVNlkzkBrm9LfpaKoaYZUxIAj4sHfOTmLfqw=
-github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.4/go.mod h1:2aGXHFmbInwgP9ZfpmdIfOELL79zhdNYNmReK8qDfdQ=
-github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.10 h1:DBYTXwIGQSGs9w4jKm60F5dmCQ3EEruxdc0MFh+3EY4=
-github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.10/go.mod h1:wohMUQiFdzo0NtxbBg0mSRGZ4vL3n0dKjLTINdcIino=
-github.com/aws/aws-sdk-go-v2/service/sso v1.18.7 h1:eajuO3nykDPdYicLlP3AGgOyVN3MOlFmZv7WGTuJPow=
-github.com/aws/aws-sdk-go-v2/service/sso v1.18.7/go.mod h1:+mJNDdF+qiUlNKNC3fxn74WWNN+sOiGOEImje+3ScPM=
-github.com/aws/aws-sdk-go-v2/service/ssooidc v1.21.7 h1:QPMJf+Jw8E1l7zqhZmMlFw6w1NmfkfiSK8mS4zOx3BA=
-github.com/aws/aws-sdk-go-v2/service/ssooidc v1.21.7/go.mod h1:ykf3COxYI0UJmxcfcxcVuz7b6uADi1FkiUz6Eb7AgM8=
-github.com/aws/aws-sdk-go-v2/service/sts v1.26.7 h1:NzO4Vrau795RkUdSHKEwiR01FaGzGOH1EETJ+5QHnm0=
-github.com/aws/aws-sdk-go-v2/service/sts v1.26.7/go.mod h1:6h2YuIoxaMSCFf5fi1EgZAwdfkGMgDY+DVfa61uLe4U=
-github.com/aws/smithy-go v1.19.0 h1:KWFKQV80DpP3vJrrA9sVAHQ5gc2z8i4EzrLhLlWXcBM=
-github.com/aws/smithy-go v1.19.0/go.mod h1:NukqUGpCZIILqqiV0NIjeFh24kd/FAa4beRb6nbIUPE=
+github.com/aws/aws-sdk-go-v2 v1.30.3 h1:jUeBtG0Ih+ZIFH0F4UkmL9w3cSpaMv9tYYDbzILP8dY=
+github.com/aws/aws-sdk-go-v2 v1.30.3/go.mod h1:nIQjQVp5sfpQcTc9mPSr1B0PaWK5ByX9MOoDadSN4lc=
+github.com/aws/aws-sdk-go-v2/config v1.27.27 h1:HdqgGt1OAP0HkEDDShEl0oSYa9ZZBSOmKpdpsDMdO90=
+github.com/aws/aws-sdk-go-v2/config v1.27.27/go.mod h1:MVYamCg76dFNINkZFu4n4RjDixhVr51HLj4ErWzrVwg=
+github.com/aws/aws-sdk-go-v2/credentials v1.17.27 h1:2raNba6gr2IfA0eqqiP2XiQ0UVOpGPgDSi0I9iAP+UI=
+github.com/aws/aws-sdk-go-v2/credentials v1.17.27/go.mod h1:gniiwbGahQByxan6YjQUMcW4Aov6bLC3m+evgcoN4r4=
+github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.11 h1:KreluoV8FZDEtI6Co2xuNk/UqI9iwMrOx/87PBNIKqw=
+github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.11/go.mod h1:SeSUYBLsMYFoRvHE0Tjvn7kbxaUhl75CJi1sbfhMxkU=
+github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.15 h1:SoNJ4RlFEQEbtDcCEt+QG56MY4fm4W8rYirAmq+/DdU=
+github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.15/go.mod h1:U9ke74k1n2bf+RIgoX1SXFed1HLs51OgUSs+Ph0KJP8=
+github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.15 h1:C6WHdGnTDIYETAm5iErQUiVNsclNx9qbJVPIt03B6bI=
+github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.15/go.mod h1:ZQLZqhcu+JhSrA9/NXRm8SkDvsycE+JkV3WGY41e+IM=
+github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0 h1:hT8rVHwugYE2lEfdFE0QWVo81lF7jMrYJVDWI+f+VxU=
+github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0/go.mod h1:8tu/lYfQfFe6IGnaOdrpVgEL2IrrDOf6/m9RQum4NkY=
+github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.3 h1:dT3MqvGhSoaIhRseqw2I0yH81l7wiR2vjs57O51EAm8=
+github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.3/go.mod h1:GlAeCkHwugxdHaueRr4nhPuY+WW+gR8UjlcqzPr1SPI=
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.17 h1:HGErhhrxZlQ044RiM+WdoZxp0p+EGM62y3L6pwA4olE=
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.17/go.mod h1:RkZEx4l0EHYDJpWppMJ3nD9wZJAa8/0lq9aVC+r2UII=
+github.com/aws/aws-sdk-go-v2/service/sso v1.22.4 h1:BXx0ZIxvrJdSgSvKTZ+yRBeSqqgPM89VPlulEcl37tM=
+github.com/aws/aws-sdk-go-v2/service/sso v1.22.4/go.mod h1:ooyCOXjvJEsUw7x+ZDHeISPMhtwI3ZCB7ggFMcFfWLU=
+github.com/aws/aws-sdk-go-v2/service/ssooidc v1.26.4 h1:yiwVzJW2ZxZTurVbYWA7QOrAaCYQR72t0wrSBfoesUE=
+github.com/aws/aws-sdk-go-v2/service/ssooidc v1.26.4/go.mod h1:0oxfLkpz3rQ/CHlx5hB7H69YUpFiI1tql6Q6Ne+1bCw=
+github.com/aws/aws-sdk-go-v2/service/sts v1.30.3 h1:ZsDKRLXGWHk8WdtyYMoGNO7bTudrvuKpDKgMVRlepGE=
+github.com/aws/aws-sdk-go-v2/service/sts v1.30.3/go.mod h1:zwySh8fpFyXp9yOr/KVzxOl8SRqgf/IDw5aUt9UKFcQ=
+github.com/aws/smithy-go v1.20.3 h1:ryHwveWzPV5BIof6fyDvor6V3iUL7nTfiTKXHiW05nE=
+github.com/aws/smithy-go v1.20.3/go.mod h1:krry+ya/rV9RDcV/Q16kpu6ypI4K2czasz0NC3qS14E=
 github.com/beorn7/perks v0.0.0-20150223135152-b965b613227f/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
 github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
 github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
@@ -79,61 +75,62 @@ github.com/cespare/xxhash/v2 v2.3.0 h1:U
 github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
 github.com/cloudflare/cfssl v0.0.0-20180223231731-4e2dcbde5004 h1:lkAMpLVBDaj17e85keuznYcH5rqI438v41pKcBl4ZxQ=
 github.com/cloudflare/cfssl v0.0.0-20180223231731-4e2dcbde5004/go.mod h1:yMWuSON2oQp+43nFtAV/uvKQIFpSPerB57DCt9t8sSA=
-github.com/cncf/xds/go v0.0.0-20240423153145-555b57ec207b h1:ga8SEFjZ60pxLcmhnThWgvH2wg8376yUJmPhEH4H3kw=
-github.com/cncf/xds/go v0.0.0-20240423153145-555b57ec207b/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8=
 github.com/codahale/rfc6979 v0.0.0-20141003034818-6a90f24967eb h1:EDmT6Q9Zs+SbUoc7Ik9EfrFqcylYqgPZ9ANSbTAntnE=
 github.com/codahale/rfc6979 v0.0.0-20141003034818-6a90f24967eb/go.mod h1:ZjrT6AXHbDs86ZSdt/osfBi5qfexBrKUdONk989Wnk4=
-github.com/compose-spec/compose-go/v2 v2.4.4 h1:cvHBl5Jf1iNBmRrZCICmHvaoskYc1etTPEMLKVwokAY=
-github.com/compose-spec/compose-go/v2 v2.4.4/go.mod h1:lFN0DrMxIncJGYAXTfWuajfwj5haBJqrBkarHcnjJKc=
-github.com/containerd/cgroups v1.1.0 h1:v8rEWFl6EoqHB+swVNjVoCJE8o3jX7e8nqBGPLaDFBM=
-github.com/containerd/cgroups/v3 v3.0.3 h1:S5ByHZ/h9PMe5IOQoN7E+nMc2UcLEM/V48DGDJ9kip0=
-github.com/containerd/cgroups/v3 v3.0.3/go.mod h1:8HBe7V3aWGLFPd/k03swSIsGjZhHI2WzJmticMgVuz0=
+github.com/compose-spec/compose-go/v2 v2.4.7 h1:WNpz5bIbKG+G+w9pfu72B1ZXr+Og9jez8TMEo8ecXPk=
+github.com/compose-spec/compose-go/v2 v2.4.7/go.mod h1:lFN0DrMxIncJGYAXTfWuajfwj5haBJqrBkarHcnjJKc=
+github.com/containerd/cgroups/v3 v3.0.5 h1:44na7Ud+VwyE7LIoJ8JTNQOa549a8543BmzaJHo6Bzo=
+github.com/containerd/cgroups/v3 v3.0.5/go.mod h1:SA5DLYnXO8pTGYiAHXz94qvLQTKfVM5GEVisn4jpins=
 github.com/containerd/console v1.0.4 h1:F2g4+oChYvBTsASRTz8NP6iIAi97J3TtSAsLbIFn4ro=
 github.com/containerd/console v1.0.4/go.mod h1:YynlIjWYF8myEu6sdkwKIvGQq+cOckRm6So2avqoYAk=
-github.com/containerd/containerd v1.7.24 h1:zxszGrGjrra1yYJW/6rhm9cJ1ZQ8rkKBR48brqsa7nA=
-github.com/containerd/containerd v1.7.24/go.mod h1:7QUzfURqZWCZV7RLNEn1XjUCQLEf0bkaK4GjUaZehxw=
-github.com/containerd/containerd/api v1.7.19 h1:VWbJL+8Ap4Ju2mx9c9qS1uFSB1OVYr5JJrW2yT5vFoA=
-github.com/containerd/containerd/api v1.7.19/go.mod h1:fwGavl3LNwAV5ilJ0sbrABL44AQxmNjDRcwheXDb6Ig=
+github.com/containerd/containerd/api v1.8.0 h1:hVTNJKR8fMc/2Tiw60ZRijntNMd1U+JVMyTRdsD2bS0=
+github.com/containerd/containerd/api v1.8.0/go.mod h1:dFv4lt6S20wTu/hMcP4350RL87qPWLVa/OHOwmmdnYc=
+github.com/containerd/containerd/v2 v2.0.2 h1:GmH/tRBlTvrXOLwSpWE2vNAm8+MqI6nmxKpKBNKY8Wc=
+github.com/containerd/containerd/v2 v2.0.2/go.mod h1:wIqEvQ/6cyPFUGJ5yMFanspPabMLor+bF865OHvNTTI=
 github.com/containerd/continuity v0.4.5 h1:ZRoN1sXq9u7V6QoHMcVWGhOwDFqZ4B9i5H6un1Wh0x4=
 github.com/containerd/continuity v0.4.5/go.mod h1:/lNJvtJKUQStBzpVQ1+rasXO1LAWtUQssk28EZvJ3nE=
-github.com/containerd/errdefs v0.3.0 h1:FSZgGOeK4yuT/+DnF07/Olde/q4KBoMsaamhXxIMDp4=
-github.com/containerd/errdefs v0.3.0/go.mod h1:+YBYIdtsnF4Iw6nWZhJcqGSg/dwvV7tyJ/kCkyJ2k+M=
+github.com/containerd/errdefs v1.0.0 h1:tg5yIfIlQIrxYtu9ajqY42W3lpS19XqdxRQeEwYG8PI=
+github.com/containerd/errdefs v1.0.0/go.mod h1:+YBYIdtsnF4Iw6nWZhJcqGSg/dwvV7tyJ/kCkyJ2k+M=
+github.com/containerd/errdefs/pkg v0.3.0 h1:9IKJ06FvyNlexW690DXuQNx2KA2cUJXx151Xdx3ZPPE=
+github.com/containerd/errdefs/pkg v0.3.0/go.mod h1:NJw6s9HwNuRhnjJhM7pylWwMyAkmCQvQ4GpJHEqRLVk=
 github.com/containerd/fifo v1.1.0 h1:4I2mbh5stb1u6ycIABlBw9zgtlK8viPI9QkQNRQEEmY=
 github.com/containerd/fifo v1.1.0/go.mod h1:bmC4NWMbXlt2EZ0Hc7Fx7QzTFxgPID13eH0Qu+MAb2o=
 github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I=
 github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo=
-github.com/containerd/nydus-snapshotter v0.14.0 h1:6/eAi6d7MjaeLLuMO8Udfe5GVsDudmrDNO4SGETMBco=
-github.com/containerd/nydus-snapshotter v0.14.0/go.mod h1:TT4jv2SnIDxEBu4H2YOvWQHPOap031ydTaHTuvc5VQk=
-github.com/containerd/platforms v0.2.1 h1:zvwtM3rz2YHPQsF2CHYM8+KtB5dvhISiXh5ZpSBQv6A=
-github.com/containerd/platforms v0.2.1/go.mod h1:XHCb+2/hzowdiut9rkudds9bE5yJ7npe7dG/wG+uFPw=
-github.com/containerd/stargz-snapshotter v0.15.1 h1:fpsP4kf/Z4n2EYnU0WT8ZCE3eiKDwikDhL6VwxIlgeA=
-github.com/containerd/stargz-snapshotter/estargz v0.15.1 h1:eXJjw9RbkLFgioVaTG+G/ZW/0kEe2oEKCdS/ZxIyoCU=
-github.com/containerd/stargz-snapshotter/estargz v0.15.1/go.mod h1:gr2RNwukQ/S9Nv33Lt6UC7xEx58C+LHRdoqbEKjz1Kk=
-github.com/containerd/ttrpc v1.2.5 h1:IFckT1EFQoFBMG4c3sMdT8EP3/aKfumK1msY+Ze4oLU=
-github.com/containerd/ttrpc v1.2.5/go.mod h1:YCXHsb32f+Sq5/72xHubdiJRQY9inL4a4ZQrAbN1q9o=
+github.com/containerd/nydus-snapshotter v0.15.0 h1:RqZRs1GPeM6T3wmuxJV9u+2Rg4YETVMwTmiDeX+iWC8=
+github.com/containerd/nydus-snapshotter v0.15.0/go.mod h1:biq0ijpeZe0I5yZFSJyHzFSjjRZQ7P7y/OuHyd7hYOw=
+github.com/containerd/platforms v1.0.0-rc.1 h1:83KIq4yy1erSRgOVHNk1HYdPvzdJ5CnsWaRoJX4C41E=
+github.com/containerd/platforms v1.0.0-rc.1/go.mod h1:J71L7B+aiM5SdIEqmd9wp6THLVRzJGXfNuWCZCllLA4=
+github.com/containerd/plugin v1.0.0 h1:c8Kf1TNl6+e2TtMHZt+39yAPDbouRH9WAToRjex483Y=
+github.com/containerd/plugin v1.0.0/go.mod h1:hQfJe5nmWfImiqT1q8Si3jLv3ynMUIBB47bQ+KexvO8=
+github.com/containerd/stargz-snapshotter v0.16.3 h1:zbQMm8dRuPHEOD4OqAYGajJJUwCeUzt4j7w9Iaw58u4=
+github.com/containerd/stargz-snapshotter/estargz v0.16.3 h1:7evrXtoh1mSbGj/pfRccTampEyKpjpOnS3CyiV1Ebr8=
+github.com/containerd/stargz-snapshotter/estargz v0.16.3/go.mod h1:uyr4BfYfOj3G9WBVE8cOlQmXAbPN9VEQpBBeJIuOipU=
+github.com/containerd/ttrpc v1.2.7 h1:qIrroQvuOL9HQ1X6KHe2ohc7p+HP/0VE6XPU7elJRqQ=
+github.com/containerd/ttrpc v1.2.7/go.mod h1:YCXHsb32f+Sq5/72xHubdiJRQY9inL4a4ZQrAbN1q9o=
 github.com/containerd/typeurl/v2 v2.2.3 h1:yNA/94zxWdvYACdYO8zofhrTVuQY73fFU1y++dYSw40=
 github.com/containerd/typeurl/v2 v2.2.3/go.mod h1:95ljDnPfD3bAbDJRugOiShd/DlAAsxGtUBhJxIn7SCk=
 github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
-github.com/cpuguy83/go-md2man/v2 v2.0.5 h1:ZtcqGrnekaHpVLArFSe4HK5DoKx1T0rq2DwVB0alcyc=
-github.com/cpuguy83/go-md2man/v2 v2.0.5/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
+github.com/cpuguy83/go-md2man/v2 v2.0.6 h1:XJtiaUW6dEEqVuZiMTn1ldk455QWwEIsMIJlo5vtkx0=
+github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
 github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
-github.com/creack/pty v1.1.21 h1:1/QdRyBaHHJP61QkWMXlOIBfsgdDeeKfK8SYVUWJKf0=
-github.com/creack/pty v1.1.21/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4=
+github.com/creack/pty v1.1.24 h1:bJrF4RRfyJnbTJqzRLHzcGaZK1NeM5kTC9jGgovnR1s=
+github.com/creack/pty v1.1.24/go.mod h1:08sCNb52WyoAwi2QDyzUCTgcvVFhUzewun7wtTfvcwE=
 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/denisenkom/go-mssqldb v0.0.0-20191128021309-1d7a30a10f73/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU=
 github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk=
 github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
-github.com/docker/cli v27.4.0-rc.2+incompatible h1:A0GZwegDlt2wdt3tpmrUzkVOZmbhvd7i05wPSf7Oo74=
-github.com/docker/cli v27.4.0-rc.2+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
-github.com/docker/cli-docs-tool v0.8.0 h1:YcDWl7rQJC3lJ7WVZRwSs3bc9nka97QLWfyJQli8yJU=
-github.com/docker/cli-docs-tool v0.8.0/go.mod h1:8TQQ3E7mOXoYUs811LiPdUnAhXrcVsBIrW21a5pUbdk=
+github.com/docker/cli v28.0.0-rc.2+incompatible h1:2N1dpr3qtlJwIQpqXm7oNwWNAUGzpKlsCeJ32ejvpTk=
+github.com/docker/cli v28.0.0-rc.2+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
+github.com/docker/cli-docs-tool v0.9.0 h1:CVwQbE+ZziwlPqrJ7LRyUF6GvCA+6gj7MTCsayaK9t0=
+github.com/docker/cli-docs-tool v0.9.0/go.mod h1:ClrwlNW+UioiRyH9GiAOe1o3J/TsY3Tr1ipoypjAUtc=
 github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
 github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk=
 github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
-github.com/docker/docker v27.4.0-rc.2+incompatible h1:9OJjVGtelk/zGC3TyKweJ29b9Axzh0s/0vtU4mneumE=
-github.com/docker/docker v27.4.0-rc.2+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/docker v28.0.0-rc.2+incompatible h1:p+Ri+C0mmbPkhYVD9Sxnp/TnNnZoQWEj/EwOC465Uq4=
+github.com/docker/docker v28.0.0-rc.2+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
 github.com/docker/docker-credential-helpers v0.8.2 h1:bX3YxiGzFP5sOXWc3bTPEXdEaZSeVMrFgOr3T+zrFAo=
 github.com/docker/docker-credential-helpers v0.8.2/go.mod h1:P3ci7E3lwkZg6XiHdRKft1KckHiO9a2rNtyFbZ/ry9M=
 github.com/docker/go v1.5.1-1.0.20160303222718-d30aec9fd63c h1:lzqkGL9b3znc+ZUgi7FlLnqjQhcXxkNM/quxIjBVMD0=
@@ -141,8 +138,6 @@ github.com/docker/go v1.5.1-1.0.20160303
 github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
 github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c=
 github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc=
-github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c h1:+pKlWGMw7gf6bQ+oDZB4KHQFypsfjYlq/C4rfL7D3g8=
-github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA=
 github.com/docker/go-metrics v0.0.0-20180209012529-399ea8c73916/go.mod h1:/u0gXw0Gay3ceNrsHubL3BtdOL2fHf93USgMTe0W5dI=
 github.com/docker/go-metrics v0.0.1 h1:AgB/0SvBxihN0X8OR4SjsblXkbMvalQ8cjmtKQ2rQV8=
 github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw=
@@ -153,19 +148,20 @@ github.com/docker/libtrust v0.0.0-201607
 github.com/dvsekhvalnov/jose2go v0.0.0-20170216131308-f21a8cedbbae/go.mod h1:7BvyPhdbLxMXIYTFPLsyJRFMsKmOZnQmzh6Gb+uquuM=
 github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g=
 github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
-github.com/envoyproxy/protoc-gen-validate v1.0.4 h1:gVPz/FMfvh57HdSJQyvBtF00j8JU4zdyUgIUNhlgg0A=
-github.com/envoyproxy/protoc-gen-validate v1.0.4/go.mod h1:qys6tmnRsYrQqIhm2bvKZH4Blx/1gTIZ2UKVY1M+Yew=
 github.com/erikstmartin/go-testdb v0.0.0-20160219214506-8d10e4a1bae5/go.mod h1:a2zkGnVExMxdzMo3M0Hi/3sEU+cWnZpSni0O6/Yb/P0=
 github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
 github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
 github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
+github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
+github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
 github.com/fvbommel/sortorder v1.0.1 h1:dSnXLt4mJYH25uDDGa3biZNQsozaUWDSWeKJ0qqFfzE=
 github.com/fvbommel/sortorder v1.0.1/go.mod h1:uk88iVf1ovNn1iLfgUVU2F9o5eO30ui720w+kxuqRs0=
+github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E=
+github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ=
 github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
 github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
 github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
 github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
-github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
 github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
 github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
 github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
@@ -174,13 +170,14 @@ github.com/go-openapi/jsonpointer v0.19.
 github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs=
 github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE=
 github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k=
-github.com/go-openapi/swag v0.22.3 h1:yMBqmnQ0gyZvEb/+KzuWZOXgllrXT4SADYbvDaXHv/g=
 github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
+github.com/go-openapi/swag v0.22.4 h1:QLMzNJnMGPRNDCbySlcj1x01tzU8/9LTTL9hZZZogBU=
+github.com/go-openapi/swag v0.22.4/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
 github.com/go-sql-driver/mysql v1.3.0 h1:pgwjLi/dvffoP9aabwkT3AKpXQM93QARkjFhDDqC1UE=
 github.com/go-sql-driver/mysql v1.3.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
 github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
-github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI=
-github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls=
+github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
+github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
 github.com/go-test/deep v1.0.3 h1:ZrJSEWsXzPOxaZnFteGEfooLba+ju3FYIbOrS+rQd68=
 github.com/go-test/deep v1.0.3/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA=
 github.com/go-viper/mapstructure/v2 v2.0.0 h1:dhn8MZ1gZ0mzeodTG3jt5Vj/o87xZKuNAprG2mQfMfc=
@@ -213,8 +210,8 @@ github.com/google/go-cmp v0.6.0/go.mod h
 github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
 github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
 github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
-github.com/google/pprof v0.0.0-20240424215950-a892ee059fd6 h1:k7nVchz72niMH6YLQNvHSdIE7iqsQxK1P41mySCvssg=
-github.com/google/pprof v0.0.0-20240424215950-a892ee059fd6/go.mod h1:kf6iHlnVGwgKolg33glAes7Yg/8iWP8ukqeldJSO7jw=
+github.com/google/pprof v0.0.0-20240525223248-4bfdf5a9a2af h1:kmjWCqn2qkEml422C2Rrd27c3VGxi6a/6HNq8QmHRKM=
+github.com/google/pprof v0.0.0-20240525223248-4bfdf5a9a2af/go.mod h1:K1liHPHnj73Fdn/EKuT8nrFqBihUSKXoLYU0BuatOYo=
 github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4=
 github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ=
 github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
@@ -223,11 +220,10 @@ github.com/google/uuid v1.6.0/go.mod h1:
 github.com/gorilla/mux v1.7.0/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
 github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY=
 github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ=
-github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
 github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
 github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
-github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 h1:bkypFPDjIYGfCYD5mRBvpqxfYX1YCS1PXdKYWi8FsN0=
-github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0/go.mod h1:P+Lt/0by1T8bfcF3z737NnSbmxQAppXMRziHUxPOC8k=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0 h1:asbCHRVmodnJTuQ3qamDwqVOIjwqUPTYmYuemVOx+Ys=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0/go.mod h1:ggCgvZ2r7uOoQjOyu2Y1NhHmEPPzzuhWgcza5M1Ji1I=
 github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed h1:5upAirOpQc1Q53c0bnx2ufif5kANL7bfZWcc6VJWJd8=
 github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed/go.mod h1:tMWxXQ9wFIaZeTI9F+hmhFiGpFmhOHzyShyFUhRm0H4=
 github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
@@ -235,12 +231,12 @@ github.com/hashicorp/errwrap v1.1.0 h1:O
 github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
 github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ=
 github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48=
-github.com/hashicorp/go-cty-funcs v0.0.0-20230405223818-a090f58aa992 h1:fYOrSfO5C9PmFGtmRWSYGqq52SOoE2dXMtAn2Xzh1LQ=
-github.com/hashicorp/go-cty-funcs v0.0.0-20230405223818-a090f58aa992/go.mod h1:Abjk0jbRkDaNCzsRhOv2iDCofYpX1eVsjozoiK63qLA=
+github.com/hashicorp/go-cty-funcs v0.0.0-20241120183456-c51673e0b3dd h1:nwSMaLX+rf/ZPHTJHWO9K73be04SritSKvKuvpBvC2A=
+github.com/hashicorp/go-cty-funcs v0.0.0-20241120183456-c51673e0b3dd/go.mod h1:Abjk0jbRkDaNCzsRhOv2iDCofYpX1eVsjozoiK63qLA=
 github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
 github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
-github.com/hashicorp/hcl/v2 v2.20.1 h1:M6hgdyz7HYt1UN9e61j+qKJBqR3orTWbI1HKBJEdxtc=
-github.com/hashicorp/hcl/v2 v2.20.1/go.mod h1:TZDqQ4kNKCbh1iJp99FdPiUaVDDUPivbqxZulxDYqL4=
+github.com/hashicorp/hcl/v2 v2.23.0 h1:Fphj1/gCylPxHutVSEOf2fBOh1VE4AuLV7+kbJf3qos=
+github.com/hashicorp/hcl/v2 v2.23.0/go.mod h1:62ZYHrXgPoX8xBnzl8QzbWq4dyDsDtfCRgIq1rbJEvA=
 github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
 github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4=
 github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY=
@@ -301,16 +297,16 @@ github.com/mitchellh/hashstructure/v2 v2
 github.com/mitchellh/mapstructure v0.0.0-20150613213606-2caf8efc9366/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
 github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
 github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
-github.com/moby/buildkit v0.18.0 h1:KSelhNINJcNA3FCWBbGCytvicjP+kjU5kZlZhkTUkVo=
-github.com/moby/buildkit v0.18.0/go.mod h1:vCR5CX8NGsPTthTg681+9kdmfvkvqJBXEv71GZe5msU=
+github.com/moby/buildkit v0.20.0 h1:aF5RujjQ310Pn6SLL/wQYIrSsPXy0sQ5KvWifwq1h8Y=
+github.com/moby/buildkit v0.20.0/go.mod h1:HYFUIK+iGDRxRgdphZ9Nv0y1Fz7mv0HrU7xZoXx217E=
 github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0=
 github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo=
 github.com/moby/locker v1.0.1 h1:fOXqR41zeveg4fFODix+1Ch4mj/gT0NE1XJbp/epuBg=
 github.com/moby/locker v1.0.1/go.mod h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQppc=
 github.com/moby/patternmatcher v0.6.0 h1:GmP9lR19aU5GqSSFko+5pRqHi+Ohk1O69aFiKkVGiPk=
 github.com/moby/patternmatcher v0.6.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc=
-github.com/moby/spdystream v0.2.0 h1:cjW1zVyyoiM0T7b6UoySUFqzXMoqRckQtXwGPiBhOM8=
-github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c=
+github.com/moby/spdystream v0.4.0 h1:Vy79D6mHeJJjiPdFEL2yku1kl0chZpJfZcPpb16BRl8=
+github.com/moby/spdystream v0.4.0/go.mod h1:xBAYlnt/ay+11ShkdFKNAG7LsyK/tmNBVvVOwrfMgdI=
 github.com/moby/sys/mountinfo v0.7.2 h1:1shs6aH5s4o5H2zQLn796ADW1wMrIwHsyJ2v9KouLrg=
 github.com/moby/sys/mountinfo v0.7.2/go.mod h1:1YOa8w8Ih7uW0wALDUgT1dTTSBrZ+HiBLGws92L2RU4=
 github.com/moby/sys/sequential v0.6.0 h1:qrx7XFUd/5DxtqcoH1h438hF5TmOvzC/lspjy7zgvCU=
@@ -321,8 +317,8 @@ github.com/moby/sys/user v0.3.0 h1:9ni5D
 github.com/moby/sys/user v0.3.0/go.mod h1:bG+tYYYJgaMtRKgEmuueC0hJEAZWwtIbZTB+85uoHjs=
 github.com/moby/sys/userns v0.1.0 h1:tVLXkFOxVu9A64/yh59slHVv9ahO9UIev4JZusOLG/g=
 github.com/moby/sys/userns v0.1.0/go.mod h1:IHUYgu/kao6N8YZlp9Cf444ySSvCmDlmzUcYfDHOl28=
-github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0=
-github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y=
+github.com/moby/term v0.5.2 h1:6qk3FJAFDs6i/q3W/pQ97SX192qKfZgGjCQqfCJkgzQ=
+github.com/moby/term v0.5.2/go.mod h1:d3djjFCrjnB+fl8NJux+EJzu0msscUP+f8it8hPkFLc=
 github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
 github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
 github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
@@ -341,12 +337,12 @@ github.com/niemeyer/pretty v0.0.0-202002
 github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
 github.com/onsi/ginkgo v1.12.0 h1:Iw5WCbBcaAAd0fpRb1c9r5YCylv4XDoCSigm1zLevwU=
 github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0HfGg=
-github.com/onsi/ginkgo/v2 v2.13.0 h1:0jY9lJquiL8fcf3M4LAXN5aMlS/b2BV86HFFPCPMgE4=
-github.com/onsi/ginkgo/v2 v2.13.0/go.mod h1:TE309ZR8s5FsKKpuB1YAQYBzCaAfUgatB/xlT/ETL/o=
+github.com/onsi/ginkgo/v2 v2.19.0 h1:9Cnnf7UHo57Hy3k6/m5k3dRfGTMXGvxhHFvkDTCTpvA=
+github.com/onsi/ginkgo/v2 v2.19.0/go.mod h1:rlwLi9PilAFJ8jCg9UE1QP6VBpd6/xj3SRC0d6TU0To=
 github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
 github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA=
-github.com/onsi/gomega v1.29.0 h1:KIA/t2t5UBzoirT4H9tsML45GEbo3ouUnBHsCfD2tVg=
-github.com/onsi/gomega v1.29.0/go.mod h1:9sxs+SwGrKI0+PWe4Fxa9tFQQBG5xSsSbMXOI8PPpoQ=
+github.com/onsi/gomega v1.19.0 h1:4ieX6qQjPP/BfC3mpsAtIGGlxTWPeA3Inl/7DtXw1tw=
+github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro=
 github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
 github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
 github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
@@ -355,12 +351,16 @@ github.com/opencontainers/image-spec v1.
 github.com/opencontainers/image-spec v1.1.0/go.mod h1:W4s4sFTMaBeK1BQLXbG4AdM2szdn85PY75RI83NrTrM=
 github.com/opencontainers/runtime-spec v1.2.0 h1:z97+pHb3uELt/yiAWD691HNHQIF07bE7dzrbT927iTk=
 github.com/opencontainers/runtime-spec v1.2.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
-github.com/opencontainers/selinux v1.11.0 h1:+5Zbo97w3Lbmb3PeqQtpmTkMwsW5nRI3YaLpt7tQ7oU=
-github.com/opencontainers/selinux v1.11.0/go.mod h1:E5dMC3VPuVvVHDYmi78qvhJp8+M586T4DlDRYpFkyec=
+github.com/opencontainers/runtime-tools v0.9.1-0.20221107090550-2e043c6bd626 h1:DmNGcqH3WDbV5k8OJ+esPWbqUOX5rMLR2PMvziDMJi0=
+github.com/opencontainers/runtime-tools v0.9.1-0.20221107090550-2e043c6bd626/go.mod h1:BRHJJd0E+cx42OybVYSgUvZmU0B8P9gZuRXlZUP7TKI=
+github.com/opencontainers/selinux v1.11.1 h1:nHFvthhM0qY8/m+vfhJylliSshm8G1jJ2jDMcgULaH8=
+github.com/opencontainers/selinux v1.11.1/go.mod h1:E5dMC3VPuVvVHDYmi78qvhJp8+M586T4DlDRYpFkyec=
 github.com/opentracing/opentracing-go v1.1.0 h1:pWlfV3Bxv7k65HYwkikxat0+s3pV4bsqf19k25Ur8rU=
 github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
 github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8=
 github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
+github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ=
+github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU=
 github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
 github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
 github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
@@ -373,8 +373,8 @@ github.com/prometheus/client_golang v0.9
 github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
 github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
 github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g=
-github.com/prometheus/client_golang v1.20.2 h1:5ctymQzZlyOON1666svgwn3s6IKWgfbjsejTMiXIyjg=
-github.com/prometheus/client_golang v1.20.2/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE=
+github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+bR9r+8l63Y=
+github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE=
 github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
 github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
 github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
@@ -393,8 +393,8 @@ github.com/prometheus/procfs v0.15.1 h1:
 github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
 github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY=
 github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
-github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
-github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
+github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
+github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
 github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
 github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
 github.com/secure-systems-lab/go-securesystemslib v0.4.0 h1:b23VGrQhTA8cN2CbBw7/FulN9fTtqYUdS5+Oxzt+DUE=
@@ -435,23 +435,29 @@ github.com/stretchr/testify v1.7.0/go.mo
 github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
 github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
 github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
-github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
-github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
+github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
+github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
+github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 h1:kdXcSzyDtseVEc4yCz2qF8ZrQvIDBJLl4S1c3GCXmoI=
+github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
 github.com/theupdateframework/notary v0.7.0 h1:QyagRZ7wlSpjT5N2qQAh/pN+DVqgekv4DzbAiAiEL3c=
 github.com/theupdateframework/notary v0.7.0/go.mod h1:c9DRxcmhHmVLDay4/2fUYdISnHqbFDGRSlXPO0AhYWw=
 github.com/tonistiigi/dchapes-mode v0.0.0-20241001053921-ca0759fec205 h1:eUk79E1w8yMtXeHSzjKorxuC8qJOnyXQnLaJehxpJaI=
 github.com/tonistiigi/dchapes-mode v0.0.0-20241001053921-ca0759fec205/go.mod h1:3Iuxbr0P7D3zUzBMAZB+ois3h/et0shEz0qApgHYGpY=
-github.com/tonistiigi/fsutil v0.0.0-20241121093142-31cf1f437184 h1:RgyoSI38Y36zjQaszel/0RAcIehAnjA1B0RiUV9SDO4=
-github.com/tonistiigi/fsutil v0.0.0-20241121093142-31cf1f437184/go.mod h1:Dl/9oEjK7IqnjAm21Okx/XIxUCFJzvh+XdVHUlBwXTw=
+github.com/tonistiigi/fsutil v0.0.0-20250113203817-b14e27f4135a h1:EfGw4G0x/8qXWgtcZ6KVaPS+wpWOQMaypczzP8ojkMY=
+github.com/tonistiigi/fsutil v0.0.0-20250113203817-b14e27f4135a/go.mod h1:Dl/9oEjK7IqnjAm21Okx/XIxUCFJzvh+XdVHUlBwXTw=
 github.com/tonistiigi/go-csvvalue v0.0.0-20240710180619-ddb21b71c0b4 h1:7I5c2Ig/5FgqkYOh/N87NzoyI9U15qUPXhDD8uCupv8=
 github.com/tonistiigi/go-csvvalue v0.0.0-20240710180619-ddb21b71c0b4/go.mod h1:278M4p8WsNh3n4a1eqiFcV2FGk7wE5fwUpUom9mK9lE=
+github.com/tonistiigi/jaeger-ui-rest v0.0.0-20250211190051-7d4944a45bb6 h1:RT/a0RvdX84iwtOrUK45+wjcNpaG+hS7n7XFYqj4axg=
+github.com/tonistiigi/jaeger-ui-rest v0.0.0-20250211190051-7d4944a45bb6/go.mod h1:3Ez1Paeg+0Ghu3KwpEGC1HgZ4CHDlg+Ez/5Baeomk54=
 github.com/tonistiigi/units v0.0.0-20180711220420-6950e57a87ea h1:SXhTLE6pb6eld/v/cCndK0AMpt1wiVFb/YYmqB3/QG0=
 github.com/tonistiigi/units v0.0.0-20180711220420-6950e57a87ea/go.mod h1:WPnis/6cRcDZSUvVmezrxJPkiO87ThFYsoUiMwWNDJk=
 github.com/tonistiigi/vt100 v0.0.0-20240514184818-90bafcd6abab h1:H6aJ0yKQ0gF49Qb2z5hI1UHxSQt4JMyxebFR15KnApw=
 github.com/tonistiigi/vt100 v0.0.0-20240514184818-90bafcd6abab/go.mod h1:ulncasL3N9uLrVann0m+CDlJKWsIAP34MPcOJF6VRvc=
-github.com/vbatts/tar-split v0.11.5 h1:3bHCTIheBm1qFTcgh9oPu+nNBtX+XJIupG/vacinCts=
-github.com/vbatts/tar-split v0.11.5/go.mod h1:yZbwRsSeGjusneWgA781EKej9HF8vme8okylkAeNKLk=
+github.com/vbatts/tar-split v0.11.6 h1:4SjTW5+PU11n6fZenf2IPoV8/tz3AaYHMWjf23envGs=
+github.com/vbatts/tar-split v0.11.6/go.mod h1:dqKNtesIOr2j2Qv3W/cHjnvk9I8+G7oAkFDFN6TCBEI=
 github.com/vmihailenco/msgpack v3.3.3+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk=
+github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
+github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
 github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
 github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo=
 github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
@@ -462,38 +468,40 @@ github.com/xeipuuv/gojsonschema v1.2.0/g
 github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
 github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
 github.com/zclconf/go-cty v1.4.0/go.mod h1:nHzOclRkoj++EU9ZjSrZvRG0BXIWt8c7loYc0qXAFGQ=
-github.com/zclconf/go-cty v1.14.4 h1:uXXczd9QDGsgu0i/QFR/hzI5NYCHLf6NQw/atrbnhq8=
-github.com/zclconf/go-cty v1.14.4/go.mod h1:VvMs5i0vgZdhYawQNq5kePSpLAoz8u1xvZgrPIxfnZE=
-github.com/zclconf/go-cty-debug v0.0.0-20191215020915-b22d67c1ba0b h1:FosyBZYxY34Wul7O/MSKey3txpPYyCqVO5ZyceuQJEI=
-github.com/zclconf/go-cty-debug v0.0.0-20191215020915-b22d67c1ba0b/go.mod h1:ZRKQfBXbGkpdV6QMzT3rU1kSTAnfu1dO8dPKjYprgj8=
+github.com/zclconf/go-cty v1.16.0 h1:xPKEhst+BW5D0wxebMZkxgapvOE/dw7bFTlgSc9nD6w=
+github.com/zclconf/go-cty v1.16.0/go.mod h1:VvMs5i0vgZdhYawQNq5kePSpLAoz8u1xvZgrPIxfnZE=
+github.com/zclconf/go-cty-debug v0.0.0-20240509010212-0d6042c53940 h1:4r45xpDWB6ZMSMNJFMOjqrGHynW3DIBuR2H9j0ug+Mo=
+github.com/zclconf/go-cty-debug v0.0.0-20240509010212-0d6042c53940/go.mod h1:CmBdvvj3nqzfzJ6nTCIwDTPZ56aVGvDrmztiO5g3qrM=
 go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
 go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
-go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1 h1:SpGay3w+nEwMpfVnbqOLH5gY52/foP8RE8UzTZ1pdSE=
-go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1/go.mod h1:4UoMYEZOC0yN/sPGH76KPkkU7zgiEWYWL9vwmbnTJPE=
-go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.46.1 h1:gbhw/u49SS3gkPWiYweQNJGm/uJN5GkI/FrosxSHT7A=
-go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.46.1/go.mod h1:GnOaBaFQ2we3b9AGWJpsBa7v1S5RlQzlC3O7dRMxZhM=
-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 h1:4K4tsIXefpVJtvA/8srF4V4y0akAoPHkIslgAkjixJA=
-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0/go.mod h1:jjdQuTGVsXV4vSs+CJ2qYDeDPf9yIJV23qlIzBm73Vg=
-go.opentelemetry.io/otel v1.28.0 h1:/SqNcYk+idO0CxKEUOtKQClMK/MimZihKYMruSMViUo=
-go.opentelemetry.io/otel v1.28.0/go.mod h1:q68ijF8Fc8CnMHKyzqL6akLO46ePnjkgfIMIjUIX9z4=
-go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v0.44.0 h1:jd0+5t/YynESZqsSyPz+7PAFdEop0dlN0+PkyHYo8oI=
-go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v0.44.0/go.mod h1:U707O40ee1FpQGyhvqnzmCJm1Wh6OX6GGBVn0E6Uyyk=
-go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v0.44.0 h1:bflGWrfYyuulcdxf14V6n9+CoQcu5SAAdHmDPAJnlps=
-go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v0.44.0/go.mod h1:qcTO4xHAxZLaLxPd60TdE88rxtItPHgHWqOhOGRr0as=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 h1:3Q/xZUyC1BBkualc9ROb4G8qkH90LXEIICcs5zv1OYY=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0/go.mod h1:s75jGIWA9OfCMzF0xr+ZgfrB5FEbbV7UuYo32ahUiFI=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0 h1:R3X6ZXmNPRR8ul6i3WgFURCHzaXjHdm0karRG/+dj3s=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0/go.mod h1:QWFXnDavXWwMx2EEcZsf3yxgEKAqsxQ+Syjp+seyInw=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.28.0 h1:j9+03ymgYhPKmeXGk5Zu+cIZOlVzd9Zv7QIiyItjFBU=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.28.0/go.mod h1:Y5+XiUG4Emn1hTfciPzGPJaSI+RpDts6BnCIir0SLqk=
-go.opentelemetry.io/otel/metric v1.28.0 h1:f0HGvSl1KRAU1DLgLGFjrwVyismPlnuU6JD6bOeuA5Q=
-go.opentelemetry.io/otel/metric v1.28.0/go.mod h1:Fb1eVBFZmLVTMb6PPohq3TO9IIhUisDsbJoL/+uQW4s=
-go.opentelemetry.io/otel/sdk v1.28.0 h1:b9d7hIry8yZsgtbmM0DKyPWMMUMlK9NEKuIG4aBqWyE=
-go.opentelemetry.io/otel/sdk v1.28.0/go.mod h1:oYj7ClPUA7Iw3m+r7GeEjz0qckQRJK2B8zjcZEfu7Pg=
-go.opentelemetry.io/otel/sdk/metric v1.28.0 h1:OkuaKgKrgAbYrrY0t92c+cC+2F6hsFNnCQArXCKlg08=
-go.opentelemetry.io/otel/sdk/metric v1.28.0/go.mod h1:cWPjykihLAPvXKi4iZc1dpER3Jdq2Z0YLse3moQUCpg=
-go.opentelemetry.io/otel/trace v1.28.0 h1:GhQ9cUuQGmNDd5BTCP2dAvv75RdMxEfTmYejp+lkx9g=
-go.opentelemetry.io/otel/trace v1.28.0/go.mod h1:jPyXzNPg6da9+38HEwElrQiHlVMTnVfM3/yv2OlIHaI=
+go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.56.0 h1:yMkBS9yViCc7U7yeLzJPM2XizlfdVvBRSmsQDWu6qc0=
+go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.56.0/go.mod h1:n8MR6/liuGB5EmTETUBeU5ZgqMOlqKRxUaqPQBOANZ8=
+go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.56.0 h1:4BZHA+B1wXEQoGNHxW8mURaLhcdGwvRnmhGbm+odRbc=
+go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.56.0/go.mod h1:3qi2EEwMgB4xnKgPLqsDP3j9qxnHDZeHsnAxfjQqTko=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.56.0 h1:UP6IpuHFkUgOQL9FFQFrZ+5LiwhhYRbi7VZSIx6Nj5s=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.56.0/go.mod h1:qxuZLtbq5QDtdeSHsS7bcf6EH6uO6jUAgk764zd3rhM=
+go.opentelemetry.io/otel v1.31.0 h1:NsJcKPIW0D0H3NgzPDHmo0WW6SptzPdqg/L1zsIm2hY=
+go.opentelemetry.io/otel v1.31.0/go.mod h1:O0C14Yl9FgkjqcCZAsE053C13OaddMYr/hz6clDkEJE=
+go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.31.0 h1:FZ6ei8GFW7kyPYdxJaV2rgI6M+4tvZzhYsQ2wgyVC08=
+go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.31.0/go.mod h1:MdEu/mC6j3D+tTEfvI15b5Ci2Fn7NneJ71YMoiS3tpI=
+go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.31.0 h1:ZsXq73BERAiNuuFXYqP4MR5hBrjXfMGSO+Cx7qoOZiM=
+go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.31.0/go.mod h1:hg1zaDMpyZJuUzjFxFsRYBoccE86tM9Uf4IqNMUxvrY=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.31.0 h1:K0XaT3DwHAcV4nKLzcQvwAgSyisUghWoY20I7huthMk=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.31.0/go.mod h1:B5Ki776z/MBnVha1Nzwp5arlzBbE3+1jk+pGmaP5HME=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.31.0 h1:FFeLy03iVTXP6ffeN2iXrxfGsZGCjVx0/4KlizjyBwU=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.31.0/go.mod h1:TMu73/k1CP8nBUpDLc71Wj/Kf7ZS9FK5b53VapRsP9o=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.31.0 h1:lUsI2TYsQw2r1IASwoROaCnjdj2cvC2+Jbxvk6nHnWU=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.31.0/go.mod h1:2HpZxxQurfGxJlJDblybejHB6RX6pmExPNe517hREw4=
+go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.31.0 h1:UGZ1QwZWY67Z6BmckTU+9Rxn04m2bD3gD6Mk0OIOCPk=
+go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.31.0/go.mod h1:fcwWuDuaObkkChiDlhEpSq9+X1C0omv+s5mBtToAQ64=
+go.opentelemetry.io/otel/metric v1.31.0 h1:FSErL0ATQAmYHUIzSezZibnyVlft1ybhy4ozRPcF2fE=
+go.opentelemetry.io/otel/metric v1.31.0/go.mod h1:C3dEloVbLuYoX41KpmAhOqNriGbA+qqH6PQ5E5mUfnY=
+go.opentelemetry.io/otel/sdk v1.31.0 h1:xLY3abVHYZ5HSfOg3l2E5LUj2Cwva5Y7yGxnSW9H5Gk=
+go.opentelemetry.io/otel/sdk v1.31.0/go.mod h1:TfRbMdhvxIIr/B2N2LQW2S5v9m3gOQ/08KsbbO5BPT0=
+go.opentelemetry.io/otel/sdk/metric v1.31.0 h1:i9hxxLJF/9kkvfHppyLL55aW7iIJz4JjxTeYusH7zMc=
+go.opentelemetry.io/otel/sdk/metric v1.31.0/go.mod h1:CRInTMVvNhUKgSAMbKyTMxqOBC0zgyxzW55lZzX43Y8=
+go.opentelemetry.io/otel/trace v1.31.0 h1:ffjsj1aRouKewfr85U2aGagJ46+MvodynlQ1HYdmJys=
+go.opentelemetry.io/otel/trace v1.31.0/go.mod h1:TXZkRk7SM2ZQLtR6eoAWQFIHPvzQ06FJAsO1tJg480A=
 go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0=
 go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8=
 go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
@@ -506,14 +514,14 @@ golang.org/x/crypto v0.0.0-2020030221094
 golang.org/x/crypto v0.0.0-20200422194213-44a606286825/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
 golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
 golang.org/x/crypto v0.0.0-20201117144127-c1f2f97bffc9/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
-golang.org/x/crypto v0.27.0 h1:GXm2NjJrPaiv/h1tb2UH8QfgC/hOf/+z0p6PT8o1w7A=
-golang.org/x/crypto v0.27.0/go.mod h1:1Xngt8kV6Dvbssa53Ziq6Eqn0HqbZi5Z6R0ZpwQzt70=
-golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 h1:e66Fs6Z+fZTbFBAxKfP3PALWBtpfqks2bwGcexMxgtk=
-golang.org/x/exp v0.0.0-20240909161429-701f63a606c0/go.mod h1:2TbTHSBQa924w8M6Xs1QcRcFwyucIwBGpK1p2f1YFFY=
+golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U=
+golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk=
+golang.org/x/exp v0.0.0-20241108190413-2d47ceb2692f h1:XdNn9LlyWAhLVp6P/i8QYBW+hlyhrhei9uErw2B5GJo=
+golang.org/x/exp v0.0.0-20241108190413-2d47ceb2692f/go.mod h1:D5SMRVC3C2/4+F/DB1wZsLRnSNimn2Sp/NPsCrsv8ak=
 golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
 golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.21.0 h1:vvrHzRwRfVKSiLrG+d4FMl/Qi4ukBCE6kZlTUkDYRT0=
-golang.org/x/mod v0.21.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY=
+golang.org/x/mod v0.22.0 h1:D4nJWe9zXqHOmWqj4VMOJhvzj7bEZg4wEYa759z1pH4=
+golang.org/x/mod v0.22.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY=
 golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
@@ -521,18 +529,18 @@ golang.org/x/net v0.0.0-20190613194153-d
 golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
 golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
 golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
-golang.org/x/net v0.29.0 h1:5ORfpBpCs4HzDYoodCDBbwHzdR5UrLBZ3sOnUJmFoHo=
-golang.org/x/net v0.29.0/go.mod h1:gLkgy8jTGERgjzMic6DS9+SP0ajcu6Xu3Orq/SpETg0=
-golang.org/x/oauth2 v0.21.0 h1:tsimM75w1tF/uws5rbeHzIWxEqElMehnc+iW793zsZs=
-golang.org/x/oauth2 v0.21.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
+golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I=
+golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4=
+golang.org/x/oauth2 v0.23.0 h1:PbgcYx2W7i4LvjJWEbf0ngHV6qJYr86PkAV3bXdLEbs=
+golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
 golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ=
-golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
+golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ=
+golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
 golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -545,42 +553,40 @@ golang.org/x/sys v0.0.0-20200930185726-f
 golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.26.0 h1:KHjCJyddX0LoSTb3J+vWpupP9p0oznkqVk/IfjymZbo=
-golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU=
+golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
 golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
-golang.org/x/term v0.24.0 h1:Mh5cbb+Zk2hqqXNO7S1iTjEphVL+jb8ZWaqh/g+JWkM=
-golang.org/x/term v0.24.0/go.mod h1:lOBK/LVxemqiMij05LGJ0tzNr8xlmwBRJ81PX6wVLH8=
+golang.org/x/term v0.27.0 h1:WP60Sv1nlK1T6SupCHbXzSaN0b9wUmsPoRS9b61A23Q=
+golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM=
 golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
 golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.18.0 h1:XvMDiNzPAl0jr17s6W9lcaIhGUfUORdGCNsuLmPG224=
-golang.org/x/text v0.18.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY=
+golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo=
+golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
 golang.org/x/time v0.6.0 h1:eTDhh4ZXt5Qf0augr54TN6suAUudPcawVZeIAPU7D4U=
 golang.org/x/time v0.6.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
 golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
 golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
 golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
 golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/tools v0.25.0 h1:oFU9pkj/iJgs+0DT+VMHrx+oBKs/LJMV+Uvg78sl+fE=
-golang.org/x/tools v0.25.0/go.mod h1:/vtpO8WL1N9cQC3FN5zPqb//fRXskFHbLKk4OW1Q7rg=
+golang.org/x/tools v0.27.0 h1:qEKojBykQkQ4EynWy4S8Weg69NumxKdn40Fce3uc/8o=
+golang.org/x/tools v0.27.0/go.mod h1:sUi0ZgbwW9ZPAq26Ekut+weQPR5eIM6GQLQ1Yjm1H0Q=
 golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
-google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80 h1:KAeGQVN3M9nD0/bQXnr/ClcEMJ968gUXJQ9pwfSynuQ=
-google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80/go.mod h1:cc8bqMqtv9gMOr0zHg2Vzff5ULhhL2IXP4sbcn32Dro=
-google.golang.org/genproto/googleapis/api v0.0.0-20240701130421-f6361c86f094 h1:0+ozOGcrp+Y8Aq8TLNN2Aliibms5LEzsq99ZZmAGYm0=
-google.golang.org/genproto/googleapis/api v0.0.0-20240701130421-f6361c86f094/go.mod h1:fJ/e3If/Q67Mj99hin0hMhiNyCRmt6BQ2aWIJshUSJw=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094 h1:BwIjyKYGsK9dMCBOorzRri8MQwmi7mT9rGHsCEinZkA=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094/go.mod h1:Ue6ibwXGpU+dqIcODieyLOcgj7z8+IcskoNIgZxtrFY=
+google.golang.org/genproto/googleapis/api v0.0.0-20241021214115-324edc3d5d38 h1:2oV8dfuIkM1Ti7DwXc0BJfnwr9csz4TDXI9EmiI+Rbw=
+google.golang.org/genproto/googleapis/api v0.0.0-20241021214115-324edc3d5d38/go.mod h1:vuAjtvlwkDKF6L1GQ0SokiRLCGFfeBUXWr/aFFkHACc=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20241021214115-324edc3d5d38 h1:zciRKQ4kBpFgpfC5QQCVtnnNAcLIqweL7plyZRQHVpI=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20241021214115-324edc3d5d38/go.mod h1:GX3210XPVPUjJbTUbvwI8f2IpZDMZuPJWDzDuebbviI=
 google.golang.org/grpc v1.0.5/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
-google.golang.org/grpc v1.66.3 h1:TWlsh8Mv0QI/1sIbs1W36lqRclxrmF+eFJ4DbI0fuhA=
-google.golang.org/grpc v1.66.3/go.mod h1:s3/l6xSSCURdVfAnL+TqCNMyTDAGN6+lZeVxnZR128Y=
+google.golang.org/grpc v1.69.4 h1:MF5TftSMkd8GLw/m0KM6V8CMOCY6NZ1NQDPGFgbTt4A=
+google.golang.org/grpc v1.69.4/go.mod h1:vyjdE6jLBI76dgpDojsFGNaHlxdjXN9ghpnd2o7JGZ4=
 google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.5.1 h1:F29+wU6Ee6qgu9TddPgooOdaqsxTMunOoj8KA5yuS5A=
 google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.5.1/go.mod h1:5KF+wpkbTSbGcR9zteSqZV6fqFOWBl4Yde8En8MryZA=
-google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA=
-google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
+google.golang.org/protobuf v1.35.2 h1:8Ar7bF+apOIoThw1EdZl0p1oWvMqTHmpA2fRTyZO8io=
+google.golang.org/protobuf v1.35.2/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
 gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U=
 gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
 gopkg.in/cenkalti/backoff.v2 v2.2.1 h1:eJ9UAg01/HIHG987TwxvnzK2MgxXq97YY6rYDpY9aII=
@@ -608,21 +614,25 @@ gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqL
 gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
 gotest.tools/v3 v3.4.0 h1:ZazjZUfuVeZGLAmlKKuyv3IKP5orXcwtOwDQH6YVr6o=
 gotest.tools/v3 v3.4.0/go.mod h1:CtbdzLSsqVhDgMtKsx03ird5YTGB3ar27v0u/yKBW5g=
-k8s.io/api v0.29.2 h1:hBC7B9+MU+ptchxEqTNW2DkUosJpp1P+Wn6YncZ474A=
-k8s.io/api v0.29.2/go.mod h1:sdIaaKuU7P44aoyyLlikSLayT6Vb7bvJNCX105xZXY0=
-k8s.io/apimachinery v0.29.2 h1:EWGpfJ856oj11C52NRCHuU7rFDwxev48z+6DSlGNsV8=
-k8s.io/apimachinery v0.29.2/go.mod h1:6HVkd1FwxIagpYrHSwJlQqZI3G9LfYWRPAkUvLnXTKU=
-k8s.io/client-go v0.29.2 h1:FEg85el1TeZp+/vYJM7hkDlSTFZ+c5nnK44DJ4FyoRg=
-k8s.io/client-go v0.29.2/go.mod h1:knlvFZE58VpqbQpJNbCbctTVXcd35mMyAAwBdpt4jrA=
-k8s.io/klog/v2 v2.110.1 h1:U/Af64HJf7FcwMcXyKm2RPM22WZzyR7OSpYj5tg3cL0=
-k8s.io/klog/v2 v2.110.1/go.mod h1:YGtd1984u+GgbuZ7e08/yBuAfKLSO0+uR1Fhi6ExXjo=
-k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 h1:aVUu9fTY98ivBPKR9Y5w/AuzbMm96cd3YHRTU83I780=
-k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00/go.mod h1:AsvuZPBlUDVuCdzJ87iajxtXuR9oktsTctW/R9wwouA=
-k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI=
-k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
+k8s.io/api v0.31.2 h1:3wLBbL5Uom/8Zy98GRPXpJ254nEFpl+hwndmk9RwmL0=
+k8s.io/api v0.31.2/go.mod h1:bWmGvrGPssSK1ljmLzd3pwCQ9MgoTsRCuK35u6SygUk=
+k8s.io/apimachinery v0.31.2 h1:i4vUt2hPK56W6mlT7Ry+AO8eEsyxMD1U44NR22CLTYw=
+k8s.io/apimachinery v0.31.2/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo=
+k8s.io/client-go v0.31.2 h1:Y2F4dxU5d3AQj+ybwSMqQnpZH9F30//1ObxOKlTI9yc=
+k8s.io/client-go v0.31.2/go.mod h1:NPa74jSVR/+eez2dFsEIHNa+3o09vtNaWwWwb1qSxSs=
+k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
+k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
+k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7FjZpUb45WallggurYhKGag=
+k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98=
+k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 h1:pUdcCO1Lk/tbT5ztQWOBi5HBgbBP1J8+AsQnQCKsi8A=
+k8s.io/utils v0.0.0-20240711033017-18e509b52bc8/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
 sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo=
 sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
 sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4=
 sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08=
-sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo=
-sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8=
+sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E=
+sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY=
+tags.cncf.io/container-device-interface v0.8.0 h1:8bCFo/g9WODjWx3m6EYl3GfUG31eKJbaggyBDxEldRc=
+tags.cncf.io/container-device-interface v0.8.0/go.mod h1:Apb7N4VdILW0EVdEMRYXIDVRZfNJZ+kmEUss2kRRQ6Y=
+tags.cncf.io/container-device-interface/specs-go v0.8.0 h1:QYGFzGxvYK/ZLMrjhvY0RjpUavIn4KcmRmVP/JjdBTA=
+tags.cncf.io/container-device-interface/specs-go v0.8.0/go.mod h1:BhJIkjjPh4qpys+qm4DAYtUyryaTDg9zris+AczXyws=
diff -pruN 0.19.3+ds1-4/hack/Vagrantfile.freebsd 0.21.3-0ubuntu1/hack/Vagrantfile.freebsd
--- 0.19.3+ds1-4/hack/Vagrantfile.freebsd	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/hack/Vagrantfile.freebsd	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,18 @@
+# -*- mode: ruby -*-
+# vi: set ft=ruby :
+
+Vagrant.configure("2") do |config|
+  config.vm.box = "generic/freebsd14"
+  config.vm.boot_timeout = 900
+  config.vm.synced_folder ".", "/vagrant", type: "rsync"
+  config.ssh.keep_alive = true
+
+  config.vm.provision "init", type: "shell", run: "once" do |sh|
+    sh.inline = <<~SHELL
+      pkg bootstrap
+      pkg install -y go123 git
+      ln -s /usr/local/bin/go123 /usr/local/bin/go
+      go install gotest.tools/gotestsum@#{ENV['GOTESTSUM_VERSION']}
+    SHELL
+  end
+end
diff -pruN 0.19.3+ds1-4/hack/Vagrantfile.openbsd 0.21.3-0ubuntu1/hack/Vagrantfile.openbsd
--- 0.19.3+ds1-4/hack/Vagrantfile.openbsd	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/hack/Vagrantfile.openbsd	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,21 @@
+# -*- mode: ruby -*-
+# vi: set ft=ruby :
+
+Vagrant.configure("2") do |config|
+  config.vm.box = "pygolo/openbsd7"
+  config.vm.box_version = "7.5"
+  config.vm.boot_timeout = 900
+  config.vm.synced_folder ".", "/vagrant", type: "rsync"
+  config.ssh.keep_alive = true
+
+  config.vm.provision "init", type: "shell", run: "once" do |sh|
+    sh.inline = <<~SHELL
+      pkg_add -x git
+
+      ftp https://go.dev/dl/go1.23.3.openbsd-amd64.tar.gz
+      tar -C /usr/local -xzf go1.23.3.openbsd-amd64.tar.gz
+      ln -s /usr/local/go/bin/go /usr/local/bin/go
+      go install gotest.tools/gotestsum@#{ENV['GOTESTSUM_VERSION']}
+    SHELL
+  end
+end
diff -pruN 0.19.3+ds1-4/hack/dockerfiles/authors.Dockerfile 0.21.3-0ubuntu1/hack/dockerfiles/authors.Dockerfile
--- 0.19.3+ds1-4/hack/dockerfiles/authors.Dockerfile	2024-12-17 11:25:19.000000000 +0000
+++ 0.21.3-0ubuntu1/hack/dockerfiles/authors.Dockerfile	2025-03-17 16:14:25.000000000 +0000
@@ -1,6 +1,6 @@
 # syntax=docker/dockerfile:1
 
-ARG ALPINE_VERSION=3.20
+ARG ALPINE_VERSION=3.21
 
 FROM alpine:${ALPINE_VERSION} AS gen
 RUN apk add --no-cache git
diff -pruN 0.19.3+ds1-4/hack/dockerfiles/docs.Dockerfile 0.21.3-0ubuntu1/hack/dockerfiles/docs.Dockerfile
--- 0.19.3+ds1-4/hack/dockerfiles/docs.Dockerfile	2024-12-17 11:25:19.000000000 +0000
+++ 0.21.3-0ubuntu1/hack/dockerfiles/docs.Dockerfile	2025-03-17 16:14:25.000000000 +0000
@@ -1,15 +1,17 @@
 # syntax=docker/dockerfile:1
 
 ARG GO_VERSION=1.23
+ARG ALPINE_VERSION=3.21
+
 ARG FORMATS=md,yaml
 
-FROM golang:${GO_VERSION}-alpine AS docsgen
+FROM golang:${GO_VERSION}-alpine${ALPINE_VERSION} AS docsgen
 WORKDIR /src
 RUN --mount=target=. \
   --mount=target=/root/.cache,type=cache \
   go build -mod=vendor -o /out/docsgen ./docs/generate.go
 
-FROM alpine AS gen
+FROM alpine:${ALPINE_VERSION} AS gen
 RUN apk add --no-cache rsync git
 WORKDIR /src
 COPY --from=docsgen /out/docsgen /usr/bin
diff -pruN 0.19.3+ds1-4/hack/dockerfiles/govulncheck.Dockerfile 0.21.3-0ubuntu1/hack/dockerfiles/govulncheck.Dockerfile
--- 0.19.3+ds1-4/hack/dockerfiles/govulncheck.Dockerfile	2024-12-17 11:25:19.000000000 +0000
+++ 0.21.3-0ubuntu1/hack/dockerfiles/govulncheck.Dockerfile	2025-03-17 16:14:25.000000000 +0000
@@ -1,10 +1,12 @@
 # syntax=docker/dockerfile:1
 
 ARG GO_VERSION=1.23
+ARG ALPINE_VERSION=3.21
+
 ARG GOVULNCHECK_VERSION=v1.1.3
 ARG FORMAT="text"
 
-FROM golang:${GO_VERSION}-alpine AS base
+FROM golang:${GO_VERSION}-alpine${ALPINE_VERSION} AS base
 WORKDIR /go/src/github.com/docker/buildx
 RUN apk add --no-cache jq moreutils
 ARG GOVULNCHECK_VERSION
diff -pruN 0.19.3+ds1-4/hack/dockerfiles/lint.Dockerfile 0.21.3-0ubuntu1/hack/dockerfiles/lint.Dockerfile
--- 0.19.3+ds1-4/hack/dockerfiles/lint.Dockerfile	2024-12-17 11:25:19.000000000 +0000
+++ 0.21.3-0ubuntu1/hack/dockerfiles/lint.Dockerfile	2025-03-17 16:14:25.000000000 +0000
@@ -1,15 +1,17 @@
 # syntax=docker/dockerfile:1
 
 ARG GO_VERSION=1.23
-ARG XX_VERSION=1.5.0
+ARG ALPINE_VERSION=3.21
+ARG XX_VERSION=1.6.1
+
 ARG GOLANGCI_LINT_VERSION=1.62.0
 ARG GOPLS_VERSION=v0.26.0
 # disabled: deprecated unusedvariable simplifyrange
-ARG GOPLS_ANALYZERS="embeddirective fillreturns infertypeargs nonewvars norangeoverfunc noresultvalues simplifycompositelit simplifyslice undeclaredname unusedparams useany"
+ARG GOPLS_ANALYZERS="embeddirective fillreturns infertypeargs nonewvars noresultvalues simplifycompositelit simplifyslice undeclaredname unusedparams useany"
 
 FROM --platform=$BUILDPLATFORM tonistiigi/xx:${XX_VERSION} AS xx
 
-FROM --platform=$BUILDPLATFORM golang:${GO_VERSION}-alpine AS golang-base
+FROM --platform=$BUILDPLATFORM golang:${GO_VERSION}-alpine${ALPINE_VERSION} AS golang-base
 RUN apk add --no-cache git gcc musl-dev
 
 FROM golang-base AS lint-base
@@ -74,4 +76,4 @@ RUN --mount=target=. \
   done
 EOF
 
-FROM lint
\ No newline at end of file
+FROM lint
diff -pruN 0.19.3+ds1-4/hack/dockerfiles/vendor.Dockerfile 0.21.3-0ubuntu1/hack/dockerfiles/vendor.Dockerfile
--- 0.19.3+ds1-4/hack/dockerfiles/vendor.Dockerfile	2024-12-17 11:25:19.000000000 +0000
+++ 0.21.3-0ubuntu1/hack/dockerfiles/vendor.Dockerfile	2025-03-17 16:14:25.000000000 +0000
@@ -1,9 +1,11 @@
 # syntax=docker/dockerfile:1
 
 ARG GO_VERSION=1.23
+ARG ALPINE_VERSION=3.21
+
 ARG MODOUTDATED_VERSION=v0.9.0
 
-FROM golang:${GO_VERSION}-alpine AS base
+FROM golang:${GO_VERSION}-alpine${ALPINE_VERSION} AS base
 RUN apk add --no-cache git rsync
 WORKDIR /src
 
diff -pruN 0.19.3+ds1-4/hack/test 0.21.3-0ubuntu1/hack/test
--- 0.19.3+ds1-4/hack/test	2024-12-17 11:25:19.000000000 +0000
+++ 0.21.3-0ubuntu1/hack/test	2025-03-17 16:14:25.000000000 +0000
@@ -2,6 +2,8 @@
 
 set -eu -o pipefail
 
+: "${GITHUB_ACTIONS=}"
+
 : "${BUILDX_CMD=docker buildx}"
 
 : "${TEST_COVERAGE=}"
@@ -37,7 +39,15 @@ if [ "$TEST_COVERAGE" = "1" ]; then
   export GO_TEST_COVERPROFILE="/testreports/coverage-report$TEST_REPORT_SUFFIX.txt"
 fi
 
-cid=$(docker create --rm --privileged \
+dockerConfigMount=""
+if [ "$GITHUB_ACTIONS" = "true" ]; then
+  dockerConfigPath="$HOME/.docker/config.json"
+  if [ -f "$dockerConfigPath" ]; then
+    dockerConfigMount="-v $dockerConfigPath:/root/.docker/config.json:ro"
+  fi
+fi
+
+cid=$(docker create --rm --privileged $dockerConfigMount \
   -v /tmp $testReportsVol \
   --volumes-from=$cacheVolume \
   -e GITHUB_REF \
diff -pruN 0.19.3+ds1-4/hack/test-driver 0.21.3-0ubuntu1/hack/test-driver
--- 0.19.3+ds1-4/hack/test-driver	2024-12-17 11:25:19.000000000 +0000
+++ 0.21.3-0ubuntu1/hack/test-driver	2025-03-17 16:14:25.000000000 +0000
@@ -167,7 +167,8 @@ buildxCmd bake ${bakePlatformFlag} \
   --file="${bakedef}" \
   --builder="${builderName}" \
   --set "*.context=${context}" \
-  --metadata-file="${context}/metadata-bake-def.json"
+  --metadata-file="${context}/metadata-bake-def.json" \
+  --allow fs="${context}"
 cat "${context}/metadata-bake-def.json"
 
 # bake all target
@@ -175,6 +176,7 @@ buildxCmd bake ${bakePlatformFlag} \
   --file="${bakedef}" \
   --builder="${builderName}" \
   --set "*.context=${context}" \
+  --allow fs="${context}" \
   --metadata-file="${context}/metadata-bake-all.json" \
   all
 cat "${context}/metadata-bake-all.json"
diff -pruN 0.19.3+ds1-4/localstate/localstate.go 0.21.3-0ubuntu1/localstate/localstate.go
--- 0.19.3+ds1-4/localstate/localstate.go	2024-12-17 11:25:19.000000000 +0000
+++ 0.21.3-0ubuntu1/localstate/localstate.go	2025-03-17 16:14:25.000000000 +0000
@@ -6,6 +6,7 @@ import (
 	"fmt"
 	"os"
 	"path/filepath"
+	"strconv"
 	"sync"
 
 	"github.com/docker/buildx/util/confutil"
@@ -14,6 +15,7 @@ import (
 )
 
 const (
+	version  = 2
 	refsDir  = "refs"
 	groupDir = "__group__"
 )
@@ -31,12 +33,8 @@ type State struct {
 }
 
 type StateGroup struct {
-	// Definition is the raw representation of the group (bake definition)
-	Definition []byte
 	// Targets are the targets invoked
 	Targets []string `json:",omitempty"`
-	// Inputs are the user inputs (bake overrides)
-	Inputs []string `json:",omitempty"`
 	// Refs are used to track all the refs that belong to the same group
 	Refs []string
 }
@@ -52,9 +50,7 @@ func New(cfg *confutil.Config) (*LocalSt
 	if err := cfg.MkdirAll(refsDir, 0700); err != nil {
 		return nil, err
 	}
-	return &LocalState{
-		cfg: cfg,
-	}, nil
+	return &LocalState{cfg: cfg}, nil
 }
 
 func (ls *LocalState) ReadRef(builderName, nodeName, id string) (*State, error) {
@@ -87,8 +83,12 @@ func (ls *LocalState) SaveRef(builderNam
 	return ls.cfg.AtomicWriteFile(filepath.Join(refDir, id), dt, 0644)
 }
 
+func (ls *LocalState) GroupDir() string {
+	return filepath.Join(ls.cfg.Dir(), refsDir, groupDir)
+}
+
 func (ls *LocalState) ReadGroup(id string) (*StateGroup, error) {
-	dt, err := os.ReadFile(filepath.Join(ls.cfg.Dir(), refsDir, groupDir, id))
+	dt, err := os.ReadFile(filepath.Join(ls.GroupDir(), id))
 	if err != nil {
 		return nil, err
 	}
@@ -208,7 +208,7 @@ func (ls *LocalState) removeGroup(id str
 	if id == "" {
 		return errors.Errorf("group ref empty")
 	}
-	f := filepath.Join(ls.cfg.Dir(), refsDir, groupDir, id)
+	f := filepath.Join(ls.GroupDir(), id)
 	if _, err := os.Lstat(f); err != nil {
 		if !os.IsNotExist(err) {
 			return err
@@ -230,3 +230,16 @@ func (ls *LocalState) validate(builderNa
 	}
 	return nil
 }
+
+func (ls *LocalState) readVersion() int {
+	if vdt, err := os.ReadFile(filepath.Join(ls.cfg.Dir(), refsDir, "version")); err == nil {
+		if v, err := strconv.Atoi(string(vdt)); err == nil {
+			return v
+		}
+	}
+	return 1
+}
+
+func (ls *LocalState) writeVersion(version int) error {
+	return ls.cfg.AtomicWriteFile(filepath.Join(refsDir, "version"), []byte(strconv.Itoa(version)), 0600)
+}
diff -pruN 0.19.3+ds1-4/localstate/localstate_test.go 0.21.3-0ubuntu1/localstate/localstate_test.go
--- 0.19.3+ds1-4/localstate/localstate_test.go	2024-12-17 11:25:19.000000000 +0000
+++ 0.21.3-0ubuntu1/localstate/localstate_test.go	2025-03-17 16:14:25.000000000 +0000
@@ -68,10 +68,8 @@ var (
 
 	testStateGroupID = "kvqs0sgly2rmitz84r25u9qd0"
 	testStateGroup   = StateGroup{
-		Definition: []byte(`{"group":{"default":{"targets":["pre-checkin"]},"pre-checkin":{"targets":["vendor-update","format","build"]}},"target":{"build":{"context":".","dockerfile":"dev.Dockerfile","target":"build-update","platforms":["linux/amd64"],"output":["."]},"format":{"context":".","dockerfile":"dev.Dockerfile","target":"format-update","platforms":["linux/amd64"],"output":["."]},"vendor-update":{"context":".","dockerfile":"dev.Dockerfile","target":"vendor-update","platforms":["linux/amd64"],"output":["."]}}}`),
-		Targets:    []string{"pre-checkin"},
-		Inputs:     []string{"*.platform=linux/amd64"},
-		Refs:       []string{"builder/builder0/hx2qf1w11qvz1x3k471c5i8xw", "builder/builder0/968zj0g03jmlx0s8qslnvh6rl", "builder/builder0/naf44f9i1710lf7y12lv5hb1z"},
+		Targets: []string{"pre-checkin"},
+		Refs:    []string{"builder/builder0/hx2qf1w11qvz1x3k471c5i8xw", "builder/builder0/968zj0g03jmlx0s8qslnvh6rl", "builder/builder0/naf44f9i1710lf7y12lv5hb1z"},
 	}
 
 	testStateGroupRef1ID = "hx2qf1w11qvz1x3k471c5i8xw"
diff -pruN 0.19.3+ds1-4/localstate/migrate.go 0.21.3-0ubuntu1/localstate/migrate.go
--- 0.19.3+ds1-4/localstate/migrate.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/localstate/migrate.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,56 @@
+package localstate
+
+import (
+	"encoding/json"
+	"os"
+	"path/filepath"
+
+	"github.com/pkg/errors"
+)
+
+func (ls *LocalState) MigrateIfNeeded() error {
+	currentVersion := ls.readVersion()
+	if currentVersion == version {
+		return nil
+	}
+	migrations := map[int]func(*LocalState) error{
+		2: (*LocalState).migration2,
+	}
+	for v := currentVersion + 1; v <= version; v++ {
+		migration, found := migrations[v]
+		if !found {
+			return errors.Errorf("localstate migration v%d not found", v)
+		}
+		if err := migration(ls); err != nil {
+			return errors.Wrapf(err, "localstate migration v%d failed", v)
+		}
+	}
+	return ls.writeVersion(version)
+}
+
+func (ls *LocalState) migration2() error {
+	return filepath.Walk(ls.GroupDir(), func(path string, info os.FileInfo, err error) error {
+		if err != nil {
+			return err
+		}
+		if info.IsDir() {
+			return nil
+		}
+		dt, err := os.ReadFile(path)
+		if err != nil {
+			return err
+		}
+		var stg StateGroup
+		if err := json.Unmarshal(dt, &stg); err != nil {
+			return err
+		}
+		mdt, err := json.Marshal(stg)
+		if err != nil {
+			return err
+		}
+		if err := os.WriteFile(path, mdt, 0600); err != nil {
+			return err
+		}
+		return nil
+	})
+}
diff -pruN 0.19.3+ds1-4/monitor/commands/reload.go 0.21.3-0ubuntu1/monitor/commands/reload.go
--- 0.19.3+ds1-4/monitor/commands/reload.go	2024-12-17 11:25:19.000000000 +0000
+++ 0.21.3-0ubuntu1/monitor/commands/reload.go	2025-03-17 16:14:25.000000000 +0000
@@ -66,7 +66,7 @@ func (cm *ReloadCmd) Exec(ctx context.Co
 	if err != nil {
 		var be *controllererrors.BuildError
 		if errors.As(err, &be) {
-			ref = be.Ref
+			ref = be.SessionID
 			resultUpdated = true
 		} else {
 			fmt.Printf("failed to reload: %v\n", err)
diff -pruN 0.19.3+ds1-4/tests/bake.go 0.21.3-0ubuntu1/tests/bake.go
--- 0.19.3+ds1-4/tests/bake.go	2024-12-17 11:25:19.000000000 +0000
+++ 0.21.3-0ubuntu1/tests/bake.go	2025-03-17 16:14:25.000000000 +0000
@@ -2,7 +2,11 @@ package tests
 
 import (
 	"bytes"
+	"crypto/rand"
+	"crypto/rsa"
+	"crypto/x509"
 	"encoding/json"
+	"encoding/pem"
 	"fmt"
 	"os"
 	"path/filepath"
@@ -33,6 +37,8 @@ func bakeCmd(sb integration.Sandbox, opt
 
 var bakeTests = []func(t *testing.T, sb integration.Sandbox){
 	testBakePrint,
+	testBakePrintSensitive,
+	testBakePrintOverrideEmpty,
 	testBakeLocal,
 	testBakeLocalMulti,
 	testBakeRemote,
@@ -55,6 +61,8 @@ var bakeTests = []func(t *testing.T, sb
 	testBakeDefinitionExistingOutsideNoParallel,
 	testBakeDefinitionSymlinkOutsideNoParallel,
 	testBakeDefinitionSymlinkOutsideGrantedNoParallel,
+	testBakeSSHPathNoParallel,
+	testBakeSSHDefaultNoParallel,
 	testBakeShmSize,
 	testBakeUlimits,
 	testBakeMetadataProvenance,
@@ -86,7 +94,8 @@ target "build" {
     HELLO = "foo"
   }
 }
-`)},
+`),
+		},
 		{
 			"Compose",
 			"compose.yml",
@@ -97,7 +106,8 @@ services:
       context: .
       args:
         HELLO: foo
-`)},
+`),
+		},
 	}
 
 	for _, tc := range testCases {
@@ -158,6 +168,166 @@ RUN echo "Hello ${HELLO}"
 	}
 }
 
+func testBakePrintSensitive(t *testing.T, sb integration.Sandbox) {
+	testCases := []struct {
+		name string
+		f    string
+		dt   []byte
+	}{
+		{
+			"HCL",
+			"docker-bake.hcl",
+			[]byte(`
+target "build" {
+  args = {
+    HELLO = "foo"
+  }
+
+  cache-from = [
+    "type=gha,token=abc",
+    "type=s3,region=us-west-2,bucket=my_bucket,name=my_image",
+  ]
+}
+`),
+		},
+		{
+			"Compose",
+			"compose.yml",
+			[]byte(`
+services:
+  build:
+    build:
+      context: .
+      args:
+        HELLO: foo
+      cache_from:
+        - type=gha,token=abc
+        - type=s3,region=us-west-2,bucket=my_bucket,name=my_image
+`),
+		},
+	}
+
+	for _, tc := range testCases {
+		t.Run(tc.name, func(t *testing.T) {
+			dir := tmpdir(
+				t,
+				fstest.CreateFile(tc.f, tc.dt, 0600),
+				fstest.CreateFile("Dockerfile", []byte(`
+FROM busybox
+ARG HELLO
+RUN echo "Hello ${HELLO}"
+	`), 0600),
+			)
+
+			cmd := buildxCmd(sb, withDir(dir), withArgs("bake", "--print", "build"),
+				withEnv(
+					"ACTIONS_RUNTIME_TOKEN=sensitive_token",
+					"ACTIONS_CACHE_URL=https://cache.github.com",
+					"AWS_ACCESS_KEY_ID=definitely_dont_look_here",
+					"AWS_SECRET_ACCESS_KEY=hackers_please_dont_steal",
+					"AWS_SESSION_TOKEN=not_a_mitm_attack",
+				),
+			)
+			stdout := bytes.Buffer{}
+			stderr := bytes.Buffer{}
+			cmd.Stdout = &stdout
+			cmd.Stderr = &stderr
+			require.NoError(t, cmd.Run(), stdout.String(), stderr.String())
+
+			var def struct {
+				Group  map[string]*bake.Group  `json:"group,omitempty"`
+				Target map[string]*bake.Target `json:"target"`
+			}
+			require.NoError(t, json.Unmarshal(stdout.Bytes(), &def))
+
+			require.Len(t, def.Group, 1)
+			require.Contains(t, def.Group, "default")
+
+			require.Equal(t, []string{"build"}, def.Group["default"].Targets)
+			require.Len(t, def.Target, 1)
+			require.Contains(t, def.Target, "build")
+			require.Equal(t, ".", *def.Target["build"].Context)
+			require.Equal(t, "Dockerfile", *def.Target["build"].Dockerfile)
+			require.Equal(t, map[string]*string{"HELLO": ptrstr("foo")}, def.Target["build"].Args)
+			require.NotNil(t, def.Target["build"].CacheFrom)
+			require.Len(t, def.Target["build"].CacheFrom, 2)
+
+			require.JSONEq(t, `{
+  "group": {
+    "default": {
+      "targets": [
+        "build"
+      ]
+    }
+  },
+  "target": {
+    "build": {
+      "context": ".",
+      "dockerfile": "Dockerfile",
+      "args": {
+        "HELLO": "foo"
+      },
+      "cache-from": [
+        {
+          "type": "gha",
+          "token": "abc"
+        },
+        {
+          "type": "s3",
+          "region": "us-west-2",
+          "bucket": "my_bucket",
+          "name": "my_image"
+        }
+      ]
+    }
+  }
+}
+`, stdout.String())
+		})
+	}
+}
+
+func testBakePrintOverrideEmpty(t *testing.T, sb integration.Sandbox) {
+	dockerfile := []byte(`
+FROM scratch
+COPY foo /foo
+	`)
+	bakefile := []byte(`
+target "default" {
+	cache-to = ["type=gha,mode=min,scope=integration-tests"]
+}
+`)
+	dir := tmpdir(
+		t,
+		fstest.CreateFile("docker-bake.hcl", bakefile, 0600),
+		fstest.CreateFile("Dockerfile", dockerfile, 0600),
+		fstest.CreateFile("foo", []byte("foo"), 0600),
+	)
+
+	cmd := buildxCmd(sb, withDir(dir), withArgs("bake", "--print", "--set", "*.cache-to="))
+	stdout := bytes.Buffer{}
+	stderr := bytes.Buffer{}
+	cmd.Stdout = &stdout
+	cmd.Stderr = &stderr
+	require.NoError(t, cmd.Run(), stdout.String(), stderr.String())
+
+	require.JSONEq(t, `{
+	"group": {
+		"default": {
+			"targets": [
+				"default"
+			]
+		}
+	},
+	"target": {
+		"default": {
+			"context": ".",
+			"dockerfile": "Dockerfile"
+		}
+	}
+}`, stdout.String())
+}
+
 func testBakeLocal(t *testing.T, sb integration.Sandbox) {
 	dockerfile := []byte(`
 FROM scratch
@@ -743,6 +913,7 @@ target "default" {
 		})
 	}
 }
+
 func testBakeSetNonExistingOutsideNoParallel(t *testing.T, sb integration.Sandbox) {
 	for _, ent := range []bool{true, false} {
 		t.Run(fmt.Sprintf("ent=%v", ent), func(t *testing.T) {
@@ -969,6 +1140,95 @@ target "default" {
 	}
 }
 
+func testBakeSSHPathNoParallel(t *testing.T, sb integration.Sandbox) {
+	for _, ent := range []bool{true, false} {
+		t.Run(fmt.Sprintf("ent=%v", ent), func(t *testing.T) {
+			t.Setenv("BUILDX_BAKE_ENTITLEMENTS_FS", strconv.FormatBool(ent))
+			dockerfile := []byte(`
+FROM scratch
+COPY Dockerfile /foo
+	`)
+			keyDir := t.TempDir()
+			err := writeTempPrivateKey(filepath.Join(keyDir, "id_rsa"))
+			require.NoError(t, err)
+			bakefile := []byte(fmt.Sprintf(`
+target "default" {
+	ssh = ["key=%s"]
+}
+`, filepath.Join(keyDir, "id_rsa")))
+			dir := tmpdir(
+				t,
+				fstest.CreateFile("docker-bake.hcl", bakefile, 0600),
+				fstest.CreateFile("Dockerfile", dockerfile, 0600),
+			)
+
+			// not allowed
+			cmd := buildxCmd(sb, withDir(dir), withArgs("bake", "--progress=plain"))
+			out, err := cmd.CombinedOutput()
+			if ent {
+				require.Error(t, err, string(out))
+				require.Contains(t, string(out), "ERROR: additional privileges requested")
+				require.Contains(t, string(out), "Read access to path")
+				require.Contains(t, string(out), "/id_rsa")
+			} else {
+				require.NoError(t, err, string(out))
+			}
+
+			// directory allowed
+			cmd = buildxCmd(sb, withDir(dir), withArgs("bake", "--progress=plain", "--allow", "fs.read="+keyDir))
+			out, err = cmd.CombinedOutput()
+			require.NoError(t, err, string(out))
+
+			// file allowed
+			cmd = buildxCmd(sb, withDir(dir), withArgs("bake", "--progress=plain", "--allow", "fs.read="+filepath.Join(keyDir, "id_rsa")))
+			out, err = cmd.CombinedOutput()
+			require.NoError(t, err, string(out))
+		})
+	}
+}
+
+func testBakeSSHDefaultNoParallel(t *testing.T, sb integration.Sandbox) {
+	for _, ent := range []bool{true, false} {
+		t.Run(fmt.Sprintf("ent=%v", ent), func(t *testing.T) {
+			t.Setenv("BUILDX_BAKE_ENTITLEMENTS_FS", strconv.FormatBool(ent))
+			dockerfile := []byte(`
+FROM scratch
+COPY Dockerfile /foo
+	`)
+			keyDir := t.TempDir()
+			// not a socket but key behaves the same and doesn't create parse error
+			err := writeTempPrivateKey(filepath.Join(keyDir, "ssh-agent.sock"))
+			require.NoError(t, err)
+			t.Setenv("SSH_AUTH_SOCK", filepath.Join(keyDir, "ssh-agent.sock"))
+			bakefile := []byte(`
+target "default" {
+	ssh = ["default"]
+}
+`)
+			dir := tmpdir(
+				t,
+				fstest.CreateFile("docker-bake.hcl", bakefile, 0600),
+				fstest.CreateFile("Dockerfile", dockerfile, 0600),
+			)
+
+			// not allowed
+			cmd := buildxCmd(sb, withDir(dir), withArgs("bake", "--progress=plain"))
+			out, err := cmd.CombinedOutput()
+			if ent {
+				require.Error(t, err, string(out))
+				require.Contains(t, string(out), "ERROR: additional privileges requested")
+				require.Contains(t, string(out), "Forwarding default SSH agent socket")
+			} else {
+				require.NoError(t, err, string(out))
+			}
+
+			cmd = buildxCmd(sb, withDir(dir), withArgs("bake", "--progress=plain", "--allow=ssh"))
+			out, err = cmd.CombinedOutput()
+			require.NoError(t, err, string(out))
+		})
+	}
+}
+
 func testBakeUlimits(t *testing.T, sb integration.Sandbox) {
 	dockerfile := []byte(`
 FROM busybox AS build
@@ -1382,7 +1642,7 @@ target "abc" {
 	out, err := bakeCmd(
 		sb,
 		withDir(dir),
-		withArgs("--list-targets"),
+		withArgs("--list=targets"),
 	)
 	require.NoError(t, err, out)
 
@@ -1411,7 +1671,7 @@ target "default" {
 	out, err := bakeCmd(
 		sb,
 		withDir(dir),
-		withArgs("--list-variables"),
+		withArgs("--list=variables"),
 	)
 	require.NoError(t, err, out)
 
@@ -1752,3 +2012,15 @@ target "third" {
 		require.Contains(t, stdout.String(), dockerfilePathThird+":3")
 	})
 }
+
+func writeTempPrivateKey(fp string) error {
+	privateKey, err := rsa.GenerateKey(rand.Reader, 2048)
+	if err != nil {
+		return err
+	}
+	privateKeyPEM := pem.EncodeToMemory(&pem.Block{
+		Type:  "RSA PRIVATE KEY",
+		Bytes: x509.MarshalPKCS1PrivateKey(privateKey),
+	})
+	return os.WriteFile(fp, privateKeyPEM, 0600)
+}
diff -pruN 0.19.3+ds1-4/tests/imagetools.go 0.21.3-0ubuntu1/tests/imagetools.go
--- 0.19.3+ds1-4/tests/imagetools.go	2024-12-17 11:25:19.000000000 +0000
+++ 0.21.3-0ubuntu1/tests/imagetools.go	2025-03-17 16:14:25.000000000 +0000
@@ -5,7 +5,7 @@ import (
 	"os/exec"
 	"testing"
 
-	"github.com/containerd/containerd/images"
+	"github.com/containerd/containerd/v2/core/images"
 	"github.com/containerd/continuity/fs/fstest"
 	"github.com/containerd/platforms"
 	"github.com/moby/buildkit/util/testutil/integration"
diff -pruN 0.19.3+ds1-4/tests/workers/backend.go 0.21.3-0ubuntu1/tests/workers/backend.go
--- 0.19.3+ds1-4/tests/workers/backend.go	2024-12-17 11:25:19.000000000 +0000
+++ 0.21.3-0ubuntu1/tests/workers/backend.go	2025-03-17 16:14:25.000000000 +0000
@@ -19,6 +19,10 @@ func (s *backend) Address() string {
 	return s.builder
 }
 
+func (s *backend) DebugAddress() string {
+	return ""
+}
+
 func (s *backend) DockerAddress() string {
 	return s.context
 }
diff -pruN 0.19.3+ds1-4/util/buildflags/attests.go 0.21.3-0ubuntu1/util/buildflags/attests.go
--- 0.19.3+ds1-4/util/buildflags/attests.go	2024-12-17 11:25:19.000000000 +0000
+++ 0.21.3-0ubuntu1/util/buildflags/attests.go	2025-03-17 16:14:25.000000000 +0000
@@ -1,7 +1,9 @@
 package buildflags
 
 import (
+	"encoding/json"
 	"fmt"
+	"maps"
 	"strconv"
 	"strings"
 
@@ -10,6 +12,166 @@ import (
 	"github.com/tonistiigi/go-csvvalue"
 )
 
+type Attests []*Attest
+
+func (a Attests) Merge(other Attests) Attests {
+	if other == nil {
+		a.Normalize()
+		return a
+	} else if a == nil {
+		other.Normalize()
+		return other
+	}
+
+	return append(a, other...).Normalize()
+}
+
+func (a Attests) Normalize() Attests {
+	if len(a) == 0 {
+		return nil
+	}
+	return removeAttestDupes(a)
+}
+
+func (a Attests) ToPB() []*controllerapi.Attest {
+	if len(a) == 0 {
+		return nil
+	}
+
+	entries := make([]*controllerapi.Attest, len(a))
+	for i, entry := range a {
+		entries[i] = entry.ToPB()
+	}
+	return entries
+}
+
+type Attest struct {
+	Type     string            `json:"type"`
+	Disabled bool              `json:"disabled,omitempty"`
+	Attrs    map[string]string `json:"attrs,omitempty"`
+}
+
+func (a *Attest) Equal(other *Attest) bool {
+	if a.Type != other.Type || a.Disabled != other.Disabled {
+		return false
+	}
+	return maps.Equal(a.Attrs, other.Attrs)
+}
+
+func (a *Attest) String() string {
+	var b csvBuilder
+	if a.Type != "" {
+		b.Write("type", a.Type)
+	}
+	if a.Disabled {
+		b.Write("disabled", "true")
+	}
+	if len(a.Attrs) > 0 {
+		b.WriteAttributes(a.Attrs)
+	}
+	return b.String()
+}
+
+func (a *Attest) ToPB() *controllerapi.Attest {
+	var b csvBuilder
+	if a.Type != "" {
+		b.Write("type", a.Type)
+	}
+	if a.Disabled {
+		b.Write("disabled", "true")
+	}
+	b.WriteAttributes(a.Attrs)
+
+	return &controllerapi.Attest{
+		Type:     a.Type,
+		Disabled: a.Disabled,
+		Attrs:    b.String(),
+	}
+}
+
+func (a *Attest) MarshalJSON() ([]byte, error) {
+	m := make(map[string]interface{}, len(a.Attrs)+2)
+	for k, v := range a.Attrs {
+		m[k] = v
+	}
+	m["type"] = a.Type
+	if a.Disabled {
+		m["disabled"] = true
+	}
+	return json.Marshal(m)
+}
+
+func (a *Attest) UnmarshalJSON(data []byte) error {
+	var m map[string]interface{}
+	if err := json.Unmarshal(data, &m); err != nil {
+		return err
+	}
+
+	if typ, ok := m["type"]; ok {
+		a.Type, ok = typ.(string)
+		if !ok {
+			return errors.Errorf("attest type must be a string")
+		}
+		delete(m, "type")
+	}
+
+	if disabled, ok := m["disabled"]; ok {
+		a.Disabled, ok = disabled.(bool)
+		if !ok {
+			return errors.Errorf("attest disabled attribute must be a boolean")
+		}
+		delete(m, "disabled")
+	}
+
+	attrs := make(map[string]string, len(m))
+	for k, v := range m {
+		s, ok := v.(string)
+		if !ok {
+			return errors.Errorf("attest attribute %q must be a string", k)
+		}
+		attrs[k] = s
+	}
+	a.Attrs = attrs
+	return nil
+}
+
+func (a *Attest) UnmarshalText(text []byte) error {
+	in := string(text)
+	fields, err := csvvalue.Fields(in, nil)
+	if err != nil {
+		return err
+	}
+
+	a.Attrs = map[string]string{}
+	for _, field := range fields {
+		key, value, ok := strings.Cut(field, "=")
+		if !ok {
+			return errors.Errorf("invalid value %s", field)
+		}
+
+		switch strings.TrimSpace(strings.ToLower(key)) {
+		case "type":
+			a.Type = value
+		case "disabled":
+			disabled, err := strconv.ParseBool(value)
+			if err != nil {
+				return errors.Wrapf(err, "invalid value %s", field)
+			}
+			a.Disabled = disabled
+		default:
+			a.Attrs[key] = value
+		}
+	}
+	return a.validate()
+}
+
+func (a *Attest) validate() error {
+	if a.Type == "" {
+		return errors.Errorf("attestation type not specified")
+	}
+	return nil
+}
+
 func CanonicalizeAttest(attestType string, in string) string {
 	if in == "" {
 		return ""
@@ -21,21 +183,34 @@ func CanonicalizeAttest(attestType strin
 }
 
 func ParseAttests(in []string) ([]*controllerapi.Attest, error) {
-	out := []*controllerapi.Attest{}
-	found := map[string]struct{}{}
-	for _, in := range in {
-		in := in
-		attest, err := ParseAttest(in)
-		if err != nil {
+	var outs []*Attest
+	for _, s := range in {
+		var out Attest
+		if err := out.UnmarshalText([]byte(s)); err != nil {
 			return nil, err
 		}
+		outs = append(outs, &out)
+	}
+	return ConvertAttests(outs)
+}
+
+// ConvertAttests converts Attestations for the controller API from
+// the ones in this package.
+//
+// Attestations of the same type will cause an error. Some tools,
+// like bake, remove the duplicates before calling this function.
+func ConvertAttests(in []*Attest) ([]*controllerapi.Attest, error) {
+	out := make([]*controllerapi.Attest, 0, len(in))
 
+	// Check for dupplicate attestations while we convert them
+	// to the controller API.
+	found := map[string]struct{}{}
+	for _, attest := range in {
 		if _, ok := found[attest.Type]; ok {
 			return nil, errors.Errorf("duplicate attestation field %s", attest.Type)
 		}
 		found[attest.Type] = struct{}{}
-
-		out = append(out, attest)
+		out = append(out, attest.ToPB())
 	}
 	return out, nil
 }
@@ -77,3 +252,17 @@ func ParseAttest(in string) (*controller
 
 	return &attest, nil
 }
+
+func removeAttestDupes(s []*Attest) []*Attest {
+	res := []*Attest{}
+	m := map[string]int{}
+	for _, att := range s {
+		if i, ok := m[att.Type]; ok {
+			res[i] = att
+		} else {
+			m[att.Type] = len(res)
+			res = append(res, att)
+		}
+	}
+	return res
+}
diff -pruN 0.19.3+ds1-4/util/buildflags/attests_cty.go 0.21.3-0ubuntu1/util/buildflags/attests_cty.go
--- 0.19.3+ds1-4/util/buildflags/attests_cty.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/util/buildflags/attests_cty.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,102 @@
+package buildflags
+
+import (
+	"strconv"
+	"sync"
+
+	"github.com/zclconf/go-cty/cty"
+	"github.com/zclconf/go-cty/cty/convert"
+)
+
+var attestType = sync.OnceValue(func() cty.Type {
+	return cty.Map(cty.String)
+})
+
+func (e *Attests) FromCtyValue(in cty.Value, p cty.Path) error {
+	got := in.Type()
+	if got.IsTupleType() || got.IsListType() {
+		return e.fromCtyValue(in, p)
+	}
+
+	want := cty.List(attestType())
+	return p.NewErrorf("%s", convert.MismatchMessage(got, want))
+}
+
+func (e *Attests) fromCtyValue(in cty.Value, p cty.Path) (retErr error) {
+	*e = make([]*Attest, 0, in.LengthInt())
+
+	yield := func(value cty.Value) bool {
+		entry := &Attest{}
+		if retErr = entry.FromCtyValue(value, p); retErr != nil {
+			return false
+		}
+		*e = append(*e, entry)
+		return true
+	}
+	eachElement(in)(yield)
+	return retErr
+}
+
+func (e Attests) ToCtyValue() cty.Value {
+	if len(e) == 0 {
+		return cty.ListValEmpty(attestType())
+	}
+
+	vals := make([]cty.Value, len(e))
+	for i, entry := range e {
+		vals[i] = entry.ToCtyValue()
+	}
+	return cty.ListVal(vals)
+}
+
+func (e *Attest) FromCtyValue(in cty.Value, p cty.Path) error {
+	if in.Type() == cty.String {
+		if err := e.UnmarshalText([]byte(in.AsString())); err != nil {
+			return p.NewError(err)
+		}
+		return nil
+	}
+
+	conv, err := convert.Convert(in, cty.Map(cty.String))
+	if err != nil {
+		return err
+	}
+
+	e.Attrs = map[string]string{}
+	for it := conv.ElementIterator(); it.Next(); {
+		k, v := it.Element()
+		if !v.IsKnown() {
+			continue
+		}
+
+		switch key := k.AsString(); key {
+		case "type":
+			e.Type = v.AsString()
+		case "disabled":
+			b, err := strconv.ParseBool(v.AsString())
+			if err != nil {
+				return err
+			}
+			e.Disabled = b
+		default:
+			e.Attrs[key] = v.AsString()
+		}
+	}
+	return nil
+}
+
+func (e *Attest) ToCtyValue() cty.Value {
+	if e == nil {
+		return cty.NullVal(cty.Map(cty.String))
+	}
+
+	vals := make(map[string]cty.Value, len(e.Attrs)+2)
+	for k, v := range e.Attrs {
+		vals[k] = cty.StringVal(v)
+	}
+	vals["type"] = cty.StringVal(e.Type)
+	if e.Disabled {
+		vals["disabled"] = cty.StringVal("true")
+	}
+	return cty.MapVal(vals)
+}
diff -pruN 0.19.3+ds1-4/util/buildflags/attests_test.go 0.21.3-0ubuntu1/util/buildflags/attests_test.go
--- 0.19.3+ds1-4/util/buildflags/attests_test.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/util/buildflags/attests_test.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,117 @@
+package buildflags
+
+import (
+	"encoding/json"
+	"testing"
+
+	"github.com/stretchr/testify/require"
+	"github.com/zclconf/go-cty/cty"
+)
+
+func TestAttests(t *testing.T) {
+	t.Run("MarshalJSON", func(t *testing.T) {
+		attests := Attests{
+			{Type: "provenance", Attrs: map[string]string{"mode": "max"}},
+			{Type: "sbom", Disabled: true},
+			{Type: "sbom", Attrs: map[string]string{
+				"generator": "scanner",
+				"ENV1":      `"foo,bar"`,
+				"Env2":      "hello",
+			}},
+		}
+
+		expected := `[{"type":"provenance","mode":"max"},{"type":"sbom","disabled":true},{"ENV1":"\"foo,bar\"","Env2":"hello","generator":"scanner","type":"sbom"}]`
+		actual, err := json.Marshal(attests)
+		require.NoError(t, err)
+		require.JSONEq(t, expected, string(actual))
+	})
+
+	t.Run("UnmarshalJSON", func(t *testing.T) {
+		in := `[{"type":"provenance","mode":"max"},{"type":"sbom","disabled":true},{"ENV1":"\"foo,bar\"","Env2":"hello","generator":"scanner","type":"sbom"}]`
+
+		var actual Attests
+		err := json.Unmarshal([]byte(in), &actual)
+		require.NoError(t, err)
+
+		expected := Attests{
+			{Type: "provenance", Attrs: map[string]string{"mode": "max"}},
+			{Type: "sbom", Disabled: true, Attrs: map[string]string{}},
+			{Type: "sbom", Disabled: false, Attrs: map[string]string{
+				"generator": "scanner",
+				"ENV1":      `"foo,bar"`,
+				"Env2":      "hello",
+			}},
+		}
+		require.Equal(t, expected, actual)
+	})
+
+	t.Run("FromCtyValue", func(t *testing.T) {
+		in := cty.TupleVal([]cty.Value{
+			cty.ObjectVal(map[string]cty.Value{
+				"type": cty.StringVal("provenance"),
+				"mode": cty.StringVal("max"),
+			}),
+			cty.ObjectVal(map[string]cty.Value{
+				"type":      cty.StringVal("sbom"),
+				"generator": cty.StringVal("scan"),
+				"ENV1":      cty.StringVal(`foo,bar`),
+				"Env2":      cty.StringVal(`hello`),
+			}),
+			cty.StringVal("type=sbom,disabled=true"),
+			cty.StringVal(`type=sbom,generator=scan,"FOO=bar,baz",Hello=World`),
+		})
+
+		var actual Attests
+		err := actual.FromCtyValue(in, nil)
+		require.NoError(t, err)
+
+		expected := Attests{
+			{Type: "provenance", Attrs: map[string]string{"mode": "max"}},
+			{Type: "sbom", Attrs: map[string]string{
+				"generator": "scan",
+				"ENV1":      "foo,bar",
+				"Env2":      "hello",
+			}},
+			{Type: "sbom", Disabled: true, Attrs: map[string]string{}},
+			{Type: "sbom", Attrs: map[string]string{
+				"generator": "scan",
+				"FOO":       "bar,baz",
+				"Hello":     "World",
+			}},
+		}
+		require.Equal(t, expected, actual)
+	})
+
+	t.Run("ToCtyValue", func(t *testing.T) {
+		attests := Attests{
+			{Type: "provenance", Attrs: map[string]string{"mode": "max"}},
+			{Type: "sbom", Disabled: true},
+			{Type: "sbom", Attrs: map[string]string{
+				"generator": "scan",
+				"ENV1":      `"foo,bar"`,
+				"Env2":      "hello",
+			}},
+		}
+
+		actual := attests.ToCtyValue()
+		expected := cty.ListVal([]cty.Value{
+			cty.MapVal(map[string]cty.Value{
+				"type": cty.StringVal("provenance"),
+				"mode": cty.StringVal("max"),
+			}),
+			cty.MapVal(map[string]cty.Value{
+				"type":     cty.StringVal("sbom"),
+				"disabled": cty.StringVal("true"),
+			}),
+			cty.MapVal(map[string]cty.Value{
+				"type":      cty.StringVal("sbom"),
+				"generator": cty.StringVal("scan"),
+				"ENV1":      cty.StringVal(`"foo,bar"`),
+				"Env2":      cty.StringVal("hello"),
+			}),
+		})
+
+		result := actual.Equals(expected)
+		require.True(t, result.True())
+	})
+}
diff -pruN 0.19.3+ds1-4/util/buildflags/cache.go 0.21.3-0ubuntu1/util/buildflags/cache.go
--- 0.19.3+ds1-4/util/buildflags/cache.go	2024-12-17 11:25:19.000000000 +0000
+++ 0.21.3-0ubuntu1/util/buildflags/cache.go	2025-03-17 16:14:25.000000000 +0000
@@ -2,88 +2,241 @@ package buildflags
 
 import (
 	"context"
+	"encoding/json"
+	"maps"
 	"os"
+	"strconv"
 	"strings"
 
 	awsconfig "github.com/aws/aws-sdk-go-v2/config"
 	controllerapi "github.com/docker/buildx/controller/pb"
 	"github.com/pkg/errors"
 	"github.com/tonistiigi/go-csvvalue"
+	"github.com/zclconf/go-cty/cty"
+	jsoncty "github.com/zclconf/go-cty/cty/json"
 )
 
-func ParseCacheEntry(in []string) ([]*controllerapi.CacheOptionsEntry, error) {
-	outs := make([]*controllerapi.CacheOptionsEntry, 0, len(in))
-	for _, in := range in {
-		if in == "" {
+type CacheOptions []*CacheOptionsEntry
+
+func (o CacheOptions) Merge(other CacheOptions) CacheOptions {
+	if other == nil {
+		return o.Normalize()
+	} else if o == nil {
+		return other.Normalize()
+	}
+
+	return append(o, other...).Normalize()
+}
+
+func (o CacheOptions) Normalize() CacheOptions {
+	if len(o) == 0 {
+		return nil
+	}
+	return removeDupes(o)
+}
+
+func (o CacheOptions) ToPB() []*controllerapi.CacheOptionsEntry {
+	if len(o) == 0 {
+		return nil
+	}
+
+	var outs []*controllerapi.CacheOptionsEntry
+	for _, entry := range o {
+		pb := entry.ToPB()
+		if !isActive(pb) {
 			continue
 		}
-		fields, err := csvvalue.Fields(in, nil)
-		if err != nil {
-			return nil, err
+		outs = append(outs, pb)
+	}
+	return outs
+}
+
+type CacheOptionsEntry struct {
+	Type  string            `json:"type"`
+	Attrs map[string]string `json:"attrs,omitempty"`
+}
+
+func (e *CacheOptionsEntry) Equal(other *CacheOptionsEntry) bool {
+	if e.Type != other.Type {
+		return false
+	}
+	return maps.Equal(e.Attrs, other.Attrs)
+}
+
+func (e *CacheOptionsEntry) String() string {
+	// Special registry syntax.
+	if e.Type == "registry" && len(e.Attrs) == 1 {
+		if ref, ok := e.Attrs["ref"]; ok {
+			return ref
 		}
-		if isRefOnlyFormat(fields) {
-			for _, field := range fields {
-				outs = append(outs, &controllerapi.CacheOptionsEntry{
-					Type:  "registry",
-					Attrs: map[string]string{"ref": field},
-				})
-			}
-			continue
+	}
+
+	var b csvBuilder
+	if e.Type != "" {
+		b.Write("type", e.Type)
+	}
+	if len(e.Attrs) > 0 {
+		b.WriteAttributes(e.Attrs)
+	}
+	return b.String()
+}
+
+func (e *CacheOptionsEntry) ToPB() *controllerapi.CacheOptionsEntry {
+	ci := &controllerapi.CacheOptionsEntry{
+		Type:  e.Type,
+		Attrs: maps.Clone(e.Attrs),
+	}
+	addGithubToken(ci)
+	addAwsCredentials(ci)
+	return ci
+}
+
+func (e *CacheOptionsEntry) MarshalJSON() ([]byte, error) {
+	m := maps.Clone(e.Attrs)
+	if m == nil {
+		m = map[string]string{}
+	}
+	m["type"] = e.Type
+	return json.Marshal(m)
+}
+
+func (e *CacheOptionsEntry) UnmarshalJSON(data []byte) error {
+	var m map[string]string
+	if err := json.Unmarshal(data, &m); err != nil {
+		return err
+	}
+
+	e.Type = m["type"]
+	delete(m, "type")
+
+	e.Attrs = m
+	return e.validate(data)
+}
+
+func (e *CacheOptionsEntry) UnmarshalText(text []byte) error {
+	in := string(text)
+	fields, err := csvvalue.Fields(in, nil)
+	if err != nil {
+		return err
+	}
+
+	if len(fields) == 1 && !strings.Contains(fields[0], "=") {
+		e.Type = "registry"
+		e.Attrs = map[string]string{"ref": fields[0]}
+		return nil
+	}
+
+	e.Type = ""
+	e.Attrs = map[string]string{}
+
+	for _, field := range fields {
+		parts := strings.SplitN(field, "=", 2)
+		if len(parts) != 2 {
+			return errors.Errorf("invalid value %s", field)
+		}
+		key := strings.ToLower(parts[0])
+		value := parts[1]
+		switch key {
+		case "type":
+			e.Type = value
+		default:
+			e.Attrs[key] = value
+		}
+	}
+
+	if e.Type == "" {
+		return errors.Errorf("type required form> %q", in)
+	}
+	return e.validate(text)
+}
+
+func (e *CacheOptionsEntry) validate(gv interface{}) error {
+	if e.Type == "" {
+		var text []byte
+		switch gv := gv.(type) {
+		case []byte:
+			text = gv
+		case string:
+			text = []byte(gv)
+		case cty.Value:
+			text, _ = jsoncty.Marshal(gv, gv.Type())
+		default:
+			text, _ = json.Marshal(gv)
 		}
+		return errors.Errorf("type required form> %q", string(text))
+	}
+	return nil
+}
 
-		out := controllerapi.CacheOptionsEntry{
-			Attrs: map[string]string{},
+func ParseCacheEntry(in []string) (CacheOptions, error) {
+	if len(in) == 0 {
+		return nil, nil
+	}
+
+	opts := make(CacheOptions, 0, len(in))
+	for _, in := range in {
+		if in == "" {
+			continue
 		}
-		for _, field := range fields {
-			parts := strings.SplitN(field, "=", 2)
-			if len(parts) != 2 {
-				return nil, errors.Errorf("invalid value %s", field)
+
+		if !strings.Contains(in, "=") {
+			// This is ref only format. Each field in the CSV is its own entry.
+			fields, err := csvvalue.Fields(in, nil)
+			if err != nil {
+				return nil, err
 			}
-			key := strings.ToLower(parts[0])
-			value := parts[1]
-			switch key {
-			case "type":
-				out.Type = value
-			default:
-				out.Attrs[key] = value
+
+			for _, field := range fields {
+				opt := CacheOptionsEntry{}
+				if err := opt.UnmarshalText([]byte(field)); err != nil {
+					return nil, err
+				}
+				opts = append(opts, &opt)
 			}
-		}
-		if out.Type == "" {
-			return nil, errors.Errorf("type required form> %q", in)
-		}
-		if !addGithubToken(&out) {
 			continue
 		}
-		addAwsCredentials(&out)
-		outs = append(outs, &out)
-	}
-	return outs, nil
-}
 
-func isRefOnlyFormat(in []string) bool {
-	for _, v := range in {
-		if strings.Contains(v, "=") {
-			return false
+		var out CacheOptionsEntry
+		if err := out.UnmarshalText([]byte(in)); err != nil {
+			return nil, err
 		}
+		opts = append(opts, &out)
 	}
-	return true
+	return opts, nil
 }
 
-func addGithubToken(ci *controllerapi.CacheOptionsEntry) bool {
+func addGithubToken(ci *controllerapi.CacheOptionsEntry) {
 	if ci.Type != "gha" {
-		return true
+		return
+	}
+	version, ok := ci.Attrs["version"]
+	if !ok {
+		// https://github.com/actions/toolkit/blob/2b08dc18f261b9fdd978b70279b85cbef81af8bc/packages/cache/src/internal/config.ts#L19
+		if v, ok := os.LookupEnv("ACTIONS_CACHE_SERVICE_V2"); ok {
+			if b, err := strconv.ParseBool(v); err == nil && b {
+				version = "2"
+			}
+		}
 	}
 	if _, ok := ci.Attrs["token"]; !ok {
 		if v, ok := os.LookupEnv("ACTIONS_RUNTIME_TOKEN"); ok {
 			ci.Attrs["token"] = v
 		}
 	}
+	if _, ok := ci.Attrs["url_v2"]; !ok && version == "2" {
+		// https://github.com/actions/toolkit/blob/2b08dc18f261b9fdd978b70279b85cbef81af8bc/packages/cache/src/internal/config.ts#L34-L35
+		if v, ok := os.LookupEnv("ACTIONS_RESULTS_URL"); ok {
+			ci.Attrs["url_v2"] = v
+		}
+	}
 	if _, ok := ci.Attrs["url"]; !ok {
+		// https://github.com/actions/toolkit/blob/2b08dc18f261b9fdd978b70279b85cbef81af8bc/packages/cache/src/internal/config.ts#L28-L33
 		if v, ok := os.LookupEnv("ACTIONS_CACHE_URL"); ok {
 			ci.Attrs["url"] = v
+		} else if v, ok := os.LookupEnv("ACTIONS_RESULTS_URL"); ok {
+			ci.Attrs["url"] = v
 		}
 	}
-	return ci.Attrs["token"] != "" && ci.Attrs["url"] != ""
 }
 
 func addAwsCredentials(ci *controllerapi.CacheOptionsEntry) {
@@ -115,3 +268,11 @@ func addAwsCredentials(ci *controllerapi
 		ci.Attrs["session_token"] = credentials.SessionToken
 	}
 }
+
+func isActive(pb *controllerapi.CacheOptionsEntry) bool {
+	// Always active if not gha.
+	if pb.Type != "gha" {
+		return true
+	}
+	return pb.Attrs["token"] != "" && (pb.Attrs["url"] != "" || pb.Attrs["url_v2"] != "")
+}
diff -pruN 0.19.3+ds1-4/util/buildflags/cache_cty.go 0.21.3-0ubuntu1/util/buildflags/cache_cty.go
--- 0.19.3+ds1-4/util/buildflags/cache_cty.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/util/buildflags/cache_cty.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,87 @@
+package buildflags
+
+import (
+	"sync"
+
+	"github.com/zclconf/go-cty/cty"
+	"github.com/zclconf/go-cty/cty/convert"
+)
+
+var cacheOptionsEntryType = sync.OnceValue(func() cty.Type {
+	return cty.Map(cty.String)
+})
+
+func (o *CacheOptions) FromCtyValue(in cty.Value, p cty.Path) error {
+	got := in.Type()
+	if got.IsTupleType() || got.IsListType() {
+		return o.fromCtyValue(in, p)
+	}
+
+	want := cty.List(cacheOptionsEntryType())
+	return p.NewErrorf("%s", convert.MismatchMessage(got, want))
+}
+
+func (o *CacheOptions) fromCtyValue(in cty.Value, p cty.Path) (retErr error) {
+	*o = make([]*CacheOptionsEntry, 0, in.LengthInt())
+
+	yield := func(value cty.Value) bool {
+		// Special handling for a string type to handle ref only format.
+		if value.Type() == cty.String {
+			var entries CacheOptions
+			entries, retErr = ParseCacheEntry([]string{value.AsString()})
+			if retErr != nil {
+				return false
+			}
+			*o = append(*o, entries...)
+			return true
+		}
+
+		entry := &CacheOptionsEntry{}
+		if retErr = entry.FromCtyValue(value, p); retErr != nil {
+			return false
+		}
+		*o = append(*o, entry)
+		return true
+	}
+	eachElement(in)(yield)
+	return retErr
+}
+
+func (o CacheOptions) ToCtyValue() cty.Value {
+	if len(o) == 0 {
+		return cty.ListValEmpty(cacheOptionsEntryType())
+	}
+
+	vals := make([]cty.Value, len(o))
+	for i, entry := range o {
+		vals[i] = entry.ToCtyValue()
+	}
+	return cty.ListVal(vals)
+}
+
+func (o *CacheOptionsEntry) FromCtyValue(in cty.Value, p cty.Path) error {
+	conv, err := convert.Convert(in, cty.Map(cty.String))
+	if err != nil {
+		return err
+	}
+
+	m := conv.AsValueMap()
+	if err := getAndDelete(m, "type", &o.Type); err != nil {
+		return err
+	}
+	o.Attrs = asMap(m)
+	return o.validate(in)
+}
+
+func (o *CacheOptionsEntry) ToCtyValue() cty.Value {
+	if o == nil {
+		return cty.NullVal(cty.Map(cty.String))
+	}
+
+	vals := make(map[string]cty.Value, len(o.Attrs)+1)
+	for k, v := range o.Attrs {
+		vals[k] = cty.StringVal(v)
+	}
+	vals["type"] = cty.StringVal(o.Type)
+	return cty.MapVal(vals)
+}
diff -pruN 0.19.3+ds1-4/util/buildflags/cache_test.go 0.21.3-0ubuntu1/util/buildflags/cache_test.go
--- 0.19.3+ds1-4/util/buildflags/cache_test.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/util/buildflags/cache_test.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,120 @@
+package buildflags
+
+import (
+	"encoding/json"
+	"testing"
+
+	"github.com/docker/buildx/controller/pb"
+	"github.com/stretchr/testify/require"
+	"github.com/zclconf/go-cty/cty"
+)
+
+func TestCacheOptions_DerivedVars(t *testing.T) {
+	t.Setenv("ACTIONS_RUNTIME_TOKEN", "sensitive_token")
+	t.Setenv("ACTIONS_CACHE_URL", "https://cache.github.com")
+	t.Setenv("AWS_ACCESS_KEY_ID", "definitely_dont_look_here")
+	t.Setenv("AWS_SECRET_ACCESS_KEY", "hackers_please_dont_steal")
+	t.Setenv("AWS_SESSION_TOKEN", "not_a_mitm_attack")
+
+	cacheFrom, err := ParseCacheEntry([]string{"type=gha", "type=s3,region=us-west-2,bucket=my_bucket,name=my_image"})
+	require.NoError(t, err)
+	require.Equal(t, []*pb.CacheOptionsEntry{
+		{
+			Type: "gha",
+			Attrs: map[string]string{
+				"token": "sensitive_token",
+				"url":   "https://cache.github.com",
+			},
+		},
+		{
+			Type: "s3",
+			Attrs: map[string]string{
+				"region":            "us-west-2",
+				"bucket":            "my_bucket",
+				"name":              "my_image",
+				"access_key_id":     "definitely_dont_look_here",
+				"secret_access_key": "hackers_please_dont_steal",
+				"session_token":     "not_a_mitm_attack",
+			},
+		},
+	}, cacheFrom.ToPB())
+}
+
+func TestCacheOptions(t *testing.T) {
+	t.Run("MarshalJSON", func(t *testing.T) {
+		cache := CacheOptions{
+			{Type: "registry", Attrs: map[string]string{"ref": "user/app:cache"}},
+			{Type: "local", Attrs: map[string]string{"src": "path/to/cache"}},
+		}
+
+		expected := `[{"type":"registry","ref":"user/app:cache"},{"type":"local","src":"path/to/cache"}]`
+		actual, err := json.Marshal(cache)
+		require.NoError(t, err)
+		require.JSONEq(t, expected, string(actual))
+	})
+
+	t.Run("UnmarshalJSON", func(t *testing.T) {
+		in := `[{"type":"registry","ref":"user/app:cache"},{"type":"local","src":"path/to/cache"}]`
+
+		var actual CacheOptions
+		err := json.Unmarshal([]byte(in), &actual)
+		require.NoError(t, err)
+
+		expected := CacheOptions{
+			{Type: "registry", Attrs: map[string]string{"ref": "user/app:cache"}},
+			{Type: "local", Attrs: map[string]string{"src": "path/to/cache"}},
+		}
+		require.Equal(t, expected, actual)
+	})
+
+	t.Run("FromCtyValue", func(t *testing.T) {
+		in := cty.TupleVal([]cty.Value{
+			cty.ObjectVal(map[string]cty.Value{
+				"type": cty.StringVal("registry"),
+				"ref":  cty.StringVal("user/app:cache"),
+			}),
+			cty.StringVal("type=local,src=path/to/cache"),
+		})
+
+		var actual CacheOptions
+		err := actual.FromCtyValue(in, nil)
+		require.NoError(t, err)
+
+		expected := CacheOptions{
+			{Type: "registry", Attrs: map[string]string{"ref": "user/app:cache"}},
+			{Type: "local", Attrs: map[string]string{"src": "path/to/cache"}},
+		}
+		require.Equal(t, expected, actual)
+	})
+
+	t.Run("ToCtyValue", func(t *testing.T) {
+		attests := CacheOptions{
+			{Type: "registry", Attrs: map[string]string{"ref": "user/app:cache"}},
+			{Type: "local", Attrs: map[string]string{"src": "path/to/cache"}},
+		}
+
+		actual := attests.ToCtyValue()
+		expected := cty.ListVal([]cty.Value{
+			cty.MapVal(map[string]cty.Value{
+				"type": cty.StringVal("registry"),
+				"ref":  cty.StringVal("user/app:cache"),
+			}),
+			cty.MapVal(map[string]cty.Value{
+				"type": cty.StringVal("local"),
+				"src":  cty.StringVal("path/to/cache"),
+			}),
+		})
+
+		result := actual.Equals(expected)
+		require.True(t, result.True())
+	})
+}
+
+func TestCacheOptions_RefOnlyFormat(t *testing.T) {
+	opts, err := ParseCacheEntry([]string{"ref1", "ref2"})
+	require.NoError(t, err)
+	require.Equal(t, CacheOptions{
+		{Type: "registry", Attrs: map[string]string{"ref": "ref1"}},
+		{Type: "registry", Attrs: map[string]string{"ref": "ref2"}},
+	}, opts)
+}
diff -pruN 0.19.3+ds1-4/util/buildflags/context.go 0.21.3-0ubuntu1/util/buildflags/context.go
--- 0.19.3+ds1-4/util/buildflags/context.go	2024-12-17 11:25:19.000000000 +0000
+++ 0.21.3-0ubuntu1/util/buildflags/context.go	2025-03-17 16:14:25.000000000 +0000
@@ -11,8 +11,13 @@ func ParseContextNames(values []string)
 	if len(values) == 0 {
 		return nil, nil
 	}
+
 	result := make(map[string]string, len(values))
 	for _, value := range values {
+		if value == "" {
+			continue
+		}
+
 		kv := strings.SplitN(value, "=", 2)
 		if len(kv) != 2 {
 			return nil, errors.Errorf("invalid context value: %s, expected key=value", value)
diff -pruN 0.19.3+ds1-4/util/buildflags/entitlements.go 0.21.3-0ubuntu1/util/buildflags/entitlements.go
--- 0.19.3+ds1-4/util/buildflags/entitlements.go	2024-12-17 11:25:19.000000000 +0000
+++ 0.21.3-0ubuntu1/util/buildflags/entitlements.go	2025-03-17 16:14:25.000000000 +0000
@@ -1,15 +1,20 @@
 package buildflags
 
-import "github.com/moby/buildkit/util/entitlements"
+import (
+	"github.com/moby/buildkit/util/entitlements"
+)
 
-func ParseEntitlements(in []string) ([]entitlements.Entitlement, error) {
-	out := make([]entitlements.Entitlement, 0, len(in))
+func ParseEntitlements(in []string) ([]string, error) {
+	out := make([]string, 0, len(in))
 	for _, v := range in {
-		e, err := entitlements.Parse(v)
-		if err != nil {
+		if v == "" {
+			continue
+		}
+
+		if _, _, err := entitlements.Parse(v); err != nil {
 			return nil, err
 		}
-		out = append(out, e)
+		out = append(out, v)
 	}
 	return out, nil
 }
diff -pruN 0.19.3+ds1-4/util/buildflags/export.go 0.21.3-0ubuntu1/util/buildflags/export.go
--- 0.19.3+ds1-4/util/buildflags/export.go	2024-12-17 11:25:19.000000000 +0000
+++ 0.21.3-0ubuntu1/util/buildflags/export.go	2025-03-17 16:14:25.000000000 +0000
@@ -1,7 +1,11 @@
 package buildflags
 
 import (
+	"encoding/csv"
+	"encoding/json"
+	"maps"
 	"regexp"
+	"sort"
 	"strings"
 
 	"github.com/containerd/platforms"
@@ -13,72 +17,171 @@ import (
 	"github.com/tonistiigi/go-csvvalue"
 )
 
-func ParseExports(inp []string) ([]*controllerapi.ExportEntry, error) {
-	var outs []*controllerapi.ExportEntry
-	if len(inp) == 0 {
-		return nil, nil
+type Exports []*ExportEntry
+
+func (e Exports) Merge(other Exports) Exports {
+	if other == nil {
+		e.Normalize()
+		return e
+	} else if e == nil {
+		other.Normalize()
+		return other
 	}
-	for _, s := range inp {
-		if s == "" {
-			continue
-		}
-		fields, err := csvvalue.Fields(s, nil)
-		if err != nil {
-			return nil, err
-		}
 
-		out := controllerapi.ExportEntry{
-			Attrs: map[string]string{},
-		}
-		if len(fields) == 1 && fields[0] == s && !strings.HasPrefix(s, "type=") {
-			if s != "-" {
-				outs = append(outs, &controllerapi.ExportEntry{
-					Type:        client.ExporterLocal,
-					Destination: s,
-				})
-				continue
-			}
-			out = controllerapi.ExportEntry{
-				Type:        client.ExporterTar,
-				Destination: s,
-			}
-		}
+	return append(e, other...).Normalize()
+}
 
-		if out.Type == "" {
-			for _, field := range fields {
-				parts := strings.SplitN(field, "=", 2)
-				if len(parts) != 2 {
-					return nil, errors.Errorf("invalid value %s", field)
-				}
-				key := strings.TrimSpace(strings.ToLower(parts[0]))
-				value := parts[1]
-				switch key {
-				case "type":
-					out.Type = value
-				default:
-					out.Attrs[key] = value
-				}
-			}
-		}
-		if out.Type == "" {
-			return nil, errors.Errorf("type is required for output")
+func (e Exports) Normalize() Exports {
+	if len(e) == 0 {
+		return nil
+	}
+	return removeDupes(e)
+}
+
+func (e Exports) ToPB() []*controllerapi.ExportEntry {
+	if len(e) == 0 {
+		return nil
+	}
+
+	entries := make([]*controllerapi.ExportEntry, len(e))
+	for i, entry := range e {
+		entries[i] = entry.ToPB()
+	}
+	return entries
+}
+
+type ExportEntry struct {
+	Type        string            `json:"type"`
+	Attrs       map[string]string `json:"attrs,omitempty"`
+	Destination string            `json:"dest,omitempty"`
+}
+
+func (e *ExportEntry) Equal(other *ExportEntry) bool {
+	if e.Type != other.Type || e.Destination != other.Destination {
+		return false
+	}
+	return maps.Equal(e.Attrs, other.Attrs)
+}
+
+func (e *ExportEntry) String() string {
+	var b csvBuilder
+	if e.Type != "" {
+		b.Write("type", e.Type)
+	}
+	if e.Destination != "" {
+		b.Write("dest", e.Destination)
+	}
+	if len(e.Attrs) > 0 {
+		b.WriteAttributes(e.Attrs)
+	}
+	return b.String()
+}
+
+func (e *ExportEntry) ToPB() *controllerapi.ExportEntry {
+	return &controllerapi.ExportEntry{
+		Type:        e.Type,
+		Attrs:       maps.Clone(e.Attrs),
+		Destination: e.Destination,
+	}
+}
+
+func (e *ExportEntry) MarshalJSON() ([]byte, error) {
+	m := maps.Clone(e.Attrs)
+	if m == nil {
+		m = map[string]string{}
+	}
+	m["type"] = e.Type
+	if e.Destination != "" {
+		m["dest"] = e.Destination
+	}
+	return json.Marshal(m)
+}
+
+func (e *ExportEntry) UnmarshalJSON(data []byte) error {
+	var m map[string]string
+	if err := json.Unmarshal(data, &m); err != nil {
+		return err
+	}
+
+	e.Type = m["type"]
+	delete(m, "type")
+
+	e.Destination = m["dest"]
+	delete(m, "dest")
+
+	e.Attrs = m
+	return e.validate()
+}
+
+func (e *ExportEntry) UnmarshalText(text []byte) error {
+	s := string(text)
+	fields, err := csvvalue.Fields(s, nil)
+	if err != nil {
+		return err
+	}
+
+	// Clear the target entry.
+	e.Type = ""
+	e.Attrs = map[string]string{}
+	e.Destination = ""
+
+	if len(fields) == 1 && fields[0] == s && !strings.HasPrefix(s, "type=") {
+		if s != "-" {
+			e.Type = client.ExporterLocal
+			e.Destination = s
+			return nil
 		}
 
-		if out.Type == "registry" {
-			out.Type = client.ExporterImage
-			if _, ok := out.Attrs["push"]; !ok {
-				out.Attrs["push"] = "true"
+		e.Type = client.ExporterTar
+		e.Destination = s
+	}
+
+	if e.Type == "" {
+		for _, field := range fields {
+			parts := strings.SplitN(field, "=", 2)
+			if len(parts) != 2 {
+				return errors.Errorf("invalid value %s", field)
+			}
+			key := strings.TrimSpace(strings.ToLower(parts[0]))
+			value := parts[1]
+			switch key {
+			case "type":
+				e.Type = value
+			case "dest":
+				e.Destination = value
+			default:
+				e.Attrs[key] = value
 			}
 		}
+	}
+	return e.validate()
+}
+
+func (e *ExportEntry) validate() error {
+	if e.Type == "" {
+		return errors.Errorf("type is required for output")
+	}
+	return nil
+}
 
-		if dest, ok := out.Attrs["dest"]; ok {
-			out.Destination = dest
-			delete(out.Attrs, "dest")
+func ParseExports(inp []string) ([]*controllerapi.ExportEntry, error) {
+	if len(inp) == 0 {
+		return nil, nil
+	}
+
+	export := make(Exports, 0, len(inp))
+	for _, s := range inp {
+		if s == "" {
+			continue
 		}
 
-		outs = append(outs, &out)
+		var out ExportEntry
+		if err := out.UnmarshalText([]byte(s)); err != nil {
+			return nil, err
+		}
+		export = append(export, &out)
 	}
-	return outs, nil
+	return export.ToPB(), nil
 }
 
 func ParseAnnotations(inp []string) (map[exptypes.AnnotationKey]string, error) {
@@ -89,6 +192,10 @@ func ParseAnnotations(inp []string) (map
 
 	annotations := make(map[exptypes.AnnotationKey]string)
 	for _, inp := range inp {
+		if inp == "" {
+			continue
+		}
+
 		k, v, ok := strings.Cut(inp, "=")
 		if !ok {
 			return nil, errors.Errorf("invalid annotation %q, expected key=value", inp)
@@ -144,3 +251,41 @@ func ParseAnnotations(inp []string) (map
 	}
 	return annotations, nil
 }
+
+type csvBuilder struct {
+	sb strings.Builder
+}
+
+func (w *csvBuilder) Write(key, value string) {
+	if w.sb.Len() > 0 {
+		w.sb.WriteByte(',')
+	}
+
+	pair := key + "=" + value
+	if strings.ContainsRune(pair, ',') || strings.ContainsRune(pair, '"') {
+		var attr strings.Builder
+		writer := csv.NewWriter(&attr)
+		writer.Write([]string{pair})
+		writer.Flush()
+		// Strips the extra newline added by the csv writer
+		pair = strings.TrimSpace(attr.String())
+	}
+
+	w.sb.WriteString(pair)
+}
+
+func (w *csvBuilder) WriteAttributes(attrs map[string]string) {
+	keys := make([]string, 0, len(attrs))
+	for key := range attrs {
+		keys = append(keys, key)
+	}
+	sort.Strings(keys)
+
+	for _, key := range keys {
+		w.Write(key, attrs[key])
+	}
+}
+
+func (w *csvBuilder) String() string {
+	return w.sb.String()
+}
diff -pruN 0.19.3+ds1-4/util/buildflags/export_cty.go 0.21.3-0ubuntu1/util/buildflags/export_cty.go
--- 0.19.3+ds1-4/util/buildflags/export_cty.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/util/buildflags/export_cty.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,87 @@
+package buildflags
+
+import (
+	"sync"
+
+	"github.com/zclconf/go-cty/cty"
+	"github.com/zclconf/go-cty/cty/convert"
+)
+
+var exportEntryType = sync.OnceValue(func() cty.Type {
+	return cty.Map(cty.String)
+})
+
+func (e *Exports) FromCtyValue(in cty.Value, p cty.Path) error {
+	got := in.Type()
+	if got.IsTupleType() || got.IsListType() {
+		return e.fromCtyValue(in, p)
+	}
+
+	want := cty.List(exportEntryType())
+	return p.NewErrorf("%s", convert.MismatchMessage(got, want))
+}
+
+func (e *Exports) fromCtyValue(in cty.Value, p cty.Path) (retErr error) {
+	*e = make([]*ExportEntry, 0, in.LengthInt())
+
+	yield := func(value cty.Value) bool {
+		entry := &ExportEntry{}
+		if retErr = entry.FromCtyValue(value, p); retErr != nil {
+			return false
+		}
+		*e = append(*e, entry)
+		return true
+	}
+	eachElement(in)(yield)
+	return retErr
+}
+
+func (e Exports) ToCtyValue() cty.Value {
+	if len(e) == 0 {
+		return cty.ListValEmpty(exportEntryType())
+	}
+
+	vals := make([]cty.Value, len(e))
+	for i, entry := range e {
+		vals[i] = entry.ToCtyValue()
+	}
+	return cty.ListVal(vals)
+}
+
+func (e *ExportEntry) FromCtyValue(in cty.Value, p cty.Path) error {
+	if in.Type() == cty.String {
+		if err := e.UnmarshalText([]byte(in.AsString())); err != nil {
+			return p.NewError(err)
+		}
+		return nil
+	}
+
+	conv, err := convert.Convert(in, cty.Map(cty.String))
+	if err != nil {
+		return err
+	}
+
+	m := conv.AsValueMap()
+	if err := getAndDelete(m, "type", &e.Type); err != nil {
+		return err
+	}
+	if err := getAndDelete(m, "dest", &e.Destination); err != nil {
+		return err
+	}
+	e.Attrs = asMap(m)
+	return e.validate()
+}
+
+func (e *ExportEntry) ToCtyValue() cty.Value {
+	if e == nil {
+		return cty.NullVal(cty.Map(cty.String))
+	}
+
+	vals := make(map[string]cty.Value, len(e.Attrs)+2)
+	for k, v := range e.Attrs {
+		vals[k] = cty.StringVal(v)
+	}
+	vals["type"] = cty.StringVal(e.Type)
+	vals["dest"] = cty.StringVal(e.Destination)
+	return cty.MapVal(vals)
+}
diff -pruN 0.19.3+ds1-4/util/buildflags/secrets.go 0.21.3-0ubuntu1/util/buildflags/secrets.go
--- 0.19.3+ds1-4/util/buildflags/secrets.go	2024-12-17 11:25:19.000000000 +0000
+++ 0.21.3-0ubuntu1/util/buildflags/secrets.go	2025-03-17 16:14:25.000000000 +0000
@@ -1,6 +1,7 @@
 package buildflags
 
 import (
+	"encoding/json"
 	"strings"
 
 	controllerapi "github.com/docker/buildx/controller/pb"
@@ -8,28 +9,95 @@ import (
 	"github.com/tonistiigi/go-csvvalue"
 )
 
-func ParseSecretSpecs(sl []string) ([]*controllerapi.Secret, error) {
-	fs := make([]*controllerapi.Secret, 0, len(sl))
-	for _, v := range sl {
-		if v == "" {
-			continue
-		}
-		s, err := parseSecret(v)
-		if err != nil {
-			return nil, err
-		}
-		fs = append(fs, s)
+type Secrets []*Secret
+
+func (s Secrets) Merge(other Secrets) Secrets {
+	if other == nil {
+		s.Normalize()
+		return s
+	} else if s == nil {
+		other.Normalize()
+		return other
 	}
-	return fs, nil
+
+	return append(s, other...).Normalize()
 }
 
-func parseSecret(value string) (*controllerapi.Secret, error) {
+func (s Secrets) Normalize() Secrets {
+	if len(s) == 0 {
+		return nil
+	}
+	return removeDupes(s)
+}
+
+func (s Secrets) ToPB() []*controllerapi.Secret {
+	if len(s) == 0 {
+		return nil
+	}
+
+	entries := make([]*controllerapi.Secret, len(s))
+	for i, entry := range s {
+		entries[i] = entry.ToPB()
+	}
+	return entries
+}
+
+type Secret struct {
+	ID       string `json:"id,omitempty"`
+	FilePath string `json:"src,omitempty"`
+	Env      string `json:"env,omitempty"`
+}
+
+func (s *Secret) Equal(other *Secret) bool {
+	return s.ID == other.ID && s.FilePath == other.FilePath && s.Env == other.Env
+}
+
+func (s *Secret) String() string {
+	var b csvBuilder
+	if s.ID != "" {
+		b.Write("id", s.ID)
+	}
+	if s.FilePath != "" {
+		b.Write("src", s.FilePath)
+	}
+	if s.Env != "" {
+		b.Write("env", s.Env)
+	}
+	return b.String()
+}
+
+func (s *Secret) ToPB() *controllerapi.Secret {
+	return &controllerapi.Secret{
+		ID:       s.ID,
+		FilePath: s.FilePath,
+		Env:      s.Env,
+	}
+}
+
+func (s *Secret) UnmarshalJSON(data []byte) error {
+	var v struct {
+		ID       string `json:"id,omitempty"`
+		FilePath string `json:"src,omitempty"`
+		Env      string `json:"env,omitempty"`
+	}
+	if err := json.Unmarshal(data, &v); err != nil {
+		return err
+	}
+
+	s.ID = v.ID
+	s.FilePath = v.FilePath
+	s.Env = v.Env
+	return nil
+}
+
+func (s *Secret) UnmarshalText(text []byte) error {
+	value := string(text)
 	fields, err := csvvalue.Fields(value, nil)
 	if err != nil {
-		return nil, errors.Wrap(err, "failed to parse csv secret")
+		return errors.Wrap(err, "failed to parse csv secret")
 	}
 
-	fs := controllerapi.Secret{}
+	*s = Secret{}
 
 	var typ string
 	for _, field := range fields {
@@ -37,29 +105,53 @@ func parseSecret(value string) (*control
 		key := strings.ToLower(parts[0])
 
 		if len(parts) != 2 {
-			return nil, errors.Errorf("invalid field '%s' must be a key=value pair", field)
+			return errors.Errorf("invalid field '%s' must be a key=value pair", field)
 		}
 
 		value := parts[1]
 		switch key {
 		case "type":
 			if value != "file" && value != "env" {
-				return nil, errors.Errorf("unsupported secret type %q", value)
+				return errors.Errorf("unsupported secret type %q", value)
 			}
 			typ = value
 		case "id":
-			fs.ID = value
+			s.ID = value
 		case "source", "src":
-			fs.FilePath = value
+			s.FilePath = value
 		case "env":
-			fs.Env = value
+			s.Env = value
 		default:
-			return nil, errors.Errorf("unexpected key '%s' in '%s'", key, field)
+			return errors.Errorf("unexpected key '%s' in '%s'", key, field)
+		}
+	}
+	if typ == "env" && s.Env == "" {
+		s.Env = s.FilePath
+		s.FilePath = ""
+	}
+	return nil
+}
+
+func ParseSecretSpecs(sl []string) ([]*controllerapi.Secret, error) {
+	fs := make([]*controllerapi.Secret, 0, len(sl))
+	for _, v := range sl {
+		if v == "" {
+			continue
 		}
+
+		s, err := parseSecret(v)
+		if err != nil {
+			return nil, err
+		}
+		fs = append(fs, s)
 	}
-	if typ == "env" && fs.Env == "" {
-		fs.Env = fs.FilePath
-		fs.FilePath = ""
+	return fs, nil
+}
+
+func parseSecret(value string) (*controllerapi.Secret, error) {
+	var s Secret
+	if err := s.UnmarshalText([]byte(value)); err != nil {
+		return nil, err
 	}
-	return &fs, nil
+	return s.ToPB(), nil
 }
diff -pruN 0.19.3+ds1-4/util/buildflags/secrets_cty.go 0.21.3-0ubuntu1/util/buildflags/secrets_cty.go
--- 0.19.3+ds1-4/util/buildflags/secrets_cty.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/util/buildflags/secrets_cty.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,93 @@
+package buildflags
+
+import (
+	"sync"
+
+	"github.com/zclconf/go-cty/cty"
+	"github.com/zclconf/go-cty/cty/convert"
+)
+
+var secretType = sync.OnceValue(func() cty.Type {
+	return cty.ObjectWithOptionalAttrs(
+		map[string]cty.Type{
+			"id":  cty.String,
+			"src": cty.String,
+			"env": cty.String,
+		},
+		[]string{"id", "src", "env"},
+	)
+})
+
+func (s *Secrets) FromCtyValue(in cty.Value, p cty.Path) error {
+	got := in.Type()
+	if got.IsTupleType() || got.IsListType() {
+		return s.fromCtyValue(in, p)
+	}
+
+	want := cty.List(secretType())
+	return p.NewErrorf("%s", convert.MismatchMessage(got, want))
+}
+
+func (s *Secrets) fromCtyValue(in cty.Value, p cty.Path) (retErr error) {
+	*s = make([]*Secret, 0, in.LengthInt())
+
+	yield := func(value cty.Value) bool {
+		entry := &Secret{}
+		if retErr = entry.FromCtyValue(value, p); retErr != nil {
+			return false
+		}
+		*s = append(*s, entry)
+		return true
+	}
+	eachElement(in)(yield)
+	return retErr
+}
+
+func (s Secrets) ToCtyValue() cty.Value {
+	if len(s) == 0 {
+		return cty.ListValEmpty(secretType())
+	}
+
+	vals := make([]cty.Value, len(s))
+	for i, entry := range s {
+		vals[i] = entry.ToCtyValue()
+	}
+	return cty.ListVal(vals)
+}
+
+func (e *Secret) FromCtyValue(in cty.Value, p cty.Path) error {
+	if in.Type() == cty.String {
+		if err := e.UnmarshalText([]byte(in.AsString())); err != nil {
+			return p.NewError(err)
+		}
+		return nil
+	}
+
+	conv, err := convert.Convert(in, secretType())
+	if err != nil {
+		return err
+	}
+
+	if id := conv.GetAttr("id"); !id.IsNull() && id.IsKnown() {
+		e.ID = id.AsString()
+	}
+	if src := conv.GetAttr("src"); !src.IsNull() && src.IsKnown() {
+		e.FilePath = src.AsString()
+	}
+	if env := conv.GetAttr("env"); !env.IsNull() && env.IsKnown() {
+		e.Env = env.AsString()
+	}
+	return nil
+}
+
+func (e *Secret) ToCtyValue() cty.Value {
+	if e == nil {
+		return cty.NullVal(secretType())
+	}
+
+	return cty.ObjectVal(map[string]cty.Value{
+		"id":  cty.StringVal(e.ID),
+		"src": cty.StringVal(e.FilePath),
+		"env": cty.StringVal(e.Env),
+	})
+}
diff -pruN 0.19.3+ds1-4/util/buildflags/secrets_test.go 0.21.3-0ubuntu1/util/buildflags/secrets_test.go
--- 0.19.3+ds1-4/util/buildflags/secrets_test.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/util/buildflags/secrets_test.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,84 @@
+package buildflags
+
+import (
+	"encoding/json"
+	"testing"
+
+	"github.com/stretchr/testify/require"
+	"github.com/zclconf/go-cty/cty"
+)
+
+func TestSecrets(t *testing.T) {
+	t.Run("MarshalJSON", func(t *testing.T) {
+		secrets := Secrets{
+			{ID: "mysecret", FilePath: "/local/secret"},
+			{ID: "mysecret2", Env: "TOKEN"},
+		}
+
+		expected := `[{"id":"mysecret","src":"/local/secret"},{"id":"mysecret2","env":"TOKEN"}]`
+		actual, err := json.Marshal(secrets)
+		require.NoError(t, err)
+		require.JSONEq(t, expected, string(actual))
+	})
+
+	t.Run("UnmarshalJSON", func(t *testing.T) {
+		in := `[{"id":"mysecret","src":"/local/secret"},{"id":"mysecret2","env":"TOKEN"}]`
+
+		var actual Secrets
+		err := json.Unmarshal([]byte(in), &actual)
+		require.NoError(t, err)
+
+		expected := Secrets{
+			{ID: "mysecret", FilePath: "/local/secret"},
+			{ID: "mysecret2", Env: "TOKEN"},
+		}
+		require.Equal(t, expected, actual)
+	})
+
+	t.Run("FromCtyValue", func(t *testing.T) {
+		in := cty.TupleVal([]cty.Value{
+			cty.ObjectVal(map[string]cty.Value{
+				"id":  cty.StringVal("mysecret"),
+				"src": cty.StringVal("/local/secret"),
+			}),
+			cty.ObjectVal(map[string]cty.Value{
+				"id":  cty.StringVal("mysecret2"),
+				"env": cty.StringVal("TOKEN"),
+			}),
+		})
+
+		var actual Secrets
+		err := actual.FromCtyValue(in, nil)
+		require.NoError(t, err)
+
+		expected := Secrets{
+			{ID: "mysecret", FilePath: "/local/secret"},
+			{ID: "mysecret2", Env: "TOKEN"},
+		}
+		require.Equal(t, expected, actual)
+	})
+
+	t.Run("ToCtyValue", func(t *testing.T) {
+		secrets := Secrets{
+			{ID: "mysecret", FilePath: "/local/secret"},
+			{ID: "mysecret2", Env: "TOKEN"},
+		}
+
+		actual := secrets.ToCtyValue()
+		expected := cty.ListVal([]cty.Value{
+			cty.ObjectVal(map[string]cty.Value{
+				"id":  cty.StringVal("mysecret"),
+				"src": cty.StringVal("/local/secret"),
+				"env": cty.StringVal(""),
+			}),
+			cty.ObjectVal(map[string]cty.Value{
+				"id":  cty.StringVal("mysecret2"),
+				"src": cty.StringVal(""),
+				"env": cty.StringVal("TOKEN"),
+			}),
+		})
+
+		result := actual.Equals(expected)
+		require.True(t, result.True())
+	})
+}
diff -pruN 0.19.3+ds1-4/util/buildflags/ssh.go 0.21.3-0ubuntu1/util/buildflags/ssh.go
--- 0.19.3+ds1-4/util/buildflags/ssh.go	2024-12-17 11:25:19.000000000 +0000
+++ 0.21.3-0ubuntu1/util/buildflags/ssh.go	2025-03-17 16:14:25.000000000 +0000
@@ -1,12 +1,108 @@
 package buildflags
 
 import (
+	"cmp"
+	"encoding/json"
+	"slices"
 	"strings"
 
 	controllerapi "github.com/docker/buildx/controller/pb"
 	"github.com/moby/buildkit/util/gitutil"
 )
 
+type SSHKeys []*SSH
+
+func (s SSHKeys) Merge(other SSHKeys) SSHKeys {
+	if other == nil {
+		s.Normalize()
+		return s
+	} else if s == nil {
+		other.Normalize()
+		return other
+	}
+
+	return append(s, other...).Normalize()
+}
+
+func (s SSHKeys) Normalize() SSHKeys {
+	if len(s) == 0 {
+		return nil
+	}
+	return removeDupes(s)
+}
+
+func (s SSHKeys) ToPB() []*controllerapi.SSH {
+	if len(s) == 0 {
+		return nil
+	}
+
+	entries := make([]*controllerapi.SSH, len(s))
+	for i, entry := range s {
+		entries[i] = entry.ToPB()
+	}
+	return entries
+}
+
+type SSH struct {
+	ID    string   `json:"id,omitempty" cty:"id"`
+	Paths []string `json:"paths,omitempty" cty:"paths"`
+}
+
+func (s *SSH) Equal(other *SSH) bool {
+	return s.Less(other) == 0
+}
+
+func (s *SSH) Less(other *SSH) int {
+	if s.ID != other.ID {
+		return cmp.Compare(s.ID, other.ID)
+	}
+	return slices.Compare(s.Paths, other.Paths)
+}
+
+func (s *SSH) String() string {
+	if len(s.Paths) == 0 {
+		return s.ID
+	}
+
+	var b csvBuilder
+	paths := strings.Join(s.Paths, ",")
+	b.Write(s.ID, paths)
+	return b.String()
+}
+
+func (s *SSH) ToPB() *controllerapi.SSH {
+	return &controllerapi.SSH{
+		ID:    s.ID,
+		Paths: s.Paths,
+	}
+}
+
+func (s *SSH) UnmarshalJSON(data []byte) error {
+	var v struct {
+		ID    string   `json:"id,omitempty"`
+		Paths []string `json:"paths,omitempty"`
+	}
+	if err := json.Unmarshal(data, &v); err != nil {
+		return err
+	}
+
+	s.ID = v.ID
+	s.Paths = v.Paths
+	return nil
+}
+
+func (s *SSH) UnmarshalText(text []byte) error {
+	parts := strings.SplitN(string(text), "=", 2)
+
+	s.ID = parts[0]
+	if len(parts) > 1 {
+		s.Paths = strings.Split(parts[1], ",")
+	} else {
+		s.Paths = nil
+	}
+	return nil
+}
+
 func ParseSSHSpecs(sl []string) ([]*controllerapi.SSH, error) {
 	var outs []*controllerapi.SSH
 	if len(sl) == 0 {
@@ -17,14 +113,12 @@ func ParseSSHSpecs(sl []string) ([]*cont
 		if s == "" {
 			continue
 		}
-		parts := strings.SplitN(s, "=", 2)
-		out := controllerapi.SSH{
-			ID: parts[0],
-		}
-		if len(parts) > 1 {
-			out.Paths = strings.Split(parts[1], ",")
+
+		var out SSH
+		if err := out.UnmarshalText([]byte(s)); err != nil {
+			return nil, err
 		}
-		outs = append(outs, &out)
+		outs = append(outs, out.ToPB())
 	}
 	return outs, nil
 }
diff -pruN 0.19.3+ds1-4/util/buildflags/ssh_cty.go 0.21.3-0ubuntu1/util/buildflags/ssh_cty.go
--- 0.19.3+ds1-4/util/buildflags/ssh_cty.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/util/buildflags/ssh_cty.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,102 @@
+package buildflags
+
+import (
+	"sync"
+
+	"github.com/zclconf/go-cty/cty"
+	"github.com/zclconf/go-cty/cty/convert"
+	"github.com/zclconf/go-cty/cty/gocty"
+)
+
+var sshType = sync.OnceValue(func() cty.Type {
+	return cty.ObjectWithOptionalAttrs(
+		map[string]cty.Type{
+			"id":    cty.String,
+			"paths": cty.List(cty.String),
+		},
+		[]string{"id", "paths"},
+	)
+})
+
+func (s *SSHKeys) FromCtyValue(in cty.Value, p cty.Path) error {
+	got := in.Type()
+	if got.IsTupleType() || got.IsListType() {
+		return s.fromCtyValue(in, p)
+	}
+
+	want := cty.List(sshType())
+	return p.NewErrorf("%s", convert.MismatchMessage(got, want))
+}
+
+func (s *SSHKeys) fromCtyValue(in cty.Value, p cty.Path) (retErr error) {
+	*s = make([]*SSH, 0, in.LengthInt())
+
+	yield := func(value cty.Value) bool {
+		entry := &SSH{}
+		if retErr = entry.FromCtyValue(value, p); retErr != nil {
+			return false
+		}
+		*s = append(*s, entry)
+		return true
+	}
+	eachElement(in)(yield)
+	return retErr
+}
+
+func (s SSHKeys) ToCtyValue() cty.Value {
+	if len(s) == 0 {
+		return cty.ListValEmpty(sshType())
+	}
+
+	vals := make([]cty.Value, len(s))
+	for i, entry := range s {
+		vals[i] = entry.ToCtyValue()
+	}
+	return cty.ListVal(vals)
+}
+
+func (e *SSH) FromCtyValue(in cty.Value, p cty.Path) error {
+	if in.Type() == cty.String {
+		if err := e.UnmarshalText([]byte(in.AsString())); err != nil {
+			return p.NewError(err)
+		}
+		return nil
+	}
+
+	conv, err := convert.Convert(in, sshType())
+	if err != nil {
+		return err
+	}
+
+	if id := conv.GetAttr("id"); !id.IsNull() && id.IsKnown() {
+		e.ID = id.AsString()
+	}
+	if paths := conv.GetAttr("paths"); !paths.IsNull() && paths.IsKnown() {
+		if err := gocty.FromCtyValue(paths, &e.Paths); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (e *SSH) ToCtyValue() cty.Value {
+	if e == nil {
+		return cty.NullVal(sshType())
+	}
+
+	var ctyPaths cty.Value
+	if len(e.Paths) > 0 {
+		paths := make([]cty.Value, len(e.Paths))
+		for i, path := range e.Paths {
+			paths[i] = cty.StringVal(path)
+		}
+		ctyPaths = cty.ListVal(paths)
+	} else {
+		ctyPaths = cty.ListValEmpty(cty.String)
+	}
+
+	return cty.ObjectVal(map[string]cty.Value{
+		"id":    cty.StringVal(e.ID),
+		"paths": ctyPaths,
+	})
+}
diff -pruN 0.19.3+ds1-4/util/buildflags/ssh_test.go 0.21.3-0ubuntu1/util/buildflags/ssh_test.go
--- 0.19.3+ds1-4/util/buildflags/ssh_test.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/util/buildflags/ssh_test.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,85 @@
+package buildflags
+
+import (
+	"encoding/json"
+	"testing"
+
+	"github.com/stretchr/testify/require"
+	"github.com/zclconf/go-cty/cty"
+)
+
+func TestSSHKeys(t *testing.T) {
+	t.Run("MarshalJSON", func(t *testing.T) {
+		sshkeys := SSHKeys{
+			{ID: "default", Paths: []string{}},
+			{ID: "key", Paths: []string{"path/to/key"}},
+		}
+
+		expected := `[{"id":"default"},{"id":"key","paths":["path/to/key"]}]`
+		actual, err := json.Marshal(sshkeys)
+		require.NoError(t, err)
+		require.JSONEq(t, expected, string(actual))
+	})
+
+	t.Run("UnmarshalJSON", func(t *testing.T) {
+		in := `[{"id":"default"},{"id":"key","paths":["path/to/key"]}]`
+
+		var actual SSHKeys
+		err := json.Unmarshal([]byte(in), &actual)
+		require.NoError(t, err)
+
+		expected := SSHKeys{
+			{ID: "default"},
+			{ID: "key", Paths: []string{"path/to/key"}},
+		}
+		require.Equal(t, expected, actual)
+	})
+
+	t.Run("FromCtyValue", func(t *testing.T) {
+		in := cty.TupleVal([]cty.Value{
+			cty.ObjectVal(map[string]cty.Value{
+				"id": cty.StringVal("default"),
+			}),
+			cty.ObjectVal(map[string]cty.Value{
+				"id": cty.StringVal("key"),
+				"paths": cty.TupleVal([]cty.Value{
+					cty.StringVal("path/to/key"),
+				}),
+			}),
+		})
+
+		var actual SSHKeys
+		err := actual.FromCtyValue(in, nil)
+		require.NoError(t, err)
+
+		expected := SSHKeys{
+			{ID: "default"},
+			{ID: "key", Paths: []string{"path/to/key"}},
+		}
+		require.Equal(t, expected, actual)
+	})
+
+	t.Run("ToCtyValue", func(t *testing.T) {
+		sshkeys := SSHKeys{
+			{ID: "default", Paths: []string{}},
+			{ID: "key", Paths: []string{"path/to/key"}},
+		}
+
+		actual := sshkeys.ToCtyValue()
+		expected := cty.ListVal([]cty.Value{
+			cty.ObjectVal(map[string]cty.Value{
+				"id":    cty.StringVal("default"),
+				"paths": cty.ListValEmpty(cty.String),
+			}),
+			cty.ObjectVal(map[string]cty.Value{
+				"id": cty.StringVal("key"),
+				"paths": cty.ListVal([]cty.Value{
+					cty.StringVal("path/to/key"),
+				}),
+			}),
+		})
+
+		result := actual.Equals(expected)
+		require.True(t, result.True())
+	})
+}
diff -pruN 0.19.3+ds1-4/util/buildflags/utils.go 0.21.3-0ubuntu1/util/buildflags/utils.go
--- 0.19.3+ds1-4/util/buildflags/utils.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/util/buildflags/utils.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,76 @@
+package buildflags
+
+import (
+	"github.com/zclconf/go-cty/cty"
+	"github.com/zclconf/go-cty/cty/gocty"
+)
+
+type comparable[E any] interface {
+	Equal(other E) bool
+}
+
+func removeDupes[E comparable[E]](s []E) []E {
+	// Move backwards through the slice.
+	// For each element, any elements after the current element are unique.
+	// If we find our current element conflicts with an existing element,
+	// then we swap the offender with the end of the slice and chop it off.
+
+	// Start at the second to last element.
+	// The last element is always unique.
+	for i := len(s) - 2; i >= 0; i-- {
+		elem := s[i]
+		// Check for duplicates after our current element.
+		for j := i + 1; j < len(s); j++ {
+			if elem.Equal(s[j]) {
+				// Found a duplicate, exchange the
+				// duplicate with the last element.
+				s[j], s[len(s)-1] = s[len(s)-1], s[j]
+				s = s[:len(s)-1]
+				break
+			}
+		}
+	}
+	return s
+}
+
+func getAndDelete(m map[string]cty.Value, attr string, gv interface{}) error {
+	if v, ok := m[attr]; ok && v.IsKnown() {
+		delete(m, attr)
+		return gocty.FromCtyValue(v, gv)
+	}
+	return nil
+}
+
+func asMap(m map[string]cty.Value) map[string]string {
+	out := make(map[string]string, len(m))
+	for k, v := range m {
+		if v.IsKnown() {
+			out[k] = v.AsString()
+		}
+	}
+	return out
+}
+
+func isEmptyOrUnknown(v cty.Value) bool {
+	return !v.IsKnown() || (v.Type() == cty.String && v.AsString() == "")
+}
+
+// Seq is a temporary definition of iter.Seq until we are able to migrate
+// to using go1.23 as our minimum version. This can be removed when go1.24
+// is released and usages can be changed to use rangefunc.
+type Seq[V any] func(yield func(V) bool)
+
+func eachElement(in cty.Value) Seq[cty.Value] {
+	return func(yield func(v cty.Value) bool) {
+		for elem := in.ElementIterator(); elem.Next(); {
+			_, value := elem.Element()
+			if isEmptyOrUnknown(value) {
+				continue
+			}
+
+			if !yield(value) {
+				return
+			}
+		}
+	}
+}
diff -pruN 0.19.3+ds1-4/util/confutil/config.go 0.21.3-0ubuntu1/util/confutil/config.go
--- 0.19.3+ds1-4/util/confutil/config.go	2024-12-17 11:25:19.000000000 +0000
+++ 0.21.3-0ubuntu1/util/confutil/config.go	2025-03-17 16:14:25.000000000 +0000
@@ -8,7 +8,8 @@ import (
 	"sync"
 
 	"github.com/docker/cli/cli/command"
-	"github.com/docker/docker/pkg/ioutils"
+	"github.com/docker/docker/pkg/atomicwriter"
+	"github.com/moby/buildkit/cmd/buildkitd/config"
 	"github.com/pelletier/go-toml"
 	"github.com/pkg/errors"
 	fs "github.com/tonistiigi/fsutil/copy"
@@ -105,7 +106,7 @@ func (c *Config) MkdirAll(dir string, pe
 // AtomicWriteFile writes data to a file within the config dir atomically
 func (c *Config) AtomicWriteFile(filename string, data []byte, perm os.FileMode) error {
 	f := filepath.Join(c.dir, filename)
-	if err := ioutils.AtomicWriteFile(f, data, perm); err != nil {
+	if err := atomicwriter.WriteFile(f, data, perm); err != nil {
 		return err
 	}
 	if c.chowner == nil {
@@ -151,7 +152,11 @@ func LoadConfigTree(fp string) (*toml.Tr
 	defer f.Close()
 	t, err := toml.LoadReader(f)
 	if err != nil {
-		return t, errors.Wrap(err, "failed to parse config")
+		return t, errors.Wrap(err, "failed to parse buildkit config")
+	}
+	var bkcfg config.Config
+	if err = t.Unmarshal(&bkcfg); err != nil {
+		return t, errors.Wrap(err, "failed to parse buildkit config")
 	}
 	return t, nil
 }
diff -pruN 0.19.3+ds1-4/util/desktop/desktop.go 0.21.3-0ubuntu1/util/desktop/desktop.go
--- 0.19.3+ds1-4/util/desktop/desktop.go	2024-12-17 11:25:19.000000000 +0000
+++ 0.21.3-0ubuntu1/util/desktop/desktop.go	2025-03-17 16:14:25.000000000 +0000
@@ -28,13 +28,14 @@ func BuildBackendEnabled() bool {
 	return bbEnabled
 }
 
+func BuildURL(ref string) string {
+	return fmt.Sprintf("docker-desktop://dashboard/build/%s", ref)
+}
+
 func BuildDetailsOutput(refs map[string]string, term bool) string {
 	if len(refs) == 0 {
 		return ""
 	}
-	refURL := func(ref string) string {
-		return fmt.Sprintf("docker-desktop://dashboard/build/%s", ref)
-	}
 	var out bytes.Buffer
 	out.WriteString("View build details: ")
 	multiTargets := len(refs) > 1
@@ -43,9 +44,10 @@ func BuildDetailsOutput(refs map[string]
 			out.WriteString(fmt.Sprintf("\n  %s: ", target))
 		}
 		if term {
-			out.WriteString(hyperlink(refURL(ref)))
+			url := BuildURL(ref)
+			out.WriteString(ANSIHyperlink(url, url))
 		} else {
-			out.WriteString(refURL(ref))
+			out.WriteString(BuildURL(ref))
 		}
 	}
 	return out.String()
@@ -57,9 +59,9 @@ func PrintBuildDetails(w io.Writer, refs
 	}
 }
 
-func hyperlink(url string) string {
+func ANSIHyperlink(url, text string) string {
 	// create an escape sequence using the OSC 8 format: https://gist.github.com/egmontkob/eb114294efbcd5adb1944c9f3cb5feda
-	return fmt.Sprintf("\033]8;;%s\033\\%s\033]8;;\033\\", url, url)
+	return fmt.Sprintf("\033]8;;%s\033\\%s\033]8;;\033\\", url, text)
 }
 
 type ErrorWithBuildRef struct {
diff -pruN 0.19.3+ds1-4/util/dockerutil/api.go 0.21.3-0ubuntu1/util/dockerutil/api.go
--- 0.19.3+ds1-4/util/dockerutil/api.go	2024-12-17 11:25:19.000000000 +0000
+++ 0.21.3-0ubuntu1/util/dockerutil/api.go	2025-03-17 16:14:25.000000000 +0000
@@ -3,12 +3,12 @@ package dockerutil
 import (
 	"github.com/docker/cli/cli/command"
 	"github.com/docker/cli/cli/context/docker"
-	"github.com/docker/docker/client"
+	dockerclient "github.com/docker/docker/client"
 )
 
 // ClientAPI represents an active docker API object.
 type ClientAPI struct {
-	client.APIClient
+	dockerclient.APIClient
 }
 
 func NewClientAPI(cli command.Cli, ep string) (*ClientAPI, error) {
@@ -36,7 +36,7 @@ func NewClientAPI(cli command.Cli, ep st
 		return nil, err
 	}
 
-	ca.APIClient, err = client.NewClientWithOpts(clientOpts...)
+	ca.APIClient, err = dockerclient.NewClientWithOpts(clientOpts...)
 	if err != nil {
 		return nil, err
 	}
diff -pruN 0.19.3+ds1-4/util/dockerutil/client.go 0.21.3-0ubuntu1/util/dockerutil/client.go
--- 0.19.3+ds1-4/util/dockerutil/client.go	2024-12-17 11:25:19.000000000 +0000
+++ 0.21.3-0ubuntu1/util/dockerutil/client.go	2025-03-17 16:14:25.000000000 +0000
@@ -7,7 +7,7 @@ import (
 
 	"github.com/docker/buildx/util/progress"
 	"github.com/docker/cli/cli/command"
-	"github.com/docker/docker/client"
+	dockerclient "github.com/docker/docker/client"
 )
 
 // Client represents an active docker object.
@@ -24,7 +24,7 @@ func NewClient(cli command.Cli) *Client
 }
 
 // API returns a new docker API client.
-func (c *Client) API(name string) (client.APIClient, error) {
+func (c *Client) API(name string) (dockerclient.APIClient, error) {
 	if name == "" {
 		name = c.cli.CurrentContext()
 	}
@@ -52,7 +52,7 @@ func (c *Client) LoadImage(ctx context.C
 				w.mu.Unlock()
 			}
 
-			resp, err := dapi.ImageLoad(ctx, pr, false)
+			resp, err := dapi.ImageLoad(ctx, pr)
 			defer close(done)
 			if err != nil {
 				handleErr(err)
diff -pruN 0.19.3+ds1-4/util/dockerutil/progress.go 0.21.3-0ubuntu1/util/dockerutil/progress.go
--- 0.19.3+ds1-4/util/dockerutil/progress.go	2024-12-17 11:25:19.000000000 +0000
+++ 0.21.3-0ubuntu1/util/dockerutil/progress.go	2025-03-17 16:14:25.000000000 +0000
@@ -55,19 +55,21 @@ func fromReader(l progress.SubLogger, rc
 				Started: &now,
 			}
 		}
-		timeDelta := time.Since(st.Timestamp)
-		if timeDelta < minTimeDelta {
-			continue
-		}
-		st.Timestamp = time.Now()
 		if jm.Status == "Loading layer" {
 			st.Current = jm.Progress.Current
 			st.Total = jm.Progress.Total
 		}
+		now := time.Now()
 		if jm.Error != nil {
-			now := time.Now()
 			st.Completed = &now
+		} else {
+			timeDelta := time.Since(st.Timestamp)
+			if timeDelta < minTimeDelta {
+				started[id] = st
+				continue
+			}
 		}
+		st.Timestamp = now
 		started[id] = st
 		l.SetStatus(&st)
 	}
diff -pruN 0.19.3+ds1-4/util/imagetools/create.go 0.21.3-0ubuntu1/util/imagetools/create.go
--- 0.19.3+ds1-4/util/imagetools/create.go	2024-12-17 11:25:19.000000000 +0000
+++ 0.21.3-0ubuntu1/util/imagetools/create.go	2025-03-17 16:14:25.000000000 +0000
@@ -8,9 +8,9 @@ import (
 	"net/url"
 	"strings"
 
-	"github.com/containerd/containerd/content"
-	"github.com/containerd/containerd/images"
-	"github.com/containerd/containerd/remotes"
+	"github.com/containerd/containerd/v2/core/content"
+	"github.com/containerd/containerd/v2/core/images"
+	"github.com/containerd/containerd/v2/core/remotes"
 	"github.com/containerd/errdefs"
 	"github.com/containerd/platforms"
 	"github.com/distribution/reference"
diff -pruN 0.19.3+ds1-4/util/imagetools/imagetools_helpers_test.go 0.21.3-0ubuntu1/util/imagetools/imagetools_helpers_test.go
--- 0.19.3+ds1-4/util/imagetools/imagetools_helpers_test.go	2024-12-17 11:25:19.000000000 +0000
+++ 0.21.3-0ubuntu1/util/imagetools/imagetools_helpers_test.go	2025-03-17 16:14:25.000000000 +0000
@@ -8,7 +8,7 @@ import (
 	"io"
 	"strings"
 
-	"github.com/containerd/containerd/remotes"
+	"github.com/containerd/containerd/v2/core/remotes"
 	intoto "github.com/in-toto/in-toto-golang/in_toto"
 	slsa02 "github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/v0.2"
 	"github.com/opencontainers/go-digest"
diff -pruN 0.19.3+ds1-4/util/imagetools/inspect.go 0.21.3-0ubuntu1/util/imagetools/inspect.go
--- 0.19.3+ds1-4/util/imagetools/inspect.go	2024-12-17 11:25:19.000000000 +0000
+++ 0.21.3-0ubuntu1/util/imagetools/inspect.go	2025-03-17 16:14:25.000000000 +0000
@@ -8,8 +8,8 @@ import (
 	"io"
 	"net/http"
 
-	"github.com/containerd/containerd/remotes"
-	"github.com/containerd/containerd/remotes/docker"
+	"github.com/containerd/containerd/v2/core/remotes"
+	"github.com/containerd/containerd/v2/core/remotes/docker"
 	"github.com/containerd/log"
 	"github.com/distribution/reference"
 	"github.com/docker/buildx/util/resolver"
diff -pruN 0.19.3+ds1-4/util/imagetools/loader.go 0.21.3-0ubuntu1/util/imagetools/loader.go
--- 0.19.3+ds1-4/util/imagetools/loader.go	2024-12-17 11:25:19.000000000 +0000
+++ 0.21.3-0ubuntu1/util/imagetools/loader.go	2025-03-17 16:14:25.000000000 +0000
@@ -11,9 +11,9 @@ import (
 	"strings"
 	"sync"
 
-	"github.com/containerd/containerd/content"
-	"github.com/containerd/containerd/images"
-	"github.com/containerd/containerd/remotes"
+	"github.com/containerd/containerd/v2/core/content"
+	"github.com/containerd/containerd/v2/core/images"
+	"github.com/containerd/containerd/v2/core/remotes"
 	"github.com/containerd/platforms"
 	"github.com/distribution/reference"
 	intoto "github.com/in-toto/in-toto-golang/in_toto"
diff -pruN 0.19.3+ds1-4/util/imagetools/printers.go 0.21.3-0ubuntu1/util/imagetools/printers.go
--- 0.19.3+ds1-4/util/imagetools/printers.go	2024-12-17 11:25:19.000000000 +0000
+++ 0.21.3-0ubuntu1/util/imagetools/printers.go	2025-03-17 16:14:25.000000000 +0000
@@ -10,7 +10,7 @@ import (
 	"text/tabwriter"
 	"text/template"
 
-	"github.com/containerd/containerd/images"
+	"github.com/containerd/containerd/v2/core/images"
 	"github.com/containerd/platforms"
 	"github.com/distribution/reference"
 	"github.com/opencontainers/go-digest"
diff -pruN 0.19.3+ds1-4/util/otelutil/fixtures/bktraces.json 0.21.3-0ubuntu1/util/otelutil/fixtures/bktraces.json
--- 0.19.3+ds1-4/util/otelutil/fixtures/bktraces.json	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/util/otelutil/fixtures/bktraces.json	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,11125 @@
+{
+  "Name": "moby.buildkit.v1.Control/ListWorkers",
+  "SpanContext": {
+    "TraceID": "0555ce1903feb85770d102846273073b",
+    "SpanID": "f5f33ab2ca194d1b",
+    "TraceFlags": "00",
+    "TraceState": "",
+    "Remote": false
+  },
+  "Parent": {
+    "TraceID": "0555ce1903feb85770d102846273073b",
+    "SpanID": "c1b335eeeaf56405",
+    "TraceFlags": "00",
+    "TraceState": "",
+    "Remote": false
+  },
+  "SpanKind": 3,
+  "StartTime": "2024-01-12T14:57:40.147265775Z",
+  "EndTime": "2024-01-12T14:57:40.164529263Z",
+  "Attributes": [
+    {
+      "Key": "rpc.system",
+      "Value": {
+        "Type": "STRING",
+        "Value": "grpc"
+      }
+    },
+    {
+      "Key": "rpc.service",
+      "Value": {
+        "Type": "STRING",
+        "Value": "moby.buildkit.v1.Control"
+      }
+    },
+    {
+      "Key": "rpc.method",
+      "Value": {
+        "Type": "STRING",
+        "Value": "ListWorkers"
+      }
+    },
+    {
+      "Key": "rpc.grpc.status_code",
+      "Value": {
+        "Type": "INT64",
+        "Value": 0
+      }
+    }
+  ],
+  "Events": [
+    {
+      "Name": "message",
+      "Attributes": [
+        {
+          "Key": "message.type",
+          "Value": {
+            "Type": "STRING",
+            "Value": "SENT"
+          }
+        },
+        {
+          "Key": "message.id",
+          "Value": {
+            "Type": "INT64",
+            "Value": 1
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:40.147281524Z"
+    },
+    {
+      "Name": "message",
+      "Attributes": [
+        {
+          "Key": "message.type",
+          "Value": {
+            "Type": "STRING",
+            "Value": "RECEIVED"
+          }
+        },
+        {
+          "Key": "message.id",
+          "Value": {
+            "Type": "INT64",
+            "Value": 1
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:40.164526618Z"
+    }
+  ],
+  "Links": null,
+  "Status": {
+    "Code": 2,
+    "Description": ""
+  },
+  "DroppedAttributes": 0,
+  "DroppedEvents": 0,
+  "DroppedLinks": 0,
+  "ChildSpanCount": 0,
+  "Resource": [
+    {
+      "Key": "service.name",
+      "Value": {
+        "Type": "STRING",
+        "Value": "buildx"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.language",
+      "Value": {
+        "Type": "STRING",
+        "Value": "go"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.name",
+      "Value": {
+        "Type": "STRING",
+        "Value": "opentelemetry"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.version",
+      "Value": {
+        "Type": "STRING",
+        "Value": "1.14.0"
+      }
+    }
+  ],
+  "InstrumentationLibrary": {
+    "Name": "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc",
+    "Version": "semver:0.40.0",
+    "SchemaURL": ""
+  }
+}
+{
+  "Name": "moby.buildkit.v1.frontend.LLBBridge/Ping",
+  "SpanContext": {
+    "TraceID": "0555ce1903feb85770d102846273073b",
+    "SpanID": "1f64b377d3d03600",
+    "TraceFlags": "00",
+    "TraceState": "",
+    "Remote": false
+  },
+  "Parent": {
+    "TraceID": "0555ce1903feb85770d102846273073b",
+    "SpanID": "c1b335eeeaf56405",
+    "TraceFlags": "00",
+    "TraceState": "",
+    "Remote": false
+  },
+  "SpanKind": 3,
+  "StartTime": "2024-01-12T14:57:40.165362709Z",
+  "EndTime": "2024-01-12T14:57:40.166354292Z",
+  "Attributes": [
+    {
+      "Key": "rpc.system",
+      "Value": {
+        "Type": "STRING",
+        "Value": "grpc"
+      }
+    },
+    {
+      "Key": "rpc.service",
+      "Value": {
+        "Type": "STRING",
+        "Value": "moby.buildkit.v1.frontend.LLBBridge"
+      }
+    },
+    {
+      "Key": "rpc.method",
+      "Value": {
+        "Type": "STRING",
+        "Value": "Ping"
+      }
+    },
+    {
+      "Key": "rpc.grpc.status_code",
+      "Value": {
+        "Type": "INT64",
+        "Value": 0
+      }
+    }
+  ],
+  "Events": [
+    {
+      "Name": "message",
+      "Attributes": [
+        {
+          "Key": "message.type",
+          "Value": {
+            "Type": "STRING",
+            "Value": "SENT"
+          }
+        },
+        {
+          "Key": "message.id",
+          "Value": {
+            "Type": "INT64",
+            "Value": 1
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:40.165372367Z"
+    },
+    {
+      "Name": "message",
+      "Attributes": [
+        {
+          "Key": "message.type",
+          "Value": {
+            "Type": "STRING",
+            "Value": "RECEIVED"
+          }
+        },
+        {
+          "Key": "message.id",
+          "Value": {
+            "Type": "INT64",
+            "Value": 1
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:40.16635318Z"
+    }
+  ],
+  "Links": null,
+  "Status": {
+    "Code": 2,
+    "Description": ""
+  },
+  "DroppedAttributes": 0,
+  "DroppedEvents": 0,
+  "DroppedLinks": 0,
+  "ChildSpanCount": 0,
+  "Resource": [
+    {
+      "Key": "service.name",
+      "Value": {
+        "Type": "STRING",
+        "Value": "buildx"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.language",
+      "Value": {
+        "Type": "STRING",
+        "Value": "go"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.name",
+      "Value": {
+        "Type": "STRING",
+        "Value": "opentelemetry"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.version",
+      "Value": {
+        "Type": "STRING",
+        "Value": "1.14.0"
+      }
+    }
+  ],
+  "InstrumentationLibrary": {
+    "Name": "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc",
+    "Version": "semver:0.40.0",
+    "SchemaURL": ""
+  }
+}
+{
+  "Name": "moby.buildkit.v1.frontend.LLBBridge/Return",
+  "SpanContext": {
+    "TraceID": "0555ce1903feb85770d102846273073b",
+    "SpanID": "edb147a8e37c0b62",
+    "TraceFlags": "00",
+    "TraceState": "",
+    "Remote": false
+  },
+  "Parent": {
+    "TraceID": "0555ce1903feb85770d102846273073b",
+    "SpanID": "c1b335eeeaf56405",
+    "TraceFlags": "00",
+    "TraceState": "",
+    "Remote": false
+  },
+  "SpanKind": 3,
+  "StartTime": "2024-01-12T14:57:40.166393536Z",
+  "EndTime": "2024-01-12T14:57:40.16719399Z",
+  "Attributes": [
+    {
+      "Key": "rpc.system",
+      "Value": {
+        "Type": "STRING",
+        "Value": "grpc"
+      }
+    },
+    {
+      "Key": "rpc.service",
+      "Value": {
+        "Type": "STRING",
+        "Value": "moby.buildkit.v1.frontend.LLBBridge"
+      }
+    },
+    {
+      "Key": "rpc.method",
+      "Value": {
+        "Type": "STRING",
+        "Value": "Return"
+      }
+    },
+    {
+      "Key": "rpc.grpc.status_code",
+      "Value": {
+        "Type": "INT64",
+        "Value": 0
+      }
+    }
+  ],
+  "Events": [
+    {
+      "Name": "message",
+      "Attributes": [
+        {
+          "Key": "message.type",
+          "Value": {
+            "Type": "STRING",
+            "Value": "SENT"
+          }
+        },
+        {
+          "Key": "message.id",
+          "Value": {
+            "Type": "INT64",
+            "Value": 1
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:40.166401921Z"
+    },
+    {
+      "Name": "message",
+      "Attributes": [
+        {
+          "Key": "message.type",
+          "Value": {
+            "Type": "STRING",
+            "Value": "RECEIVED"
+          }
+        },
+        {
+          "Key": "message.id",
+          "Value": {
+            "Type": "INT64",
+            "Value": 1
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:40.167193199Z"
+    }
+  ],
+  "Links": null,
+  "Status": {
+    "Code": 2,
+    "Description": ""
+  },
+  "DroppedAttributes": 0,
+  "DroppedEvents": 0,
+  "DroppedLinks": 0,
+  "ChildSpanCount": 0,
+  "Resource": [
+    {
+      "Key": "service.name",
+      "Value": {
+        "Type": "STRING",
+        "Value": "buildx"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.language",
+      "Value": {
+        "Type": "STRING",
+        "Value": "go"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.name",
+      "Value": {
+        "Type": "STRING",
+        "Value": "opentelemetry"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.version",
+      "Value": {
+        "Type": "STRING",
+        "Value": "1.14.0"
+      }
+    }
+  ],
+  "InstrumentationLibrary": {
+    "Name": "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc",
+    "Version": "semver:0.40.0",
+    "SchemaURL": ""
+  }
+}
+{
+  "Name": "moby.buildkit.v1.Control/Solve",
+  "SpanContext": {
+    "TraceID": "0555ce1903feb85770d102846273073b",
+    "SpanID": "be9c91e5fc1a8511",
+    "TraceFlags": "00",
+    "TraceState": "",
+    "Remote": false
+  },
+  "Parent": {
+    "TraceID": "0555ce1903feb85770d102846273073b",
+    "SpanID": "c1b335eeeaf56405",
+    "TraceFlags": "00",
+    "TraceState": "",
+    "Remote": false
+  },
+  "SpanKind": 3,
+  "StartTime": "2024-01-12T14:57:40.165347242Z",
+  "EndTime": "2024-01-12T14:57:40.167342107Z",
+  "Attributes": [
+    {
+      "Key": "rpc.system",
+      "Value": {
+        "Type": "STRING",
+        "Value": "grpc"
+      }
+    },
+    {
+      "Key": "rpc.service",
+      "Value": {
+        "Type": "STRING",
+        "Value": "moby.buildkit.v1.Control"
+      }
+    },
+    {
+      "Key": "rpc.method",
+      "Value": {
+        "Type": "STRING",
+        "Value": "Solve"
+      }
+    },
+    {
+      "Key": "rpc.grpc.status_code",
+      "Value": {
+        "Type": "INT64",
+        "Value": 0
+      }
+    }
+  ],
+  "Events": [
+    {
+      "Name": "message",
+      "Attributes": [
+        {
+          "Key": "message.type",
+          "Value": {
+            "Type": "STRING",
+            "Value": "SENT"
+          }
+        },
+        {
+          "Key": "message.id",
+          "Value": {
+            "Type": "INT64",
+            "Value": 1
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:40.16536234Z"
+    },
+    {
+      "Name": "message",
+      "Attributes": [
+        {
+          "Key": "message.type",
+          "Value": {
+            "Type": "STRING",
+            "Value": "RECEIVED"
+          }
+        },
+        {
+          "Key": "message.id",
+          "Value": {
+            "Type": "INT64",
+            "Value": 1
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:40.167341325Z"
+    }
+  ],
+  "Links": null,
+  "Status": {
+    "Code": 2,
+    "Description": ""
+  },
+  "DroppedAttributes": 0,
+  "DroppedEvents": 0,
+  "DroppedLinks": 0,
+  "ChildSpanCount": 0,
+  "Resource": [
+    {
+      "Key": "service.name",
+      "Value": {
+        "Type": "STRING",
+        "Value": "buildx"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.language",
+      "Value": {
+        "Type": "STRING",
+        "Value": "go"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.name",
+      "Value": {
+        "Type": "STRING",
+        "Value": "opentelemetry"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.version",
+      "Value": {
+        "Type": "STRING",
+        "Value": "1.14.0"
+      }
+    }
+  ],
+  "InstrumentationLibrary": {
+    "Name": "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc",
+    "Version": "semver:0.40.0",
+    "SchemaURL": ""
+  }
+}
+{
+  "Name": "moby.buildkit.v1.Control/Status",
+  "SpanContext": {
+    "TraceID": "0555ce1903feb85770d102846273073b",
+    "SpanID": "07376377d689755f",
+    "TraceFlags": "00",
+    "TraceState": "",
+    "Remote": false
+  },
+  "Parent": {
+    "TraceID": "0555ce1903feb85770d102846273073b",
+    "SpanID": "c1b335eeeaf56405",
+    "TraceFlags": "00",
+    "TraceState": "",
+    "Remote": false
+  },
+  "SpanKind": 3,
+  "StartTime": "2024-01-12T14:57:40.165348694Z",
+  "EndTime": "2024-01-12T14:57:40.167352066Z",
+  "Attributes": [
+    {
+      "Key": "rpc.system",
+      "Value": {
+        "Type": "STRING",
+        "Value": "grpc"
+      }
+    },
+    {
+      "Key": "rpc.service",
+      "Value": {
+        "Type": "STRING",
+        "Value": "moby.buildkit.v1.Control"
+      }
+    },
+    {
+      "Key": "rpc.method",
+      "Value": {
+        "Type": "STRING",
+        "Value": "Status"
+      }
+    },
+    {
+      "Key": "rpc.grpc.status_code",
+      "Value": {
+        "Type": "INT64",
+        "Value": 0
+      }
+    }
+  ],
+  "Events": [
+    {
+      "Name": "message",
+      "Attributes": [
+        {
+          "Key": "message.type",
+          "Value": {
+            "Type": "STRING",
+            "Value": "SENT"
+          }
+        },
+        {
+          "Key": "message.id",
+          "Value": {
+            "Type": "INT64",
+            "Value": 1
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:40.165472235Z"
+    }
+  ],
+  "Links": null,
+  "Status": {
+    "Code": 2,
+    "Description": ""
+  },
+  "DroppedAttributes": 0,
+  "DroppedEvents": 0,
+  "DroppedLinks": 0,
+  "ChildSpanCount": 0,
+  "Resource": [
+    {
+      "Key": "service.name",
+      "Value": {
+        "Type": "STRING",
+        "Value": "buildx"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.language",
+      "Value": {
+        "Type": "STRING",
+        "Value": "go"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.name",
+      "Value": {
+        "Type": "STRING",
+        "Value": "opentelemetry"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.version",
+      "Value": {
+        "Type": "STRING",
+        "Value": "1.14.0"
+      }
+    }
+  ],
+  "InstrumentationLibrary": {
+    "Name": "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc",
+    "Version": "semver:0.40.0",
+    "SchemaURL": ""
+  }
+}
+{
+  "Name": "moby.buildkit.v1.Control/ListWorkers",
+  "SpanContext": {
+    "TraceID": "0555ce1903feb85770d102846273073b",
+    "SpanID": "fea7d265a9b437c2",
+    "TraceFlags": "00",
+    "TraceState": "",
+    "Remote": false
+  },
+  "Parent": {
+    "TraceID": "0555ce1903feb85770d102846273073b",
+    "SpanID": "d30eea87f2f0241d",
+    "TraceFlags": "00",
+    "TraceState": "",
+    "Remote": false
+  },
+  "SpanKind": 3,
+  "StartTime": "2024-01-12T14:57:40.350714924Z",
+  "EndTime": "2024-01-12T14:57:40.372933212Z",
+  "Attributes": [
+    {
+      "Key": "rpc.system",
+      "Value": {
+        "Type": "STRING",
+        "Value": "grpc"
+      }
+    },
+    {
+      "Key": "rpc.service",
+      "Value": {
+        "Type": "STRING",
+        "Value": "moby.buildkit.v1.Control"
+      }
+    },
+    {
+      "Key": "rpc.method",
+      "Value": {
+        "Type": "STRING",
+        "Value": "ListWorkers"
+      }
+    },
+    {
+      "Key": "rpc.grpc.status_code",
+      "Value": {
+        "Type": "INT64",
+        "Value": 0
+      }
+    }
+  ],
+  "Events": [
+    {
+      "Name": "message",
+      "Attributes": [
+        {
+          "Key": "message.type",
+          "Value": {
+            "Type": "STRING",
+            "Value": "SENT"
+          }
+        },
+        {
+          "Key": "message.id",
+          "Value": {
+            "Type": "INT64",
+            "Value": 1
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:40.350738238Z"
+    },
+    {
+      "Name": "message",
+      "Attributes": [
+        {
+          "Key": "message.type",
+          "Value": {
+            "Type": "STRING",
+            "Value": "RECEIVED"
+          }
+        },
+        {
+          "Key": "message.id",
+          "Value": {
+            "Type": "INT64",
+            "Value": 1
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:40.372930227Z"
+    }
+  ],
+  "Links": null,
+  "Status": {
+    "Code": 2,
+    "Description": ""
+  },
+  "DroppedAttributes": 0,
+  "DroppedEvents": 0,
+  "DroppedLinks": 0,
+  "ChildSpanCount": 0,
+  "Resource": [
+    {
+      "Key": "service.name",
+      "Value": {
+        "Type": "STRING",
+        "Value": "buildx"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.language",
+      "Value": {
+        "Type": "STRING",
+        "Value": "go"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.name",
+      "Value": {
+        "Type": "STRING",
+        "Value": "opentelemetry"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.version",
+      "Value": {
+        "Type": "STRING",
+        "Value": "1.14.0"
+      }
+    }
+  ],
+  "InstrumentationLibrary": {
+    "Name": "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc",
+    "Version": "semver:0.40.0",
+    "SchemaURL": ""
+  }
+}
+{
+  "Name": "moby.buildkit.v1.Control/ListWorkers",
+  "SpanContext": {
+    "TraceID": "0555ce1903feb85770d102846273073b",
+    "SpanID": "e1841a2a45f6de1a",
+    "TraceFlags": "00",
+    "TraceState": "",
+    "Remote": false
+  },
+  "Parent": {
+    "TraceID": "0555ce1903feb85770d102846273073b",
+    "SpanID": "d30eea87f2f0241d",
+    "TraceFlags": "00",
+    "TraceState": "",
+    "Remote": false
+  },
+  "SpanKind": 3,
+  "StartTime": "2024-01-12T14:57:40.382091457Z",
+  "EndTime": "2024-01-12T14:57:40.398366627Z",
+  "Attributes": [
+    {
+      "Key": "rpc.system",
+      "Value": {
+        "Type": "STRING",
+        "Value": "grpc"
+      }
+    },
+    {
+      "Key": "rpc.service",
+      "Value": {
+        "Type": "STRING",
+        "Value": "moby.buildkit.v1.Control"
+      }
+    },
+    {
+      "Key": "rpc.method",
+      "Value": {
+        "Type": "STRING",
+        "Value": "ListWorkers"
+      }
+    },
+    {
+      "Key": "rpc.grpc.status_code",
+      "Value": {
+        "Type": "INT64",
+        "Value": 0
+      }
+    }
+  ],
+  "Events": [
+    {
+      "Name": "message",
+      "Attributes": [
+        {
+          "Key": "message.type",
+          "Value": {
+            "Type": "STRING",
+            "Value": "SENT"
+          }
+        },
+        {
+          "Key": "message.id",
+          "Value": {
+            "Type": "INT64",
+            "Value": 1
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:40.382111174Z"
+    },
+    {
+      "Name": "message",
+      "Attributes": [
+        {
+          "Key": "message.type",
+          "Value": {
+            "Type": "STRING",
+            "Value": "RECEIVED"
+          }
+        },
+        {
+          "Key": "message.id",
+          "Value": {
+            "Type": "INT64",
+            "Value": 1
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:40.398362529Z"
+    }
+  ],
+  "Links": null,
+  "Status": {
+    "Code": 2,
+    "Description": ""
+  },
+  "DroppedAttributes": 0,
+  "DroppedEvents": 0,
+  "DroppedLinks": 0,
+  "ChildSpanCount": 0,
+  "Resource": [
+    {
+      "Key": "service.name",
+      "Value": {
+        "Type": "STRING",
+        "Value": "buildx"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.language",
+      "Value": {
+        "Type": "STRING",
+        "Value": "go"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.name",
+      "Value": {
+        "Type": "STRING",
+        "Value": "opentelemetry"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.version",
+      "Value": {
+        "Type": "STRING",
+        "Value": "1.14.0"
+      }
+    }
+  ],
+  "InstrumentationLibrary": {
+    "Name": "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc",
+    "Version": "semver:0.40.0",
+    "SchemaURL": ""
+  }
+}
+{
+  "Name": "moby.buildkit.v1.frontend.LLBBridge/Ping",
+  "SpanContext": {
+    "TraceID": "0555ce1903feb85770d102846273073b",
+    "SpanID": "a4ee96f4ccd426ec",
+    "TraceFlags": "00",
+    "TraceState": "",
+    "Remote": false
+  },
+  "Parent": {
+    "TraceID": "0555ce1903feb85770d102846273073b",
+    "SpanID": "d30eea87f2f0241d",
+    "TraceFlags": "00",
+    "TraceState": "",
+    "Remote": false
+  },
+  "SpanKind": 3,
+  "StartTime": "2024-01-12T14:57:40.399784596Z",
+  "EndTime": "2024-01-12T14:57:40.400602684Z",
+  "Attributes": [
+    {
+      "Key": "rpc.system",
+      "Value": {
+        "Type": "STRING",
+        "Value": "grpc"
+      }
+    },
+    {
+      "Key": "rpc.service",
+      "Value": {
+        "Type": "STRING",
+        "Value": "moby.buildkit.v1.frontend.LLBBridge"
+      }
+    },
+    {
+      "Key": "rpc.method",
+      "Value": {
+        "Type": "STRING",
+        "Value": "Ping"
+      }
+    },
+    {
+      "Key": "rpc.grpc.status_code",
+      "Value": {
+        "Type": "INT64",
+        "Value": 0
+      }
+    }
+  ],
+  "Events": [
+    {
+      "Name": "message",
+      "Attributes": [
+        {
+          "Key": "message.type",
+          "Value": {
+            "Type": "STRING",
+            "Value": "SENT"
+          }
+        },
+        {
+          "Key": "message.id",
+          "Value": {
+            "Type": "INT64",
+            "Value": 1
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:40.39979712Z"
+    },
+    {
+      "Name": "message",
+      "Attributes": [
+        {
+          "Key": "message.type",
+          "Value": {
+            "Type": "STRING",
+            "Value": "RECEIVED"
+          }
+        },
+        {
+          "Key": "message.id",
+          "Value": {
+            "Type": "INT64",
+            "Value": 1
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:40.400601091Z"
+    }
+  ],
+  "Links": null,
+  "Status": {
+    "Code": 2,
+    "Description": ""
+  },
+  "DroppedAttributes": 0,
+  "DroppedEvents": 0,
+  "DroppedLinks": 0,
+  "ChildSpanCount": 0,
+  "Resource": [
+    {
+      "Key": "service.name",
+      "Value": {
+        "Type": "STRING",
+        "Value": "buildx"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.language",
+      "Value": {
+        "Type": "STRING",
+        "Value": "go"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.name",
+      "Value": {
+        "Type": "STRING",
+        "Value": "opentelemetry"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.version",
+      "Value": {
+        "Type": "STRING",
+        "Value": "1.14.0"
+      }
+    }
+  ],
+  "InstrumentationLibrary": {
+    "Name": "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc",
+    "Version": "semver:0.40.0",
+    "SchemaURL": ""
+  }
+}
+{
+  "Name": "moby.filesync.v1.FileSync/DiffCopy",
+  "SpanContext": {
+    "TraceID": "0555ce1903feb85770d102846273073b",
+    "SpanID": "a6070511de38c86a",
+    "TraceFlags": "00",
+    "TraceState": "",
+    "Remote": false
+  },
+  "Parent": {
+    "TraceID": "0555ce1903feb85770d102846273073b",
+    "SpanID": "70f459c5d3c2c488",
+    "TraceFlags": "00",
+    "TraceState": "",
+    "Remote": false
+  },
+  "SpanKind": 2,
+  "StartTime": "2024-01-12T14:57:40.409090629Z",
+  "EndTime": "2024-01-12T14:57:40.418671608Z",
+  "Attributes": [
+    {
+      "Key": "rpc.system",
+      "Value": {
+        "Type": "STRING",
+        "Value": "grpc"
+      }
+    },
+    {
+      "Key": "rpc.service",
+      "Value": {
+        "Type": "STRING",
+        "Value": "moby.filesync.v1.FileSync"
+      }
+    },
+    {
+      "Key": "rpc.method",
+      "Value": {
+        "Type": "STRING",
+        "Value": "DiffCopy"
+      }
+    },
+    {
+      "Key": "rpc.grpc.status_code",
+      "Value": {
+        "Type": "INT64",
+        "Value": 0
+      }
+    }
+  ],
+  "Events": [
+    {
+      "Name": "message",
+      "Attributes": [
+        {
+          "Key": "message.type",
+          "Value": {
+            "Type": "STRING",
+            "Value": "SENT"
+          }
+        },
+        {
+          "Key": "message.id",
+          "Value": {
+            "Type": "INT64",
+            "Value": 1
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:40.415069418Z"
+    },
+    {
+      "Name": "message",
+      "Attributes": [
+        {
+          "Key": "message.type",
+          "Value": {
+            "Type": "STRING",
+            "Value": "RECEIVED"
+          }
+        },
+        {
+          "Key": "message.id",
+          "Value": {
+            "Type": "INT64",
+            "Value": 1
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:40.416971452Z"
+    },
+    {
+      "Name": "message",
+      "Attributes": [
+        {
+          "Key": "message.type",
+          "Value": {
+            "Type": "STRING",
+            "Value": "SENT"
+          }
+        },
+        {
+          "Key": "message.id",
+          "Value": {
+            "Type": "INT64",
+            "Value": 2
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:40.417874398Z"
+    },
+    {
+      "Name": "message",
+      "Attributes": [
+        {
+          "Key": "message.type",
+          "Value": {
+            "Type": "STRING",
+            "Value": "SENT"
+          }
+        },
+        {
+          "Key": "message.id",
+          "Value": {
+            "Type": "INT64",
+            "Value": 3
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:40.417984414Z"
+    },
+    {
+      "Name": "message",
+      "Attributes": [
+        {
+          "Key": "message.type",
+          "Value": {
+            "Type": "STRING",
+            "Value": "SENT"
+          }
+        },
+        {
+          "Key": "message.id",
+          "Value": {
+            "Type": "INT64",
+            "Value": 4
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:40.418118123Z"
+    },
+    {
+      "Name": "message",
+      "Attributes": [
+        {
+          "Key": "message.type",
+          "Value": {
+            "Type": "STRING",
+            "Value": "RECEIVED"
+          }
+        },
+        {
+          "Key": "message.id",
+          "Value": {
+            "Type": "INT64",
+            "Value": 2
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:40.41862914Z"
+    },
+    {
+      "Name": "message",
+      "Attributes": [
+        {
+          "Key": "message.type",
+          "Value": {
+            "Type": "STRING",
+            "Value": "SENT"
+          }
+        },
+        {
+          "Key": "message.id",
+          "Value": {
+            "Type": "INT64",
+            "Value": 5
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:40.418637065Z"
+    }
+  ],
+  "Links": null,
+  "Status": {
+    "Code": 2,
+    "Description": ""
+  },
+  "DroppedAttributes": 0,
+  "DroppedEvents": 0,
+  "DroppedLinks": 0,
+  "ChildSpanCount": 0,
+  "Resource": [
+    {
+      "Key": "service.name",
+      "Value": {
+        "Type": "STRING",
+        "Value": "buildx"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.language",
+      "Value": {
+        "Type": "STRING",
+        "Value": "go"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.name",
+      "Value": {
+        "Type": "STRING",
+        "Value": "opentelemetry"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.version",
+      "Value": {
+        "Type": "STRING",
+        "Value": "1.14.0"
+      }
+    }
+  ],
+  "InstrumentationLibrary": {
+    "Name": "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc",
+    "Version": "semver:0.40.0",
+    "SchemaURL": ""
+  }
+}
+{
+  "Name": "moby.filesync.v1.Auth/VerifyTokenAuthority",
+  "SpanContext": {
+    "TraceID": "0555ce1903feb85770d102846273073b",
+    "SpanID": "f9e99c0194575ce4",
+    "TraceFlags": "00",
+    "TraceState": "",
+    "Remote": false
+  },
+  "Parent": {
+    "TraceID": "0555ce1903feb85770d102846273073b",
+    "SpanID": "f3a80839f6b95729",
+    "TraceFlags": "00",
+    "TraceState": "",
+    "Remote": false
+  },
+  "SpanKind": 2,
+  "StartTime": "2024-01-12T14:57:40.445118451Z",
+  "EndTime": "2024-01-12T14:57:40.523367473Z",
+  "Attributes": [
+    {
+      "Key": "rpc.system",
+      "Value": {
+        "Type": "STRING",
+        "Value": "grpc"
+      }
+    },
+    {
+      "Key": "rpc.service",
+      "Value": {
+        "Type": "STRING",
+        "Value": "moby.filesync.v1.Auth"
+      }
+    },
+    {
+      "Key": "rpc.method",
+      "Value": {
+        "Type": "STRING",
+        "Value": "VerifyTokenAuthority"
+      }
+    },
+    {
+      "Key": "rpc.grpc.status_code",
+      "Value": {
+        "Type": "INT64",
+        "Value": 0
+      }
+    }
+  ],
+  "Events": [
+    {
+      "Name": "message",
+      "Attributes": [
+        {
+          "Key": "message.type",
+          "Value": {
+            "Type": "STRING",
+            "Value": "RECEIVED"
+          }
+        },
+        {
+          "Key": "message.id",
+          "Value": {
+            "Type": "INT64",
+            "Value": 1
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:40.445122027Z"
+    },
+    {
+      "Name": "message",
+      "Attributes": [
+        {
+          "Key": "message.type",
+          "Value": {
+            "Type": "STRING",
+            "Value": "SENT"
+          }
+        },
+        {
+          "Key": "message.id",
+          "Value": {
+            "Type": "INT64",
+            "Value": 1
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:40.523361662Z"
+    }
+  ],
+  "Links": null,
+  "Status": {
+    "Code": 2,
+    "Description": ""
+  },
+  "DroppedAttributes": 0,
+  "DroppedEvents": 0,
+  "DroppedLinks": 0,
+  "ChildSpanCount": 0,
+  "Resource": [
+    {
+      "Key": "service.name",
+      "Value": {
+        "Type": "STRING",
+        "Value": "buildx"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.language",
+      "Value": {
+        "Type": "STRING",
+        "Value": "go"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.name",
+      "Value": {
+        "Type": "STRING",
+        "Value": "opentelemetry"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.version",
+      "Value": {
+        "Type": "STRING",
+        "Value": "1.14.0"
+      }
+    }
+  ],
+  "InstrumentationLibrary": {
+    "Name": "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc",
+    "Version": "semver:0.40.0",
+    "SchemaURL": ""
+  }
+}
+{
+  "Name": "moby.filesync.v1.Auth/FetchToken",
+  "SpanContext": {
+    "TraceID": "0555ce1903feb85770d102846273073b",
+    "SpanID": "78a4e25bc2d4278d",
+    "TraceFlags": "00",
+    "TraceState": "",
+    "Remote": false
+  },
+  "Parent": {
+    "TraceID": "0555ce1903feb85770d102846273073b",
+    "SpanID": "f26320c4e0b697ac",
+    "TraceFlags": "00",
+    "TraceState": "",
+    "Remote": false
+  },
+  "SpanKind": 2,
+  "StartTime": "2024-01-12T14:57:40.525027244Z",
+  "EndTime": "2024-01-12T14:57:40.900911737Z",
+  "Attributes": [
+    {
+      "Key": "rpc.system",
+      "Value": {
+        "Type": "STRING",
+        "Value": "grpc"
+      }
+    },
+    {
+      "Key": "rpc.service",
+      "Value": {
+        "Type": "STRING",
+        "Value": "moby.filesync.v1.Auth"
+      }
+    },
+    {
+      "Key": "rpc.method",
+      "Value": {
+        "Type": "STRING",
+        "Value": "FetchToken"
+      }
+    },
+    {
+      "Key": "rpc.grpc.status_code",
+      "Value": {
+        "Type": "INT64",
+        "Value": 0
+      }
+    }
+  ],
+  "Events": [
+    {
+      "Name": "message",
+      "Attributes": [
+        {
+          "Key": "message.type",
+          "Value": {
+            "Type": "STRING",
+            "Value": "RECEIVED"
+          }
+        },
+        {
+          "Key": "message.id",
+          "Value": {
+            "Type": "INT64",
+            "Value": 1
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:40.525030811Z"
+    },
+    {
+      "Name": "message",
+      "Attributes": [
+        {
+          "Key": "message.type",
+          "Value": {
+            "Type": "STRING",
+            "Value": "SENT"
+          }
+        },
+        {
+          "Key": "message.id",
+          "Value": {
+            "Type": "INT64",
+            "Value": 1
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:40.90090819Z"
+    }
+  ],
+  "Links": null,
+  "Status": {
+    "Code": 2,
+    "Description": ""
+  },
+  "DroppedAttributes": 0,
+  "DroppedEvents": 0,
+  "DroppedLinks": 0,
+  "ChildSpanCount": 0,
+  "Resource": [
+    {
+      "Key": "service.name",
+      "Value": {
+        "Type": "STRING",
+        "Value": "buildx"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.language",
+      "Value": {
+        "Type": "STRING",
+        "Value": "go"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.name",
+      "Value": {
+        "Type": "STRING",
+        "Value": "opentelemetry"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.version",
+      "Value": {
+        "Type": "STRING",
+        "Value": "1.14.0"
+      }
+    }
+  ],
+  "InstrumentationLibrary": {
+    "Name": "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc",
+    "Version": "semver:0.40.0",
+    "SchemaURL": ""
+  }
+}
+{
+  "Name": "moby.filesync.v1.FileSync/DiffCopy",
+  "SpanContext": {
+    "TraceID": "0555ce1903feb85770d102846273073b",
+    "SpanID": "03f88200c8224808",
+    "TraceFlags": "00",
+    "TraceState": "",
+    "Remote": false
+  },
+  "Parent": {
+    "TraceID": "0555ce1903feb85770d102846273073b",
+    "SpanID": "50509a5bad270153",
+    "TraceFlags": "00",
+    "TraceState": "",
+    "Remote": false
+  },
+  "SpanKind": 2,
+  "StartTime": "2024-01-12T14:57:41.456522474Z",
+  "EndTime": "2024-01-12T14:57:41.464340217Z",
+  "Attributes": [
+    {
+      "Key": "rpc.system",
+      "Value": {
+        "Type": "STRING",
+        "Value": "grpc"
+      }
+    },
+    {
+      "Key": "rpc.service",
+      "Value": {
+        "Type": "STRING",
+        "Value": "moby.filesync.v1.FileSync"
+      }
+    },
+    {
+      "Key": "rpc.method",
+      "Value": {
+        "Type": "STRING",
+        "Value": "DiffCopy"
+      }
+    },
+    {
+      "Key": "rpc.grpc.status_code",
+      "Value": {
+        "Type": "INT64",
+        "Value": 0
+      }
+    }
+  ],
+  "Events": [
+    {
+      "Name": "message",
+      "Attributes": [
+        {
+          "Key": "message.type",
+          "Value": {
+            "Type": "STRING",
+            "Value": "SENT"
+          }
+        },
+        {
+          "Key": "message.id",
+          "Value": {
+            "Type": "INT64",
+            "Value": 1
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:41.463714236Z"
+    },
+    {
+      "Name": "message",
+      "Attributes": [
+        {
+          "Key": "message.type",
+          "Value": {
+            "Type": "STRING",
+            "Value": "RECEIVED"
+          }
+        },
+        {
+          "Key": "message.id",
+          "Value": {
+            "Type": "INT64",
+            "Value": 1
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:41.464300323Z"
+    },
+    {
+      "Name": "message",
+      "Attributes": [
+        {
+          "Key": "message.type",
+          "Value": {
+            "Type": "STRING",
+            "Value": "SENT"
+          }
+        },
+        {
+          "Key": "message.id",
+          "Value": {
+            "Type": "INT64",
+            "Value": 2
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:41.464304661Z"
+    }
+  ],
+  "Links": null,
+  "Status": {
+    "Code": 2,
+    "Description": ""
+  },
+  "DroppedAttributes": 0,
+  "DroppedEvents": 0,
+  "DroppedLinks": 0,
+  "ChildSpanCount": 0,
+  "Resource": [
+    {
+      "Key": "service.name",
+      "Value": {
+        "Type": "STRING",
+        "Value": "buildx"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.language",
+      "Value": {
+        "Type": "STRING",
+        "Value": "go"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.name",
+      "Value": {
+        "Type": "STRING",
+        "Value": "opentelemetry"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.version",
+      "Value": {
+        "Type": "STRING",
+        "Value": "1.14.0"
+      }
+    }
+  ],
+  "InstrumentationLibrary": {
+    "Name": "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc",
+    "Version": "semver:0.40.0",
+    "SchemaURL": ""
+  }
+}
+{
+  "Name": "moby.filesync.v1.Auth/VerifyTokenAuthority",
+  "SpanContext": {
+    "TraceID": "0555ce1903feb85770d102846273073b",
+    "SpanID": "6d33d51dcc276bdb",
+    "TraceFlags": "00",
+    "TraceState": "",
+    "Remote": false
+  },
+  "Parent": {
+    "TraceID": "0555ce1903feb85770d102846273073b",
+    "SpanID": "6b3f3a9d50fbe98b",
+    "TraceFlags": "00",
+    "TraceState": "",
+    "Remote": false
+  },
+  "SpanKind": 2,
+  "StartTime": "2024-01-12T14:57:41.494006854Z",
+  "EndTime": "2024-01-12T14:57:41.494231333Z",
+  "Attributes": [
+    {
+      "Key": "rpc.system",
+      "Value": {
+        "Type": "STRING",
+        "Value": "grpc"
+      }
+    },
+    {
+      "Key": "rpc.service",
+      "Value": {
+        "Type": "STRING",
+        "Value": "moby.filesync.v1.Auth"
+      }
+    },
+    {
+      "Key": "rpc.method",
+      "Value": {
+        "Type": "STRING",
+        "Value": "VerifyTokenAuthority"
+      }
+    },
+    {
+      "Key": "rpc.grpc.status_code",
+      "Value": {
+        "Type": "INT64",
+        "Value": 0
+      }
+    }
+  ],
+  "Events": [
+    {
+      "Name": "message",
+      "Attributes": [
+        {
+          "Key": "message.type",
+          "Value": {
+            "Type": "STRING",
+            "Value": "RECEIVED"
+          }
+        },
+        {
+          "Key": "message.id",
+          "Value": {
+            "Type": "INT64",
+            "Value": 1
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:41.494012254Z"
+    },
+    {
+      "Name": "message",
+      "Attributes": [
+        {
+          "Key": "message.type",
+          "Value": {
+            "Type": "STRING",
+            "Value": "SENT"
+          }
+        },
+        {
+          "Key": "message.id",
+          "Value": {
+            "Type": "INT64",
+            "Value": 1
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:41.4942295Z"
+    }
+  ],
+  "Links": null,
+  "Status": {
+    "Code": 2,
+    "Description": ""
+  },
+  "DroppedAttributes": 0,
+  "DroppedEvents": 0,
+  "DroppedLinks": 0,
+  "ChildSpanCount": 0,
+  "Resource": [
+    {
+      "Key": "service.name",
+      "Value": {
+        "Type": "STRING",
+        "Value": "buildx"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.language",
+      "Value": {
+        "Type": "STRING",
+        "Value": "go"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.name",
+      "Value": {
+        "Type": "STRING",
+        "Value": "opentelemetry"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.version",
+      "Value": {
+        "Type": "STRING",
+        "Value": "1.14.0"
+      }
+    }
+  ],
+  "InstrumentationLibrary": {
+    "Name": "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc",
+    "Version": "semver:0.40.0",
+    "SchemaURL": ""
+  }
+}
+{
+  "Name": "moby.filesync.v1.Auth/VerifyTokenAuthority",
+  "SpanContext": {
+    "TraceID": "0555ce1903feb85770d102846273073b",
+    "SpanID": "76b7ceb8af988ea9",
+    "TraceFlags": "00",
+    "TraceState": "",
+    "Remote": false
+  },
+  "Parent": {
+    "TraceID": "0555ce1903feb85770d102846273073b",
+    "SpanID": "f857d0002991458c",
+    "TraceFlags": "00",
+    "TraceState": "",
+    "Remote": false
+  },
+  "SpanKind": 2,
+  "StartTime": "2024-01-12T14:57:41.498132161Z",
+  "EndTime": "2024-01-12T14:57:41.498314011Z",
+  "Attributes": [
+    {
+      "Key": "rpc.system",
+      "Value": {
+        "Type": "STRING",
+        "Value": "grpc"
+      }
+    },
+    {
+      "Key": "rpc.service",
+      "Value": {
+        "Type": "STRING",
+        "Value": "moby.filesync.v1.Auth"
+      }
+    },
+    {
+      "Key": "rpc.method",
+      "Value": {
+        "Type": "STRING",
+        "Value": "VerifyTokenAuthority"
+      }
+    },
+    {
+      "Key": "rpc.grpc.status_code",
+      "Value": {
+        "Type": "INT64",
+        "Value": 0
+      }
+    }
+  ],
+  "Events": [
+    {
+      "Name": "message",
+      "Attributes": [
+        {
+          "Key": "message.type",
+          "Value": {
+            "Type": "STRING",
+            "Value": "RECEIVED"
+          }
+        },
+        {
+          "Key": "message.id",
+          "Value": {
+            "Type": "INT64",
+            "Value": 1
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:41.498134275Z"
+    },
+    {
+      "Name": "message",
+      "Attributes": [
+        {
+          "Key": "message.type",
+          "Value": {
+            "Type": "STRING",
+            "Value": "SENT"
+          }
+        },
+        {
+          "Key": "message.id",
+          "Value": {
+            "Type": "INT64",
+            "Value": 1
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:41.498312929Z"
+    }
+  ],
+  "Links": null,
+  "Status": {
+    "Code": 2,
+    "Description": ""
+  },
+  "DroppedAttributes": 0,
+  "DroppedEvents": 0,
+  "DroppedLinks": 0,
+  "ChildSpanCount": 0,
+  "Resource": [
+    {
+      "Key": "service.name",
+      "Value": {
+        "Type": "STRING",
+        "Value": "buildx"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.language",
+      "Value": {
+        "Type": "STRING",
+        "Value": "go"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.name",
+      "Value": {
+        "Type": "STRING",
+        "Value": "opentelemetry"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.version",
+      "Value": {
+        "Type": "STRING",
+        "Value": "1.14.0"
+      }
+    }
+  ],
+  "InstrumentationLibrary": {
+    "Name": "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc",
+    "Version": "semver:0.40.0",
+    "SchemaURL": ""
+  }
+}
+{
+  "Name": "moby.filesync.v1.Auth/FetchToken",
+  "SpanContext": {
+    "TraceID": "0555ce1903feb85770d102846273073b",
+    "SpanID": "cc230bba43af76bf",
+    "TraceFlags": "00",
+    "TraceState": "",
+    "Remote": false
+  },
+  "Parent": {
+    "TraceID": "0555ce1903feb85770d102846273073b",
+    "SpanID": "912de85fdfd20fe7",
+    "TraceFlags": "00",
+    "TraceState": "",
+    "Remote": false
+  },
+  "SpanKind": 2,
+  "StartTime": "2024-01-12T14:57:41.499095661Z",
+  "EndTime": "2024-01-12T14:57:41.841578399Z",
+  "Attributes": [
+    {
+      "Key": "rpc.system",
+      "Value": {
+        "Type": "STRING",
+        "Value": "grpc"
+      }
+    },
+    {
+      "Key": "rpc.service",
+      "Value": {
+        "Type": "STRING",
+        "Value": "moby.filesync.v1.Auth"
+      }
+    },
+    {
+      "Key": "rpc.method",
+      "Value": {
+        "Type": "STRING",
+        "Value": "FetchToken"
+      }
+    },
+    {
+      "Key": "rpc.grpc.status_code",
+      "Value": {
+        "Type": "INT64",
+        "Value": 0
+      }
+    }
+  ],
+  "Events": [
+    {
+      "Name": "message",
+      "Attributes": [
+        {
+          "Key": "message.type",
+          "Value": {
+            "Type": "STRING",
+            "Value": "RECEIVED"
+          }
+        },
+        {
+          "Key": "message.id",
+          "Value": {
+            "Type": "INT64",
+            "Value": 1
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:41.499097465Z"
+    },
+    {
+      "Name": "message",
+      "Attributes": [
+        {
+          "Key": "message.type",
+          "Value": {
+            "Type": "STRING",
+            "Value": "SENT"
+          }
+        },
+        {
+          "Key": "message.id",
+          "Value": {
+            "Type": "INT64",
+            "Value": 1
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:41.84157357Z"
+    }
+  ],
+  "Links": null,
+  "Status": {
+    "Code": 2,
+    "Description": ""
+  },
+  "DroppedAttributes": 0,
+  "DroppedEvents": 0,
+  "DroppedLinks": 0,
+  "ChildSpanCount": 0,
+  "Resource": [
+    {
+      "Key": "service.name",
+      "Value": {
+        "Type": "STRING",
+        "Value": "buildx"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.language",
+      "Value": {
+        "Type": "STRING",
+        "Value": "go"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.name",
+      "Value": {
+        "Type": "STRING",
+        "Value": "opentelemetry"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.version",
+      "Value": {
+        "Type": "STRING",
+        "Value": "1.14.0"
+      }
+    }
+  ],
+  "InstrumentationLibrary": {
+    "Name": "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc",
+    "Version": "semver:0.40.0",
+    "SchemaURL": ""
+  }
+}
+{
+  "Name": "moby.filesync.v1.Auth/FetchToken",
+  "SpanContext": {
+    "TraceID": "0555ce1903feb85770d102846273073b",
+    "SpanID": "e57242e59af94e80",
+    "TraceFlags": "00",
+    "TraceState": "",
+    "Remote": false
+  },
+  "Parent": {
+    "TraceID": "0555ce1903feb85770d102846273073b",
+    "SpanID": "d9ed48c061e2ecfd",
+    "TraceFlags": "00",
+    "TraceState": "",
+    "Remote": false
+  },
+  "SpanKind": 2,
+  "StartTime": "2024-01-12T14:57:41.494901557Z",
+  "EndTime": "2024-01-12T14:57:41.843975306Z",
+  "Attributes": [
+    {
+      "Key": "rpc.system",
+      "Value": {
+        "Type": "STRING",
+        "Value": "grpc"
+      }
+    },
+    {
+      "Key": "rpc.service",
+      "Value": {
+        "Type": "STRING",
+        "Value": "moby.filesync.v1.Auth"
+      }
+    },
+    {
+      "Key": "rpc.method",
+      "Value": {
+        "Type": "STRING",
+        "Value": "FetchToken"
+      }
+    },
+    {
+      "Key": "rpc.grpc.status_code",
+      "Value": {
+        "Type": "INT64",
+        "Value": 0
+      }
+    }
+  ],
+  "Events": [
+    {
+      "Name": "message",
+      "Attributes": [
+        {
+          "Key": "message.type",
+          "Value": {
+            "Type": "STRING",
+            "Value": "RECEIVED"
+          }
+        },
+        {
+          "Key": "message.id",
+          "Value": {
+            "Type": "INT64",
+            "Value": 1
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:41.494904071Z"
+    },
+    {
+      "Name": "message",
+      "Attributes": [
+        {
+          "Key": "message.type",
+          "Value": {
+            "Type": "STRING",
+            "Value": "SENT"
+          }
+        },
+        {
+          "Key": "message.id",
+          "Value": {
+            "Type": "INT64",
+            "Value": 1
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:41.8439725Z"
+    }
+  ],
+  "Links": null,
+  "Status": {
+    "Code": 2,
+    "Description": ""
+  },
+  "DroppedAttributes": 0,
+  "DroppedEvents": 0,
+  "DroppedLinks": 0,
+  "ChildSpanCount": 0,
+  "Resource": [
+    {
+      "Key": "service.name",
+      "Value": {
+        "Type": "STRING",
+        "Value": "buildx"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.language",
+      "Value": {
+        "Type": "STRING",
+        "Value": "go"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.name",
+      "Value": {
+        "Type": "STRING",
+        "Value": "opentelemetry"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.version",
+      "Value": {
+        "Type": "STRING",
+        "Value": "1.14.0"
+      }
+    }
+  ],
+  "InstrumentationLibrary": {
+    "Name": "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc",
+    "Version": "semver:0.40.0",
+    "SchemaURL": ""
+  }
+}
+{
+  "Name": "moby.buildkit.v1.frontend.LLBBridge/Solve",
+  "SpanContext": {
+    "TraceID": "0555ce1903feb85770d102846273073b",
+    "SpanID": "4548113210977542",
+    "TraceFlags": "00",
+    "TraceState": "",
+    "Remote": false
+  },
+  "Parent": {
+    "TraceID": "0555ce1903feb85770d102846273073b",
+    "SpanID": "d30eea87f2f0241d",
+    "TraceFlags": "00",
+    "TraceState": "",
+    "Remote": false
+  },
+  "SpanKind": 3,
+  "StartTime": "2024-01-12T14:57:40.400661584Z",
+  "EndTime": "2024-01-12T14:57:42.2855668Z",
+  "Attributes": [
+    {
+      "Key": "rpc.system",
+      "Value": {
+        "Type": "STRING",
+        "Value": "grpc"
+      }
+    },
+    {
+      "Key": "rpc.service",
+      "Value": {
+        "Type": "STRING",
+        "Value": "moby.buildkit.v1.frontend.LLBBridge"
+      }
+    },
+    {
+      "Key": "rpc.method",
+      "Value": {
+        "Type": "STRING",
+        "Value": "Solve"
+      }
+    },
+    {
+      "Key": "rpc.grpc.status_code",
+      "Value": {
+        "Type": "INT64",
+        "Value": 0
+      }
+    }
+  ],
+  "Events": [
+    {
+      "Name": "message",
+      "Attributes": [
+        {
+          "Key": "message.type",
+          "Value": {
+            "Type": "STRING",
+            "Value": "SENT"
+          }
+        },
+        {
+          "Key": "message.id",
+          "Value": {
+            "Type": "INT64",
+            "Value": 1
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:40.400666954Z"
+    },
+    {
+      "Name": "message",
+      "Attributes": [
+        {
+          "Key": "message.type",
+          "Value": {
+            "Type": "STRING",
+            "Value": "RECEIVED"
+          }
+        },
+        {
+          "Key": "message.id",
+          "Value": {
+            "Type": "INT64",
+            "Value": 1
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:42.285564065Z"
+    }
+  ],
+  "Links": null,
+  "Status": {
+    "Code": 2,
+    "Description": ""
+  },
+  "DroppedAttributes": 0,
+  "DroppedEvents": 0,
+  "DroppedLinks": 0,
+  "ChildSpanCount": 0,
+  "Resource": [
+    {
+      "Key": "service.name",
+      "Value": {
+        "Type": "STRING",
+        "Value": "buildx"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.language",
+      "Value": {
+        "Type": "STRING",
+        "Value": "go"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.name",
+      "Value": {
+        "Type": "STRING",
+        "Value": "opentelemetry"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.version",
+      "Value": {
+        "Type": "STRING",
+        "Value": "1.14.0"
+      }
+    }
+  ],
+  "InstrumentationLibrary": {
+    "Name": "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc",
+    "Version": "semver:0.40.0",
+    "SchemaURL": ""
+  }
+}
+{
+  "Name": "moby.buildkit.v1.frontend.LLBBridge/Return",
+  "SpanContext": {
+    "TraceID": "0555ce1903feb85770d102846273073b",
+    "SpanID": "76cae146240f0505",
+    "TraceFlags": "00",
+    "TraceState": "",
+    "Remote": false
+  },
+  "Parent": {
+    "TraceID": "0555ce1903feb85770d102846273073b",
+    "SpanID": "d30eea87f2f0241d",
+    "TraceFlags": "00",
+    "TraceState": "",
+    "Remote": false
+  },
+  "SpanKind": 3,
+  "StartTime": "2024-01-12T14:57:42.28564689Z",
+  "EndTime": "2024-01-12T14:57:42.286589372Z",
+  "Attributes": [
+    {
+      "Key": "rpc.system",
+      "Value": {
+        "Type": "STRING",
+        "Value": "grpc"
+      }
+    },
+    {
+      "Key": "rpc.service",
+      "Value": {
+        "Type": "STRING",
+        "Value": "moby.buildkit.v1.frontend.LLBBridge"
+      }
+    },
+    {
+      "Key": "rpc.method",
+      "Value": {
+        "Type": "STRING",
+        "Value": "Return"
+      }
+    },
+    {
+      "Key": "rpc.grpc.status_code",
+      "Value": {
+        "Type": "INT64",
+        "Value": 0
+      }
+    }
+  ],
+  "Events": [
+    {
+      "Name": "message",
+      "Attributes": [
+        {
+          "Key": "message.type",
+          "Value": {
+            "Type": "STRING",
+            "Value": "SENT"
+          }
+        },
+        {
+          "Key": "message.id",
+          "Value": {
+            "Type": "INT64",
+            "Value": 1
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:42.285663691Z"
+    },
+    {
+      "Name": "message",
+      "Attributes": [
+        {
+          "Key": "message.type",
+          "Value": {
+            "Type": "STRING",
+            "Value": "RECEIVED"
+          }
+        },
+        {
+          "Key": "message.id",
+          "Value": {
+            "Type": "INT64",
+            "Value": 1
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:42.286587859Z"
+    }
+  ],
+  "Links": null,
+  "Status": {
+    "Code": 2,
+    "Description": ""
+  },
+  "DroppedAttributes": 0,
+  "DroppedEvents": 0,
+  "DroppedLinks": 0,
+  "ChildSpanCount": 0,
+  "Resource": [
+    {
+      "Key": "service.name",
+      "Value": {
+        "Type": "STRING",
+        "Value": "buildx"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.language",
+      "Value": {
+        "Type": "STRING",
+        "Value": "go"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.name",
+      "Value": {
+        "Type": "STRING",
+        "Value": "opentelemetry"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.version",
+      "Value": {
+        "Type": "STRING",
+        "Value": "1.14.0"
+      }
+    }
+  ],
+  "InstrumentationLibrary": {
+    "Name": "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc",
+    "Version": "semver:0.40.0",
+    "SchemaURL": ""
+  }
+}
+{
+  "Name": "moby.filesync.v1.FileSync/DiffCopy",
+  "SpanContext": {
+    "TraceID": "0555ce1903feb85770d102846273073b",
+    "SpanID": "a0d9ee53b31b1760",
+    "TraceFlags": "00",
+    "TraceState": "",
+    "Remote": false
+  },
+  "Parent": {
+    "TraceID": "0555ce1903feb85770d102846273073b",
+    "SpanID": "0f4681a3ad4d29f8",
+    "TraceFlags": "00",
+    "TraceState": "",
+    "Remote": false
+  },
+  "SpanKind": 2,
+  "StartTime": "2024-01-12T14:57:42.293696748Z",
+  "EndTime": "2024-01-12T14:57:42.369270402Z",
+  "Attributes": [
+    {
+      "Key": "rpc.system",
+      "Value": {
+        "Type": "STRING",
+        "Value": "grpc"
+      }
+    },
+    {
+      "Key": "rpc.service",
+      "Value": {
+        "Type": "STRING",
+        "Value": "moby.filesync.v1.FileSync"
+      }
+    },
+    {
+      "Key": "rpc.method",
+      "Value": {
+        "Type": "STRING",
+        "Value": "DiffCopy"
+      }
+    },
+    {
+      "Key": "rpc.grpc.status_code",
+      "Value": {
+        "Type": "INT64",
+        "Value": 0
+      }
+    }
+  ],
+  "Events": [
+    {
+      "Name": "message",
+      "Attributes": [
+        {
+          "Key": "message.type",
+          "Value": {
+            "Type": "STRING",
+            "Value": "SENT"
+          }
+        },
+        {
+          "Key": "message.id",
+          "Value": {
+            "Type": "INT64",
+            "Value": 1
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:42.31089318Z"
+    },
+    {
+      "Name": "message",
+      "Attributes": [
+        {
+          "Key": "message.type",
+          "Value": {
+            "Type": "STRING",
+            "Value": "SENT"
+          }
+        },
+        {
+          "Key": "message.id",
+          "Value": {
+            "Type": "INT64",
+            "Value": 2
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:42.316940688Z"
+    },
+    {
+      "Name": "message",
+      "Attributes": [
+        {
+          "Key": "message.type",
+          "Value": {
+            "Type": "STRING",
+            "Value": "SENT"
+          }
+        },
+        {
+          "Key": "message.id",
+          "Value": {
+            "Type": "INT64",
+            "Value": 3
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:42.320055056Z"
+    },
+    {
+      "Name": "message",
+      "Attributes": [
+        {
+          "Key": "message.type",
+          "Value": {
+            "Type": "STRING",
+            "Value": "SENT"
+          }
+        },
+        {
+          "Key": "message.id",
+          "Value": {
+            "Type": "INT64",
+            "Value": 4
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:42.322700568Z"
+    },
+    {
+      "Name": "message",
+      "Attributes": [
+        {
+          "Key": "message.type",
+          "Value": {
+            "Type": "STRING",
+            "Value": "SENT"
+          }
+        },
+        {
+          "Key": "message.id",
+          "Value": {
+            "Type": "INT64",
+            "Value": 5
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:42.325137621Z"
+    },
+    {
+      "Name": "message",
+      "Attributes": [
+        {
+          "Key": "message.type",
+          "Value": {
+            "Type": "STRING",
+            "Value": "SENT"
+          }
+        },
+        {
+          "Key": "message.id",
+          "Value": {
+            "Type": "INT64",
+            "Value": 6
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:42.329895107Z"
+    },
+    {
+      "Name": "message",
+      "Attributes": [
+        {
+          "Key": "message.type",
+          "Value": {
+            "Type": "STRING",
+            "Value": "SENT"
+          }
+        },
+        {
+          "Key": "message.id",
+          "Value": {
+            "Type": "INT64",
+            "Value": 7
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:42.333228304Z"
+    },
+    {
+      "Name": "message",
+      "Attributes": [
+        {
+          "Key": "message.type",
+          "Value": {
+            "Type": "STRING",
+            "Value": "SENT"
+          }
+        },
+        {
+          "Key": "message.id",
+          "Value": {
+            "Type": "INT64",
+            "Value": 8
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:42.339031163Z"
+    },
+    {
+      "Name": "message",
+      "Attributes": [
+        {
+          "Key": "message.type",
+          "Value": {
+            "Type": "STRING",
+            "Value": "SENT"
+          }
+        },
+        {
+          "Key": "message.id",
+          "Value": {
+            "Type": "INT64",
+            "Value": 9
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:42.341442287Z"
+    },
+    {
+      "Name": "message",
+      "Attributes": [
+        {
+          "Key": "message.type",
+          "Value": {
+            "Type": "STRING",
+            "Value": "SENT"
+          }
+        },
+        {
+          "Key": "message.id",
+          "Value": {
+            "Type": "INT64",
+            "Value": 10
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:42.350695522Z"
+    },
+    {
+      "Name": "message",
+      "Attributes": [
+        {
+          "Key": "message.type",
+          "Value": {
+            "Type": "STRING",
+            "Value": "SENT"
+          }
+        },
+        {
+          "Key": "message.id",
+          "Value": {
+            "Type": "INT64",
+            "Value": 11
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:42.352515933Z"
+    },
+    {
+      "Name": "message",
+      "Attributes": [
+        {
+          "Key": "message.type",
+          "Value": {
+            "Type": "STRING",
+            "Value": "SENT"
+          }
+        },
+        {
+          "Key": "message.id",
+          "Value": {
+            "Type": "INT64",
+            "Value": 12
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:42.35439304Z"
+    },
+    {
+      "Name": "message",
+      "Attributes": [
+        {
+          "Key": "message.type",
+          "Value": {
+            "Type": "STRING",
+            "Value": "SENT"
+          }
+        },
+        {
+          "Key": "message.id",
+          "Value": {
+            "Type": "INT64",
+            "Value": 13
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:42.356241885Z"
+    },
+    {
+      "Name": "message",
+      "Attributes": [
+        {
+          "Key": "message.type",
+          "Value": {
+            "Type": "STRING",
+            "Value": "SENT"
+          }
+        },
+        {
+          "Key": "message.id",
+          "Value": {
+            "Type": "INT64",
+            "Value": 14
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:42.358001301Z"
+    },
+    {
+      "Name": "message",
+      "Attributes": [
+        {
+          "Key": "message.type",
+          "Value": {
+            "Type": "STRING",
+            "Value": "SENT"
+          }
+        },
+        {
+          "Key": "message.id",
+          "Value": {
+            "Type": "INT64",
+            "Value": 15
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:42.359717427Z"
+    },
+    {
+      "Name": "message",
+      "Attributes": [
+        {
+          "Key": "message.type",
+          "Value": {
+            "Type": "STRING",
+            "Value": "SENT"
+          }
+        },
+        {
+          "Key": "message.id",
+          "Value": {
+            "Type": "INT64",
+            "Value": 16
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:42.361436218Z"
+    },
+    {
+      "Name": "message",
+      "Attributes": [
+        {
+          "Key": "message.type",
+          "Value": {
+            "Type": "STRING",
+            "Value": "RECEIVED"
+          }
+        },
+        {
+          "Key": "message.id",
+          "Value": {
+            "Type": "INT64",
+            "Value": 1
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:42.362075091Z"
+    },
+    {
+      "Name": "message",
+      "Attributes": [
+        {
+          "Key": "message.type",
+          "Value": {
+            "Type": "STRING",
+            "Value": "SENT"
+          }
+        },
+        {
+          "Key": "message.id",
+          "Value": {
+            "Type": "INT64",
+            "Value": 17
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:42.363162765Z"
+    },
+    {
+      "Name": "message",
+      "Attributes": [
+        {
+          "Key": "message.type",
+          "Value": {
+            "Type": "STRING",
+            "Value": "SENT"
+          }
+        },
+        {
+          "Key": "message.id",
+          "Value": {
+            "Type": "INT64",
+            "Value": 18
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:42.363331097Z"
+    },
+    {
+      "Name": "message",
+      "Attributes": [
+        {
+          "Key": "message.type",
+          "Value": {
+            "Type": "STRING",
+            "Value": "SENT"
+          }
+        },
+        {
+          "Key": "message.id",
+          "Value": {
+            "Type": "INT64",
+            "Value": 19
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:42.363577729Z"
+    },
+    {
+      "Name": "message",
+      "Attributes": [
+        {
+          "Key": "message.type",
+          "Value": {
+            "Type": "STRING",
+            "Value": "RECEIVED"
+          }
+        },
+        {
+          "Key": "message.id",
+          "Value": {
+            "Type": "INT64",
+            "Value": 2
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:42.364196313Z"
+    },
+    {
+      "Name": "message",
+      "Attributes": [
+        {
+          "Key": "message.type",
+          "Value": {
+            "Type": "STRING",
+            "Value": "SENT"
+          }
+        },
+        {
+          "Key": "message.id",
+          "Value": {
+            "Type": "INT64",
+            "Value": 20
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:42.365070687Z"
+    },
+    {
+      "Name": "message",
+      "Attributes": [
+        {
+          "Key": "message.type",
+          "Value": {
+            "Type": "STRING",
+            "Value": "SENT"
+          }
+        },
+        {
+          "Key": "message.id",
+          "Value": {
+            "Type": "INT64",
+            "Value": 21
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:42.365153813Z"
+    },
+    {
+      "Name": "message",
+      "Attributes": [
+        {
+          "Key": "message.type",
+          "Value": {
+            "Type": "STRING",
+            "Value": "SENT"
+          }
+        },
+        {
+          "Key": "message.id",
+          "Value": {
+            "Type": "INT64",
+            "Value": 22
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:42.365298203Z"
+    },
+    {
+      "Name": "message",
+      "Attributes": [
+        {
+          "Key": "message.type",
+          "Value": {
+            "Type": "STRING",
+            "Value": "SENT"
+          }
+        },
+        {
+          "Key": "message.id",
+          "Value": {
+            "Type": "INT64",
+            "Value": 23
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:42.366953244Z"
+    },
+    {
+      "Name": "message",
+      "Attributes": [
+        {
+          "Key": "message.type",
+          "Value": {
+            "Type": "STRING",
+            "Value": "SENT"
+          }
+        },
+        {
+          "Key": "message.id",
+          "Value": {
+            "Type": "INT64",
+            "Value": 24
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:42.368829308Z"
+    },
+    {
+      "Name": "message",
+      "Attributes": [
+        {
+          "Key": "message.type",
+          "Value": {
+            "Type": "STRING",
+            "Value": "SENT"
+          }
+        },
+        {
+          "Key": "message.id",
+          "Value": {
+            "Type": "INT64",
+            "Value": 25
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:42.368865285Z"
+    },
+    {
+      "Name": "message",
+      "Attributes": [
+        {
+          "Key": "message.type",
+          "Value": {
+            "Type": "STRING",
+            "Value": "RECEIVED"
+          }
+        },
+        {
+          "Key": "message.id",
+          "Value": {
+            "Type": "INT64",
+            "Value": 3
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:42.369254041Z"
+    },
+    {
+      "Name": "message",
+      "Attributes": [
+        {
+          "Key": "message.type",
+          "Value": {
+            "Type": "STRING",
+            "Value": "SENT"
+          }
+        },
+        {
+          "Key": "message.id",
+          "Value": {
+            "Type": "INT64",
+            "Value": 26
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:42.369257247Z"
+    }
+  ],
+  "Links": null,
+  "Status": {
+    "Code": 2,
+    "Description": ""
+  },
+  "DroppedAttributes": 0,
+  "DroppedEvents": 0,
+  "DroppedLinks": 0,
+  "ChildSpanCount": 0,
+  "Resource": [
+    {
+      "Key": "service.name",
+      "Value": {
+        "Type": "STRING",
+        "Value": "buildx"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.language",
+      "Value": {
+        "Type": "STRING",
+        "Value": "go"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.name",
+      "Value": {
+        "Type": "STRING",
+        "Value": "opentelemetry"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.version",
+      "Value": {
+        "Type": "STRING",
+        "Value": "1.14.0"
+      }
+    }
+  ],
+  "InstrumentationLibrary": {
+    "Name": "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc",
+    "Version": "semver:0.40.0",
+    "SchemaURL": ""
+  }
+}
+{
+  "Name": "load buildkit capabilities",
+  "SpanContext": {
+    "TraceID": "0555ce1903feb85770d102846273073b",
+    "SpanID": "c1b335eeeaf56405",
+    "TraceFlags": "00",
+    "TraceState": "",
+    "Remote": false
+  },
+  "Parent": {
+    "TraceID": "0555ce1903feb85770d102846273073b",
+    "SpanID": "d30eea87f2f0241d",
+    "TraceFlags": "00",
+    "TraceState": "",
+    "Remote": false
+  },
+  "SpanKind": 1,
+  "StartTime": "2024-01-12T14:57:40.147185565Z",
+  "EndTime": "2024-01-12T14:57:40.167978408Z",
+  "Attributes": null,
+  "Events": null,
+  "Links": null,
+  "Status": {
+    "Code": 2,
+    "Description": ""
+  },
+  "DroppedAttributes": 0,
+  "DroppedEvents": 0,
+  "DroppedLinks": 0,
+  "ChildSpanCount": 0,
+  "Resource": [
+    {
+      "Key": "service.name",
+      "Value": {
+        "Type": "STRING",
+        "Value": "buildx"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.language",
+      "Value": {
+        "Type": "STRING",
+        "Value": "go"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.name",
+      "Value": {
+        "Type": "STRING",
+        "Value": "opentelemetry"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.version",
+      "Value": {
+        "Type": "STRING",
+        "Value": "1.14.0"
+      }
+    }
+  ],
+  "InstrumentationLibrary": {
+    "Name": "go.opentelemetry.io/otel/sdk/tracer",
+    "Version": "",
+    "SchemaURL": ""
+  }
+}
+{
+  "Name": "moby.buildkit.v1.Control/ListWorkers",
+  "SpanContext": {
+    "TraceID": "0555ce1903feb85770d102846273073b",
+    "SpanID": "014a492f789cd8df",
+    "TraceFlags": "01",
+    "TraceState": "",
+    "Remote": false
+  },
+  "Parent": {
+    "TraceID": "0555ce1903feb85770d102846273073b",
+    "SpanID": "f5f33ab2ca194d1b",
+    "TraceFlags": "01",
+    "TraceState": "",
+    "Remote": true
+  },
+  "SpanKind": 2,
+  "StartTime": "2024-01-12T14:57:40.150333695Z",
+  "EndTime": "2024-01-12T14:57:40.16394451Z",
+  "Attributes": [
+    {
+      "Key": "rpc.system",
+      "Value": {
+        "Type": "STRING",
+        "Value": "grpc"
+      }
+    },
+    {
+      "Key": "rpc.service",
+      "Value": {
+        "Type": "STRING",
+        "Value": "moby.buildkit.v1.Control"
+      }
+    },
+    {
+      "Key": "rpc.method",
+      "Value": {
+        "Type": "STRING",
+        "Value": "ListWorkers"
+      }
+    },
+    {
+      "Key": "rpc.grpc.status_code",
+      "Value": {
+        "Type": "INT64",
+        "Value": 0
+      }
+    }
+  ],
+  "Events": null,
+  "Links": null,
+  "Status": {
+    "Code": 0,
+    "Description": ""
+  },
+  "DroppedAttributes": 0,
+  "DroppedEvents": 0,
+  "DroppedLinks": 0,
+  "ChildSpanCount": 0,
+  "Resource": [
+    {
+      "Key": "service.name",
+      "Value": {
+        "Type": "STRING",
+        "Value": "dockerd"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.language",
+      "Value": {
+        "Type": "STRING",
+        "Value": "go"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.name",
+      "Value": {
+        "Type": "STRING",
+        "Value": "opentelemetry"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.version",
+      "Value": {
+        "Type": "STRING",
+        "Value": "1.19.0"
+      }
+    }
+  ],
+  "InstrumentationLibrary": {
+    "Name": "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc",
+    "Version": "0.45.0",
+    "SchemaURL": ""
+  }
+}
+{
+  "Name": "moby.buildkit.v1.frontend.LLBBridge/Ping",
+  "SpanContext": {
+    "TraceID": "0555ce1903feb85770d102846273073b",
+    "SpanID": "348c867020d36163",
+    "TraceFlags": "01",
+    "TraceState": "",
+    "Remote": false
+  },
+  "Parent": {
+    "TraceID": "0555ce1903feb85770d102846273073b",
+    "SpanID": "1f64b377d3d03600",
+    "TraceFlags": "01",
+    "TraceState": "",
+    "Remote": true
+  },
+  "SpanKind": 2,
+  "StartTime": "2024-01-12T14:57:40.165969223Z",
+  "EndTime": "2024-01-12T14:57:40.166043331Z",
+  "Attributes": [
+    {
+      "Key": "rpc.system",
+      "Value": {
+        "Type": "STRING",
+        "Value": "grpc"
+      }
+    },
+    {
+      "Key": "rpc.service",
+      "Value": {
+        "Type": "STRING",
+        "Value": "moby.buildkit.v1.frontend.LLBBridge"
+      }
+    },
+    {
+      "Key": "rpc.method",
+      "Value": {
+        "Type": "STRING",
+        "Value": "Ping"
+      }
+    },
+    {
+      "Key": "rpc.grpc.status_code",
+      "Value": {
+        "Type": "INT64",
+        "Value": 0
+      }
+    }
+  ],
+  "Events": null,
+  "Links": null,
+  "Status": {
+    "Code": 0,
+    "Description": ""
+  },
+  "DroppedAttributes": 0,
+  "DroppedEvents": 0,
+  "DroppedLinks": 0,
+  "ChildSpanCount": 0,
+  "Resource": [
+    {
+      "Key": "service.name",
+      "Value": {
+        "Type": "STRING",
+        "Value": "dockerd"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.language",
+      "Value": {
+        "Type": "STRING",
+        "Value": "go"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.name",
+      "Value": {
+        "Type": "STRING",
+        "Value": "opentelemetry"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.version",
+      "Value": {
+        "Type": "STRING",
+        "Value": "1.19.0"
+      }
+    }
+  ],
+  "InstrumentationLibrary": {
+    "Name": "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc",
+    "Version": "0.45.0",
+    "SchemaURL": ""
+  }
+}
+{
+  "Name": "moby.buildkit.v1.frontend.LLBBridge/Return",
+  "SpanContext": {
+    "TraceID": "0555ce1903feb85770d102846273073b",
+    "SpanID": "8b7d202043a4ce8e",
+    "TraceFlags": "01",
+    "TraceState": "",
+    "Remote": false
+  },
+  "Parent": {
+    "TraceID": "0555ce1903feb85770d102846273073b",
+    "SpanID": "edb147a8e37c0b62",
+    "TraceFlags": "01",
+    "TraceState": "",
+    "Remote": true
+  },
+  "SpanKind": 2,
+  "StartTime": "2024-01-12T14:57:40.166855529Z",
+  "EndTime": "2024-01-12T14:57:40.166903889Z",
+  "Attributes": [
+    {
+      "Key": "rpc.system",
+      "Value": {
+        "Type": "STRING",
+        "Value": "grpc"
+      }
+    },
+    {
+      "Key": "rpc.service",
+      "Value": {
+        "Type": "STRING",
+        "Value": "moby.buildkit.v1.frontend.LLBBridge"
+      }
+    },
+    {
+      "Key": "rpc.method",
+      "Value": {
+        "Type": "STRING",
+        "Value": "Return"
+      }
+    },
+    {
+      "Key": "rpc.grpc.status_code",
+      "Value": {
+        "Type": "INT64",
+        "Value": 0
+      }
+    }
+  ],
+  "Events": null,
+  "Links": null,
+  "Status": {
+    "Code": 0,
+    "Description": ""
+  },
+  "DroppedAttributes": 0,
+  "DroppedEvents": 0,
+  "DroppedLinks": 0,
+  "ChildSpanCount": 0,
+  "Resource": [
+    {
+      "Key": "service.name",
+      "Value": {
+        "Type": "STRING",
+        "Value": "dockerd"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.language",
+      "Value": {
+        "Type": "STRING",
+        "Value": "go"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.name",
+      "Value": {
+        "Type": "STRING",
+        "Value": "opentelemetry"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.version",
+      "Value": {
+        "Type": "STRING",
+        "Value": "1.19.0"
+      }
+    }
+  ],
+  "InstrumentationLibrary": {
+    "Name": "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc",
+    "Version": "0.45.0",
+    "SchemaURL": ""
+  }
+}
+{
+  "Name": "moby.buildkit.v1.Control/Solve",
+  "SpanContext": {
+    "TraceID": "0555ce1903feb85770d102846273073b",
+    "SpanID": "3514a2ca1fb59328",
+    "TraceFlags": "01",
+    "TraceState": "",
+    "Remote": false
+  },
+  "Parent": {
+    "TraceID": "0555ce1903feb85770d102846273073b",
+    "SpanID": "be9c91e5fc1a8511",
+    "TraceFlags": "01",
+    "TraceState": "",
+    "Remote": true
+  },
+  "SpanKind": 2,
+  "StartTime": "2024-01-12T14:57:40.165882913Z",
+  "EndTime": "2024-01-12T14:57:40.167098121Z",
+  "Attributes": [
+    {
+      "Key": "rpc.system",
+      "Value": {
+        "Type": "STRING",
+        "Value": "grpc"
+      }
+    },
+    {
+      "Key": "rpc.service",
+      "Value": {
+        "Type": "STRING",
+        "Value": "moby.buildkit.v1.Control"
+      }
+    },
+    {
+      "Key": "rpc.method",
+      "Value": {
+        "Type": "STRING",
+        "Value": "Solve"
+      }
+    },
+    {
+      "Key": "rpc.grpc.status_code",
+      "Value": {
+        "Type": "INT64",
+        "Value": 0
+      }
+    }
+  ],
+  "Events": null,
+  "Links": null,
+  "Status": {
+    "Code": 0,
+    "Description": ""
+  },
+  "DroppedAttributes": 0,
+  "DroppedEvents": 0,
+  "DroppedLinks": 0,
+  "ChildSpanCount": 0,
+  "Resource": [
+    {
+      "Key": "service.name",
+      "Value": {
+        "Type": "STRING",
+        "Value": "dockerd"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.language",
+      "Value": {
+        "Type": "STRING",
+        "Value": "go"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.name",
+      "Value": {
+        "Type": "STRING",
+        "Value": "opentelemetry"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.version",
+      "Value": {
+        "Type": "STRING",
+        "Value": "1.19.0"
+      }
+    }
+  ],
+  "InstrumentationLibrary": {
+    "Name": "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc",
+    "Version": "0.45.0",
+    "SchemaURL": ""
+  }
+}
+{
+  "Name": "moby.buildkit.v1.Control/Status",
+  "SpanContext": {
+    "TraceID": "0555ce1903feb85770d102846273073b",
+    "SpanID": "5cd96a37a4409495",
+    "TraceFlags": "01",
+    "TraceState": "",
+    "Remote": false
+  },
+  "Parent": {
+    "TraceID": "0555ce1903feb85770d102846273073b",
+    "SpanID": "07376377d689755f",
+    "TraceFlags": "01",
+    "TraceState": "",
+    "Remote": true
+  },
+  "SpanKind": 2,
+  "StartTime": "2024-01-12T14:57:40.16580682Z",
+  "EndTime": "2024-01-12T14:57:40.167114423Z",
+  "Attributes": [
+    {
+      "Key": "rpc.system",
+      "Value": {
+        "Type": "STRING",
+        "Value": "grpc"
+      }
+    },
+    {
+      "Key": "rpc.service",
+      "Value": {
+        "Type": "STRING",
+        "Value": "moby.buildkit.v1.Control"
+      }
+    },
+    {
+      "Key": "rpc.method",
+      "Value": {
+        "Type": "STRING",
+        "Value": "Status"
+      }
+    },
+    {
+      "Key": "rpc.grpc.status_code",
+      "Value": {
+        "Type": "INT64",
+        "Value": 0
+      }
+    }
+  ],
+  "Events": null,
+  "Links": null,
+  "Status": {
+    "Code": 0,
+    "Description": ""
+  },
+  "DroppedAttributes": 0,
+  "DroppedEvents": 0,
+  "DroppedLinks": 0,
+  "ChildSpanCount": 0,
+  "Resource": [
+    {
+      "Key": "service.name",
+      "Value": {
+        "Type": "STRING",
+        "Value": "dockerd"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.language",
+      "Value": {
+        "Type": "STRING",
+        "Value": "go"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.name",
+      "Value": {
+        "Type": "STRING",
+        "Value": "opentelemetry"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.version",
+      "Value": {
+        "Type": "STRING",
+        "Value": "1.19.0"
+      }
+    }
+  ],
+  "InstrumentationLibrary": {
+    "Name": "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc",
+    "Version": "0.45.0",
+    "SchemaURL": ""
+  }
+}
+{
+  "Name": "moby.buildkit.v1.Control/ListWorkers",
+  "SpanContext": {
+    "TraceID": "0555ce1903feb85770d102846273073b",
+    "SpanID": "765af2593e65e310",
+    "TraceFlags": "01",
+    "TraceState": "",
+    "Remote": false
+  },
+  "Parent": {
+    "TraceID": "0555ce1903feb85770d102846273073b",
+    "SpanID": "fea7d265a9b437c2",
+    "TraceFlags": "01",
+    "TraceState": "",
+    "Remote": true
+  },
+  "SpanKind": 2,
+  "StartTime": "2024-01-12T14:57:40.3551397Z",
+  "EndTime": "2024-01-12T14:57:40.372322277Z",
+  "Attributes": [
+    {
+      "Key": "rpc.system",
+      "Value": {
+        "Type": "STRING",
+        "Value": "grpc"
+      }
+    },
+    {
+      "Key": "rpc.service",
+      "Value": {
+        "Type": "STRING",
+        "Value": "moby.buildkit.v1.Control"
+      }
+    },
+    {
+      "Key": "rpc.method",
+      "Value": {
+        "Type": "STRING",
+        "Value": "ListWorkers"
+      }
+    },
+    {
+      "Key": "rpc.grpc.status_code",
+      "Value": {
+        "Type": "INT64",
+        "Value": 0
+      }
+    }
+  ],
+  "Events": null,
+  "Links": null,
+  "Status": {
+    "Code": 0,
+    "Description": ""
+  },
+  "DroppedAttributes": 0,
+  "DroppedEvents": 0,
+  "DroppedLinks": 0,
+  "ChildSpanCount": 0,
+  "Resource": [
+    {
+      "Key": "service.name",
+      "Value": {
+        "Type": "STRING",
+        "Value": "dockerd"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.language",
+      "Value": {
+        "Type": "STRING",
+        "Value": "go"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.name",
+      "Value": {
+        "Type": "STRING",
+        "Value": "opentelemetry"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.version",
+      "Value": {
+        "Type": "STRING",
+        "Value": "1.19.0"
+      }
+    }
+  ],
+  "InstrumentationLibrary": {
+    "Name": "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc",
+    "Version": "0.45.0",
+    "SchemaURL": ""
+  }
+}
+{
+  "Name": "moby.buildkit.v1.Control/ListWorkers",
+  "SpanContext": {
+    "TraceID": "0555ce1903feb85770d102846273073b",
+    "SpanID": "8ebd38cfd2dae311",
+    "TraceFlags": "01",
+    "TraceState": "",
+    "Remote": false
+  },
+  "Parent": {
+    "TraceID": "0555ce1903feb85770d102846273073b",
+    "SpanID": "e1841a2a45f6de1a",
+    "TraceFlags": "01",
+    "TraceState": "",
+    "Remote": true
+  },
+  "SpanKind": 2,
+  "StartTime": "2024-01-12T14:57:40.382862076Z",
+  "EndTime": "2024-01-12T14:57:40.397843209Z",
+  "Attributes": [
+    {
+      "Key": "rpc.system",
+      "Value": {
+        "Type": "STRING",
+        "Value": "grpc"
+      }
+    },
+    {
+      "Key": "rpc.service",
+      "Value": {
+        "Type": "STRING",
+        "Value": "moby.buildkit.v1.Control"
+      }
+    },
+    {
+      "Key": "rpc.method",
+      "Value": {
+        "Type": "STRING",
+        "Value": "ListWorkers"
+      }
+    },
+    {
+      "Key": "rpc.grpc.status_code",
+      "Value": {
+        "Type": "INT64",
+        "Value": 0
+      }
+    }
+  ],
+  "Events": null,
+  "Links": null,
+  "Status": {
+    "Code": 0,
+    "Description": ""
+  },
+  "DroppedAttributes": 0,
+  "DroppedEvents": 0,
+  "DroppedLinks": 0,
+  "ChildSpanCount": 0,
+  "Resource": [
+    {
+      "Key": "service.name",
+      "Value": {
+        "Type": "STRING",
+        "Value": "dockerd"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.language",
+      "Value": {
+        "Type": "STRING",
+        "Value": "go"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.name",
+      "Value": {
+        "Type": "STRING",
+        "Value": "opentelemetry"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.version",
+      "Value": {
+        "Type": "STRING",
+        "Value": "1.19.0"
+      }
+    }
+  ],
+  "InstrumentationLibrary": {
+    "Name": "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc",
+    "Version": "0.45.0",
+    "SchemaURL": ""
+  }
+}
+{
+  "Name": "moby.buildkit.v1.frontend.LLBBridge/Ping",
+  "SpanContext": {
+    "TraceID": "0555ce1903feb85770d102846273073b",
+    "SpanID": "92290c7354b75ead",
+    "TraceFlags": "01",
+    "TraceState": "",
+    "Remote": false
+  },
+  "Parent": {
+    "TraceID": "0555ce1903feb85770d102846273073b",
+    "SpanID": "a4ee96f4ccd426ec",
+    "TraceFlags": "01",
+    "TraceState": "",
+    "Remote": true
+  },
+  "SpanKind": 2,
+  "StartTime": "2024-01-12T14:57:40.400135462Z",
+  "EndTime": "2024-01-12T14:57:40.400280345Z",
+  "Attributes": [
+    {
+      "Key": "rpc.system",
+      "Value": {
+        "Type": "STRING",
+        "Value": "grpc"
+      }
+    },
+    {
+      "Key": "rpc.service",
+      "Value": {
+        "Type": "STRING",
+        "Value": "moby.buildkit.v1.frontend.LLBBridge"
+      }
+    },
+    {
+      "Key": "rpc.method",
+      "Value": {
+        "Type": "STRING",
+        "Value": "Ping"
+      }
+    },
+    {
+      "Key": "rpc.grpc.status_code",
+      "Value": {
+        "Type": "INT64",
+        "Value": 0
+      }
+    }
+  ],
+  "Events": null,
+  "Links": null,
+  "Status": {
+    "Code": 0,
+    "Description": ""
+  },
+  "DroppedAttributes": 0,
+  "DroppedEvents": 0,
+  "DroppedLinks": 0,
+  "ChildSpanCount": 0,
+  "Resource": [
+    {
+      "Key": "service.name",
+      "Value": {
+        "Type": "STRING",
+        "Value": "dockerd"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.language",
+      "Value": {
+        "Type": "STRING",
+        "Value": "go"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.name",
+      "Value": {
+        "Type": "STRING",
+        "Value": "opentelemetry"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.version",
+      "Value": {
+        "Type": "STRING",
+        "Value": "1.19.0"
+      }
+    }
+  ],
+  "InstrumentationLibrary": {
+    "Name": "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc",
+    "Version": "0.45.0",
+    "SchemaURL": ""
+  }
+}
+{
+  "Name": "cache request: [internal] load build definition from Dockerfile",
+  "SpanContext": {
+    "TraceID": "0555ce1903feb85770d102846273073b",
+    "SpanID": "20676e8261684f34",
+    "TraceFlags": "01",
+    "TraceState": "",
+    "Remote": false
+  },
+  "Parent": {
+    "TraceID": "0555ce1903feb85770d102846273073b",
+    "SpanID": "14078881261b5312",
+    "TraceFlags": "01",
+    "TraceState": "",
+    "Remote": false
+  },
+  "SpanKind": 1,
+  "StartTime": "2024-01-12T14:57:40.401994836Z",
+  "EndTime": "2024-01-12T14:57:40.402041603Z",
+  "Attributes": [
+    {
+      "Key": "vertex",
+      "Value": {
+        "Type": "STRING",
+        "Value": "sha256:7bef658bc90b7db608c3df27d11b99f5785a6a6d1b5fe94ff63c69ae5093851c"
+      }
+    }
+  ],
+  "Events": null,
+  "Links": null,
+  "Status": {
+    "Code": 0,
+    "Description": ""
+  },
+  "DroppedAttributes": 0,
+  "DroppedEvents": 0,
+  "DroppedLinks": 0,
+  "ChildSpanCount": 0,
+  "Resource": [
+    {
+      "Key": "service.name",
+      "Value": {
+        "Type": "STRING",
+        "Value": "dockerd"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.language",
+      "Value": {
+        "Type": "STRING",
+        "Value": "go"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.name",
+      "Value": {
+        "Type": "STRING",
+        "Value": "opentelemetry"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.version",
+      "Value": {
+        "Type": "STRING",
+        "Value": "1.19.0"
+      }
+    }
+  ],
+  "InstrumentationLibrary": {
+    "Name": "go.opentelemetry.io/otel/sdk/tracer",
+    "Version": "",
+    "SchemaURL": ""
+  }
+}
+{
+  "Name": "moby.filesync.v1.FileSync/DiffCopy",
+  "SpanContext": {
+    "TraceID": "0555ce1903feb85770d102846273073b",
+    "SpanID": "70f459c5d3c2c488",
+    "TraceFlags": "01",
+    "TraceState": "",
+    "Remote": false
+  },
+  "Parent": {
+    "TraceID": "0555ce1903feb85770d102846273073b",
+    "SpanID": "6f6b0f30f558c4ae",
+    "TraceFlags": "01",
+    "TraceState": "",
+    "Remote": false
+  },
+  "SpanKind": 3,
+  "StartTime": "2024-01-12T14:57:40.408431648Z",
+  "EndTime": "2024-01-12T14:57:40.419083176Z",
+  "Attributes": [
+    {
+      "Key": "rpc.system",
+      "Value": {
+        "Type": "STRING",
+        "Value": "grpc"
+      }
+    },
+    {
+      "Key": "rpc.service",
+      "Value": {
+        "Type": "STRING",
+        "Value": "moby.filesync.v1.FileSync"
+      }
+    },
+    {
+      "Key": "rpc.method",
+      "Value": {
+        "Type": "STRING",
+        "Value": "DiffCopy"
+      }
+    },
+    {
+      "Key": "rpc.grpc.status_code",
+      "Value": {
+        "Type": "INT64",
+        "Value": 0
+      }
+    }
+  ],
+  "Events": null,
+  "Links": null,
+  "Status": {
+    "Code": 0,
+    "Description": ""
+  },
+  "DroppedAttributes": 0,
+  "DroppedEvents": 0,
+  "DroppedLinks": 0,
+  "ChildSpanCount": 0,
+  "Resource": [
+    {
+      "Key": "service.name",
+      "Value": {
+        "Type": "STRING",
+        "Value": "dockerd"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.language",
+      "Value": {
+        "Type": "STRING",
+        "Value": "go"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.name",
+      "Value": {
+        "Type": "STRING",
+        "Value": "opentelemetry"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.version",
+      "Value": {
+        "Type": "STRING",
+        "Value": "1.19.0"
+      }
+    }
+  ],
+  "InstrumentationLibrary": {
+    "Name": "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc",
+    "Version": "0.45.0",
+    "SchemaURL": ""
+  }
+}
+{
+  "Name": "[internal] load build definition from Dockerfile",
+  "SpanContext": {
+    "TraceID": "0555ce1903feb85770d102846273073b",
+    "SpanID": "6f6b0f30f558c4ae",
+    "TraceFlags": "01",
+    "TraceState": "",
+    "Remote": false
+  },
+  "Parent": {
+    "TraceID": "0555ce1903feb85770d102846273073b",
+    "SpanID": "14078881261b5312",
+    "TraceFlags": "01",
+    "TraceState": "",
+    "Remote": false
+  },
+  "SpanKind": 1,
+  "StartTime": "2024-01-12T14:57:40.402159874Z",
+  "EndTime": "2024-01-12T14:57:40.433614573Z",
+  "Attributes": [
+    {
+      "Key": "vertex",
+      "Value": {
+        "Type": "STRING",
+        "Value": "sha256:7bef658bc90b7db608c3df27d11b99f5785a6a6d1b5fe94ff63c69ae5093851c"
+      }
+    }
+  ],
+  "Events": null,
+  "Links": null,
+  "Status": {
+    "Code": 0,
+    "Description": ""
+  },
+  "DroppedAttributes": 0,
+  "DroppedEvents": 0,
+  "DroppedLinks": 0,
+  "ChildSpanCount": 1,
+  "Resource": [
+    {
+      "Key": "service.name",
+      "Value": {
+        "Type": "STRING",
+        "Value": "dockerd"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.language",
+      "Value": {
+        "Type": "STRING",
+        "Value": "go"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.name",
+      "Value": {
+        "Type": "STRING",
+        "Value": "opentelemetry"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.version",
+      "Value": {
+        "Type": "STRING",
+        "Value": "1.19.0"
+      }
+    }
+  ],
+  "InstrumentationLibrary": {
+    "Name": "go.opentelemetry.io/otel/sdk/tracer",
+    "Version": "",
+    "SchemaURL": ""
+  }
+}
+{
+  "Name": "moby.filesync.v1.Auth/VerifyTokenAuthority",
+  "SpanContext": {
+    "TraceID": "0555ce1903feb85770d102846273073b",
+    "SpanID": "f3a80839f6b95729",
+    "TraceFlags": "01",
+    "TraceState": "",
+    "Remote": false
+  },
+  "Parent": {
+    "TraceID": "0555ce1903feb85770d102846273073b",
+    "SpanID": "14078881261b5312",
+    "TraceFlags": "01",
+    "TraceState": "",
+    "Remote": false
+  },
+  "SpanKind": 3,
+  "StartTime": "2024-01-12T14:57:40.444500758Z",
+  "EndTime": "2024-01-12T14:57:40.524117405Z",
+  "Attributes": [
+    {
+      "Key": "rpc.system",
+      "Value": {
+        "Type": "STRING",
+        "Value": "grpc"
+      }
+    },
+    {
+      "Key": "rpc.service",
+      "Value": {
+        "Type": "STRING",
+        "Value": "moby.filesync.v1.Auth"
+      }
+    },
+    {
+      "Key": "rpc.method",
+      "Value": {
+        "Type": "STRING",
+        "Value": "VerifyTokenAuthority"
+      }
+    },
+    {
+      "Key": "rpc.grpc.status_code",
+      "Value": {
+        "Type": "INT64",
+        "Value": 0
+      }
+    }
+  ],
+  "Events": null,
+  "Links": null,
+  "Status": {
+    "Code": 0,
+    "Description": ""
+  },
+  "DroppedAttributes": 0,
+  "DroppedEvents": 0,
+  "DroppedLinks": 0,
+  "ChildSpanCount": 0,
+  "Resource": [
+    {
+      "Key": "service.name",
+      "Value": {
+        "Type": "STRING",
+        "Value": "dockerd"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.language",
+      "Value": {
+        "Type": "STRING",
+        "Value": "go"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.name",
+      "Value": {
+        "Type": "STRING",
+        "Value": "opentelemetry"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.version",
+      "Value": {
+        "Type": "STRING",
+        "Value": "1.19.0"
+      }
+    }
+  ],
+  "InstrumentationLibrary": {
+    "Name": "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc",
+    "Version": "0.45.0",
+    "SchemaURL": ""
+  }
+}
+{
+  "Name": "moby.filesync.v1.Auth/FetchToken",
+  "SpanContext": {
+    "TraceID": "0555ce1903feb85770d102846273073b",
+    "SpanID": "f26320c4e0b697ac",
+    "TraceFlags": "01",
+    "TraceState": "",
+    "Remote": false
+  },
+  "Parent": {
+    "TraceID": "0555ce1903feb85770d102846273073b",
+    "SpanID": "14078881261b5312",
+    "TraceFlags": "01",
+    "TraceState": "",
+    "Remote": false
+  },
+  "SpanKind": 3,
+  "StartTime": "2024-01-12T14:57:40.524541768Z",
+  "EndTime": "2024-01-12T14:57:40.901439933Z",
+  "Attributes": [
+    {
+      "Key": "rpc.system",
+      "Value": {
+        "Type": "STRING",
+        "Value": "grpc"
+      }
+    },
+    {
+      "Key": "rpc.service",
+      "Value": {
+        "Type": "STRING",
+        "Value": "moby.filesync.v1.Auth"
+      }
+    },
+    {
+      "Key": "rpc.method",
+      "Value": {
+        "Type": "STRING",
+        "Value": "FetchToken"
+      }
+    },
+    {
+      "Key": "rpc.grpc.status_code",
+      "Value": {
+        "Type": "INT64",
+        "Value": 0
+      }
+    }
+  ],
+  "Events": null,
+  "Links": null,
+  "Status": {
+    "Code": 0,
+    "Description": ""
+  },
+  "DroppedAttributes": 0,
+  "DroppedEvents": 0,
+  "DroppedLinks": 0,
+  "ChildSpanCount": 0,
+  "Resource": [
+    {
+      "Key": "service.name",
+      "Value": {
+        "Type": "STRING",
+        "Value": "dockerd"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.language",
+      "Value": {
+        "Type": "STRING",
+        "Value": "go"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.name",
+      "Value": {
+        "Type": "STRING",
+        "Value": "opentelemetry"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.version",
+      "Value": {
+        "Type": "STRING",
+        "Value": "1.19.0"
+      }
+    }
+  ],
+  "InstrumentationLibrary": {
+    "Name": "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc",
+    "Version": "0.45.0",
+    "SchemaURL": ""
+  }
+}
+{
+  "Name": "remotes.docker.resolver.HTTPRequest",
+  "SpanContext": {
+    "TraceID": "0555ce1903feb85770d102846273073b",
+    "SpanID": "0e8cc109bf2f9cfa",
+    "TraceFlags": "01",
+    "TraceState": "",
+    "Remote": false
+  },
+  "Parent": {
+    "TraceID": "0555ce1903feb85770d102846273073b",
+    "SpanID": "14078881261b5312",
+    "TraceFlags": "01",
+    "TraceState": "",
+    "Remote": false
+  },
+  "SpanKind": 3,
+  "StartTime": "2024-01-12T14:57:40.901513761Z",
+  "EndTime": "2024-01-12T14:57:41.271655436Z",
+  "Attributes": [
+    {
+      "Key": "http.method",
+      "Value": {
+        "Type": "STRING",
+        "Value": "HEAD"
+      }
+    },
+    {
+      "Key": "http.flavor",
+      "Value": {
+        "Type": "STRING",
+        "Value": "1.1"
+      }
+    },
+    {
+      "Key": "http.url",
+      "Value": {
+        "Type": "STRING",
+        "Value": "https://registry-1.docker.io/v2/docker/dockerfile/manifests/1.5"
+      }
+    },
+    {
+      "Key": "net.peer.name",
+      "Value": {
+        "Type": "STRING",
+        "Value": "registry-1.docker.io"
+      }
+    },
+    {
+      "Key": "http.user_agent",
+      "Value": {
+        "Type": "STRING",
+        "Value": "buildkit/v0.12-dev"
+      }
+    },
+    {
+      "Key": "http.status_code",
+      "Value": {
+        "Type": "INT64",
+        "Value": 200
+      }
+    },
+    {
+      "Key": "http.response_content_length",
+      "Value": {
+        "Type": "INT64",
+        "Value": 8404
+      }
+    }
+  ],
+  "Events": null,
+  "Links": null,
+  "Status": {
+    "Code": 0,
+    "Description": ""
+  },
+  "DroppedAttributes": 0,
+  "DroppedEvents": 0,
+  "DroppedLinks": 0,
+  "ChildSpanCount": 1,
+  "Resource": [
+    {
+      "Key": "service.name",
+      "Value": {
+        "Type": "STRING",
+        "Value": "dockerd"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.language",
+      "Value": {
+        "Type": "STRING",
+        "Value": "go"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.name",
+      "Value": {
+        "Type": "STRING",
+        "Value": "opentelemetry"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.version",
+      "Value": {
+        "Type": "STRING",
+        "Value": "1.19.0"
+      }
+    }
+  ],
+  "InstrumentationLibrary": {
+    "Name": "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp",
+    "Version": "0.45.0",
+    "SchemaURL": ""
+  }
+}
+{
+  "Name": "HTTP HEAD",
+  "SpanContext": {
+    "TraceID": "0555ce1903feb85770d102846273073b",
+    "SpanID": "f1f5f1e324025582",
+    "TraceFlags": "01",
+    "TraceState": "",
+    "Remote": false
+  },
+  "Parent": {
+    "TraceID": "0555ce1903feb85770d102846273073b",
+    "SpanID": "0e8cc109bf2f9cfa",
+    "TraceFlags": "01",
+    "TraceState": "",
+    "Remote": false
+  },
+  "SpanKind": 3,
+  "StartTime": "2024-01-12T14:57:40.901537045Z",
+  "EndTime": "2024-01-12T14:57:41.271672779Z",
+  "Attributes": [
+    {
+      "Key": "http.method",
+      "Value": {
+        "Type": "STRING",
+        "Value": "HEAD"
+      }
+    },
+    {
+      "Key": "http.flavor",
+      "Value": {
+        "Type": "STRING",
+        "Value": "1.1"
+      }
+    },
+    {
+      "Key": "http.url",
+      "Value": {
+        "Type": "STRING",
+        "Value": "https://registry-1.docker.io/v2/docker/dockerfile/manifests/1.5"
+      }
+    },
+    {
+      "Key": "net.peer.name",
+      "Value": {
+        "Type": "STRING",
+        "Value": "registry-1.docker.io"
+      }
+    },
+    {
+      "Key": "http.user_agent",
+      "Value": {
+        "Type": "STRING",
+        "Value": "buildkit/v0.12-dev"
+      }
+    },
+    {
+      "Key": "http.request.header.host",
+      "Value": {
+        "Type": "STRING",
+        "Value": "registry-1.docker.io"
+      }
+    },
+    {
+      "Key": "http.request.header.user-agent",
+      "Value": {
+        "Type": "STRING",
+        "Value": "buildkit/v0.12-dev"
+      }
+    },
+    {
+      "Key": "http.request.header.accept",
+      "Value": {
+        "Type": "STRING",
+        "Value": "application/vnd.docker.distribution.manifest.v2+json, application/vnd.docker.distribution.manifest.list.v2+json, application/vnd.oci.image.manifest.v1+json, application/vnd.oci.image.index.v1+json, */*"
+      }
+    },
+    {
+      "Key": "http.request.header.authorization",
+      "Value": {
+        "Type": "STRING",
+        "Value": "****"
+      }
+    },
+    {
+      "Key": "http.request.header.traceparent",
+      "Value": {
+        "Type": "STRING",
+        "Value": "00-0555ce1903feb85770d102846273073b-f1f5f1e324025582-01"
+      }
+    },
+    {
+      "Key": "http.status_code",
+      "Value": {
+        "Type": "INT64",
+        "Value": 200
+      }
+    },
+    {
+      "Key": "http.response_content_length",
+      "Value": {
+        "Type": "INT64",
+        "Value": 8404
+      }
+    }
+  ],
+  "Events": [
+    {
+      "Name": "http.getconn.start",
+      "Attributes": [
+        {
+          "Key": "net.host.name",
+          "Value": {
+            "Type": "STRING",
+            "Value": "http.docker.internal:3128"
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:40.901613958Z"
+    },
+    {
+      "Name": "http.dns.start",
+      "Attributes": [
+        {
+          "Key": "net.host.name",
+          "Value": {
+            "Type": "STRING",
+            "Value": "http.docker.internal"
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:40.901691854Z"
+    },
+    {
+      "Name": "http.dns.done",
+      "Attributes": [
+        {
+          "Key": "http.dns.addrs",
+          "Value": {
+            "Type": "STRING",
+            "Value": "192.168.65.1"
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:40.903005838Z"
+    },
+    {
+      "Name": "http.connect.192.168.65.1:3128.start",
+      "Attributes": [
+        {
+          "Key": "http.remote",
+          "Value": {
+            "Type": "STRING",
+            "Value": "192.168.65.1:3128"
+          }
+        },
+        {
+          "Key": "http.conn.start.network",
+          "Value": {
+            "Type": "STRING",
+            "Value": "tcp"
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:40.903010567Z"
+    },
+    {
+      "Name": "http.connect.192.168.65.1:3128.done",
+      "Attributes": [
+        {
+          "Key": "http.conn.done.addr",
+          "Value": {
+            "Type": "STRING",
+            "Value": "192.168.65.1:3128"
+          }
+        },
+        {
+          "Key": "http.conn.done.network",
+          "Value": {
+            "Type": "STRING",
+            "Value": "tcp"
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:40.903518707Z"
+    },
+    {
+      "Name": "http.tls.start",
+      "Attributes": null,
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:40.935298552Z"
+    },
+    {
+      "Name": "http.tls.done",
+      "Attributes": null,
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:41.125870978Z"
+    },
+    {
+      "Name": "http.getconn.done",
+      "Attributes": [
+        {
+          "Key": "http.remote",
+          "Value": {
+            "Type": "STRING",
+            "Value": "192.168.65.1:3128"
+          }
+        },
+        {
+          "Key": "http.local",
+          "Value": {
+            "Type": "STRING",
+            "Value": "192.168.65.3:41236"
+          }
+        },
+        {
+          "Key": "http.conn.reused",
+          "Value": {
+            "Type": "BOOL",
+            "Value": false
+          }
+        },
+        {
+          "Key": "http.conn.wasidle",
+          "Value": {
+            "Type": "BOOL",
+            "Value": false
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:41.12590922Z"
+    },
+    {
+      "Name": "http.send.start",
+      "Attributes": null,
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:41.125942271Z"
+    },
+    {
+      "Name": "http.send.done",
+      "Attributes": null,
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:41.125943253Z"
+    },
+    {
+      "Name": "http.receive.start",
+      "Attributes": null,
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:41.271591096Z"
+    },
+    {
+      "Name": "http.receive.done",
+      "Attributes": null,
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:41.271624759Z"
+    }
+  ],
+  "Links": null,
+  "Status": {
+    "Code": 0,
+    "Description": ""
+  },
+  "DroppedAttributes": 0,
+  "DroppedEvents": 0,
+  "DroppedLinks": 0,
+  "ChildSpanCount": 0,
+  "Resource": [
+    {
+      "Key": "service.name",
+      "Value": {
+        "Type": "STRING",
+        "Value": "dockerd"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.language",
+      "Value": {
+        "Type": "STRING",
+        "Value": "go"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.name",
+      "Value": {
+        "Type": "STRING",
+        "Value": "opentelemetry"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.version",
+      "Value": {
+        "Type": "STRING",
+        "Value": "1.19.0"
+      }
+    }
+  ],
+  "InstrumentationLibrary": {
+    "Name": "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp",
+    "Version": "0.45.0",
+    "SchemaURL": ""
+  }
+}
+{
+  "Name": "cache request: docker-image://docker.io/docker/dockerfile:1.5@sha256:39b85bbfa7536a5feceb7372a0817649ecb2724562a38360f4d6a7782a409b14",
+  "SpanContext": {
+    "TraceID": "0555ce1903feb85770d102846273073b",
+    "SpanID": "6e0d97923598173a",
+    "TraceFlags": "01",
+    "TraceState": "",
+    "Remote": false
+  },
+  "Parent": {
+    "TraceID": "0555ce1903feb85770d102846273073b",
+    "SpanID": "14078881261b5312",
+    "TraceFlags": "01",
+    "TraceState": "",
+    "Remote": false
+  },
+  "SpanKind": 1,
+  "StartTime": "2024-01-12T14:57:41.298320807Z",
+  "EndTime": "2024-01-12T14:57:41.298458013Z",
+  "Attributes": [
+    {
+      "Key": "vertex",
+      "Value": {
+        "Type": "STRING",
+        "Value": "sha256:79dd01740e708982847bb0010c8505e266b4f72ed0ffa354f38e205a15ec3b00"
+      }
+    }
+  ],
+  "Events": null,
+  "Links": null,
+  "Status": {
+    "Code": 0,
+    "Description": ""
+  },
+  "DroppedAttributes": 0,
+  "DroppedEvents": 0,
+  "DroppedLinks": 0,
+  "ChildSpanCount": 0,
+  "Resource": [
+    {
+      "Key": "service.name",
+      "Value": {
+        "Type": "STRING",
+        "Value": "dockerd"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.language",
+      "Value": {
+        "Type": "STRING",
+        "Value": "go"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.name",
+      "Value": {
+        "Type": "STRING",
+        "Value": "opentelemetry"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.version",
+      "Value": {
+        "Type": "STRING",
+        "Value": "1.19.0"
+      }
+    }
+  ],
+  "InstrumentationLibrary": {
+    "Name": "go.opentelemetry.io/otel/sdk/tracer",
+    "Version": "",
+    "SchemaURL": ""
+  }
+}
+{
+  "Name": "load cache: docker-image://docker.io/docker/dockerfile:1.5@sha256:39b85bbfa7536a5feceb7372a0817649ecb2724562a38360f4d6a7782a409b14",
+  "SpanContext": {
+    "TraceID": "0555ce1903feb85770d102846273073b",
+    "SpanID": "8b3cf8274861214e",
+    "TraceFlags": "01",
+    "TraceState": "",
+    "Remote": false
+  },
+  "Parent": {
+    "TraceID": "0555ce1903feb85770d102846273073b",
+    "SpanID": "14078881261b5312",
+    "TraceFlags": "01",
+    "TraceState": "",
+    "Remote": false
+  },
+  "SpanKind": 1,
+  "StartTime": "2024-01-12T14:57:41.298789349Z",
+  "EndTime": "2024-01-12T14:57:41.298824265Z",
+  "Attributes": [
+    {
+      "Key": "vertex",
+      "Value": {
+        "Type": "STRING",
+        "Value": "sha256:79dd01740e708982847bb0010c8505e266b4f72ed0ffa354f38e205a15ec3b00"
+      }
+    }
+  ],
+  "Events": null,
+  "Links": null,
+  "Status": {
+    "Code": 0,
+    "Description": ""
+  },
+  "DroppedAttributes": 0,
+  "DroppedEvents": 0,
+  "DroppedLinks": 0,
+  "ChildSpanCount": 0,
+  "Resource": [
+    {
+      "Key": "service.name",
+      "Value": {
+        "Type": "STRING",
+        "Value": "dockerd"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.language",
+      "Value": {
+        "Type": "STRING",
+        "Value": "go"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.name",
+      "Value": {
+        "Type": "STRING",
+        "Value": "opentelemetry"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.version",
+      "Value": {
+        "Type": "STRING",
+        "Value": "1.19.0"
+      }
+    }
+  ],
+  "InstrumentationLibrary": {
+    "Name": "go.opentelemetry.io/otel/sdk/tracer",
+    "Version": "",
+    "SchemaURL": ""
+  }
+}
+{
+  "Name": "cache request: [internal] load .dockerignore",
+  "SpanContext": {
+    "TraceID": "0555ce1903feb85770d102846273073b",
+    "SpanID": "8303f71942c86c2d",
+    "TraceFlags": "01",
+    "TraceState": "",
+    "Remote": false
+  },
+  "Parent": {
+    "TraceID": "0555ce1903feb85770d102846273073b",
+    "SpanID": "14078881261b5312",
+    "TraceFlags": "01",
+    "TraceState": "",
+    "Remote": false
+  },
+  "SpanKind": 1,
+  "StartTime": "2024-01-12T14:57:41.450353579Z",
+  "EndTime": "2024-01-12T14:57:41.450380529Z",
+  "Attributes": [
+    {
+      "Key": "vertex",
+      "Value": {
+        "Type": "STRING",
+        "Value": "sha256:b9cb39feb3f4ca5b899481fca81551c8eeb5496ccc8b8134b2bc80786efdb313"
+      }
+    }
+  ],
+  "Events": null,
+  "Links": null,
+  "Status": {
+    "Code": 0,
+    "Description": ""
+  },
+  "DroppedAttributes": 0,
+  "DroppedEvents": 0,
+  "DroppedLinks": 0,
+  "ChildSpanCount": 0,
+  "Resource": [
+    {
+      "Key": "service.name",
+      "Value": {
+        "Type": "STRING",
+        "Value": "dockerd"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.language",
+      "Value": {
+        "Type": "STRING",
+        "Value": "go"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.name",
+      "Value": {
+        "Type": "STRING",
+        "Value": "opentelemetry"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.version",
+      "Value": {
+        "Type": "STRING",
+        "Value": "1.19.0"
+      }
+    }
+  ],
+  "InstrumentationLibrary": {
+    "Name": "go.opentelemetry.io/otel/sdk/tracer",
+    "Version": "",
+    "SchemaURL": ""
+  }
+}
+{
+  "Name": "moby.filesync.v1.FileSync/DiffCopy",
+  "SpanContext": {
+    "TraceID": "0555ce1903feb85770d102846273073b",
+    "SpanID": "50509a5bad270153",
+    "TraceFlags": "01",
+    "TraceState": "",
+    "Remote": false
+  },
+  "Parent": {
+    "TraceID": "0555ce1903feb85770d102846273073b",
+    "SpanID": "902f8408a5d9d83c",
+    "TraceFlags": "01",
+    "TraceState": "",
+    "Remote": false
+  },
+  "SpanKind": 3,
+  "StartTime": "2024-01-12T14:57:41.455977674Z",
+  "EndTime": "2024-01-12T14:57:41.464570167Z",
+  "Attributes": [
+    {
+      "Key": "rpc.system",
+      "Value": {
+        "Type": "STRING",
+        "Value": "grpc"
+      }
+    },
+    {
+      "Key": "rpc.service",
+      "Value": {
+        "Type": "STRING",
+        "Value": "moby.filesync.v1.FileSync"
+      }
+    },
+    {
+      "Key": "rpc.method",
+      "Value": {
+        "Type": "STRING",
+        "Value": "DiffCopy"
+      }
+    },
+    {
+      "Key": "rpc.grpc.status_code",
+      "Value": {
+        "Type": "INT64",
+        "Value": 0
+      }
+    }
+  ],
+  "Events": null,
+  "Links": null,
+  "Status": {
+    "Code": 0,
+    "Description": ""
+  },
+  "DroppedAttributes": 0,
+  "DroppedEvents": 0,
+  "DroppedLinks": 0,
+  "ChildSpanCount": 0,
+  "Resource": [
+    {
+      "Key": "service.name",
+      "Value": {
+        "Type": "STRING",
+        "Value": "dockerd"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.language",
+      "Value": {
+        "Type": "STRING",
+        "Value": "go"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.name",
+      "Value": {
+        "Type": "STRING",
+        "Value": "opentelemetry"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.version",
+      "Value": {
+        "Type": "STRING",
+        "Value": "1.19.0"
+      }
+    }
+  ],
+  "InstrumentationLibrary": {
+    "Name": "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc",
+    "Version": "0.45.0",
+    "SchemaURL": ""
+  }
+}
+{
+  "Name": "[internal] load .dockerignore",
+  "SpanContext": {
+    "TraceID": "0555ce1903feb85770d102846273073b",
+    "SpanID": "902f8408a5d9d83c",
+    "TraceFlags": "01",
+    "TraceState": "",
+    "Remote": false
+  },
+  "Parent": {
+    "TraceID": "0555ce1903feb85770d102846273073b",
+    "SpanID": "14078881261b5312",
+    "TraceFlags": "01",
+    "TraceState": "",
+    "Remote": false
+  },
+  "SpanKind": 1,
+  "StartTime": "2024-01-12T14:57:41.450444639Z",
+  "EndTime": "2024-01-12T14:57:41.480068437Z",
+  "Attributes": [
+    {
+      "Key": "vertex",
+      "Value": {
+        "Type": "STRING",
+        "Value": "sha256:b9cb39feb3f4ca5b899481fca81551c8eeb5496ccc8b8134b2bc80786efdb313"
+      }
+    }
+  ],
+  "Events": null,
+  "Links": null,
+  "Status": {
+    "Code": 0,
+    "Description": ""
+  },
+  "DroppedAttributes": 0,
+  "DroppedEvents": 0,
+  "DroppedLinks": 0,
+  "ChildSpanCount": 1,
+  "Resource": [
+    {
+      "Key": "service.name",
+      "Value": {
+        "Type": "STRING",
+        "Value": "dockerd"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.language",
+      "Value": {
+        "Type": "STRING",
+        "Value": "go"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.name",
+      "Value": {
+        "Type": "STRING",
+        "Value": "opentelemetry"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.version",
+      "Value": {
+        "Type": "STRING",
+        "Value": "1.19.0"
+      }
+    }
+  ],
+  "InstrumentationLibrary": {
+    "Name": "go.opentelemetry.io/otel/sdk/tracer",
+    "Version": "",
+    "SchemaURL": ""
+  }
+}
+{
+  "Name": "moby.filesync.v1.Auth/VerifyTokenAuthority",
+  "SpanContext": {
+    "TraceID": "0555ce1903feb85770d102846273073b",
+    "SpanID": "6b3f3a9d50fbe98b",
+    "TraceFlags": "01",
+    "TraceState": "",
+    "Remote": false
+  },
+  "Parent": {
+    "TraceID": "0555ce1903feb85770d102846273073b",
+    "SpanID": "14078881261b5312",
+    "TraceFlags": "01",
+    "TraceState": "",
+    "Remote": false
+  },
+  "SpanKind": 3,
+  "StartTime": "2024-01-12T14:57:41.493477345Z",
+  "EndTime": "2024-01-12T14:57:41.494484637Z",
+  "Attributes": [
+    {
+      "Key": "rpc.system",
+      "Value": {
+        "Type": "STRING",
+        "Value": "grpc"
+      }
+    },
+    {
+      "Key": "rpc.service",
+      "Value": {
+        "Type": "STRING",
+        "Value": "moby.filesync.v1.Auth"
+      }
+    },
+    {
+      "Key": "rpc.method",
+      "Value": {
+        "Type": "STRING",
+        "Value": "VerifyTokenAuthority"
+      }
+    },
+    {
+      "Key": "rpc.grpc.status_code",
+      "Value": {
+        "Type": "INT64",
+        "Value": 0
+      }
+    }
+  ],
+  "Events": null,
+  "Links": null,
+  "Status": {
+    "Code": 0,
+    "Description": ""
+  },
+  "DroppedAttributes": 0,
+  "DroppedEvents": 0,
+  "DroppedLinks": 0,
+  "ChildSpanCount": 0,
+  "Resource": [
+    {
+      "Key": "service.name",
+      "Value": {
+        "Type": "STRING",
+        "Value": "dockerd"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.language",
+      "Value": {
+        "Type": "STRING",
+        "Value": "go"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.name",
+      "Value": {
+        "Type": "STRING",
+        "Value": "opentelemetry"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.version",
+      "Value": {
+        "Type": "STRING",
+        "Value": "1.19.0"
+      }
+    }
+  ],
+  "InstrumentationLibrary": {
+    "Name": "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc",
+    "Version": "0.45.0",
+    "SchemaURL": ""
+  }
+}
+{
+  "Name": "moby.filesync.v1.Auth/VerifyTokenAuthority",
+  "SpanContext": {
+    "TraceID": "0555ce1903feb85770d102846273073b",
+    "SpanID": "f857d0002991458c",
+    "TraceFlags": "01",
+    "TraceState": "",
+    "Remote": false
+  },
+  "Parent": {
+    "TraceID": "0555ce1903feb85770d102846273073b",
+    "SpanID": "14078881261b5312",
+    "TraceFlags": "01",
+    "TraceState": "",
+    "Remote": false
+  },
+  "SpanKind": 3,
+  "StartTime": "2024-01-12T14:57:41.497835726Z",
+  "EndTime": "2024-01-12T14:57:41.498546724Z",
+  "Attributes": [
+    {
+      "Key": "rpc.system",
+      "Value": {
+        "Type": "STRING",
+        "Value": "grpc"
+      }
+    },
+    {
+      "Key": "rpc.service",
+      "Value": {
+        "Type": "STRING",
+        "Value": "moby.filesync.v1.Auth"
+      }
+    },
+    {
+      "Key": "rpc.method",
+      "Value": {
+        "Type": "STRING",
+        "Value": "VerifyTokenAuthority"
+      }
+    },
+    {
+      "Key": "rpc.grpc.status_code",
+      "Value": {
+        "Type": "INT64",
+        "Value": 0
+      }
+    }
+  ],
+  "Events": null,
+  "Links": null,
+  "Status": {
+    "Code": 0,
+    "Description": ""
+  },
+  "DroppedAttributes": 0,
+  "DroppedEvents": 0,
+  "DroppedLinks": 0,
+  "ChildSpanCount": 0,
+  "Resource": [
+    {
+      "Key": "service.name",
+      "Value": {
+        "Type": "STRING",
+        "Value": "dockerd"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.language",
+      "Value": {
+        "Type": "STRING",
+        "Value": "go"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.name",
+      "Value": {
+        "Type": "STRING",
+        "Value": "opentelemetry"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.version",
+      "Value": {
+        "Type": "STRING",
+        "Value": "1.19.0"
+      }
+    }
+  ],
+  "InstrumentationLibrary": {
+    "Name": "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc",
+    "Version": "0.45.0",
+    "SchemaURL": ""
+  }
+}
+{
+  "Name": "moby.filesync.v1.Auth/FetchToken",
+  "SpanContext": {
+    "TraceID": "0555ce1903feb85770d102846273073b",
+    "SpanID": "912de85fdfd20fe7",
+    "TraceFlags": "01",
+    "TraceState": "",
+    "Remote": false
+  },
+  "Parent": {
+    "TraceID": "0555ce1903feb85770d102846273073b",
+    "SpanID": "14078881261b5312",
+    "TraceFlags": "01",
+    "TraceState": "",
+    "Remote": false
+  },
+  "SpanKind": 3,
+  "StartTime": "2024-01-12T14:57:41.498707645Z",
+  "EndTime": "2024-01-12T14:57:41.842024623Z",
+  "Attributes": [
+    {
+      "Key": "rpc.system",
+      "Value": {
+        "Type": "STRING",
+        "Value": "grpc"
+      }
+    },
+    {
+      "Key": "rpc.service",
+      "Value": {
+        "Type": "STRING",
+        "Value": "moby.filesync.v1.Auth"
+      }
+    },
+    {
+      "Key": "rpc.method",
+      "Value": {
+        "Type": "STRING",
+        "Value": "FetchToken"
+      }
+    },
+    {
+      "Key": "rpc.grpc.status_code",
+      "Value": {
+        "Type": "INT64",
+        "Value": 0
+      }
+    }
+  ],
+  "Events": null,
+  "Links": null,
+  "Status": {
+    "Code": 0,
+    "Description": ""
+  },
+  "DroppedAttributes": 0,
+  "DroppedEvents": 0,
+  "DroppedLinks": 0,
+  "ChildSpanCount": 0,
+  "Resource": [
+    {
+      "Key": "service.name",
+      "Value": {
+        "Type": "STRING",
+        "Value": "dockerd"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.language",
+      "Value": {
+        "Type": "STRING",
+        "Value": "go"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.name",
+      "Value": {
+        "Type": "STRING",
+        "Value": "opentelemetry"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.version",
+      "Value": {
+        "Type": "STRING",
+        "Value": "1.19.0"
+      }
+    }
+  ],
+  "InstrumentationLibrary": {
+    "Name": "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc",
+    "Version": "0.45.0",
+    "SchemaURL": ""
+  }
+}
+{
+  "Name": "moby.filesync.v1.Auth/FetchToken",
+  "SpanContext": {
+    "TraceID": "0555ce1903feb85770d102846273073b",
+    "SpanID": "d9ed48c061e2ecfd",
+    "TraceFlags": "01",
+    "TraceState": "",
+    "Remote": false
+  },
+  "Parent": {
+    "TraceID": "0555ce1903feb85770d102846273073b",
+    "SpanID": "14078881261b5312",
+    "TraceFlags": "01",
+    "TraceState": "",
+    "Remote": false
+  },
+  "SpanKind": 3,
+  "StartTime": "2024-01-12T14:57:41.494633044Z",
+  "EndTime": "2024-01-12T14:57:41.844336811Z",
+  "Attributes": [
+    {
+      "Key": "rpc.system",
+      "Value": {
+        "Type": "STRING",
+        "Value": "grpc"
+      }
+    },
+    {
+      "Key": "rpc.service",
+      "Value": {
+        "Type": "STRING",
+        "Value": "moby.filesync.v1.Auth"
+      }
+    },
+    {
+      "Key": "rpc.method",
+      "Value": {
+        "Type": "STRING",
+        "Value": "FetchToken"
+      }
+    },
+    {
+      "Key": "rpc.grpc.status_code",
+      "Value": {
+        "Type": "INT64",
+        "Value": 0
+      }
+    }
+  ],
+  "Events": null,
+  "Links": null,
+  "Status": {
+    "Code": 0,
+    "Description": ""
+  },
+  "DroppedAttributes": 0,
+  "DroppedEvents": 0,
+  "DroppedLinks": 0,
+  "ChildSpanCount": 0,
+  "Resource": [
+    {
+      "Key": "service.name",
+      "Value": {
+        "Type": "STRING",
+        "Value": "dockerd"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.language",
+      "Value": {
+        "Type": "STRING",
+        "Value": "go"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.name",
+      "Value": {
+        "Type": "STRING",
+        "Value": "opentelemetry"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.version",
+      "Value": {
+        "Type": "STRING",
+        "Value": "1.19.0"
+      }
+    }
+  ],
+  "InstrumentationLibrary": {
+    "Name": "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc",
+    "Version": "0.45.0",
+    "SchemaURL": ""
+  }
+}
+{
+  "Name": "remotes.docker.resolver.HTTPRequest",
+  "SpanContext": {
+    "TraceID": "0555ce1903feb85770d102846273073b",
+    "SpanID": "cf5aa3b6eba7323c",
+    "TraceFlags": "01",
+    "TraceState": "",
+    "Remote": false
+  },
+  "Parent": {
+    "TraceID": "0555ce1903feb85770d102846273073b",
+    "SpanID": "14078881261b5312",
+    "TraceFlags": "01",
+    "TraceState": "",
+    "Remote": false
+  },
+  "SpanKind": 3,
+  "StartTime": "2024-01-12T14:57:41.844389359Z",
+  "EndTime": "2024-01-12T14:57:42.201524037Z",
+  "Attributes": [
+    {
+      "Key": "http.method",
+      "Value": {
+        "Type": "STRING",
+        "Value": "HEAD"
+      }
+    },
+    {
+      "Key": "http.flavor",
+      "Value": {
+        "Type": "STRING",
+        "Value": "1.1"
+      }
+    },
+    {
+      "Key": "http.url",
+      "Value": {
+        "Type": "STRING",
+        "Value": "https://registry-1.docker.io/v2/tonistiigi/bats-assert/manifests/latest"
+      }
+    },
+    {
+      "Key": "net.peer.name",
+      "Value": {
+        "Type": "STRING",
+        "Value": "registry-1.docker.io"
+      }
+    },
+    {
+      "Key": "http.user_agent",
+      "Value": {
+        "Type": "STRING",
+        "Value": "buildkit/v0.12-dev"
+      }
+    },
+    {
+      "Key": "http.status_code",
+      "Value": {
+        "Type": "INT64",
+        "Value": 200
+      }
+    },
+    {
+      "Key": "http.response_content_length",
+      "Value": {
+        "Type": "INT64",
+        "Value": 6942
+      }
+    }
+  ],
+  "Events": null,
+  "Links": null,
+  "Status": {
+    "Code": 0,
+    "Description": ""
+  },
+  "DroppedAttributes": 0,
+  "DroppedEvents": 0,
+  "DroppedLinks": 0,
+  "ChildSpanCount": 1,
+  "Resource": [
+    {
+      "Key": "service.name",
+      "Value": {
+        "Type": "STRING",
+        "Value": "dockerd"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.language",
+      "Value": {
+        "Type": "STRING",
+        "Value": "go"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.name",
+      "Value": {
+        "Type": "STRING",
+        "Value": "opentelemetry"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.version",
+      "Value": {
+        "Type": "STRING",
+        "Value": "1.19.0"
+      }
+    }
+  ],
+  "InstrumentationLibrary": {
+    "Name": "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp",
+    "Version": "0.45.0",
+    "SchemaURL": ""
+  }
+}
+{
+  "Name": "HTTP HEAD",
+  "SpanContext": {
+    "TraceID": "0555ce1903feb85770d102846273073b",
+    "SpanID": "5b70cf0df5f0a9bc",
+    "TraceFlags": "01",
+    "TraceState": "",
+    "Remote": false
+  },
+  "Parent": {
+    "TraceID": "0555ce1903feb85770d102846273073b",
+    "SpanID": "cf5aa3b6eba7323c",
+    "TraceFlags": "01",
+    "TraceState": "",
+    "Remote": false
+  },
+  "SpanKind": 3,
+  "StartTime": "2024-01-12T14:57:41.844404587Z",
+  "EndTime": "2024-01-12T14:57:42.20154152Z",
+  "Attributes": [
+    {
+      "Key": "http.method",
+      "Value": {
+        "Type": "STRING",
+        "Value": "HEAD"
+      }
+    },
+    {
+      "Key": "http.flavor",
+      "Value": {
+        "Type": "STRING",
+        "Value": "1.1"
+      }
+    },
+    {
+      "Key": "http.url",
+      "Value": {
+        "Type": "STRING",
+        "Value": "https://registry-1.docker.io/v2/tonistiigi/bats-assert/manifests/latest"
+      }
+    },
+    {
+      "Key": "net.peer.name",
+      "Value": {
+        "Type": "STRING",
+        "Value": "registry-1.docker.io"
+      }
+    },
+    {
+      "Key": "http.user_agent",
+      "Value": {
+        "Type": "STRING",
+        "Value": "buildkit/v0.12-dev"
+      }
+    },
+    {
+      "Key": "http.request.header.host",
+      "Value": {
+        "Type": "STRING",
+        "Value": "registry-1.docker.io"
+      }
+    },
+    {
+      "Key": "http.request.header.user-agent",
+      "Value": {
+        "Type": "STRING",
+        "Value": "buildkit/v0.12-dev"
+      }
+    },
+    {
+      "Key": "http.request.header.accept",
+      "Value": {
+        "Type": "STRING",
+        "Value": "application/vnd.docker.distribution.manifest.v2+json, application/vnd.docker.distribution.manifest.list.v2+json, application/vnd.oci.image.manifest.v1+json, application/vnd.oci.image.index.v1+json, */*"
+      }
+    },
+    {
+      "Key": "http.request.header.authorization",
+      "Value": {
+        "Type": "STRING",
+        "Value": "****"
+      }
+    },
+    {
+      "Key": "http.request.header.traceparent",
+      "Value": {
+        "Type": "STRING",
+        "Value": "00-0555ce1903feb85770d102846273073b-5b70cf0df5f0a9bc-01"
+      }
+    },
+    {
+      "Key": "http.status_code",
+      "Value": {
+        "Type": "INT64",
+        "Value": 200
+      }
+    },
+    {
+      "Key": "http.response_content_length",
+      "Value": {
+        "Type": "INT64",
+        "Value": 6942
+      }
+    }
+  ],
+  "Events": [
+    {
+      "Name": "http.getconn.start",
+      "Attributes": [
+        {
+          "Key": "net.host.name",
+          "Value": {
+            "Type": "STRING",
+            "Value": "http.docker.internal:3128"
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:41.844420377Z"
+    },
+    {
+      "Name": "http.dns.start",
+      "Attributes": [
+        {
+          "Key": "net.host.name",
+          "Value": {
+            "Type": "STRING",
+            "Value": "http.docker.internal"
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:41.844454851Z"
+    },
+    {
+      "Name": "http.dns.done",
+      "Attributes": [
+        {
+          "Key": "http.dns.addrs",
+          "Value": {
+            "Type": "STRING",
+            "Value": "192.168.65.1"
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:41.845276957Z"
+    },
+    {
+      "Name": "http.connect.192.168.65.1:3128.start",
+      "Attributes": [
+        {
+          "Key": "http.remote",
+          "Value": {
+            "Type": "STRING",
+            "Value": "192.168.65.1:3128"
+          }
+        },
+        {
+          "Key": "http.conn.start.network",
+          "Value": {
+            "Type": "STRING",
+            "Value": "tcp"
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:41.845280875Z"
+    },
+    {
+      "Name": "http.connect.192.168.65.1:3128.done",
+      "Attributes": [
+        {
+          "Key": "http.conn.done.addr",
+          "Value": {
+            "Type": "STRING",
+            "Value": "192.168.65.1:3128"
+          }
+        },
+        {
+          "Key": "http.conn.done.network",
+          "Value": {
+            "Type": "STRING",
+            "Value": "tcp"
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:41.845736215Z"
+    },
+    {
+      "Name": "http.tls.start",
+      "Attributes": null,
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:41.863139846Z"
+    },
+    {
+      "Name": "http.tls.done",
+      "Attributes": null,
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:42.052708645Z"
+    },
+    {
+      "Name": "http.getconn.done",
+      "Attributes": [
+        {
+          "Key": "http.remote",
+          "Value": {
+            "Type": "STRING",
+            "Value": "192.168.65.1:3128"
+          }
+        },
+        {
+          "Key": "http.local",
+          "Value": {
+            "Type": "STRING",
+            "Value": "192.168.65.3:41262"
+          }
+        },
+        {
+          "Key": "http.conn.reused",
+          "Value": {
+            "Type": "BOOL",
+            "Value": false
+          }
+        },
+        {
+          "Key": "http.conn.wasidle",
+          "Value": {
+            "Type": "BOOL",
+            "Value": false
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:42.052739352Z"
+    },
+    {
+      "Name": "http.send.start",
+      "Attributes": null,
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:42.052770821Z"
+    },
+    {
+      "Name": "http.send.done",
+      "Attributes": null,
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:42.052771582Z"
+    },
+    {
+      "Name": "http.receive.start",
+      "Attributes": null,
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:42.201435171Z"
+    },
+    {
+      "Name": "http.receive.done",
+      "Attributes": null,
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:42.201485435Z"
+    }
+  ],
+  "Links": null,
+  "Status": {
+    "Code": 0,
+    "Description": ""
+  },
+  "DroppedAttributes": 0,
+  "DroppedEvents": 0,
+  "DroppedLinks": 0,
+  "ChildSpanCount": 0,
+  "Resource": [
+    {
+      "Key": "service.name",
+      "Value": {
+        "Type": "STRING",
+        "Value": "dockerd"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.language",
+      "Value": {
+        "Type": "STRING",
+        "Value": "go"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.name",
+      "Value": {
+        "Type": "STRING",
+        "Value": "opentelemetry"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.version",
+      "Value": {
+        "Type": "STRING",
+        "Value": "1.19.0"
+      }
+    }
+  ],
+  "InstrumentationLibrary": {
+    "Name": "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp",
+    "Version": "0.45.0",
+    "SchemaURL": ""
+  }
+}
+{
+  "Name": "remotes.docker.resolver.HTTPRequest",
+  "SpanContext": {
+    "TraceID": "0555ce1903feb85770d102846273073b",
+    "SpanID": "323fa81aa6a2dea5",
+    "TraceFlags": "01",
+    "TraceState": "",
+    "Remote": false
+  },
+  "Parent": {
+    "TraceID": "0555ce1903feb85770d102846273073b",
+    "SpanID": "14078881261b5312",
+    "TraceFlags": "01",
+    "TraceState": "",
+    "Remote": false
+  },
+  "SpanKind": 3,
+  "StartTime": "2024-01-12T14:57:41.842101356Z",
+  "EndTime": "2024-01-12T14:57:42.208552526Z",
+  "Attributes": [
+    {
+      "Key": "http.method",
+      "Value": {
+        "Type": "STRING",
+        "Value": "HEAD"
+      }
+    },
+    {
+      "Key": "http.flavor",
+      "Value": {
+        "Type": "STRING",
+        "Value": "1.1"
+      }
+    },
+    {
+      "Key": "http.url",
+      "Value": {
+        "Type": "STRING",
+        "Value": "https://registry-1.docker.io/v2/library/alpine/manifests/latest"
+      }
+    },
+    {
+      "Key": "net.peer.name",
+      "Value": {
+        "Type": "STRING",
+        "Value": "registry-1.docker.io"
+      }
+    },
+    {
+      "Key": "http.user_agent",
+      "Value": {
+        "Type": "STRING",
+        "Value": "buildkit/v0.12-dev"
+      }
+    },
+    {
+      "Key": "http.status_code",
+      "Value": {
+        "Type": "INT64",
+        "Value": 200
+      }
+    },
+    {
+      "Key": "http.response_content_length",
+      "Value": {
+        "Type": "INT64",
+        "Value": 1638
+      }
+    }
+  ],
+  "Events": null,
+  "Links": null,
+  "Status": {
+    "Code": 0,
+    "Description": ""
+  },
+  "DroppedAttributes": 0,
+  "DroppedEvents": 0,
+  "DroppedLinks": 0,
+  "ChildSpanCount": 1,
+  "Resource": [
+    {
+      "Key": "service.name",
+      "Value": {
+        "Type": "STRING",
+        "Value": "dockerd"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.language",
+      "Value": {
+        "Type": "STRING",
+        "Value": "go"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.name",
+      "Value": {
+        "Type": "STRING",
+        "Value": "opentelemetry"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.version",
+      "Value": {
+        "Type": "STRING",
+        "Value": "1.19.0"
+      }
+    }
+  ],
+  "InstrumentationLibrary": {
+    "Name": "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp",
+    "Version": "0.45.0",
+    "SchemaURL": ""
+  }
+}
+{
+  "Name": "HTTP HEAD",
+  "SpanContext": {
+    "TraceID": "0555ce1903feb85770d102846273073b",
+    "SpanID": "990f9e6b761681f6",
+    "TraceFlags": "01",
+    "TraceState": "",
+    "Remote": false
+  },
+  "Parent": {
+    "TraceID": "0555ce1903feb85770d102846273073b",
+    "SpanID": "323fa81aa6a2dea5",
+    "TraceFlags": "01",
+    "TraceState": "",
+    "Remote": false
+  },
+  "SpanKind": 3,
+  "StartTime": "2024-01-12T14:57:41.842125231Z",
+  "EndTime": "2024-01-12T14:57:42.208564549Z",
+  "Attributes": [
+    {
+      "Key": "http.method",
+      "Value": {
+        "Type": "STRING",
+        "Value": "HEAD"
+      }
+    },
+    {
+      "Key": "http.flavor",
+      "Value": {
+        "Type": "STRING",
+        "Value": "1.1"
+      }
+    },
+    {
+      "Key": "http.url",
+      "Value": {
+        "Type": "STRING",
+        "Value": "https://registry-1.docker.io/v2/library/alpine/manifests/latest"
+      }
+    },
+    {
+      "Key": "net.peer.name",
+      "Value": {
+        "Type": "STRING",
+        "Value": "registry-1.docker.io"
+      }
+    },
+    {
+      "Key": "http.user_agent",
+      "Value": {
+        "Type": "STRING",
+        "Value": "buildkit/v0.12-dev"
+      }
+    },
+    {
+      "Key": "http.request.header.host",
+      "Value": {
+        "Type": "STRING",
+        "Value": "registry-1.docker.io"
+      }
+    },
+    {
+      "Key": "http.request.header.user-agent",
+      "Value": {
+        "Type": "STRING",
+        "Value": "buildkit/v0.12-dev"
+      }
+    },
+    {
+      "Key": "http.request.header.accept",
+      "Value": {
+        "Type": "STRING",
+        "Value": "application/vnd.docker.distribution.manifest.v2+json, application/vnd.docker.distribution.manifest.list.v2+json, application/vnd.oci.image.manifest.v1+json, application/vnd.oci.image.index.v1+json, */*"
+      }
+    },
+    {
+      "Key": "http.request.header.authorization",
+      "Value": {
+        "Type": "STRING",
+        "Value": "****"
+      }
+    },
+    {
+      "Key": "http.request.header.traceparent",
+      "Value": {
+        "Type": "STRING",
+        "Value": "00-0555ce1903feb85770d102846273073b-990f9e6b761681f6-01"
+      }
+    },
+    {
+      "Key": "http.status_code",
+      "Value": {
+        "Type": "INT64",
+        "Value": 200
+      }
+    },
+    {
+      "Key": "http.response_content_length",
+      "Value": {
+        "Type": "INT64",
+        "Value": 1638
+      }
+    }
+  ],
+  "Events": [
+    {
+      "Name": "http.getconn.start",
+      "Attributes": [
+        {
+          "Key": "net.host.name",
+          "Value": {
+            "Type": "STRING",
+            "Value": "http.docker.internal:3128"
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:41.842152702Z"
+    },
+    {
+      "Name": "http.dns.start",
+      "Attributes": [
+        {
+          "Key": "net.host.name",
+          "Value": {
+            "Type": "STRING",
+            "Value": "http.docker.internal"
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:41.842195061Z"
+    },
+    {
+      "Name": "http.dns.done",
+      "Attributes": [
+        {
+          "Key": "http.dns.addrs",
+          "Value": {
+            "Type": "STRING",
+            "Value": "192.168.65.1"
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:41.843327715Z"
+    },
+    {
+      "Name": "http.connect.192.168.65.1:3128.start",
+      "Attributes": [
+        {
+          "Key": "http.remote",
+          "Value": {
+            "Type": "STRING",
+            "Value": "192.168.65.1:3128"
+          }
+        },
+        {
+          "Key": "http.conn.start.network",
+          "Value": {
+            "Type": "STRING",
+            "Value": "tcp"
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:41.843335069Z"
+    },
+    {
+      "Name": "http.connect.192.168.65.1:3128.done",
+      "Attributes": [
+        {
+          "Key": "http.conn.done.addr",
+          "Value": {
+            "Type": "STRING",
+            "Value": "192.168.65.1:3128"
+          }
+        },
+        {
+          "Key": "http.conn.done.network",
+          "Value": {
+            "Type": "STRING",
+            "Value": "tcp"
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:41.843911496Z"
+    },
+    {
+      "Name": "http.tls.start",
+      "Attributes": null,
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:41.862155336Z"
+    },
+    {
+      "Name": "http.tls.done",
+      "Attributes": null,
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:42.05636278Z"
+    },
+    {
+      "Name": "http.getconn.done",
+      "Attributes": [
+        {
+          "Key": "http.remote",
+          "Value": {
+            "Type": "STRING",
+            "Value": "192.168.65.1:3128"
+          }
+        },
+        {
+          "Key": "http.local",
+          "Value": {
+            "Type": "STRING",
+            "Value": "192.168.65.3:41248"
+          }
+        },
+        {
+          "Key": "http.conn.reused",
+          "Value": {
+            "Type": "BOOL",
+            "Value": false
+          }
+        },
+        {
+          "Key": "http.conn.wasidle",
+          "Value": {
+            "Type": "BOOL",
+            "Value": false
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:42.056400801Z"
+    },
+    {
+      "Name": "http.send.start",
+      "Attributes": null,
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:42.056421069Z"
+    },
+    {
+      "Name": "http.send.done",
+      "Attributes": null,
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:42.056421771Z"
+    },
+    {
+      "Name": "http.receive.start",
+      "Attributes": null,
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:42.2084957Z"
+    },
+    {
+      "Name": "http.receive.done",
+      "Attributes": null,
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:42.208525526Z"
+    }
+  ],
+  "Links": null,
+  "Status": {
+    "Code": 0,
+    "Description": ""
+  },
+  "DroppedAttributes": 0,
+  "DroppedEvents": 0,
+  "DroppedLinks": 0,
+  "ChildSpanCount": 0,
+  "Resource": [
+    {
+      "Key": "service.name",
+      "Value": {
+        "Type": "STRING",
+        "Value": "dockerd"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.language",
+      "Value": {
+        "Type": "STRING",
+        "Value": "go"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.name",
+      "Value": {
+        "Type": "STRING",
+        "Value": "opentelemetry"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.version",
+      "Value": {
+        "Type": "STRING",
+        "Value": "1.19.0"
+      }
+    }
+  ],
+  "InstrumentationLibrary": {
+    "Name": "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp",
+    "Version": "0.45.0",
+    "SchemaURL": ""
+  }
+}
+{
+  "Name": "moby.buildkit.v1.frontend.LLBBridge/Solve",
+  "SpanContext": {
+    "TraceID": "0555ce1903feb85770d102846273073b",
+    "SpanID": "14078881261b5312",
+    "TraceFlags": "01",
+    "TraceState": "",
+    "Remote": false
+  },
+  "Parent": {
+    "TraceID": "0555ce1903feb85770d102846273073b",
+    "SpanID": "4548113210977542",
+    "TraceFlags": "01",
+    "TraceState": "",
+    "Remote": true
+  },
+  "SpanKind": 2,
+  "StartTime": "2024-01-12T14:57:40.401260424Z",
+  "EndTime": "2024-01-12T14:57:42.284524255Z",
+  "Attributes": [
+    {
+      "Key": "rpc.system",
+      "Value": {
+        "Type": "STRING",
+        "Value": "grpc"
+      }
+    },
+    {
+      "Key": "rpc.service",
+      "Value": {
+        "Type": "STRING",
+        "Value": "moby.buildkit.v1.frontend.LLBBridge"
+      }
+    },
+    {
+      "Key": "rpc.method",
+      "Value": {
+        "Type": "STRING",
+        "Value": "Solve"
+      }
+    },
+    {
+      "Key": "rpc.grpc.status_code",
+      "Value": {
+        "Type": "INT64",
+        "Value": 0
+      }
+    }
+  ],
+  "Events": [
+    {
+      "Name": "Container created",
+      "Attributes": null,
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:41.337936172Z"
+    },
+    {
+      "Name": "Container started",
+      "Attributes": null,
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:41.338262401Z"
+    },
+    {
+      "Name": "Container exited",
+      "Attributes": [
+        {
+          "Key": "exit.code",
+          "Value": {
+            "Type": "INT64",
+            "Value": 0
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:42.255546744Z"
+    }
+  ],
+  "Links": null,
+  "Status": {
+    "Code": 0,
+    "Description": ""
+  },
+  "DroppedAttributes": 0,
+  "DroppedEvents": 0,
+  "DroppedLinks": 0,
+  "ChildSpanCount": 9,
+  "Resource": [
+    {
+      "Key": "service.name",
+      "Value": {
+        "Type": "STRING",
+        "Value": "dockerd"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.language",
+      "Value": {
+        "Type": "STRING",
+        "Value": "go"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.name",
+      "Value": {
+        "Type": "STRING",
+        "Value": "opentelemetry"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.version",
+      "Value": {
+        "Type": "STRING",
+        "Value": "1.19.0"
+      }
+    }
+  ],
+  "InstrumentationLibrary": {
+    "Name": "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc",
+    "Version": "0.45.0",
+    "SchemaURL": ""
+  }
+}
+{
+  "Name": "moby.buildkit.v1.frontend.LLBBridge/Return",
+  "SpanContext": {
+    "TraceID": "0555ce1903feb85770d102846273073b",
+    "SpanID": "424053ee88cf9600",
+    "TraceFlags": "01",
+    "TraceState": "",
+    "Remote": false
+  },
+  "Parent": {
+    "TraceID": "0555ce1903feb85770d102846273073b",
+    "SpanID": "76cae146240f0505",
+    "TraceFlags": "01",
+    "TraceState": "",
+    "Remote": true
+  },
+  "SpanKind": 2,
+  "StartTime": "2024-01-12T14:57:42.286310429Z",
+  "EndTime": "2024-01-12T14:57:42.286347388Z",
+  "Attributes": [
+    {
+      "Key": "rpc.system",
+      "Value": {
+        "Type": "STRING",
+        "Value": "grpc"
+      }
+    },
+    {
+      "Key": "rpc.service",
+      "Value": {
+        "Type": "STRING",
+        "Value": "moby.buildkit.v1.frontend.LLBBridge"
+      }
+    },
+    {
+      "Key": "rpc.method",
+      "Value": {
+        "Type": "STRING",
+        "Value": "Return"
+      }
+    },
+    {
+      "Key": "rpc.grpc.status_code",
+      "Value": {
+        "Type": "INT64",
+        "Value": 0
+      }
+    }
+  ],
+  "Events": null,
+  "Links": null,
+  "Status": {
+    "Code": 0,
+    "Description": ""
+  },
+  "DroppedAttributes": 0,
+  "DroppedEvents": 0,
+  "DroppedLinks": 0,
+  "ChildSpanCount": 0,
+  "Resource": [
+    {
+      "Key": "service.name",
+      "Value": {
+        "Type": "STRING",
+        "Value": "dockerd"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.language",
+      "Value": {
+        "Type": "STRING",
+        "Value": "go"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.name",
+      "Value": {
+        "Type": "STRING",
+        "Value": "opentelemetry"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.version",
+      "Value": {
+        "Type": "STRING",
+        "Value": "1.19.0"
+      }
+    }
+  ],
+  "InstrumentationLibrary": {
+    "Name": "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc",
+    "Version": "0.45.0",
+    "SchemaURL": ""
+  }
+}
+{
+  "Name": "cache request: [build 1/3] FROM docker.io/library/alpine@sha256:51b67269f354137895d43f3b3d810bfacd3945438e94dc5ac55fdac340352f48",
+  "SpanContext": {
+    "TraceID": "0555ce1903feb85770d102846273073b",
+    "SpanID": "121feef5c8cc97ad",
+    "TraceFlags": "01",
+    "TraceState": "",
+    "Remote": false
+  },
+  "Parent": {
+    "TraceID": "0555ce1903feb85770d102846273073b",
+    "SpanID": "f1bb978313184255",
+    "TraceFlags": "01",
+    "TraceState": "",
+    "Remote": false
+  },
+  "SpanKind": 1,
+  "StartTime": "2024-01-12T14:57:42.287014966Z",
+  "EndTime": "2024-01-12T14:57:42.28724194Z",
+  "Attributes": [
+    {
+      "Key": "vertex",
+      "Value": {
+        "Type": "STRING",
+        "Value": "sha256:68ce956d6bef78a30f5452b0c12c5f918cd9e67c18ebe8b864b0a483dc147258"
+      }
+    }
+  ],
+  "Events": null,
+  "Links": null,
+  "Status": {
+    "Code": 0,
+    "Description": ""
+  },
+  "DroppedAttributes": 0,
+  "DroppedEvents": 0,
+  "DroppedLinks": 0,
+  "ChildSpanCount": 0,
+  "Resource": [
+    {
+      "Key": "service.name",
+      "Value": {
+        "Type": "STRING",
+        "Value": "dockerd"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.language",
+      "Value": {
+        "Type": "STRING",
+        "Value": "go"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.name",
+      "Value": {
+        "Type": "STRING",
+        "Value": "opentelemetry"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.version",
+      "Value": {
+        "Type": "STRING",
+        "Value": "1.19.0"
+      }
+    }
+  ],
+  "InstrumentationLibrary": {
+    "Name": "go.opentelemetry.io/otel/sdk/tracer",
+    "Version": "",
+    "SchemaURL": ""
+  }
+}
+{
+  "Name": "cache request: [internal] load build context",
+  "SpanContext": {
+    "TraceID": "0555ce1903feb85770d102846273073b",
+    "SpanID": "d5db73e91af3df92",
+    "TraceFlags": "01",
+    "TraceState": "",
+    "Remote": false
+  },
+  "Parent": {
+    "TraceID": "0555ce1903feb85770d102846273073b",
+    "SpanID": "f1bb978313184255",
+    "TraceFlags": "01",
+    "TraceState": "",
+    "Remote": false
+  },
+  "SpanKind": 1,
+  "StartTime": "2024-01-12T14:57:42.287008985Z",
+  "EndTime": "2024-01-12T14:57:42.287274511Z",
+  "Attributes": [
+    {
+      "Key": "vertex",
+      "Value": {
+        "Type": "STRING",
+        "Value": "sha256:d6e40c1360ca7a3794673b2c27130b8a0cc88712faa287cb5f0060b14f025381"
+      }
+    }
+  ],
+  "Events": null,
+  "Links": null,
+  "Status": {
+    "Code": 0,
+    "Description": ""
+  },
+  "DroppedAttributes": 0,
+  "DroppedEvents": 0,
+  "DroppedLinks": 0,
+  "ChildSpanCount": 0,
+  "Resource": [
+    {
+      "Key": "service.name",
+      "Value": {
+        "Type": "STRING",
+        "Value": "dockerd"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.language",
+      "Value": {
+        "Type": "STRING",
+        "Value": "go"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.name",
+      "Value": {
+        "Type": "STRING",
+        "Value": "opentelemetry"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.version",
+      "Value": {
+        "Type": "STRING",
+        "Value": "1.19.0"
+      }
+    }
+  ],
+  "InstrumentationLibrary": {
+    "Name": "go.opentelemetry.io/otel/sdk/tracer",
+    "Version": "",
+    "SchemaURL": ""
+  }
+}
+{
+  "Name": "cache request: [bats-assert 1/1] FROM docker.io/tonistiigi/bats-assert@sha256:813f357fb86180c44bb6aaf155ff06573a630b3b2e0115405b0cb65116319551",
+  "SpanContext": {
+    "TraceID": "0555ce1903feb85770d102846273073b",
+    "SpanID": "efc51d00622f3291",
+    "TraceFlags": "01",
+    "TraceState": "",
+    "Remote": false
+  },
+  "Parent": {
+    "TraceID": "0555ce1903feb85770d102846273073b",
+    "SpanID": "f1bb978313184255",
+    "TraceFlags": "01",
+    "TraceState": "",
+    "Remote": false
+  },
+  "SpanKind": 1,
+  "StartTime": "2024-01-12T14:57:42.287053417Z",
+  "EndTime": "2024-01-12T14:57:42.287283046Z",
+  "Attributes": [
+    {
+      "Key": "vertex",
+      "Value": {
+        "Type": "STRING",
+        "Value": "sha256:f34255b862e83e8a0427c87cf1e440bede7c30190733b1691b1abe35010fc318"
+      }
+    }
+  ],
+  "Events": null,
+  "Links": null,
+  "Status": {
+    "Code": 0,
+    "Description": ""
+  },
+  "DroppedAttributes": 0,
+  "DroppedEvents": 0,
+  "DroppedLinks": 0,
+  "ChildSpanCount": 0,
+  "Resource": [
+    {
+      "Key": "service.name",
+      "Value": {
+        "Type": "STRING",
+        "Value": "dockerd"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.language",
+      "Value": {
+        "Type": "STRING",
+        "Value": "go"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.name",
+      "Value": {
+        "Type": "STRING",
+        "Value": "opentelemetry"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.version",
+      "Value": {
+        "Type": "STRING",
+        "Value": "1.19.0"
+      }
+    }
+  ],
+  "InstrumentationLibrary": {
+    "Name": "go.opentelemetry.io/otel/sdk/tracer",
+    "Version": "",
+    "SchemaURL": ""
+  }
+}
+{
+  "Name": "moby.filesync.v1.FileSync/DiffCopy",
+  "SpanContext": {
+    "TraceID": "0555ce1903feb85770d102846273073b",
+    "SpanID": "0f4681a3ad4d29f8",
+    "TraceFlags": "01",
+    "TraceState": "",
+    "Remote": false
+  },
+  "Parent": {
+    "TraceID": "0555ce1903feb85770d102846273073b",
+    "SpanID": "4e95c6912df2c60b",
+    "TraceFlags": "01",
+    "TraceState": "",
+    "Remote": false
+  },
+  "SpanKind": 3,
+  "StartTime": "2024-01-12T14:57:42.293077322Z",
+  "EndTime": "2024-01-12T14:57:42.369507285Z",
+  "Attributes": [
+    {
+      "Key": "rpc.system",
+      "Value": {
+        "Type": "STRING",
+        "Value": "grpc"
+      }
+    },
+    {
+      "Key": "rpc.service",
+      "Value": {
+        "Type": "STRING",
+        "Value": "moby.filesync.v1.FileSync"
+      }
+    },
+    {
+      "Key": "rpc.method",
+      "Value": {
+        "Type": "STRING",
+        "Value": "DiffCopy"
+      }
+    },
+    {
+      "Key": "rpc.grpc.status_code",
+      "Value": {
+        "Type": "INT64",
+        "Value": 0
+      }
+    }
+  ],
+  "Events": null,
+  "Links": null,
+  "Status": {
+    "Code": 0,
+    "Description": ""
+  },
+  "DroppedAttributes": 0,
+  "DroppedEvents": 0,
+  "DroppedLinks": 0,
+  "ChildSpanCount": 0,
+  "Resource": [
+    {
+      "Key": "service.name",
+      "Value": {
+        "Type": "STRING",
+        "Value": "dockerd"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.language",
+      "Value": {
+        "Type": "STRING",
+        "Value": "go"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.name",
+      "Value": {
+        "Type": "STRING",
+        "Value": "opentelemetry"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.version",
+      "Value": {
+        "Type": "STRING",
+        "Value": "1.19.0"
+      }
+    }
+  ],
+  "InstrumentationLibrary": {
+    "Name": "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc",
+    "Version": "0.45.0",
+    "SchemaURL": ""
+  }
+}
+{
+  "Name": "[internal] load build context",
+  "SpanContext": {
+    "TraceID": "0555ce1903feb85770d102846273073b",
+    "SpanID": "4e95c6912df2c60b",
+    "TraceFlags": "01",
+    "TraceState": "",
+    "Remote": false
+  },
+  "Parent": {
+    "TraceID": "0555ce1903feb85770d102846273073b",
+    "SpanID": "f1bb978313184255",
+    "TraceFlags": "01",
+    "TraceState": "",
+    "Remote": false
+  },
+  "SpanKind": 1,
+  "StartTime": "2024-01-12T14:57:42.287748305Z",
+  "EndTime": "2024-01-12T14:57:42.385320735Z",
+  "Attributes": [
+    {
+      "Key": "vertex",
+      "Value": {
+        "Type": "STRING",
+        "Value": "sha256:d6e40c1360ca7a3794673b2c27130b8a0cc88712faa287cb5f0060b14f025381"
+      }
+    }
+  ],
+  "Events": null,
+  "Links": null,
+  "Status": {
+    "Code": 0,
+    "Description": ""
+  },
+  "DroppedAttributes": 0,
+  "DroppedEvents": 0,
+  "DroppedLinks": 0,
+  "ChildSpanCount": 1,
+  "Resource": [
+    {
+      "Key": "service.name",
+      "Value": {
+        "Type": "STRING",
+        "Value": "dockerd"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.language",
+      "Value": {
+        "Type": "STRING",
+        "Value": "go"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.name",
+      "Value": {
+        "Type": "STRING",
+        "Value": "opentelemetry"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.version",
+      "Value": {
+        "Type": "STRING",
+        "Value": "1.19.0"
+      }
+    }
+  ],
+  "InstrumentationLibrary": {
+    "Name": "go.opentelemetry.io/otel/sdk/tracer",
+    "Version": "",
+    "SchemaURL": ""
+  }
+}
+{
+  "Name": "load cache: [build 1/3] FROM docker.io/library/alpine@sha256:51b67269f354137895d43f3b3d810bfacd3945438e94dc5ac55fdac340352f48",
+  "SpanContext": {
+    "TraceID": "0555ce1903feb85770d102846273073b",
+    "SpanID": "24dd74fb33034fd8",
+    "TraceFlags": "01",
+    "TraceState": "",
+    "Remote": false
+  },
+  "Parent": {
+    "TraceID": "0555ce1903feb85770d102846273073b",
+    "SpanID": "f1bb978313184255",
+    "TraceFlags": "01",
+    "TraceState": "",
+    "Remote": false
+  },
+  "SpanKind": 1,
+  "StartTime": "2024-01-12T14:57:42.390533062Z",
+  "EndTime": "2024-01-12T14:57:42.390565823Z",
+  "Attributes": [
+    {
+      "Key": "vertex",
+      "Value": {
+        "Type": "STRING",
+        "Value": "sha256:68ce956d6bef78a30f5452b0c12c5f918cd9e67c18ebe8b864b0a483dc147258"
+      }
+    }
+  ],
+  "Events": null,
+  "Links": null,
+  "Status": {
+    "Code": 0,
+    "Description": ""
+  },
+  "DroppedAttributes": 0,
+  "DroppedEvents": 0,
+  "DroppedLinks": 0,
+  "ChildSpanCount": 0,
+  "Resource": [
+    {
+      "Key": "service.name",
+      "Value": {
+        "Type": "STRING",
+        "Value": "dockerd"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.language",
+      "Value": {
+        "Type": "STRING",
+        "Value": "go"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.name",
+      "Value": {
+        "Type": "STRING",
+        "Value": "opentelemetry"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.version",
+      "Value": {
+        "Type": "STRING",
+        "Value": "1.19.0"
+      }
+    }
+  ],
+  "InstrumentationLibrary": {
+    "Name": "go.opentelemetry.io/otel/sdk/tracer",
+    "Version": "",
+    "SchemaURL": ""
+  }
+}
+{
+  "Name": "[build 2/3] COPY xx-* /out/",
+  "SpanContext": {
+    "TraceID": "0555ce1903feb85770d102846273073b",
+    "SpanID": "081ae7125ffd36a1",
+    "TraceFlags": "01",
+    "TraceState": "",
+    "Remote": false
+  },
+  "Parent": {
+    "TraceID": "0555ce1903feb85770d102846273073b",
+    "SpanID": "f1bb978313184255",
+    "TraceFlags": "01",
+    "TraceState": "",
+    "Remote": false
+  },
+  "SpanKind": 1,
+  "StartTime": "2024-01-12T14:57:42.390820868Z",
+  "EndTime": "2024-01-12T14:57:42.455481346Z",
+  "Attributes": [
+    {
+      "Key": "vertex",
+      "Value": {
+        "Type": "STRING",
+        "Value": "sha256:196b331662aa0b768bce34341a2a913d12a61d790455623be2b10d713abbac56"
+      }
+    }
+  ],
+  "Events": null,
+  "Links": null,
+  "Status": {
+    "Code": 0,
+    "Description": ""
+  },
+  "DroppedAttributes": 0,
+  "DroppedEvents": 0,
+  "DroppedLinks": 0,
+  "ChildSpanCount": 0,
+  "Resource": [
+    {
+      "Key": "service.name",
+      "Value": {
+        "Type": "STRING",
+        "Value": "dockerd"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.language",
+      "Value": {
+        "Type": "STRING",
+        "Value": "go"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.name",
+      "Value": {
+        "Type": "STRING",
+        "Value": "opentelemetry"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.version",
+      "Value": {
+        "Type": "STRING",
+        "Value": "1.19.0"
+      }
+    }
+  ],
+  "InstrumentationLibrary": {
+    "Name": "go.opentelemetry.io/otel/sdk/tracer",
+    "Version": "",
+    "SchemaURL": ""
+  }
+}
+{
+  "Name": "HTTP HEAD",
+  "SpanContext": {
+    "TraceID": "0555ce1903feb85770d102846273073b",
+    "SpanID": "3d01dbef1293e6cd",
+    "TraceFlags": "01",
+    "TraceState": "",
+    "Remote": false
+  },
+  "Parent": {
+    "TraceID": "0555ce1903feb85770d102846273073b",
+    "SpanID": "f1bb978313184255",
+    "TraceFlags": "01",
+    "TraceState": "",
+    "Remote": false
+  },
+  "SpanKind": 3,
+  "StartTime": "2024-01-12T14:57:42.287286814Z",
+  "EndTime": "2024-01-12T14:57:42.503085452Z",
+  "Attributes": [
+    {
+      "Key": "http.method",
+      "Value": {
+        "Type": "STRING",
+        "Value": "HEAD"
+      }
+    },
+    {
+      "Key": "http.flavor",
+      "Value": {
+        "Type": "STRING",
+        "Value": "1.1"
+      }
+    },
+    {
+      "Key": "http.url",
+      "Value": {
+        "Type": "STRING",
+        "Value": "https://raw.githubusercontent.com/fsaintjacques/semver-tool/3.4.0/src/semver"
+      }
+    },
+    {
+      "Key": "net.peer.name",
+      "Value": {
+        "Type": "STRING",
+        "Value": "raw.githubusercontent.com"
+      }
+    },
+    {
+      "Key": "http.request.header.:authority",
+      "Value": {
+        "Type": "STRING",
+        "Value": "raw.githubusercontent.com"
+      }
+    },
+    {
+      "Key": "http.request.header.:method",
+      "Value": {
+        "Type": "STRING",
+        "Value": "HEAD"
+      }
+    },
+    {
+      "Key": "http.request.header.:path",
+      "Value": {
+        "Type": "STRING",
+        "Value": "/fsaintjacques/semver-tool/3.4.0/src/semver"
+      }
+    },
+    {
+      "Key": "http.request.header.:scheme",
+      "Value": {
+        "Type": "STRING",
+        "Value": "https"
+      }
+    },
+    {
+      "Key": "http.request.header.if-none-match",
+      "Value": {
+        "Type": "STRING",
+        "Value": "\"e8135dc02beea5325dd7607b2505971fba2f9d3bf7f0e07c47db570096ee9e4b\""
+      }
+    },
+    {
+      "Key": "http.request.header.accept-encoding",
+      "Value": {
+        "Type": "STRING",
+        "Value": "gzip"
+      }
+    },
+    {
+      "Key": "http.request.header.traceparent",
+      "Value": {
+        "Type": "STRING",
+        "Value": "00-0555ce1903feb85770d102846273073b-3d01dbef1293e6cd-01"
+      }
+    },
+    {
+      "Key": "http.request.header.user-agent",
+      "Value": {
+        "Type": "STRING",
+        "Value": "Go-http-client/2.0"
+      }
+    },
+    {
+      "Key": "http.status_code",
+      "Value": {
+        "Type": "INT64",
+        "Value": 304
+      }
+    }
+  ],
+  "Events": [
+    {
+      "Name": "http.getconn.start",
+      "Attributes": [
+        {
+          "Key": "net.host.name",
+          "Value": {
+            "Type": "STRING",
+            "Value": "http.docker.internal:3128"
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:42.287345854Z"
+    },
+    {
+      "Name": "http.dns.start",
+      "Attributes": [
+        {
+          "Key": "net.host.name",
+          "Value": {
+            "Type": "STRING",
+            "Value": "http.docker.internal"
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:42.287384847Z"
+    },
+    {
+      "Name": "http.dns.done",
+      "Attributes": [
+        {
+          "Key": "http.dns.addrs",
+          "Value": {
+            "Type": "STRING",
+            "Value": "192.168.65.1"
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:42.288702709Z"
+    },
+    {
+      "Name": "http.connect.192.168.65.1:3128.start",
+      "Attributes": [
+        {
+          "Key": "http.remote",
+          "Value": {
+            "Type": "STRING",
+            "Value": "192.168.65.1:3128"
+          }
+        },
+        {
+          "Key": "http.conn.start.network",
+          "Value": {
+            "Type": "STRING",
+            "Value": "tcp"
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:42.288707688Z"
+    },
+    {
+      "Name": "http.connect.192.168.65.1:3128.done",
+      "Attributes": [
+        {
+          "Key": "http.conn.done.addr",
+          "Value": {
+            "Type": "STRING",
+            "Value": "192.168.65.1:3128"
+          }
+        },
+        {
+          "Key": "http.conn.done.network",
+          "Value": {
+            "Type": "STRING",
+            "Value": "tcp"
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:42.289330781Z"
+    },
+    {
+      "Name": "http.tls.start",
+      "Attributes": null,
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:42.308357023Z"
+    },
+    {
+      "Name": "http.tls.done",
+      "Attributes": null,
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:42.330407545Z"
+    },
+    {
+      "Name": "http.getconn.done",
+      "Attributes": [
+        {
+          "Key": "http.remote",
+          "Value": {
+            "Type": "STRING",
+            "Value": "192.168.65.1:3128"
+          }
+        },
+        {
+          "Key": "http.local",
+          "Value": {
+            "Type": "STRING",
+            "Value": "192.168.65.3:41272"
+          }
+        },
+        {
+          "Key": "http.conn.reused",
+          "Value": {
+            "Type": "BOOL",
+            "Value": false
+          }
+        },
+        {
+          "Key": "http.conn.wasidle",
+          "Value": {
+            "Type": "BOOL",
+            "Value": false
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:42.330729617Z"
+    },
+    {
+      "Name": "http.send.start",
+      "Attributes": null,
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:42.330859459Z"
+    },
+    {
+      "Name": "http.send.done",
+      "Attributes": null,
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:42.330860942Z"
+    },
+    {
+      "Name": "http.receive.start",
+      "Attributes": null,
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:42.503006526Z"
+    }
+  ],
+  "Links": null,
+  "Status": {
+    "Code": 0,
+    "Description": ""
+  },
+  "DroppedAttributes": 0,
+  "DroppedEvents": 0,
+  "DroppedLinks": 0,
+  "ChildSpanCount": 0,
+  "Resource": [
+    {
+      "Key": "service.name",
+      "Value": {
+        "Type": "STRING",
+        "Value": "dockerd"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.language",
+      "Value": {
+        "Type": "STRING",
+        "Value": "go"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.name",
+      "Value": {
+        "Type": "STRING",
+        "Value": "opentelemetry"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.version",
+      "Value": {
+        "Type": "STRING",
+        "Value": "1.19.0"
+      }
+    }
+  ],
+  "InstrumentationLibrary": {
+    "Name": "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp",
+    "Version": "0.45.0",
+    "SchemaURL": ""
+  }
+}
+{
+  "Name": "cache request: https://raw.githubusercontent.com/fsaintjacques/semver-tool/3.4.0/src/semver",
+  "SpanContext": {
+    "TraceID": "0555ce1903feb85770d102846273073b",
+    "SpanID": "89596877844426c7",
+    "TraceFlags": "01",
+    "TraceState": "",
+    "Remote": false
+  },
+  "Parent": {
+    "TraceID": "0555ce1903feb85770d102846273073b",
+    "SpanID": "f1bb978313184255",
+    "TraceFlags": "01",
+    "TraceState": "",
+    "Remote": false
+  },
+  "SpanKind": 1,
+  "StartTime": "2024-01-12T14:57:42.287136233Z",
+  "EndTime": "2024-01-12T14:57:42.50314312Z",
+  "Attributes": [
+    {
+      "Key": "vertex",
+      "Value": {
+        "Type": "STRING",
+        "Value": "sha256:2de254c1cdd10bfd735b05df0ddbcc58b6c96bbeddacb02662082e5863c7dfaa"
+      }
+    }
+  ],
+  "Events": null,
+  "Links": null,
+  "Status": {
+    "Code": 0,
+    "Description": ""
+  },
+  "DroppedAttributes": 0,
+  "DroppedEvents": 0,
+  "DroppedLinks": 0,
+  "ChildSpanCount": 0,
+  "Resource": [
+    {
+      "Key": "service.name",
+      "Value": {
+        "Type": "STRING",
+        "Value": "dockerd"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.language",
+      "Value": {
+        "Type": "STRING",
+        "Value": "go"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.name",
+      "Value": {
+        "Type": "STRING",
+        "Value": "opentelemetry"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.version",
+      "Value": {
+        "Type": "STRING",
+        "Value": "1.19.0"
+      }
+    }
+  ],
+  "InstrumentationLibrary": {
+    "Name": "go.opentelemetry.io/otel/sdk/tracer",
+    "Version": "",
+    "SchemaURL": ""
+  }
+}
+{
+  "Name": "[build 3/3] RUN ln -s xx-cc /out/xx-clang \u0026\u0026     ln -s xx-cc /out/xx-clang++ \u0026\u0026     ln -s xx-cc /out/xx-c++ \u0026\u0026     ln -s xx-apt /out/xx-apt-get",
+  "SpanContext": {
+    "TraceID": "0555ce1903feb85770d102846273073b",
+    "SpanID": "12e17e16cfa335bf",
+    "TraceFlags": "01",
+    "TraceState": "",
+    "Remote": false
+  },
+  "Parent": {
+    "TraceID": "0555ce1903feb85770d102846273073b",
+    "SpanID": "f1bb978313184255",
+    "TraceFlags": "01",
+    "TraceState": "",
+    "Remote": false
+  },
+  "SpanKind": 1,
+  "StartTime": "2024-01-12T14:57:42.460817845Z",
+  "EndTime": "2024-01-12T14:57:42.907704321Z",
+  "Attributes": [
+    {
+      "Key": "vertex",
+      "Value": {
+        "Type": "STRING",
+        "Value": "sha256:a1eea8cd46dfa6d71e21811e74e153079f41846f01247e9ab5577997e361207d"
+      }
+    }
+  ],
+  "Events": [
+    {
+      "Name": "ExecOp started",
+      "Attributes": null,
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:42.460835478Z"
+    },
+    {
+      "Name": "Container created",
+      "Attributes": null,
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:42.553257485Z"
+    },
+    {
+      "Name": "Container started",
+      "Attributes": null,
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:42.553798935Z"
+    },
+    {
+      "Name": "Container exited",
+      "Attributes": [
+        {
+          "Key": "exit.code",
+          "Value": {
+            "Type": "INT64",
+            "Value": 0
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:42.877373704Z"
+    }
+  ],
+  "Links": null,
+  "Status": {
+    "Code": 0,
+    "Description": ""
+  },
+  "DroppedAttributes": 0,
+  "DroppedEvents": 0,
+  "DroppedLinks": 0,
+  "ChildSpanCount": 0,
+  "Resource": [
+    {
+      "Key": "service.name",
+      "Value": {
+        "Type": "STRING",
+        "Value": "dockerd"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.language",
+      "Value": {
+        "Type": "STRING",
+        "Value": "go"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.name",
+      "Value": {
+        "Type": "STRING",
+        "Value": "opentelemetry"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.version",
+      "Value": {
+        "Type": "STRING",
+        "Value": "1.19.0"
+      }
+    }
+  ],
+  "InstrumentationLibrary": {
+    "Name": "go.opentelemetry.io/otel/sdk/tracer",
+    "Version": "",
+    "SchemaURL": ""
+  }
+}
+{
+  "Name": "[xx 1/1] COPY --from=build /out/ /usr/bin/",
+  "SpanContext": {
+    "TraceID": "0555ce1903feb85770d102846273073b",
+    "SpanID": "619e3ab187179836",
+    "TraceFlags": "01",
+    "TraceState": "",
+    "Remote": false
+  },
+  "Parent": {
+    "TraceID": "0555ce1903feb85770d102846273073b",
+    "SpanID": "f1bb978313184255",
+    "TraceFlags": "01",
+    "TraceState": "",
+    "Remote": false
+  },
+  "SpanKind": 1,
+  "StartTime": "2024-01-12T14:57:42.95885997Z",
+  "EndTime": "2024-01-12T14:57:43.011879613Z",
+  "Attributes": [
+    {
+      "Key": "vertex",
+      "Value": {
+        "Type": "STRING",
+        "Value": "sha256:abe6c4b1cb4b514609ab3e38e605946b9dabe300bacf05d0356dcea2cf038b48"
+      }
+    }
+  ],
+  "Events": null,
+  "Links": null,
+  "Status": {
+    "Code": 0,
+    "Description": ""
+  },
+  "DroppedAttributes": 0,
+  "DroppedEvents": 0,
+  "DroppedLinks": 0,
+  "ChildSpanCount": 0,
+  "Resource": [
+    {
+      "Key": "service.name",
+      "Value": {
+        "Type": "STRING",
+        "Value": "dockerd"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.language",
+      "Value": {
+        "Type": "STRING",
+        "Value": "go"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.name",
+      "Value": {
+        "Type": "STRING",
+        "Value": "opentelemetry"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.version",
+      "Value": {
+        "Type": "STRING",
+        "Value": "1.19.0"
+      }
+    }
+  ],
+  "InstrumentationLibrary": {
+    "Name": "go.opentelemetry.io/otel/sdk/tracer",
+    "Version": "",
+    "SchemaURL": ""
+  }
+}
+{
+  "Name": "load cache: [test-base 2/3] COPY --from=bats-assert . .",
+  "SpanContext": {
+    "TraceID": "0555ce1903feb85770d102846273073b",
+    "SpanID": "8ddee09af707d9fd",
+    "TraceFlags": "01",
+    "TraceState": "",
+    "Remote": false
+  },
+  "Parent": {
+    "TraceID": "0555ce1903feb85770d102846273073b",
+    "SpanID": "f1bb978313184255",
+    "TraceFlags": "01",
+    "TraceState": "",
+    "Remote": false
+  },
+  "SpanKind": 1,
+  "StartTime": "2024-01-12T14:57:43.018474041Z",
+  "EndTime": "2024-01-12T14:57:43.018525547Z",
+  "Attributes": [
+    {
+      "Key": "vertex",
+      "Value": {
+        "Type": "STRING",
+        "Value": "sha256:6ff2f44640655dbef55c51757a728a4a8661a76b9368399ed22ae8169fda81fe"
+      }
+    }
+  ],
+  "Events": null,
+  "Links": null,
+  "Status": {
+    "Code": 0,
+    "Description": ""
+  },
+  "DroppedAttributes": 0,
+  "DroppedEvents": 0,
+  "DroppedLinks": 0,
+  "ChildSpanCount": 0,
+  "Resource": [
+    {
+      "Key": "service.name",
+      "Value": {
+        "Type": "STRING",
+        "Value": "dockerd"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.language",
+      "Value": {
+        "Type": "STRING",
+        "Value": "go"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.name",
+      "Value": {
+        "Type": "STRING",
+        "Value": "opentelemetry"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.version",
+      "Value": {
+        "Type": "STRING",
+        "Value": "1.19.0"
+      }
+    }
+  ],
+  "InstrumentationLibrary": {
+    "Name": "go.opentelemetry.io/otel/sdk/tracer",
+    "Version": "",
+    "SchemaURL": ""
+  }
+}
+{
+  "Name": "[test-base 3/3] COPY --from=xx / /",
+  "SpanContext": {
+    "TraceID": "0555ce1903feb85770d102846273073b",
+    "SpanID": "62eddbfa59fdb4c3",
+    "TraceFlags": "01",
+    "TraceState": "",
+    "Remote": false
+  },
+  "Parent": {
+    "TraceID": "0555ce1903feb85770d102846273073b",
+    "SpanID": "f1bb978313184255",
+    "TraceFlags": "01",
+    "TraceState": "",
+    "Remote": false
+  },
+  "SpanKind": 1,
+  "StartTime": "2024-01-12T14:57:43.018613201Z",
+  "EndTime": "2024-01-12T14:57:43.089313591Z",
+  "Attributes": [
+    {
+      "Key": "vertex",
+      "Value": {
+        "Type": "STRING",
+        "Value": "sha256:3c9083d5f8616feedd45b59d401c86f2737b196284b79fd02e5d84db757460ba"
+      }
+    }
+  ],
+  "Events": null,
+  "Links": null,
+  "Status": {
+    "Code": 0,
+    "Description": ""
+  },
+  "DroppedAttributes": 0,
+  "DroppedEvents": 0,
+  "DroppedLinks": 0,
+  "ChildSpanCount": 0,
+  "Resource": [
+    {
+      "Key": "service.name",
+      "Value": {
+        "Type": "STRING",
+        "Value": "dockerd"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.language",
+      "Value": {
+        "Type": "STRING",
+        "Value": "go"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.name",
+      "Value": {
+        "Type": "STRING",
+        "Value": "opentelemetry"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.version",
+      "Value": {
+        "Type": "STRING",
+        "Value": "1.19.0"
+      }
+    }
+  ],
+  "InstrumentationLibrary": {
+    "Name": "go.opentelemetry.io/otel/sdk/tracer",
+    "Version": "",
+    "SchemaURL": ""
+  }
+}
+{
+  "Name": "[test-base-fixtures 1/1] COPY fixtures fixtures",
+  "SpanContext": {
+    "TraceID": "0555ce1903feb85770d102846273073b",
+    "SpanID": "07426553e9705e76",
+    "TraceFlags": "01",
+    "TraceState": "",
+    "Remote": false
+  },
+  "Parent": {
+    "TraceID": "0555ce1903feb85770d102846273073b",
+    "SpanID": "f1bb978313184255",
+    "TraceFlags": "01",
+    "TraceState": "",
+    "Remote": false
+  },
+  "SpanKind": 1,
+  "StartTime": "2024-01-12T14:57:43.094591201Z",
+  "EndTime": "2024-01-12T14:57:43.182925672Z",
+  "Attributes": [
+    {
+      "Key": "vertex",
+      "Value": {
+        "Type": "STRING",
+        "Value": "sha256:346d54a7e04fefc7af945eebe8a6cd97677ee60bf676dc8c88c243d28922f32b"
+      }
+    }
+  ],
+  "Events": null,
+  "Links": null,
+  "Status": {
+    "Code": 0,
+    "Description": ""
+  },
+  "DroppedAttributes": 0,
+  "DroppedEvents": 0,
+  "DroppedLinks": 0,
+  "ChildSpanCount": 0,
+  "Resource": [
+    {
+      "Key": "service.name",
+      "Value": {
+        "Type": "STRING",
+        "Value": "dockerd"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.language",
+      "Value": {
+        "Type": "STRING",
+        "Value": "go"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.name",
+      "Value": {
+        "Type": "STRING",
+        "Value": "opentelemetry"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.version",
+      "Value": {
+        "Type": "STRING",
+        "Value": "1.19.0"
+      }
+    }
+  ],
+  "InstrumentationLibrary": {
+    "Name": "go.opentelemetry.io/otel/sdk/tracer",
+    "Version": "",
+    "SchemaURL": ""
+  }
+}
+{
+  "Name": "[test-go 1/2] COPY test-go.bats test_helper.bash ./",
+  "SpanContext": {
+    "TraceID": "0555ce1903feb85770d102846273073b",
+    "SpanID": "1115d9d92e0eaeb8",
+    "TraceFlags": "01",
+    "TraceState": "",
+    "Remote": false
+  },
+  "Parent": {
+    "TraceID": "0555ce1903feb85770d102846273073b",
+    "SpanID": "f1bb978313184255",
+    "TraceFlags": "01",
+    "TraceState": "",
+    "Remote": false
+  },
+  "SpanKind": 1,
+  "StartTime": "2024-01-12T14:57:43.187809656Z",
+  "EndTime": "2024-01-12T14:57:43.289237315Z",
+  "Attributes": [
+    {
+      "Key": "vertex",
+      "Value": {
+        "Type": "STRING",
+        "Value": "sha256:fdfb54beb471875a9c87aee2905e97b00d1e5ce7b1e280006bb05ac0e6c5e9ca"
+      }
+    }
+  ],
+  "Events": null,
+  "Links": null,
+  "Status": {
+    "Code": 0,
+    "Description": ""
+  },
+  "DroppedAttributes": 0,
+  "DroppedEvents": 0,
+  "DroppedLinks": 0,
+  "ChildSpanCount": 0,
+  "Resource": [
+    {
+      "Key": "service.name",
+      "Value": {
+        "Type": "STRING",
+        "Value": "dockerd"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.language",
+      "Value": {
+        "Type": "STRING",
+        "Value": "go"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.name",
+      "Value": {
+        "Type": "STRING",
+        "Value": "opentelemetry"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.version",
+      "Value": {
+        "Type": "STRING",
+        "Value": "1.19.0"
+      }
+    }
+  ],
+  "InstrumentationLibrary": {
+    "Name": "go.opentelemetry.io/otel/sdk/tracer",
+    "Version": "",
+    "SchemaURL": ""
+  }
+}
+{
+  "Name": "moby.buildkit.v1.Control/Status",
+  "SpanContext": {
+    "TraceID": "0555ce1903feb85770d102846273073b",
+    "SpanID": "52ac568c2da201b8",
+    "TraceFlags": "00",
+    "TraceState": "",
+    "Remote": false
+  },
+  "Parent": {
+    "TraceID": "0555ce1903feb85770d102846273073b",
+    "SpanID": "d30eea87f2f0241d",
+    "TraceFlags": "00",
+    "TraceState": "",
+    "Remote": false
+  },
+  "SpanKind": 3,
+  "StartTime": "2024-01-12T14:57:40.39969566Z",
+  "EndTime": "2024-01-12T14:59:09.25901601Z",
+  "Attributes": [
+    {
+      "Key": "rpc.system",
+      "Value": {
+        "Type": "STRING",
+        "Value": "grpc"
+      }
+    },
+    {
+      "Key": "rpc.service",
+      "Value": {
+        "Type": "STRING",
+        "Value": "moby.buildkit.v1.Control"
+      }
+    },
+    {
+      "Key": "rpc.method",
+      "Value": {
+        "Type": "STRING",
+        "Value": "Status"
+      }
+    },
+    {
+      "Key": "rpc.grpc.status_code",
+      "Value": {
+        "Type": "INT64",
+        "Value": 0
+      }
+    }
+  ],
+  "Events": [
+    {
+      "Name": "message",
+      "Attributes": [
+        {
+          "Key": "message.type",
+          "Value": {
+            "Type": "STRING",
+            "Value": "SENT"
+          }
+        },
+        {
+          "Key": "message.id",
+          "Value": {
+            "Type": "INT64",
+            "Value": 1
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:40.399750984Z"
+    },
+    {
+      "Name": "message",
+      "Attributes": [
+        {
+          "Key": "message.type",
+          "Value": {
+            "Type": "STRING",
+            "Value": "RECEIVED"
+          }
+        },
+        {
+          "Key": "message.id",
+          "Value": {
+            "Type": "INT64",
+            "Value": 1
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:40.402465886Z"
+    },
+    {
+      "Name": "message",
+      "Attributes": [
+        {
+          "Key": "message.type",
+          "Value": {
+            "Type": "STRING",
+            "Value": "RECEIVED"
+          }
+        },
+        {
+          "Key": "message.id",
+          "Value": {
+            "Type": "INT64",
+            "Value": 2
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:40.40249445Z"
+    },
+    {
+      "Name": "message",
+      "Attributes": [
+        {
+          "Key": "message.type",
+          "Value": {
+            "Type": "STRING",
+            "Value": "RECEIVED"
+          }
+        },
+        {
+          "Key": "message.id",
+          "Value": {
+            "Type": "INT64",
+            "Value": 3
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:40.402504999Z"
+    },
+    {
+      "Name": "message",
+      "Attributes": [
+        {
+          "Key": "message.type",
+          "Value": {
+            "Type": "STRING",
+            "Value": "RECEIVED"
+          }
+        },
+        {
+          "Key": "message.id",
+          "Value": {
+            "Type": "INT64",
+            "Value": 4
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:40.40883372Z"
+    },
+    {
+      "Name": "message",
+      "Attributes": [
+        {
+          "Key": "message.type",
+          "Value": {
+            "Type": "STRING",
+            "Value": "RECEIVED"
+          }
+        },
+        {
+          "Key": "message.id",
+          "Value": {
+            "Type": "INT64",
+            "Value": 5
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:40.415916471Z"
+    },
+    {
+      "Name": "message",
+      "Attributes": [
+        {
+          "Key": "message.type",
+          "Value": {
+            "Type": "STRING",
+            "Value": "RECEIVED"
+          }
+        },
+        {
+          "Key": "message.id",
+          "Value": {
+            "Type": "INT64",
+            "Value": 6
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:40.419297628Z"
+    },
+    {
+      "Name": "message",
+      "Attributes": [
+        {
+          "Key": "message.type",
+          "Value": {
+            "Type": "STRING",
+            "Value": "RECEIVED"
+          }
+        },
+        {
+          "Key": "message.id",
+          "Value": {
+            "Type": "INT64",
+            "Value": 7
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:40.434004641Z"
+    },
+    {
+      "Name": "message",
+      "Attributes": [
+        {
+          "Key": "message.type",
+          "Value": {
+            "Type": "STRING",
+            "Value": "RECEIVED"
+          }
+        },
+        {
+          "Key": "message.id",
+          "Value": {
+            "Type": "INT64",
+            "Value": 8
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:40.440396651Z"
+    },
+    {
+      "Name": "message",
+      "Attributes": [
+        {
+          "Key": "message.type",
+          "Value": {
+            "Type": "STRING",
+            "Value": "RECEIVED"
+          }
+        },
+        {
+          "Key": "message.id",
+          "Value": {
+            "Type": "INT64",
+            "Value": 9
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:41.29852984Z"
+    },
+    {
+      "Name": "message",
+      "Attributes": [
+        {
+          "Key": "message.type",
+          "Value": {
+            "Type": "STRING",
+            "Value": "RECEIVED"
+          }
+        },
+        {
+          "Key": "message.id",
+          "Value": {
+            "Type": "INT64",
+            "Value": 10
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:41.298633422Z"
+    },
+    {
+      "Name": "message",
+      "Attributes": [
+        {
+          "Key": "message.type",
+          "Value": {
+            "Type": "STRING",
+            "Value": "RECEIVED"
+          }
+        },
+        {
+          "Key": "message.id",
+          "Value": {
+            "Type": "INT64",
+            "Value": 11
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:41.29874541Z"
+    },
+    {
+      "Name": "message",
+      "Attributes": [
+        {
+          "Key": "message.type",
+          "Value": {
+            "Type": "STRING",
+            "Value": "RECEIVED"
+          }
+        },
+        {
+          "Key": "message.id",
+          "Value": {
+            "Type": "INT64",
+            "Value": 12
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:41.298765538Z"
+    },
+    {
+      "Name": "message",
+      "Attributes": [
+        {
+          "Key": "message.type",
+          "Value": {
+            "Type": "STRING",
+            "Value": "RECEIVED"
+          }
+        },
+        {
+          "Key": "message.id",
+          "Value": {
+            "Type": "INT64",
+            "Value": 13
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:41.299220671Z"
+    },
+    {
+      "Name": "message",
+      "Attributes": [
+        {
+          "Key": "message.type",
+          "Value": {
+            "Type": "STRING",
+            "Value": "RECEIVED"
+          }
+        },
+        {
+          "Key": "message.id",
+          "Value": {
+            "Type": "INT64",
+            "Value": 14
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:41.450692733Z"
+    },
+    {
+      "Name": "message",
+      "Attributes": [
+        {
+          "Key": "message.type",
+          "Value": {
+            "Type": "STRING",
+            "Value": "RECEIVED"
+          }
+        },
+        {
+          "Key": "message.id",
+          "Value": {
+            "Type": "INT64",
+            "Value": 15
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:41.450718571Z"
+    },
+    {
+      "Name": "message",
+      "Attributes": [
+        {
+          "Key": "message.type",
+          "Value": {
+            "Type": "STRING",
+            "Value": "RECEIVED"
+          }
+        },
+        {
+          "Key": "message.id",
+          "Value": {
+            "Type": "INT64",
+            "Value": 16
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:41.450875766Z"
+    },
+    {
+      "Name": "message",
+      "Attributes": [
+        {
+          "Key": "message.type",
+          "Value": {
+            "Type": "STRING",
+            "Value": "RECEIVED"
+          }
+        },
+        {
+          "Key": "message.id",
+          "Value": {
+            "Type": "INT64",
+            "Value": 17
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:41.456368595Z"
+    },
+    {
+      "Name": "message",
+      "Attributes": [
+        {
+          "Key": "message.type",
+          "Value": {
+            "Type": "STRING",
+            "Value": "RECEIVED"
+          }
+        },
+        {
+          "Key": "message.id",
+          "Value": {
+            "Type": "INT64",
+            "Value": 18
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:41.464429855Z"
+    },
+    {
+      "Name": "message",
+      "Attributes": [
+        {
+          "Key": "message.type",
+          "Value": {
+            "Type": "STRING",
+            "Value": "RECEIVED"
+          }
+        },
+        {
+          "Key": "message.id",
+          "Value": {
+            "Type": "INT64",
+            "Value": 19
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:41.464736406Z"
+    },
+    {
+      "Name": "message",
+      "Attributes": [
+        {
+          "Key": "message.type",
+          "Value": {
+            "Type": "STRING",
+            "Value": "RECEIVED"
+          }
+        },
+        {
+          "Key": "message.id",
+          "Value": {
+            "Type": "INT64",
+            "Value": 20
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:41.480645857Z"
+    },
+    {
+      "Name": "message",
+      "Attributes": [
+        {
+          "Key": "message.type",
+          "Value": {
+            "Type": "STRING",
+            "Value": "RECEIVED"
+          }
+        },
+        {
+          "Key": "message.id",
+          "Value": {
+            "Type": "INT64",
+            "Value": 21
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:41.489618728Z"
+    },
+    {
+      "Name": "message",
+      "Attributes": [
+        {
+          "Key": "message.type",
+          "Value": {
+            "Type": "STRING",
+            "Value": "RECEIVED"
+          }
+        },
+        {
+          "Key": "message.id",
+          "Value": {
+            "Type": "INT64",
+            "Value": 22
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:42.247192558Z"
+    },
+    {
+      "Name": "message",
+      "Attributes": [
+        {
+          "Key": "message.type",
+          "Value": {
+            "Type": "STRING",
+            "Value": "RECEIVED"
+          }
+        },
+        {
+          "Key": "message.id",
+          "Value": {
+            "Type": "INT64",
+            "Value": 23
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:42.251291625Z"
+    },
+    {
+      "Name": "message",
+      "Attributes": [
+        {
+          "Key": "message.type",
+          "Value": {
+            "Type": "STRING",
+            "Value": "RECEIVED"
+          }
+        },
+        {
+          "Key": "message.id",
+          "Value": {
+            "Type": "INT64",
+            "Value": 24
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:42.287177131Z"
+    },
+    {
+      "Name": "message",
+      "Attributes": [
+        {
+          "Key": "message.type",
+          "Value": {
+            "Type": "STRING",
+            "Value": "RECEIVED"
+          }
+        },
+        {
+          "Key": "message.id",
+          "Value": {
+            "Type": "INT64",
+            "Value": 25
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:42.287291432Z"
+    },
+    {
+      "Name": "message",
+      "Attributes": [
+        {
+          "Key": "message.type",
+          "Value": {
+            "Type": "STRING",
+            "Value": "RECEIVED"
+          }
+        },
+        {
+          "Key": "message.id",
+          "Value": {
+            "Type": "INT64",
+            "Value": 26
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:42.287552016Z"
+    },
+    {
+      "Name": "message",
+      "Attributes": [
+        {
+          "Key": "message.type",
+          "Value": {
+            "Type": "STRING",
+            "Value": "RECEIVED"
+          }
+        },
+        {
+          "Key": "message.id",
+          "Value": {
+            "Type": "INT64",
+            "Value": 27
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:42.288074771Z"
+    },
+    {
+      "Name": "message",
+      "Attributes": [
+        {
+          "Key": "message.type",
+          "Value": {
+            "Type": "STRING",
+            "Value": "RECEIVED"
+          }
+        },
+        {
+          "Key": "message.id",
+          "Value": {
+            "Type": "INT64",
+            "Value": 28
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:42.293598414Z"
+    },
+    {
+      "Name": "message",
+      "Attributes": [
+        {
+          "Key": "message.type",
+          "Value": {
+            "Type": "STRING",
+            "Value": "RECEIVED"
+          }
+        },
+        {
+          "Key": "message.id",
+          "Value": {
+            "Type": "INT64",
+            "Value": 29
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:42.311730795Z"
+    },
+    {
+      "Name": "message",
+      "Attributes": [
+        {
+          "Key": "message.type",
+          "Value": {
+            "Type": "STRING",
+            "Value": "RECEIVED"
+          }
+        },
+        {
+          "Key": "message.id",
+          "Value": {
+            "Type": "INT64",
+            "Value": 30
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:42.369784713Z"
+    },
+    {
+      "Name": "message",
+      "Attributes": [
+        {
+          "Key": "message.type",
+          "Value": {
+            "Type": "STRING",
+            "Value": "RECEIVED"
+          }
+        },
+        {
+          "Key": "message.id",
+          "Value": {
+            "Type": "INT64",
+            "Value": 31
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:42.385847739Z"
+    },
+    {
+      "Name": "message",
+      "Attributes": [
+        {
+          "Key": "message.type",
+          "Value": {
+            "Type": "STRING",
+            "Value": "RECEIVED"
+          }
+        },
+        {
+          "Key": "message.id",
+          "Value": {
+            "Type": "INT64",
+            "Value": 32
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:42.390942548Z"
+    },
+    {
+      "Name": "message",
+      "Attributes": [
+        {
+          "Key": "message.type",
+          "Value": {
+            "Type": "STRING",
+            "Value": "RECEIVED"
+          }
+        },
+        {
+          "Key": "message.id",
+          "Value": {
+            "Type": "INT64",
+            "Value": 33
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:42.391212991Z"
+    },
+    {
+      "Name": "message",
+      "Attributes": [
+        {
+          "Key": "message.type",
+          "Value": {
+            "Type": "STRING",
+            "Value": "RECEIVED"
+          }
+        },
+        {
+          "Key": "message.id",
+          "Value": {
+            "Type": "INT64",
+            "Value": 34
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:42.456018882Z"
+    },
+    {
+      "Name": "message",
+      "Attributes": [
+        {
+          "Key": "message.type",
+          "Value": {
+            "Type": "STRING",
+            "Value": "RECEIVED"
+          }
+        },
+        {
+          "Key": "message.id",
+          "Value": {
+            "Type": "INT64",
+            "Value": 35
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:42.461238922Z"
+    },
+    {
+      "Name": "message",
+      "Attributes": [
+        {
+          "Key": "message.type",
+          "Value": {
+            "Type": "STRING",
+            "Value": "RECEIVED"
+          }
+        },
+        {
+          "Key": "message.id",
+          "Value": {
+            "Type": "INT64",
+            "Value": 36
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:42.503713777Z"
+    },
+    {
+      "Name": "message",
+      "Attributes": [
+        {
+          "Key": "message.type",
+          "Value": {
+            "Type": "STRING",
+            "Value": "RECEIVED"
+          }
+        },
+        {
+          "Key": "message.id",
+          "Value": {
+            "Type": "INT64",
+            "Value": 37
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:42.908249129Z"
+    },
+    {
+      "Name": "message",
+      "Attributes": [
+        {
+          "Key": "message.type",
+          "Value": {
+            "Type": "STRING",
+            "Value": "RECEIVED"
+          }
+        },
+        {
+          "Key": "message.id",
+          "Value": {
+            "Type": "INT64",
+            "Value": 38
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:42.959403226Z"
+    },
+    {
+      "Name": "message",
+      "Attributes": [
+        {
+          "Key": "message.type",
+          "Value": {
+            "Type": "STRING",
+            "Value": "RECEIVED"
+          }
+        },
+        {
+          "Key": "message.id",
+          "Value": {
+            "Type": "INT64",
+            "Value": 39
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:43.012411075Z"
+    },
+    {
+      "Name": "message",
+      "Attributes": [
+        {
+          "Key": "message.type",
+          "Value": {
+            "Type": "STRING",
+            "Value": "RECEIVED"
+          }
+        },
+        {
+          "Key": "message.id",
+          "Value": {
+            "Type": "INT64",
+            "Value": 40
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:43.019002879Z"
+    },
+    {
+      "Name": "message",
+      "Attributes": [
+        {
+          "Key": "message.type",
+          "Value": {
+            "Type": "STRING",
+            "Value": "RECEIVED"
+          }
+        },
+        {
+          "Key": "message.id",
+          "Value": {
+            "Type": "INT64",
+            "Value": 41
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:43.019028387Z"
+    },
+    {
+      "Name": "message",
+      "Attributes": [
+        {
+          "Key": "message.type",
+          "Value": {
+            "Type": "STRING",
+            "Value": "RECEIVED"
+          }
+        },
+        {
+          "Key": "message.id",
+          "Value": {
+            "Type": "INT64",
+            "Value": 42
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:43.08985268Z"
+    },
+    {
+      "Name": "message",
+      "Attributes": [
+        {
+          "Key": "message.type",
+          "Value": {
+            "Type": "STRING",
+            "Value": "RECEIVED"
+          }
+        },
+        {
+          "Key": "message.id",
+          "Value": {
+            "Type": "INT64",
+            "Value": 43
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:43.094980027Z"
+    },
+    {
+      "Name": "message",
+      "Attributes": [
+        {
+          "Key": "message.type",
+          "Value": {
+            "Type": "STRING",
+            "Value": "RECEIVED"
+          }
+        },
+        {
+          "Key": "message.id",
+          "Value": {
+            "Type": "INT64",
+            "Value": 44
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:43.183415678Z"
+    },
+    {
+      "Name": "message",
+      "Attributes": [
+        {
+          "Key": "message.type",
+          "Value": {
+            "Type": "STRING",
+            "Value": "RECEIVED"
+          }
+        },
+        {
+          "Key": "message.id",
+          "Value": {
+            "Type": "INT64",
+            "Value": 45
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:43.188233607Z"
+    },
+    {
+      "Name": "message",
+      "Attributes": [
+        {
+          "Key": "message.type",
+          "Value": {
+            "Type": "STRING",
+            "Value": "RECEIVED"
+          }
+        },
+        {
+          "Key": "message.id",
+          "Value": {
+            "Type": "INT64",
+            "Value": 46
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:43.28977037Z"
+    },
+    {
+      "Name": "message",
+      "Attributes": [
+        {
+          "Key": "message.type",
+          "Value": {
+            "Type": "STRING",
+            "Value": "RECEIVED"
+          }
+        },
+        {
+          "Key": "message.id",
+          "Value": {
+            "Type": "INT64",
+            "Value": 47
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:43.297209719Z"
+    },
+    {
+      "Name": "message",
+      "Attributes": [
+        {
+          "Key": "message.type",
+          "Value": {
+            "Type": "STRING",
+            "Value": "RECEIVED"
+          }
+        },
+        {
+          "Key": "message.id",
+          "Value": {
+            "Type": "INT64",
+            "Value": 48
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:43.738226971Z"
+    },
+    {
+      "Name": "message",
+      "Attributes": [
+        {
+          "Key": "message.type",
+          "Value": {
+            "Type": "STRING",
+            "Value": "RECEIVED"
+          }
+        },
+        {
+          "Key": "message.id",
+          "Value": {
+            "Type": "INT64",
+            "Value": 49
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:51.968009255Z"
+    },
+    {
+      "Name": "message",
+      "Attributes": [
+        {
+          "Key": "message.type",
+          "Value": {
+            "Type": "STRING",
+            "Value": "RECEIVED"
+          }
+        },
+        {
+          "Key": "message.id",
+          "Value": {
+            "Type": "INT64",
+            "Value": 50
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:52.274741317Z"
+    },
+    {
+      "Name": "message",
+      "Attributes": [
+        {
+          "Key": "message.type",
+          "Value": {
+            "Type": "STRING",
+            "Value": "RECEIVED"
+          }
+        },
+        {
+          "Key": "message.id",
+          "Value": {
+            "Type": "INT64",
+            "Value": 51
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:52.366039619Z"
+    },
+    {
+      "Name": "message",
+      "Attributes": [
+        {
+          "Key": "message.type",
+          "Value": {
+            "Type": "STRING",
+            "Value": "RECEIVED"
+          }
+        },
+        {
+          "Key": "message.id",
+          "Value": {
+            "Type": "INT64",
+            "Value": 52
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:52.46341939Z"
+    },
+    {
+      "Name": "message",
+      "Attributes": [
+        {
+          "Key": "message.type",
+          "Value": {
+            "Type": "STRING",
+            "Value": "RECEIVED"
+          }
+        },
+        {
+          "Key": "message.id",
+          "Value": {
+            "Type": "INT64",
+            "Value": 53
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:52.564803533Z"
+    },
+    {
+      "Name": "message",
+      "Attributes": [
+        {
+          "Key": "message.type",
+          "Value": {
+            "Type": "STRING",
+            "Value": "RECEIVED"
+          }
+        },
+        {
+          "Key": "message.id",
+          "Value": {
+            "Type": "INT64",
+            "Value": 54
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:52.665774796Z"
+    },
+    {
+      "Name": "message",
+      "Attributes": [
+        {
+          "Key": "message.type",
+          "Value": {
+            "Type": "STRING",
+            "Value": "RECEIVED"
+          }
+        },
+        {
+          "Key": "message.id",
+          "Value": {
+            "Type": "INT64",
+            "Value": 55
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:52.980197331Z"
+    },
+    {
+      "Name": "message",
+      "Attributes": [
+        {
+          "Key": "message.type",
+          "Value": {
+            "Type": "STRING",
+            "Value": "RECEIVED"
+          }
+        },
+        {
+          "Key": "message.id",
+          "Value": {
+            "Type": "INT64",
+            "Value": 56
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:53.291337312Z"
+    },
+    {
+      "Name": "message",
+      "Attributes": [
+        {
+          "Key": "message.type",
+          "Value": {
+            "Type": "STRING",
+            "Value": "RECEIVED"
+          }
+        },
+        {
+          "Key": "message.id",
+          "Value": {
+            "Type": "INT64",
+            "Value": 57
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:53.586146639Z"
+    },
+    {
+      "Name": "message",
+      "Attributes": [
+        {
+          "Key": "message.type",
+          "Value": {
+            "Type": "STRING",
+            "Value": "RECEIVED"
+          }
+        },
+        {
+          "Key": "message.id",
+          "Value": {
+            "Type": "INT64",
+            "Value": 58
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:53.855079758Z"
+    },
+    {
+      "Name": "message",
+      "Attributes": [
+        {
+          "Key": "message.type",
+          "Value": {
+            "Type": "STRING",
+            "Value": "RECEIVED"
+          }
+        },
+        {
+          "Key": "message.id",
+          "Value": {
+            "Type": "INT64",
+            "Value": 59
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:53.939470899Z"
+    },
+    {
+      "Name": "message",
+      "Attributes": [
+        {
+          "Key": "message.type",
+          "Value": {
+            "Type": "STRING",
+            "Value": "RECEIVED"
+          }
+        },
+        {
+          "Key": "message.id",
+          "Value": {
+            "Type": "INT64",
+            "Value": 60
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:55.320568151Z"
+    },
+    {
+      "Name": "message",
+      "Attributes": [
+        {
+          "Key": "message.type",
+          "Value": {
+            "Type": "STRING",
+            "Value": "RECEIVED"
+          }
+        },
+        {
+          "Key": "message.id",
+          "Value": {
+            "Type": "INT64",
+            "Value": 61
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:56.655115777Z"
+    },
+    {
+      "Name": "message",
+      "Attributes": [
+        {
+          "Key": "message.type",
+          "Value": {
+            "Type": "STRING",
+            "Value": "RECEIVED"
+          }
+        },
+        {
+          "Key": "message.id",
+          "Value": {
+            "Type": "INT64",
+            "Value": 62
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:56.914980485Z"
+    },
+    {
+      "Name": "message",
+      "Attributes": [
+        {
+          "Key": "message.type",
+          "Value": {
+            "Type": "STRING",
+            "Value": "RECEIVED"
+          }
+        },
+        {
+          "Key": "message.id",
+          "Value": {
+            "Type": "INT64",
+            "Value": 63
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:57.186993379Z"
+    },
+    {
+      "Name": "message",
+      "Attributes": [
+        {
+          "Key": "message.type",
+          "Value": {
+            "Type": "STRING",
+            "Value": "RECEIVED"
+          }
+        },
+        {
+          "Key": "message.id",
+          "Value": {
+            "Type": "INT64",
+            "Value": 64
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:57.270160346Z"
+    },
+    {
+      "Name": "message",
+      "Attributes": [
+        {
+          "Key": "message.type",
+          "Value": {
+            "Type": "STRING",
+            "Value": "RECEIVED"
+          }
+        },
+        {
+          "Key": "message.id",
+          "Value": {
+            "Type": "INT64",
+            "Value": 65
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:57.550595907Z"
+    },
+    {
+      "Name": "message",
+      "Attributes": [
+        {
+          "Key": "message.type",
+          "Value": {
+            "Type": "STRING",
+            "Value": "RECEIVED"
+          }
+        },
+        {
+          "Key": "message.id",
+          "Value": {
+            "Type": "INT64",
+            "Value": 66
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:57.636707033Z"
+    },
+    {
+      "Name": "message",
+      "Attributes": [
+        {
+          "Key": "message.type",
+          "Value": {
+            "Type": "STRING",
+            "Value": "RECEIVED"
+          }
+        },
+        {
+          "Key": "message.id",
+          "Value": {
+            "Type": "INT64",
+            "Value": 67
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:57.910932291Z"
+    },
+    {
+      "Name": "message",
+      "Attributes": [
+        {
+          "Key": "message.type",
+          "Value": {
+            "Type": "STRING",
+            "Value": "RECEIVED"
+          }
+        },
+        {
+          "Key": "message.id",
+          "Value": {
+            "Type": "INT64",
+            "Value": 68
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:57.996460488Z"
+    },
+    {
+      "Name": "message",
+      "Attributes": [
+        {
+          "Key": "message.type",
+          "Value": {
+            "Type": "STRING",
+            "Value": "RECEIVED"
+          }
+        },
+        {
+          "Key": "message.id",
+          "Value": {
+            "Type": "INT64",
+            "Value": 69
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:58.273025208Z"
+    },
+    {
+      "Name": "message",
+      "Attributes": [
+        {
+          "Key": "message.type",
+          "Value": {
+            "Type": "STRING",
+            "Value": "RECEIVED"
+          }
+        },
+        {
+          "Key": "message.id",
+          "Value": {
+            "Type": "INT64",
+            "Value": 70
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:58.357401192Z"
+    },
+    {
+      "Name": "message",
+      "Attributes": [
+        {
+          "Key": "message.type",
+          "Value": {
+            "Type": "STRING",
+            "Value": "RECEIVED"
+          }
+        },
+        {
+          "Key": "message.id",
+          "Value": {
+            "Type": "INT64",
+            "Value": 71
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:58:01.127991199Z"
+    },
+    {
+      "Name": "message",
+      "Attributes": [
+        {
+          "Key": "message.type",
+          "Value": {
+            "Type": "STRING",
+            "Value": "RECEIVED"
+          }
+        },
+        {
+          "Key": "message.id",
+          "Value": {
+            "Type": "INT64",
+            "Value": 72
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:58:01.658083457Z"
+    },
+    {
+      "Name": "message",
+      "Attributes": [
+        {
+          "Key": "message.type",
+          "Value": {
+            "Type": "STRING",
+            "Value": "RECEIVED"
+          }
+        },
+        {
+          "Key": "message.id",
+          "Value": {
+            "Type": "INT64",
+            "Value": 73
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:58:02.656773361Z"
+    },
+    {
+      "Name": "message",
+      "Attributes": [
+        {
+          "Key": "message.type",
+          "Value": {
+            "Type": "STRING",
+            "Value": "RECEIVED"
+          }
+        },
+        {
+          "Key": "message.id",
+          "Value": {
+            "Type": "INT64",
+            "Value": 74
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:58:02.786421303Z"
+    },
+    {
+      "Name": "message",
+      "Attributes": [
+        {
+          "Key": "message.type",
+          "Value": {
+            "Type": "STRING",
+            "Value": "RECEIVED"
+          }
+        },
+        {
+          "Key": "message.id",
+          "Value": {
+            "Type": "INT64",
+            "Value": 75
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:58:05.354449314Z"
+    },
+    {
+      "Name": "message",
+      "Attributes": [
+        {
+          "Key": "message.type",
+          "Value": {
+            "Type": "STRING",
+            "Value": "RECEIVED"
+          }
+        },
+        {
+          "Key": "message.id",
+          "Value": {
+            "Type": "INT64",
+            "Value": 76
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:58:07.858012601Z"
+    },
+    {
+      "Name": "message",
+      "Attributes": [
+        {
+          "Key": "message.type",
+          "Value": {
+            "Type": "STRING",
+            "Value": "RECEIVED"
+          }
+        },
+        {
+          "Key": "message.id",
+          "Value": {
+            "Type": "INT64",
+            "Value": 77
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:58:10.082719475Z"
+    },
+    {
+      "Name": "message",
+      "Attributes": [
+        {
+          "Key": "message.type",
+          "Value": {
+            "Type": "STRING",
+            "Value": "RECEIVED"
+          }
+        },
+        {
+          "Key": "message.id",
+          "Value": {
+            "Type": "INT64",
+            "Value": 78
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:58:12.291732174Z"
+    },
+    {
+      "Name": "message",
+      "Attributes": [
+        {
+          "Key": "message.type",
+          "Value": {
+            "Type": "STRING",
+            "Value": "RECEIVED"
+          }
+        },
+        {
+          "Key": "message.id",
+          "Value": {
+            "Type": "INT64",
+            "Value": 79
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:58:14.601733662Z"
+    },
+    {
+      "Name": "message",
+      "Attributes": [
+        {
+          "Key": "message.type",
+          "Value": {
+            "Type": "STRING",
+            "Value": "RECEIVED"
+          }
+        },
+        {
+          "Key": "message.id",
+          "Value": {
+            "Type": "INT64",
+            "Value": 80
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:58:17.071072788Z"
+    },
+    {
+      "Name": "message",
+      "Attributes": [
+        {
+          "Key": "message.type",
+          "Value": {
+            "Type": "STRING",
+            "Value": "RECEIVED"
+          }
+        },
+        {
+          "Key": "message.id",
+          "Value": {
+            "Type": "INT64",
+            "Value": 81
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:58:19.619476413Z"
+    },
+    {
+      "Name": "message",
+      "Attributes": [
+        {
+          "Key": "message.type",
+          "Value": {
+            "Type": "STRING",
+            "Value": "RECEIVED"
+          }
+        },
+        {
+          "Key": "message.id",
+          "Value": {
+            "Type": "INT64",
+            "Value": 82
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:58:21.927248622Z"
+    },
+    {
+      "Name": "message",
+      "Attributes": [
+        {
+          "Key": "message.type",
+          "Value": {
+            "Type": "STRING",
+            "Value": "RECEIVED"
+          }
+        },
+        {
+          "Key": "message.id",
+          "Value": {
+            "Type": "INT64",
+            "Value": 83
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:58:24.270083074Z"
+    },
+    {
+      "Name": "message",
+      "Attributes": [
+        {
+          "Key": "message.type",
+          "Value": {
+            "Type": "STRING",
+            "Value": "RECEIVED"
+          }
+        },
+        {
+          "Key": "message.id",
+          "Value": {
+            "Type": "INT64",
+            "Value": 84
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:58:26.55550237Z"
+    },
+    {
+      "Name": "message",
+      "Attributes": [
+        {
+          "Key": "message.type",
+          "Value": {
+            "Type": "STRING",
+            "Value": "RECEIVED"
+          }
+        },
+        {
+          "Key": "message.id",
+          "Value": {
+            "Type": "INT64",
+            "Value": 85
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:58:28.933118776Z"
+    },
+    {
+      "Name": "message",
+      "Attributes": [
+        {
+          "Key": "message.type",
+          "Value": {
+            "Type": "STRING",
+            "Value": "RECEIVED"
+          }
+        },
+        {
+          "Key": "message.id",
+          "Value": {
+            "Type": "INT64",
+            "Value": 86
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:58:31.230064451Z"
+    },
+    {
+      "Name": "message",
+      "Attributes": [
+        {
+          "Key": "message.type",
+          "Value": {
+            "Type": "STRING",
+            "Value": "RECEIVED"
+          }
+        },
+        {
+          "Key": "message.id",
+          "Value": {
+            "Type": "INT64",
+            "Value": 87
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:58:33.83601258Z"
+    },
+    {
+      "Name": "message",
+      "Attributes": [
+        {
+          "Key": "message.type",
+          "Value": {
+            "Type": "STRING",
+            "Value": "RECEIVED"
+          }
+        },
+        {
+          "Key": "message.id",
+          "Value": {
+            "Type": "INT64",
+            "Value": 88
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:58:35.658402552Z"
+    },
+    {
+      "Name": "message",
+      "Attributes": [
+        {
+          "Key": "message.type",
+          "Value": {
+            "Type": "STRING",
+            "Value": "RECEIVED"
+          }
+        },
+        {
+          "Key": "message.id",
+          "Value": {
+            "Type": "INT64",
+            "Value": 89
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:58:36.076400316Z"
+    },
+    {
+      "Name": "message",
+      "Attributes": [
+        {
+          "Key": "message.type",
+          "Value": {
+            "Type": "STRING",
+            "Value": "RECEIVED"
+          }
+        },
+        {
+          "Key": "message.id",
+          "Value": {
+            "Type": "INT64",
+            "Value": 90
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:58:41.083074542Z"
+    },
+    {
+      "Name": "message",
+      "Attributes": [
+        {
+          "Key": "message.type",
+          "Value": {
+            "Type": "STRING",
+            "Value": "RECEIVED"
+          }
+        },
+        {
+          "Key": "message.id",
+          "Value": {
+            "Type": "INT64",
+            "Value": 91
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:58:45.608812981Z"
+    },
+    {
+      "Name": "message",
+      "Attributes": [
+        {
+          "Key": "message.type",
+          "Value": {
+            "Type": "STRING",
+            "Value": "RECEIVED"
+          }
+        },
+        {
+          "Key": "message.id",
+          "Value": {
+            "Type": "INT64",
+            "Value": 92
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:58:51.177282893Z"
+    },
+    {
+      "Name": "message",
+      "Attributes": [
+        {
+          "Key": "message.type",
+          "Value": {
+            "Type": "STRING",
+            "Value": "RECEIVED"
+          }
+        },
+        {
+          "Key": "message.id",
+          "Value": {
+            "Type": "INT64",
+            "Value": 93
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:58:56.545055422Z"
+    },
+    {
+      "Name": "message",
+      "Attributes": [
+        {
+          "Key": "message.type",
+          "Value": {
+            "Type": "STRING",
+            "Value": "RECEIVED"
+          }
+        },
+        {
+          "Key": "message.id",
+          "Value": {
+            "Type": "INT64",
+            "Value": 94
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:59:01.977370353Z"
+    },
+    {
+      "Name": "message",
+      "Attributes": [
+        {
+          "Key": "message.type",
+          "Value": {
+            "Type": "STRING",
+            "Value": "RECEIVED"
+          }
+        },
+        {
+          "Key": "message.id",
+          "Value": {
+            "Type": "INT64",
+            "Value": 95
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:59:05.368902631Z"
+    },
+    {
+      "Name": "message",
+      "Attributes": [
+        {
+          "Key": "message.type",
+          "Value": {
+            "Type": "STRING",
+            "Value": "RECEIVED"
+          }
+        },
+        {
+          "Key": "message.id",
+          "Value": {
+            "Type": "INT64",
+            "Value": 96
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:59:05.433713443Z"
+    },
+    {
+      "Name": "message",
+      "Attributes": [
+        {
+          "Key": "message.type",
+          "Value": {
+            "Type": "STRING",
+            "Value": "RECEIVED"
+          }
+        },
+        {
+          "Key": "message.id",
+          "Value": {
+            "Type": "INT64",
+            "Value": 97
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:59:09.127357014Z"
+    },
+    {
+      "Name": "message",
+      "Attributes": [
+        {
+          "Key": "message.type",
+          "Value": {
+            "Type": "STRING",
+            "Value": "RECEIVED"
+          }
+        },
+        {
+          "Key": "message.id",
+          "Value": {
+            "Type": "INT64",
+            "Value": 98
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:59:09.127620501Z"
+    },
+    {
+      "Name": "message",
+      "Attributes": [
+        {
+          "Key": "message.type",
+          "Value": {
+            "Type": "STRING",
+            "Value": "RECEIVED"
+          }
+        },
+        {
+          "Key": "message.id",
+          "Value": {
+            "Type": "INT64",
+            "Value": 99
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:59:09.253104626Z"
+    }
+  ],
+  "Links": null,
+  "Status": {
+    "Code": 2,
+    "Description": ""
+  },
+  "DroppedAttributes": 0,
+  "DroppedEvents": 0,
+  "DroppedLinks": 0,
+  "ChildSpanCount": 0,
+  "Resource": [
+    {
+      "Key": "service.name",
+      "Value": {
+        "Type": "STRING",
+        "Value": "buildx"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.language",
+      "Value": {
+        "Type": "STRING",
+        "Value": "go"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.name",
+      "Value": {
+        "Type": "STRING",
+        "Value": "opentelemetry"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.version",
+      "Value": {
+        "Type": "STRING",
+        "Value": "1.14.0"
+      }
+    }
+  ],
+  "InstrumentationLibrary": {
+    "Name": "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc",
+    "Version": "semver:0.40.0",
+    "SchemaURL": ""
+  }
+}
+{
+  "Name": "moby.buildkit.v1.Control/Solve",
+  "SpanContext": {
+    "TraceID": "0555ce1903feb85770d102846273073b",
+    "SpanID": "4e11b66472979a18",
+    "TraceFlags": "00",
+    "TraceState": "",
+    "Remote": false
+  },
+  "Parent": {
+    "TraceID": "0555ce1903feb85770d102846273073b",
+    "SpanID": "d30eea87f2f0241d",
+    "TraceFlags": "00",
+    "TraceState": "",
+    "Remote": false
+  },
+  "SpanKind": 3,
+  "StartTime": "2024-01-12T14:57:40.399715629Z",
+  "EndTime": "2024-01-12T14:59:09.343026371Z",
+  "Attributes": [
+    {
+      "Key": "rpc.system",
+      "Value": {
+        "Type": "STRING",
+        "Value": "grpc"
+      }
+    },
+    {
+      "Key": "rpc.service",
+      "Value": {
+        "Type": "STRING",
+        "Value": "moby.buildkit.v1.Control"
+      }
+    },
+    {
+      "Key": "rpc.method",
+      "Value": {
+        "Type": "STRING",
+        "Value": "Solve"
+      }
+    },
+    {
+      "Key": "rpc.grpc.status_code",
+      "Value": {
+        "Type": "INT64",
+        "Value": 0
+      }
+    }
+  ],
+  "Events": [
+    {
+      "Name": "message",
+      "Attributes": [
+        {
+          "Key": "message.type",
+          "Value": {
+            "Type": "STRING",
+            "Value": "SENT"
+          }
+        },
+        {
+          "Key": "message.id",
+          "Value": {
+            "Type": "INT64",
+            "Value": 1
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:40.399725737Z"
+    },
+    {
+      "Name": "message",
+      "Attributes": [
+        {
+          "Key": "message.type",
+          "Value": {
+            "Type": "STRING",
+            "Value": "RECEIVED"
+          }
+        },
+        {
+          "Key": "message.id",
+          "Value": {
+            "Type": "INT64",
+            "Value": 1
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:59:09.342194877Z"
+    }
+  ],
+  "Links": null,
+  "Status": {
+    "Code": 2,
+    "Description": ""
+  },
+  "DroppedAttributes": 0,
+  "DroppedEvents": 0,
+  "DroppedLinks": 0,
+  "ChildSpanCount": 0,
+  "Resource": [
+    {
+      "Key": "service.name",
+      "Value": {
+        "Type": "STRING",
+        "Value": "buildx"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.language",
+      "Value": {
+        "Type": "STRING",
+        "Value": "go"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.name",
+      "Value": {
+        "Type": "STRING",
+        "Value": "opentelemetry"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.version",
+      "Value": {
+        "Type": "STRING",
+        "Value": "1.14.0"
+      }
+    }
+  ],
+  "InstrumentationLibrary": {
+    "Name": "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc",
+    "Version": "semver:0.40.0",
+    "SchemaURL": ""
+  }
+}
+{
+  "Name": "moby.buildkit.v1.Control/ListenBuildHistory",
+  "SpanContext": {
+    "TraceID": "0555ce1903feb85770d102846273073b",
+    "SpanID": "65de56cee7833b9c",
+    "TraceFlags": "00",
+    "TraceState": "",
+    "Remote": false
+  },
+  "Parent": {
+    "TraceID": "0555ce1903feb85770d102846273073b",
+    "SpanID": "d30eea87f2f0241d",
+    "TraceFlags": "00",
+    "TraceState": "",
+    "Remote": false
+  },
+  "SpanKind": 3,
+  "StartTime": "2024-01-12T14:59:09.342399118Z",
+  "EndTime": "2024-01-12T14:59:09.343277766Z",
+  "Attributes": [
+    {
+      "Key": "rpc.system",
+      "Value": {
+        "Type": "STRING",
+        "Value": "grpc"
+      }
+    },
+    {
+      "Key": "rpc.service",
+      "Value": {
+        "Type": "STRING",
+        "Value": "moby.buildkit.v1.Control"
+      }
+    },
+    {
+      "Key": "rpc.method",
+      "Value": {
+        "Type": "STRING",
+        "Value": "ListenBuildHistory"
+      }
+    },
+    {
+      "Key": "rpc.grpc.status_code",
+      "Value": {
+        "Type": "INT64",
+        "Value": 0
+      }
+    }
+  ],
+  "Events": [
+    {
+      "Name": "message",
+      "Attributes": [
+        {
+          "Key": "message.type",
+          "Value": {
+            "Type": "STRING",
+            "Value": "SENT"
+          }
+        },
+        {
+          "Key": "message.id",
+          "Value": {
+            "Type": "INT64",
+            "Value": 1
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:59:09.342530045Z"
+    }
+  ],
+  "Links": null,
+  "Status": {
+    "Code": 2,
+    "Description": ""
+  },
+  "DroppedAttributes": 0,
+  "DroppedEvents": 0,
+  "DroppedLinks": 0,
+  "ChildSpanCount": 0,
+  "Resource": [
+    {
+      "Key": "service.name",
+      "Value": {
+        "Type": "STRING",
+        "Value": "buildx"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.language",
+      "Value": {
+        "Type": "STRING",
+        "Value": "go"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.name",
+      "Value": {
+        "Type": "STRING",
+        "Value": "opentelemetry"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.version",
+      "Value": {
+        "Type": "STRING",
+        "Value": "1.14.0"
+      }
+    }
+  ],
+  "InstrumentationLibrary": {
+    "Name": "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc",
+    "Version": "semver:0.40.0",
+    "SchemaURL": ""
+  }
+}
+{
+  "Name": "bake",
+  "SpanContext": {
+    "TraceID": "0555ce1903feb85770d102846273073b",
+    "SpanID": "d30eea87f2f0241d",
+    "TraceFlags": "00",
+    "TraceState": "",
+    "Remote": false
+  },
+  "Parent": {
+    "TraceID": "00000000000000000000000000000000",
+    "SpanID": "0000000000000000",
+    "TraceFlags": "00",
+    "TraceState": "",
+    "Remote": false
+  },
+  "SpanKind": 1,
+  "StartTime": "2024-01-12T14:57:39.717700035Z",
+  "EndTime": "2024-01-12T14:59:09.3441603Z",
+  "Attributes": [
+    {
+      "Key": "command",
+      "Value": {
+        "Type": "STRING",
+        "Value": "/usr/local/lib/docker/cli-plugins/docker-buildx buildx bake --set *.args.TEST_BASE_TYPE=alpine test-go"
+      }
+    }
+  ],
+  "Events": null,
+  "Links": null,
+  "Status": {
+    "Code": 2,
+    "Description": ""
+  },
+  "DroppedAttributes": 0,
+  "DroppedEvents": 0,
+  "DroppedLinks": 0,
+  "ChildSpanCount": 0,
+  "Resource": [
+    {
+      "Key": "service.name",
+      "Value": {
+        "Type": "STRING",
+        "Value": "buildx"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.language",
+      "Value": {
+        "Type": "STRING",
+        "Value": "go"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.name",
+      "Value": {
+        "Type": "STRING",
+        "Value": "opentelemetry"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.version",
+      "Value": {
+        "Type": "STRING",
+        "Value": "1.14.0"
+      }
+    }
+  ],
+  "InstrumentationLibrary": {
+    "Name": "go.opentelemetry.io/otel/sdk/tracer",
+    "Version": "",
+    "SchemaURL": ""
+  }
+}
+{
+  "Name": "[test-go 2/2] RUN --mount=type=cache,target=/pkg-cache,sharing=locked --mount=type=cache,target=/root/.cache ./test-go.bats",
+  "SpanContext": {
+    "TraceID": "0555ce1903feb85770d102846273073b",
+    "SpanID": "3bc0c8dfc74903a6",
+    "TraceFlags": "01",
+    "TraceState": "",
+    "Remote": false
+  },
+  "Parent": {
+    "TraceID": "0555ce1903feb85770d102846273073b",
+    "SpanID": "f1bb978313184255",
+    "TraceFlags": "01",
+    "TraceState": "",
+    "Remote": false
+  },
+  "SpanKind": 1,
+  "StartTime": "2024-01-12T14:57:43.296744167Z",
+  "EndTime": "2024-01-12T14:59:09.253431509Z",
+  "Attributes": [
+    {
+      "Key": "vertex",
+      "Value": {
+        "Type": "STRING",
+        "Value": "sha256:0b3c6531279269810071b85e6f8ffba4ae61f6573d281bd79e63c6456daca816"
+      }
+    }
+  ],
+  "Events": [
+    {
+      "Name": "ExecOp started",
+      "Attributes": null,
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:43.296759245Z"
+    },
+    {
+      "Name": "Container created",
+      "Attributes": null,
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:43.396638353Z"
+    },
+    {
+      "Name": "Container started",
+      "Attributes": null,
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:57:43.397050883Z"
+    },
+    {
+      "Name": "Container exited",
+      "Attributes": [
+        {
+          "Key": "exit.code",
+          "Value": {
+            "Type": "INT64",
+            "Value": 0
+          }
+        }
+      ],
+      "DroppedAttributeCount": 0,
+      "Time": "2024-01-12T14:59:09.134015538Z"
+    }
+  ],
+  "Links": null,
+  "Status": {
+    "Code": 0,
+    "Description": ""
+  },
+  "DroppedAttributes": 0,
+  "DroppedEvents": 0,
+  "DroppedLinks": 0,
+  "ChildSpanCount": 0,
+  "Resource": [
+    {
+      "Key": "service.name",
+      "Value": {
+        "Type": "STRING",
+        "Value": "dockerd"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.language",
+      "Value": {
+        "Type": "STRING",
+        "Value": "go"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.name",
+      "Value": {
+        "Type": "STRING",
+        "Value": "opentelemetry"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.version",
+      "Value": {
+        "Type": "STRING",
+        "Value": "1.19.0"
+      }
+    }
+  ],
+  "InstrumentationLibrary": {
+    "Name": "go.opentelemetry.io/otel/sdk/tracer",
+    "Version": "",
+    "SchemaURL": ""
+  }
+}
+{
+  "Name": "moby.buildkit.v1.Control/Status",
+  "SpanContext": {
+    "TraceID": "0555ce1903feb85770d102846273073b",
+    "SpanID": "b6791e17d0cf1c4e",
+    "TraceFlags": "01",
+    "TraceState": "",
+    "Remote": false
+  },
+  "Parent": {
+    "TraceID": "0555ce1903feb85770d102846273073b",
+    "SpanID": "52ac568c2da201b8",
+    "TraceFlags": "01",
+    "TraceState": "",
+    "Remote": true
+  },
+  "SpanKind": 2,
+  "StartTime": "2024-01-12T14:57:40.400083576Z",
+  "EndTime": "2024-01-12T14:59:09.258626543Z",
+  "Attributes": [
+    {
+      "Key": "rpc.system",
+      "Value": {
+        "Type": "STRING",
+        "Value": "grpc"
+      }
+    },
+    {
+      "Key": "rpc.service",
+      "Value": {
+        "Type": "STRING",
+        "Value": "moby.buildkit.v1.Control"
+      }
+    },
+    {
+      "Key": "rpc.method",
+      "Value": {
+        "Type": "STRING",
+        "Value": "Status"
+      }
+    },
+    {
+      "Key": "rpc.grpc.status_code",
+      "Value": {
+        "Type": "INT64",
+        "Value": 0
+      }
+    }
+  ],
+  "Events": null,
+  "Links": null,
+  "Status": {
+    "Code": 0,
+    "Description": ""
+  },
+  "DroppedAttributes": 0,
+  "DroppedEvents": 0,
+  "DroppedLinks": 0,
+  "ChildSpanCount": 0,
+  "Resource": [
+    {
+      "Key": "service.name",
+      "Value": {
+        "Type": "STRING",
+        "Value": "dockerd"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.language",
+      "Value": {
+        "Type": "STRING",
+        "Value": "go"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.name",
+      "Value": {
+        "Type": "STRING",
+        "Value": "opentelemetry"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.version",
+      "Value": {
+        "Type": "STRING",
+        "Value": "1.19.0"
+      }
+    }
+  ],
+  "InstrumentationLibrary": {
+    "Name": "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc",
+    "Version": "0.45.0",
+    "SchemaURL": ""
+  }
+}
+{
+  "Name": "moby.buildkit.v1.Control/Solve",
+  "SpanContext": {
+    "TraceID": "0555ce1903feb85770d102846273073b",
+    "SpanID": "f1bb978313184255",
+    "TraceFlags": "01",
+    "TraceState": "",
+    "Remote": false
+  },
+  "Parent": {
+    "TraceID": "0555ce1903feb85770d102846273073b",
+    "SpanID": "4e11b66472979a18",
+    "TraceFlags": "01",
+    "TraceState": "",
+    "Remote": true
+  },
+  "SpanKind": 2,
+  "StartTime": "2024-01-12T14:57:40.40011856Z",
+  "EndTime": "2024-01-12T14:59:09.342576396Z",
+  "Attributes": [
+    {
+      "Key": "rpc.system",
+      "Value": {
+        "Type": "STRING",
+        "Value": "grpc"
+      }
+    },
+    {
+      "Key": "rpc.service",
+      "Value": {
+        "Type": "STRING",
+        "Value": "moby.buildkit.v1.Control"
+      }
+    },
+    {
+      "Key": "rpc.method",
+      "Value": {
+        "Type": "STRING",
+        "Value": "Solve"
+      }
+    },
+    {
+      "Key": "rpc.grpc.status_code",
+      "Value": {
+        "Type": "INT64",
+        "Value": 0
+      }
+    }
+  ],
+  "Events": null,
+  "Links": null,
+  "Status": {
+    "Code": 0,
+    "Description": ""
+  },
+  "DroppedAttributes": 0,
+  "DroppedEvents": 0,
+  "DroppedLinks": 0,
+  "ChildSpanCount": 0,
+  "Resource": [
+    {
+      "Key": "service.name",
+      "Value": {
+        "Type": "STRING",
+        "Value": "dockerd"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.language",
+      "Value": {
+        "Type": "STRING",
+        "Value": "go"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.name",
+      "Value": {
+        "Type": "STRING",
+        "Value": "opentelemetry"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.version",
+      "Value": {
+        "Type": "STRING",
+        "Value": "1.19.0"
+      }
+    }
+  ],
+  "InstrumentationLibrary": {
+    "Name": "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc",
+    "Version": "0.45.0",
+    "SchemaURL": ""
+  }
+}
+{
+  "Name": "moby.buildkit.v1.Control/ListenBuildHistory",
+  "SpanContext": {
+    "TraceID": "0555ce1903feb85770d102846273073b",
+    "SpanID": "9fbd0e803dc50ab6",
+    "TraceFlags": "01",
+    "TraceState": "",
+    "Remote": false
+  },
+  "Parent": {
+    "TraceID": "0555ce1903feb85770d102846273073b",
+    "SpanID": "65de56cee7833b9c",
+    "TraceFlags": "01",
+    "TraceState": "",
+    "Remote": true
+  },
+  "SpanKind": 2,
+  "StartTime": "2024-01-12T14:59:09.342907117Z",
+  "EndTime": "2024-01-12T14:59:09.342945573Z",
+  "Attributes": [
+    {
+      "Key": "rpc.system",
+      "Value": {
+        "Type": "STRING",
+        "Value": "grpc"
+      }
+    },
+    {
+      "Key": "rpc.service",
+      "Value": {
+        "Type": "STRING",
+        "Value": "moby.buildkit.v1.Control"
+      }
+    },
+    {
+      "Key": "rpc.method",
+      "Value": {
+        "Type": "STRING",
+        "Value": "ListenBuildHistory"
+      }
+    },
+    {
+      "Key": "rpc.grpc.status_code",
+      "Value": {
+        "Type": "INT64",
+        "Value": 0
+      }
+    }
+  ],
+  "Events": null,
+  "Links": null,
+  "Status": {
+    "Code": 0,
+    "Description": ""
+  },
+  "DroppedAttributes": 0,
+  "DroppedEvents": 0,
+  "DroppedLinks": 0,
+  "ChildSpanCount": 0,
+  "Resource": [
+    {
+      "Key": "service.name",
+      "Value": {
+        "Type": "STRING",
+        "Value": "dockerd"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.language",
+      "Value": {
+        "Type": "STRING",
+        "Value": "go"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.name",
+      "Value": {
+        "Type": "STRING",
+        "Value": "opentelemetry"
+      }
+    },
+    {
+      "Key": "telemetry.sdk.version",
+      "Value": {
+        "Type": "STRING",
+        "Value": "1.19.0"
+      }
+    }
+  ],
+  "InstrumentationLibrary": {
+    "Name": "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc",
+    "Version": "0.45.0",
+    "SchemaURL": ""
+  }
+}
diff -pruN 0.19.3+ds1-4/util/otelutil/fixtures/jaeger.json 0.21.3-0ubuntu1/util/otelutil/fixtures/jaeger.json
--- 0.19.3+ds1-4/util/otelutil/fixtures/jaeger.json	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/util/otelutil/fixtures/jaeger.json	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,9542 @@
+{
+  "data": [
+    {
+      "traceID": "0555ce1903feb85770d102846273073b",
+      "spans": [
+        {
+          "traceID": "0555ce1903feb85770d102846273073b",
+          "spanID": "f5f33ab2ca194d1b",
+          "operationName": "moby.buildkit.v1.Control/ListWorkers",
+          "references": [
+            {
+              "refType": "CHILD_OF",
+              "traceID": "0555ce1903feb85770d102846273073b",
+              "spanID": "c1b335eeeaf56405"
+            }
+          ],
+          "startTime": 1705071460147265,
+          "duration": 17263,
+          "tags": [
+            {
+              "key": "rpc.system",
+              "type": "string",
+              "value": "grpc"
+            },
+            {
+              "key": "rpc.service",
+              "type": "string",
+              "value": "moby.buildkit.v1.Control"
+            },
+            {
+              "key": "rpc.method",
+              "type": "string",
+              "value": "ListWorkers"
+            },
+            {
+              "key": "rpc.grpc.status_code",
+              "type": "int64",
+              "value": 0
+            },
+            {
+              "key": "otel.library.name",
+              "type": "string",
+              "value": "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
+            },
+            {
+              "key": "otel.library.version",
+              "type": "string",
+              "value": "semver:0.40.0"
+            },
+            {
+              "key": "span.kind",
+              "type": "string",
+              "value": "client"
+            },
+            {
+              "key": "otel.status_code",
+              "type": "string",
+              "value": "OK"
+            }
+          ],
+          "logs": [
+            {
+              "timestamp": 1705071460147281,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "message"
+                },
+                {
+                  "key": "message.type",
+                  "type": "string",
+                  "value": "SENT"
+                },
+                {
+                  "key": "message.id",
+                  "type": "int64",
+                  "value": 1
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071460164526,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "message"
+                },
+                {
+                  "key": "message.type",
+                  "type": "string",
+                  "value": "RECEIVED"
+                },
+                {
+                  "key": "message.id",
+                  "type": "int64",
+                  "value": 1
+                }
+              ]
+            }
+          ],
+          "processID": "p0",
+          "warnings": null
+        },
+        {
+          "traceID": "0555ce1903feb85770d102846273073b",
+          "spanID": "1f64b377d3d03600",
+          "operationName": "moby.buildkit.v1.frontend.LLBBridge/Ping",
+          "references": [
+            {
+              "refType": "CHILD_OF",
+              "traceID": "0555ce1903feb85770d102846273073b",
+              "spanID": "c1b335eeeaf56405"
+            }
+          ],
+          "startTime": 1705071460165362,
+          "duration": 991,
+          "tags": [
+            {
+              "key": "rpc.system",
+              "type": "string",
+              "value": "grpc"
+            },
+            {
+              "key": "rpc.service",
+              "type": "string",
+              "value": "moby.buildkit.v1.frontend.LLBBridge"
+            },
+            {
+              "key": "rpc.method",
+              "type": "string",
+              "value": "Ping"
+            },
+            {
+              "key": "rpc.grpc.status_code",
+              "type": "int64",
+              "value": 0
+            },
+            {
+              "key": "otel.library.name",
+              "type": "string",
+              "value": "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
+            },
+            {
+              "key": "otel.library.version",
+              "type": "string",
+              "value": "semver:0.40.0"
+            },
+            {
+              "key": "span.kind",
+              "type": "string",
+              "value": "client"
+            },
+            {
+              "key": "otel.status_code",
+              "type": "string",
+              "value": "OK"
+            }
+          ],
+          "logs": [
+            {
+              "timestamp": 1705071460165372,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "message"
+                },
+                {
+                  "key": "message.type",
+                  "type": "string",
+                  "value": "SENT"
+                },
+                {
+                  "key": "message.id",
+                  "type": "int64",
+                  "value": 1
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071460166353,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "message"
+                },
+                {
+                  "key": "message.type",
+                  "type": "string",
+                  "value": "RECEIVED"
+                },
+                {
+                  "key": "message.id",
+                  "type": "int64",
+                  "value": 1
+                }
+              ]
+            }
+          ],
+          "processID": "p1",
+          "warnings": null
+        },
+        {
+          "traceID": "0555ce1903feb85770d102846273073b",
+          "spanID": "edb147a8e37c0b62",
+          "operationName": "moby.buildkit.v1.frontend.LLBBridge/Return",
+          "references": [
+            {
+              "refType": "CHILD_OF",
+              "traceID": "0555ce1903feb85770d102846273073b",
+              "spanID": "c1b335eeeaf56405"
+            }
+          ],
+          "startTime": 1705071460166393,
+          "duration": 800,
+          "tags": [
+            {
+              "key": "rpc.system",
+              "type": "string",
+              "value": "grpc"
+            },
+            {
+              "key": "rpc.service",
+              "type": "string",
+              "value": "moby.buildkit.v1.frontend.LLBBridge"
+            },
+            {
+              "key": "rpc.method",
+              "type": "string",
+              "value": "Return"
+            },
+            {
+              "key": "rpc.grpc.status_code",
+              "type": "int64",
+              "value": 0
+            },
+            {
+              "key": "otel.library.name",
+              "type": "string",
+              "value": "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
+            },
+            {
+              "key": "otel.library.version",
+              "type": "string",
+              "value": "semver:0.40.0"
+            },
+            {
+              "key": "span.kind",
+              "type": "string",
+              "value": "client"
+            },
+            {
+              "key": "otel.status_code",
+              "type": "string",
+              "value": "OK"
+            }
+          ],
+          "logs": [
+            {
+              "timestamp": 1705071460166401,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "message"
+                },
+                {
+                  "key": "message.type",
+                  "type": "string",
+                  "value": "SENT"
+                },
+                {
+                  "key": "message.id",
+                  "type": "int64",
+                  "value": 1
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071460167193,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "message"
+                },
+                {
+                  "key": "message.type",
+                  "type": "string",
+                  "value": "RECEIVED"
+                },
+                {
+                  "key": "message.id",
+                  "type": "int64",
+                  "value": 1
+                }
+              ]
+            }
+          ],
+          "processID": "p2",
+          "warnings": null
+        },
+        {
+          "traceID": "0555ce1903feb85770d102846273073b",
+          "spanID": "be9c91e5fc1a8511",
+          "operationName": "moby.buildkit.v1.Control/Solve",
+          "references": [
+            {
+              "refType": "CHILD_OF",
+              "traceID": "0555ce1903feb85770d102846273073b",
+              "spanID": "c1b335eeeaf56405"
+            }
+          ],
+          "startTime": 1705071460165347,
+          "duration": 1994,
+          "tags": [
+            {
+              "key": "rpc.system",
+              "type": "string",
+              "value": "grpc"
+            },
+            {
+              "key": "rpc.service",
+              "type": "string",
+              "value": "moby.buildkit.v1.Control"
+            },
+            {
+              "key": "rpc.method",
+              "type": "string",
+              "value": "Solve"
+            },
+            {
+              "key": "rpc.grpc.status_code",
+              "type": "int64",
+              "value": 0
+            },
+            {
+              "key": "otel.library.name",
+              "type": "string",
+              "value": "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
+            },
+            {
+              "key": "otel.library.version",
+              "type": "string",
+              "value": "semver:0.40.0"
+            },
+            {
+              "key": "span.kind",
+              "type": "string",
+              "value": "client"
+            },
+            {
+              "key": "otel.status_code",
+              "type": "string",
+              "value": "OK"
+            }
+          ],
+          "logs": [
+            {
+              "timestamp": 1705071460165362,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "message"
+                },
+                {
+                  "key": "message.type",
+                  "type": "string",
+                  "value": "SENT"
+                },
+                {
+                  "key": "message.id",
+                  "type": "int64",
+                  "value": 1
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071460167341,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "message"
+                },
+                {
+                  "key": "message.type",
+                  "type": "string",
+                  "value": "RECEIVED"
+                },
+                {
+                  "key": "message.id",
+                  "type": "int64",
+                  "value": 1
+                }
+              ]
+            }
+          ],
+          "processID": "p3",
+          "warnings": null
+        },
+        {
+          "traceID": "0555ce1903feb85770d102846273073b",
+          "spanID": "07376377d689755f",
+          "operationName": "moby.buildkit.v1.Control/Status",
+          "references": [
+            {
+              "refType": "CHILD_OF",
+              "traceID": "0555ce1903feb85770d102846273073b",
+              "spanID": "c1b335eeeaf56405"
+            }
+          ],
+          "startTime": 1705071460165348,
+          "duration": 2003,
+          "tags": [
+            {
+              "key": "rpc.system",
+              "type": "string",
+              "value": "grpc"
+            },
+            {
+              "key": "rpc.service",
+              "type": "string",
+              "value": "moby.buildkit.v1.Control"
+            },
+            {
+              "key": "rpc.method",
+              "type": "string",
+              "value": "Status"
+            },
+            {
+              "key": "rpc.grpc.status_code",
+              "type": "int64",
+              "value": 0
+            },
+            {
+              "key": "otel.library.name",
+              "type": "string",
+              "value": "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
+            },
+            {
+              "key": "otel.library.version",
+              "type": "string",
+              "value": "semver:0.40.0"
+            },
+            {
+              "key": "span.kind",
+              "type": "string",
+              "value": "client"
+            },
+            {
+              "key": "otel.status_code",
+              "type": "string",
+              "value": "OK"
+            }
+          ],
+          "logs": [
+            {
+              "timestamp": 1705071460165472,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "message"
+                },
+                {
+                  "key": "message.type",
+                  "type": "string",
+                  "value": "SENT"
+                },
+                {
+                  "key": "message.id",
+                  "type": "int64",
+                  "value": 1
+                }
+              ]
+            }
+          ],
+          "processID": "p4",
+          "warnings": null
+        },
+        {
+          "traceID": "0555ce1903feb85770d102846273073b",
+          "spanID": "fea7d265a9b437c2",
+          "operationName": "moby.buildkit.v1.Control/ListWorkers",
+          "references": [
+            {
+              "refType": "CHILD_OF",
+              "traceID": "0555ce1903feb85770d102846273073b",
+              "spanID": "d30eea87f2f0241d"
+            }
+          ],
+          "startTime": 1705071460350714,
+          "duration": 22218,
+          "tags": [
+            {
+              "key": "rpc.system",
+              "type": "string",
+              "value": "grpc"
+            },
+            {
+              "key": "rpc.service",
+              "type": "string",
+              "value": "moby.buildkit.v1.Control"
+            },
+            {
+              "key": "rpc.method",
+              "type": "string",
+              "value": "ListWorkers"
+            },
+            {
+              "key": "rpc.grpc.status_code",
+              "type": "int64",
+              "value": 0
+            },
+            {
+              "key": "otel.library.name",
+              "type": "string",
+              "value": "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
+            },
+            {
+              "key": "otel.library.version",
+              "type": "string",
+              "value": "semver:0.40.0"
+            },
+            {
+              "key": "span.kind",
+              "type": "string",
+              "value": "client"
+            },
+            {
+              "key": "otel.status_code",
+              "type": "string",
+              "value": "OK"
+            }
+          ],
+          "logs": [
+            {
+              "timestamp": 1705071460350738,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "message"
+                },
+                {
+                  "key": "message.type",
+                  "type": "string",
+                  "value": "SENT"
+                },
+                {
+                  "key": "message.id",
+                  "type": "int64",
+                  "value": 1
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071460372930,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "message"
+                },
+                {
+                  "key": "message.type",
+                  "type": "string",
+                  "value": "RECEIVED"
+                },
+                {
+                  "key": "message.id",
+                  "type": "int64",
+                  "value": 1
+                }
+              ]
+            }
+          ],
+          "processID": "p5",
+          "warnings": null
+        },
+        {
+          "traceID": "0555ce1903feb85770d102846273073b",
+          "spanID": "e1841a2a45f6de1a",
+          "operationName": "moby.buildkit.v1.Control/ListWorkers",
+          "references": [
+            {
+              "refType": "CHILD_OF",
+              "traceID": "0555ce1903feb85770d102846273073b",
+              "spanID": "d30eea87f2f0241d"
+            }
+          ],
+          "startTime": 1705071460382091,
+          "duration": 16275,
+          "tags": [
+            {
+              "key": "rpc.system",
+              "type": "string",
+              "value": "grpc"
+            },
+            {
+              "key": "rpc.service",
+              "type": "string",
+              "value": "moby.buildkit.v1.Control"
+            },
+            {
+              "key": "rpc.method",
+              "type": "string",
+              "value": "ListWorkers"
+            },
+            {
+              "key": "rpc.grpc.status_code",
+              "type": "int64",
+              "value": 0
+            },
+            {
+              "key": "otel.library.name",
+              "type": "string",
+              "value": "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
+            },
+            {
+              "key": "otel.library.version",
+              "type": "string",
+              "value": "semver:0.40.0"
+            },
+            {
+              "key": "span.kind",
+              "type": "string",
+              "value": "client"
+            },
+            {
+              "key": "otel.status_code",
+              "type": "string",
+              "value": "OK"
+            }
+          ],
+          "logs": [
+            {
+              "timestamp": 1705071460382111,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "message"
+                },
+                {
+                  "key": "message.type",
+                  "type": "string",
+                  "value": "SENT"
+                },
+                {
+                  "key": "message.id",
+                  "type": "int64",
+                  "value": 1
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071460398362,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "message"
+                },
+                {
+                  "key": "message.type",
+                  "type": "string",
+                  "value": "RECEIVED"
+                },
+                {
+                  "key": "message.id",
+                  "type": "int64",
+                  "value": 1
+                }
+              ]
+            }
+          ],
+          "processID": "p6",
+          "warnings": null
+        },
+        {
+          "traceID": "0555ce1903feb85770d102846273073b",
+          "spanID": "a4ee96f4ccd426ec",
+          "operationName": "moby.buildkit.v1.frontend.LLBBridge/Ping",
+          "references": [
+            {
+              "refType": "CHILD_OF",
+              "traceID": "0555ce1903feb85770d102846273073b",
+              "spanID": "d30eea87f2f0241d"
+            }
+          ],
+          "startTime": 1705071460399784,
+          "duration": 818,
+          "tags": [
+            {
+              "key": "rpc.system",
+              "type": "string",
+              "value": "grpc"
+            },
+            {
+              "key": "rpc.service",
+              "type": "string",
+              "value": "moby.buildkit.v1.frontend.LLBBridge"
+            },
+            {
+              "key": "rpc.method",
+              "type": "string",
+              "value": "Ping"
+            },
+            {
+              "key": "rpc.grpc.status_code",
+              "type": "int64",
+              "value": 0
+            },
+            {
+              "key": "otel.library.name",
+              "type": "string",
+              "value": "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
+            },
+            {
+              "key": "otel.library.version",
+              "type": "string",
+              "value": "semver:0.40.0"
+            },
+            {
+              "key": "span.kind",
+              "type": "string",
+              "value": "client"
+            },
+            {
+              "key": "otel.status_code",
+              "type": "string",
+              "value": "OK"
+            }
+          ],
+          "logs": [
+            {
+              "timestamp": 1705071460399797,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "message"
+                },
+                {
+                  "key": "message.type",
+                  "type": "string",
+                  "value": "SENT"
+                },
+                {
+                  "key": "message.id",
+                  "type": "int64",
+                  "value": 1
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071460400601,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "message"
+                },
+                {
+                  "key": "message.type",
+                  "type": "string",
+                  "value": "RECEIVED"
+                },
+                {
+                  "key": "message.id",
+                  "type": "int64",
+                  "value": 1
+                }
+              ]
+            }
+          ],
+          "processID": "p7",
+          "warnings": null
+        },
+        {
+          "traceID": "0555ce1903feb85770d102846273073b",
+          "spanID": "a6070511de38c86a",
+          "operationName": "moby.filesync.v1.FileSync/DiffCopy",
+          "references": [
+            {
+              "refType": "CHILD_OF",
+              "traceID": "0555ce1903feb85770d102846273073b",
+              "spanID": "70f459c5d3c2c488"
+            }
+          ],
+          "startTime": 1705071460409090,
+          "duration": 9580,
+          "tags": [
+            {
+              "key": "rpc.system",
+              "type": "string",
+              "value": "grpc"
+            },
+            {
+              "key": "rpc.service",
+              "type": "string",
+              "value": "moby.filesync.v1.FileSync"
+            },
+            {
+              "key": "rpc.method",
+              "type": "string",
+              "value": "DiffCopy"
+            },
+            {
+              "key": "rpc.grpc.status_code",
+              "type": "int64",
+              "value": 0
+            },
+            {
+              "key": "otel.library.name",
+              "type": "string",
+              "value": "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
+            },
+            {
+              "key": "otel.library.version",
+              "type": "string",
+              "value": "semver:0.40.0"
+            },
+            {
+              "key": "span.kind",
+              "type": "string",
+              "value": "server"
+            },
+            {
+              "key": "otel.status_code",
+              "type": "string",
+              "value": "OK"
+            }
+          ],
+          "logs": [
+            {
+              "timestamp": 1705071460415069,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "message"
+                },
+                {
+                  "key": "message.type",
+                  "type": "string",
+                  "value": "SENT"
+                },
+                {
+                  "key": "message.id",
+                  "type": "int64",
+                  "value": 1
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071460416971,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "message"
+                },
+                {
+                  "key": "message.type",
+                  "type": "string",
+                  "value": "RECEIVED"
+                },
+                {
+                  "key": "message.id",
+                  "type": "int64",
+                  "value": 1
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071460417874,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "message"
+                },
+                {
+                  "key": "message.type",
+                  "type": "string",
+                  "value": "SENT"
+                },
+                {
+                  "key": "message.id",
+                  "type": "int64",
+                  "value": 2
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071460417984,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "message"
+                },
+                {
+                  "key": "message.type",
+                  "type": "string",
+                  "value": "SENT"
+                },
+                {
+                  "key": "message.id",
+                  "type": "int64",
+                  "value": 3
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071460418118,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "message"
+                },
+                {
+                  "key": "message.type",
+                  "type": "string",
+                  "value": "SENT"
+                },
+                {
+                  "key": "message.id",
+                  "type": "int64",
+                  "value": 4
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071460418629,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "message"
+                },
+                {
+                  "key": "message.type",
+                  "type": "string",
+                  "value": "RECEIVED"
+                },
+                {
+                  "key": "message.id",
+                  "type": "int64",
+                  "value": 2
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071460418637,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "message"
+                },
+                {
+                  "key": "message.type",
+                  "type": "string",
+                  "value": "SENT"
+                },
+                {
+                  "key": "message.id",
+                  "type": "int64",
+                  "value": 5
+                }
+              ]
+            }
+          ],
+          "processID": "p8",
+          "warnings": null
+        },
+        {
+          "traceID": "0555ce1903feb85770d102846273073b",
+          "spanID": "f9e99c0194575ce4",
+          "operationName": "moby.filesync.v1.Auth/VerifyTokenAuthority",
+          "references": [
+            {
+              "refType": "CHILD_OF",
+              "traceID": "0555ce1903feb85770d102846273073b",
+              "spanID": "f3a80839f6b95729"
+            }
+          ],
+          "startTime": 1705071460445118,
+          "duration": 78249,
+          "tags": [
+            {
+              "key": "rpc.system",
+              "type": "string",
+              "value": "grpc"
+            },
+            {
+              "key": "rpc.service",
+              "type": "string",
+              "value": "moby.filesync.v1.Auth"
+            },
+            {
+              "key": "rpc.method",
+              "type": "string",
+              "value": "VerifyTokenAuthority"
+            },
+            {
+              "key": "rpc.grpc.status_code",
+              "type": "int64",
+              "value": 0
+            },
+            {
+              "key": "otel.library.name",
+              "type": "string",
+              "value": "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
+            },
+            {
+              "key": "otel.library.version",
+              "type": "string",
+              "value": "semver:0.40.0"
+            },
+            {
+              "key": "span.kind",
+              "type": "string",
+              "value": "server"
+            },
+            {
+              "key": "otel.status_code",
+              "type": "string",
+              "value": "OK"
+            }
+          ],
+          "logs": [
+            {
+              "timestamp": 1705071460445122,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "message"
+                },
+                {
+                  "key": "message.type",
+                  "type": "string",
+                  "value": "RECEIVED"
+                },
+                {
+                  "key": "message.id",
+                  "type": "int64",
+                  "value": 1
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071460523361,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "message"
+                },
+                {
+                  "key": "message.type",
+                  "type": "string",
+                  "value": "SENT"
+                },
+                {
+                  "key": "message.id",
+                  "type": "int64",
+                  "value": 1
+                }
+              ]
+            }
+          ],
+          "processID": "p9",
+          "warnings": null
+        },
+        {
+          "traceID": "0555ce1903feb85770d102846273073b",
+          "spanID": "78a4e25bc2d4278d",
+          "operationName": "moby.filesync.v1.Auth/FetchToken",
+          "references": [
+            {
+              "refType": "CHILD_OF",
+              "traceID": "0555ce1903feb85770d102846273073b",
+              "spanID": "f26320c4e0b697ac"
+            }
+          ],
+          "startTime": 1705071460525027,
+          "duration": 375884,
+          "tags": [
+            {
+              "key": "rpc.system",
+              "type": "string",
+              "value": "grpc"
+            },
+            {
+              "key": "rpc.service",
+              "type": "string",
+              "value": "moby.filesync.v1.Auth"
+            },
+            {
+              "key": "rpc.method",
+              "type": "string",
+              "value": "FetchToken"
+            },
+            {
+              "key": "rpc.grpc.status_code",
+              "type": "int64",
+              "value": 0
+            },
+            {
+              "key": "otel.library.name",
+              "type": "string",
+              "value": "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
+            },
+            {
+              "key": "otel.library.version",
+              "type": "string",
+              "value": "semver:0.40.0"
+            },
+            {
+              "key": "span.kind",
+              "type": "string",
+              "value": "server"
+            },
+            {
+              "key": "otel.status_code",
+              "type": "string",
+              "value": "OK"
+            }
+          ],
+          "logs": [
+            {
+              "timestamp": 1705071460525030,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "message"
+                },
+                {
+                  "key": "message.type",
+                  "type": "string",
+                  "value": "RECEIVED"
+                },
+                {
+                  "key": "message.id",
+                  "type": "int64",
+                  "value": 1
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071460900908,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "message"
+                },
+                {
+                  "key": "message.type",
+                  "type": "string",
+                  "value": "SENT"
+                },
+                {
+                  "key": "message.id",
+                  "type": "int64",
+                  "value": 1
+                }
+              ]
+            }
+          ],
+          "processID": "p10",
+          "warnings": null
+        },
+        {
+          "traceID": "0555ce1903feb85770d102846273073b",
+          "spanID": "03f88200c8224808",
+          "operationName": "moby.filesync.v1.FileSync/DiffCopy",
+          "references": [
+            {
+              "refType": "CHILD_OF",
+              "traceID": "0555ce1903feb85770d102846273073b",
+              "spanID": "50509a5bad270153"
+            }
+          ],
+          "startTime": 1705071461456522,
+          "duration": 7817,
+          "tags": [
+            {
+              "key": "rpc.system",
+              "type": "string",
+              "value": "grpc"
+            },
+            {
+              "key": "rpc.service",
+              "type": "string",
+              "value": "moby.filesync.v1.FileSync"
+            },
+            {
+              "key": "rpc.method",
+              "type": "string",
+              "value": "DiffCopy"
+            },
+            {
+              "key": "rpc.grpc.status_code",
+              "type": "int64",
+              "value": 0
+            },
+            {
+              "key": "otel.library.name",
+              "type": "string",
+              "value": "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
+            },
+            {
+              "key": "otel.library.version",
+              "type": "string",
+              "value": "semver:0.40.0"
+            },
+            {
+              "key": "span.kind",
+              "type": "string",
+              "value": "server"
+            },
+            {
+              "key": "otel.status_code",
+              "type": "string",
+              "value": "OK"
+            }
+          ],
+          "logs": [
+            {
+              "timestamp": 1705071461463714,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "message"
+                },
+                {
+                  "key": "message.type",
+                  "type": "string",
+                  "value": "SENT"
+                },
+                {
+                  "key": "message.id",
+                  "type": "int64",
+                  "value": 1
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071461464300,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "message"
+                },
+                {
+                  "key": "message.type",
+                  "type": "string",
+                  "value": "RECEIVED"
+                },
+                {
+                  "key": "message.id",
+                  "type": "int64",
+                  "value": 1
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071461464304,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "message"
+                },
+                {
+                  "key": "message.type",
+                  "type": "string",
+                  "value": "SENT"
+                },
+                {
+                  "key": "message.id",
+                  "type": "int64",
+                  "value": 2
+                }
+              ]
+            }
+          ],
+          "processID": "p11",
+          "warnings": null
+        },
+        {
+          "traceID": "0555ce1903feb85770d102846273073b",
+          "spanID": "6d33d51dcc276bdb",
+          "operationName": "moby.filesync.v1.Auth/VerifyTokenAuthority",
+          "references": [
+            {
+              "refType": "CHILD_OF",
+              "traceID": "0555ce1903feb85770d102846273073b",
+              "spanID": "6b3f3a9d50fbe98b"
+            }
+          ],
+          "startTime": 1705071461494006,
+          "duration": 224,
+          "tags": [
+            {
+              "key": "rpc.system",
+              "type": "string",
+              "value": "grpc"
+            },
+            {
+              "key": "rpc.service",
+              "type": "string",
+              "value": "moby.filesync.v1.Auth"
+            },
+            {
+              "key": "rpc.method",
+              "type": "string",
+              "value": "VerifyTokenAuthority"
+            },
+            {
+              "key": "rpc.grpc.status_code",
+              "type": "int64",
+              "value": 0
+            },
+            {
+              "key": "otel.library.name",
+              "type": "string",
+              "value": "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
+            },
+            {
+              "key": "otel.library.version",
+              "type": "string",
+              "value": "semver:0.40.0"
+            },
+            {
+              "key": "span.kind",
+              "type": "string",
+              "value": "server"
+            },
+            {
+              "key": "otel.status_code",
+              "type": "string",
+              "value": "OK"
+            }
+          ],
+          "logs": [
+            {
+              "timestamp": 1705071461494012,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "message"
+                },
+                {
+                  "key": "message.type",
+                  "type": "string",
+                  "value": "RECEIVED"
+                },
+                {
+                  "key": "message.id",
+                  "type": "int64",
+                  "value": 1
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071461494229,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "message"
+                },
+                {
+                  "key": "message.type",
+                  "type": "string",
+                  "value": "SENT"
+                },
+                {
+                  "key": "message.id",
+                  "type": "int64",
+                  "value": 1
+                }
+              ]
+            }
+          ],
+          "processID": "p12",
+          "warnings": null
+        },
+        {
+          "traceID": "0555ce1903feb85770d102846273073b",
+          "spanID": "76b7ceb8af988ea9",
+          "operationName": "moby.filesync.v1.Auth/VerifyTokenAuthority",
+          "references": [
+            {
+              "refType": "CHILD_OF",
+              "traceID": "0555ce1903feb85770d102846273073b",
+              "spanID": "f857d0002991458c"
+            }
+          ],
+          "startTime": 1705071461498132,
+          "duration": 181,
+          "tags": [
+            {
+              "key": "rpc.system",
+              "type": "string",
+              "value": "grpc"
+            },
+            {
+              "key": "rpc.service",
+              "type": "string",
+              "value": "moby.filesync.v1.Auth"
+            },
+            {
+              "key": "rpc.method",
+              "type": "string",
+              "value": "VerifyTokenAuthority"
+            },
+            {
+              "key": "rpc.grpc.status_code",
+              "type": "int64",
+              "value": 0
+            },
+            {
+              "key": "otel.library.name",
+              "type": "string",
+              "value": "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
+            },
+            {
+              "key": "otel.library.version",
+              "type": "string",
+              "value": "semver:0.40.0"
+            },
+            {
+              "key": "span.kind",
+              "type": "string",
+              "value": "server"
+            },
+            {
+              "key": "otel.status_code",
+              "type": "string",
+              "value": "OK"
+            }
+          ],
+          "logs": [
+            {
+              "timestamp": 1705071461498134,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "message"
+                },
+                {
+                  "key": "message.type",
+                  "type": "string",
+                  "value": "RECEIVED"
+                },
+                {
+                  "key": "message.id",
+                  "type": "int64",
+                  "value": 1
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071461498312,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "message"
+                },
+                {
+                  "key": "message.type",
+                  "type": "string",
+                  "value": "SENT"
+                },
+                {
+                  "key": "message.id",
+                  "type": "int64",
+                  "value": 1
+                }
+              ]
+            }
+          ],
+          "processID": "p13",
+          "warnings": null
+        },
+        {
+          "traceID": "0555ce1903feb85770d102846273073b",
+          "spanID": "cc230bba43af76bf",
+          "operationName": "moby.filesync.v1.Auth/FetchToken",
+          "references": [
+            {
+              "refType": "CHILD_OF",
+              "traceID": "0555ce1903feb85770d102846273073b",
+              "spanID": "912de85fdfd20fe7"
+            }
+          ],
+          "startTime": 1705071461499095,
+          "duration": 342482,
+          "tags": [
+            {
+              "key": "rpc.system",
+              "type": "string",
+              "value": "grpc"
+            },
+            {
+              "key": "rpc.service",
+              "type": "string",
+              "value": "moby.filesync.v1.Auth"
+            },
+            {
+              "key": "rpc.method",
+              "type": "string",
+              "value": "FetchToken"
+            },
+            {
+              "key": "rpc.grpc.status_code",
+              "type": "int64",
+              "value": 0
+            },
+            {
+              "key": "otel.library.name",
+              "type": "string",
+              "value": "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
+            },
+            {
+              "key": "otel.library.version",
+              "type": "string",
+              "value": "semver:0.40.0"
+            },
+            {
+              "key": "span.kind",
+              "type": "string",
+              "value": "server"
+            },
+            {
+              "key": "otel.status_code",
+              "type": "string",
+              "value": "OK"
+            }
+          ],
+          "logs": [
+            {
+              "timestamp": 1705071461499097,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "message"
+                },
+                {
+                  "key": "message.type",
+                  "type": "string",
+                  "value": "RECEIVED"
+                },
+                {
+                  "key": "message.id",
+                  "type": "int64",
+                  "value": 1
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071461841573,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "message"
+                },
+                {
+                  "key": "message.type",
+                  "type": "string",
+                  "value": "SENT"
+                },
+                {
+                  "key": "message.id",
+                  "type": "int64",
+                  "value": 1
+                }
+              ]
+            }
+          ],
+          "processID": "p14",
+          "warnings": null
+        },
+        {
+          "traceID": "0555ce1903feb85770d102846273073b",
+          "spanID": "e57242e59af94e80",
+          "operationName": "moby.filesync.v1.Auth/FetchToken",
+          "references": [
+            {
+              "refType": "CHILD_OF",
+              "traceID": "0555ce1903feb85770d102846273073b",
+              "spanID": "d9ed48c061e2ecfd"
+            }
+          ],
+          "startTime": 1705071461494901,
+          "duration": 349073,
+          "tags": [
+            {
+              "key": "rpc.system",
+              "type": "string",
+              "value": "grpc"
+            },
+            {
+              "key": "rpc.service",
+              "type": "string",
+              "value": "moby.filesync.v1.Auth"
+            },
+            {
+              "key": "rpc.method",
+              "type": "string",
+              "value": "FetchToken"
+            },
+            {
+              "key": "rpc.grpc.status_code",
+              "type": "int64",
+              "value": 0
+            },
+            {
+              "key": "otel.library.name",
+              "type": "string",
+              "value": "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
+            },
+            {
+              "key": "otel.library.version",
+              "type": "string",
+              "value": "semver:0.40.0"
+            },
+            {
+              "key": "span.kind",
+              "type": "string",
+              "value": "server"
+            },
+            {
+              "key": "otel.status_code",
+              "type": "string",
+              "value": "OK"
+            }
+          ],
+          "logs": [
+            {
+              "timestamp": 1705071461494904,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "message"
+                },
+                {
+                  "key": "message.type",
+                  "type": "string",
+                  "value": "RECEIVED"
+                },
+                {
+                  "key": "message.id",
+                  "type": "int64",
+                  "value": 1
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071461843972,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "message"
+                },
+                {
+                  "key": "message.type",
+                  "type": "string",
+                  "value": "SENT"
+                },
+                {
+                  "key": "message.id",
+                  "type": "int64",
+                  "value": 1
+                }
+              ]
+            }
+          ],
+          "processID": "p15",
+          "warnings": null
+        },
+        {
+          "traceID": "0555ce1903feb85770d102846273073b",
+          "spanID": "4548113210977542",
+          "operationName": "moby.buildkit.v1.frontend.LLBBridge/Solve",
+          "references": [
+            {
+              "refType": "CHILD_OF",
+              "traceID": "0555ce1903feb85770d102846273073b",
+              "spanID": "d30eea87f2f0241d"
+            }
+          ],
+          "startTime": 1705071460400661,
+          "duration": 1884905,
+          "tags": [
+            {
+              "key": "rpc.system",
+              "type": "string",
+              "value": "grpc"
+            },
+            {
+              "key": "rpc.service",
+              "type": "string",
+              "value": "moby.buildkit.v1.frontend.LLBBridge"
+            },
+            {
+              "key": "rpc.method",
+              "type": "string",
+              "value": "Solve"
+            },
+            {
+              "key": "rpc.grpc.status_code",
+              "type": "int64",
+              "value": 0
+            },
+            {
+              "key": "otel.library.name",
+              "type": "string",
+              "value": "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
+            },
+            {
+              "key": "otel.library.version",
+              "type": "string",
+              "value": "semver:0.40.0"
+            },
+            {
+              "key": "span.kind",
+              "type": "string",
+              "value": "client"
+            },
+            {
+              "key": "otel.status_code",
+              "type": "string",
+              "value": "OK"
+            }
+          ],
+          "logs": [
+            {
+              "timestamp": 1705071460400666,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "message"
+                },
+                {
+                  "key": "message.type",
+                  "type": "string",
+                  "value": "SENT"
+                },
+                {
+                  "key": "message.id",
+                  "type": "int64",
+                  "value": 1
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071462285564,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "message"
+                },
+                {
+                  "key": "message.type",
+                  "type": "string",
+                  "value": "RECEIVED"
+                },
+                {
+                  "key": "message.id",
+                  "type": "int64",
+                  "value": 1
+                }
+              ]
+            }
+          ],
+          "processID": "p16",
+          "warnings": null
+        },
+        {
+          "traceID": "0555ce1903feb85770d102846273073b",
+          "spanID": "76cae146240f0505",
+          "operationName": "moby.buildkit.v1.frontend.LLBBridge/Return",
+          "references": [
+            {
+              "refType": "CHILD_OF",
+              "traceID": "0555ce1903feb85770d102846273073b",
+              "spanID": "d30eea87f2f0241d"
+            }
+          ],
+          "startTime": 1705071462285646,
+          "duration": 942,
+          "tags": [
+            {
+              "key": "rpc.system",
+              "type": "string",
+              "value": "grpc"
+            },
+            {
+              "key": "rpc.service",
+              "type": "string",
+              "value": "moby.buildkit.v1.frontend.LLBBridge"
+            },
+            {
+              "key": "rpc.method",
+              "type": "string",
+              "value": "Return"
+            },
+            {
+              "key": "rpc.grpc.status_code",
+              "type": "int64",
+              "value": 0
+            },
+            {
+              "key": "otel.library.name",
+              "type": "string",
+              "value": "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
+            },
+            {
+              "key": "otel.library.version",
+              "type": "string",
+              "value": "semver:0.40.0"
+            },
+            {
+              "key": "span.kind",
+              "type": "string",
+              "value": "client"
+            },
+            {
+              "key": "otel.status_code",
+              "type": "string",
+              "value": "OK"
+            }
+          ],
+          "logs": [
+            {
+              "timestamp": 1705071462285663,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "message"
+                },
+                {
+                  "key": "message.type",
+                  "type": "string",
+                  "value": "SENT"
+                },
+                {
+                  "key": "message.id",
+                  "type": "int64",
+                  "value": 1
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071462286587,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "message"
+                },
+                {
+                  "key": "message.type",
+                  "type": "string",
+                  "value": "RECEIVED"
+                },
+                {
+                  "key": "message.id",
+                  "type": "int64",
+                  "value": 1
+                }
+              ]
+            }
+          ],
+          "processID": "p17",
+          "warnings": null
+        },
+        {
+          "traceID": "0555ce1903feb85770d102846273073b",
+          "spanID": "a0d9ee53b31b1760",
+          "operationName": "moby.filesync.v1.FileSync/DiffCopy",
+          "references": [
+            {
+              "refType": "CHILD_OF",
+              "traceID": "0555ce1903feb85770d102846273073b",
+              "spanID": "0f4681a3ad4d29f8"
+            }
+          ],
+          "startTime": 1705071462293696,
+          "duration": 75573,
+          "tags": [
+            {
+              "key": "rpc.system",
+              "type": "string",
+              "value": "grpc"
+            },
+            {
+              "key": "rpc.service",
+              "type": "string",
+              "value": "moby.filesync.v1.FileSync"
+            },
+            {
+              "key": "rpc.method",
+              "type": "string",
+              "value": "DiffCopy"
+            },
+            {
+              "key": "rpc.grpc.status_code",
+              "type": "int64",
+              "value": 0
+            },
+            {
+              "key": "otel.library.name",
+              "type": "string",
+              "value": "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
+            },
+            {
+              "key": "otel.library.version",
+              "type": "string",
+              "value": "semver:0.40.0"
+            },
+            {
+              "key": "span.kind",
+              "type": "string",
+              "value": "server"
+            },
+            {
+              "key": "otel.status_code",
+              "type": "string",
+              "value": "OK"
+            }
+          ],
+          "logs": [
+            {
+              "timestamp": 1705071462310893,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "message"
+                },
+                {
+                  "key": "message.type",
+                  "type": "string",
+                  "value": "SENT"
+                },
+                {
+                  "key": "message.id",
+                  "type": "int64",
+                  "value": 1
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071462316940,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "message"
+                },
+                {
+                  "key": "message.type",
+                  "type": "string",
+                  "value": "SENT"
+                },
+                {
+                  "key": "message.id",
+                  "type": "int64",
+                  "value": 2
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071462320055,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "message"
+                },
+                {
+                  "key": "message.type",
+                  "type": "string",
+                  "value": "SENT"
+                },
+                {
+                  "key": "message.id",
+                  "type": "int64",
+                  "value": 3
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071462322700,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "message"
+                },
+                {
+                  "key": "message.type",
+                  "type": "string",
+                  "value": "SENT"
+                },
+                {
+                  "key": "message.id",
+                  "type": "int64",
+                  "value": 4
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071462325137,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "message"
+                },
+                {
+                  "key": "message.type",
+                  "type": "string",
+                  "value": "SENT"
+                },
+                {
+                  "key": "message.id",
+                  "type": "int64",
+                  "value": 5
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071462329895,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "message"
+                },
+                {
+                  "key": "message.type",
+                  "type": "string",
+                  "value": "SENT"
+                },
+                {
+                  "key": "message.id",
+                  "type": "int64",
+                  "value": 6
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071462333228,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "message"
+                },
+                {
+                  "key": "message.type",
+                  "type": "string",
+                  "value": "SENT"
+                },
+                {
+                  "key": "message.id",
+                  "type": "int64",
+                  "value": 7
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071462339031,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "message"
+                },
+                {
+                  "key": "message.type",
+                  "type": "string",
+                  "value": "SENT"
+                },
+                {
+                  "key": "message.id",
+                  "type": "int64",
+                  "value": 8
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071462341442,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "message"
+                },
+                {
+                  "key": "message.type",
+                  "type": "string",
+                  "value": "SENT"
+                },
+                {
+                  "key": "message.id",
+                  "type": "int64",
+                  "value": 9
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071462350695,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "message"
+                },
+                {
+                  "key": "message.type",
+                  "type": "string",
+                  "value": "SENT"
+                },
+                {
+                  "key": "message.id",
+                  "type": "int64",
+                  "value": 10
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071462352515,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "message"
+                },
+                {
+                  "key": "message.type",
+                  "type": "string",
+                  "value": "SENT"
+                },
+                {
+                  "key": "message.id",
+                  "type": "int64",
+                  "value": 11
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071462354393,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "message"
+                },
+                {
+                  "key": "message.type",
+                  "type": "string",
+                  "value": "SENT"
+                },
+                {
+                  "key": "message.id",
+                  "type": "int64",
+                  "value": 12
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071462356241,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "message"
+                },
+                {
+                  "key": "message.type",
+                  "type": "string",
+                  "value": "SENT"
+                },
+                {
+                  "key": "message.id",
+                  "type": "int64",
+                  "value": 13
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071462358001,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "message"
+                },
+                {
+                  "key": "message.type",
+                  "type": "string",
+                  "value": "SENT"
+                },
+                {
+                  "key": "message.id",
+                  "type": "int64",
+                  "value": 14
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071462359717,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "message"
+                },
+                {
+                  "key": "message.type",
+                  "type": "string",
+                  "value": "SENT"
+                },
+                {
+                  "key": "message.id",
+                  "type": "int64",
+                  "value": 15
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071462361436,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "message"
+                },
+                {
+                  "key": "message.type",
+                  "type": "string",
+                  "value": "SENT"
+                },
+                {
+                  "key": "message.id",
+                  "type": "int64",
+                  "value": 16
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071462362075,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "message"
+                },
+                {
+                  "key": "message.type",
+                  "type": "string",
+                  "value": "RECEIVED"
+                },
+                {
+                  "key": "message.id",
+                  "type": "int64",
+                  "value": 1
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071462363162,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "message"
+                },
+                {
+                  "key": "message.type",
+                  "type": "string",
+                  "value": "SENT"
+                },
+                {
+                  "key": "message.id",
+                  "type": "int64",
+                  "value": 17
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071462363331,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "message"
+                },
+                {
+                  "key": "message.type",
+                  "type": "string",
+                  "value": "SENT"
+                },
+                {
+                  "key": "message.id",
+                  "type": "int64",
+                  "value": 18
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071462363577,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "message"
+                },
+                {
+                  "key": "message.type",
+                  "type": "string",
+                  "value": "SENT"
+                },
+                {
+                  "key": "message.id",
+                  "type": "int64",
+                  "value": 19
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071462364196,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "message"
+                },
+                {
+                  "key": "message.type",
+                  "type": "string",
+                  "value": "RECEIVED"
+                },
+                {
+                  "key": "message.id",
+                  "type": "int64",
+                  "value": 2
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071462365070,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "message"
+                },
+                {
+                  "key": "message.type",
+                  "type": "string",
+                  "value": "SENT"
+                },
+                {
+                  "key": "message.id",
+                  "type": "int64",
+                  "value": 20
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071462365153,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "message"
+                },
+                {
+                  "key": "message.type",
+                  "type": "string",
+                  "value": "SENT"
+                },
+                {
+                  "key": "message.id",
+                  "type": "int64",
+                  "value": 21
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071462365298,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "message"
+                },
+                {
+                  "key": "message.type",
+                  "type": "string",
+                  "value": "SENT"
+                },
+                {
+                  "key": "message.id",
+                  "type": "int64",
+                  "value": 22
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071462366953,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "message"
+                },
+                {
+                  "key": "message.type",
+                  "type": "string",
+                  "value": "SENT"
+                },
+                {
+                  "key": "message.id",
+                  "type": "int64",
+                  "value": 23
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071462368829,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "message"
+                },
+                {
+                  "key": "message.type",
+                  "type": "string",
+                  "value": "SENT"
+                },
+                {
+                  "key": "message.id",
+                  "type": "int64",
+                  "value": 24
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071462368865,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "message"
+                },
+                {
+                  "key": "message.type",
+                  "type": "string",
+                  "value": "SENT"
+                },
+                {
+                  "key": "message.id",
+                  "type": "int64",
+                  "value": 25
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071462369254,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "message"
+                },
+                {
+                  "key": "message.type",
+                  "type": "string",
+                  "value": "RECEIVED"
+                },
+                {
+                  "key": "message.id",
+                  "type": "int64",
+                  "value": 3
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071462369257,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "message"
+                },
+                {
+                  "key": "message.type",
+                  "type": "string",
+                  "value": "SENT"
+                },
+                {
+                  "key": "message.id",
+                  "type": "int64",
+                  "value": 26
+                }
+              ]
+            }
+          ],
+          "processID": "p18",
+          "warnings": null
+        },
+        {
+          "traceID": "0555ce1903feb85770d102846273073b",
+          "spanID": "c1b335eeeaf56405",
+          "operationName": "load buildkit capabilities",
+          "references": [
+            {
+              "refType": "CHILD_OF",
+              "traceID": "0555ce1903feb85770d102846273073b",
+              "spanID": "d30eea87f2f0241d"
+            }
+          ],
+          "startTime": 1705071460147185,
+          "duration": 20792,
+          "tags": [
+            {
+              "key": "otel.library.name",
+              "type": "string",
+              "value": "go.opentelemetry.io/otel/sdk/tracer"
+            },
+            {
+              "key": "otel.status_code",
+              "type": "string",
+              "value": "OK"
+            }
+          ],
+          "logs": null,
+          "processID": "p19",
+          "warnings": null
+        },
+        {
+          "traceID": "0555ce1903feb85770d102846273073b",
+          "spanID": "014a492f789cd8df",
+          "flags": 1,
+          "operationName": "moby.buildkit.v1.Control/ListWorkers",
+          "references": [
+            {
+              "refType": "CHILD_OF",
+              "traceID": "0555ce1903feb85770d102846273073b",
+              "spanID": "f5f33ab2ca194d1b"
+            }
+          ],
+          "startTime": 1705071460150333,
+          "duration": 13610,
+          "tags": [
+            {
+              "key": "rpc.system",
+              "type": "string",
+              "value": "grpc"
+            },
+            {
+              "key": "rpc.service",
+              "type": "string",
+              "value": "moby.buildkit.v1.Control"
+            },
+            {
+              "key": "rpc.method",
+              "type": "string",
+              "value": "ListWorkers"
+            },
+            {
+              "key": "rpc.grpc.status_code",
+              "type": "int64",
+              "value": 0
+            },
+            {
+              "key": "otel.library.name",
+              "type": "string",
+              "value": "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
+            },
+            {
+              "key": "otel.library.version",
+              "type": "string",
+              "value": "0.45.0"
+            },
+            {
+              "key": "span.kind",
+              "type": "string",
+              "value": "server"
+            }
+          ],
+          "logs": null,
+          "processID": "p20",
+          "warnings": null
+        },
+        {
+          "traceID": "0555ce1903feb85770d102846273073b",
+          "spanID": "348c867020d36163",
+          "flags": 1,
+          "operationName": "moby.buildkit.v1.frontend.LLBBridge/Ping",
+          "references": [
+            {
+              "refType": "CHILD_OF",
+              "traceID": "0555ce1903feb85770d102846273073b",
+              "spanID": "1f64b377d3d03600"
+            }
+          ],
+          "startTime": 1705071460165969,
+          "duration": 74,
+          "tags": [
+            {
+              "key": "rpc.system",
+              "type": "string",
+              "value": "grpc"
+            },
+            {
+              "key": "rpc.service",
+              "type": "string",
+              "value": "moby.buildkit.v1.frontend.LLBBridge"
+            },
+            {
+              "key": "rpc.method",
+              "type": "string",
+              "value": "Ping"
+            },
+            {
+              "key": "rpc.grpc.status_code",
+              "type": "int64",
+              "value": 0
+            },
+            {
+              "key": "otel.library.name",
+              "type": "string",
+              "value": "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
+            },
+            {
+              "key": "otel.library.version",
+              "type": "string",
+              "value": "0.45.0"
+            },
+            {
+              "key": "span.kind",
+              "type": "string",
+              "value": "server"
+            }
+          ],
+          "logs": null,
+          "processID": "p21",
+          "warnings": null
+        },
+        {
+          "traceID": "0555ce1903feb85770d102846273073b",
+          "spanID": "8b7d202043a4ce8e",
+          "flags": 1,
+          "operationName": "moby.buildkit.v1.frontend.LLBBridge/Return",
+          "references": [
+            {
+              "refType": "CHILD_OF",
+              "traceID": "0555ce1903feb85770d102846273073b",
+              "spanID": "edb147a8e37c0b62"
+            }
+          ],
+          "startTime": 1705071460166855,
+          "duration": 48,
+          "tags": [
+            {
+              "key": "rpc.system",
+              "type": "string",
+              "value": "grpc"
+            },
+            {
+              "key": "rpc.service",
+              "type": "string",
+              "value": "moby.buildkit.v1.frontend.LLBBridge"
+            },
+            {
+              "key": "rpc.method",
+              "type": "string",
+              "value": "Return"
+            },
+            {
+              "key": "rpc.grpc.status_code",
+              "type": "int64",
+              "value": 0
+            },
+            {
+              "key": "otel.library.name",
+              "type": "string",
+              "value": "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
+            },
+            {
+              "key": "otel.library.version",
+              "type": "string",
+              "value": "0.45.0"
+            },
+            {
+              "key": "span.kind",
+              "type": "string",
+              "value": "server"
+            }
+          ],
+          "logs": null,
+          "processID": "p22",
+          "warnings": null
+        },
+        {
+          "traceID": "0555ce1903feb85770d102846273073b",
+          "spanID": "3514a2ca1fb59328",
+          "flags": 1,
+          "operationName": "moby.buildkit.v1.Control/Solve",
+          "references": [
+            {
+              "refType": "CHILD_OF",
+              "traceID": "0555ce1903feb85770d102846273073b",
+              "spanID": "be9c91e5fc1a8511"
+            }
+          ],
+          "startTime": 1705071460165882,
+          "duration": 1215,
+          "tags": [
+            {
+              "key": "rpc.system",
+              "type": "string",
+              "value": "grpc"
+            },
+            {
+              "key": "rpc.service",
+              "type": "string",
+              "value": "moby.buildkit.v1.Control"
+            },
+            {
+              "key": "rpc.method",
+              "type": "string",
+              "value": "Solve"
+            },
+            {
+              "key": "rpc.grpc.status_code",
+              "type": "int64",
+              "value": 0
+            },
+            {
+              "key": "otel.library.name",
+              "type": "string",
+              "value": "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
+            },
+            {
+              "key": "otel.library.version",
+              "type": "string",
+              "value": "0.45.0"
+            },
+            {
+              "key": "span.kind",
+              "type": "string",
+              "value": "server"
+            }
+          ],
+          "logs": null,
+          "processID": "p23",
+          "warnings": null
+        },
+        {
+          "traceID": "0555ce1903feb85770d102846273073b",
+          "spanID": "5cd96a37a4409495",
+          "flags": 1,
+          "operationName": "moby.buildkit.v1.Control/Status",
+          "references": [
+            {
+              "refType": "CHILD_OF",
+              "traceID": "0555ce1903feb85770d102846273073b",
+              "spanID": "07376377d689755f"
+            }
+          ],
+          "startTime": 1705071460165806,
+          "duration": 1307,
+          "tags": [
+            {
+              "key": "rpc.system",
+              "type": "string",
+              "value": "grpc"
+            },
+            {
+              "key": "rpc.service",
+              "type": "string",
+              "value": "moby.buildkit.v1.Control"
+            },
+            {
+              "key": "rpc.method",
+              "type": "string",
+              "value": "Status"
+            },
+            {
+              "key": "rpc.grpc.status_code",
+              "type": "int64",
+              "value": 0
+            },
+            {
+              "key": "otel.library.name",
+              "type": "string",
+              "value": "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
+            },
+            {
+              "key": "otel.library.version",
+              "type": "string",
+              "value": "0.45.0"
+            },
+            {
+              "key": "span.kind",
+              "type": "string",
+              "value": "server"
+            }
+          ],
+          "logs": null,
+          "processID": "p24",
+          "warnings": null
+        },
+        {
+          "traceID": "0555ce1903feb85770d102846273073b",
+          "spanID": "765af2593e65e310",
+          "flags": 1,
+          "operationName": "moby.buildkit.v1.Control/ListWorkers",
+          "references": [
+            {
+              "refType": "CHILD_OF",
+              "traceID": "0555ce1903feb85770d102846273073b",
+              "spanID": "fea7d265a9b437c2"
+            }
+          ],
+          "startTime": 1705071460355139,
+          "duration": 17182,
+          "tags": [
+            {
+              "key": "rpc.system",
+              "type": "string",
+              "value": "grpc"
+            },
+            {
+              "key": "rpc.service",
+              "type": "string",
+              "value": "moby.buildkit.v1.Control"
+            },
+            {
+              "key": "rpc.method",
+              "type": "string",
+              "value": "ListWorkers"
+            },
+            {
+              "key": "rpc.grpc.status_code",
+              "type": "int64",
+              "value": 0
+            },
+            {
+              "key": "otel.library.name",
+              "type": "string",
+              "value": "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
+            },
+            {
+              "key": "otel.library.version",
+              "type": "string",
+              "value": "0.45.0"
+            },
+            {
+              "key": "span.kind",
+              "type": "string",
+              "value": "server"
+            }
+          ],
+          "logs": null,
+          "processID": "p25",
+          "warnings": null
+        },
+        {
+          "traceID": "0555ce1903feb85770d102846273073b",
+          "spanID": "8ebd38cfd2dae311",
+          "flags": 1,
+          "operationName": "moby.buildkit.v1.Control/ListWorkers",
+          "references": [
+            {
+              "refType": "CHILD_OF",
+              "traceID": "0555ce1903feb85770d102846273073b",
+              "spanID": "e1841a2a45f6de1a"
+            }
+          ],
+          "startTime": 1705071460382862,
+          "duration": 14981,
+          "tags": [
+            {
+              "key": "rpc.system",
+              "type": "string",
+              "value": "grpc"
+            },
+            {
+              "key": "rpc.service",
+              "type": "string",
+              "value": "moby.buildkit.v1.Control"
+            },
+            {
+              "key": "rpc.method",
+              "type": "string",
+              "value": "ListWorkers"
+            },
+            {
+              "key": "rpc.grpc.status_code",
+              "type": "int64",
+              "value": 0
+            },
+            {
+              "key": "otel.library.name",
+              "type": "string",
+              "value": "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
+            },
+            {
+              "key": "otel.library.version",
+              "type": "string",
+              "value": "0.45.0"
+            },
+            {
+              "key": "span.kind",
+              "type": "string",
+              "value": "server"
+            }
+          ],
+          "logs": null,
+          "processID": "p26",
+          "warnings": null
+        },
+        {
+          "traceID": "0555ce1903feb85770d102846273073b",
+          "spanID": "92290c7354b75ead",
+          "flags": 1,
+          "operationName": "moby.buildkit.v1.frontend.LLBBridge/Ping",
+          "references": [
+            {
+              "refType": "CHILD_OF",
+              "traceID": "0555ce1903feb85770d102846273073b",
+              "spanID": "a4ee96f4ccd426ec"
+            }
+          ],
+          "startTime": 1705071460400135,
+          "duration": 144,
+          "tags": [
+            {
+              "key": "rpc.system",
+              "type": "string",
+              "value": "grpc"
+            },
+            {
+              "key": "rpc.service",
+              "type": "string",
+              "value": "moby.buildkit.v1.frontend.LLBBridge"
+            },
+            {
+              "key": "rpc.method",
+              "type": "string",
+              "value": "Ping"
+            },
+            {
+              "key": "rpc.grpc.status_code",
+              "type": "int64",
+              "value": 0
+            },
+            {
+              "key": "otel.library.name",
+              "type": "string",
+              "value": "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
+            },
+            {
+              "key": "otel.library.version",
+              "type": "string",
+              "value": "0.45.0"
+            },
+            {
+              "key": "span.kind",
+              "type": "string",
+              "value": "server"
+            }
+          ],
+          "logs": null,
+          "processID": "p27",
+          "warnings": null
+        },
+        {
+          "traceID": "0555ce1903feb85770d102846273073b",
+          "spanID": "20676e8261684f34",
+          "flags": 1,
+          "operationName": "cache request: [internal] load build definition from Dockerfile",
+          "references": [
+            {
+              "refType": "CHILD_OF",
+              "traceID": "0555ce1903feb85770d102846273073b",
+              "spanID": "14078881261b5312"
+            }
+          ],
+          "startTime": 1705071460401994,
+          "duration": 46,
+          "tags": [
+            {
+              "key": "vertex",
+              "type": "string",
+              "value": "sha256:7bef658bc90b7db608c3df27d11b99f5785a6a6d1b5fe94ff63c69ae5093851c"
+            },
+            {
+              "key": "otel.library.name",
+              "type": "string",
+              "value": "go.opentelemetry.io/otel/sdk/tracer"
+            }
+          ],
+          "logs": null,
+          "processID": "p28",
+          "warnings": null
+        },
+        {
+          "traceID": "0555ce1903feb85770d102846273073b",
+          "spanID": "70f459c5d3c2c488",
+          "flags": 1,
+          "operationName": "moby.filesync.v1.FileSync/DiffCopy",
+          "references": [
+            {
+              "refType": "CHILD_OF",
+              "traceID": "0555ce1903feb85770d102846273073b",
+              "spanID": "6f6b0f30f558c4ae"
+            }
+          ],
+          "startTime": 1705071460408431,
+          "duration": 10651,
+          "tags": [
+            {
+              "key": "rpc.system",
+              "type": "string",
+              "value": "grpc"
+            },
+            {
+              "key": "rpc.service",
+              "type": "string",
+              "value": "moby.filesync.v1.FileSync"
+            },
+            {
+              "key": "rpc.method",
+              "type": "string",
+              "value": "DiffCopy"
+            },
+            {
+              "key": "rpc.grpc.status_code",
+              "type": "int64",
+              "value": 0
+            },
+            {
+              "key": "otel.library.name",
+              "type": "string",
+              "value": "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
+            },
+            {
+              "key": "otel.library.version",
+              "type": "string",
+              "value": "0.45.0"
+            },
+            {
+              "key": "span.kind",
+              "type": "string",
+              "value": "client"
+            }
+          ],
+          "logs": null,
+          "processID": "p29",
+          "warnings": null
+        },
+        {
+          "traceID": "0555ce1903feb85770d102846273073b",
+          "spanID": "6f6b0f30f558c4ae",
+          "flags": 1,
+          "operationName": "[internal] load build definition from Dockerfile",
+          "references": [
+            {
+              "refType": "CHILD_OF",
+              "traceID": "0555ce1903feb85770d102846273073b",
+              "spanID": "14078881261b5312"
+            }
+          ],
+          "startTime": 1705071460402159,
+          "duration": 31454,
+          "tags": [
+            {
+              "key": "vertex",
+              "type": "string",
+              "value": "sha256:7bef658bc90b7db608c3df27d11b99f5785a6a6d1b5fe94ff63c69ae5093851c"
+            },
+            {
+              "key": "otel.library.name",
+              "type": "string",
+              "value": "go.opentelemetry.io/otel/sdk/tracer"
+            }
+          ],
+          "logs": null,
+          "processID": "p30",
+          "warnings": null
+        },
+        {
+          "traceID": "0555ce1903feb85770d102846273073b",
+          "spanID": "f3a80839f6b95729",
+          "flags": 1,
+          "operationName": "moby.filesync.v1.Auth/VerifyTokenAuthority",
+          "references": [
+            {
+              "refType": "CHILD_OF",
+              "traceID": "0555ce1903feb85770d102846273073b",
+              "spanID": "14078881261b5312"
+            }
+          ],
+          "startTime": 1705071460444500,
+          "duration": 79616,
+          "tags": [
+            {
+              "key": "rpc.system",
+              "type": "string",
+              "value": "grpc"
+            },
+            {
+              "key": "rpc.service",
+              "type": "string",
+              "value": "moby.filesync.v1.Auth"
+            },
+            {
+              "key": "rpc.method",
+              "type": "string",
+              "value": "VerifyTokenAuthority"
+            },
+            {
+              "key": "rpc.grpc.status_code",
+              "type": "int64",
+              "value": 0
+            },
+            {
+              "key": "otel.library.name",
+              "type": "string",
+              "value": "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
+            },
+            {
+              "key": "otel.library.version",
+              "type": "string",
+              "value": "0.45.0"
+            },
+            {
+              "key": "span.kind",
+              "type": "string",
+              "value": "client"
+            }
+          ],
+          "logs": null,
+          "processID": "p31",
+          "warnings": null
+        },
+        {
+          "traceID": "0555ce1903feb85770d102846273073b",
+          "spanID": "f26320c4e0b697ac",
+          "flags": 1,
+          "operationName": "moby.filesync.v1.Auth/FetchToken",
+          "references": [
+            {
+              "refType": "CHILD_OF",
+              "traceID": "0555ce1903feb85770d102846273073b",
+              "spanID": "14078881261b5312"
+            }
+          ],
+          "startTime": 1705071460524541,
+          "duration": 376898,
+          "tags": [
+            {
+              "key": "rpc.system",
+              "type": "string",
+              "value": "grpc"
+            },
+            {
+              "key": "rpc.service",
+              "type": "string",
+              "value": "moby.filesync.v1.Auth"
+            },
+            {
+              "key": "rpc.method",
+              "type": "string",
+              "value": "FetchToken"
+            },
+            {
+              "key": "rpc.grpc.status_code",
+              "type": "int64",
+              "value": 0
+            },
+            {
+              "key": "otel.library.name",
+              "type": "string",
+              "value": "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
+            },
+            {
+              "key": "otel.library.version",
+              "type": "string",
+              "value": "0.45.0"
+            },
+            {
+              "key": "span.kind",
+              "type": "string",
+              "value": "client"
+            }
+          ],
+          "logs": null,
+          "processID": "p32",
+          "warnings": null
+        },
+        {
+          "traceID": "0555ce1903feb85770d102846273073b",
+          "spanID": "0e8cc109bf2f9cfa",
+          "flags": 1,
+          "operationName": "remotes.docker.resolver.HTTPRequest",
+          "references": [
+            {
+              "refType": "CHILD_OF",
+              "traceID": "0555ce1903feb85770d102846273073b",
+              "spanID": "14078881261b5312"
+            }
+          ],
+          "startTime": 1705071460901513,
+          "duration": 370141,
+          "tags": [
+            {
+              "key": "http.method",
+              "type": "string",
+              "value": "HEAD"
+            },
+            {
+              "key": "http.flavor",
+              "type": "string",
+              "value": "1.1"
+            },
+            {
+              "key": "http.url",
+              "type": "string",
+              "value": "https://registry-1.docker.io/v2/docker/dockerfile/manifests/1.5"
+            },
+            {
+              "key": "net.peer.name",
+              "type": "string",
+              "value": "registry-1.docker.io"
+            },
+            {
+              "key": "http.user_agent",
+              "type": "string",
+              "value": "buildkit/v0.12-dev"
+            },
+            {
+              "key": "http.status_code",
+              "type": "int64",
+              "value": 200
+            },
+            {
+              "key": "http.response_content_length",
+              "type": "int64",
+              "value": 8404
+            },
+            {
+              "key": "otel.library.name",
+              "type": "string",
+              "value": "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
+            },
+            {
+              "key": "otel.library.version",
+              "type": "string",
+              "value": "0.45.0"
+            },
+            {
+              "key": "span.kind",
+              "type": "string",
+              "value": "client"
+            }
+          ],
+          "logs": null,
+          "processID": "p33",
+          "warnings": null
+        },
+        {
+          "traceID": "0555ce1903feb85770d102846273073b",
+          "spanID": "f1f5f1e324025582",
+          "flags": 1,
+          "operationName": "HTTP HEAD",
+          "references": [
+            {
+              "refType": "CHILD_OF",
+              "traceID": "0555ce1903feb85770d102846273073b",
+              "spanID": "0e8cc109bf2f9cfa"
+            }
+          ],
+          "startTime": 1705071460901537,
+          "duration": 370135,
+          "tags": [
+            {
+              "key": "http.method",
+              "type": "string",
+              "value": "HEAD"
+            },
+            {
+              "key": "http.flavor",
+              "type": "string",
+              "value": "1.1"
+            },
+            {
+              "key": "http.url",
+              "type": "string",
+              "value": "https://registry-1.docker.io/v2/docker/dockerfile/manifests/1.5"
+            },
+            {
+              "key": "net.peer.name",
+              "type": "string",
+              "value": "registry-1.docker.io"
+            },
+            {
+              "key": "http.user_agent",
+              "type": "string",
+              "value": "buildkit/v0.12-dev"
+            },
+            {
+              "key": "http.request.header.host",
+              "type": "string",
+              "value": "registry-1.docker.io"
+            },
+            {
+              "key": "http.request.header.user-agent",
+              "type": "string",
+              "value": "buildkit/v0.12-dev"
+            },
+            {
+              "key": "http.request.header.accept",
+              "type": "string",
+              "value": "application/vnd.docker.distribution.manifest.v2+json, application/vnd.docker.distribution.manifest.list.v2+json, application/vnd.oci.image.manifest.v1+json, application/vnd.oci.image.index.v1+json, */*"
+            },
+            {
+              "key": "http.request.header.authorization",
+              "type": "string",
+              "value": "****"
+            },
+            {
+              "key": "http.request.header.traceparent",
+              "type": "string",
+              "value": "00-0555ce1903feb85770d102846273073b-f1f5f1e324025582-01"
+            },
+            {
+              "key": "http.status_code",
+              "type": "int64",
+              "value": 200
+            },
+            {
+              "key": "http.response_content_length",
+              "type": "int64",
+              "value": 8404
+            },
+            {
+              "key": "otel.library.name",
+              "type": "string",
+              "value": "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
+            },
+            {
+              "key": "otel.library.version",
+              "type": "string",
+              "value": "0.45.0"
+            },
+            {
+              "key": "span.kind",
+              "type": "string",
+              "value": "client"
+            }
+          ],
+          "logs": [
+            {
+              "timestamp": 1705071460901613,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "http.getconn.start"
+                },
+                {
+                  "key": "net.host.name",
+                  "type": "string",
+                  "value": "http.docker.internal:3128"
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071460901691,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "http.dns.start"
+                },
+                {
+                  "key": "net.host.name",
+                  "type": "string",
+                  "value": "http.docker.internal"
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071460903005,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "http.dns.done"
+                },
+                {
+                  "key": "http.dns.addrs",
+                  "type": "string",
+                  "value": "192.168.65.1"
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071460903010,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "http.connect.192.168.65.1:3128.start"
+                },
+                {
+                  "key": "http.remote",
+                  "type": "string",
+                  "value": "192.168.65.1:3128"
+                },
+                {
+                  "key": "http.conn.start.network",
+                  "type": "string",
+                  "value": "tcp"
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071460903518,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "http.connect.192.168.65.1:3128.done"
+                },
+                {
+                  "key": "http.conn.done.addr",
+                  "type": "string",
+                  "value": "192.168.65.1:3128"
+                },
+                {
+                  "key": "http.conn.done.network",
+                  "type": "string",
+                  "value": "tcp"
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071460935298,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "http.tls.start"
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071461125870,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "http.tls.done"
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071461125909,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "http.getconn.done"
+                },
+                {
+                  "key": "http.remote",
+                  "type": "string",
+                  "value": "192.168.65.1:3128"
+                },
+                {
+                  "key": "http.local",
+                  "type": "string",
+                  "value": "192.168.65.3:41236"
+                },
+                {
+                  "key": "http.conn.reused",
+                  "type": "bool",
+                  "value": false
+                },
+                {
+                  "key": "http.conn.wasidle",
+                  "type": "bool",
+                  "value": false
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071461125942,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "http.send.start"
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071461125943,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "http.send.done"
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071461271591,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "http.receive.start"
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071461271624,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "http.receive.done"
+                }
+              ]
+            }
+          ],
+          "processID": "p34",
+          "warnings": null
+        },
+        {
+          "traceID": "0555ce1903feb85770d102846273073b",
+          "spanID": "6e0d97923598173a",
+          "flags": 1,
+          "operationName": "cache request: docker-image://docker.io/docker/dockerfile:1.5@sha256:39b85bbfa7536a5feceb7372a0817649ecb2724562a38360f4d6a7782a409b14",
+          "references": [
+            {
+              "refType": "CHILD_OF",
+              "traceID": "0555ce1903feb85770d102846273073b",
+              "spanID": "14078881261b5312"
+            }
+          ],
+          "startTime": 1705071461298320,
+          "duration": 137,
+          "tags": [
+            {
+              "key": "vertex",
+              "type": "string",
+              "value": "sha256:79dd01740e708982847bb0010c8505e266b4f72ed0ffa354f38e205a15ec3b00"
+            },
+            {
+              "key": "otel.library.name",
+              "type": "string",
+              "value": "go.opentelemetry.io/otel/sdk/tracer"
+            }
+          ],
+          "logs": null,
+          "processID": "p35",
+          "warnings": null
+        },
+        {
+          "traceID": "0555ce1903feb85770d102846273073b",
+          "spanID": "8b3cf8274861214e",
+          "flags": 1,
+          "operationName": "load cache: docker-image://docker.io/docker/dockerfile:1.5@sha256:39b85bbfa7536a5feceb7372a0817649ecb2724562a38360f4d6a7782a409b14",
+          "references": [
+            {
+              "refType": "CHILD_OF",
+              "traceID": "0555ce1903feb85770d102846273073b",
+              "spanID": "14078881261b5312"
+            }
+          ],
+          "startTime": 1705071461298789,
+          "duration": 34,
+          "tags": [
+            {
+              "key": "vertex",
+              "type": "string",
+              "value": "sha256:79dd01740e708982847bb0010c8505e266b4f72ed0ffa354f38e205a15ec3b00"
+            },
+            {
+              "key": "otel.library.name",
+              "type": "string",
+              "value": "go.opentelemetry.io/otel/sdk/tracer"
+            }
+          ],
+          "logs": null,
+          "processID": "p36",
+          "warnings": null
+        },
+        {
+          "traceID": "0555ce1903feb85770d102846273073b",
+          "spanID": "8303f71942c86c2d",
+          "flags": 1,
+          "operationName": "cache request: [internal] load .dockerignore",
+          "references": [
+            {
+              "refType": "CHILD_OF",
+              "traceID": "0555ce1903feb85770d102846273073b",
+              "spanID": "14078881261b5312"
+            }
+          ],
+          "startTime": 1705071461450353,
+          "duration": 26,
+          "tags": [
+            {
+              "key": "vertex",
+              "type": "string",
+              "value": "sha256:b9cb39feb3f4ca5b899481fca81551c8eeb5496ccc8b8134b2bc80786efdb313"
+            },
+            {
+              "key": "otel.library.name",
+              "type": "string",
+              "value": "go.opentelemetry.io/otel/sdk/tracer"
+            }
+          ],
+          "logs": null,
+          "processID": "p37",
+          "warnings": null
+        },
+        {
+          "traceID": "0555ce1903feb85770d102846273073b",
+          "spanID": "50509a5bad270153",
+          "flags": 1,
+          "operationName": "moby.filesync.v1.FileSync/DiffCopy",
+          "references": [
+            {
+              "refType": "CHILD_OF",
+              "traceID": "0555ce1903feb85770d102846273073b",
+              "spanID": "902f8408a5d9d83c"
+            }
+          ],
+          "startTime": 1705071461455977,
+          "duration": 8592,
+          "tags": [
+            {
+              "key": "rpc.system",
+              "type": "string",
+              "value": "grpc"
+            },
+            {
+              "key": "rpc.service",
+              "type": "string",
+              "value": "moby.filesync.v1.FileSync"
+            },
+            {
+              "key": "rpc.method",
+              "type": "string",
+              "value": "DiffCopy"
+            },
+            {
+              "key": "rpc.grpc.status_code",
+              "type": "int64",
+              "value": 0
+            },
+            {
+              "key": "otel.library.name",
+              "type": "string",
+              "value": "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
+            },
+            {
+              "key": "otel.library.version",
+              "type": "string",
+              "value": "0.45.0"
+            },
+            {
+              "key": "span.kind",
+              "type": "string",
+              "value": "client"
+            }
+          ],
+          "logs": null,
+          "processID": "p38",
+          "warnings": null
+        },
+        {
+          "traceID": "0555ce1903feb85770d102846273073b",
+          "spanID": "902f8408a5d9d83c",
+          "flags": 1,
+          "operationName": "[internal] load .dockerignore",
+          "references": [
+            {
+              "refType": "CHILD_OF",
+              "traceID": "0555ce1903feb85770d102846273073b",
+              "spanID": "14078881261b5312"
+            }
+          ],
+          "startTime": 1705071461450444,
+          "duration": 29623,
+          "tags": [
+            {
+              "key": "vertex",
+              "type": "string",
+              "value": "sha256:b9cb39feb3f4ca5b899481fca81551c8eeb5496ccc8b8134b2bc80786efdb313"
+            },
+            {
+              "key": "otel.library.name",
+              "type": "string",
+              "value": "go.opentelemetry.io/otel/sdk/tracer"
+            }
+          ],
+          "logs": null,
+          "processID": "p39",
+          "warnings": null
+        },
+        {
+          "traceID": "0555ce1903feb85770d102846273073b",
+          "spanID": "6b3f3a9d50fbe98b",
+          "flags": 1,
+          "operationName": "moby.filesync.v1.Auth/VerifyTokenAuthority",
+          "references": [
+            {
+              "refType": "CHILD_OF",
+              "traceID": "0555ce1903feb85770d102846273073b",
+              "spanID": "14078881261b5312"
+            }
+          ],
+          "startTime": 1705071461493477,
+          "duration": 1007,
+          "tags": [
+            {
+              "key": "rpc.system",
+              "type": "string",
+              "value": "grpc"
+            },
+            {
+              "key": "rpc.service",
+              "type": "string",
+              "value": "moby.filesync.v1.Auth"
+            },
+            {
+              "key": "rpc.method",
+              "type": "string",
+              "value": "VerifyTokenAuthority"
+            },
+            {
+              "key": "rpc.grpc.status_code",
+              "type": "int64",
+              "value": 0
+            },
+            {
+              "key": "otel.library.name",
+              "type": "string",
+              "value": "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
+            },
+            {
+              "key": "otel.library.version",
+              "type": "string",
+              "value": "0.45.0"
+            },
+            {
+              "key": "span.kind",
+              "type": "string",
+              "value": "client"
+            }
+          ],
+          "logs": null,
+          "processID": "p40",
+          "warnings": null
+        },
+        {
+          "traceID": "0555ce1903feb85770d102846273073b",
+          "spanID": "f857d0002991458c",
+          "flags": 1,
+          "operationName": "moby.filesync.v1.Auth/VerifyTokenAuthority",
+          "references": [
+            {
+              "refType": "CHILD_OF",
+              "traceID": "0555ce1903feb85770d102846273073b",
+              "spanID": "14078881261b5312"
+            }
+          ],
+          "startTime": 1705071461497835,
+          "duration": 710,
+          "tags": [
+            {
+              "key": "rpc.system",
+              "type": "string",
+              "value": "grpc"
+            },
+            {
+              "key": "rpc.service",
+              "type": "string",
+              "value": "moby.filesync.v1.Auth"
+            },
+            {
+              "key": "rpc.method",
+              "type": "string",
+              "value": "VerifyTokenAuthority"
+            },
+            {
+              "key": "rpc.grpc.status_code",
+              "type": "int64",
+              "value": 0
+            },
+            {
+              "key": "otel.library.name",
+              "type": "string",
+              "value": "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
+            },
+            {
+              "key": "otel.library.version",
+              "type": "string",
+              "value": "0.45.0"
+            },
+            {
+              "key": "span.kind",
+              "type": "string",
+              "value": "client"
+            }
+          ],
+          "logs": null,
+          "processID": "p41",
+          "warnings": null
+        },
+        {
+          "traceID": "0555ce1903feb85770d102846273073b",
+          "spanID": "912de85fdfd20fe7",
+          "flags": 1,
+          "operationName": "moby.filesync.v1.Auth/FetchToken",
+          "references": [
+            {
+              "refType": "CHILD_OF",
+              "traceID": "0555ce1903feb85770d102846273073b",
+              "spanID": "14078881261b5312"
+            }
+          ],
+          "startTime": 1705071461498707,
+          "duration": 343316,
+          "tags": [
+            {
+              "key": "rpc.system",
+              "type": "string",
+              "value": "grpc"
+            },
+            {
+              "key": "rpc.service",
+              "type": "string",
+              "value": "moby.filesync.v1.Auth"
+            },
+            {
+              "key": "rpc.method",
+              "type": "string",
+              "value": "FetchToken"
+            },
+            {
+              "key": "rpc.grpc.status_code",
+              "type": "int64",
+              "value": 0
+            },
+            {
+              "key": "otel.library.name",
+              "type": "string",
+              "value": "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
+            },
+            {
+              "key": "otel.library.version",
+              "type": "string",
+              "value": "0.45.0"
+            },
+            {
+              "key": "span.kind",
+              "type": "string",
+              "value": "client"
+            }
+          ],
+          "logs": null,
+          "processID": "p42",
+          "warnings": null
+        },
+        {
+          "traceID": "0555ce1903feb85770d102846273073b",
+          "spanID": "d9ed48c061e2ecfd",
+          "flags": 1,
+          "operationName": "moby.filesync.v1.Auth/FetchToken",
+          "references": [
+            {
+              "refType": "CHILD_OF",
+              "traceID": "0555ce1903feb85770d102846273073b",
+              "spanID": "14078881261b5312"
+            }
+          ],
+          "startTime": 1705071461494633,
+          "duration": 349703,
+          "tags": [
+            {
+              "key": "rpc.system",
+              "type": "string",
+              "value": "grpc"
+            },
+            {
+              "key": "rpc.service",
+              "type": "string",
+              "value": "moby.filesync.v1.Auth"
+            },
+            {
+              "key": "rpc.method",
+              "type": "string",
+              "value": "FetchToken"
+            },
+            {
+              "key": "rpc.grpc.status_code",
+              "type": "int64",
+              "value": 0
+            },
+            {
+              "key": "otel.library.name",
+              "type": "string",
+              "value": "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
+            },
+            {
+              "key": "otel.library.version",
+              "type": "string",
+              "value": "0.45.0"
+            },
+            {
+              "key": "span.kind",
+              "type": "string",
+              "value": "client"
+            }
+          ],
+          "logs": null,
+          "processID": "p43",
+          "warnings": null
+        },
+        {
+          "traceID": "0555ce1903feb85770d102846273073b",
+          "spanID": "cf5aa3b6eba7323c",
+          "flags": 1,
+          "operationName": "remotes.docker.resolver.HTTPRequest",
+          "references": [
+            {
+              "refType": "CHILD_OF",
+              "traceID": "0555ce1903feb85770d102846273073b",
+              "spanID": "14078881261b5312"
+            }
+          ],
+          "startTime": 1705071461844389,
+          "duration": 357134,
+          "tags": [
+            {
+              "key": "http.method",
+              "type": "string",
+              "value": "HEAD"
+            },
+            {
+              "key": "http.flavor",
+              "type": "string",
+              "value": "1.1"
+            },
+            {
+              "key": "http.url",
+              "type": "string",
+              "value": "https://registry-1.docker.io/v2/tonistiigi/bats-assert/manifests/latest"
+            },
+            {
+              "key": "net.peer.name",
+              "type": "string",
+              "value": "registry-1.docker.io"
+            },
+            {
+              "key": "http.user_agent",
+              "type": "string",
+              "value": "buildkit/v0.12-dev"
+            },
+            {
+              "key": "http.status_code",
+              "type": "int64",
+              "value": 200
+            },
+            {
+              "key": "http.response_content_length",
+              "type": "int64",
+              "value": 6942
+            },
+            {
+              "key": "otel.library.name",
+              "type": "string",
+              "value": "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
+            },
+            {
+              "key": "otel.library.version",
+              "type": "string",
+              "value": "0.45.0"
+            },
+            {
+              "key": "span.kind",
+              "type": "string",
+              "value": "client"
+            }
+          ],
+          "logs": null,
+          "processID": "p44",
+          "warnings": null
+        },
+        {
+          "traceID": "0555ce1903feb85770d102846273073b",
+          "spanID": "5b70cf0df5f0a9bc",
+          "flags": 1,
+          "operationName": "HTTP HEAD",
+          "references": [
+            {
+              "refType": "CHILD_OF",
+              "traceID": "0555ce1903feb85770d102846273073b",
+              "spanID": "cf5aa3b6eba7323c"
+            }
+          ],
+          "startTime": 1705071461844404,
+          "duration": 357136,
+          "tags": [
+            {
+              "key": "http.method",
+              "type": "string",
+              "value": "HEAD"
+            },
+            {
+              "key": "http.flavor",
+              "type": "string",
+              "value": "1.1"
+            },
+            {
+              "key": "http.url",
+              "type": "string",
+              "value": "https://registry-1.docker.io/v2/tonistiigi/bats-assert/manifests/latest"
+            },
+            {
+              "key": "net.peer.name",
+              "type": "string",
+              "value": "registry-1.docker.io"
+            },
+            {
+              "key": "http.user_agent",
+              "type": "string",
+              "value": "buildkit/v0.12-dev"
+            },
+            {
+              "key": "http.request.header.host",
+              "type": "string",
+              "value": "registry-1.docker.io"
+            },
+            {
+              "key": "http.request.header.user-agent",
+              "type": "string",
+              "value": "buildkit/v0.12-dev"
+            },
+            {
+              "key": "http.request.header.accept",
+              "type": "string",
+              "value": "application/vnd.docker.distribution.manifest.v2+json, application/vnd.docker.distribution.manifest.list.v2+json, application/vnd.oci.image.manifest.v1+json, application/vnd.oci.image.index.v1+json, */*"
+            },
+            {
+              "key": "http.request.header.authorization",
+              "type": "string",
+              "value": "****"
+            },
+            {
+              "key": "http.request.header.traceparent",
+              "type": "string",
+              "value": "00-0555ce1903feb85770d102846273073b-5b70cf0df5f0a9bc-01"
+            },
+            {
+              "key": "http.status_code",
+              "type": "int64",
+              "value": 200
+            },
+            {
+              "key": "http.response_content_length",
+              "type": "int64",
+              "value": 6942
+            },
+            {
+              "key": "otel.library.name",
+              "type": "string",
+              "value": "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
+            },
+            {
+              "key": "otel.library.version",
+              "type": "string",
+              "value": "0.45.0"
+            },
+            {
+              "key": "span.kind",
+              "type": "string",
+              "value": "client"
+            }
+          ],
+          "logs": [
+            {
+              "timestamp": 1705071461844420,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "http.getconn.start"
+                },
+                {
+                  "key": "net.host.name",
+                  "type": "string",
+                  "value": "http.docker.internal:3128"
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071461844454,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "http.dns.start"
+                },
+                {
+                  "key": "net.host.name",
+                  "type": "string",
+                  "value": "http.docker.internal"
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071461845276,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "http.dns.done"
+                },
+                {
+                  "key": "http.dns.addrs",
+                  "type": "string",
+                  "value": "192.168.65.1"
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071461845280,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "http.connect.192.168.65.1:3128.start"
+                },
+                {
+                  "key": "http.remote",
+                  "type": "string",
+                  "value": "192.168.65.1:3128"
+                },
+                {
+                  "key": "http.conn.start.network",
+                  "type": "string",
+                  "value": "tcp"
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071461845736,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "http.connect.192.168.65.1:3128.done"
+                },
+                {
+                  "key": "http.conn.done.addr",
+                  "type": "string",
+                  "value": "192.168.65.1:3128"
+                },
+                {
+                  "key": "http.conn.done.network",
+                  "type": "string",
+                  "value": "tcp"
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071461863139,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "http.tls.start"
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071462052708,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "http.tls.done"
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071462052739,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "http.getconn.done"
+                },
+                {
+                  "key": "http.remote",
+                  "type": "string",
+                  "value": "192.168.65.1:3128"
+                },
+                {
+                  "key": "http.local",
+                  "type": "string",
+                  "value": "192.168.65.3:41262"
+                },
+                {
+                  "key": "http.conn.reused",
+                  "type": "bool",
+                  "value": false
+                },
+                {
+                  "key": "http.conn.wasidle",
+                  "type": "bool",
+                  "value": false
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071462052770,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "http.send.start"
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071462052771,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "http.send.done"
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071462201435,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "http.receive.start"
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071462201485,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "http.receive.done"
+                }
+              ]
+            }
+          ],
+          "processID": "p45",
+          "warnings": null
+        },
+        {
+          "traceID": "0555ce1903feb85770d102846273073b",
+          "spanID": "323fa81aa6a2dea5",
+          "flags": 1,
+          "operationName": "remotes.docker.resolver.HTTPRequest",
+          "references": [
+            {
+              "refType": "CHILD_OF",
+              "traceID": "0555ce1903feb85770d102846273073b",
+              "spanID": "14078881261b5312"
+            }
+          ],
+          "startTime": 1705071461842101,
+          "duration": 366451,
+          "tags": [
+            {
+              "key": "http.method",
+              "type": "string",
+              "value": "HEAD"
+            },
+            {
+              "key": "http.flavor",
+              "type": "string",
+              "value": "1.1"
+            },
+            {
+              "key": "http.url",
+              "type": "string",
+              "value": "https://registry-1.docker.io/v2/library/alpine/manifests/latest"
+            },
+            {
+              "key": "net.peer.name",
+              "type": "string",
+              "value": "registry-1.docker.io"
+            },
+            {
+              "key": "http.user_agent",
+              "type": "string",
+              "value": "buildkit/v0.12-dev"
+            },
+            {
+              "key": "http.status_code",
+              "type": "int64",
+              "value": 200
+            },
+            {
+              "key": "http.response_content_length",
+              "type": "int64",
+              "value": 1638
+            },
+            {
+              "key": "otel.library.name",
+              "type": "string",
+              "value": "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
+            },
+            {
+              "key": "otel.library.version",
+              "type": "string",
+              "value": "0.45.0"
+            },
+            {
+              "key": "span.kind",
+              "type": "string",
+              "value": "client"
+            }
+          ],
+          "logs": null,
+          "processID": "p46",
+          "warnings": null
+        },
+        {
+          "traceID": "0555ce1903feb85770d102846273073b",
+          "spanID": "990f9e6b761681f6",
+          "flags": 1,
+          "operationName": "HTTP HEAD",
+          "references": [
+            {
+              "refType": "CHILD_OF",
+              "traceID": "0555ce1903feb85770d102846273073b",
+              "spanID": "323fa81aa6a2dea5"
+            }
+          ],
+          "startTime": 1705071461842125,
+          "duration": 366439,
+          "tags": [
+            {
+              "key": "http.method",
+              "type": "string",
+              "value": "HEAD"
+            },
+            {
+              "key": "http.flavor",
+              "type": "string",
+              "value": "1.1"
+            },
+            {
+              "key": "http.url",
+              "type": "string",
+              "value": "https://registry-1.docker.io/v2/library/alpine/manifests/latest"
+            },
+            {
+              "key": "net.peer.name",
+              "type": "string",
+              "value": "registry-1.docker.io"
+            },
+            {
+              "key": "http.user_agent",
+              "type": "string",
+              "value": "buildkit/v0.12-dev"
+            },
+            {
+              "key": "http.request.header.host",
+              "type": "string",
+              "value": "registry-1.docker.io"
+            },
+            {
+              "key": "http.request.header.user-agent",
+              "type": "string",
+              "value": "buildkit/v0.12-dev"
+            },
+            {
+              "key": "http.request.header.accept",
+              "type": "string",
+              "value": "application/vnd.docker.distribution.manifest.v2+json, application/vnd.docker.distribution.manifest.list.v2+json, application/vnd.oci.image.manifest.v1+json, application/vnd.oci.image.index.v1+json, */*"
+            },
+            {
+              "key": "http.request.header.authorization",
+              "type": "string",
+              "value": "****"
+            },
+            {
+              "key": "http.request.header.traceparent",
+              "type": "string",
+              "value": "00-0555ce1903feb85770d102846273073b-990f9e6b761681f6-01"
+            },
+            {
+              "key": "http.status_code",
+              "type": "int64",
+              "value": 200
+            },
+            {
+              "key": "http.response_content_length",
+              "type": "int64",
+              "value": 1638
+            },
+            {
+              "key": "otel.library.name",
+              "type": "string",
+              "value": "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
+            },
+            {
+              "key": "otel.library.version",
+              "type": "string",
+              "value": "0.45.0"
+            },
+            {
+              "key": "span.kind",
+              "type": "string",
+              "value": "client"
+            }
+          ],
+          "logs": [
+            {
+              "timestamp": 1705071461842152,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "http.getconn.start"
+                },
+                {
+                  "key": "net.host.name",
+                  "type": "string",
+                  "value": "http.docker.internal:3128"
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071461842195,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "http.dns.start"
+                },
+                {
+                  "key": "net.host.name",
+                  "type": "string",
+                  "value": "http.docker.internal"
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071461843327,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "http.dns.done"
+                },
+                {
+                  "key": "http.dns.addrs",
+                  "type": "string",
+                  "value": "192.168.65.1"
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071461843335,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "http.connect.192.168.65.1:3128.start"
+                },
+                {
+                  "key": "http.remote",
+                  "type": "string",
+                  "value": "192.168.65.1:3128"
+                },
+                {
+                  "key": "http.conn.start.network",
+                  "type": "string",
+                  "value": "tcp"
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071461843911,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "http.connect.192.168.65.1:3128.done"
+                },
+                {
+                  "key": "http.conn.done.addr",
+                  "type": "string",
+                  "value": "192.168.65.1:3128"
+                },
+                {
+                  "key": "http.conn.done.network",
+                  "type": "string",
+                  "value": "tcp"
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071461862155,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "http.tls.start"
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071462056362,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "http.tls.done"
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071462056400,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "http.getconn.done"
+                },
+                {
+                  "key": "http.remote",
+                  "type": "string",
+                  "value": "192.168.65.1:3128"
+                },
+                {
+                  "key": "http.local",
+                  "type": "string",
+                  "value": "192.168.65.3:41248"
+                },
+                {
+                  "key": "http.conn.reused",
+                  "type": "bool",
+                  "value": false
+                },
+                {
+                  "key": "http.conn.wasidle",
+                  "type": "bool",
+                  "value": false
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071462056421,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "http.send.start"
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071462056421,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "http.send.done"
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071462208495,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "http.receive.start"
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071462208525,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "http.receive.done"
+                }
+              ]
+            }
+          ],
+          "processID": "p47",
+          "warnings": null
+        },
+        {
+          "traceID": "0555ce1903feb85770d102846273073b",
+          "spanID": "14078881261b5312",
+          "flags": 1,
+          "operationName": "moby.buildkit.v1.frontend.LLBBridge/Solve",
+          "references": [
+            {
+              "refType": "CHILD_OF",
+              "traceID": "0555ce1903feb85770d102846273073b",
+              "spanID": "4548113210977542"
+            }
+          ],
+          "startTime": 1705071460401260,
+          "duration": 1883263,
+          "tags": [
+            {
+              "key": "rpc.system",
+              "type": "string",
+              "value": "grpc"
+            },
+            {
+              "key": "rpc.service",
+              "type": "string",
+              "value": "moby.buildkit.v1.frontend.LLBBridge"
+            },
+            {
+              "key": "rpc.method",
+              "type": "string",
+              "value": "Solve"
+            },
+            {
+              "key": "rpc.grpc.status_code",
+              "type": "int64",
+              "value": 0
+            },
+            {
+              "key": "otel.library.name",
+              "type": "string",
+              "value": "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
+            },
+            {
+              "key": "otel.library.version",
+              "type": "string",
+              "value": "0.45.0"
+            },
+            {
+              "key": "span.kind",
+              "type": "string",
+              "value": "server"
+            }
+          ],
+          "logs": [
+            {
+              "timestamp": 1705071461337936,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "Container created"
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071461338262,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "Container started"
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071462255546,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "Container exited"
+                },
+                {
+                  "key": "exit.code",
+                  "type": "int64",
+                  "value": 0
+                }
+              ]
+            }
+          ],
+          "processID": "p48",
+          "warnings": null
+        },
+        {
+          "traceID": "0555ce1903feb85770d102846273073b",
+          "spanID": "424053ee88cf9600",
+          "flags": 1,
+          "operationName": "moby.buildkit.v1.frontend.LLBBridge/Return",
+          "references": [
+            {
+              "refType": "CHILD_OF",
+              "traceID": "0555ce1903feb85770d102846273073b",
+              "spanID": "76cae146240f0505"
+            }
+          ],
+          "startTime": 1705071462286310,
+          "duration": 36,
+          "tags": [
+            {
+              "key": "rpc.system",
+              "type": "string",
+              "value": "grpc"
+            },
+            {
+              "key": "rpc.service",
+              "type": "string",
+              "value": "moby.buildkit.v1.frontend.LLBBridge"
+            },
+            {
+              "key": "rpc.method",
+              "type": "string",
+              "value": "Return"
+            },
+            {
+              "key": "rpc.grpc.status_code",
+              "type": "int64",
+              "value": 0
+            },
+            {
+              "key": "otel.library.name",
+              "type": "string",
+              "value": "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
+            },
+            {
+              "key": "otel.library.version",
+              "type": "string",
+              "value": "0.45.0"
+            },
+            {
+              "key": "span.kind",
+              "type": "string",
+              "value": "server"
+            }
+          ],
+          "logs": null,
+          "processID": "p49",
+          "warnings": null
+        },
+        {
+          "traceID": "0555ce1903feb85770d102846273073b",
+          "spanID": "121feef5c8cc97ad",
+          "flags": 1,
+          "operationName": "cache request: [build 1/3] FROM docker.io/library/alpine@sha256:51b67269f354137895d43f3b3d810bfacd3945438e94dc5ac55fdac340352f48",
+          "references": [
+            {
+              "refType": "CHILD_OF",
+              "traceID": "0555ce1903feb85770d102846273073b",
+              "spanID": "f1bb978313184255"
+            }
+          ],
+          "startTime": 1705071462287014,
+          "duration": 226,
+          "tags": [
+            {
+              "key": "vertex",
+              "type": "string",
+              "value": "sha256:68ce956d6bef78a30f5452b0c12c5f918cd9e67c18ebe8b864b0a483dc147258"
+            },
+            {
+              "key": "otel.library.name",
+              "type": "string",
+              "value": "go.opentelemetry.io/otel/sdk/tracer"
+            }
+          ],
+          "logs": null,
+          "processID": "p50",
+          "warnings": null
+        },
+        {
+          "traceID": "0555ce1903feb85770d102846273073b",
+          "spanID": "d5db73e91af3df92",
+          "flags": 1,
+          "operationName": "cache request: [internal] load build context",
+          "references": [
+            {
+              "refType": "CHILD_OF",
+              "traceID": "0555ce1903feb85770d102846273073b",
+              "spanID": "f1bb978313184255"
+            }
+          ],
+          "startTime": 1705071462287008,
+          "duration": 265,
+          "tags": [
+            {
+              "key": "vertex",
+              "type": "string",
+              "value": "sha256:d6e40c1360ca7a3794673b2c27130b8a0cc88712faa287cb5f0060b14f025381"
+            },
+            {
+              "key": "otel.library.name",
+              "type": "string",
+              "value": "go.opentelemetry.io/otel/sdk/tracer"
+            }
+          ],
+          "logs": null,
+          "processID": "p51",
+          "warnings": null
+        },
+        {
+          "traceID": "0555ce1903feb85770d102846273073b",
+          "spanID": "efc51d00622f3291",
+          "flags": 1,
+          "operationName": "cache request: [bats-assert 1/1] FROM docker.io/tonistiigi/bats-assert@sha256:813f357fb86180c44bb6aaf155ff06573a630b3b2e0115405b0cb65116319551",
+          "references": [
+            {
+              "refType": "CHILD_OF",
+              "traceID": "0555ce1903feb85770d102846273073b",
+              "spanID": "f1bb978313184255"
+            }
+          ],
+          "startTime": 1705071462287053,
+          "duration": 229,
+          "tags": [
+            {
+              "key": "vertex",
+              "type": "string",
+              "value": "sha256:f34255b862e83e8a0427c87cf1e440bede7c30190733b1691b1abe35010fc318"
+            },
+            {
+              "key": "otel.library.name",
+              "type": "string",
+              "value": "go.opentelemetry.io/otel/sdk/tracer"
+            }
+          ],
+          "logs": null,
+          "processID": "p52",
+          "warnings": null
+        },
+        {
+          "traceID": "0555ce1903feb85770d102846273073b",
+          "spanID": "0f4681a3ad4d29f8",
+          "flags": 1,
+          "operationName": "moby.filesync.v1.FileSync/DiffCopy",
+          "references": [
+            {
+              "refType": "CHILD_OF",
+              "traceID": "0555ce1903feb85770d102846273073b",
+              "spanID": "4e95c6912df2c60b"
+            }
+          ],
+          "startTime": 1705071462293077,
+          "duration": 76429,
+          "tags": [
+            {
+              "key": "rpc.system",
+              "type": "string",
+              "value": "grpc"
+            },
+            {
+              "key": "rpc.service",
+              "type": "string",
+              "value": "moby.filesync.v1.FileSync"
+            },
+            {
+              "key": "rpc.method",
+              "type": "string",
+              "value": "DiffCopy"
+            },
+            {
+              "key": "rpc.grpc.status_code",
+              "type": "int64",
+              "value": 0
+            },
+            {
+              "key": "otel.library.name",
+              "type": "string",
+              "value": "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
+            },
+            {
+              "key": "otel.library.version",
+              "type": "string",
+              "value": "0.45.0"
+            },
+            {
+              "key": "span.kind",
+              "type": "string",
+              "value": "client"
+            }
+          ],
+          "logs": null,
+          "processID": "p53",
+          "warnings": null
+        },
+        {
+          "traceID": "0555ce1903feb85770d102846273073b",
+          "spanID": "4e95c6912df2c60b",
+          "flags": 1,
+          "operationName": "[internal] load build context",
+          "references": [
+            {
+              "refType": "CHILD_OF",
+              "traceID": "0555ce1903feb85770d102846273073b",
+              "spanID": "f1bb978313184255"
+            }
+          ],
+          "startTime": 1705071462287748,
+          "duration": 97572,
+          "tags": [
+            {
+              "key": "vertex",
+              "type": "string",
+              "value": "sha256:d6e40c1360ca7a3794673b2c27130b8a0cc88712faa287cb5f0060b14f025381"
+            },
+            {
+              "key": "otel.library.name",
+              "type": "string",
+              "value": "go.opentelemetry.io/otel/sdk/tracer"
+            }
+          ],
+          "logs": null,
+          "processID": "p54",
+          "warnings": null
+        },
+        {
+          "traceID": "0555ce1903feb85770d102846273073b",
+          "spanID": "24dd74fb33034fd8",
+          "flags": 1,
+          "operationName": "load cache: [build 1/3] FROM docker.io/library/alpine@sha256:51b67269f354137895d43f3b3d810bfacd3945438e94dc5ac55fdac340352f48",
+          "references": [
+            {
+              "refType": "CHILD_OF",
+              "traceID": "0555ce1903feb85770d102846273073b",
+              "spanID": "f1bb978313184255"
+            }
+          ],
+          "startTime": 1705071462390533,
+          "duration": 32,
+          "tags": [
+            {
+              "key": "vertex",
+              "type": "string",
+              "value": "sha256:68ce956d6bef78a30f5452b0c12c5f918cd9e67c18ebe8b864b0a483dc147258"
+            },
+            {
+              "key": "otel.library.name",
+              "type": "string",
+              "value": "go.opentelemetry.io/otel/sdk/tracer"
+            }
+          ],
+          "logs": null,
+          "processID": "p55",
+          "warnings": null
+        },
+        {
+          "traceID": "0555ce1903feb85770d102846273073b",
+          "spanID": "081ae7125ffd36a1",
+          "flags": 1,
+          "operationName": "[build 2/3] COPY xx-* /out/",
+          "references": [
+            {
+              "refType": "CHILD_OF",
+              "traceID": "0555ce1903feb85770d102846273073b",
+              "spanID": "f1bb978313184255"
+            }
+          ],
+          "startTime": 1705071462390820,
+          "duration": 64660,
+          "tags": [
+            {
+              "key": "vertex",
+              "type": "string",
+              "value": "sha256:196b331662aa0b768bce34341a2a913d12a61d790455623be2b10d713abbac56"
+            },
+            {
+              "key": "otel.library.name",
+              "type": "string",
+              "value": "go.opentelemetry.io/otel/sdk/tracer"
+            }
+          ],
+          "logs": null,
+          "processID": "p56",
+          "warnings": null
+        },
+        {
+          "traceID": "0555ce1903feb85770d102846273073b",
+          "spanID": "3d01dbef1293e6cd",
+          "flags": 1,
+          "operationName": "HTTP HEAD",
+          "references": [
+            {
+              "refType": "CHILD_OF",
+              "traceID": "0555ce1903feb85770d102846273073b",
+              "spanID": "f1bb978313184255"
+            }
+          ],
+          "startTime": 1705071462287286,
+          "duration": 215798,
+          "tags": [
+            {
+              "key": "http.method",
+              "type": "string",
+              "value": "HEAD"
+            },
+            {
+              "key": "http.flavor",
+              "type": "string",
+              "value": "1.1"
+            },
+            {
+              "key": "http.url",
+              "type": "string",
+              "value": "https://raw.githubusercontent.com/fsaintjacques/semver-tool/3.4.0/src/semver"
+            },
+            {
+              "key": "net.peer.name",
+              "type": "string",
+              "value": "raw.githubusercontent.com"
+            },
+            {
+              "key": "http.request.header.:authority",
+              "type": "string",
+              "value": "raw.githubusercontent.com"
+            },
+            {
+              "key": "http.request.header.:method",
+              "type": "string",
+              "value": "HEAD"
+            },
+            {
+              "key": "http.request.header.:path",
+              "type": "string",
+              "value": "/fsaintjacques/semver-tool/3.4.0/src/semver"
+            },
+            {
+              "key": "http.request.header.:scheme",
+              "type": "string",
+              "value": "https"
+            },
+            {
+              "key": "http.request.header.if-none-match",
+              "type": "string",
+              "value": "\"e8135dc02beea5325dd7607b2505971fba2f9d3bf7f0e07c47db570096ee9e4b\""
+            },
+            {
+              "key": "http.request.header.accept-encoding",
+              "type": "string",
+              "value": "gzip"
+            },
+            {
+              "key": "http.request.header.traceparent",
+              "type": "string",
+              "value": "00-0555ce1903feb85770d102846273073b-3d01dbef1293e6cd-01"
+            },
+            {
+              "key": "http.request.header.user-agent",
+              "type": "string",
+              "value": "Go-http-client/2.0"
+            },
+            {
+              "key": "http.status_code",
+              "type": "int64",
+              "value": 304
+            },
+            {
+              "key": "otel.library.name",
+              "type": "string",
+              "value": "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
+            },
+            {
+              "key": "otel.library.version",
+              "type": "string",
+              "value": "0.45.0"
+            },
+            {
+              "key": "span.kind",
+              "type": "string",
+              "value": "client"
+            }
+          ],
+          "logs": [
+            {
+              "timestamp": 1705071462287345,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "http.getconn.start"
+                },
+                {
+                  "key": "net.host.name",
+                  "type": "string",
+                  "value": "http.docker.internal:3128"
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071462287384,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "http.dns.start"
+                },
+                {
+                  "key": "net.host.name",
+                  "type": "string",
+                  "value": "http.docker.internal"
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071462288702,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "http.dns.done"
+                },
+                {
+                  "key": "http.dns.addrs",
+                  "type": "string",
+                  "value": "192.168.65.1"
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071462288707,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "http.connect.192.168.65.1:3128.start"
+                },
+                {
+                  "key": "http.remote",
+                  "type": "string",
+                  "value": "192.168.65.1:3128"
+                },
+                {
+                  "key": "http.conn.start.network",
+                  "type": "string",
+                  "value": "tcp"
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071462289330,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "http.connect.192.168.65.1:3128.done"
+                },
+                {
+                  "key": "http.conn.done.addr",
+                  "type": "string",
+                  "value": "192.168.65.1:3128"
+                },
+                {
+                  "key": "http.conn.done.network",
+                  "type": "string",
+                  "value": "tcp"
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071462308357,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "http.tls.start"
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071462330407,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "http.tls.done"
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071462330729,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "http.getconn.done"
+                },
+                {
+                  "key": "http.remote",
+                  "type": "string",
+                  "value": "192.168.65.1:3128"
+                },
+                {
+                  "key": "http.local",
+                  "type": "string",
+                  "value": "192.168.65.3:41272"
+                },
+                {
+                  "key": "http.conn.reused",
+                  "type": "bool",
+                  "value": false
+                },
+                {
+                  "key": "http.conn.wasidle",
+                  "type": "bool",
+                  "value": false
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071462330859,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "http.send.start"
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071462330860,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "http.send.done"
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071462503006,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "http.receive.start"
+                }
+              ]
+            }
+          ],
+          "processID": "p57",
+          "warnings": null
+        },
+        {
+          "traceID": "0555ce1903feb85770d102846273073b",
+          "spanID": "89596877844426c7",
+          "flags": 1,
+          "operationName": "cache request: https://raw.githubusercontent.com/fsaintjacques/semver-tool/3.4.0/src/semver",
+          "references": [
+            {
+              "refType": "CHILD_OF",
+              "traceID": "0555ce1903feb85770d102846273073b",
+              "spanID": "f1bb978313184255"
+            }
+          ],
+          "startTime": 1705071462287136,
+          "duration": 216006,
+          "tags": [
+            {
+              "key": "vertex",
+              "type": "string",
+              "value": "sha256:2de254c1cdd10bfd735b05df0ddbcc58b6c96bbeddacb02662082e5863c7dfaa"
+            },
+            {
+              "key": "otel.library.name",
+              "type": "string",
+              "value": "go.opentelemetry.io/otel/sdk/tracer"
+            }
+          ],
+          "logs": null,
+          "processID": "p58",
+          "warnings": null
+        },
+        {
+          "traceID": "0555ce1903feb85770d102846273073b",
+          "spanID": "12e17e16cfa335bf",
+          "flags": 1,
+          "operationName": "[build 3/3] RUN ln -s xx-cc /out/xx-clang \u0026\u0026     ln -s xx-cc /out/xx-clang++ \u0026\u0026     ln -s xx-cc /out/xx-c++ \u0026\u0026     ln -s xx-apt /out/xx-apt-get",
+          "references": [
+            {
+              "refType": "CHILD_OF",
+              "traceID": "0555ce1903feb85770d102846273073b",
+              "spanID": "f1bb978313184255"
+            }
+          ],
+          "startTime": 1705071462460817,
+          "duration": 446886,
+          "tags": [
+            {
+              "key": "vertex",
+              "type": "string",
+              "value": "sha256:a1eea8cd46dfa6d71e21811e74e153079f41846f01247e9ab5577997e361207d"
+            },
+            {
+              "key": "otel.library.name",
+              "type": "string",
+              "value": "go.opentelemetry.io/otel/sdk/tracer"
+            }
+          ],
+          "logs": [
+            {
+              "timestamp": 1705071462460835,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "ExecOp started"
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071462553257,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "Container created"
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071462553798,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "Container started"
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071462877373,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "Container exited"
+                },
+                {
+                  "key": "exit.code",
+                  "type": "int64",
+                  "value": 0
+                }
+              ]
+            }
+          ],
+          "processID": "p59",
+          "warnings": null
+        },
+        {
+          "traceID": "0555ce1903feb85770d102846273073b",
+          "spanID": "619e3ab187179836",
+          "flags": 1,
+          "operationName": "[xx 1/1] COPY --from=build /out/ /usr/bin/",
+          "references": [
+            {
+              "refType": "CHILD_OF",
+              "traceID": "0555ce1903feb85770d102846273073b",
+              "spanID": "f1bb978313184255"
+            }
+          ],
+          "startTime": 1705071462958859,
+          "duration": 53019,
+          "tags": [
+            {
+              "key": "vertex",
+              "type": "string",
+              "value": "sha256:abe6c4b1cb4b514609ab3e38e605946b9dabe300bacf05d0356dcea2cf038b48"
+            },
+            {
+              "key": "otel.library.name",
+              "type": "string",
+              "value": "go.opentelemetry.io/otel/sdk/tracer"
+            }
+          ],
+          "logs": null,
+          "processID": "p60",
+          "warnings": null
+        },
+        {
+          "traceID": "0555ce1903feb85770d102846273073b",
+          "spanID": "8ddee09af707d9fd",
+          "flags": 1,
+          "operationName": "load cache: [test-base 2/3] COPY --from=bats-assert . .",
+          "references": [
+            {
+              "refType": "CHILD_OF",
+              "traceID": "0555ce1903feb85770d102846273073b",
+              "spanID": "f1bb978313184255"
+            }
+          ],
+          "startTime": 1705071463018474,
+          "duration": 51,
+          "tags": [
+            {
+              "key": "vertex",
+              "type": "string",
+              "value": "sha256:6ff2f44640655dbef55c51757a728a4a8661a76b9368399ed22ae8169fda81fe"
+            },
+            {
+              "key": "otel.library.name",
+              "type": "string",
+              "value": "go.opentelemetry.io/otel/sdk/tracer"
+            }
+          ],
+          "logs": null,
+          "processID": "p61",
+          "warnings": null
+        },
+        {
+          "traceID": "0555ce1903feb85770d102846273073b",
+          "spanID": "62eddbfa59fdb4c3",
+          "flags": 1,
+          "operationName": "[test-base 3/3] COPY --from=xx / /",
+          "references": [
+            {
+              "refType": "CHILD_OF",
+              "traceID": "0555ce1903feb85770d102846273073b",
+              "spanID": "f1bb978313184255"
+            }
+          ],
+          "startTime": 1705071463018613,
+          "duration": 70700,
+          "tags": [
+            {
+              "key": "vertex",
+              "type": "string",
+              "value": "sha256:3c9083d5f8616feedd45b59d401c86f2737b196284b79fd02e5d84db757460ba"
+            },
+            {
+              "key": "otel.library.name",
+              "type": "string",
+              "value": "go.opentelemetry.io/otel/sdk/tracer"
+            }
+          ],
+          "logs": null,
+          "processID": "p62",
+          "warnings": null
+        },
+        {
+          "traceID": "0555ce1903feb85770d102846273073b",
+          "spanID": "07426553e9705e76",
+          "flags": 1,
+          "operationName": "[test-base-fixtures 1/1] COPY fixtures fixtures",
+          "references": [
+            {
+              "refType": "CHILD_OF",
+              "traceID": "0555ce1903feb85770d102846273073b",
+              "spanID": "f1bb978313184255"
+            }
+          ],
+          "startTime": 1705071463094591,
+          "duration": 88334,
+          "tags": [
+            {
+              "key": "vertex",
+              "type": "string",
+              "value": "sha256:346d54a7e04fefc7af945eebe8a6cd97677ee60bf676dc8c88c243d28922f32b"
+            },
+            {
+              "key": "otel.library.name",
+              "type": "string",
+              "value": "go.opentelemetry.io/otel/sdk/tracer"
+            }
+          ],
+          "logs": null,
+          "processID": "p63",
+          "warnings": null
+        },
+        {
+          "traceID": "0555ce1903feb85770d102846273073b",
+          "spanID": "1115d9d92e0eaeb8",
+          "flags": 1,
+          "operationName": "[test-go 1/2] COPY test-go.bats test_helper.bash ./",
+          "references": [
+            {
+              "refType": "CHILD_OF",
+              "traceID": "0555ce1903feb85770d102846273073b",
+              "spanID": "f1bb978313184255"
+            }
+          ],
+          "startTime": 1705071463187809,
+          "duration": 101427,
+          "tags": [
+            {
+              "key": "vertex",
+              "type": "string",
+              "value": "sha256:fdfb54beb471875a9c87aee2905e97b00d1e5ce7b1e280006bb05ac0e6c5e9ca"
+            },
+            {
+              "key": "otel.library.name",
+              "type": "string",
+              "value": "go.opentelemetry.io/otel/sdk/tracer"
+            }
+          ],
+          "logs": null,
+          "processID": "p64",
+          "warnings": null
+        },
+        {
+          "traceID": "0555ce1903feb85770d102846273073b",
+          "spanID": "52ac568c2da201b8",
+          "operationName": "moby.buildkit.v1.Control/Status",
+          "references": [
+            {
+              "refType": "CHILD_OF",
+              "traceID": "0555ce1903feb85770d102846273073b",
+              "spanID": "d30eea87f2f0241d"
+            }
+          ],
+          "startTime": 1705071460399695,
+          "duration": 88859320,
+          "tags": [
+            {
+              "key": "rpc.system",
+              "type": "string",
+              "value": "grpc"
+            },
+            {
+              "key": "rpc.service",
+              "type": "string",
+              "value": "moby.buildkit.v1.Control"
+            },
+            {
+              "key": "rpc.method",
+              "type": "string",
+              "value": "Status"
+            },
+            {
+              "key": "rpc.grpc.status_code",
+              "type": "int64",
+              "value": 0
+            },
+            {
+              "key": "otel.library.name",
+              "type": "string",
+              "value": "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
+            },
+            {
+              "key": "otel.library.version",
+              "type": "string",
+              "value": "semver:0.40.0"
+            },
+            {
+              "key": "span.kind",
+              "type": "string",
+              "value": "client"
+            },
+            {
+              "key": "otel.status_code",
+              "type": "string",
+              "value": "OK"
+            }
+          ],
+          "logs": [
+            {
+              "timestamp": 1705071460399750,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "message"
+                },
+                {
+                  "key": "message.type",
+                  "type": "string",
+                  "value": "SENT"
+                },
+                {
+                  "key": "message.id",
+                  "type": "int64",
+                  "value": 1
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071460402465,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "message"
+                },
+                {
+                  "key": "message.type",
+                  "type": "string",
+                  "value": "RECEIVED"
+                },
+                {
+                  "key": "message.id",
+                  "type": "int64",
+                  "value": 1
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071460402494,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "message"
+                },
+                {
+                  "key": "message.type",
+                  "type": "string",
+                  "value": "RECEIVED"
+                },
+                {
+                  "key": "message.id",
+                  "type": "int64",
+                  "value": 2
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071460402504,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "message"
+                },
+                {
+                  "key": "message.type",
+                  "type": "string",
+                  "value": "RECEIVED"
+                },
+                {
+                  "key": "message.id",
+                  "type": "int64",
+                  "value": 3
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071460408833,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "message"
+                },
+                {
+                  "key": "message.type",
+                  "type": "string",
+                  "value": "RECEIVED"
+                },
+                {
+                  "key": "message.id",
+                  "type": "int64",
+                  "value": 4
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071460415916,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "message"
+                },
+                {
+                  "key": "message.type",
+                  "type": "string",
+                  "value": "RECEIVED"
+                },
+                {
+                  "key": "message.id",
+                  "type": "int64",
+                  "value": 5
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071460419297,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "message"
+                },
+                {
+                  "key": "message.type",
+                  "type": "string",
+                  "value": "RECEIVED"
+                },
+                {
+                  "key": "message.id",
+                  "type": "int64",
+                  "value": 6
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071460434004,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "message"
+                },
+                {
+                  "key": "message.type",
+                  "type": "string",
+                  "value": "RECEIVED"
+                },
+                {
+                  "key": "message.id",
+                  "type": "int64",
+                  "value": 7
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071460440396,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "message"
+                },
+                {
+                  "key": "message.type",
+                  "type": "string",
+                  "value": "RECEIVED"
+                },
+                {
+                  "key": "message.id",
+                  "type": "int64",
+                  "value": 8
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071461298529,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "message"
+                },
+                {
+                  "key": "message.type",
+                  "type": "string",
+                  "value": "RECEIVED"
+                },
+                {
+                  "key": "message.id",
+                  "type": "int64",
+                  "value": 9
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071461298633,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "message"
+                },
+                {
+                  "key": "message.type",
+                  "type": "string",
+                  "value": "RECEIVED"
+                },
+                {
+                  "key": "message.id",
+                  "type": "int64",
+                  "value": 10
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071461298745,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "message"
+                },
+                {
+                  "key": "message.type",
+                  "type": "string",
+                  "value": "RECEIVED"
+                },
+                {
+                  "key": "message.id",
+                  "type": "int64",
+                  "value": 11
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071461298765,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "message"
+                },
+                {
+                  "key": "message.type",
+                  "type": "string",
+                  "value": "RECEIVED"
+                },
+                {
+                  "key": "message.id",
+                  "type": "int64",
+                  "value": 12
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071461299220,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "message"
+                },
+                {
+                  "key": "message.type",
+                  "type": "string",
+                  "value": "RECEIVED"
+                },
+                {
+                  "key": "message.id",
+                  "type": "int64",
+                  "value": 13
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071461450692,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "message"
+                },
+                {
+                  "key": "message.type",
+                  "type": "string",
+                  "value": "RECEIVED"
+                },
+                {
+                  "key": "message.id",
+                  "type": "int64",
+                  "value": 14
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071461450718,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "message"
+                },
+                {
+                  "key": "message.type",
+                  "type": "string",
+                  "value": "RECEIVED"
+                },
+                {
+                  "key": "message.id",
+                  "type": "int64",
+                  "value": 15
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071461450875,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "message"
+                },
+                {
+                  "key": "message.type",
+                  "type": "string",
+                  "value": "RECEIVED"
+                },
+                {
+                  "key": "message.id",
+                  "type": "int64",
+                  "value": 16
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071461456368,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "message"
+                },
+                {
+                  "key": "message.type",
+                  "type": "string",
+                  "value": "RECEIVED"
+                },
+                {
+                  "key": "message.id",
+                  "type": "int64",
+                  "value": 17
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071461464429,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "message"
+                },
+                {
+                  "key": "message.type",
+                  "type": "string",
+                  "value": "RECEIVED"
+                },
+                {
+                  "key": "message.id",
+                  "type": "int64",
+                  "value": 18
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071461464736,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "message"
+                },
+                {
+                  "key": "message.type",
+                  "type": "string",
+                  "value": "RECEIVED"
+                },
+                {
+                  "key": "message.id",
+                  "type": "int64",
+                  "value": 19
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071461480645,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "message"
+                },
+                {
+                  "key": "message.type",
+                  "type": "string",
+                  "value": "RECEIVED"
+                },
+                {
+                  "key": "message.id",
+                  "type": "int64",
+                  "value": 20
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071461489618,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "message"
+                },
+                {
+                  "key": "message.type",
+                  "type": "string",
+                  "value": "RECEIVED"
+                },
+                {
+                  "key": "message.id",
+                  "type": "int64",
+                  "value": 21
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071462247192,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "message"
+                },
+                {
+                  "key": "message.type",
+                  "type": "string",
+                  "value": "RECEIVED"
+                },
+                {
+                  "key": "message.id",
+                  "type": "int64",
+                  "value": 22
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071462251291,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "message"
+                },
+                {
+                  "key": "message.type",
+                  "type": "string",
+                  "value": "RECEIVED"
+                },
+                {
+                  "key": "message.id",
+                  "type": "int64",
+                  "value": 23
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071462287177,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "message"
+                },
+                {
+                  "key": "message.type",
+                  "type": "string",
+                  "value": "RECEIVED"
+                },
+                {
+                  "key": "message.id",
+                  "type": "int64",
+                  "value": 24
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071462287291,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "message"
+                },
+                {
+                  "key": "message.type",
+                  "type": "string",
+                  "value": "RECEIVED"
+                },
+                {
+                  "key": "message.id",
+                  "type": "int64",
+                  "value": 25
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071462287552,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "message"
+                },
+                {
+                  "key": "message.type",
+                  "type": "string",
+                  "value": "RECEIVED"
+                },
+                {
+                  "key": "message.id",
+                  "type": "int64",
+                  "value": 26
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071462288074,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "message"
+                },
+                {
+                  "key": "message.type",
+                  "type": "string",
+                  "value": "RECEIVED"
+                },
+                {
+                  "key": "message.id",
+                  "type": "int64",
+                  "value": 27
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071462293598,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "message"
+                },
+                {
+                  "key": "message.type",
+                  "type": "string",
+                  "value": "RECEIVED"
+                },
+                {
+                  "key": "message.id",
+                  "type": "int64",
+                  "value": 28
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071462311730,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "message"
+                },
+                {
+                  "key": "message.type",
+                  "type": "string",
+                  "value": "RECEIVED"
+                },
+                {
+                  "key": "message.id",
+                  "type": "int64",
+                  "value": 29
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071462369784,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "message"
+                },
+                {
+                  "key": "message.type",
+                  "type": "string",
+                  "value": "RECEIVED"
+                },
+                {
+                  "key": "message.id",
+                  "type": "int64",
+                  "value": 30
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071462385847,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "message"
+                },
+                {
+                  "key": "message.type",
+                  "type": "string",
+                  "value": "RECEIVED"
+                },
+                {
+                  "key": "message.id",
+                  "type": "int64",
+                  "value": 31
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071462390942,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "message"
+                },
+                {
+                  "key": "message.type",
+                  "type": "string",
+                  "value": "RECEIVED"
+                },
+                {
+                  "key": "message.id",
+                  "type": "int64",
+                  "value": 32
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071462391212,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "message"
+                },
+                {
+                  "key": "message.type",
+                  "type": "string",
+                  "value": "RECEIVED"
+                },
+                {
+                  "key": "message.id",
+                  "type": "int64",
+                  "value": 33
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071462456018,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "message"
+                },
+                {
+                  "key": "message.type",
+                  "type": "string",
+                  "value": "RECEIVED"
+                },
+                {
+                  "key": "message.id",
+                  "type": "int64",
+                  "value": 34
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071462461238,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "message"
+                },
+                {
+                  "key": "message.type",
+                  "type": "string",
+                  "value": "RECEIVED"
+                },
+                {
+                  "key": "message.id",
+                  "type": "int64",
+                  "value": 35
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071462503713,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "message"
+                },
+                {
+                  "key": "message.type",
+                  "type": "string",
+                  "value": "RECEIVED"
+                },
+                {
+                  "key": "message.id",
+                  "type": "int64",
+                  "value": 36
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071462908249,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "message"
+                },
+                {
+                  "key": "message.type",
+                  "type": "string",
+                  "value": "RECEIVED"
+                },
+                {
+                  "key": "message.id",
+                  "type": "int64",
+                  "value": 37
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071462959403,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "message"
+                },
+                {
+                  "key": "message.type",
+                  "type": "string",
+                  "value": "RECEIVED"
+                },
+                {
+                  "key": "message.id",
+                  "type": "int64",
+                  "value": 38
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071463012411,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "message"
+                },
+                {
+                  "key": "message.type",
+                  "type": "string",
+                  "value": "RECEIVED"
+                },
+                {
+                  "key": "message.id",
+                  "type": "int64",
+                  "value": 39
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071463019002,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "message"
+                },
+                {
+                  "key": "message.type",
+                  "type": "string",
+                  "value": "RECEIVED"
+                },
+                {
+                  "key": "message.id",
+                  "type": "int64",
+                  "value": 40
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071463019028,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "message"
+                },
+                {
+                  "key": "message.type",
+                  "type": "string",
+                  "value": "RECEIVED"
+                },
+                {
+                  "key": "message.id",
+                  "type": "int64",
+                  "value": 41
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071463089852,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "message"
+                },
+                {
+                  "key": "message.type",
+                  "type": "string",
+                  "value": "RECEIVED"
+                },
+                {
+                  "key": "message.id",
+                  "type": "int64",
+                  "value": 42
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071463094980,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "message"
+                },
+                {
+                  "key": "message.type",
+                  "type": "string",
+                  "value": "RECEIVED"
+                },
+                {
+                  "key": "message.id",
+                  "type": "int64",
+                  "value": 43
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071463183415,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "message"
+                },
+                {
+                  "key": "message.type",
+                  "type": "string",
+                  "value": "RECEIVED"
+                },
+                {
+                  "key": "message.id",
+                  "type": "int64",
+                  "value": 44
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071463188233,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "message"
+                },
+                {
+                  "key": "message.type",
+                  "type": "string",
+                  "value": "RECEIVED"
+                },
+                {
+                  "key": "message.id",
+                  "type": "int64",
+                  "value": 45
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071463289770,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "message"
+                },
+                {
+                  "key": "message.type",
+                  "type": "string",
+                  "value": "RECEIVED"
+                },
+                {
+                  "key": "message.id",
+                  "type": "int64",
+                  "value": 46
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071463297209,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "message"
+                },
+                {
+                  "key": "message.type",
+                  "type": "string",
+                  "value": "RECEIVED"
+                },
+                {
+                  "key": "message.id",
+                  "type": "int64",
+                  "value": 47
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071463738226,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "message"
+                },
+                {
+                  "key": "message.type",
+                  "type": "string",
+                  "value": "RECEIVED"
+                },
+                {
+                  "key": "message.id",
+                  "type": "int64",
+                  "value": 48
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071471968009,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "message"
+                },
+                {
+                  "key": "message.type",
+                  "type": "string",
+                  "value": "RECEIVED"
+                },
+                {
+                  "key": "message.id",
+                  "type": "int64",
+                  "value": 49
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071472274741,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "message"
+                },
+                {
+                  "key": "message.type",
+                  "type": "string",
+                  "value": "RECEIVED"
+                },
+                {
+                  "key": "message.id",
+                  "type": "int64",
+                  "value": 50
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071472366039,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "message"
+                },
+                {
+                  "key": "message.type",
+                  "type": "string",
+                  "value": "RECEIVED"
+                },
+                {
+                  "key": "message.id",
+                  "type": "int64",
+                  "value": 51
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071472463419,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "message"
+                },
+                {
+                  "key": "message.type",
+                  "type": "string",
+                  "value": "RECEIVED"
+                },
+                {
+                  "key": "message.id",
+                  "type": "int64",
+                  "value": 52
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071472564803,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "message"
+                },
+                {
+                  "key": "message.type",
+                  "type": "string",
+                  "value": "RECEIVED"
+                },
+                {
+                  "key": "message.id",
+                  "type": "int64",
+                  "value": 53
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071472665774,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "message"
+                },
+                {
+                  "key": "message.type",
+                  "type": "string",
+                  "value": "RECEIVED"
+                },
+                {
+                  "key": "message.id",
+                  "type": "int64",
+                  "value": 54
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071472980197,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "message"
+                },
+                {
+                  "key": "message.type",
+                  "type": "string",
+                  "value": "RECEIVED"
+                },
+                {
+                  "key": "message.id",
+                  "type": "int64",
+                  "value": 55
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071473291337,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "message"
+                },
+                {
+                  "key": "message.type",
+                  "type": "string",
+                  "value": "RECEIVED"
+                },
+                {
+                  "key": "message.id",
+                  "type": "int64",
+                  "value": 56
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071473586146,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "message"
+                },
+                {
+                  "key": "message.type",
+                  "type": "string",
+                  "value": "RECEIVED"
+                },
+                {
+                  "key": "message.id",
+                  "type": "int64",
+                  "value": 57
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071473855079,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "message"
+                },
+                {
+                  "key": "message.type",
+                  "type": "string",
+                  "value": "RECEIVED"
+                },
+                {
+                  "key": "message.id",
+                  "type": "int64",
+                  "value": 58
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071473939470,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "message"
+                },
+                {
+                  "key": "message.type",
+                  "type": "string",
+                  "value": "RECEIVED"
+                },
+                {
+                  "key": "message.id",
+                  "type": "int64",
+                  "value": 59
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071475320568,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "message"
+                },
+                {
+                  "key": "message.type",
+                  "type": "string",
+                  "value": "RECEIVED"
+                },
+                {
+                  "key": "message.id",
+                  "type": "int64",
+                  "value": 60
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071476655115,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "message"
+                },
+                {
+                  "key": "message.type",
+                  "type": "string",
+                  "value": "RECEIVED"
+                },
+                {
+                  "key": "message.id",
+                  "type": "int64",
+                  "value": 61
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071476914980,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "message"
+                },
+                {
+                  "key": "message.type",
+                  "type": "string",
+                  "value": "RECEIVED"
+                },
+                {
+                  "key": "message.id",
+                  "type": "int64",
+                  "value": 62
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071477186993,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "message"
+                },
+                {
+                  "key": "message.type",
+                  "type": "string",
+                  "value": "RECEIVED"
+                },
+                {
+                  "key": "message.id",
+                  "type": "int64",
+                  "value": 63
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071477270160,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "message"
+                },
+                {
+                  "key": "message.type",
+                  "type": "string",
+                  "value": "RECEIVED"
+                },
+                {
+                  "key": "message.id",
+                  "type": "int64",
+                  "value": 64
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071477550595,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "message"
+                },
+                {
+                  "key": "message.type",
+                  "type": "string",
+                  "value": "RECEIVED"
+                },
+                {
+                  "key": "message.id",
+                  "type": "int64",
+                  "value": 65
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071477636707,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "message"
+                },
+                {
+                  "key": "message.type",
+                  "type": "string",
+                  "value": "RECEIVED"
+                },
+                {
+                  "key": "message.id",
+                  "type": "int64",
+                  "value": 66
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071477910932,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "message"
+                },
+                {
+                  "key": "message.type",
+                  "type": "string",
+                  "value": "RECEIVED"
+                },
+                {
+                  "key": "message.id",
+                  "type": "int64",
+                  "value": 67
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071477996460,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "message"
+                },
+                {
+                  "key": "message.type",
+                  "type": "string",
+                  "value": "RECEIVED"
+                },
+                {
+                  "key": "message.id",
+                  "type": "int64",
+                  "value": 68
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071478273025,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "message"
+                },
+                {
+                  "key": "message.type",
+                  "type": "string",
+                  "value": "RECEIVED"
+                },
+                {
+                  "key": "message.id",
+                  "type": "int64",
+                  "value": 69
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071478357401,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "message"
+                },
+                {
+                  "key": "message.type",
+                  "type": "string",
+                  "value": "RECEIVED"
+                },
+                {
+                  "key": "message.id",
+                  "type": "int64",
+                  "value": 70
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071481127991,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "message"
+                },
+                {
+                  "key": "message.type",
+                  "type": "string",
+                  "value": "RECEIVED"
+                },
+                {
+                  "key": "message.id",
+                  "type": "int64",
+                  "value": 71
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071481658083,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "message"
+                },
+                {
+                  "key": "message.type",
+                  "type": "string",
+                  "value": "RECEIVED"
+                },
+                {
+                  "key": "message.id",
+                  "type": "int64",
+                  "value": 72
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071482656773,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "message"
+                },
+                {
+                  "key": "message.type",
+                  "type": "string",
+                  "value": "RECEIVED"
+                },
+                {
+                  "key": "message.id",
+                  "type": "int64",
+                  "value": 73
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071482786421,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "message"
+                },
+                {
+                  "key": "message.type",
+                  "type": "string",
+                  "value": "RECEIVED"
+                },
+                {
+                  "key": "message.id",
+                  "type": "int64",
+                  "value": 74
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071485354449,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "message"
+                },
+                {
+                  "key": "message.type",
+                  "type": "string",
+                  "value": "RECEIVED"
+                },
+                {
+                  "key": "message.id",
+                  "type": "int64",
+                  "value": 75
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071487858012,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "message"
+                },
+                {
+                  "key": "message.type",
+                  "type": "string",
+                  "value": "RECEIVED"
+                },
+                {
+                  "key": "message.id",
+                  "type": "int64",
+                  "value": 76
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071490082719,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "message"
+                },
+                {
+                  "key": "message.type",
+                  "type": "string",
+                  "value": "RECEIVED"
+                },
+                {
+                  "key": "message.id",
+                  "type": "int64",
+                  "value": 77
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071492291732,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "message"
+                },
+                {
+                  "key": "message.type",
+                  "type": "string",
+                  "value": "RECEIVED"
+                },
+                {
+                  "key": "message.id",
+                  "type": "int64",
+                  "value": 78
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071494601733,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "message"
+                },
+                {
+                  "key": "message.type",
+                  "type": "string",
+                  "value": "RECEIVED"
+                },
+                {
+                  "key": "message.id",
+                  "type": "int64",
+                  "value": 79
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071497071072,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "message"
+                },
+                {
+                  "key": "message.type",
+                  "type": "string",
+                  "value": "RECEIVED"
+                },
+                {
+                  "key": "message.id",
+                  "type": "int64",
+                  "value": 80
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071499619476,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "message"
+                },
+                {
+                  "key": "message.type",
+                  "type": "string",
+                  "value": "RECEIVED"
+                },
+                {
+                  "key": "message.id",
+                  "type": "int64",
+                  "value": 81
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071501927248,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "message"
+                },
+                {
+                  "key": "message.type",
+                  "type": "string",
+                  "value": "RECEIVED"
+                },
+                {
+                  "key": "message.id",
+                  "type": "int64",
+                  "value": 82
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071504270083,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "message"
+                },
+                {
+                  "key": "message.type",
+                  "type": "string",
+                  "value": "RECEIVED"
+                },
+                {
+                  "key": "message.id",
+                  "type": "int64",
+                  "value": 83
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071506555502,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "message"
+                },
+                {
+                  "key": "message.type",
+                  "type": "string",
+                  "value": "RECEIVED"
+                },
+                {
+                  "key": "message.id",
+                  "type": "int64",
+                  "value": 84
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071508933118,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "message"
+                },
+                {
+                  "key": "message.type",
+                  "type": "string",
+                  "value": "RECEIVED"
+                },
+                {
+                  "key": "message.id",
+                  "type": "int64",
+                  "value": 85
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071511230064,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "message"
+                },
+                {
+                  "key": "message.type",
+                  "type": "string",
+                  "value": "RECEIVED"
+                },
+                {
+                  "key": "message.id",
+                  "type": "int64",
+                  "value": 86
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071513836012,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "message"
+                },
+                {
+                  "key": "message.type",
+                  "type": "string",
+                  "value": "RECEIVED"
+                },
+                {
+                  "key": "message.id",
+                  "type": "int64",
+                  "value": 87
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071515658402,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "message"
+                },
+                {
+                  "key": "message.type",
+                  "type": "string",
+                  "value": "RECEIVED"
+                },
+                {
+                  "key": "message.id",
+                  "type": "int64",
+                  "value": 88
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071516076400,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "message"
+                },
+                {
+                  "key": "message.type",
+                  "type": "string",
+                  "value": "RECEIVED"
+                },
+                {
+                  "key": "message.id",
+                  "type": "int64",
+                  "value": 89
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071521083074,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "message"
+                },
+                {
+                  "key": "message.type",
+                  "type": "string",
+                  "value": "RECEIVED"
+                },
+                {
+                  "key": "message.id",
+                  "type": "int64",
+                  "value": 90
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071525608812,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "message"
+                },
+                {
+                  "key": "message.type",
+                  "type": "string",
+                  "value": "RECEIVED"
+                },
+                {
+                  "key": "message.id",
+                  "type": "int64",
+                  "value": 91
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071531177282,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "message"
+                },
+                {
+                  "key": "message.type",
+                  "type": "string",
+                  "value": "RECEIVED"
+                },
+                {
+                  "key": "message.id",
+                  "type": "int64",
+                  "value": 92
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071536545055,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "message"
+                },
+                {
+                  "key": "message.type",
+                  "type": "string",
+                  "value": "RECEIVED"
+                },
+                {
+                  "key": "message.id",
+                  "type": "int64",
+                  "value": 93
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071541977370,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "message"
+                },
+                {
+                  "key": "message.type",
+                  "type": "string",
+                  "value": "RECEIVED"
+                },
+                {
+                  "key": "message.id",
+                  "type": "int64",
+                  "value": 94
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071545368902,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "message"
+                },
+                {
+                  "key": "message.type",
+                  "type": "string",
+                  "value": "RECEIVED"
+                },
+                {
+                  "key": "message.id",
+                  "type": "int64",
+                  "value": 95
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071545433713,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "message"
+                },
+                {
+                  "key": "message.type",
+                  "type": "string",
+                  "value": "RECEIVED"
+                },
+                {
+                  "key": "message.id",
+                  "type": "int64",
+                  "value": 96
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071549127357,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "message"
+                },
+                {
+                  "key": "message.type",
+                  "type": "string",
+                  "value": "RECEIVED"
+                },
+                {
+                  "key": "message.id",
+                  "type": "int64",
+                  "value": 97
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071549127620,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "message"
+                },
+                {
+                  "key": "message.type",
+                  "type": "string",
+                  "value": "RECEIVED"
+                },
+                {
+                  "key": "message.id",
+                  "type": "int64",
+                  "value": 98
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071549253104,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "message"
+                },
+                {
+                  "key": "message.type",
+                  "type": "string",
+                  "value": "RECEIVED"
+                },
+                {
+                  "key": "message.id",
+                  "type": "int64",
+                  "value": 99
+                }
+              ]
+            }
+          ],
+          "processID": "p65",
+          "warnings": null
+        },
+        {
+          "traceID": "0555ce1903feb85770d102846273073b",
+          "spanID": "4e11b66472979a18",
+          "operationName": "moby.buildkit.v1.Control/Solve",
+          "references": [
+            {
+              "refType": "CHILD_OF",
+              "traceID": "0555ce1903feb85770d102846273073b",
+              "spanID": "d30eea87f2f0241d"
+            }
+          ],
+          "startTime": 1705071460399715,
+          "duration": 88943310,
+          "tags": [
+            {
+              "key": "rpc.system",
+              "type": "string",
+              "value": "grpc"
+            },
+            {
+              "key": "rpc.service",
+              "type": "string",
+              "value": "moby.buildkit.v1.Control"
+            },
+            {
+              "key": "rpc.method",
+              "type": "string",
+              "value": "Solve"
+            },
+            {
+              "key": "rpc.grpc.status_code",
+              "type": "int64",
+              "value": 0
+            },
+            {
+              "key": "otel.library.name",
+              "type": "string",
+              "value": "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
+            },
+            {
+              "key": "otel.library.version",
+              "type": "string",
+              "value": "semver:0.40.0"
+            },
+            {
+              "key": "span.kind",
+              "type": "string",
+              "value": "client"
+            },
+            {
+              "key": "otel.status_code",
+              "type": "string",
+              "value": "OK"
+            }
+          ],
+          "logs": [
+            {
+              "timestamp": 1705071460399725,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "message"
+                },
+                {
+                  "key": "message.type",
+                  "type": "string",
+                  "value": "SENT"
+                },
+                {
+                  "key": "message.id",
+                  "type": "int64",
+                  "value": 1
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071549342194,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "message"
+                },
+                {
+                  "key": "message.type",
+                  "type": "string",
+                  "value": "RECEIVED"
+                },
+                {
+                  "key": "message.id",
+                  "type": "int64",
+                  "value": 1
+                }
+              ]
+            }
+          ],
+          "processID": "p66",
+          "warnings": null
+        },
+        {
+          "traceID": "0555ce1903feb85770d102846273073b",
+          "spanID": "65de56cee7833b9c",
+          "operationName": "moby.buildkit.v1.Control/ListenBuildHistory",
+          "references": [
+            {
+              "refType": "CHILD_OF",
+              "traceID": "0555ce1903feb85770d102846273073b",
+              "spanID": "d30eea87f2f0241d"
+            }
+          ],
+          "startTime": 1705071549342399,
+          "duration": 878,
+          "tags": [
+            {
+              "key": "rpc.system",
+              "type": "string",
+              "value": "grpc"
+            },
+            {
+              "key": "rpc.service",
+              "type": "string",
+              "value": "moby.buildkit.v1.Control"
+            },
+            {
+              "key": "rpc.method",
+              "type": "string",
+              "value": "ListenBuildHistory"
+            },
+            {
+              "key": "rpc.grpc.status_code",
+              "type": "int64",
+              "value": 0
+            },
+            {
+              "key": "otel.library.name",
+              "type": "string",
+              "value": "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
+            },
+            {
+              "key": "otel.library.version",
+              "type": "string",
+              "value": "semver:0.40.0"
+            },
+            {
+              "key": "span.kind",
+              "type": "string",
+              "value": "client"
+            },
+            {
+              "key": "otel.status_code",
+              "type": "string",
+              "value": "OK"
+            }
+          ],
+          "logs": [
+            {
+              "timestamp": 1705071549342530,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "message"
+                },
+                {
+                  "key": "message.type",
+                  "type": "string",
+                  "value": "SENT"
+                },
+                {
+                  "key": "message.id",
+                  "type": "int64",
+                  "value": 1
+                }
+              ]
+            }
+          ],
+          "processID": "p67",
+          "warnings": null
+        },
+        {
+          "traceID": "0555ce1903feb85770d102846273073b",
+          "spanID": "d30eea87f2f0241d",
+          "operationName": "bake",
+          "references": [
+            {
+              "refType": "CHILD_OF",
+              "traceID": "00000000000000000000000000000000",
+              "spanID": "0000000000000000"
+            }
+          ],
+          "startTime": 1705071459717700,
+          "duration": 89626460,
+          "tags": [
+            {
+              "key": "command",
+              "type": "string",
+              "value": "/usr/local/lib/docker/cli-plugins/docker-buildx buildx bake --set *.args.TEST_BASE_TYPE=alpine test-go"
+            },
+            {
+              "key": "otel.library.name",
+              "type": "string",
+              "value": "go.opentelemetry.io/otel/sdk/tracer"
+            },
+            {
+              "key": "otel.status_code",
+              "type": "string",
+              "value": "OK"
+            }
+          ],
+          "logs": null,
+          "processID": "p68",
+          "warnings": null
+        },
+        {
+          "traceID": "0555ce1903feb85770d102846273073b",
+          "spanID": "3bc0c8dfc74903a6",
+          "flags": 1,
+          "operationName": "[test-go 2/2] RUN --mount=type=cache,target=/pkg-cache,sharing=locked --mount=type=cache,target=/root/.cache ./test-go.bats",
+          "references": [
+            {
+              "refType": "CHILD_OF",
+              "traceID": "0555ce1903feb85770d102846273073b",
+              "spanID": "f1bb978313184255"
+            }
+          ],
+          "startTime": 1705071463296744,
+          "duration": 85956687,
+          "tags": [
+            {
+              "key": "vertex",
+              "type": "string",
+              "value": "sha256:0b3c6531279269810071b85e6f8ffba4ae61f6573d281bd79e63c6456daca816"
+            },
+            {
+              "key": "otel.library.name",
+              "type": "string",
+              "value": "go.opentelemetry.io/otel/sdk/tracer"
+            }
+          ],
+          "logs": [
+            {
+              "timestamp": 1705071463296759,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "ExecOp started"
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071463396638,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "Container created"
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071463397050,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "Container started"
+                }
+              ]
+            },
+            {
+              "timestamp": 1705071549134015,
+              "fields": [
+                {
+                  "key": "event",
+                  "type": "string",
+                  "value": "Container exited"
+                },
+                {
+                  "key": "exit.code",
+                  "type": "int64",
+                  "value": 0
+                }
+              ]
+            }
+          ],
+          "processID": "p69",
+          "warnings": null
+        },
+        {
+          "traceID": "0555ce1903feb85770d102846273073b",
+          "spanID": "b6791e17d0cf1c4e",
+          "flags": 1,
+          "operationName": "moby.buildkit.v1.Control/Status",
+          "references": [
+            {
+              "refType": "CHILD_OF",
+              "traceID": "0555ce1903feb85770d102846273073b",
+              "spanID": "52ac568c2da201b8"
+            }
+          ],
+          "startTime": 1705071460400083,
+          "duration": 88858542,
+          "tags": [
+            {
+              "key": "rpc.system",
+              "type": "string",
+              "value": "grpc"
+            },
+            {
+              "key": "rpc.service",
+              "type": "string",
+              "value": "moby.buildkit.v1.Control"
+            },
+            {
+              "key": "rpc.method",
+              "type": "string",
+              "value": "Status"
+            },
+            {
+              "key": "rpc.grpc.status_code",
+              "type": "int64",
+              "value": 0
+            },
+            {
+              "key": "otel.library.name",
+              "type": "string",
+              "value": "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
+            },
+            {
+              "key": "otel.library.version",
+              "type": "string",
+              "value": "0.45.0"
+            },
+            {
+              "key": "span.kind",
+              "type": "string",
+              "value": "server"
+            }
+          ],
+          "logs": null,
+          "processID": "p70",
+          "warnings": null
+        },
+        {
+          "traceID": "0555ce1903feb85770d102846273073b",
+          "spanID": "f1bb978313184255",
+          "flags": 1,
+          "operationName": "moby.buildkit.v1.Control/Solve",
+          "references": [
+            {
+              "refType": "CHILD_OF",
+              "traceID": "0555ce1903feb85770d102846273073b",
+              "spanID": "4e11b66472979a18"
+            }
+          ],
+          "startTime": 1705071460400118,
+          "duration": 88942457,
+          "tags": [
+            {
+              "key": "rpc.system",
+              "type": "string",
+              "value": "grpc"
+            },
+            {
+              "key": "rpc.service",
+              "type": "string",
+              "value": "moby.buildkit.v1.Control"
+            },
+            {
+              "key": "rpc.method",
+              "type": "string",
+              "value": "Solve"
+            },
+            {
+              "key": "rpc.grpc.status_code",
+              "type": "int64",
+              "value": 0
+            },
+            {
+              "key": "otel.library.name",
+              "type": "string",
+              "value": "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
+            },
+            {
+              "key": "otel.library.version",
+              "type": "string",
+              "value": "0.45.0"
+            },
+            {
+              "key": "span.kind",
+              "type": "string",
+              "value": "server"
+            }
+          ],
+          "logs": null,
+          "processID": "p71",
+          "warnings": null
+        },
+        {
+          "traceID": "0555ce1903feb85770d102846273073b",
+          "spanID": "9fbd0e803dc50ab6",
+          "flags": 1,
+          "operationName": "moby.buildkit.v1.Control/ListenBuildHistory",
+          "references": [
+            {
+              "refType": "CHILD_OF",
+              "traceID": "0555ce1903feb85770d102846273073b",
+              "spanID": "65de56cee7833b9c"
+            }
+          ],
+          "startTime": 1705071549342907,
+          "duration": 38,
+          "tags": [
+            {
+              "key": "rpc.system",
+              "type": "string",
+              "value": "grpc"
+            },
+            {
+              "key": "rpc.service",
+              "type": "string",
+              "value": "moby.buildkit.v1.Control"
+            },
+            {
+              "key": "rpc.method",
+              "type": "string",
+              "value": "ListenBuildHistory"
+            },
+            {
+              "key": "rpc.grpc.status_code",
+              "type": "int64",
+              "value": 0
+            },
+            {
+              "key": "otel.library.name",
+              "type": "string",
+              "value": "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
+            },
+            {
+              "key": "otel.library.version",
+              "type": "string",
+              "value": "0.45.0"
+            },
+            {
+              "key": "span.kind",
+              "type": "string",
+              "value": "server"
+            }
+          ],
+          "logs": null,
+          "processID": "p72",
+          "warnings": null
+        }
+      ],
+      "processes": {
+        "p0": {
+          "serviceName": "buildx",
+          "tags": [
+            {
+              "key": "telemetry.sdk.language",
+              "type": "string",
+              "value": "go"
+            },
+            {
+              "key": "telemetry.sdk.name",
+              "type": "string",
+              "value": "opentelemetry"
+            },
+            {
+              "key": "telemetry.sdk.version",
+              "type": "string",
+              "value": "1.14.0"
+            }
+          ]
+        },
+        "p1": {
+          "serviceName": "buildx",
+          "tags": [
+            {
+              "key": "telemetry.sdk.language",
+              "type": "string",
+              "value": "go"
+            },
+            {
+              "key": "telemetry.sdk.name",
+              "type": "string",
+              "value": "opentelemetry"
+            },
+            {
+              "key": "telemetry.sdk.version",
+              "type": "string",
+              "value": "1.14.0"
+            }
+          ]
+        },
+        "p10": {
+          "serviceName": "buildx",
+          "tags": [
+            {
+              "key": "telemetry.sdk.language",
+              "type": "string",
+              "value": "go"
+            },
+            {
+              "key": "telemetry.sdk.name",
+              "type": "string",
+              "value": "opentelemetry"
+            },
+            {
+              "key": "telemetry.sdk.version",
+              "type": "string",
+              "value": "1.14.0"
+            }
+          ]
+        },
+        "p11": {
+          "serviceName": "buildx",
+          "tags": [
+            {
+              "key": "telemetry.sdk.language",
+              "type": "string",
+              "value": "go"
+            },
+            {
+              "key": "telemetry.sdk.name",
+              "type": "string",
+              "value": "opentelemetry"
+            },
+            {
+              "key": "telemetry.sdk.version",
+              "type": "string",
+              "value": "1.14.0"
+            }
+          ]
+        },
+        "p12": {
+          "serviceName": "buildx",
+          "tags": [
+            {
+              "key": "telemetry.sdk.language",
+              "type": "string",
+              "value": "go"
+            },
+            {
+              "key": "telemetry.sdk.name",
+              "type": "string",
+              "value": "opentelemetry"
+            },
+            {
+              "key": "telemetry.sdk.version",
+              "type": "string",
+              "value": "1.14.0"
+            }
+          ]
+        },
+        "p13": {
+          "serviceName": "buildx",
+          "tags": [
+            {
+              "key": "telemetry.sdk.language",
+              "type": "string",
+              "value": "go"
+            },
+            {
+              "key": "telemetry.sdk.name",
+              "type": "string",
+              "value": "opentelemetry"
+            },
+            {
+              "key": "telemetry.sdk.version",
+              "type": "string",
+              "value": "1.14.0"
+            }
+          ]
+        },
+        "p14": {
+          "serviceName": "buildx",
+          "tags": [
+            {
+              "key": "telemetry.sdk.language",
+              "type": "string",
+              "value": "go"
+            },
+            {
+              "key": "telemetry.sdk.name",
+              "type": "string",
+              "value": "opentelemetry"
+            },
+            {
+              "key": "telemetry.sdk.version",
+              "type": "string",
+              "value": "1.14.0"
+            }
+          ]
+        },
+        "p15": {
+          "serviceName": "buildx",
+          "tags": [
+            {
+              "key": "telemetry.sdk.language",
+              "type": "string",
+              "value": "go"
+            },
+            {
+              "key": "telemetry.sdk.name",
+              "type": "string",
+              "value": "opentelemetry"
+            },
+            {
+              "key": "telemetry.sdk.version",
+              "type": "string",
+              "value": "1.14.0"
+            }
+          ]
+        },
+        "p16": {
+          "serviceName": "buildx",
+          "tags": [
+            {
+              "key": "telemetry.sdk.language",
+              "type": "string",
+              "value": "go"
+            },
+            {
+              "key": "telemetry.sdk.name",
+              "type": "string",
+              "value": "opentelemetry"
+            },
+            {
+              "key": "telemetry.sdk.version",
+              "type": "string",
+              "value": "1.14.0"
+            }
+          ]
+        },
+        "p17": {
+          "serviceName": "buildx",
+          "tags": [
+            {
+              "key": "telemetry.sdk.language",
+              "type": "string",
+              "value": "go"
+            },
+            {
+              "key": "telemetry.sdk.name",
+              "type": "string",
+              "value": "opentelemetry"
+            },
+            {
+              "key": "telemetry.sdk.version",
+              "type": "string",
+              "value": "1.14.0"
+            }
+          ]
+        },
+        "p18": {
+          "serviceName": "buildx",
+          "tags": [
+            {
+              "key": "telemetry.sdk.language",
+              "type": "string",
+              "value": "go"
+            },
+            {
+              "key": "telemetry.sdk.name",
+              "type": "string",
+              "value": "opentelemetry"
+            },
+            {
+              "key": "telemetry.sdk.version",
+              "type": "string",
+              "value": "1.14.0"
+            }
+          ]
+        },
+        "p19": {
+          "serviceName": "buildx",
+          "tags": [
+            {
+              "key": "telemetry.sdk.language",
+              "type": "string",
+              "value": "go"
+            },
+            {
+              "key": "telemetry.sdk.name",
+              "type": "string",
+              "value": "opentelemetry"
+            },
+            {
+              "key": "telemetry.sdk.version",
+              "type": "string",
+              "value": "1.14.0"
+            }
+          ]
+        },
+        "p2": {
+          "serviceName": "buildx",
+          "tags": [
+            {
+              "key": "telemetry.sdk.language",
+              "type": "string",
+              "value": "go"
+            },
+            {
+              "key": "telemetry.sdk.name",
+              "type": "string",
+              "value": "opentelemetry"
+            },
+            {
+              "key": "telemetry.sdk.version",
+              "type": "string",
+              "value": "1.14.0"
+            }
+          ]
+        },
+        "p20": {
+          "serviceName": "dockerd",
+          "tags": [
+            {
+              "key": "telemetry.sdk.language",
+              "type": "string",
+              "value": "go"
+            },
+            {
+              "key": "telemetry.sdk.name",
+              "type": "string",
+              "value": "opentelemetry"
+            },
+            {
+              "key": "telemetry.sdk.version",
+              "type": "string",
+              "value": "1.19.0"
+            }
+          ]
+        },
+        "p21": {
+          "serviceName": "dockerd",
+          "tags": [
+            {
+              "key": "telemetry.sdk.language",
+              "type": "string",
+              "value": "go"
+            },
+            {
+              "key": "telemetry.sdk.name",
+              "type": "string",
+              "value": "opentelemetry"
+            },
+            {
+              "key": "telemetry.sdk.version",
+              "type": "string",
+              "value": "1.19.0"
+            }
+          ]
+        },
+        "p22": {
+          "serviceName": "dockerd",
+          "tags": [
+            {
+              "key": "telemetry.sdk.language",
+              "type": "string",
+              "value": "go"
+            },
+            {
+              "key": "telemetry.sdk.name",
+              "type": "string",
+              "value": "opentelemetry"
+            },
+            {
+              "key": "telemetry.sdk.version",
+              "type": "string",
+              "value": "1.19.0"
+            }
+          ]
+        },
+        "p23": {
+          "serviceName": "dockerd",
+          "tags": [
+            {
+              "key": "telemetry.sdk.language",
+              "type": "string",
+              "value": "go"
+            },
+            {
+              "key": "telemetry.sdk.name",
+              "type": "string",
+              "value": "opentelemetry"
+            },
+            {
+              "key": "telemetry.sdk.version",
+              "type": "string",
+              "value": "1.19.0"
+            }
+          ]
+        },
+        "p24": {
+          "serviceName": "dockerd",
+          "tags": [
+            {
+              "key": "telemetry.sdk.language",
+              "type": "string",
+              "value": "go"
+            },
+            {
+              "key": "telemetry.sdk.name",
+              "type": "string",
+              "value": "opentelemetry"
+            },
+            {
+              "key": "telemetry.sdk.version",
+              "type": "string",
+              "value": "1.19.0"
+            }
+          ]
+        },
+        "p25": {
+          "serviceName": "dockerd",
+          "tags": [
+            {
+              "key": "telemetry.sdk.language",
+              "type": "string",
+              "value": "go"
+            },
+            {
+              "key": "telemetry.sdk.name",
+              "type": "string",
+              "value": "opentelemetry"
+            },
+            {
+              "key": "telemetry.sdk.version",
+              "type": "string",
+              "value": "1.19.0"
+            }
+          ]
+        },
+        "p26": {
+          "serviceName": "dockerd",
+          "tags": [
+            {
+              "key": "telemetry.sdk.language",
+              "type": "string",
+              "value": "go"
+            },
+            {
+              "key": "telemetry.sdk.name",
+              "type": "string",
+              "value": "opentelemetry"
+            },
+            {
+              "key": "telemetry.sdk.version",
+              "type": "string",
+              "value": "1.19.0"
+            }
+          ]
+        },
+        "p27": {
+          "serviceName": "dockerd",
+          "tags": [
+            {
+              "key": "telemetry.sdk.language",
+              "type": "string",
+              "value": "go"
+            },
+            {
+              "key": "telemetry.sdk.name",
+              "type": "string",
+              "value": "opentelemetry"
+            },
+            {
+              "key": "telemetry.sdk.version",
+              "type": "string",
+              "value": "1.19.0"
+            }
+          ]
+        },
+        "p28": {
+          "serviceName": "dockerd",
+          "tags": [
+            {
+              "key": "telemetry.sdk.language",
+              "type": "string",
+              "value": "go"
+            },
+            {
+              "key": "telemetry.sdk.name",
+              "type": "string",
+              "value": "opentelemetry"
+            },
+            {
+              "key": "telemetry.sdk.version",
+              "type": "string",
+              "value": "1.19.0"
+            }
+          ]
+        },
+        "p29": {
+          "serviceName": "dockerd",
+          "tags": [
+            {
+              "key": "telemetry.sdk.language",
+              "type": "string",
+              "value": "go"
+            },
+            {
+              "key": "telemetry.sdk.name",
+              "type": "string",
+              "value": "opentelemetry"
+            },
+            {
+              "key": "telemetry.sdk.version",
+              "type": "string",
+              "value": "1.19.0"
+            }
+          ]
+        },
+        "p3": {
+          "serviceName": "buildx",
+          "tags": [
+            {
+              "key": "telemetry.sdk.language",
+              "type": "string",
+              "value": "go"
+            },
+            {
+              "key": "telemetry.sdk.name",
+              "type": "string",
+              "value": "opentelemetry"
+            },
+            {
+              "key": "telemetry.sdk.version",
+              "type": "string",
+              "value": "1.14.0"
+            }
+          ]
+        },
+        "p30": {
+          "serviceName": "dockerd",
+          "tags": [
+            {
+              "key": "telemetry.sdk.language",
+              "type": "string",
+              "value": "go"
+            },
+            {
+              "key": "telemetry.sdk.name",
+              "type": "string",
+              "value": "opentelemetry"
+            },
+            {
+              "key": "telemetry.sdk.version",
+              "type": "string",
+              "value": "1.19.0"
+            }
+          ]
+        },
+        "p31": {
+          "serviceName": "dockerd",
+          "tags": [
+            {
+              "key": "telemetry.sdk.language",
+              "type": "string",
+              "value": "go"
+            },
+            {
+              "key": "telemetry.sdk.name",
+              "type": "string",
+              "value": "opentelemetry"
+            },
+            {
+              "key": "telemetry.sdk.version",
+              "type": "string",
+              "value": "1.19.0"
+            }
+          ]
+        },
+        "p32": {
+          "serviceName": "dockerd",
+          "tags": [
+            {
+              "key": "telemetry.sdk.language",
+              "type": "string",
+              "value": "go"
+            },
+            {
+              "key": "telemetry.sdk.name",
+              "type": "string",
+              "value": "opentelemetry"
+            },
+            {
+              "key": "telemetry.sdk.version",
+              "type": "string",
+              "value": "1.19.0"
+            }
+          ]
+        },
+        "p33": {
+          "serviceName": "dockerd",
+          "tags": [
+            {
+              "key": "telemetry.sdk.language",
+              "type": "string",
+              "value": "go"
+            },
+            {
+              "key": "telemetry.sdk.name",
+              "type": "string",
+              "value": "opentelemetry"
+            },
+            {
+              "key": "telemetry.sdk.version",
+              "type": "string",
+              "value": "1.19.0"
+            }
+          ]
+        },
+        "p34": {
+          "serviceName": "dockerd",
+          "tags": [
+            {
+              "key": "telemetry.sdk.language",
+              "type": "string",
+              "value": "go"
+            },
+            {
+              "key": "telemetry.sdk.name",
+              "type": "string",
+              "value": "opentelemetry"
+            },
+            {
+              "key": "telemetry.sdk.version",
+              "type": "string",
+              "value": "1.19.0"
+            }
+          ]
+        },
+        "p35": {
+          "serviceName": "dockerd",
+          "tags": [
+            {
+              "key": "telemetry.sdk.language",
+              "type": "string",
+              "value": "go"
+            },
+            {
+              "key": "telemetry.sdk.name",
+              "type": "string",
+              "value": "opentelemetry"
+            },
+            {
+              "key": "telemetry.sdk.version",
+              "type": "string",
+              "value": "1.19.0"
+            }
+          ]
+        },
+        "p36": {
+          "serviceName": "dockerd",
+          "tags": [
+            {
+              "key": "telemetry.sdk.language",
+              "type": "string",
+              "value": "go"
+            },
+            {
+              "key": "telemetry.sdk.name",
+              "type": "string",
+              "value": "opentelemetry"
+            },
+            {
+              "key": "telemetry.sdk.version",
+              "type": "string",
+              "value": "1.19.0"
+            }
+          ]
+        },
+        "p37": {
+          "serviceName": "dockerd",
+          "tags": [
+            {
+              "key": "telemetry.sdk.language",
+              "type": "string",
+              "value": "go"
+            },
+            {
+              "key": "telemetry.sdk.name",
+              "type": "string",
+              "value": "opentelemetry"
+            },
+            {
+              "key": "telemetry.sdk.version",
+              "type": "string",
+              "value": "1.19.0"
+            }
+          ]
+        },
+        "p38": {
+          "serviceName": "dockerd",
+          "tags": [
+            {
+              "key": "telemetry.sdk.language",
+              "type": "string",
+              "value": "go"
+            },
+            {
+              "key": "telemetry.sdk.name",
+              "type": "string",
+              "value": "opentelemetry"
+            },
+            {
+              "key": "telemetry.sdk.version",
+              "type": "string",
+              "value": "1.19.0"
+            }
+          ]
+        },
+        "p39": {
+          "serviceName": "dockerd",
+          "tags": [
+            {
+              "key": "telemetry.sdk.language",
+              "type": "string",
+              "value": "go"
+            },
+            {
+              "key": "telemetry.sdk.name",
+              "type": "string",
+              "value": "opentelemetry"
+            },
+            {
+              "key": "telemetry.sdk.version",
+              "type": "string",
+              "value": "1.19.0"
+            }
+          ]
+        },
+        "p4": {
+          "serviceName": "buildx",
+          "tags": [
+            {
+              "key": "telemetry.sdk.language",
+              "type": "string",
+              "value": "go"
+            },
+            {
+              "key": "telemetry.sdk.name",
+              "type": "string",
+              "value": "opentelemetry"
+            },
+            {
+              "key": "telemetry.sdk.version",
+              "type": "string",
+              "value": "1.14.0"
+            }
+          ]
+        },
+        "p40": {
+          "serviceName": "dockerd",
+          "tags": [
+            {
+              "key": "telemetry.sdk.language",
+              "type": "string",
+              "value": "go"
+            },
+            {
+              "key": "telemetry.sdk.name",
+              "type": "string",
+              "value": "opentelemetry"
+            },
+            {
+              "key": "telemetry.sdk.version",
+              "type": "string",
+              "value": "1.19.0"
+            }
+          ]
+        },
+        "p41": {
+          "serviceName": "dockerd",
+          "tags": [
+            {
+              "key": "telemetry.sdk.language",
+              "type": "string",
+              "value": "go"
+            },
+            {
+              "key": "telemetry.sdk.name",
+              "type": "string",
+              "value": "opentelemetry"
+            },
+            {
+              "key": "telemetry.sdk.version",
+              "type": "string",
+              "value": "1.19.0"
+            }
+          ]
+        },
+        "p42": {
+          "serviceName": "dockerd",
+          "tags": [
+            {
+              "key": "telemetry.sdk.language",
+              "type": "string",
+              "value": "go"
+            },
+            {
+              "key": "telemetry.sdk.name",
+              "type": "string",
+              "value": "opentelemetry"
+            },
+            {
+              "key": "telemetry.sdk.version",
+              "type": "string",
+              "value": "1.19.0"
+            }
+          ]
+        },
+        "p43": {
+          "serviceName": "dockerd",
+          "tags": [
+            {
+              "key": "telemetry.sdk.language",
+              "type": "string",
+              "value": "go"
+            },
+            {
+              "key": "telemetry.sdk.name",
+              "type": "string",
+              "value": "opentelemetry"
+            },
+            {
+              "key": "telemetry.sdk.version",
+              "type": "string",
+              "value": "1.19.0"
+            }
+          ]
+        },
+        "p44": {
+          "serviceName": "dockerd",
+          "tags": [
+            {
+              "key": "telemetry.sdk.language",
+              "type": "string",
+              "value": "go"
+            },
+            {
+              "key": "telemetry.sdk.name",
+              "type": "string",
+              "value": "opentelemetry"
+            },
+            {
+              "key": "telemetry.sdk.version",
+              "type": "string",
+              "value": "1.19.0"
+            }
+          ]
+        },
+        "p45": {
+          "serviceName": "dockerd",
+          "tags": [
+            {
+              "key": "telemetry.sdk.language",
+              "type": "string",
+              "value": "go"
+            },
+            {
+              "key": "telemetry.sdk.name",
+              "type": "string",
+              "value": "opentelemetry"
+            },
+            {
+              "key": "telemetry.sdk.version",
+              "type": "string",
+              "value": "1.19.0"
+            }
+          ]
+        },
+        "p46": {
+          "serviceName": "dockerd",
+          "tags": [
+            {
+              "key": "telemetry.sdk.language",
+              "type": "string",
+              "value": "go"
+            },
+            {
+              "key": "telemetry.sdk.name",
+              "type": "string",
+              "value": "opentelemetry"
+            },
+            {
+              "key": "telemetry.sdk.version",
+              "type": "string",
+              "value": "1.19.0"
+            }
+          ]
+        },
+        "p47": {
+          "serviceName": "dockerd",
+          "tags": [
+            {
+              "key": "telemetry.sdk.language",
+              "type": "string",
+              "value": "go"
+            },
+            {
+              "key": "telemetry.sdk.name",
+              "type": "string",
+              "value": "opentelemetry"
+            },
+            {
+              "key": "telemetry.sdk.version",
+              "type": "string",
+              "value": "1.19.0"
+            }
+          ]
+        },
+        "p48": {
+          "serviceName": "dockerd",
+          "tags": [
+            {
+              "key": "telemetry.sdk.language",
+              "type": "string",
+              "value": "go"
+            },
+            {
+              "key": "telemetry.sdk.name",
+              "type": "string",
+              "value": "opentelemetry"
+            },
+            {
+              "key": "telemetry.sdk.version",
+              "type": "string",
+              "value": "1.19.0"
+            }
+          ]
+        },
+        "p49": {
+          "serviceName": "dockerd",
+          "tags": [
+            {
+              "key": "telemetry.sdk.language",
+              "type": "string",
+              "value": "go"
+            },
+            {
+              "key": "telemetry.sdk.name",
+              "type": "string",
+              "value": "opentelemetry"
+            },
+            {
+              "key": "telemetry.sdk.version",
+              "type": "string",
+              "value": "1.19.0"
+            }
+          ]
+        },
+        "p5": {
+          "serviceName": "buildx",
+          "tags": [
+            {
+              "key": "telemetry.sdk.language",
+              "type": "string",
+              "value": "go"
+            },
+            {
+              "key": "telemetry.sdk.name",
+              "type": "string",
+              "value": "opentelemetry"
+            },
+            {
+              "key": "telemetry.sdk.version",
+              "type": "string",
+              "value": "1.14.0"
+            }
+          ]
+        },
+        "p50": {
+          "serviceName": "dockerd",
+          "tags": [
+            {
+              "key": "telemetry.sdk.language",
+              "type": "string",
+              "value": "go"
+            },
+            {
+              "key": "telemetry.sdk.name",
+              "type": "string",
+              "value": "opentelemetry"
+            },
+            {
+              "key": "telemetry.sdk.version",
+              "type": "string",
+              "value": "1.19.0"
+            }
+          ]
+        },
+        "p51": {
+          "serviceName": "dockerd",
+          "tags": [
+            {
+              "key": "telemetry.sdk.language",
+              "type": "string",
+              "value": "go"
+            },
+            {
+              "key": "telemetry.sdk.name",
+              "type": "string",
+              "value": "opentelemetry"
+            },
+            {
+              "key": "telemetry.sdk.version",
+              "type": "string",
+              "value": "1.19.0"
+            }
+          ]
+        },
+        "p52": {
+          "serviceName": "dockerd",
+          "tags": [
+            {
+              "key": "telemetry.sdk.language",
+              "type": "string",
+              "value": "go"
+            },
+            {
+              "key": "telemetry.sdk.name",
+              "type": "string",
+              "value": "opentelemetry"
+            },
+            {
+              "key": "telemetry.sdk.version",
+              "type": "string",
+              "value": "1.19.0"
+            }
+          ]
+        },
+        "p53": {
+          "serviceName": "dockerd",
+          "tags": [
+            {
+              "key": "telemetry.sdk.language",
+              "type": "string",
+              "value": "go"
+            },
+            {
+              "key": "telemetry.sdk.name",
+              "type": "string",
+              "value": "opentelemetry"
+            },
+            {
+              "key": "telemetry.sdk.version",
+              "type": "string",
+              "value": "1.19.0"
+            }
+          ]
+        },
+        "p54": {
+          "serviceName": "dockerd",
+          "tags": [
+            {
+              "key": "telemetry.sdk.language",
+              "type": "string",
+              "value": "go"
+            },
+            {
+              "key": "telemetry.sdk.name",
+              "type": "string",
+              "value": "opentelemetry"
+            },
+            {
+              "key": "telemetry.sdk.version",
+              "type": "string",
+              "value": "1.19.0"
+            }
+          ]
+        },
+        "p55": {
+          "serviceName": "dockerd",
+          "tags": [
+            {
+              "key": "telemetry.sdk.language",
+              "type": "string",
+              "value": "go"
+            },
+            {
+              "key": "telemetry.sdk.name",
+              "type": "string",
+              "value": "opentelemetry"
+            },
+            {
+              "key": "telemetry.sdk.version",
+              "type": "string",
+              "value": "1.19.0"
+            }
+          ]
+        },
+        "p56": {
+          "serviceName": "dockerd",
+          "tags": [
+            {
+              "key": "telemetry.sdk.language",
+              "type": "string",
+              "value": "go"
+            },
+            {
+              "key": "telemetry.sdk.name",
+              "type": "string",
+              "value": "opentelemetry"
+            },
+            {
+              "key": "telemetry.sdk.version",
+              "type": "string",
+              "value": "1.19.0"
+            }
+          ]
+        },
+        "p57": {
+          "serviceName": "dockerd",
+          "tags": [
+            {
+              "key": "telemetry.sdk.language",
+              "type": "string",
+              "value": "go"
+            },
+            {
+              "key": "telemetry.sdk.name",
+              "type": "string",
+              "value": "opentelemetry"
+            },
+            {
+              "key": "telemetry.sdk.version",
+              "type": "string",
+              "value": "1.19.0"
+            }
+          ]
+        },
+        "p58": {
+          "serviceName": "dockerd",
+          "tags": [
+            {
+              "key": "telemetry.sdk.language",
+              "type": "string",
+              "value": "go"
+            },
+            {
+              "key": "telemetry.sdk.name",
+              "type": "string",
+              "value": "opentelemetry"
+            },
+            {
+              "key": "telemetry.sdk.version",
+              "type": "string",
+              "value": "1.19.0"
+            }
+          ]
+        },
+        "p59": {
+          "serviceName": "dockerd",
+          "tags": [
+            {
+              "key": "telemetry.sdk.language",
+              "type": "string",
+              "value": "go"
+            },
+            {
+              "key": "telemetry.sdk.name",
+              "type": "string",
+              "value": "opentelemetry"
+            },
+            {
+              "key": "telemetry.sdk.version",
+              "type": "string",
+              "value": "1.19.0"
+            }
+          ]
+        },
+        "p6": {
+          "serviceName": "buildx",
+          "tags": [
+            {
+              "key": "telemetry.sdk.language",
+              "type": "string",
+              "value": "go"
+            },
+            {
+              "key": "telemetry.sdk.name",
+              "type": "string",
+              "value": "opentelemetry"
+            },
+            {
+              "key": "telemetry.sdk.version",
+              "type": "string",
+              "value": "1.14.0"
+            }
+          ]
+        },
+        "p60": {
+          "serviceName": "dockerd",
+          "tags": [
+            {
+              "key": "telemetry.sdk.language",
+              "type": "string",
+              "value": "go"
+            },
+            {
+              "key": "telemetry.sdk.name",
+              "type": "string",
+              "value": "opentelemetry"
+            },
+            {
+              "key": "telemetry.sdk.version",
+              "type": "string",
+              "value": "1.19.0"
+            }
+          ]
+        },
+        "p61": {
+          "serviceName": "dockerd",
+          "tags": [
+            {
+              "key": "telemetry.sdk.language",
+              "type": "string",
+              "value": "go"
+            },
+            {
+              "key": "telemetry.sdk.name",
+              "type": "string",
+              "value": "opentelemetry"
+            },
+            {
+              "key": "telemetry.sdk.version",
+              "type": "string",
+              "value": "1.19.0"
+            }
+          ]
+        },
+        "p62": {
+          "serviceName": "dockerd",
+          "tags": [
+            {
+              "key": "telemetry.sdk.language",
+              "type": "string",
+              "value": "go"
+            },
+            {
+              "key": "telemetry.sdk.name",
+              "type": "string",
+              "value": "opentelemetry"
+            },
+            {
+              "key": "telemetry.sdk.version",
+              "type": "string",
+              "value": "1.19.0"
+            }
+          ]
+        },
+        "p63": {
+          "serviceName": "dockerd",
+          "tags": [
+            {
+              "key": "telemetry.sdk.language",
+              "type": "string",
+              "value": "go"
+            },
+            {
+              "key": "telemetry.sdk.name",
+              "type": "string",
+              "value": "opentelemetry"
+            },
+            {
+              "key": "telemetry.sdk.version",
+              "type": "string",
+              "value": "1.19.0"
+            }
+          ]
+        },
+        "p64": {
+          "serviceName": "dockerd",
+          "tags": [
+            {
+              "key": "telemetry.sdk.language",
+              "type": "string",
+              "value": "go"
+            },
+            {
+              "key": "telemetry.sdk.name",
+              "type": "string",
+              "value": "opentelemetry"
+            },
+            {
+              "key": "telemetry.sdk.version",
+              "type": "string",
+              "value": "1.19.0"
+            }
+          ]
+        },
+        "p65": {
+          "serviceName": "buildx",
+          "tags": [
+            {
+              "key": "telemetry.sdk.language",
+              "type": "string",
+              "value": "go"
+            },
+            {
+              "key": "telemetry.sdk.name",
+              "type": "string",
+              "value": "opentelemetry"
+            },
+            {
+              "key": "telemetry.sdk.version",
+              "type": "string",
+              "value": "1.14.0"
+            }
+          ]
+        },
+        "p66": {
+          "serviceName": "buildx",
+          "tags": [
+            {
+              "key": "telemetry.sdk.language",
+              "type": "string",
+              "value": "go"
+            },
+            {
+              "key": "telemetry.sdk.name",
+              "type": "string",
+              "value": "opentelemetry"
+            },
+            {
+              "key": "telemetry.sdk.version",
+              "type": "string",
+              "value": "1.14.0"
+            }
+          ]
+        },
+        "p67": {
+          "serviceName": "buildx",
+          "tags": [
+            {
+              "key": "telemetry.sdk.language",
+              "type": "string",
+              "value": "go"
+            },
+            {
+              "key": "telemetry.sdk.name",
+              "type": "string",
+              "value": "opentelemetry"
+            },
+            {
+              "key": "telemetry.sdk.version",
+              "type": "string",
+              "value": "1.14.0"
+            }
+          ]
+        },
+        "p68": {
+          "serviceName": "buildx",
+          "tags": [
+            {
+              "key": "telemetry.sdk.language",
+              "type": "string",
+              "value": "go"
+            },
+            {
+              "key": "telemetry.sdk.name",
+              "type": "string",
+              "value": "opentelemetry"
+            },
+            {
+              "key": "telemetry.sdk.version",
+              "type": "string",
+              "value": "1.14.0"
+            }
+          ]
+        },
+        "p69": {
+          "serviceName": "dockerd",
+          "tags": [
+            {
+              "key": "telemetry.sdk.language",
+              "type": "string",
+              "value": "go"
+            },
+            {
+              "key": "telemetry.sdk.name",
+              "type": "string",
+              "value": "opentelemetry"
+            },
+            {
+              "key": "telemetry.sdk.version",
+              "type": "string",
+              "value": "1.19.0"
+            }
+          ]
+        },
+        "p7": {
+          "serviceName": "buildx",
+          "tags": [
+            {
+              "key": "telemetry.sdk.language",
+              "type": "string",
+              "value": "go"
+            },
+            {
+              "key": "telemetry.sdk.name",
+              "type": "string",
+              "value": "opentelemetry"
+            },
+            {
+              "key": "telemetry.sdk.version",
+              "type": "string",
+              "value": "1.14.0"
+            }
+          ]
+        },
+        "p70": {
+          "serviceName": "dockerd",
+          "tags": [
+            {
+              "key": "telemetry.sdk.language",
+              "type": "string",
+              "value": "go"
+            },
+            {
+              "key": "telemetry.sdk.name",
+              "type": "string",
+              "value": "opentelemetry"
+            },
+            {
+              "key": "telemetry.sdk.version",
+              "type": "string",
+              "value": "1.19.0"
+            }
+          ]
+        },
+        "p71": {
+          "serviceName": "dockerd",
+          "tags": [
+            {
+              "key": "telemetry.sdk.language",
+              "type": "string",
+              "value": "go"
+            },
+            {
+              "key": "telemetry.sdk.name",
+              "type": "string",
+              "value": "opentelemetry"
+            },
+            {
+              "key": "telemetry.sdk.version",
+              "type": "string",
+              "value": "1.19.0"
+            }
+          ]
+        },
+        "p72": {
+          "serviceName": "dockerd",
+          "tags": [
+            {
+              "key": "telemetry.sdk.language",
+              "type": "string",
+              "value": "go"
+            },
+            {
+              "key": "telemetry.sdk.name",
+              "type": "string",
+              "value": "opentelemetry"
+            },
+            {
+              "key": "telemetry.sdk.version",
+              "type": "string",
+              "value": "1.19.0"
+            }
+          ]
+        },
+        "p8": {
+          "serviceName": "buildx",
+          "tags": [
+            {
+              "key": "telemetry.sdk.language",
+              "type": "string",
+              "value": "go"
+            },
+            {
+              "key": "telemetry.sdk.name",
+              "type": "string",
+              "value": "opentelemetry"
+            },
+            {
+              "key": "telemetry.sdk.version",
+              "type": "string",
+              "value": "1.14.0"
+            }
+          ]
+        },
+        "p9": {
+          "serviceName": "buildx",
+          "tags": [
+            {
+              "key": "telemetry.sdk.language",
+              "type": "string",
+              "value": "go"
+            },
+            {
+              "key": "telemetry.sdk.name",
+              "type": "string",
+              "value": "opentelemetry"
+            },
+            {
+              "key": "telemetry.sdk.version",
+              "type": "string",
+              "value": "1.14.0"
+            }
+          ]
+        }
+      },
+      "warnings": null
+    }
+  ]
+}
\ No newline at end of file
diff -pruN 0.19.3+ds1-4/util/otelutil/fixtures/otlp.json 0.21.3-0ubuntu1/util/otelutil/fixtures/otlp.json
--- 0.19.3+ds1-4/util/otelutil/fixtures/otlp.json	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/util/otelutil/fixtures/otlp.json	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,11127 @@
+[
+  {
+    "Name": "moby.buildkit.v1.Control/ListWorkers",
+    "SpanContext": {
+      "TraceID": "0555ce1903feb85770d102846273073b",
+      "SpanID": "f5f33ab2ca194d1b",
+      "TraceFlags": "00",
+      "TraceState": "",
+      "Remote": false
+    },
+    "Parent": {
+      "TraceID": "0555ce1903feb85770d102846273073b",
+      "SpanID": "c1b335eeeaf56405",
+      "TraceFlags": "00",
+      "TraceState": "",
+      "Remote": false
+    },
+    "SpanKind": 3,
+    "StartTime": "2024-01-12T14:57:40.147265775Z",
+    "EndTime": "2024-01-12T14:57:40.164529263Z",
+    "Attributes": [
+      {
+        "Key": "rpc.system",
+        "Value": {
+          "Type": "STRING",
+          "Value": "grpc"
+        }
+      },
+      {
+        "Key": "rpc.service",
+        "Value": {
+          "Type": "STRING",
+          "Value": "moby.buildkit.v1.Control"
+        }
+      },
+      {
+        "Key": "rpc.method",
+        "Value": {
+          "Type": "STRING",
+          "Value": "ListWorkers"
+        }
+      },
+      {
+        "Key": "rpc.grpc.status_code",
+        "Value": {
+          "Type": "INT64",
+          "Value": 0
+        }
+      }
+    ],
+    "Events": [
+      {
+        "Name": "message",
+        "Attributes": [
+          {
+            "Key": "message.type",
+            "Value": {
+              "Type": "STRING",
+              "Value": "SENT"
+            }
+          },
+          {
+            "Key": "message.id",
+            "Value": {
+              "Type": "INT64",
+              "Value": 1
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:40.147281524Z"
+      },
+      {
+        "Name": "message",
+        "Attributes": [
+          {
+            "Key": "message.type",
+            "Value": {
+              "Type": "STRING",
+              "Value": "RECEIVED"
+            }
+          },
+          {
+            "Key": "message.id",
+            "Value": {
+              "Type": "INT64",
+              "Value": 1
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:40.164526618Z"
+      }
+    ],
+    "Links": null,
+    "Status": {
+      "Code": "Ok",
+      "Description": ""
+    },
+    "DroppedAttributes": 0,
+    "DroppedEvents": 0,
+    "DroppedLinks": 0,
+    "ChildSpanCount": 0,
+    "Resource": [
+      {
+        "Key": "service.name",
+        "Value": {
+          "Type": "STRING",
+          "Value": "buildx"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.language",
+        "Value": {
+          "Type": "STRING",
+          "Value": "go"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.name",
+        "Value": {
+          "Type": "STRING",
+          "Value": "opentelemetry"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.version",
+        "Value": {
+          "Type": "STRING",
+          "Value": "1.14.0"
+        }
+      }
+    ],
+    "InstrumentationLibrary": {
+      "Name": "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc",
+      "Version": "semver:0.40.0",
+      "SchemaURL": ""
+    }
+  },
+  {
+    "Name": "moby.buildkit.v1.frontend.LLBBridge/Ping",
+    "SpanContext": {
+      "TraceID": "0555ce1903feb85770d102846273073b",
+      "SpanID": "1f64b377d3d03600",
+      "TraceFlags": "00",
+      "TraceState": "",
+      "Remote": false
+    },
+    "Parent": {
+      "TraceID": "0555ce1903feb85770d102846273073b",
+      "SpanID": "c1b335eeeaf56405",
+      "TraceFlags": "00",
+      "TraceState": "",
+      "Remote": false
+    },
+    "SpanKind": 3,
+    "StartTime": "2024-01-12T14:57:40.165362709Z",
+    "EndTime": "2024-01-12T14:57:40.166354292Z",
+    "Attributes": [
+      {
+        "Key": "rpc.system",
+        "Value": {
+          "Type": "STRING",
+          "Value": "grpc"
+        }
+      },
+      {
+        "Key": "rpc.service",
+        "Value": {
+          "Type": "STRING",
+          "Value": "moby.buildkit.v1.frontend.LLBBridge"
+        }
+      },
+      {
+        "Key": "rpc.method",
+        "Value": {
+          "Type": "STRING",
+          "Value": "Ping"
+        }
+      },
+      {
+        "Key": "rpc.grpc.status_code",
+        "Value": {
+          "Type": "INT64",
+          "Value": 0
+        }
+      }
+    ],
+    "Events": [
+      {
+        "Name": "message",
+        "Attributes": [
+          {
+            "Key": "message.type",
+            "Value": {
+              "Type": "STRING",
+              "Value": "SENT"
+            }
+          },
+          {
+            "Key": "message.id",
+            "Value": {
+              "Type": "INT64",
+              "Value": 1
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:40.165372367Z"
+      },
+      {
+        "Name": "message",
+        "Attributes": [
+          {
+            "Key": "message.type",
+            "Value": {
+              "Type": "STRING",
+              "Value": "RECEIVED"
+            }
+          },
+          {
+            "Key": "message.id",
+            "Value": {
+              "Type": "INT64",
+              "Value": 1
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:40.16635318Z"
+      }
+    ],
+    "Links": null,
+    "Status": {
+      "Code": "Ok",
+      "Description": ""
+    },
+    "DroppedAttributes": 0,
+    "DroppedEvents": 0,
+    "DroppedLinks": 0,
+    "ChildSpanCount": 0,
+    "Resource": [
+      {
+        "Key": "service.name",
+        "Value": {
+          "Type": "STRING",
+          "Value": "buildx"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.language",
+        "Value": {
+          "Type": "STRING",
+          "Value": "go"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.name",
+        "Value": {
+          "Type": "STRING",
+          "Value": "opentelemetry"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.version",
+        "Value": {
+          "Type": "STRING",
+          "Value": "1.14.0"
+        }
+      }
+    ],
+    "InstrumentationLibrary": {
+      "Name": "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc",
+      "Version": "semver:0.40.0",
+      "SchemaURL": ""
+    }
+  },
+  {
+    "Name": "moby.buildkit.v1.frontend.LLBBridge/Return",
+    "SpanContext": {
+      "TraceID": "0555ce1903feb85770d102846273073b",
+      "SpanID": "edb147a8e37c0b62",
+      "TraceFlags": "00",
+      "TraceState": "",
+      "Remote": false
+    },
+    "Parent": {
+      "TraceID": "0555ce1903feb85770d102846273073b",
+      "SpanID": "c1b335eeeaf56405",
+      "TraceFlags": "00",
+      "TraceState": "",
+      "Remote": false
+    },
+    "SpanKind": 3,
+    "StartTime": "2024-01-12T14:57:40.166393536Z",
+    "EndTime": "2024-01-12T14:57:40.16719399Z",
+    "Attributes": [
+      {
+        "Key": "rpc.system",
+        "Value": {
+          "Type": "STRING",
+          "Value": "grpc"
+        }
+      },
+      {
+        "Key": "rpc.service",
+        "Value": {
+          "Type": "STRING",
+          "Value": "moby.buildkit.v1.frontend.LLBBridge"
+        }
+      },
+      {
+        "Key": "rpc.method",
+        "Value": {
+          "Type": "STRING",
+          "Value": "Return"
+        }
+      },
+      {
+        "Key": "rpc.grpc.status_code",
+        "Value": {
+          "Type": "INT64",
+          "Value": 0
+        }
+      }
+    ],
+    "Events": [
+      {
+        "Name": "message",
+        "Attributes": [
+          {
+            "Key": "message.type",
+            "Value": {
+              "Type": "STRING",
+              "Value": "SENT"
+            }
+          },
+          {
+            "Key": "message.id",
+            "Value": {
+              "Type": "INT64",
+              "Value": 1
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:40.166401921Z"
+      },
+      {
+        "Name": "message",
+        "Attributes": [
+          {
+            "Key": "message.type",
+            "Value": {
+              "Type": "STRING",
+              "Value": "RECEIVED"
+            }
+          },
+          {
+            "Key": "message.id",
+            "Value": {
+              "Type": "INT64",
+              "Value": 1
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:40.167193199Z"
+      }
+    ],
+    "Links": null,
+    "Status": {
+      "Code": "Ok",
+      "Description": ""
+    },
+    "DroppedAttributes": 0,
+    "DroppedEvents": 0,
+    "DroppedLinks": 0,
+    "ChildSpanCount": 0,
+    "Resource": [
+      {
+        "Key": "service.name",
+        "Value": {
+          "Type": "STRING",
+          "Value": "buildx"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.language",
+        "Value": {
+          "Type": "STRING",
+          "Value": "go"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.name",
+        "Value": {
+          "Type": "STRING",
+          "Value": "opentelemetry"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.version",
+        "Value": {
+          "Type": "STRING",
+          "Value": "1.14.0"
+        }
+      }
+    ],
+    "InstrumentationLibrary": {
+      "Name": "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc",
+      "Version": "semver:0.40.0",
+      "SchemaURL": ""
+    }
+  },
+  {
+    "Name": "moby.buildkit.v1.Control/Solve",
+    "SpanContext": {
+      "TraceID": "0555ce1903feb85770d102846273073b",
+      "SpanID": "be9c91e5fc1a8511",
+      "TraceFlags": "00",
+      "TraceState": "",
+      "Remote": false
+    },
+    "Parent": {
+      "TraceID": "0555ce1903feb85770d102846273073b",
+      "SpanID": "c1b335eeeaf56405",
+      "TraceFlags": "00",
+      "TraceState": "",
+      "Remote": false
+    },
+    "SpanKind": 3,
+    "StartTime": "2024-01-12T14:57:40.165347242Z",
+    "EndTime": "2024-01-12T14:57:40.167342107Z",
+    "Attributes": [
+      {
+        "Key": "rpc.system",
+        "Value": {
+          "Type": "STRING",
+          "Value": "grpc"
+        }
+      },
+      {
+        "Key": "rpc.service",
+        "Value": {
+          "Type": "STRING",
+          "Value": "moby.buildkit.v1.Control"
+        }
+      },
+      {
+        "Key": "rpc.method",
+        "Value": {
+          "Type": "STRING",
+          "Value": "Solve"
+        }
+      },
+      {
+        "Key": "rpc.grpc.status_code",
+        "Value": {
+          "Type": "INT64",
+          "Value": 0
+        }
+      }
+    ],
+    "Events": [
+      {
+        "Name": "message",
+        "Attributes": [
+          {
+            "Key": "message.type",
+            "Value": {
+              "Type": "STRING",
+              "Value": "SENT"
+            }
+          },
+          {
+            "Key": "message.id",
+            "Value": {
+              "Type": "INT64",
+              "Value": 1
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:40.16536234Z"
+      },
+      {
+        "Name": "message",
+        "Attributes": [
+          {
+            "Key": "message.type",
+            "Value": {
+              "Type": "STRING",
+              "Value": "RECEIVED"
+            }
+          },
+          {
+            "Key": "message.id",
+            "Value": {
+              "Type": "INT64",
+              "Value": 1
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:40.167341325Z"
+      }
+    ],
+    "Links": null,
+    "Status": {
+      "Code": "Ok",
+      "Description": ""
+    },
+    "DroppedAttributes": 0,
+    "DroppedEvents": 0,
+    "DroppedLinks": 0,
+    "ChildSpanCount": 0,
+    "Resource": [
+      {
+        "Key": "service.name",
+        "Value": {
+          "Type": "STRING",
+          "Value": "buildx"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.language",
+        "Value": {
+          "Type": "STRING",
+          "Value": "go"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.name",
+        "Value": {
+          "Type": "STRING",
+          "Value": "opentelemetry"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.version",
+        "Value": {
+          "Type": "STRING",
+          "Value": "1.14.0"
+        }
+      }
+    ],
+    "InstrumentationLibrary": {
+      "Name": "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc",
+      "Version": "semver:0.40.0",
+      "SchemaURL": ""
+    }
+  },
+  {
+    "Name": "moby.buildkit.v1.Control/Status",
+    "SpanContext": {
+      "TraceID": "0555ce1903feb85770d102846273073b",
+      "SpanID": "07376377d689755f",
+      "TraceFlags": "00",
+      "TraceState": "",
+      "Remote": false
+    },
+    "Parent": {
+      "TraceID": "0555ce1903feb85770d102846273073b",
+      "SpanID": "c1b335eeeaf56405",
+      "TraceFlags": "00",
+      "TraceState": "",
+      "Remote": false
+    },
+    "SpanKind": 3,
+    "StartTime": "2024-01-12T14:57:40.165348694Z",
+    "EndTime": "2024-01-12T14:57:40.167352066Z",
+    "Attributes": [
+      {
+        "Key": "rpc.system",
+        "Value": {
+          "Type": "STRING",
+          "Value": "grpc"
+        }
+      },
+      {
+        "Key": "rpc.service",
+        "Value": {
+          "Type": "STRING",
+          "Value": "moby.buildkit.v1.Control"
+        }
+      },
+      {
+        "Key": "rpc.method",
+        "Value": {
+          "Type": "STRING",
+          "Value": "Status"
+        }
+      },
+      {
+        "Key": "rpc.grpc.status_code",
+        "Value": {
+          "Type": "INT64",
+          "Value": 0
+        }
+      }
+    ],
+    "Events": [
+      {
+        "Name": "message",
+        "Attributes": [
+          {
+            "Key": "message.type",
+            "Value": {
+              "Type": "STRING",
+              "Value": "SENT"
+            }
+          },
+          {
+            "Key": "message.id",
+            "Value": {
+              "Type": "INT64",
+              "Value": 1
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:40.165472235Z"
+      }
+    ],
+    "Links": null,
+    "Status": {
+      "Code": "Ok",
+      "Description": ""
+    },
+    "DroppedAttributes": 0,
+    "DroppedEvents": 0,
+    "DroppedLinks": 0,
+    "ChildSpanCount": 0,
+    "Resource": [
+      {
+        "Key": "service.name",
+        "Value": {
+          "Type": "STRING",
+          "Value": "buildx"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.language",
+        "Value": {
+          "Type": "STRING",
+          "Value": "go"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.name",
+        "Value": {
+          "Type": "STRING",
+          "Value": "opentelemetry"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.version",
+        "Value": {
+          "Type": "STRING",
+          "Value": "1.14.0"
+        }
+      }
+    ],
+    "InstrumentationLibrary": {
+      "Name": "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc",
+      "Version": "semver:0.40.0",
+      "SchemaURL": ""
+    }
+  },
+  {
+    "Name": "moby.buildkit.v1.Control/ListWorkers",
+    "SpanContext": {
+      "TraceID": "0555ce1903feb85770d102846273073b",
+      "SpanID": "fea7d265a9b437c2",
+      "TraceFlags": "00",
+      "TraceState": "",
+      "Remote": false
+    },
+    "Parent": {
+      "TraceID": "0555ce1903feb85770d102846273073b",
+      "SpanID": "d30eea87f2f0241d",
+      "TraceFlags": "00",
+      "TraceState": "",
+      "Remote": false
+    },
+    "SpanKind": 3,
+    "StartTime": "2024-01-12T14:57:40.350714924Z",
+    "EndTime": "2024-01-12T14:57:40.372933212Z",
+    "Attributes": [
+      {
+        "Key": "rpc.system",
+        "Value": {
+          "Type": "STRING",
+          "Value": "grpc"
+        }
+      },
+      {
+        "Key": "rpc.service",
+        "Value": {
+          "Type": "STRING",
+          "Value": "moby.buildkit.v1.Control"
+        }
+      },
+      {
+        "Key": "rpc.method",
+        "Value": {
+          "Type": "STRING",
+          "Value": "ListWorkers"
+        }
+      },
+      {
+        "Key": "rpc.grpc.status_code",
+        "Value": {
+          "Type": "INT64",
+          "Value": 0
+        }
+      }
+    ],
+    "Events": [
+      {
+        "Name": "message",
+        "Attributes": [
+          {
+            "Key": "message.type",
+            "Value": {
+              "Type": "STRING",
+              "Value": "SENT"
+            }
+          },
+          {
+            "Key": "message.id",
+            "Value": {
+              "Type": "INT64",
+              "Value": 1
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:40.350738238Z"
+      },
+      {
+        "Name": "message",
+        "Attributes": [
+          {
+            "Key": "message.type",
+            "Value": {
+              "Type": "STRING",
+              "Value": "RECEIVED"
+            }
+          },
+          {
+            "Key": "message.id",
+            "Value": {
+              "Type": "INT64",
+              "Value": 1
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:40.372930227Z"
+      }
+    ],
+    "Links": null,
+    "Status": {
+      "Code": "Ok",
+      "Description": ""
+    },
+    "DroppedAttributes": 0,
+    "DroppedEvents": 0,
+    "DroppedLinks": 0,
+    "ChildSpanCount": 0,
+    "Resource": [
+      {
+        "Key": "service.name",
+        "Value": {
+          "Type": "STRING",
+          "Value": "buildx"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.language",
+        "Value": {
+          "Type": "STRING",
+          "Value": "go"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.name",
+        "Value": {
+          "Type": "STRING",
+          "Value": "opentelemetry"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.version",
+        "Value": {
+          "Type": "STRING",
+          "Value": "1.14.0"
+        }
+      }
+    ],
+    "InstrumentationLibrary": {
+      "Name": "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc",
+      "Version": "semver:0.40.0",
+      "SchemaURL": ""
+    }
+  },
+  {
+    "Name": "moby.buildkit.v1.Control/ListWorkers",
+    "SpanContext": {
+      "TraceID": "0555ce1903feb85770d102846273073b",
+      "SpanID": "e1841a2a45f6de1a",
+      "TraceFlags": "00",
+      "TraceState": "",
+      "Remote": false
+    },
+    "Parent": {
+      "TraceID": "0555ce1903feb85770d102846273073b",
+      "SpanID": "d30eea87f2f0241d",
+      "TraceFlags": "00",
+      "TraceState": "",
+      "Remote": false
+    },
+    "SpanKind": 3,
+    "StartTime": "2024-01-12T14:57:40.382091457Z",
+    "EndTime": "2024-01-12T14:57:40.398366627Z",
+    "Attributes": [
+      {
+        "Key": "rpc.system",
+        "Value": {
+          "Type": "STRING",
+          "Value": "grpc"
+        }
+      },
+      {
+        "Key": "rpc.service",
+        "Value": {
+          "Type": "STRING",
+          "Value": "moby.buildkit.v1.Control"
+        }
+      },
+      {
+        "Key": "rpc.method",
+        "Value": {
+          "Type": "STRING",
+          "Value": "ListWorkers"
+        }
+      },
+      {
+        "Key": "rpc.grpc.status_code",
+        "Value": {
+          "Type": "INT64",
+          "Value": 0
+        }
+      }
+    ],
+    "Events": [
+      {
+        "Name": "message",
+        "Attributes": [
+          {
+            "Key": "message.type",
+            "Value": {
+              "Type": "STRING",
+              "Value": "SENT"
+            }
+          },
+          {
+            "Key": "message.id",
+            "Value": {
+              "Type": "INT64",
+              "Value": 1
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:40.382111174Z"
+      },
+      {
+        "Name": "message",
+        "Attributes": [
+          {
+            "Key": "message.type",
+            "Value": {
+              "Type": "STRING",
+              "Value": "RECEIVED"
+            }
+          },
+          {
+            "Key": "message.id",
+            "Value": {
+              "Type": "INT64",
+              "Value": 1
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:40.398362529Z"
+      }
+    ],
+    "Links": null,
+    "Status": {
+      "Code": "Ok",
+      "Description": ""
+    },
+    "DroppedAttributes": 0,
+    "DroppedEvents": 0,
+    "DroppedLinks": 0,
+    "ChildSpanCount": 0,
+    "Resource": [
+      {
+        "Key": "service.name",
+        "Value": {
+          "Type": "STRING",
+          "Value": "buildx"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.language",
+        "Value": {
+          "Type": "STRING",
+          "Value": "go"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.name",
+        "Value": {
+          "Type": "STRING",
+          "Value": "opentelemetry"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.version",
+        "Value": {
+          "Type": "STRING",
+          "Value": "1.14.0"
+        }
+      }
+    ],
+    "InstrumentationLibrary": {
+      "Name": "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc",
+      "Version": "semver:0.40.0",
+      "SchemaURL": ""
+    }
+  },
+  {
+    "Name": "moby.buildkit.v1.frontend.LLBBridge/Ping",
+    "SpanContext": {
+      "TraceID": "0555ce1903feb85770d102846273073b",
+      "SpanID": "a4ee96f4ccd426ec",
+      "TraceFlags": "00",
+      "TraceState": "",
+      "Remote": false
+    },
+    "Parent": {
+      "TraceID": "0555ce1903feb85770d102846273073b",
+      "SpanID": "d30eea87f2f0241d",
+      "TraceFlags": "00",
+      "TraceState": "",
+      "Remote": false
+    },
+    "SpanKind": 3,
+    "StartTime": "2024-01-12T14:57:40.399784596Z",
+    "EndTime": "2024-01-12T14:57:40.400602684Z",
+    "Attributes": [
+      {
+        "Key": "rpc.system",
+        "Value": {
+          "Type": "STRING",
+          "Value": "grpc"
+        }
+      },
+      {
+        "Key": "rpc.service",
+        "Value": {
+          "Type": "STRING",
+          "Value": "moby.buildkit.v1.frontend.LLBBridge"
+        }
+      },
+      {
+        "Key": "rpc.method",
+        "Value": {
+          "Type": "STRING",
+          "Value": "Ping"
+        }
+      },
+      {
+        "Key": "rpc.grpc.status_code",
+        "Value": {
+          "Type": "INT64",
+          "Value": 0
+        }
+      }
+    ],
+    "Events": [
+      {
+        "Name": "message",
+        "Attributes": [
+          {
+            "Key": "message.type",
+            "Value": {
+              "Type": "STRING",
+              "Value": "SENT"
+            }
+          },
+          {
+            "Key": "message.id",
+            "Value": {
+              "Type": "INT64",
+              "Value": 1
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:40.39979712Z"
+      },
+      {
+        "Name": "message",
+        "Attributes": [
+          {
+            "Key": "message.type",
+            "Value": {
+              "Type": "STRING",
+              "Value": "RECEIVED"
+            }
+          },
+          {
+            "Key": "message.id",
+            "Value": {
+              "Type": "INT64",
+              "Value": 1
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:40.400601091Z"
+      }
+    ],
+    "Links": null,
+    "Status": {
+      "Code": "Ok",
+      "Description": ""
+    },
+    "DroppedAttributes": 0,
+    "DroppedEvents": 0,
+    "DroppedLinks": 0,
+    "ChildSpanCount": 0,
+    "Resource": [
+      {
+        "Key": "service.name",
+        "Value": {
+          "Type": "STRING",
+          "Value": "buildx"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.language",
+        "Value": {
+          "Type": "STRING",
+          "Value": "go"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.name",
+        "Value": {
+          "Type": "STRING",
+          "Value": "opentelemetry"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.version",
+        "Value": {
+          "Type": "STRING",
+          "Value": "1.14.0"
+        }
+      }
+    ],
+    "InstrumentationLibrary": {
+      "Name": "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc",
+      "Version": "semver:0.40.0",
+      "SchemaURL": ""
+    }
+  },
+  {
+    "Name": "moby.filesync.v1.FileSync/DiffCopy",
+    "SpanContext": {
+      "TraceID": "0555ce1903feb85770d102846273073b",
+      "SpanID": "a6070511de38c86a",
+      "TraceFlags": "00",
+      "TraceState": "",
+      "Remote": false
+    },
+    "Parent": {
+      "TraceID": "0555ce1903feb85770d102846273073b",
+      "SpanID": "70f459c5d3c2c488",
+      "TraceFlags": "00",
+      "TraceState": "",
+      "Remote": false
+    },
+    "SpanKind": 2,
+    "StartTime": "2024-01-12T14:57:40.409090629Z",
+    "EndTime": "2024-01-12T14:57:40.418671608Z",
+    "Attributes": [
+      {
+        "Key": "rpc.system",
+        "Value": {
+          "Type": "STRING",
+          "Value": "grpc"
+        }
+      },
+      {
+        "Key": "rpc.service",
+        "Value": {
+          "Type": "STRING",
+          "Value": "moby.filesync.v1.FileSync"
+        }
+      },
+      {
+        "Key": "rpc.method",
+        "Value": {
+          "Type": "STRING",
+          "Value": "DiffCopy"
+        }
+      },
+      {
+        "Key": "rpc.grpc.status_code",
+        "Value": {
+          "Type": "INT64",
+          "Value": 0
+        }
+      }
+    ],
+    "Events": [
+      {
+        "Name": "message",
+        "Attributes": [
+          {
+            "Key": "message.type",
+            "Value": {
+              "Type": "STRING",
+              "Value": "SENT"
+            }
+          },
+          {
+            "Key": "message.id",
+            "Value": {
+              "Type": "INT64",
+              "Value": 1
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:40.415069418Z"
+      },
+      {
+        "Name": "message",
+        "Attributes": [
+          {
+            "Key": "message.type",
+            "Value": {
+              "Type": "STRING",
+              "Value": "RECEIVED"
+            }
+          },
+          {
+            "Key": "message.id",
+            "Value": {
+              "Type": "INT64",
+              "Value": 1
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:40.416971452Z"
+      },
+      {
+        "Name": "message",
+        "Attributes": [
+          {
+            "Key": "message.type",
+            "Value": {
+              "Type": "STRING",
+              "Value": "SENT"
+            }
+          },
+          {
+            "Key": "message.id",
+            "Value": {
+              "Type": "INT64",
+              "Value": 2
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:40.417874398Z"
+      },
+      {
+        "Name": "message",
+        "Attributes": [
+          {
+            "Key": "message.type",
+            "Value": {
+              "Type": "STRING",
+              "Value": "SENT"
+            }
+          },
+          {
+            "Key": "message.id",
+            "Value": {
+              "Type": "INT64",
+              "Value": 3
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:40.417984414Z"
+      },
+      {
+        "Name": "message",
+        "Attributes": [
+          {
+            "Key": "message.type",
+            "Value": {
+              "Type": "STRING",
+              "Value": "SENT"
+            }
+          },
+          {
+            "Key": "message.id",
+            "Value": {
+              "Type": "INT64",
+              "Value": 4
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:40.418118123Z"
+      },
+      {
+        "Name": "message",
+        "Attributes": [
+          {
+            "Key": "message.type",
+            "Value": {
+              "Type": "STRING",
+              "Value": "RECEIVED"
+            }
+          },
+          {
+            "Key": "message.id",
+            "Value": {
+              "Type": "INT64",
+              "Value": 2
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:40.41862914Z"
+      },
+      {
+        "Name": "message",
+        "Attributes": [
+          {
+            "Key": "message.type",
+            "Value": {
+              "Type": "STRING",
+              "Value": "SENT"
+            }
+          },
+          {
+            "Key": "message.id",
+            "Value": {
+              "Type": "INT64",
+              "Value": 5
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:40.418637065Z"
+      }
+    ],
+    "Links": null,
+    "Status": {
+      "Code": "Ok",
+      "Description": ""
+    },
+    "DroppedAttributes": 0,
+    "DroppedEvents": 0,
+    "DroppedLinks": 0,
+    "ChildSpanCount": 0,
+    "Resource": [
+      {
+        "Key": "service.name",
+        "Value": {
+          "Type": "STRING",
+          "Value": "buildx"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.language",
+        "Value": {
+          "Type": "STRING",
+          "Value": "go"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.name",
+        "Value": {
+          "Type": "STRING",
+          "Value": "opentelemetry"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.version",
+        "Value": {
+          "Type": "STRING",
+          "Value": "1.14.0"
+        }
+      }
+    ],
+    "InstrumentationLibrary": {
+      "Name": "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc",
+      "Version": "semver:0.40.0",
+      "SchemaURL": ""
+    }
+  },
+  {
+    "Name": "moby.filesync.v1.Auth/VerifyTokenAuthority",
+    "SpanContext": {
+      "TraceID": "0555ce1903feb85770d102846273073b",
+      "SpanID": "f9e99c0194575ce4",
+      "TraceFlags": "00",
+      "TraceState": "",
+      "Remote": false
+    },
+    "Parent": {
+      "TraceID": "0555ce1903feb85770d102846273073b",
+      "SpanID": "f3a80839f6b95729",
+      "TraceFlags": "00",
+      "TraceState": "",
+      "Remote": false
+    },
+    "SpanKind": 2,
+    "StartTime": "2024-01-12T14:57:40.445118451Z",
+    "EndTime": "2024-01-12T14:57:40.523367473Z",
+    "Attributes": [
+      {
+        "Key": "rpc.system",
+        "Value": {
+          "Type": "STRING",
+          "Value": "grpc"
+        }
+      },
+      {
+        "Key": "rpc.service",
+        "Value": {
+          "Type": "STRING",
+          "Value": "moby.filesync.v1.Auth"
+        }
+      },
+      {
+        "Key": "rpc.method",
+        "Value": {
+          "Type": "STRING",
+          "Value": "VerifyTokenAuthority"
+        }
+      },
+      {
+        "Key": "rpc.grpc.status_code",
+        "Value": {
+          "Type": "INT64",
+          "Value": 0
+        }
+      }
+    ],
+    "Events": [
+      {
+        "Name": "message",
+        "Attributes": [
+          {
+            "Key": "message.type",
+            "Value": {
+              "Type": "STRING",
+              "Value": "RECEIVED"
+            }
+          },
+          {
+            "Key": "message.id",
+            "Value": {
+              "Type": "INT64",
+              "Value": 1
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:40.445122027Z"
+      },
+      {
+        "Name": "message",
+        "Attributes": [
+          {
+            "Key": "message.type",
+            "Value": {
+              "Type": "STRING",
+              "Value": "SENT"
+            }
+          },
+          {
+            "Key": "message.id",
+            "Value": {
+              "Type": "INT64",
+              "Value": 1
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:40.523361662Z"
+      }
+    ],
+    "Links": null,
+    "Status": {
+      "Code": "Ok",
+      "Description": ""
+    },
+    "DroppedAttributes": 0,
+    "DroppedEvents": 0,
+    "DroppedLinks": 0,
+    "ChildSpanCount": 0,
+    "Resource": [
+      {
+        "Key": "service.name",
+        "Value": {
+          "Type": "STRING",
+          "Value": "buildx"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.language",
+        "Value": {
+          "Type": "STRING",
+          "Value": "go"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.name",
+        "Value": {
+          "Type": "STRING",
+          "Value": "opentelemetry"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.version",
+        "Value": {
+          "Type": "STRING",
+          "Value": "1.14.0"
+        }
+      }
+    ],
+    "InstrumentationLibrary": {
+      "Name": "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc",
+      "Version": "semver:0.40.0",
+      "SchemaURL": ""
+    }
+  },
+  {
+    "Name": "moby.filesync.v1.Auth/FetchToken",
+    "SpanContext": {
+      "TraceID": "0555ce1903feb85770d102846273073b",
+      "SpanID": "78a4e25bc2d4278d",
+      "TraceFlags": "00",
+      "TraceState": "",
+      "Remote": false
+    },
+    "Parent": {
+      "TraceID": "0555ce1903feb85770d102846273073b",
+      "SpanID": "f26320c4e0b697ac",
+      "TraceFlags": "00",
+      "TraceState": "",
+      "Remote": false
+    },
+    "SpanKind": 2,
+    "StartTime": "2024-01-12T14:57:40.525027244Z",
+    "EndTime": "2024-01-12T14:57:40.900911737Z",
+    "Attributes": [
+      {
+        "Key": "rpc.system",
+        "Value": {
+          "Type": "STRING",
+          "Value": "grpc"
+        }
+      },
+      {
+        "Key": "rpc.service",
+        "Value": {
+          "Type": "STRING",
+          "Value": "moby.filesync.v1.Auth"
+        }
+      },
+      {
+        "Key": "rpc.method",
+        "Value": {
+          "Type": "STRING",
+          "Value": "FetchToken"
+        }
+      },
+      {
+        "Key": "rpc.grpc.status_code",
+        "Value": {
+          "Type": "INT64",
+          "Value": 0
+        }
+      }
+    ],
+    "Events": [
+      {
+        "Name": "message",
+        "Attributes": [
+          {
+            "Key": "message.type",
+            "Value": {
+              "Type": "STRING",
+              "Value": "RECEIVED"
+            }
+          },
+          {
+            "Key": "message.id",
+            "Value": {
+              "Type": "INT64",
+              "Value": 1
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:40.525030811Z"
+      },
+      {
+        "Name": "message",
+        "Attributes": [
+          {
+            "Key": "message.type",
+            "Value": {
+              "Type": "STRING",
+              "Value": "SENT"
+            }
+          },
+          {
+            "Key": "message.id",
+            "Value": {
+              "Type": "INT64",
+              "Value": 1
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:40.90090819Z"
+      }
+    ],
+    "Links": null,
+    "Status": {
+      "Code": "Ok",
+      "Description": ""
+    },
+    "DroppedAttributes": 0,
+    "DroppedEvents": 0,
+    "DroppedLinks": 0,
+    "ChildSpanCount": 0,
+    "Resource": [
+      {
+        "Key": "service.name",
+        "Value": {
+          "Type": "STRING",
+          "Value": "buildx"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.language",
+        "Value": {
+          "Type": "STRING",
+          "Value": "go"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.name",
+        "Value": {
+          "Type": "STRING",
+          "Value": "opentelemetry"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.version",
+        "Value": {
+          "Type": "STRING",
+          "Value": "1.14.0"
+        }
+      }
+    ],
+    "InstrumentationLibrary": {
+      "Name": "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc",
+      "Version": "semver:0.40.0",
+      "SchemaURL": ""
+    }
+  },
+  {
+    "Name": "moby.filesync.v1.FileSync/DiffCopy",
+    "SpanContext": {
+      "TraceID": "0555ce1903feb85770d102846273073b",
+      "SpanID": "03f88200c8224808",
+      "TraceFlags": "00",
+      "TraceState": "",
+      "Remote": false
+    },
+    "Parent": {
+      "TraceID": "0555ce1903feb85770d102846273073b",
+      "SpanID": "50509a5bad270153",
+      "TraceFlags": "00",
+      "TraceState": "",
+      "Remote": false
+    },
+    "SpanKind": 2,
+    "StartTime": "2024-01-12T14:57:41.456522474Z",
+    "EndTime": "2024-01-12T14:57:41.464340217Z",
+    "Attributes": [
+      {
+        "Key": "rpc.system",
+        "Value": {
+          "Type": "STRING",
+          "Value": "grpc"
+        }
+      },
+      {
+        "Key": "rpc.service",
+        "Value": {
+          "Type": "STRING",
+          "Value": "moby.filesync.v1.FileSync"
+        }
+      },
+      {
+        "Key": "rpc.method",
+        "Value": {
+          "Type": "STRING",
+          "Value": "DiffCopy"
+        }
+      },
+      {
+        "Key": "rpc.grpc.status_code",
+        "Value": {
+          "Type": "INT64",
+          "Value": 0
+        }
+      }
+    ],
+    "Events": [
+      {
+        "Name": "message",
+        "Attributes": [
+          {
+            "Key": "message.type",
+            "Value": {
+              "Type": "STRING",
+              "Value": "SENT"
+            }
+          },
+          {
+            "Key": "message.id",
+            "Value": {
+              "Type": "INT64",
+              "Value": 1
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:41.463714236Z"
+      },
+      {
+        "Name": "message",
+        "Attributes": [
+          {
+            "Key": "message.type",
+            "Value": {
+              "Type": "STRING",
+              "Value": "RECEIVED"
+            }
+          },
+          {
+            "Key": "message.id",
+            "Value": {
+              "Type": "INT64",
+              "Value": 1
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:41.464300323Z"
+      },
+      {
+        "Name": "message",
+        "Attributes": [
+          {
+            "Key": "message.type",
+            "Value": {
+              "Type": "STRING",
+              "Value": "SENT"
+            }
+          },
+          {
+            "Key": "message.id",
+            "Value": {
+              "Type": "INT64",
+              "Value": 2
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:41.464304661Z"
+      }
+    ],
+    "Links": null,
+    "Status": {
+      "Code": "Ok",
+      "Description": ""
+    },
+    "DroppedAttributes": 0,
+    "DroppedEvents": 0,
+    "DroppedLinks": 0,
+    "ChildSpanCount": 0,
+    "Resource": [
+      {
+        "Key": "service.name",
+        "Value": {
+          "Type": "STRING",
+          "Value": "buildx"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.language",
+        "Value": {
+          "Type": "STRING",
+          "Value": "go"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.name",
+        "Value": {
+          "Type": "STRING",
+          "Value": "opentelemetry"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.version",
+        "Value": {
+          "Type": "STRING",
+          "Value": "1.14.0"
+        }
+      }
+    ],
+    "InstrumentationLibrary": {
+      "Name": "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc",
+      "Version": "semver:0.40.0",
+      "SchemaURL": ""
+    }
+  },
+  {
+    "Name": "moby.filesync.v1.Auth/VerifyTokenAuthority",
+    "SpanContext": {
+      "TraceID": "0555ce1903feb85770d102846273073b",
+      "SpanID": "6d33d51dcc276bdb",
+      "TraceFlags": "00",
+      "TraceState": "",
+      "Remote": false
+    },
+    "Parent": {
+      "TraceID": "0555ce1903feb85770d102846273073b",
+      "SpanID": "6b3f3a9d50fbe98b",
+      "TraceFlags": "00",
+      "TraceState": "",
+      "Remote": false
+    },
+    "SpanKind": 2,
+    "StartTime": "2024-01-12T14:57:41.494006854Z",
+    "EndTime": "2024-01-12T14:57:41.494231333Z",
+    "Attributes": [
+      {
+        "Key": "rpc.system",
+        "Value": {
+          "Type": "STRING",
+          "Value": "grpc"
+        }
+      },
+      {
+        "Key": "rpc.service",
+        "Value": {
+          "Type": "STRING",
+          "Value": "moby.filesync.v1.Auth"
+        }
+      },
+      {
+        "Key": "rpc.method",
+        "Value": {
+          "Type": "STRING",
+          "Value": "VerifyTokenAuthority"
+        }
+      },
+      {
+        "Key": "rpc.grpc.status_code",
+        "Value": {
+          "Type": "INT64",
+          "Value": 0
+        }
+      }
+    ],
+    "Events": [
+      {
+        "Name": "message",
+        "Attributes": [
+          {
+            "Key": "message.type",
+            "Value": {
+              "Type": "STRING",
+              "Value": "RECEIVED"
+            }
+          },
+          {
+            "Key": "message.id",
+            "Value": {
+              "Type": "INT64",
+              "Value": 1
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:41.494012254Z"
+      },
+      {
+        "Name": "message",
+        "Attributes": [
+          {
+            "Key": "message.type",
+            "Value": {
+              "Type": "STRING",
+              "Value": "SENT"
+            }
+          },
+          {
+            "Key": "message.id",
+            "Value": {
+              "Type": "INT64",
+              "Value": 1
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:41.4942295Z"
+      }
+    ],
+    "Links": null,
+    "Status": {
+      "Code": "Ok",
+      "Description": ""
+    },
+    "DroppedAttributes": 0,
+    "DroppedEvents": 0,
+    "DroppedLinks": 0,
+    "ChildSpanCount": 0,
+    "Resource": [
+      {
+        "Key": "service.name",
+        "Value": {
+          "Type": "STRING",
+          "Value": "buildx"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.language",
+        "Value": {
+          "Type": "STRING",
+          "Value": "go"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.name",
+        "Value": {
+          "Type": "STRING",
+          "Value": "opentelemetry"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.version",
+        "Value": {
+          "Type": "STRING",
+          "Value": "1.14.0"
+        }
+      }
+    ],
+    "InstrumentationLibrary": {
+      "Name": "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc",
+      "Version": "semver:0.40.0",
+      "SchemaURL": ""
+    }
+  },
+  {
+    "Name": "moby.filesync.v1.Auth/VerifyTokenAuthority",
+    "SpanContext": {
+      "TraceID": "0555ce1903feb85770d102846273073b",
+      "SpanID": "76b7ceb8af988ea9",
+      "TraceFlags": "00",
+      "TraceState": "",
+      "Remote": false
+    },
+    "Parent": {
+      "TraceID": "0555ce1903feb85770d102846273073b",
+      "SpanID": "f857d0002991458c",
+      "TraceFlags": "00",
+      "TraceState": "",
+      "Remote": false
+    },
+    "SpanKind": 2,
+    "StartTime": "2024-01-12T14:57:41.498132161Z",
+    "EndTime": "2024-01-12T14:57:41.498314011Z",
+    "Attributes": [
+      {
+        "Key": "rpc.system",
+        "Value": {
+          "Type": "STRING",
+          "Value": "grpc"
+        }
+      },
+      {
+        "Key": "rpc.service",
+        "Value": {
+          "Type": "STRING",
+          "Value": "moby.filesync.v1.Auth"
+        }
+      },
+      {
+        "Key": "rpc.method",
+        "Value": {
+          "Type": "STRING",
+          "Value": "VerifyTokenAuthority"
+        }
+      },
+      {
+        "Key": "rpc.grpc.status_code",
+        "Value": {
+          "Type": "INT64",
+          "Value": 0
+        }
+      }
+    ],
+    "Events": [
+      {
+        "Name": "message",
+        "Attributes": [
+          {
+            "Key": "message.type",
+            "Value": {
+              "Type": "STRING",
+              "Value": "RECEIVED"
+            }
+          },
+          {
+            "Key": "message.id",
+            "Value": {
+              "Type": "INT64",
+              "Value": 1
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:41.498134275Z"
+      },
+      {
+        "Name": "message",
+        "Attributes": [
+          {
+            "Key": "message.type",
+            "Value": {
+              "Type": "STRING",
+              "Value": "SENT"
+            }
+          },
+          {
+            "Key": "message.id",
+            "Value": {
+              "Type": "INT64",
+              "Value": 1
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:41.498312929Z"
+      }
+    ],
+    "Links": null,
+    "Status": {
+      "Code": "Ok",
+      "Description": ""
+    },
+    "DroppedAttributes": 0,
+    "DroppedEvents": 0,
+    "DroppedLinks": 0,
+    "ChildSpanCount": 0,
+    "Resource": [
+      {
+        "Key": "service.name",
+        "Value": {
+          "Type": "STRING",
+          "Value": "buildx"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.language",
+        "Value": {
+          "Type": "STRING",
+          "Value": "go"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.name",
+        "Value": {
+          "Type": "STRING",
+          "Value": "opentelemetry"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.version",
+        "Value": {
+          "Type": "STRING",
+          "Value": "1.14.0"
+        }
+      }
+    ],
+    "InstrumentationLibrary": {
+      "Name": "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc",
+      "Version": "semver:0.40.0",
+      "SchemaURL": ""
+    }
+  },
+  {
+    "Name": "moby.filesync.v1.Auth/FetchToken",
+    "SpanContext": {
+      "TraceID": "0555ce1903feb85770d102846273073b",
+      "SpanID": "cc230bba43af76bf",
+      "TraceFlags": "00",
+      "TraceState": "",
+      "Remote": false
+    },
+    "Parent": {
+      "TraceID": "0555ce1903feb85770d102846273073b",
+      "SpanID": "912de85fdfd20fe7",
+      "TraceFlags": "00",
+      "TraceState": "",
+      "Remote": false
+    },
+    "SpanKind": 2,
+    "StartTime": "2024-01-12T14:57:41.499095661Z",
+    "EndTime": "2024-01-12T14:57:41.841578399Z",
+    "Attributes": [
+      {
+        "Key": "rpc.system",
+        "Value": {
+          "Type": "STRING",
+          "Value": "grpc"
+        }
+      },
+      {
+        "Key": "rpc.service",
+        "Value": {
+          "Type": "STRING",
+          "Value": "moby.filesync.v1.Auth"
+        }
+      },
+      {
+        "Key": "rpc.method",
+        "Value": {
+          "Type": "STRING",
+          "Value": "FetchToken"
+        }
+      },
+      {
+        "Key": "rpc.grpc.status_code",
+        "Value": {
+          "Type": "INT64",
+          "Value": 0
+        }
+      }
+    ],
+    "Events": [
+      {
+        "Name": "message",
+        "Attributes": [
+          {
+            "Key": "message.type",
+            "Value": {
+              "Type": "STRING",
+              "Value": "RECEIVED"
+            }
+          },
+          {
+            "Key": "message.id",
+            "Value": {
+              "Type": "INT64",
+              "Value": 1
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:41.499097465Z"
+      },
+      {
+        "Name": "message",
+        "Attributes": [
+          {
+            "Key": "message.type",
+            "Value": {
+              "Type": "STRING",
+              "Value": "SENT"
+            }
+          },
+          {
+            "Key": "message.id",
+            "Value": {
+              "Type": "INT64",
+              "Value": 1
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:41.84157357Z"
+      }
+    ],
+    "Links": null,
+    "Status": {
+      "Code": "Ok",
+      "Description": ""
+    },
+    "DroppedAttributes": 0,
+    "DroppedEvents": 0,
+    "DroppedLinks": 0,
+    "ChildSpanCount": 0,
+    "Resource": [
+      {
+        "Key": "service.name",
+        "Value": {
+          "Type": "STRING",
+          "Value": "buildx"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.language",
+        "Value": {
+          "Type": "STRING",
+          "Value": "go"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.name",
+        "Value": {
+          "Type": "STRING",
+          "Value": "opentelemetry"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.version",
+        "Value": {
+          "Type": "STRING",
+          "Value": "1.14.0"
+        }
+      }
+    ],
+    "InstrumentationLibrary": {
+      "Name": "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc",
+      "Version": "semver:0.40.0",
+      "SchemaURL": ""
+    }
+  },
+  {
+    "Name": "moby.filesync.v1.Auth/FetchToken",
+    "SpanContext": {
+      "TraceID": "0555ce1903feb85770d102846273073b",
+      "SpanID": "e57242e59af94e80",
+      "TraceFlags": "00",
+      "TraceState": "",
+      "Remote": false
+    },
+    "Parent": {
+      "TraceID": "0555ce1903feb85770d102846273073b",
+      "SpanID": "d9ed48c061e2ecfd",
+      "TraceFlags": "00",
+      "TraceState": "",
+      "Remote": false
+    },
+    "SpanKind": 2,
+    "StartTime": "2024-01-12T14:57:41.494901557Z",
+    "EndTime": "2024-01-12T14:57:41.843975306Z",
+    "Attributes": [
+      {
+        "Key": "rpc.system",
+        "Value": {
+          "Type": "STRING",
+          "Value": "grpc"
+        }
+      },
+      {
+        "Key": "rpc.service",
+        "Value": {
+          "Type": "STRING",
+          "Value": "moby.filesync.v1.Auth"
+        }
+      },
+      {
+        "Key": "rpc.method",
+        "Value": {
+          "Type": "STRING",
+          "Value": "FetchToken"
+        }
+      },
+      {
+        "Key": "rpc.grpc.status_code",
+        "Value": {
+          "Type": "INT64",
+          "Value": 0
+        }
+      }
+    ],
+    "Events": [
+      {
+        "Name": "message",
+        "Attributes": [
+          {
+            "Key": "message.type",
+            "Value": {
+              "Type": "STRING",
+              "Value": "RECEIVED"
+            }
+          },
+          {
+            "Key": "message.id",
+            "Value": {
+              "Type": "INT64",
+              "Value": 1
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:41.494904071Z"
+      },
+      {
+        "Name": "message",
+        "Attributes": [
+          {
+            "Key": "message.type",
+            "Value": {
+              "Type": "STRING",
+              "Value": "SENT"
+            }
+          },
+          {
+            "Key": "message.id",
+            "Value": {
+              "Type": "INT64",
+              "Value": 1
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:41.8439725Z"
+      }
+    ],
+    "Links": null,
+    "Status": {
+      "Code": "Ok",
+      "Description": ""
+    },
+    "DroppedAttributes": 0,
+    "DroppedEvents": 0,
+    "DroppedLinks": 0,
+    "ChildSpanCount": 0,
+    "Resource": [
+      {
+        "Key": "service.name",
+        "Value": {
+          "Type": "STRING",
+          "Value": "buildx"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.language",
+        "Value": {
+          "Type": "STRING",
+          "Value": "go"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.name",
+        "Value": {
+          "Type": "STRING",
+          "Value": "opentelemetry"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.version",
+        "Value": {
+          "Type": "STRING",
+          "Value": "1.14.0"
+        }
+      }
+    ],
+    "InstrumentationLibrary": {
+      "Name": "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc",
+      "Version": "semver:0.40.0",
+      "SchemaURL": ""
+    }
+  },
+  {
+    "Name": "moby.buildkit.v1.frontend.LLBBridge/Solve",
+    "SpanContext": {
+      "TraceID": "0555ce1903feb85770d102846273073b",
+      "SpanID": "4548113210977542",
+      "TraceFlags": "00",
+      "TraceState": "",
+      "Remote": false
+    },
+    "Parent": {
+      "TraceID": "0555ce1903feb85770d102846273073b",
+      "SpanID": "d30eea87f2f0241d",
+      "TraceFlags": "00",
+      "TraceState": "",
+      "Remote": false
+    },
+    "SpanKind": 3,
+    "StartTime": "2024-01-12T14:57:40.400661584Z",
+    "EndTime": "2024-01-12T14:57:42.2855668Z",
+    "Attributes": [
+      {
+        "Key": "rpc.system",
+        "Value": {
+          "Type": "STRING",
+          "Value": "grpc"
+        }
+      },
+      {
+        "Key": "rpc.service",
+        "Value": {
+          "Type": "STRING",
+          "Value": "moby.buildkit.v1.frontend.LLBBridge"
+        }
+      },
+      {
+        "Key": "rpc.method",
+        "Value": {
+          "Type": "STRING",
+          "Value": "Solve"
+        }
+      },
+      {
+        "Key": "rpc.grpc.status_code",
+        "Value": {
+          "Type": "INT64",
+          "Value": 0
+        }
+      }
+    ],
+    "Events": [
+      {
+        "Name": "message",
+        "Attributes": [
+          {
+            "Key": "message.type",
+            "Value": {
+              "Type": "STRING",
+              "Value": "SENT"
+            }
+          },
+          {
+            "Key": "message.id",
+            "Value": {
+              "Type": "INT64",
+              "Value": 1
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:40.400666954Z"
+      },
+      {
+        "Name": "message",
+        "Attributes": [
+          {
+            "Key": "message.type",
+            "Value": {
+              "Type": "STRING",
+              "Value": "RECEIVED"
+            }
+          },
+          {
+            "Key": "message.id",
+            "Value": {
+              "Type": "INT64",
+              "Value": 1
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:42.285564065Z"
+      }
+    ],
+    "Links": null,
+    "Status": {
+      "Code": "Ok",
+      "Description": ""
+    },
+    "DroppedAttributes": 0,
+    "DroppedEvents": 0,
+    "DroppedLinks": 0,
+    "ChildSpanCount": 0,
+    "Resource": [
+      {
+        "Key": "service.name",
+        "Value": {
+          "Type": "STRING",
+          "Value": "buildx"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.language",
+        "Value": {
+          "Type": "STRING",
+          "Value": "go"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.name",
+        "Value": {
+          "Type": "STRING",
+          "Value": "opentelemetry"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.version",
+        "Value": {
+          "Type": "STRING",
+          "Value": "1.14.0"
+        }
+      }
+    ],
+    "InstrumentationLibrary": {
+      "Name": "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc",
+      "Version": "semver:0.40.0",
+      "SchemaURL": ""
+    }
+  },
+  {
+    "Name": "moby.buildkit.v1.frontend.LLBBridge/Return",
+    "SpanContext": {
+      "TraceID": "0555ce1903feb85770d102846273073b",
+      "SpanID": "76cae146240f0505",
+      "TraceFlags": "00",
+      "TraceState": "",
+      "Remote": false
+    },
+    "Parent": {
+      "TraceID": "0555ce1903feb85770d102846273073b",
+      "SpanID": "d30eea87f2f0241d",
+      "TraceFlags": "00",
+      "TraceState": "",
+      "Remote": false
+    },
+    "SpanKind": 3,
+    "StartTime": "2024-01-12T14:57:42.28564689Z",
+    "EndTime": "2024-01-12T14:57:42.286589372Z",
+    "Attributes": [
+      {
+        "Key": "rpc.system",
+        "Value": {
+          "Type": "STRING",
+          "Value": "grpc"
+        }
+      },
+      {
+        "Key": "rpc.service",
+        "Value": {
+          "Type": "STRING",
+          "Value": "moby.buildkit.v1.frontend.LLBBridge"
+        }
+      },
+      {
+        "Key": "rpc.method",
+        "Value": {
+          "Type": "STRING",
+          "Value": "Return"
+        }
+      },
+      {
+        "Key": "rpc.grpc.status_code",
+        "Value": {
+          "Type": "INT64",
+          "Value": 0
+        }
+      }
+    ],
+    "Events": [
+      {
+        "Name": "message",
+        "Attributes": [
+          {
+            "Key": "message.type",
+            "Value": {
+              "Type": "STRING",
+              "Value": "SENT"
+            }
+          },
+          {
+            "Key": "message.id",
+            "Value": {
+              "Type": "INT64",
+              "Value": 1
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:42.285663691Z"
+      },
+      {
+        "Name": "message",
+        "Attributes": [
+          {
+            "Key": "message.type",
+            "Value": {
+              "Type": "STRING",
+              "Value": "RECEIVED"
+            }
+          },
+          {
+            "Key": "message.id",
+            "Value": {
+              "Type": "INT64",
+              "Value": 1
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:42.286587859Z"
+      }
+    ],
+    "Links": null,
+    "Status": {
+      "Code": "Ok",
+      "Description": ""
+    },
+    "DroppedAttributes": 0,
+    "DroppedEvents": 0,
+    "DroppedLinks": 0,
+    "ChildSpanCount": 0,
+    "Resource": [
+      {
+        "Key": "service.name",
+        "Value": {
+          "Type": "STRING",
+          "Value": "buildx"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.language",
+        "Value": {
+          "Type": "STRING",
+          "Value": "go"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.name",
+        "Value": {
+          "Type": "STRING",
+          "Value": "opentelemetry"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.version",
+        "Value": {
+          "Type": "STRING",
+          "Value": "1.14.0"
+        }
+      }
+    ],
+    "InstrumentationLibrary": {
+      "Name": "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc",
+      "Version": "semver:0.40.0",
+      "SchemaURL": ""
+    }
+  },
+  {
+    "Name": "moby.filesync.v1.FileSync/DiffCopy",
+    "SpanContext": {
+      "TraceID": "0555ce1903feb85770d102846273073b",
+      "SpanID": "a0d9ee53b31b1760",
+      "TraceFlags": "00",
+      "TraceState": "",
+      "Remote": false
+    },
+    "Parent": {
+      "TraceID": "0555ce1903feb85770d102846273073b",
+      "SpanID": "0f4681a3ad4d29f8",
+      "TraceFlags": "00",
+      "TraceState": "",
+      "Remote": false
+    },
+    "SpanKind": 2,
+    "StartTime": "2024-01-12T14:57:42.293696748Z",
+    "EndTime": "2024-01-12T14:57:42.369270402Z",
+    "Attributes": [
+      {
+        "Key": "rpc.system",
+        "Value": {
+          "Type": "STRING",
+          "Value": "grpc"
+        }
+      },
+      {
+        "Key": "rpc.service",
+        "Value": {
+          "Type": "STRING",
+          "Value": "moby.filesync.v1.FileSync"
+        }
+      },
+      {
+        "Key": "rpc.method",
+        "Value": {
+          "Type": "STRING",
+          "Value": "DiffCopy"
+        }
+      },
+      {
+        "Key": "rpc.grpc.status_code",
+        "Value": {
+          "Type": "INT64",
+          "Value": 0
+        }
+      }
+    ],
+    "Events": [
+      {
+        "Name": "message",
+        "Attributes": [
+          {
+            "Key": "message.type",
+            "Value": {
+              "Type": "STRING",
+              "Value": "SENT"
+            }
+          },
+          {
+            "Key": "message.id",
+            "Value": {
+              "Type": "INT64",
+              "Value": 1
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:42.31089318Z"
+      },
+      {
+        "Name": "message",
+        "Attributes": [
+          {
+            "Key": "message.type",
+            "Value": {
+              "Type": "STRING",
+              "Value": "SENT"
+            }
+          },
+          {
+            "Key": "message.id",
+            "Value": {
+              "Type": "INT64",
+              "Value": 2
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:42.316940688Z"
+      },
+      {
+        "Name": "message",
+        "Attributes": [
+          {
+            "Key": "message.type",
+            "Value": {
+              "Type": "STRING",
+              "Value": "SENT"
+            }
+          },
+          {
+            "Key": "message.id",
+            "Value": {
+              "Type": "INT64",
+              "Value": 3
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:42.320055056Z"
+      },
+      {
+        "Name": "message",
+        "Attributes": [
+          {
+            "Key": "message.type",
+            "Value": {
+              "Type": "STRING",
+              "Value": "SENT"
+            }
+          },
+          {
+            "Key": "message.id",
+            "Value": {
+              "Type": "INT64",
+              "Value": 4
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:42.322700568Z"
+      },
+      {
+        "Name": "message",
+        "Attributes": [
+          {
+            "Key": "message.type",
+            "Value": {
+              "Type": "STRING",
+              "Value": "SENT"
+            }
+          },
+          {
+            "Key": "message.id",
+            "Value": {
+              "Type": "INT64",
+              "Value": 5
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:42.325137621Z"
+      },
+      {
+        "Name": "message",
+        "Attributes": [
+          {
+            "Key": "message.type",
+            "Value": {
+              "Type": "STRING",
+              "Value": "SENT"
+            }
+          },
+          {
+            "Key": "message.id",
+            "Value": {
+              "Type": "INT64",
+              "Value": 6
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:42.329895107Z"
+      },
+      {
+        "Name": "message",
+        "Attributes": [
+          {
+            "Key": "message.type",
+            "Value": {
+              "Type": "STRING",
+              "Value": "SENT"
+            }
+          },
+          {
+            "Key": "message.id",
+            "Value": {
+              "Type": "INT64",
+              "Value": 7
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:42.333228304Z"
+      },
+      {
+        "Name": "message",
+        "Attributes": [
+          {
+            "Key": "message.type",
+            "Value": {
+              "Type": "STRING",
+              "Value": "SENT"
+            }
+          },
+          {
+            "Key": "message.id",
+            "Value": {
+              "Type": "INT64",
+              "Value": 8
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:42.339031163Z"
+      },
+      {
+        "Name": "message",
+        "Attributes": [
+          {
+            "Key": "message.type",
+            "Value": {
+              "Type": "STRING",
+              "Value": "SENT"
+            }
+          },
+          {
+            "Key": "message.id",
+            "Value": {
+              "Type": "INT64",
+              "Value": 9
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:42.341442287Z"
+      },
+      {
+        "Name": "message",
+        "Attributes": [
+          {
+            "Key": "message.type",
+            "Value": {
+              "Type": "STRING",
+              "Value": "SENT"
+            }
+          },
+          {
+            "Key": "message.id",
+            "Value": {
+              "Type": "INT64",
+              "Value": 10
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:42.350695522Z"
+      },
+      {
+        "Name": "message",
+        "Attributes": [
+          {
+            "Key": "message.type",
+            "Value": {
+              "Type": "STRING",
+              "Value": "SENT"
+            }
+          },
+          {
+            "Key": "message.id",
+            "Value": {
+              "Type": "INT64",
+              "Value": 11
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:42.352515933Z"
+      },
+      {
+        "Name": "message",
+        "Attributes": [
+          {
+            "Key": "message.type",
+            "Value": {
+              "Type": "STRING",
+              "Value": "SENT"
+            }
+          },
+          {
+            "Key": "message.id",
+            "Value": {
+              "Type": "INT64",
+              "Value": 12
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:42.35439304Z"
+      },
+      {
+        "Name": "message",
+        "Attributes": [
+          {
+            "Key": "message.type",
+            "Value": {
+              "Type": "STRING",
+              "Value": "SENT"
+            }
+          },
+          {
+            "Key": "message.id",
+            "Value": {
+              "Type": "INT64",
+              "Value": 13
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:42.356241885Z"
+      },
+      {
+        "Name": "message",
+        "Attributes": [
+          {
+            "Key": "message.type",
+            "Value": {
+              "Type": "STRING",
+              "Value": "SENT"
+            }
+          },
+          {
+            "Key": "message.id",
+            "Value": {
+              "Type": "INT64",
+              "Value": 14
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:42.358001301Z"
+      },
+      {
+        "Name": "message",
+        "Attributes": [
+          {
+            "Key": "message.type",
+            "Value": {
+              "Type": "STRING",
+              "Value": "SENT"
+            }
+          },
+          {
+            "Key": "message.id",
+            "Value": {
+              "Type": "INT64",
+              "Value": 15
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:42.359717427Z"
+      },
+      {
+        "Name": "message",
+        "Attributes": [
+          {
+            "Key": "message.type",
+            "Value": {
+              "Type": "STRING",
+              "Value": "SENT"
+            }
+          },
+          {
+            "Key": "message.id",
+            "Value": {
+              "Type": "INT64",
+              "Value": 16
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:42.361436218Z"
+      },
+      {
+        "Name": "message",
+        "Attributes": [
+          {
+            "Key": "message.type",
+            "Value": {
+              "Type": "STRING",
+              "Value": "RECEIVED"
+            }
+          },
+          {
+            "Key": "message.id",
+            "Value": {
+              "Type": "INT64",
+              "Value": 1
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:42.362075091Z"
+      },
+      {
+        "Name": "message",
+        "Attributes": [
+          {
+            "Key": "message.type",
+            "Value": {
+              "Type": "STRING",
+              "Value": "SENT"
+            }
+          },
+          {
+            "Key": "message.id",
+            "Value": {
+              "Type": "INT64",
+              "Value": 17
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:42.363162765Z"
+      },
+      {
+        "Name": "message",
+        "Attributes": [
+          {
+            "Key": "message.type",
+            "Value": {
+              "Type": "STRING",
+              "Value": "SENT"
+            }
+          },
+          {
+            "Key": "message.id",
+            "Value": {
+              "Type": "INT64",
+              "Value": 18
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:42.363331097Z"
+      },
+      {
+        "Name": "message",
+        "Attributes": [
+          {
+            "Key": "message.type",
+            "Value": {
+              "Type": "STRING",
+              "Value": "SENT"
+            }
+          },
+          {
+            "Key": "message.id",
+            "Value": {
+              "Type": "INT64",
+              "Value": 19
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:42.363577729Z"
+      },
+      {
+        "Name": "message",
+        "Attributes": [
+          {
+            "Key": "message.type",
+            "Value": {
+              "Type": "STRING",
+              "Value": "RECEIVED"
+            }
+          },
+          {
+            "Key": "message.id",
+            "Value": {
+              "Type": "INT64",
+              "Value": 2
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:42.364196313Z"
+      },
+      {
+        "Name": "message",
+        "Attributes": [
+          {
+            "Key": "message.type",
+            "Value": {
+              "Type": "STRING",
+              "Value": "SENT"
+            }
+          },
+          {
+            "Key": "message.id",
+            "Value": {
+              "Type": "INT64",
+              "Value": 20
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:42.365070687Z"
+      },
+      {
+        "Name": "message",
+        "Attributes": [
+          {
+            "Key": "message.type",
+            "Value": {
+              "Type": "STRING",
+              "Value": "SENT"
+            }
+          },
+          {
+            "Key": "message.id",
+            "Value": {
+              "Type": "INT64",
+              "Value": 21
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:42.365153813Z"
+      },
+      {
+        "Name": "message",
+        "Attributes": [
+          {
+            "Key": "message.type",
+            "Value": {
+              "Type": "STRING",
+              "Value": "SENT"
+            }
+          },
+          {
+            "Key": "message.id",
+            "Value": {
+              "Type": "INT64",
+              "Value": 22
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:42.365298203Z"
+      },
+      {
+        "Name": "message",
+        "Attributes": [
+          {
+            "Key": "message.type",
+            "Value": {
+              "Type": "STRING",
+              "Value": "SENT"
+            }
+          },
+          {
+            "Key": "message.id",
+            "Value": {
+              "Type": "INT64",
+              "Value": 23
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:42.366953244Z"
+      },
+      {
+        "Name": "message",
+        "Attributes": [
+          {
+            "Key": "message.type",
+            "Value": {
+              "Type": "STRING",
+              "Value": "SENT"
+            }
+          },
+          {
+            "Key": "message.id",
+            "Value": {
+              "Type": "INT64",
+              "Value": 24
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:42.368829308Z"
+      },
+      {
+        "Name": "message",
+        "Attributes": [
+          {
+            "Key": "message.type",
+            "Value": {
+              "Type": "STRING",
+              "Value": "SENT"
+            }
+          },
+          {
+            "Key": "message.id",
+            "Value": {
+              "Type": "INT64",
+              "Value": 25
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:42.368865285Z"
+      },
+      {
+        "Name": "message",
+        "Attributes": [
+          {
+            "Key": "message.type",
+            "Value": {
+              "Type": "STRING",
+              "Value": "RECEIVED"
+            }
+          },
+          {
+            "Key": "message.id",
+            "Value": {
+              "Type": "INT64",
+              "Value": 3
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:42.369254041Z"
+      },
+      {
+        "Name": "message",
+        "Attributes": [
+          {
+            "Key": "message.type",
+            "Value": {
+              "Type": "STRING",
+              "Value": "SENT"
+            }
+          },
+          {
+            "Key": "message.id",
+            "Value": {
+              "Type": "INT64",
+              "Value": 26
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:42.369257247Z"
+      }
+    ],
+    "Links": null,
+    "Status": {
+      "Code": "Ok",
+      "Description": ""
+    },
+    "DroppedAttributes": 0,
+    "DroppedEvents": 0,
+    "DroppedLinks": 0,
+    "ChildSpanCount": 0,
+    "Resource": [
+      {
+        "Key": "service.name",
+        "Value": {
+          "Type": "STRING",
+          "Value": "buildx"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.language",
+        "Value": {
+          "Type": "STRING",
+          "Value": "go"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.name",
+        "Value": {
+          "Type": "STRING",
+          "Value": "opentelemetry"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.version",
+        "Value": {
+          "Type": "STRING",
+          "Value": "1.14.0"
+        }
+      }
+    ],
+    "InstrumentationLibrary": {
+      "Name": "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc",
+      "Version": "semver:0.40.0",
+      "SchemaURL": ""
+    }
+  },
+  {
+    "Name": "load buildkit capabilities",
+    "SpanContext": {
+      "TraceID": "0555ce1903feb85770d102846273073b",
+      "SpanID": "c1b335eeeaf56405",
+      "TraceFlags": "00",
+      "TraceState": "",
+      "Remote": false
+    },
+    "Parent": {
+      "TraceID": "0555ce1903feb85770d102846273073b",
+      "SpanID": "d30eea87f2f0241d",
+      "TraceFlags": "00",
+      "TraceState": "",
+      "Remote": false
+    },
+    "SpanKind": 1,
+    "StartTime": "2024-01-12T14:57:40.147185565Z",
+    "EndTime": "2024-01-12T14:57:40.167978408Z",
+    "Attributes": null,
+    "Events": null,
+    "Links": null,
+    "Status": {
+      "Code": "Ok",
+      "Description": ""
+    },
+    "DroppedAttributes": 0,
+    "DroppedEvents": 0,
+    "DroppedLinks": 0,
+    "ChildSpanCount": 0,
+    "Resource": [
+      {
+        "Key": "service.name",
+        "Value": {
+          "Type": "STRING",
+          "Value": "buildx"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.language",
+        "Value": {
+          "Type": "STRING",
+          "Value": "go"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.name",
+        "Value": {
+          "Type": "STRING",
+          "Value": "opentelemetry"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.version",
+        "Value": {
+          "Type": "STRING",
+          "Value": "1.14.0"
+        }
+      }
+    ],
+    "InstrumentationLibrary": {
+      "Name": "go.opentelemetry.io/otel/sdk/tracer",
+      "Version": "",
+      "SchemaURL": ""
+    }
+  },
+  {
+    "Name": "moby.buildkit.v1.Control/ListWorkers",
+    "SpanContext": {
+      "TraceID": "0555ce1903feb85770d102846273073b",
+      "SpanID": "014a492f789cd8df",
+      "TraceFlags": "01",
+      "TraceState": "",
+      "Remote": false
+    },
+    "Parent": {
+      "TraceID": "0555ce1903feb85770d102846273073b",
+      "SpanID": "f5f33ab2ca194d1b",
+      "TraceFlags": "01",
+      "TraceState": "",
+      "Remote": true
+    },
+    "SpanKind": 2,
+    "StartTime": "2024-01-12T14:57:40.150333695Z",
+    "EndTime": "2024-01-12T14:57:40.16394451Z",
+    "Attributes": [
+      {
+        "Key": "rpc.system",
+        "Value": {
+          "Type": "STRING",
+          "Value": "grpc"
+        }
+      },
+      {
+        "Key": "rpc.service",
+        "Value": {
+          "Type": "STRING",
+          "Value": "moby.buildkit.v1.Control"
+        }
+      },
+      {
+        "Key": "rpc.method",
+        "Value": {
+          "Type": "STRING",
+          "Value": "ListWorkers"
+        }
+      },
+      {
+        "Key": "rpc.grpc.status_code",
+        "Value": {
+          "Type": "INT64",
+          "Value": 0
+        }
+      }
+    ],
+    "Events": null,
+    "Links": null,
+    "Status": {
+      "Code": "Unset",
+      "Description": ""
+    },
+    "DroppedAttributes": 0,
+    "DroppedEvents": 0,
+    "DroppedLinks": 0,
+    "ChildSpanCount": 0,
+    "Resource": [
+      {
+        "Key": "service.name",
+        "Value": {
+          "Type": "STRING",
+          "Value": "dockerd"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.language",
+        "Value": {
+          "Type": "STRING",
+          "Value": "go"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.name",
+        "Value": {
+          "Type": "STRING",
+          "Value": "opentelemetry"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.version",
+        "Value": {
+          "Type": "STRING",
+          "Value": "1.19.0"
+        }
+      }
+    ],
+    "InstrumentationLibrary": {
+      "Name": "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc",
+      "Version": "0.45.0",
+      "SchemaURL": ""
+    }
+  },
+  {
+    "Name": "moby.buildkit.v1.frontend.LLBBridge/Ping",
+    "SpanContext": {
+      "TraceID": "0555ce1903feb85770d102846273073b",
+      "SpanID": "348c867020d36163",
+      "TraceFlags": "01",
+      "TraceState": "",
+      "Remote": false
+    },
+    "Parent": {
+      "TraceID": "0555ce1903feb85770d102846273073b",
+      "SpanID": "1f64b377d3d03600",
+      "TraceFlags": "01",
+      "TraceState": "",
+      "Remote": true
+    },
+    "SpanKind": 2,
+    "StartTime": "2024-01-12T14:57:40.165969223Z",
+    "EndTime": "2024-01-12T14:57:40.166043331Z",
+    "Attributes": [
+      {
+        "Key": "rpc.system",
+        "Value": {
+          "Type": "STRING",
+          "Value": "grpc"
+        }
+      },
+      {
+        "Key": "rpc.service",
+        "Value": {
+          "Type": "STRING",
+          "Value": "moby.buildkit.v1.frontend.LLBBridge"
+        }
+      },
+      {
+        "Key": "rpc.method",
+        "Value": {
+          "Type": "STRING",
+          "Value": "Ping"
+        }
+      },
+      {
+        "Key": "rpc.grpc.status_code",
+        "Value": {
+          "Type": "INT64",
+          "Value": 0
+        }
+      }
+    ],
+    "Events": null,
+    "Links": null,
+    "Status": {
+      "Code": "Unset",
+      "Description": ""
+    },
+    "DroppedAttributes": 0,
+    "DroppedEvents": 0,
+    "DroppedLinks": 0,
+    "ChildSpanCount": 0,
+    "Resource": [
+      {
+        "Key": "service.name",
+        "Value": {
+          "Type": "STRING",
+          "Value": "dockerd"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.language",
+        "Value": {
+          "Type": "STRING",
+          "Value": "go"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.name",
+        "Value": {
+          "Type": "STRING",
+          "Value": "opentelemetry"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.version",
+        "Value": {
+          "Type": "STRING",
+          "Value": "1.19.0"
+        }
+      }
+    ],
+    "InstrumentationLibrary": {
+      "Name": "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc",
+      "Version": "0.45.0",
+      "SchemaURL": ""
+    }
+  },
+  {
+    "Name": "moby.buildkit.v1.frontend.LLBBridge/Return",
+    "SpanContext": {
+      "TraceID": "0555ce1903feb85770d102846273073b",
+      "SpanID": "8b7d202043a4ce8e",
+      "TraceFlags": "01",
+      "TraceState": "",
+      "Remote": false
+    },
+    "Parent": {
+      "TraceID": "0555ce1903feb85770d102846273073b",
+      "SpanID": "edb147a8e37c0b62",
+      "TraceFlags": "01",
+      "TraceState": "",
+      "Remote": true
+    },
+    "SpanKind": 2,
+    "StartTime": "2024-01-12T14:57:40.166855529Z",
+    "EndTime": "2024-01-12T14:57:40.166903889Z",
+    "Attributes": [
+      {
+        "Key": "rpc.system",
+        "Value": {
+          "Type": "STRING",
+          "Value": "grpc"
+        }
+      },
+      {
+        "Key": "rpc.service",
+        "Value": {
+          "Type": "STRING",
+          "Value": "moby.buildkit.v1.frontend.LLBBridge"
+        }
+      },
+      {
+        "Key": "rpc.method",
+        "Value": {
+          "Type": "STRING",
+          "Value": "Return"
+        }
+      },
+      {
+        "Key": "rpc.grpc.status_code",
+        "Value": {
+          "Type": "INT64",
+          "Value": 0
+        }
+      }
+    ],
+    "Events": null,
+    "Links": null,
+    "Status": {
+      "Code": "Unset",
+      "Description": ""
+    },
+    "DroppedAttributes": 0,
+    "DroppedEvents": 0,
+    "DroppedLinks": 0,
+    "ChildSpanCount": 0,
+    "Resource": [
+      {
+        "Key": "service.name",
+        "Value": {
+          "Type": "STRING",
+          "Value": "dockerd"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.language",
+        "Value": {
+          "Type": "STRING",
+          "Value": "go"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.name",
+        "Value": {
+          "Type": "STRING",
+          "Value": "opentelemetry"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.version",
+        "Value": {
+          "Type": "STRING",
+          "Value": "1.19.0"
+        }
+      }
+    ],
+    "InstrumentationLibrary": {
+      "Name": "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc",
+      "Version": "0.45.0",
+      "SchemaURL": ""
+    }
+  },
+  {
+    "Name": "moby.buildkit.v1.Control/Solve",
+    "SpanContext": {
+      "TraceID": "0555ce1903feb85770d102846273073b",
+      "SpanID": "3514a2ca1fb59328",
+      "TraceFlags": "01",
+      "TraceState": "",
+      "Remote": false
+    },
+    "Parent": {
+      "TraceID": "0555ce1903feb85770d102846273073b",
+      "SpanID": "be9c91e5fc1a8511",
+      "TraceFlags": "01",
+      "TraceState": "",
+      "Remote": true
+    },
+    "SpanKind": 2,
+    "StartTime": "2024-01-12T14:57:40.165882913Z",
+    "EndTime": "2024-01-12T14:57:40.167098121Z",
+    "Attributes": [
+      {
+        "Key": "rpc.system",
+        "Value": {
+          "Type": "STRING",
+          "Value": "grpc"
+        }
+      },
+      {
+        "Key": "rpc.service",
+        "Value": {
+          "Type": "STRING",
+          "Value": "moby.buildkit.v1.Control"
+        }
+      },
+      {
+        "Key": "rpc.method",
+        "Value": {
+          "Type": "STRING",
+          "Value": "Solve"
+        }
+      },
+      {
+        "Key": "rpc.grpc.status_code",
+        "Value": {
+          "Type": "INT64",
+          "Value": 0
+        }
+      }
+    ],
+    "Events": null,
+    "Links": null,
+    "Status": {
+      "Code": "Unset",
+      "Description": ""
+    },
+    "DroppedAttributes": 0,
+    "DroppedEvents": 0,
+    "DroppedLinks": 0,
+    "ChildSpanCount": 0,
+    "Resource": [
+      {
+        "Key": "service.name",
+        "Value": {
+          "Type": "STRING",
+          "Value": "dockerd"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.language",
+        "Value": {
+          "Type": "STRING",
+          "Value": "go"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.name",
+        "Value": {
+          "Type": "STRING",
+          "Value": "opentelemetry"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.version",
+        "Value": {
+          "Type": "STRING",
+          "Value": "1.19.0"
+        }
+      }
+    ],
+    "InstrumentationLibrary": {
+      "Name": "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc",
+      "Version": "0.45.0",
+      "SchemaURL": ""
+    }
+  },
+  {
+    "Name": "moby.buildkit.v1.Control/Status",
+    "SpanContext": {
+      "TraceID": "0555ce1903feb85770d102846273073b",
+      "SpanID": "5cd96a37a4409495",
+      "TraceFlags": "01",
+      "TraceState": "",
+      "Remote": false
+    },
+    "Parent": {
+      "TraceID": "0555ce1903feb85770d102846273073b",
+      "SpanID": "07376377d689755f",
+      "TraceFlags": "01",
+      "TraceState": "",
+      "Remote": true
+    },
+    "SpanKind": 2,
+    "StartTime": "2024-01-12T14:57:40.16580682Z",
+    "EndTime": "2024-01-12T14:57:40.167114423Z",
+    "Attributes": [
+      {
+        "Key": "rpc.system",
+        "Value": {
+          "Type": "STRING",
+          "Value": "grpc"
+        }
+      },
+      {
+        "Key": "rpc.service",
+        "Value": {
+          "Type": "STRING",
+          "Value": "moby.buildkit.v1.Control"
+        }
+      },
+      {
+        "Key": "rpc.method",
+        "Value": {
+          "Type": "STRING",
+          "Value": "Status"
+        }
+      },
+      {
+        "Key": "rpc.grpc.status_code",
+        "Value": {
+          "Type": "INT64",
+          "Value": 0
+        }
+      }
+    ],
+    "Events": null,
+    "Links": null,
+    "Status": {
+      "Code": "Unset",
+      "Description": ""
+    },
+    "DroppedAttributes": 0,
+    "DroppedEvents": 0,
+    "DroppedLinks": 0,
+    "ChildSpanCount": 0,
+    "Resource": [
+      {
+        "Key": "service.name",
+        "Value": {
+          "Type": "STRING",
+          "Value": "dockerd"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.language",
+        "Value": {
+          "Type": "STRING",
+          "Value": "go"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.name",
+        "Value": {
+          "Type": "STRING",
+          "Value": "opentelemetry"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.version",
+        "Value": {
+          "Type": "STRING",
+          "Value": "1.19.0"
+        }
+      }
+    ],
+    "InstrumentationLibrary": {
+      "Name": "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc",
+      "Version": "0.45.0",
+      "SchemaURL": ""
+    }
+  },
+  {
+    "Name": "moby.buildkit.v1.Control/ListWorkers",
+    "SpanContext": {
+      "TraceID": "0555ce1903feb85770d102846273073b",
+      "SpanID": "765af2593e65e310",
+      "TraceFlags": "01",
+      "TraceState": "",
+      "Remote": false
+    },
+    "Parent": {
+      "TraceID": "0555ce1903feb85770d102846273073b",
+      "SpanID": "fea7d265a9b437c2",
+      "TraceFlags": "01",
+      "TraceState": "",
+      "Remote": true
+    },
+    "SpanKind": 2,
+    "StartTime": "2024-01-12T14:57:40.3551397Z",
+    "EndTime": "2024-01-12T14:57:40.372322277Z",
+    "Attributes": [
+      {
+        "Key": "rpc.system",
+        "Value": {
+          "Type": "STRING",
+          "Value": "grpc"
+        }
+      },
+      {
+        "Key": "rpc.service",
+        "Value": {
+          "Type": "STRING",
+          "Value": "moby.buildkit.v1.Control"
+        }
+      },
+      {
+        "Key": "rpc.method",
+        "Value": {
+          "Type": "STRING",
+          "Value": "ListWorkers"
+        }
+      },
+      {
+        "Key": "rpc.grpc.status_code",
+        "Value": {
+          "Type": "INT64",
+          "Value": 0
+        }
+      }
+    ],
+    "Events": null,
+    "Links": null,
+    "Status": {
+      "Code": "Unset",
+      "Description": ""
+    },
+    "DroppedAttributes": 0,
+    "DroppedEvents": 0,
+    "DroppedLinks": 0,
+    "ChildSpanCount": 0,
+    "Resource": [
+      {
+        "Key": "service.name",
+        "Value": {
+          "Type": "STRING",
+          "Value": "dockerd"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.language",
+        "Value": {
+          "Type": "STRING",
+          "Value": "go"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.name",
+        "Value": {
+          "Type": "STRING",
+          "Value": "opentelemetry"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.version",
+        "Value": {
+          "Type": "STRING",
+          "Value": "1.19.0"
+        }
+      }
+    ],
+    "InstrumentationLibrary": {
+      "Name": "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc",
+      "Version": "0.45.0",
+      "SchemaURL": ""
+    }
+  },
+  {
+    "Name": "moby.buildkit.v1.Control/ListWorkers",
+    "SpanContext": {
+      "TraceID": "0555ce1903feb85770d102846273073b",
+      "SpanID": "8ebd38cfd2dae311",
+      "TraceFlags": "01",
+      "TraceState": "",
+      "Remote": false
+    },
+    "Parent": {
+      "TraceID": "0555ce1903feb85770d102846273073b",
+      "SpanID": "e1841a2a45f6de1a",
+      "TraceFlags": "01",
+      "TraceState": "",
+      "Remote": true
+    },
+    "SpanKind": 2,
+    "StartTime": "2024-01-12T14:57:40.382862076Z",
+    "EndTime": "2024-01-12T14:57:40.397843209Z",
+    "Attributes": [
+      {
+        "Key": "rpc.system",
+        "Value": {
+          "Type": "STRING",
+          "Value": "grpc"
+        }
+      },
+      {
+        "Key": "rpc.service",
+        "Value": {
+          "Type": "STRING",
+          "Value": "moby.buildkit.v1.Control"
+        }
+      },
+      {
+        "Key": "rpc.method",
+        "Value": {
+          "Type": "STRING",
+          "Value": "ListWorkers"
+        }
+      },
+      {
+        "Key": "rpc.grpc.status_code",
+        "Value": {
+          "Type": "INT64",
+          "Value": 0
+        }
+      }
+    ],
+    "Events": null,
+    "Links": null,
+    "Status": {
+      "Code": "Unset",
+      "Description": ""
+    },
+    "DroppedAttributes": 0,
+    "DroppedEvents": 0,
+    "DroppedLinks": 0,
+    "ChildSpanCount": 0,
+    "Resource": [
+      {
+        "Key": "service.name",
+        "Value": {
+          "Type": "STRING",
+          "Value": "dockerd"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.language",
+        "Value": {
+          "Type": "STRING",
+          "Value": "go"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.name",
+        "Value": {
+          "Type": "STRING",
+          "Value": "opentelemetry"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.version",
+        "Value": {
+          "Type": "STRING",
+          "Value": "1.19.0"
+        }
+      }
+    ],
+    "InstrumentationLibrary": {
+      "Name": "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc",
+      "Version": "0.45.0",
+      "SchemaURL": ""
+    }
+  },
+  {
+    "Name": "moby.buildkit.v1.frontend.LLBBridge/Ping",
+    "SpanContext": {
+      "TraceID": "0555ce1903feb85770d102846273073b",
+      "SpanID": "92290c7354b75ead",
+      "TraceFlags": "01",
+      "TraceState": "",
+      "Remote": false
+    },
+    "Parent": {
+      "TraceID": "0555ce1903feb85770d102846273073b",
+      "SpanID": "a4ee96f4ccd426ec",
+      "TraceFlags": "01",
+      "TraceState": "",
+      "Remote": true
+    },
+    "SpanKind": 2,
+    "StartTime": "2024-01-12T14:57:40.400135462Z",
+    "EndTime": "2024-01-12T14:57:40.400280345Z",
+    "Attributes": [
+      {
+        "Key": "rpc.system",
+        "Value": {
+          "Type": "STRING",
+          "Value": "grpc"
+        }
+      },
+      {
+        "Key": "rpc.service",
+        "Value": {
+          "Type": "STRING",
+          "Value": "moby.buildkit.v1.frontend.LLBBridge"
+        }
+      },
+      {
+        "Key": "rpc.method",
+        "Value": {
+          "Type": "STRING",
+          "Value": "Ping"
+        }
+      },
+      {
+        "Key": "rpc.grpc.status_code",
+        "Value": {
+          "Type": "INT64",
+          "Value": 0
+        }
+      }
+    ],
+    "Events": null,
+    "Links": null,
+    "Status": {
+      "Code": "Unset",
+      "Description": ""
+    },
+    "DroppedAttributes": 0,
+    "DroppedEvents": 0,
+    "DroppedLinks": 0,
+    "ChildSpanCount": 0,
+    "Resource": [
+      {
+        "Key": "service.name",
+        "Value": {
+          "Type": "STRING",
+          "Value": "dockerd"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.language",
+        "Value": {
+          "Type": "STRING",
+          "Value": "go"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.name",
+        "Value": {
+          "Type": "STRING",
+          "Value": "opentelemetry"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.version",
+        "Value": {
+          "Type": "STRING",
+          "Value": "1.19.0"
+        }
+      }
+    ],
+    "InstrumentationLibrary": {
+      "Name": "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc",
+      "Version": "0.45.0",
+      "SchemaURL": ""
+    }
+  },
+  {
+    "Name": "cache request: [internal] load build definition from Dockerfile",
+    "SpanContext": {
+      "TraceID": "0555ce1903feb85770d102846273073b",
+      "SpanID": "20676e8261684f34",
+      "TraceFlags": "01",
+      "TraceState": "",
+      "Remote": false
+    },
+    "Parent": {
+      "TraceID": "0555ce1903feb85770d102846273073b",
+      "SpanID": "14078881261b5312",
+      "TraceFlags": "01",
+      "TraceState": "",
+      "Remote": false
+    },
+    "SpanKind": 1,
+    "StartTime": "2024-01-12T14:57:40.401994836Z",
+    "EndTime": "2024-01-12T14:57:40.402041603Z",
+    "Attributes": [
+      {
+        "Key": "vertex",
+        "Value": {
+          "Type": "STRING",
+          "Value": "sha256:7bef658bc90b7db608c3df27d11b99f5785a6a6d1b5fe94ff63c69ae5093851c"
+        }
+      }
+    ],
+    "Events": null,
+    "Links": null,
+    "Status": {
+      "Code": "Unset",
+      "Description": ""
+    },
+    "DroppedAttributes": 0,
+    "DroppedEvents": 0,
+    "DroppedLinks": 0,
+    "ChildSpanCount": 0,
+    "Resource": [
+      {
+        "Key": "service.name",
+        "Value": {
+          "Type": "STRING",
+          "Value": "dockerd"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.language",
+        "Value": {
+          "Type": "STRING",
+          "Value": "go"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.name",
+        "Value": {
+          "Type": "STRING",
+          "Value": "opentelemetry"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.version",
+        "Value": {
+          "Type": "STRING",
+          "Value": "1.19.0"
+        }
+      }
+    ],
+    "InstrumentationLibrary": {
+      "Name": "go.opentelemetry.io/otel/sdk/tracer",
+      "Version": "",
+      "SchemaURL": ""
+    }
+  },
+  {
+    "Name": "moby.filesync.v1.FileSync/DiffCopy",
+    "SpanContext": {
+      "TraceID": "0555ce1903feb85770d102846273073b",
+      "SpanID": "70f459c5d3c2c488",
+      "TraceFlags": "01",
+      "TraceState": "",
+      "Remote": false
+    },
+    "Parent": {
+      "TraceID": "0555ce1903feb85770d102846273073b",
+      "SpanID": "6f6b0f30f558c4ae",
+      "TraceFlags": "01",
+      "TraceState": "",
+      "Remote": false
+    },
+    "SpanKind": 3,
+    "StartTime": "2024-01-12T14:57:40.408431648Z",
+    "EndTime": "2024-01-12T14:57:40.419083176Z",
+    "Attributes": [
+      {
+        "Key": "rpc.system",
+        "Value": {
+          "Type": "STRING",
+          "Value": "grpc"
+        }
+      },
+      {
+        "Key": "rpc.service",
+        "Value": {
+          "Type": "STRING",
+          "Value": "moby.filesync.v1.FileSync"
+        }
+      },
+      {
+        "Key": "rpc.method",
+        "Value": {
+          "Type": "STRING",
+          "Value": "DiffCopy"
+        }
+      },
+      {
+        "Key": "rpc.grpc.status_code",
+        "Value": {
+          "Type": "INT64",
+          "Value": 0
+        }
+      }
+    ],
+    "Events": null,
+    "Links": null,
+    "Status": {
+      "Code": "Unset",
+      "Description": ""
+    },
+    "DroppedAttributes": 0,
+    "DroppedEvents": 0,
+    "DroppedLinks": 0,
+    "ChildSpanCount": 0,
+    "Resource": [
+      {
+        "Key": "service.name",
+        "Value": {
+          "Type": "STRING",
+          "Value": "dockerd"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.language",
+        "Value": {
+          "Type": "STRING",
+          "Value": "go"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.name",
+        "Value": {
+          "Type": "STRING",
+          "Value": "opentelemetry"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.version",
+        "Value": {
+          "Type": "STRING",
+          "Value": "1.19.0"
+        }
+      }
+    ],
+    "InstrumentationLibrary": {
+      "Name": "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc",
+      "Version": "0.45.0",
+      "SchemaURL": ""
+    }
+  },
+  {
+    "Name": "[internal] load build definition from Dockerfile",
+    "SpanContext": {
+      "TraceID": "0555ce1903feb85770d102846273073b",
+      "SpanID": "6f6b0f30f558c4ae",
+      "TraceFlags": "01",
+      "TraceState": "",
+      "Remote": false
+    },
+    "Parent": {
+      "TraceID": "0555ce1903feb85770d102846273073b",
+      "SpanID": "14078881261b5312",
+      "TraceFlags": "01",
+      "TraceState": "",
+      "Remote": false
+    },
+    "SpanKind": 1,
+    "StartTime": "2024-01-12T14:57:40.402159874Z",
+    "EndTime": "2024-01-12T14:57:40.433614573Z",
+    "Attributes": [
+      {
+        "Key": "vertex",
+        "Value": {
+          "Type": "STRING",
+          "Value": "sha256:7bef658bc90b7db608c3df27d11b99f5785a6a6d1b5fe94ff63c69ae5093851c"
+        }
+      }
+    ],
+    "Events": null,
+    "Links": null,
+    "Status": {
+      "Code": "Unset",
+      "Description": ""
+    },
+    "DroppedAttributes": 0,
+    "DroppedEvents": 0,
+    "DroppedLinks": 0,
+    "ChildSpanCount": 1,
+    "Resource": [
+      {
+        "Key": "service.name",
+        "Value": {
+          "Type": "STRING",
+          "Value": "dockerd"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.language",
+        "Value": {
+          "Type": "STRING",
+          "Value": "go"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.name",
+        "Value": {
+          "Type": "STRING",
+          "Value": "opentelemetry"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.version",
+        "Value": {
+          "Type": "STRING",
+          "Value": "1.19.0"
+        }
+      }
+    ],
+    "InstrumentationLibrary": {
+      "Name": "go.opentelemetry.io/otel/sdk/tracer",
+      "Version": "",
+      "SchemaURL": ""
+    }
+  },
+  {
+    "Name": "moby.filesync.v1.Auth/VerifyTokenAuthority",
+    "SpanContext": {
+      "TraceID": "0555ce1903feb85770d102846273073b",
+      "SpanID": "f3a80839f6b95729",
+      "TraceFlags": "01",
+      "TraceState": "",
+      "Remote": false
+    },
+    "Parent": {
+      "TraceID": "0555ce1903feb85770d102846273073b",
+      "SpanID": "14078881261b5312",
+      "TraceFlags": "01",
+      "TraceState": "",
+      "Remote": false
+    },
+    "SpanKind": 3,
+    "StartTime": "2024-01-12T14:57:40.444500758Z",
+    "EndTime": "2024-01-12T14:57:40.524117405Z",
+    "Attributes": [
+      {
+        "Key": "rpc.system",
+        "Value": {
+          "Type": "STRING",
+          "Value": "grpc"
+        }
+      },
+      {
+        "Key": "rpc.service",
+        "Value": {
+          "Type": "STRING",
+          "Value": "moby.filesync.v1.Auth"
+        }
+      },
+      {
+        "Key": "rpc.method",
+        "Value": {
+          "Type": "STRING",
+          "Value": "VerifyTokenAuthority"
+        }
+      },
+      {
+        "Key": "rpc.grpc.status_code",
+        "Value": {
+          "Type": "INT64",
+          "Value": 0
+        }
+      }
+    ],
+    "Events": null,
+    "Links": null,
+    "Status": {
+      "Code": "Unset",
+      "Description": ""
+    },
+    "DroppedAttributes": 0,
+    "DroppedEvents": 0,
+    "DroppedLinks": 0,
+    "ChildSpanCount": 0,
+    "Resource": [
+      {
+        "Key": "service.name",
+        "Value": {
+          "Type": "STRING",
+          "Value": "dockerd"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.language",
+        "Value": {
+          "Type": "STRING",
+          "Value": "go"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.name",
+        "Value": {
+          "Type": "STRING",
+          "Value": "opentelemetry"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.version",
+        "Value": {
+          "Type": "STRING",
+          "Value": "1.19.0"
+        }
+      }
+    ],
+    "InstrumentationLibrary": {
+      "Name": "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc",
+      "Version": "0.45.0",
+      "SchemaURL": ""
+    }
+  },
+  {
+    "Name": "moby.filesync.v1.Auth/FetchToken",
+    "SpanContext": {
+      "TraceID": "0555ce1903feb85770d102846273073b",
+      "SpanID": "f26320c4e0b697ac",
+      "TraceFlags": "01",
+      "TraceState": "",
+      "Remote": false
+    },
+    "Parent": {
+      "TraceID": "0555ce1903feb85770d102846273073b",
+      "SpanID": "14078881261b5312",
+      "TraceFlags": "01",
+      "TraceState": "",
+      "Remote": false
+    },
+    "SpanKind": 3,
+    "StartTime": "2024-01-12T14:57:40.524541768Z",
+    "EndTime": "2024-01-12T14:57:40.901439933Z",
+    "Attributes": [
+      {
+        "Key": "rpc.system",
+        "Value": {
+          "Type": "STRING",
+          "Value": "grpc"
+        }
+      },
+      {
+        "Key": "rpc.service",
+        "Value": {
+          "Type": "STRING",
+          "Value": "moby.filesync.v1.Auth"
+        }
+      },
+      {
+        "Key": "rpc.method",
+        "Value": {
+          "Type": "STRING",
+          "Value": "FetchToken"
+        }
+      },
+      {
+        "Key": "rpc.grpc.status_code",
+        "Value": {
+          "Type": "INT64",
+          "Value": 0
+        }
+      }
+    ],
+    "Events": null,
+    "Links": null,
+    "Status": {
+      "Code": "Unset",
+      "Description": ""
+    },
+    "DroppedAttributes": 0,
+    "DroppedEvents": 0,
+    "DroppedLinks": 0,
+    "ChildSpanCount": 0,
+    "Resource": [
+      {
+        "Key": "service.name",
+        "Value": {
+          "Type": "STRING",
+          "Value": "dockerd"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.language",
+        "Value": {
+          "Type": "STRING",
+          "Value": "go"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.name",
+        "Value": {
+          "Type": "STRING",
+          "Value": "opentelemetry"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.version",
+        "Value": {
+          "Type": "STRING",
+          "Value": "1.19.0"
+        }
+      }
+    ],
+    "InstrumentationLibrary": {
+      "Name": "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc",
+      "Version": "0.45.0",
+      "SchemaURL": ""
+    }
+  },
+  {
+    "Name": "remotes.docker.resolver.HTTPRequest",
+    "SpanContext": {
+      "TraceID": "0555ce1903feb85770d102846273073b",
+      "SpanID": "0e8cc109bf2f9cfa",
+      "TraceFlags": "01",
+      "TraceState": "",
+      "Remote": false
+    },
+    "Parent": {
+      "TraceID": "0555ce1903feb85770d102846273073b",
+      "SpanID": "14078881261b5312",
+      "TraceFlags": "01",
+      "TraceState": "",
+      "Remote": false
+    },
+    "SpanKind": 3,
+    "StartTime": "2024-01-12T14:57:40.901513761Z",
+    "EndTime": "2024-01-12T14:57:41.271655436Z",
+    "Attributes": [
+      {
+        "Key": "http.method",
+        "Value": {
+          "Type": "STRING",
+          "Value": "HEAD"
+        }
+      },
+      {
+        "Key": "http.flavor",
+        "Value": {
+          "Type": "STRING",
+          "Value": "1.1"
+        }
+      },
+      {
+        "Key": "http.url",
+        "Value": {
+          "Type": "STRING",
+          "Value": "https://registry-1.docker.io/v2/docker/dockerfile/manifests/1.5"
+        }
+      },
+      {
+        "Key": "net.peer.name",
+        "Value": {
+          "Type": "STRING",
+          "Value": "registry-1.docker.io"
+        }
+      },
+      {
+        "Key": "http.user_agent",
+        "Value": {
+          "Type": "STRING",
+          "Value": "buildkit/v0.12-dev"
+        }
+      },
+      {
+        "Key": "http.status_code",
+        "Value": {
+          "Type": "INT64",
+          "Value": 200
+        }
+      },
+      {
+        "Key": "http.response_content_length",
+        "Value": {
+          "Type": "INT64",
+          "Value": 8404
+        }
+      }
+    ],
+    "Events": null,
+    "Links": null,
+    "Status": {
+      "Code": "Unset",
+      "Description": ""
+    },
+    "DroppedAttributes": 0,
+    "DroppedEvents": 0,
+    "DroppedLinks": 0,
+    "ChildSpanCount": 1,
+    "Resource": [
+      {
+        "Key": "service.name",
+        "Value": {
+          "Type": "STRING",
+          "Value": "dockerd"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.language",
+        "Value": {
+          "Type": "STRING",
+          "Value": "go"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.name",
+        "Value": {
+          "Type": "STRING",
+          "Value": "opentelemetry"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.version",
+        "Value": {
+          "Type": "STRING",
+          "Value": "1.19.0"
+        }
+      }
+    ],
+    "InstrumentationLibrary": {
+      "Name": "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp",
+      "Version": "0.45.0",
+      "SchemaURL": ""
+    }
+  },
+  {
+    "Name": "HTTP HEAD",
+    "SpanContext": {
+      "TraceID": "0555ce1903feb85770d102846273073b",
+      "SpanID": "f1f5f1e324025582",
+      "TraceFlags": "01",
+      "TraceState": "",
+      "Remote": false
+    },
+    "Parent": {
+      "TraceID": "0555ce1903feb85770d102846273073b",
+      "SpanID": "0e8cc109bf2f9cfa",
+      "TraceFlags": "01",
+      "TraceState": "",
+      "Remote": false
+    },
+    "SpanKind": 3,
+    "StartTime": "2024-01-12T14:57:40.901537045Z",
+    "EndTime": "2024-01-12T14:57:41.271672779Z",
+    "Attributes": [
+      {
+        "Key": "http.method",
+        "Value": {
+          "Type": "STRING",
+          "Value": "HEAD"
+        }
+      },
+      {
+        "Key": "http.flavor",
+        "Value": {
+          "Type": "STRING",
+          "Value": "1.1"
+        }
+      },
+      {
+        "Key": "http.url",
+        "Value": {
+          "Type": "STRING",
+          "Value": "https://registry-1.docker.io/v2/docker/dockerfile/manifests/1.5"
+        }
+      },
+      {
+        "Key": "net.peer.name",
+        "Value": {
+          "Type": "STRING",
+          "Value": "registry-1.docker.io"
+        }
+      },
+      {
+        "Key": "http.user_agent",
+        "Value": {
+          "Type": "STRING",
+          "Value": "buildkit/v0.12-dev"
+        }
+      },
+      {
+        "Key": "http.request.header.host",
+        "Value": {
+          "Type": "STRING",
+          "Value": "registry-1.docker.io"
+        }
+      },
+      {
+        "Key": "http.request.header.user-agent",
+        "Value": {
+          "Type": "STRING",
+          "Value": "buildkit/v0.12-dev"
+        }
+      },
+      {
+        "Key": "http.request.header.accept",
+        "Value": {
+          "Type": "STRING",
+          "Value": "application/vnd.docker.distribution.manifest.v2+json, application/vnd.docker.distribution.manifest.list.v2+json, application/vnd.oci.image.manifest.v1+json, application/vnd.oci.image.index.v1+json, */*"
+        }
+      },
+      {
+        "Key": "http.request.header.authorization",
+        "Value": {
+          "Type": "STRING",
+          "Value": "****"
+        }
+      },
+      {
+        "Key": "http.request.header.traceparent",
+        "Value": {
+          "Type": "STRING",
+          "Value": "00-0555ce1903feb85770d102846273073b-f1f5f1e324025582-01"
+        }
+      },
+      {
+        "Key": "http.status_code",
+        "Value": {
+          "Type": "INT64",
+          "Value": 200
+        }
+      },
+      {
+        "Key": "http.response_content_length",
+        "Value": {
+          "Type": "INT64",
+          "Value": 8404
+        }
+      }
+    ],
+    "Events": [
+      {
+        "Name": "http.getconn.start",
+        "Attributes": [
+          {
+            "Key": "net.host.name",
+            "Value": {
+              "Type": "STRING",
+              "Value": "http.docker.internal:3128"
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:40.901613958Z"
+      },
+      {
+        "Name": "http.dns.start",
+        "Attributes": [
+          {
+            "Key": "net.host.name",
+            "Value": {
+              "Type": "STRING",
+              "Value": "http.docker.internal"
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:40.901691854Z"
+      },
+      {
+        "Name": "http.dns.done",
+        "Attributes": [
+          {
+            "Key": "http.dns.addrs",
+            "Value": {
+              "Type": "STRING",
+              "Value": "192.168.65.1"
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:40.903005838Z"
+      },
+      {
+        "Name": "http.connect.192.168.65.1:3128.start",
+        "Attributes": [
+          {
+            "Key": "http.remote",
+            "Value": {
+              "Type": "STRING",
+              "Value": "192.168.65.1:3128"
+            }
+          },
+          {
+            "Key": "http.conn.start.network",
+            "Value": {
+              "Type": "STRING",
+              "Value": "tcp"
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:40.903010567Z"
+      },
+      {
+        "Name": "http.connect.192.168.65.1:3128.done",
+        "Attributes": [
+          {
+            "Key": "http.conn.done.addr",
+            "Value": {
+              "Type": "STRING",
+              "Value": "192.168.65.1:3128"
+            }
+          },
+          {
+            "Key": "http.conn.done.network",
+            "Value": {
+              "Type": "STRING",
+              "Value": "tcp"
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:40.903518707Z"
+      },
+      {
+        "Name": "http.tls.start",
+        "Attributes": null,
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:40.935298552Z"
+      },
+      {
+        "Name": "http.tls.done",
+        "Attributes": null,
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:41.125870978Z"
+      },
+      {
+        "Name": "http.getconn.done",
+        "Attributes": [
+          {
+            "Key": "http.remote",
+            "Value": {
+              "Type": "STRING",
+              "Value": "192.168.65.1:3128"
+            }
+          },
+          {
+            "Key": "http.local",
+            "Value": {
+              "Type": "STRING",
+              "Value": "192.168.65.3:41236"
+            }
+          },
+          {
+            "Key": "http.conn.reused",
+            "Value": {
+              "Type": "BOOL",
+              "Value": false
+            }
+          },
+          {
+            "Key": "http.conn.wasidle",
+            "Value": {
+              "Type": "BOOL",
+              "Value": false
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:41.12590922Z"
+      },
+      {
+        "Name": "http.send.start",
+        "Attributes": null,
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:41.125942271Z"
+      },
+      {
+        "Name": "http.send.done",
+        "Attributes": null,
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:41.125943253Z"
+      },
+      {
+        "Name": "http.receive.start",
+        "Attributes": null,
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:41.271591096Z"
+      },
+      {
+        "Name": "http.receive.done",
+        "Attributes": null,
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:41.271624759Z"
+      }
+    ],
+    "Links": null,
+    "Status": {
+      "Code": "Unset",
+      "Description": ""
+    },
+    "DroppedAttributes": 0,
+    "DroppedEvents": 0,
+    "DroppedLinks": 0,
+    "ChildSpanCount": 0,
+    "Resource": [
+      {
+        "Key": "service.name",
+        "Value": {
+          "Type": "STRING",
+          "Value": "dockerd"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.language",
+        "Value": {
+          "Type": "STRING",
+          "Value": "go"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.name",
+        "Value": {
+          "Type": "STRING",
+          "Value": "opentelemetry"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.version",
+        "Value": {
+          "Type": "STRING",
+          "Value": "1.19.0"
+        }
+      }
+    ],
+    "InstrumentationLibrary": {
+      "Name": "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp",
+      "Version": "0.45.0",
+      "SchemaURL": ""
+    }
+  },
+  {
+    "Name": "cache request: docker-image://docker.io/docker/dockerfile:1.5@sha256:39b85bbfa7536a5feceb7372a0817649ecb2724562a38360f4d6a7782a409b14",
+    "SpanContext": {
+      "TraceID": "0555ce1903feb85770d102846273073b",
+      "SpanID": "6e0d97923598173a",
+      "TraceFlags": "01",
+      "TraceState": "",
+      "Remote": false
+    },
+    "Parent": {
+      "TraceID": "0555ce1903feb85770d102846273073b",
+      "SpanID": "14078881261b5312",
+      "TraceFlags": "01",
+      "TraceState": "",
+      "Remote": false
+    },
+    "SpanKind": 1,
+    "StartTime": "2024-01-12T14:57:41.298320807Z",
+    "EndTime": "2024-01-12T14:57:41.298458013Z",
+    "Attributes": [
+      {
+        "Key": "vertex",
+        "Value": {
+          "Type": "STRING",
+          "Value": "sha256:79dd01740e708982847bb0010c8505e266b4f72ed0ffa354f38e205a15ec3b00"
+        }
+      }
+    ],
+    "Events": null,
+    "Links": null,
+    "Status": {
+      "Code": "Unset",
+      "Description": ""
+    },
+    "DroppedAttributes": 0,
+    "DroppedEvents": 0,
+    "DroppedLinks": 0,
+    "ChildSpanCount": 0,
+    "Resource": [
+      {
+        "Key": "service.name",
+        "Value": {
+          "Type": "STRING",
+          "Value": "dockerd"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.language",
+        "Value": {
+          "Type": "STRING",
+          "Value": "go"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.name",
+        "Value": {
+          "Type": "STRING",
+          "Value": "opentelemetry"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.version",
+        "Value": {
+          "Type": "STRING",
+          "Value": "1.19.0"
+        }
+      }
+    ],
+    "InstrumentationLibrary": {
+      "Name": "go.opentelemetry.io/otel/sdk/tracer",
+      "Version": "",
+      "SchemaURL": ""
+    }
+  },
+  {
+    "Name": "load cache: docker-image://docker.io/docker/dockerfile:1.5@sha256:39b85bbfa7536a5feceb7372a0817649ecb2724562a38360f4d6a7782a409b14",
+    "SpanContext": {
+      "TraceID": "0555ce1903feb85770d102846273073b",
+      "SpanID": "8b3cf8274861214e",
+      "TraceFlags": "01",
+      "TraceState": "",
+      "Remote": false
+    },
+    "Parent": {
+      "TraceID": "0555ce1903feb85770d102846273073b",
+      "SpanID": "14078881261b5312",
+      "TraceFlags": "01",
+      "TraceState": "",
+      "Remote": false
+    },
+    "SpanKind": 1,
+    "StartTime": "2024-01-12T14:57:41.298789349Z",
+    "EndTime": "2024-01-12T14:57:41.298824265Z",
+    "Attributes": [
+      {
+        "Key": "vertex",
+        "Value": {
+          "Type": "STRING",
+          "Value": "sha256:79dd01740e708982847bb0010c8505e266b4f72ed0ffa354f38e205a15ec3b00"
+        }
+      }
+    ],
+    "Events": null,
+    "Links": null,
+    "Status": {
+      "Code": "Unset",
+      "Description": ""
+    },
+    "DroppedAttributes": 0,
+    "DroppedEvents": 0,
+    "DroppedLinks": 0,
+    "ChildSpanCount": 0,
+    "Resource": [
+      {
+        "Key": "service.name",
+        "Value": {
+          "Type": "STRING",
+          "Value": "dockerd"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.language",
+        "Value": {
+          "Type": "STRING",
+          "Value": "go"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.name",
+        "Value": {
+          "Type": "STRING",
+          "Value": "opentelemetry"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.version",
+        "Value": {
+          "Type": "STRING",
+          "Value": "1.19.0"
+        }
+      }
+    ],
+    "InstrumentationLibrary": {
+      "Name": "go.opentelemetry.io/otel/sdk/tracer",
+      "Version": "",
+      "SchemaURL": ""
+    }
+  },
+  {
+    "Name": "cache request: [internal] load .dockerignore",
+    "SpanContext": {
+      "TraceID": "0555ce1903feb85770d102846273073b",
+      "SpanID": "8303f71942c86c2d",
+      "TraceFlags": "01",
+      "TraceState": "",
+      "Remote": false
+    },
+    "Parent": {
+      "TraceID": "0555ce1903feb85770d102846273073b",
+      "SpanID": "14078881261b5312",
+      "TraceFlags": "01",
+      "TraceState": "",
+      "Remote": false
+    },
+    "SpanKind": 1,
+    "StartTime": "2024-01-12T14:57:41.450353579Z",
+    "EndTime": "2024-01-12T14:57:41.450380529Z",
+    "Attributes": [
+      {
+        "Key": "vertex",
+        "Value": {
+          "Type": "STRING",
+          "Value": "sha256:b9cb39feb3f4ca5b899481fca81551c8eeb5496ccc8b8134b2bc80786efdb313"
+        }
+      }
+    ],
+    "Events": null,
+    "Links": null,
+    "Status": {
+      "Code": "Unset",
+      "Description": ""
+    },
+    "DroppedAttributes": 0,
+    "DroppedEvents": 0,
+    "DroppedLinks": 0,
+    "ChildSpanCount": 0,
+    "Resource": [
+      {
+        "Key": "service.name",
+        "Value": {
+          "Type": "STRING",
+          "Value": "dockerd"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.language",
+        "Value": {
+          "Type": "STRING",
+          "Value": "go"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.name",
+        "Value": {
+          "Type": "STRING",
+          "Value": "opentelemetry"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.version",
+        "Value": {
+          "Type": "STRING",
+          "Value": "1.19.0"
+        }
+      }
+    ],
+    "InstrumentationLibrary": {
+      "Name": "go.opentelemetry.io/otel/sdk/tracer",
+      "Version": "",
+      "SchemaURL": ""
+    }
+  },
+  {
+    "Name": "moby.filesync.v1.FileSync/DiffCopy",
+    "SpanContext": {
+      "TraceID": "0555ce1903feb85770d102846273073b",
+      "SpanID": "50509a5bad270153",
+      "TraceFlags": "01",
+      "TraceState": "",
+      "Remote": false
+    },
+    "Parent": {
+      "TraceID": "0555ce1903feb85770d102846273073b",
+      "SpanID": "902f8408a5d9d83c",
+      "TraceFlags": "01",
+      "TraceState": "",
+      "Remote": false
+    },
+    "SpanKind": 3,
+    "StartTime": "2024-01-12T14:57:41.455977674Z",
+    "EndTime": "2024-01-12T14:57:41.464570167Z",
+    "Attributes": [
+      {
+        "Key": "rpc.system",
+        "Value": {
+          "Type": "STRING",
+          "Value": "grpc"
+        }
+      },
+      {
+        "Key": "rpc.service",
+        "Value": {
+          "Type": "STRING",
+          "Value": "moby.filesync.v1.FileSync"
+        }
+      },
+      {
+        "Key": "rpc.method",
+        "Value": {
+          "Type": "STRING",
+          "Value": "DiffCopy"
+        }
+      },
+      {
+        "Key": "rpc.grpc.status_code",
+        "Value": {
+          "Type": "INT64",
+          "Value": 0
+        }
+      }
+    ],
+    "Events": null,
+    "Links": null,
+    "Status": {
+      "Code": "Unset",
+      "Description": ""
+    },
+    "DroppedAttributes": 0,
+    "DroppedEvents": 0,
+    "DroppedLinks": 0,
+    "ChildSpanCount": 0,
+    "Resource": [
+      {
+        "Key": "service.name",
+        "Value": {
+          "Type": "STRING",
+          "Value": "dockerd"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.language",
+        "Value": {
+          "Type": "STRING",
+          "Value": "go"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.name",
+        "Value": {
+          "Type": "STRING",
+          "Value": "opentelemetry"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.version",
+        "Value": {
+          "Type": "STRING",
+          "Value": "1.19.0"
+        }
+      }
+    ],
+    "InstrumentationLibrary": {
+      "Name": "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc",
+      "Version": "0.45.0",
+      "SchemaURL": ""
+    }
+  },
+  {
+    "Name": "[internal] load .dockerignore",
+    "SpanContext": {
+      "TraceID": "0555ce1903feb85770d102846273073b",
+      "SpanID": "902f8408a5d9d83c",
+      "TraceFlags": "01",
+      "TraceState": "",
+      "Remote": false
+    },
+    "Parent": {
+      "TraceID": "0555ce1903feb85770d102846273073b",
+      "SpanID": "14078881261b5312",
+      "TraceFlags": "01",
+      "TraceState": "",
+      "Remote": false
+    },
+    "SpanKind": 1,
+    "StartTime": "2024-01-12T14:57:41.450444639Z",
+    "EndTime": "2024-01-12T14:57:41.480068437Z",
+    "Attributes": [
+      {
+        "Key": "vertex",
+        "Value": {
+          "Type": "STRING",
+          "Value": "sha256:b9cb39feb3f4ca5b899481fca81551c8eeb5496ccc8b8134b2bc80786efdb313"
+        }
+      }
+    ],
+    "Events": null,
+    "Links": null,
+    "Status": {
+      "Code": "Unset",
+      "Description": ""
+    },
+    "DroppedAttributes": 0,
+    "DroppedEvents": 0,
+    "DroppedLinks": 0,
+    "ChildSpanCount": 1,
+    "Resource": [
+      {
+        "Key": "service.name",
+        "Value": {
+          "Type": "STRING",
+          "Value": "dockerd"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.language",
+        "Value": {
+          "Type": "STRING",
+          "Value": "go"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.name",
+        "Value": {
+          "Type": "STRING",
+          "Value": "opentelemetry"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.version",
+        "Value": {
+          "Type": "STRING",
+          "Value": "1.19.0"
+        }
+      }
+    ],
+    "InstrumentationLibrary": {
+      "Name": "go.opentelemetry.io/otel/sdk/tracer",
+      "Version": "",
+      "SchemaURL": ""
+    }
+  },
+  {
+    "Name": "moby.filesync.v1.Auth/VerifyTokenAuthority",
+    "SpanContext": {
+      "TraceID": "0555ce1903feb85770d102846273073b",
+      "SpanID": "6b3f3a9d50fbe98b",
+      "TraceFlags": "01",
+      "TraceState": "",
+      "Remote": false
+    },
+    "Parent": {
+      "TraceID": "0555ce1903feb85770d102846273073b",
+      "SpanID": "14078881261b5312",
+      "TraceFlags": "01",
+      "TraceState": "",
+      "Remote": false
+    },
+    "SpanKind": 3,
+    "StartTime": "2024-01-12T14:57:41.493477345Z",
+    "EndTime": "2024-01-12T14:57:41.494484637Z",
+    "Attributes": [
+      {
+        "Key": "rpc.system",
+        "Value": {
+          "Type": "STRING",
+          "Value": "grpc"
+        }
+      },
+      {
+        "Key": "rpc.service",
+        "Value": {
+          "Type": "STRING",
+          "Value": "moby.filesync.v1.Auth"
+        }
+      },
+      {
+        "Key": "rpc.method",
+        "Value": {
+          "Type": "STRING",
+          "Value": "VerifyTokenAuthority"
+        }
+      },
+      {
+        "Key": "rpc.grpc.status_code",
+        "Value": {
+          "Type": "INT64",
+          "Value": 0
+        }
+      }
+    ],
+    "Events": null,
+    "Links": null,
+    "Status": {
+      "Code": "Unset",
+      "Description": ""
+    },
+    "DroppedAttributes": 0,
+    "DroppedEvents": 0,
+    "DroppedLinks": 0,
+    "ChildSpanCount": 0,
+    "Resource": [
+      {
+        "Key": "service.name",
+        "Value": {
+          "Type": "STRING",
+          "Value": "dockerd"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.language",
+        "Value": {
+          "Type": "STRING",
+          "Value": "go"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.name",
+        "Value": {
+          "Type": "STRING",
+          "Value": "opentelemetry"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.version",
+        "Value": {
+          "Type": "STRING",
+          "Value": "1.19.0"
+        }
+      }
+    ],
+    "InstrumentationLibrary": {
+      "Name": "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc",
+      "Version": "0.45.0",
+      "SchemaURL": ""
+    }
+  },
+  {
+    "Name": "moby.filesync.v1.Auth/VerifyTokenAuthority",
+    "SpanContext": {
+      "TraceID": "0555ce1903feb85770d102846273073b",
+      "SpanID": "f857d0002991458c",
+      "TraceFlags": "01",
+      "TraceState": "",
+      "Remote": false
+    },
+    "Parent": {
+      "TraceID": "0555ce1903feb85770d102846273073b",
+      "SpanID": "14078881261b5312",
+      "TraceFlags": "01",
+      "TraceState": "",
+      "Remote": false
+    },
+    "SpanKind": 3,
+    "StartTime": "2024-01-12T14:57:41.497835726Z",
+    "EndTime": "2024-01-12T14:57:41.498546724Z",
+    "Attributes": [
+      {
+        "Key": "rpc.system",
+        "Value": {
+          "Type": "STRING",
+          "Value": "grpc"
+        }
+      },
+      {
+        "Key": "rpc.service",
+        "Value": {
+          "Type": "STRING",
+          "Value": "moby.filesync.v1.Auth"
+        }
+      },
+      {
+        "Key": "rpc.method",
+        "Value": {
+          "Type": "STRING",
+          "Value": "VerifyTokenAuthority"
+        }
+      },
+      {
+        "Key": "rpc.grpc.status_code",
+        "Value": {
+          "Type": "INT64",
+          "Value": 0
+        }
+      }
+    ],
+    "Events": null,
+    "Links": null,
+    "Status": {
+      "Code": "Unset",
+      "Description": ""
+    },
+    "DroppedAttributes": 0,
+    "DroppedEvents": 0,
+    "DroppedLinks": 0,
+    "ChildSpanCount": 0,
+    "Resource": [
+      {
+        "Key": "service.name",
+        "Value": {
+          "Type": "STRING",
+          "Value": "dockerd"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.language",
+        "Value": {
+          "Type": "STRING",
+          "Value": "go"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.name",
+        "Value": {
+          "Type": "STRING",
+          "Value": "opentelemetry"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.version",
+        "Value": {
+          "Type": "STRING",
+          "Value": "1.19.0"
+        }
+      }
+    ],
+    "InstrumentationLibrary": {
+      "Name": "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc",
+      "Version": "0.45.0",
+      "SchemaURL": ""
+    }
+  },
+  {
+    "Name": "moby.filesync.v1.Auth/FetchToken",
+    "SpanContext": {
+      "TraceID": "0555ce1903feb85770d102846273073b",
+      "SpanID": "912de85fdfd20fe7",
+      "TraceFlags": "01",
+      "TraceState": "",
+      "Remote": false
+    },
+    "Parent": {
+      "TraceID": "0555ce1903feb85770d102846273073b",
+      "SpanID": "14078881261b5312",
+      "TraceFlags": "01",
+      "TraceState": "",
+      "Remote": false
+    },
+    "SpanKind": 3,
+    "StartTime": "2024-01-12T14:57:41.498707645Z",
+    "EndTime": "2024-01-12T14:57:41.842024623Z",
+    "Attributes": [
+      {
+        "Key": "rpc.system",
+        "Value": {
+          "Type": "STRING",
+          "Value": "grpc"
+        }
+      },
+      {
+        "Key": "rpc.service",
+        "Value": {
+          "Type": "STRING",
+          "Value": "moby.filesync.v1.Auth"
+        }
+      },
+      {
+        "Key": "rpc.method",
+        "Value": {
+          "Type": "STRING",
+          "Value": "FetchToken"
+        }
+      },
+      {
+        "Key": "rpc.grpc.status_code",
+        "Value": {
+          "Type": "INT64",
+          "Value": 0
+        }
+      }
+    ],
+    "Events": null,
+    "Links": null,
+    "Status": {
+      "Code": "Unset",
+      "Description": ""
+    },
+    "DroppedAttributes": 0,
+    "DroppedEvents": 0,
+    "DroppedLinks": 0,
+    "ChildSpanCount": 0,
+    "Resource": [
+      {
+        "Key": "service.name",
+        "Value": {
+          "Type": "STRING",
+          "Value": "dockerd"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.language",
+        "Value": {
+          "Type": "STRING",
+          "Value": "go"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.name",
+        "Value": {
+          "Type": "STRING",
+          "Value": "opentelemetry"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.version",
+        "Value": {
+          "Type": "STRING",
+          "Value": "1.19.0"
+        }
+      }
+    ],
+    "InstrumentationLibrary": {
+      "Name": "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc",
+      "Version": "0.45.0",
+      "SchemaURL": ""
+    }
+  },
+  {
+    "Name": "moby.filesync.v1.Auth/FetchToken",
+    "SpanContext": {
+      "TraceID": "0555ce1903feb85770d102846273073b",
+      "SpanID": "d9ed48c061e2ecfd",
+      "TraceFlags": "01",
+      "TraceState": "",
+      "Remote": false
+    },
+    "Parent": {
+      "TraceID": "0555ce1903feb85770d102846273073b",
+      "SpanID": "14078881261b5312",
+      "TraceFlags": "01",
+      "TraceState": "",
+      "Remote": false
+    },
+    "SpanKind": 3,
+    "StartTime": "2024-01-12T14:57:41.494633044Z",
+    "EndTime": "2024-01-12T14:57:41.844336811Z",
+    "Attributes": [
+      {
+        "Key": "rpc.system",
+        "Value": {
+          "Type": "STRING",
+          "Value": "grpc"
+        }
+      },
+      {
+        "Key": "rpc.service",
+        "Value": {
+          "Type": "STRING",
+          "Value": "moby.filesync.v1.Auth"
+        }
+      },
+      {
+        "Key": "rpc.method",
+        "Value": {
+          "Type": "STRING",
+          "Value": "FetchToken"
+        }
+      },
+      {
+        "Key": "rpc.grpc.status_code",
+        "Value": {
+          "Type": "INT64",
+          "Value": 0
+        }
+      }
+    ],
+    "Events": null,
+    "Links": null,
+    "Status": {
+      "Code": "Unset",
+      "Description": ""
+    },
+    "DroppedAttributes": 0,
+    "DroppedEvents": 0,
+    "DroppedLinks": 0,
+    "ChildSpanCount": 0,
+    "Resource": [
+      {
+        "Key": "service.name",
+        "Value": {
+          "Type": "STRING",
+          "Value": "dockerd"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.language",
+        "Value": {
+          "Type": "STRING",
+          "Value": "go"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.name",
+        "Value": {
+          "Type": "STRING",
+          "Value": "opentelemetry"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.version",
+        "Value": {
+          "Type": "STRING",
+          "Value": "1.19.0"
+        }
+      }
+    ],
+    "InstrumentationLibrary": {
+      "Name": "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc",
+      "Version": "0.45.0",
+      "SchemaURL": ""
+    }
+  },
+  {
+    "Name": "remotes.docker.resolver.HTTPRequest",
+    "SpanContext": {
+      "TraceID": "0555ce1903feb85770d102846273073b",
+      "SpanID": "cf5aa3b6eba7323c",
+      "TraceFlags": "01",
+      "TraceState": "",
+      "Remote": false
+    },
+    "Parent": {
+      "TraceID": "0555ce1903feb85770d102846273073b",
+      "SpanID": "14078881261b5312",
+      "TraceFlags": "01",
+      "TraceState": "",
+      "Remote": false
+    },
+    "SpanKind": 3,
+    "StartTime": "2024-01-12T14:57:41.844389359Z",
+    "EndTime": "2024-01-12T14:57:42.201524037Z",
+    "Attributes": [
+      {
+        "Key": "http.method",
+        "Value": {
+          "Type": "STRING",
+          "Value": "HEAD"
+        }
+      },
+      {
+        "Key": "http.flavor",
+        "Value": {
+          "Type": "STRING",
+          "Value": "1.1"
+        }
+      },
+      {
+        "Key": "http.url",
+        "Value": {
+          "Type": "STRING",
+          "Value": "https://registry-1.docker.io/v2/tonistiigi/bats-assert/manifests/latest"
+        }
+      },
+      {
+        "Key": "net.peer.name",
+        "Value": {
+          "Type": "STRING",
+          "Value": "registry-1.docker.io"
+        }
+      },
+      {
+        "Key": "http.user_agent",
+        "Value": {
+          "Type": "STRING",
+          "Value": "buildkit/v0.12-dev"
+        }
+      },
+      {
+        "Key": "http.status_code",
+        "Value": {
+          "Type": "INT64",
+          "Value": 200
+        }
+      },
+      {
+        "Key": "http.response_content_length",
+        "Value": {
+          "Type": "INT64",
+          "Value": 6942
+        }
+      }
+    ],
+    "Events": null,
+    "Links": null,
+    "Status": {
+      "Code": "Unset",
+      "Description": ""
+    },
+    "DroppedAttributes": 0,
+    "DroppedEvents": 0,
+    "DroppedLinks": 0,
+    "ChildSpanCount": 1,
+    "Resource": [
+      {
+        "Key": "service.name",
+        "Value": {
+          "Type": "STRING",
+          "Value": "dockerd"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.language",
+        "Value": {
+          "Type": "STRING",
+          "Value": "go"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.name",
+        "Value": {
+          "Type": "STRING",
+          "Value": "opentelemetry"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.version",
+        "Value": {
+          "Type": "STRING",
+          "Value": "1.19.0"
+        }
+      }
+    ],
+    "InstrumentationLibrary": {
+      "Name": "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp",
+      "Version": "0.45.0",
+      "SchemaURL": ""
+    }
+  },
+  {
+    "Name": "HTTP HEAD",
+    "SpanContext": {
+      "TraceID": "0555ce1903feb85770d102846273073b",
+      "SpanID": "5b70cf0df5f0a9bc",
+      "TraceFlags": "01",
+      "TraceState": "",
+      "Remote": false
+    },
+    "Parent": {
+      "TraceID": "0555ce1903feb85770d102846273073b",
+      "SpanID": "cf5aa3b6eba7323c",
+      "TraceFlags": "01",
+      "TraceState": "",
+      "Remote": false
+    },
+    "SpanKind": 3,
+    "StartTime": "2024-01-12T14:57:41.844404587Z",
+    "EndTime": "2024-01-12T14:57:42.20154152Z",
+    "Attributes": [
+      {
+        "Key": "http.method",
+        "Value": {
+          "Type": "STRING",
+          "Value": "HEAD"
+        }
+      },
+      {
+        "Key": "http.flavor",
+        "Value": {
+          "Type": "STRING",
+          "Value": "1.1"
+        }
+      },
+      {
+        "Key": "http.url",
+        "Value": {
+          "Type": "STRING",
+          "Value": "https://registry-1.docker.io/v2/tonistiigi/bats-assert/manifests/latest"
+        }
+      },
+      {
+        "Key": "net.peer.name",
+        "Value": {
+          "Type": "STRING",
+          "Value": "registry-1.docker.io"
+        }
+      },
+      {
+        "Key": "http.user_agent",
+        "Value": {
+          "Type": "STRING",
+          "Value": "buildkit/v0.12-dev"
+        }
+      },
+      {
+        "Key": "http.request.header.host",
+        "Value": {
+          "Type": "STRING",
+          "Value": "registry-1.docker.io"
+        }
+      },
+      {
+        "Key": "http.request.header.user-agent",
+        "Value": {
+          "Type": "STRING",
+          "Value": "buildkit/v0.12-dev"
+        }
+      },
+      {
+        "Key": "http.request.header.accept",
+        "Value": {
+          "Type": "STRING",
+          "Value": "application/vnd.docker.distribution.manifest.v2+json, application/vnd.docker.distribution.manifest.list.v2+json, application/vnd.oci.image.manifest.v1+json, application/vnd.oci.image.index.v1+json, */*"
+        }
+      },
+      {
+        "Key": "http.request.header.authorization",
+        "Value": {
+          "Type": "STRING",
+          "Value": "****"
+        }
+      },
+      {
+        "Key": "http.request.header.traceparent",
+        "Value": {
+          "Type": "STRING",
+          "Value": "00-0555ce1903feb85770d102846273073b-5b70cf0df5f0a9bc-01"
+        }
+      },
+      {
+        "Key": "http.status_code",
+        "Value": {
+          "Type": "INT64",
+          "Value": 200
+        }
+      },
+      {
+        "Key": "http.response_content_length",
+        "Value": {
+          "Type": "INT64",
+          "Value": 6942
+        }
+      }
+    ],
+    "Events": [
+      {
+        "Name": "http.getconn.start",
+        "Attributes": [
+          {
+            "Key": "net.host.name",
+            "Value": {
+              "Type": "STRING",
+              "Value": "http.docker.internal:3128"
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:41.844420377Z"
+      },
+      {
+        "Name": "http.dns.start",
+        "Attributes": [
+          {
+            "Key": "net.host.name",
+            "Value": {
+              "Type": "STRING",
+              "Value": "http.docker.internal"
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:41.844454851Z"
+      },
+      {
+        "Name": "http.dns.done",
+        "Attributes": [
+          {
+            "Key": "http.dns.addrs",
+            "Value": {
+              "Type": "STRING",
+              "Value": "192.168.65.1"
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:41.845276957Z"
+      },
+      {
+        "Name": "http.connect.192.168.65.1:3128.start",
+        "Attributes": [
+          {
+            "Key": "http.remote",
+            "Value": {
+              "Type": "STRING",
+              "Value": "192.168.65.1:3128"
+            }
+          },
+          {
+            "Key": "http.conn.start.network",
+            "Value": {
+              "Type": "STRING",
+              "Value": "tcp"
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:41.845280875Z"
+      },
+      {
+        "Name": "http.connect.192.168.65.1:3128.done",
+        "Attributes": [
+          {
+            "Key": "http.conn.done.addr",
+            "Value": {
+              "Type": "STRING",
+              "Value": "192.168.65.1:3128"
+            }
+          },
+          {
+            "Key": "http.conn.done.network",
+            "Value": {
+              "Type": "STRING",
+              "Value": "tcp"
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:41.845736215Z"
+      },
+      {
+        "Name": "http.tls.start",
+        "Attributes": null,
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:41.863139846Z"
+      },
+      {
+        "Name": "http.tls.done",
+        "Attributes": null,
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:42.052708645Z"
+      },
+      {
+        "Name": "http.getconn.done",
+        "Attributes": [
+          {
+            "Key": "http.remote",
+            "Value": {
+              "Type": "STRING",
+              "Value": "192.168.65.1:3128"
+            }
+          },
+          {
+            "Key": "http.local",
+            "Value": {
+              "Type": "STRING",
+              "Value": "192.168.65.3:41262"
+            }
+          },
+          {
+            "Key": "http.conn.reused",
+            "Value": {
+              "Type": "BOOL",
+              "Value": false
+            }
+          },
+          {
+            "Key": "http.conn.wasidle",
+            "Value": {
+              "Type": "BOOL",
+              "Value": false
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:42.052739352Z"
+      },
+      {
+        "Name": "http.send.start",
+        "Attributes": null,
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:42.052770821Z"
+      },
+      {
+        "Name": "http.send.done",
+        "Attributes": null,
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:42.052771582Z"
+      },
+      {
+        "Name": "http.receive.start",
+        "Attributes": null,
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:42.201435171Z"
+      },
+      {
+        "Name": "http.receive.done",
+        "Attributes": null,
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:42.201485435Z"
+      }
+    ],
+    "Links": null,
+    "Status": {
+      "Code": "Unset",
+      "Description": ""
+    },
+    "DroppedAttributes": 0,
+    "DroppedEvents": 0,
+    "DroppedLinks": 0,
+    "ChildSpanCount": 0,
+    "Resource": [
+      {
+        "Key": "service.name",
+        "Value": {
+          "Type": "STRING",
+          "Value": "dockerd"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.language",
+        "Value": {
+          "Type": "STRING",
+          "Value": "go"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.name",
+        "Value": {
+          "Type": "STRING",
+          "Value": "opentelemetry"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.version",
+        "Value": {
+          "Type": "STRING",
+          "Value": "1.19.0"
+        }
+      }
+    ],
+    "InstrumentationLibrary": {
+      "Name": "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp",
+      "Version": "0.45.0",
+      "SchemaURL": ""
+    }
+  },
+  {
+    "Name": "remotes.docker.resolver.HTTPRequest",
+    "SpanContext": {
+      "TraceID": "0555ce1903feb85770d102846273073b",
+      "SpanID": "323fa81aa6a2dea5",
+      "TraceFlags": "01",
+      "TraceState": "",
+      "Remote": false
+    },
+    "Parent": {
+      "TraceID": "0555ce1903feb85770d102846273073b",
+      "SpanID": "14078881261b5312",
+      "TraceFlags": "01",
+      "TraceState": "",
+      "Remote": false
+    },
+    "SpanKind": 3,
+    "StartTime": "2024-01-12T14:57:41.842101356Z",
+    "EndTime": "2024-01-12T14:57:42.208552526Z",
+    "Attributes": [
+      {
+        "Key": "http.method",
+        "Value": {
+          "Type": "STRING",
+          "Value": "HEAD"
+        }
+      },
+      {
+        "Key": "http.flavor",
+        "Value": {
+          "Type": "STRING",
+          "Value": "1.1"
+        }
+      },
+      {
+        "Key": "http.url",
+        "Value": {
+          "Type": "STRING",
+          "Value": "https://registry-1.docker.io/v2/library/alpine/manifests/latest"
+        }
+      },
+      {
+        "Key": "net.peer.name",
+        "Value": {
+          "Type": "STRING",
+          "Value": "registry-1.docker.io"
+        }
+      },
+      {
+        "Key": "http.user_agent",
+        "Value": {
+          "Type": "STRING",
+          "Value": "buildkit/v0.12-dev"
+        }
+      },
+      {
+        "Key": "http.status_code",
+        "Value": {
+          "Type": "INT64",
+          "Value": 200
+        }
+      },
+      {
+        "Key": "http.response_content_length",
+        "Value": {
+          "Type": "INT64",
+          "Value": 1638
+        }
+      }
+    ],
+    "Events": null,
+    "Links": null,
+    "Status": {
+      "Code": "Unset",
+      "Description": ""
+    },
+    "DroppedAttributes": 0,
+    "DroppedEvents": 0,
+    "DroppedLinks": 0,
+    "ChildSpanCount": 1,
+    "Resource": [
+      {
+        "Key": "service.name",
+        "Value": {
+          "Type": "STRING",
+          "Value": "dockerd"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.language",
+        "Value": {
+          "Type": "STRING",
+          "Value": "go"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.name",
+        "Value": {
+          "Type": "STRING",
+          "Value": "opentelemetry"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.version",
+        "Value": {
+          "Type": "STRING",
+          "Value": "1.19.0"
+        }
+      }
+    ],
+    "InstrumentationLibrary": {
+      "Name": "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp",
+      "Version": "0.45.0",
+      "SchemaURL": ""
+    }
+  },
+  {
+    "Name": "HTTP HEAD",
+    "SpanContext": {
+      "TraceID": "0555ce1903feb85770d102846273073b",
+      "SpanID": "990f9e6b761681f6",
+      "TraceFlags": "01",
+      "TraceState": "",
+      "Remote": false
+    },
+    "Parent": {
+      "TraceID": "0555ce1903feb85770d102846273073b",
+      "SpanID": "323fa81aa6a2dea5",
+      "TraceFlags": "01",
+      "TraceState": "",
+      "Remote": false
+    },
+    "SpanKind": 3,
+    "StartTime": "2024-01-12T14:57:41.842125231Z",
+    "EndTime": "2024-01-12T14:57:42.208564549Z",
+    "Attributes": [
+      {
+        "Key": "http.method",
+        "Value": {
+          "Type": "STRING",
+          "Value": "HEAD"
+        }
+      },
+      {
+        "Key": "http.flavor",
+        "Value": {
+          "Type": "STRING",
+          "Value": "1.1"
+        }
+      },
+      {
+        "Key": "http.url",
+        "Value": {
+          "Type": "STRING",
+          "Value": "https://registry-1.docker.io/v2/library/alpine/manifests/latest"
+        }
+      },
+      {
+        "Key": "net.peer.name",
+        "Value": {
+          "Type": "STRING",
+          "Value": "registry-1.docker.io"
+        }
+      },
+      {
+        "Key": "http.user_agent",
+        "Value": {
+          "Type": "STRING",
+          "Value": "buildkit/v0.12-dev"
+        }
+      },
+      {
+        "Key": "http.request.header.host",
+        "Value": {
+          "Type": "STRING",
+          "Value": "registry-1.docker.io"
+        }
+      },
+      {
+        "Key": "http.request.header.user-agent",
+        "Value": {
+          "Type": "STRING",
+          "Value": "buildkit/v0.12-dev"
+        }
+      },
+      {
+        "Key": "http.request.header.accept",
+        "Value": {
+          "Type": "STRING",
+          "Value": "application/vnd.docker.distribution.manifest.v2+json, application/vnd.docker.distribution.manifest.list.v2+json, application/vnd.oci.image.manifest.v1+json, application/vnd.oci.image.index.v1+json, */*"
+        }
+      },
+      {
+        "Key": "http.request.header.authorization",
+        "Value": {
+          "Type": "STRING",
+          "Value": "****"
+        }
+      },
+      {
+        "Key": "http.request.header.traceparent",
+        "Value": {
+          "Type": "STRING",
+          "Value": "00-0555ce1903feb85770d102846273073b-990f9e6b761681f6-01"
+        }
+      },
+      {
+        "Key": "http.status_code",
+        "Value": {
+          "Type": "INT64",
+          "Value": 200
+        }
+      },
+      {
+        "Key": "http.response_content_length",
+        "Value": {
+          "Type": "INT64",
+          "Value": 1638
+        }
+      }
+    ],
+    "Events": [
+      {
+        "Name": "http.getconn.start",
+        "Attributes": [
+          {
+            "Key": "net.host.name",
+            "Value": {
+              "Type": "STRING",
+              "Value": "http.docker.internal:3128"
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:41.842152702Z"
+      },
+      {
+        "Name": "http.dns.start",
+        "Attributes": [
+          {
+            "Key": "net.host.name",
+            "Value": {
+              "Type": "STRING",
+              "Value": "http.docker.internal"
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:41.842195061Z"
+      },
+      {
+        "Name": "http.dns.done",
+        "Attributes": [
+          {
+            "Key": "http.dns.addrs",
+            "Value": {
+              "Type": "STRING",
+              "Value": "192.168.65.1"
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:41.843327715Z"
+      },
+      {
+        "Name": "http.connect.192.168.65.1:3128.start",
+        "Attributes": [
+          {
+            "Key": "http.remote",
+            "Value": {
+              "Type": "STRING",
+              "Value": "192.168.65.1:3128"
+            }
+          },
+          {
+            "Key": "http.conn.start.network",
+            "Value": {
+              "Type": "STRING",
+              "Value": "tcp"
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:41.843335069Z"
+      },
+      {
+        "Name": "http.connect.192.168.65.1:3128.done",
+        "Attributes": [
+          {
+            "Key": "http.conn.done.addr",
+            "Value": {
+              "Type": "STRING",
+              "Value": "192.168.65.1:3128"
+            }
+          },
+          {
+            "Key": "http.conn.done.network",
+            "Value": {
+              "Type": "STRING",
+              "Value": "tcp"
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:41.843911496Z"
+      },
+      {
+        "Name": "http.tls.start",
+        "Attributes": null,
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:41.862155336Z"
+      },
+      {
+        "Name": "http.tls.done",
+        "Attributes": null,
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:42.05636278Z"
+      },
+      {
+        "Name": "http.getconn.done",
+        "Attributes": [
+          {
+            "Key": "http.remote",
+            "Value": {
+              "Type": "STRING",
+              "Value": "192.168.65.1:3128"
+            }
+          },
+          {
+            "Key": "http.local",
+            "Value": {
+              "Type": "STRING",
+              "Value": "192.168.65.3:41248"
+            }
+          },
+          {
+            "Key": "http.conn.reused",
+            "Value": {
+              "Type": "BOOL",
+              "Value": false
+            }
+          },
+          {
+            "Key": "http.conn.wasidle",
+            "Value": {
+              "Type": "BOOL",
+              "Value": false
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:42.056400801Z"
+      },
+      {
+        "Name": "http.send.start",
+        "Attributes": null,
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:42.056421069Z"
+      },
+      {
+        "Name": "http.send.done",
+        "Attributes": null,
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:42.056421771Z"
+      },
+      {
+        "Name": "http.receive.start",
+        "Attributes": null,
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:42.2084957Z"
+      },
+      {
+        "Name": "http.receive.done",
+        "Attributes": null,
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:42.208525526Z"
+      }
+    ],
+    "Links": null,
+    "Status": {
+      "Code": "Unset",
+      "Description": ""
+    },
+    "DroppedAttributes": 0,
+    "DroppedEvents": 0,
+    "DroppedLinks": 0,
+    "ChildSpanCount": 0,
+    "Resource": [
+      {
+        "Key": "service.name",
+        "Value": {
+          "Type": "STRING",
+          "Value": "dockerd"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.language",
+        "Value": {
+          "Type": "STRING",
+          "Value": "go"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.name",
+        "Value": {
+          "Type": "STRING",
+          "Value": "opentelemetry"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.version",
+        "Value": {
+          "Type": "STRING",
+          "Value": "1.19.0"
+        }
+      }
+    ],
+    "InstrumentationLibrary": {
+      "Name": "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp",
+      "Version": "0.45.0",
+      "SchemaURL": ""
+    }
+  },
+  {
+    "Name": "moby.buildkit.v1.frontend.LLBBridge/Solve",
+    "SpanContext": {
+      "TraceID": "0555ce1903feb85770d102846273073b",
+      "SpanID": "14078881261b5312",
+      "TraceFlags": "01",
+      "TraceState": "",
+      "Remote": false
+    },
+    "Parent": {
+      "TraceID": "0555ce1903feb85770d102846273073b",
+      "SpanID": "4548113210977542",
+      "TraceFlags": "01",
+      "TraceState": "",
+      "Remote": true
+    },
+    "SpanKind": 2,
+    "StartTime": "2024-01-12T14:57:40.401260424Z",
+    "EndTime": "2024-01-12T14:57:42.284524255Z",
+    "Attributes": [
+      {
+        "Key": "rpc.system",
+        "Value": {
+          "Type": "STRING",
+          "Value": "grpc"
+        }
+      },
+      {
+        "Key": "rpc.service",
+        "Value": {
+          "Type": "STRING",
+          "Value": "moby.buildkit.v1.frontend.LLBBridge"
+        }
+      },
+      {
+        "Key": "rpc.method",
+        "Value": {
+          "Type": "STRING",
+          "Value": "Solve"
+        }
+      },
+      {
+        "Key": "rpc.grpc.status_code",
+        "Value": {
+          "Type": "INT64",
+          "Value": 0
+        }
+      }
+    ],
+    "Events": [
+      {
+        "Name": "Container created",
+        "Attributes": null,
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:41.337936172Z"
+      },
+      {
+        "Name": "Container started",
+        "Attributes": null,
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:41.338262401Z"
+      },
+      {
+        "Name": "Container exited",
+        "Attributes": [
+          {
+            "Key": "exit.code",
+            "Value": {
+              "Type": "INT64",
+              "Value": 0
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:42.255546744Z"
+      }
+    ],
+    "Links": null,
+    "Status": {
+      "Code": "Unset",
+      "Description": ""
+    },
+    "DroppedAttributes": 0,
+    "DroppedEvents": 0,
+    "DroppedLinks": 0,
+    "ChildSpanCount": 9,
+    "Resource": [
+      {
+        "Key": "service.name",
+        "Value": {
+          "Type": "STRING",
+          "Value": "dockerd"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.language",
+        "Value": {
+          "Type": "STRING",
+          "Value": "go"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.name",
+        "Value": {
+          "Type": "STRING",
+          "Value": "opentelemetry"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.version",
+        "Value": {
+          "Type": "STRING",
+          "Value": "1.19.0"
+        }
+      }
+    ],
+    "InstrumentationLibrary": {
+      "Name": "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc",
+      "Version": "0.45.0",
+      "SchemaURL": ""
+    }
+  },
+  {
+    "Name": "moby.buildkit.v1.frontend.LLBBridge/Return",
+    "SpanContext": {
+      "TraceID": "0555ce1903feb85770d102846273073b",
+      "SpanID": "424053ee88cf9600",
+      "TraceFlags": "01",
+      "TraceState": "",
+      "Remote": false
+    },
+    "Parent": {
+      "TraceID": "0555ce1903feb85770d102846273073b",
+      "SpanID": "76cae146240f0505",
+      "TraceFlags": "01",
+      "TraceState": "",
+      "Remote": true
+    },
+    "SpanKind": 2,
+    "StartTime": "2024-01-12T14:57:42.286310429Z",
+    "EndTime": "2024-01-12T14:57:42.286347388Z",
+    "Attributes": [
+      {
+        "Key": "rpc.system",
+        "Value": {
+          "Type": "STRING",
+          "Value": "grpc"
+        }
+      },
+      {
+        "Key": "rpc.service",
+        "Value": {
+          "Type": "STRING",
+          "Value": "moby.buildkit.v1.frontend.LLBBridge"
+        }
+      },
+      {
+        "Key": "rpc.method",
+        "Value": {
+          "Type": "STRING",
+          "Value": "Return"
+        }
+      },
+      {
+        "Key": "rpc.grpc.status_code",
+        "Value": {
+          "Type": "INT64",
+          "Value": 0
+        }
+      }
+    ],
+    "Events": null,
+    "Links": null,
+    "Status": {
+      "Code": "Unset",
+      "Description": ""
+    },
+    "DroppedAttributes": 0,
+    "DroppedEvents": 0,
+    "DroppedLinks": 0,
+    "ChildSpanCount": 0,
+    "Resource": [
+      {
+        "Key": "service.name",
+        "Value": {
+          "Type": "STRING",
+          "Value": "dockerd"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.language",
+        "Value": {
+          "Type": "STRING",
+          "Value": "go"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.name",
+        "Value": {
+          "Type": "STRING",
+          "Value": "opentelemetry"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.version",
+        "Value": {
+          "Type": "STRING",
+          "Value": "1.19.0"
+        }
+      }
+    ],
+    "InstrumentationLibrary": {
+      "Name": "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc",
+      "Version": "0.45.0",
+      "SchemaURL": ""
+    }
+  },
+  {
+    "Name": "cache request: [build 1/3] FROM docker.io/library/alpine@sha256:51b67269f354137895d43f3b3d810bfacd3945438e94dc5ac55fdac340352f48",
+    "SpanContext": {
+      "TraceID": "0555ce1903feb85770d102846273073b",
+      "SpanID": "121feef5c8cc97ad",
+      "TraceFlags": "01",
+      "TraceState": "",
+      "Remote": false
+    },
+    "Parent": {
+      "TraceID": "0555ce1903feb85770d102846273073b",
+      "SpanID": "f1bb978313184255",
+      "TraceFlags": "01",
+      "TraceState": "",
+      "Remote": false
+    },
+    "SpanKind": 1,
+    "StartTime": "2024-01-12T14:57:42.287014966Z",
+    "EndTime": "2024-01-12T14:57:42.28724194Z",
+    "Attributes": [
+      {
+        "Key": "vertex",
+        "Value": {
+          "Type": "STRING",
+          "Value": "sha256:68ce956d6bef78a30f5452b0c12c5f918cd9e67c18ebe8b864b0a483dc147258"
+        }
+      }
+    ],
+    "Events": null,
+    "Links": null,
+    "Status": {
+      "Code": "Unset",
+      "Description": ""
+    },
+    "DroppedAttributes": 0,
+    "DroppedEvents": 0,
+    "DroppedLinks": 0,
+    "ChildSpanCount": 0,
+    "Resource": [
+      {
+        "Key": "service.name",
+        "Value": {
+          "Type": "STRING",
+          "Value": "dockerd"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.language",
+        "Value": {
+          "Type": "STRING",
+          "Value": "go"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.name",
+        "Value": {
+          "Type": "STRING",
+          "Value": "opentelemetry"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.version",
+        "Value": {
+          "Type": "STRING",
+          "Value": "1.19.0"
+        }
+      }
+    ],
+    "InstrumentationLibrary": {
+      "Name": "go.opentelemetry.io/otel/sdk/tracer",
+      "Version": "",
+      "SchemaURL": ""
+    }
+  },
+  {
+    "Name": "cache request: [internal] load build context",
+    "SpanContext": {
+      "TraceID": "0555ce1903feb85770d102846273073b",
+      "SpanID": "d5db73e91af3df92",
+      "TraceFlags": "01",
+      "TraceState": "",
+      "Remote": false
+    },
+    "Parent": {
+      "TraceID": "0555ce1903feb85770d102846273073b",
+      "SpanID": "f1bb978313184255",
+      "TraceFlags": "01",
+      "TraceState": "",
+      "Remote": false
+    },
+    "SpanKind": 1,
+    "StartTime": "2024-01-12T14:57:42.287008985Z",
+    "EndTime": "2024-01-12T14:57:42.287274511Z",
+    "Attributes": [
+      {
+        "Key": "vertex",
+        "Value": {
+          "Type": "STRING",
+          "Value": "sha256:d6e40c1360ca7a3794673b2c27130b8a0cc88712faa287cb5f0060b14f025381"
+        }
+      }
+    ],
+    "Events": null,
+    "Links": null,
+    "Status": {
+      "Code": "Unset",
+      "Description": ""
+    },
+    "DroppedAttributes": 0,
+    "DroppedEvents": 0,
+    "DroppedLinks": 0,
+    "ChildSpanCount": 0,
+    "Resource": [
+      {
+        "Key": "service.name",
+        "Value": {
+          "Type": "STRING",
+          "Value": "dockerd"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.language",
+        "Value": {
+          "Type": "STRING",
+          "Value": "go"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.name",
+        "Value": {
+          "Type": "STRING",
+          "Value": "opentelemetry"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.version",
+        "Value": {
+          "Type": "STRING",
+          "Value": "1.19.0"
+        }
+      }
+    ],
+    "InstrumentationLibrary": {
+      "Name": "go.opentelemetry.io/otel/sdk/tracer",
+      "Version": "",
+      "SchemaURL": ""
+    }
+  },
+  {
+    "Name": "cache request: [bats-assert 1/1] FROM docker.io/tonistiigi/bats-assert@sha256:813f357fb86180c44bb6aaf155ff06573a630b3b2e0115405b0cb65116319551",
+    "SpanContext": {
+      "TraceID": "0555ce1903feb85770d102846273073b",
+      "SpanID": "efc51d00622f3291",
+      "TraceFlags": "01",
+      "TraceState": "",
+      "Remote": false
+    },
+    "Parent": {
+      "TraceID": "0555ce1903feb85770d102846273073b",
+      "SpanID": "f1bb978313184255",
+      "TraceFlags": "01",
+      "TraceState": "",
+      "Remote": false
+    },
+    "SpanKind": 1,
+    "StartTime": "2024-01-12T14:57:42.287053417Z",
+    "EndTime": "2024-01-12T14:57:42.287283046Z",
+    "Attributes": [
+      {
+        "Key": "vertex",
+        "Value": {
+          "Type": "STRING",
+          "Value": "sha256:f34255b862e83e8a0427c87cf1e440bede7c30190733b1691b1abe35010fc318"
+        }
+      }
+    ],
+    "Events": null,
+    "Links": null,
+    "Status": {
+      "Code": "Unset",
+      "Description": ""
+    },
+    "DroppedAttributes": 0,
+    "DroppedEvents": 0,
+    "DroppedLinks": 0,
+    "ChildSpanCount": 0,
+    "Resource": [
+      {
+        "Key": "service.name",
+        "Value": {
+          "Type": "STRING",
+          "Value": "dockerd"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.language",
+        "Value": {
+          "Type": "STRING",
+          "Value": "go"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.name",
+        "Value": {
+          "Type": "STRING",
+          "Value": "opentelemetry"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.version",
+        "Value": {
+          "Type": "STRING",
+          "Value": "1.19.0"
+        }
+      }
+    ],
+    "InstrumentationLibrary": {
+      "Name": "go.opentelemetry.io/otel/sdk/tracer",
+      "Version": "",
+      "SchemaURL": ""
+    }
+  },
+  {
+    "Name": "moby.filesync.v1.FileSync/DiffCopy",
+    "SpanContext": {
+      "TraceID": "0555ce1903feb85770d102846273073b",
+      "SpanID": "0f4681a3ad4d29f8",
+      "TraceFlags": "01",
+      "TraceState": "",
+      "Remote": false
+    },
+    "Parent": {
+      "TraceID": "0555ce1903feb85770d102846273073b",
+      "SpanID": "4e95c6912df2c60b",
+      "TraceFlags": "01",
+      "TraceState": "",
+      "Remote": false
+    },
+    "SpanKind": 3,
+    "StartTime": "2024-01-12T14:57:42.293077322Z",
+    "EndTime": "2024-01-12T14:57:42.369507285Z",
+    "Attributes": [
+      {
+        "Key": "rpc.system",
+        "Value": {
+          "Type": "STRING",
+          "Value": "grpc"
+        }
+      },
+      {
+        "Key": "rpc.service",
+        "Value": {
+          "Type": "STRING",
+          "Value": "moby.filesync.v1.FileSync"
+        }
+      },
+      {
+        "Key": "rpc.method",
+        "Value": {
+          "Type": "STRING",
+          "Value": "DiffCopy"
+        }
+      },
+      {
+        "Key": "rpc.grpc.status_code",
+        "Value": {
+          "Type": "INT64",
+          "Value": 0
+        }
+      }
+    ],
+    "Events": null,
+    "Links": null,
+    "Status": {
+      "Code": "Unset",
+      "Description": ""
+    },
+    "DroppedAttributes": 0,
+    "DroppedEvents": 0,
+    "DroppedLinks": 0,
+    "ChildSpanCount": 0,
+    "Resource": [
+      {
+        "Key": "service.name",
+        "Value": {
+          "Type": "STRING",
+          "Value": "dockerd"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.language",
+        "Value": {
+          "Type": "STRING",
+          "Value": "go"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.name",
+        "Value": {
+          "Type": "STRING",
+          "Value": "opentelemetry"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.version",
+        "Value": {
+          "Type": "STRING",
+          "Value": "1.19.0"
+        }
+      }
+    ],
+    "InstrumentationLibrary": {
+      "Name": "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc",
+      "Version": "0.45.0",
+      "SchemaURL": ""
+    }
+  },
+  {
+    "Name": "[internal] load build context",
+    "SpanContext": {
+      "TraceID": "0555ce1903feb85770d102846273073b",
+      "SpanID": "4e95c6912df2c60b",
+      "TraceFlags": "01",
+      "TraceState": "",
+      "Remote": false
+    },
+    "Parent": {
+      "TraceID": "0555ce1903feb85770d102846273073b",
+      "SpanID": "f1bb978313184255",
+      "TraceFlags": "01",
+      "TraceState": "",
+      "Remote": false
+    },
+    "SpanKind": 1,
+    "StartTime": "2024-01-12T14:57:42.287748305Z",
+    "EndTime": "2024-01-12T14:57:42.385320735Z",
+    "Attributes": [
+      {
+        "Key": "vertex",
+        "Value": {
+          "Type": "STRING",
+          "Value": "sha256:d6e40c1360ca7a3794673b2c27130b8a0cc88712faa287cb5f0060b14f025381"
+        }
+      }
+    ],
+    "Events": null,
+    "Links": null,
+    "Status": {
+      "Code": "Unset",
+      "Description": ""
+    },
+    "DroppedAttributes": 0,
+    "DroppedEvents": 0,
+    "DroppedLinks": 0,
+    "ChildSpanCount": 1,
+    "Resource": [
+      {
+        "Key": "service.name",
+        "Value": {
+          "Type": "STRING",
+          "Value": "dockerd"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.language",
+        "Value": {
+          "Type": "STRING",
+          "Value": "go"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.name",
+        "Value": {
+          "Type": "STRING",
+          "Value": "opentelemetry"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.version",
+        "Value": {
+          "Type": "STRING",
+          "Value": "1.19.0"
+        }
+      }
+    ],
+    "InstrumentationLibrary": {
+      "Name": "go.opentelemetry.io/otel/sdk/tracer",
+      "Version": "",
+      "SchemaURL": ""
+    }
+  },
+  {
+    "Name": "load cache: [build 1/3] FROM docker.io/library/alpine@sha256:51b67269f354137895d43f3b3d810bfacd3945438e94dc5ac55fdac340352f48",
+    "SpanContext": {
+      "TraceID": "0555ce1903feb85770d102846273073b",
+      "SpanID": "24dd74fb33034fd8",
+      "TraceFlags": "01",
+      "TraceState": "",
+      "Remote": false
+    },
+    "Parent": {
+      "TraceID": "0555ce1903feb85770d102846273073b",
+      "SpanID": "f1bb978313184255",
+      "TraceFlags": "01",
+      "TraceState": "",
+      "Remote": false
+    },
+    "SpanKind": 1,
+    "StartTime": "2024-01-12T14:57:42.390533062Z",
+    "EndTime": "2024-01-12T14:57:42.390565823Z",
+    "Attributes": [
+      {
+        "Key": "vertex",
+        "Value": {
+          "Type": "STRING",
+          "Value": "sha256:68ce956d6bef78a30f5452b0c12c5f918cd9e67c18ebe8b864b0a483dc147258"
+        }
+      }
+    ],
+    "Events": null,
+    "Links": null,
+    "Status": {
+      "Code": "Unset",
+      "Description": ""
+    },
+    "DroppedAttributes": 0,
+    "DroppedEvents": 0,
+    "DroppedLinks": 0,
+    "ChildSpanCount": 0,
+    "Resource": [
+      {
+        "Key": "service.name",
+        "Value": {
+          "Type": "STRING",
+          "Value": "dockerd"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.language",
+        "Value": {
+          "Type": "STRING",
+          "Value": "go"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.name",
+        "Value": {
+          "Type": "STRING",
+          "Value": "opentelemetry"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.version",
+        "Value": {
+          "Type": "STRING",
+          "Value": "1.19.0"
+        }
+      }
+    ],
+    "InstrumentationLibrary": {
+      "Name": "go.opentelemetry.io/otel/sdk/tracer",
+      "Version": "",
+      "SchemaURL": ""
+    }
+  },
+  {
+    "Name": "[build 2/3] COPY xx-* /out/",
+    "SpanContext": {
+      "TraceID": "0555ce1903feb85770d102846273073b",
+      "SpanID": "081ae7125ffd36a1",
+      "TraceFlags": "01",
+      "TraceState": "",
+      "Remote": false
+    },
+    "Parent": {
+      "TraceID": "0555ce1903feb85770d102846273073b",
+      "SpanID": "f1bb978313184255",
+      "TraceFlags": "01",
+      "TraceState": "",
+      "Remote": false
+    },
+    "SpanKind": 1,
+    "StartTime": "2024-01-12T14:57:42.390820868Z",
+    "EndTime": "2024-01-12T14:57:42.455481346Z",
+    "Attributes": [
+      {
+        "Key": "vertex",
+        "Value": {
+          "Type": "STRING",
+          "Value": "sha256:196b331662aa0b768bce34341a2a913d12a61d790455623be2b10d713abbac56"
+        }
+      }
+    ],
+    "Events": null,
+    "Links": null,
+    "Status": {
+      "Code": "Unset",
+      "Description": ""
+    },
+    "DroppedAttributes": 0,
+    "DroppedEvents": 0,
+    "DroppedLinks": 0,
+    "ChildSpanCount": 0,
+    "Resource": [
+      {
+        "Key": "service.name",
+        "Value": {
+          "Type": "STRING",
+          "Value": "dockerd"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.language",
+        "Value": {
+          "Type": "STRING",
+          "Value": "go"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.name",
+        "Value": {
+          "Type": "STRING",
+          "Value": "opentelemetry"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.version",
+        "Value": {
+          "Type": "STRING",
+          "Value": "1.19.0"
+        }
+      }
+    ],
+    "InstrumentationLibrary": {
+      "Name": "go.opentelemetry.io/otel/sdk/tracer",
+      "Version": "",
+      "SchemaURL": ""
+    }
+  },
+  {
+    "Name": "HTTP HEAD",
+    "SpanContext": {
+      "TraceID": "0555ce1903feb85770d102846273073b",
+      "SpanID": "3d01dbef1293e6cd",
+      "TraceFlags": "01",
+      "TraceState": "",
+      "Remote": false
+    },
+    "Parent": {
+      "TraceID": "0555ce1903feb85770d102846273073b",
+      "SpanID": "f1bb978313184255",
+      "TraceFlags": "01",
+      "TraceState": "",
+      "Remote": false
+    },
+    "SpanKind": 3,
+    "StartTime": "2024-01-12T14:57:42.287286814Z",
+    "EndTime": "2024-01-12T14:57:42.503085452Z",
+    "Attributes": [
+      {
+        "Key": "http.method",
+        "Value": {
+          "Type": "STRING",
+          "Value": "HEAD"
+        }
+      },
+      {
+        "Key": "http.flavor",
+        "Value": {
+          "Type": "STRING",
+          "Value": "1.1"
+        }
+      },
+      {
+        "Key": "http.url",
+        "Value": {
+          "Type": "STRING",
+          "Value": "https://raw.githubusercontent.com/fsaintjacques/semver-tool/3.4.0/src/semver"
+        }
+      },
+      {
+        "Key": "net.peer.name",
+        "Value": {
+          "Type": "STRING",
+          "Value": "raw.githubusercontent.com"
+        }
+      },
+      {
+        "Key": "http.request.header.:authority",
+        "Value": {
+          "Type": "STRING",
+          "Value": "raw.githubusercontent.com"
+        }
+      },
+      {
+        "Key": "http.request.header.:method",
+        "Value": {
+          "Type": "STRING",
+          "Value": "HEAD"
+        }
+      },
+      {
+        "Key": "http.request.header.:path",
+        "Value": {
+          "Type": "STRING",
+          "Value": "/fsaintjacques/semver-tool/3.4.0/src/semver"
+        }
+      },
+      {
+        "Key": "http.request.header.:scheme",
+        "Value": {
+          "Type": "STRING",
+          "Value": "https"
+        }
+      },
+      {
+        "Key": "http.request.header.if-none-match",
+        "Value": {
+          "Type": "STRING",
+          "Value": "\"e8135dc02beea5325dd7607b2505971fba2f9d3bf7f0e07c47db570096ee9e4b\""
+        }
+      },
+      {
+        "Key": "http.request.header.accept-encoding",
+        "Value": {
+          "Type": "STRING",
+          "Value": "gzip"
+        }
+      },
+      {
+        "Key": "http.request.header.traceparent",
+        "Value": {
+          "Type": "STRING",
+          "Value": "00-0555ce1903feb85770d102846273073b-3d01dbef1293e6cd-01"
+        }
+      },
+      {
+        "Key": "http.request.header.user-agent",
+        "Value": {
+          "Type": "STRING",
+          "Value": "Go-http-client/2.0"
+        }
+      },
+      {
+        "Key": "http.status_code",
+        "Value": {
+          "Type": "INT64",
+          "Value": 304
+        }
+      }
+    ],
+    "Events": [
+      {
+        "Name": "http.getconn.start",
+        "Attributes": [
+          {
+            "Key": "net.host.name",
+            "Value": {
+              "Type": "STRING",
+              "Value": "http.docker.internal:3128"
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:42.287345854Z"
+      },
+      {
+        "Name": "http.dns.start",
+        "Attributes": [
+          {
+            "Key": "net.host.name",
+            "Value": {
+              "Type": "STRING",
+              "Value": "http.docker.internal"
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:42.287384847Z"
+      },
+      {
+        "Name": "http.dns.done",
+        "Attributes": [
+          {
+            "Key": "http.dns.addrs",
+            "Value": {
+              "Type": "STRING",
+              "Value": "192.168.65.1"
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:42.288702709Z"
+      },
+      {
+        "Name": "http.connect.192.168.65.1:3128.start",
+        "Attributes": [
+          {
+            "Key": "http.remote",
+            "Value": {
+              "Type": "STRING",
+              "Value": "192.168.65.1:3128"
+            }
+          },
+          {
+            "Key": "http.conn.start.network",
+            "Value": {
+              "Type": "STRING",
+              "Value": "tcp"
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:42.288707688Z"
+      },
+      {
+        "Name": "http.connect.192.168.65.1:3128.done",
+        "Attributes": [
+          {
+            "Key": "http.conn.done.addr",
+            "Value": {
+              "Type": "STRING",
+              "Value": "192.168.65.1:3128"
+            }
+          },
+          {
+            "Key": "http.conn.done.network",
+            "Value": {
+              "Type": "STRING",
+              "Value": "tcp"
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:42.289330781Z"
+      },
+      {
+        "Name": "http.tls.start",
+        "Attributes": null,
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:42.308357023Z"
+      },
+      {
+        "Name": "http.tls.done",
+        "Attributes": null,
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:42.330407545Z"
+      },
+      {
+        "Name": "http.getconn.done",
+        "Attributes": [
+          {
+            "Key": "http.remote",
+            "Value": {
+              "Type": "STRING",
+              "Value": "192.168.65.1:3128"
+            }
+          },
+          {
+            "Key": "http.local",
+            "Value": {
+              "Type": "STRING",
+              "Value": "192.168.65.3:41272"
+            }
+          },
+          {
+            "Key": "http.conn.reused",
+            "Value": {
+              "Type": "BOOL",
+              "Value": false
+            }
+          },
+          {
+            "Key": "http.conn.wasidle",
+            "Value": {
+              "Type": "BOOL",
+              "Value": false
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:42.330729617Z"
+      },
+      {
+        "Name": "http.send.start",
+        "Attributes": null,
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:42.330859459Z"
+      },
+      {
+        "Name": "http.send.done",
+        "Attributes": null,
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:42.330860942Z"
+      },
+      {
+        "Name": "http.receive.start",
+        "Attributes": null,
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:42.503006526Z"
+      }
+    ],
+    "Links": null,
+    "Status": {
+      "Code": "Unset",
+      "Description": ""
+    },
+    "DroppedAttributes": 0,
+    "DroppedEvents": 0,
+    "DroppedLinks": 0,
+    "ChildSpanCount": 0,
+    "Resource": [
+      {
+        "Key": "service.name",
+        "Value": {
+          "Type": "STRING",
+          "Value": "dockerd"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.language",
+        "Value": {
+          "Type": "STRING",
+          "Value": "go"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.name",
+        "Value": {
+          "Type": "STRING",
+          "Value": "opentelemetry"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.version",
+        "Value": {
+          "Type": "STRING",
+          "Value": "1.19.0"
+        }
+      }
+    ],
+    "InstrumentationLibrary": {
+      "Name": "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp",
+      "Version": "0.45.0",
+      "SchemaURL": ""
+    }
+  },
+  {
+    "Name": "cache request: https://raw.githubusercontent.com/fsaintjacques/semver-tool/3.4.0/src/semver",
+    "SpanContext": {
+      "TraceID": "0555ce1903feb85770d102846273073b",
+      "SpanID": "89596877844426c7",
+      "TraceFlags": "01",
+      "TraceState": "",
+      "Remote": false
+    },
+    "Parent": {
+      "TraceID": "0555ce1903feb85770d102846273073b",
+      "SpanID": "f1bb978313184255",
+      "TraceFlags": "01",
+      "TraceState": "",
+      "Remote": false
+    },
+    "SpanKind": 1,
+    "StartTime": "2024-01-12T14:57:42.287136233Z",
+    "EndTime": "2024-01-12T14:57:42.50314312Z",
+    "Attributes": [
+      {
+        "Key": "vertex",
+        "Value": {
+          "Type": "STRING",
+          "Value": "sha256:2de254c1cdd10bfd735b05df0ddbcc58b6c96bbeddacb02662082e5863c7dfaa"
+        }
+      }
+    ],
+    "Events": null,
+    "Links": null,
+    "Status": {
+      "Code": "Unset",
+      "Description": ""
+    },
+    "DroppedAttributes": 0,
+    "DroppedEvents": 0,
+    "DroppedLinks": 0,
+    "ChildSpanCount": 0,
+    "Resource": [
+      {
+        "Key": "service.name",
+        "Value": {
+          "Type": "STRING",
+          "Value": "dockerd"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.language",
+        "Value": {
+          "Type": "STRING",
+          "Value": "go"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.name",
+        "Value": {
+          "Type": "STRING",
+          "Value": "opentelemetry"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.version",
+        "Value": {
+          "Type": "STRING",
+          "Value": "1.19.0"
+        }
+      }
+    ],
+    "InstrumentationLibrary": {
+      "Name": "go.opentelemetry.io/otel/sdk/tracer",
+      "Version": "",
+      "SchemaURL": ""
+    }
+  },
+  {
+    "Name": "[build 3/3] RUN ln -s xx-cc /out/xx-clang \u0026\u0026     ln -s xx-cc /out/xx-clang++ \u0026\u0026     ln -s xx-cc /out/xx-c++ \u0026\u0026     ln -s xx-apt /out/xx-apt-get",
+    "SpanContext": {
+      "TraceID": "0555ce1903feb85770d102846273073b",
+      "SpanID": "12e17e16cfa335bf",
+      "TraceFlags": "01",
+      "TraceState": "",
+      "Remote": false
+    },
+    "Parent": {
+      "TraceID": "0555ce1903feb85770d102846273073b",
+      "SpanID": "f1bb978313184255",
+      "TraceFlags": "01",
+      "TraceState": "",
+      "Remote": false
+    },
+    "SpanKind": 1,
+    "StartTime": "2024-01-12T14:57:42.460817845Z",
+    "EndTime": "2024-01-12T14:57:42.907704321Z",
+    "Attributes": [
+      {
+        "Key": "vertex",
+        "Value": {
+          "Type": "STRING",
+          "Value": "sha256:a1eea8cd46dfa6d71e21811e74e153079f41846f01247e9ab5577997e361207d"
+        }
+      }
+    ],
+    "Events": [
+      {
+        "Name": "ExecOp started",
+        "Attributes": null,
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:42.460835478Z"
+      },
+      {
+        "Name": "Container created",
+        "Attributes": null,
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:42.553257485Z"
+      },
+      {
+        "Name": "Container started",
+        "Attributes": null,
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:42.553798935Z"
+      },
+      {
+        "Name": "Container exited",
+        "Attributes": [
+          {
+            "Key": "exit.code",
+            "Value": {
+              "Type": "INT64",
+              "Value": 0
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:42.877373704Z"
+      }
+    ],
+    "Links": null,
+    "Status": {
+      "Code": "Unset",
+      "Description": ""
+    },
+    "DroppedAttributes": 0,
+    "DroppedEvents": 0,
+    "DroppedLinks": 0,
+    "ChildSpanCount": 0,
+    "Resource": [
+      {
+        "Key": "service.name",
+        "Value": {
+          "Type": "STRING",
+          "Value": "dockerd"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.language",
+        "Value": {
+          "Type": "STRING",
+          "Value": "go"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.name",
+        "Value": {
+          "Type": "STRING",
+          "Value": "opentelemetry"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.version",
+        "Value": {
+          "Type": "STRING",
+          "Value": "1.19.0"
+        }
+      }
+    ],
+    "InstrumentationLibrary": {
+      "Name": "go.opentelemetry.io/otel/sdk/tracer",
+      "Version": "",
+      "SchemaURL": ""
+    }
+  },
+  {
+    "Name": "[xx 1/1] COPY --from=build /out/ /usr/bin/",
+    "SpanContext": {
+      "TraceID": "0555ce1903feb85770d102846273073b",
+      "SpanID": "619e3ab187179836",
+      "TraceFlags": "01",
+      "TraceState": "",
+      "Remote": false
+    },
+    "Parent": {
+      "TraceID": "0555ce1903feb85770d102846273073b",
+      "SpanID": "f1bb978313184255",
+      "TraceFlags": "01",
+      "TraceState": "",
+      "Remote": false
+    },
+    "SpanKind": 1,
+    "StartTime": "2024-01-12T14:57:42.95885997Z",
+    "EndTime": "2024-01-12T14:57:43.011879613Z",
+    "Attributes": [
+      {
+        "Key": "vertex",
+        "Value": {
+          "Type": "STRING",
+          "Value": "sha256:abe6c4b1cb4b514609ab3e38e605946b9dabe300bacf05d0356dcea2cf038b48"
+        }
+      }
+    ],
+    "Events": null,
+    "Links": null,
+    "Status": {
+      "Code": "Unset",
+      "Description": ""
+    },
+    "DroppedAttributes": 0,
+    "DroppedEvents": 0,
+    "DroppedLinks": 0,
+    "ChildSpanCount": 0,
+    "Resource": [
+      {
+        "Key": "service.name",
+        "Value": {
+          "Type": "STRING",
+          "Value": "dockerd"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.language",
+        "Value": {
+          "Type": "STRING",
+          "Value": "go"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.name",
+        "Value": {
+          "Type": "STRING",
+          "Value": "opentelemetry"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.version",
+        "Value": {
+          "Type": "STRING",
+          "Value": "1.19.0"
+        }
+      }
+    ],
+    "InstrumentationLibrary": {
+      "Name": "go.opentelemetry.io/otel/sdk/tracer",
+      "Version": "",
+      "SchemaURL": ""
+    }
+  },
+  {
+    "Name": "load cache: [test-base 2/3] COPY --from=bats-assert . .",
+    "SpanContext": {
+      "TraceID": "0555ce1903feb85770d102846273073b",
+      "SpanID": "8ddee09af707d9fd",
+      "TraceFlags": "01",
+      "TraceState": "",
+      "Remote": false
+    },
+    "Parent": {
+      "TraceID": "0555ce1903feb85770d102846273073b",
+      "SpanID": "f1bb978313184255",
+      "TraceFlags": "01",
+      "TraceState": "",
+      "Remote": false
+    },
+    "SpanKind": 1,
+    "StartTime": "2024-01-12T14:57:43.018474041Z",
+    "EndTime": "2024-01-12T14:57:43.018525547Z",
+    "Attributes": [
+      {
+        "Key": "vertex",
+        "Value": {
+          "Type": "STRING",
+          "Value": "sha256:6ff2f44640655dbef55c51757a728a4a8661a76b9368399ed22ae8169fda81fe"
+        }
+      }
+    ],
+    "Events": null,
+    "Links": null,
+    "Status": {
+      "Code": "Unset",
+      "Description": ""
+    },
+    "DroppedAttributes": 0,
+    "DroppedEvents": 0,
+    "DroppedLinks": 0,
+    "ChildSpanCount": 0,
+    "Resource": [
+      {
+        "Key": "service.name",
+        "Value": {
+          "Type": "STRING",
+          "Value": "dockerd"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.language",
+        "Value": {
+          "Type": "STRING",
+          "Value": "go"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.name",
+        "Value": {
+          "Type": "STRING",
+          "Value": "opentelemetry"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.version",
+        "Value": {
+          "Type": "STRING",
+          "Value": "1.19.0"
+        }
+      }
+    ],
+    "InstrumentationLibrary": {
+      "Name": "go.opentelemetry.io/otel/sdk/tracer",
+      "Version": "",
+      "SchemaURL": ""
+    }
+  },
+  {
+    "Name": "[test-base 3/3] COPY --from=xx / /",
+    "SpanContext": {
+      "TraceID": "0555ce1903feb85770d102846273073b",
+      "SpanID": "62eddbfa59fdb4c3",
+      "TraceFlags": "01",
+      "TraceState": "",
+      "Remote": false
+    },
+    "Parent": {
+      "TraceID": "0555ce1903feb85770d102846273073b",
+      "SpanID": "f1bb978313184255",
+      "TraceFlags": "01",
+      "TraceState": "",
+      "Remote": false
+    },
+    "SpanKind": 1,
+    "StartTime": "2024-01-12T14:57:43.018613201Z",
+    "EndTime": "2024-01-12T14:57:43.089313591Z",
+    "Attributes": [
+      {
+        "Key": "vertex",
+        "Value": {
+          "Type": "STRING",
+          "Value": "sha256:3c9083d5f8616feedd45b59d401c86f2737b196284b79fd02e5d84db757460ba"
+        }
+      }
+    ],
+    "Events": null,
+    "Links": null,
+    "Status": {
+      "Code": "Unset",
+      "Description": ""
+    },
+    "DroppedAttributes": 0,
+    "DroppedEvents": 0,
+    "DroppedLinks": 0,
+    "ChildSpanCount": 0,
+    "Resource": [
+      {
+        "Key": "service.name",
+        "Value": {
+          "Type": "STRING",
+          "Value": "dockerd"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.language",
+        "Value": {
+          "Type": "STRING",
+          "Value": "go"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.name",
+        "Value": {
+          "Type": "STRING",
+          "Value": "opentelemetry"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.version",
+        "Value": {
+          "Type": "STRING",
+          "Value": "1.19.0"
+        }
+      }
+    ],
+    "InstrumentationLibrary": {
+      "Name": "go.opentelemetry.io/otel/sdk/tracer",
+      "Version": "",
+      "SchemaURL": ""
+    }
+  },
+  {
+    "Name": "[test-base-fixtures 1/1] COPY fixtures fixtures",
+    "SpanContext": {
+      "TraceID": "0555ce1903feb85770d102846273073b",
+      "SpanID": "07426553e9705e76",
+      "TraceFlags": "01",
+      "TraceState": "",
+      "Remote": false
+    },
+    "Parent": {
+      "TraceID": "0555ce1903feb85770d102846273073b",
+      "SpanID": "f1bb978313184255",
+      "TraceFlags": "01",
+      "TraceState": "",
+      "Remote": false
+    },
+    "SpanKind": 1,
+    "StartTime": "2024-01-12T14:57:43.094591201Z",
+    "EndTime": "2024-01-12T14:57:43.182925672Z",
+    "Attributes": [
+      {
+        "Key": "vertex",
+        "Value": {
+          "Type": "STRING",
+          "Value": "sha256:346d54a7e04fefc7af945eebe8a6cd97677ee60bf676dc8c88c243d28922f32b"
+        }
+      }
+    ],
+    "Events": null,
+    "Links": null,
+    "Status": {
+      "Code": "Unset",
+      "Description": ""
+    },
+    "DroppedAttributes": 0,
+    "DroppedEvents": 0,
+    "DroppedLinks": 0,
+    "ChildSpanCount": 0,
+    "Resource": [
+      {
+        "Key": "service.name",
+        "Value": {
+          "Type": "STRING",
+          "Value": "dockerd"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.language",
+        "Value": {
+          "Type": "STRING",
+          "Value": "go"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.name",
+        "Value": {
+          "Type": "STRING",
+          "Value": "opentelemetry"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.version",
+        "Value": {
+          "Type": "STRING",
+          "Value": "1.19.0"
+        }
+      }
+    ],
+    "InstrumentationLibrary": {
+      "Name": "go.opentelemetry.io/otel/sdk/tracer",
+      "Version": "",
+      "SchemaURL": ""
+    }
+  },
+  {
+    "Name": "[test-go 1/2] COPY test-go.bats test_helper.bash ./",
+    "SpanContext": {
+      "TraceID": "0555ce1903feb85770d102846273073b",
+      "SpanID": "1115d9d92e0eaeb8",
+      "TraceFlags": "01",
+      "TraceState": "",
+      "Remote": false
+    },
+    "Parent": {
+      "TraceID": "0555ce1903feb85770d102846273073b",
+      "SpanID": "f1bb978313184255",
+      "TraceFlags": "01",
+      "TraceState": "",
+      "Remote": false
+    },
+    "SpanKind": 1,
+    "StartTime": "2024-01-12T14:57:43.187809656Z",
+    "EndTime": "2024-01-12T14:57:43.289237315Z",
+    "Attributes": [
+      {
+        "Key": "vertex",
+        "Value": {
+          "Type": "STRING",
+          "Value": "sha256:fdfb54beb471875a9c87aee2905e97b00d1e5ce7b1e280006bb05ac0e6c5e9ca"
+        }
+      }
+    ],
+    "Events": null,
+    "Links": null,
+    "Status": {
+      "Code": "Unset",
+      "Description": ""
+    },
+    "DroppedAttributes": 0,
+    "DroppedEvents": 0,
+    "DroppedLinks": 0,
+    "ChildSpanCount": 0,
+    "Resource": [
+      {
+        "Key": "service.name",
+        "Value": {
+          "Type": "STRING",
+          "Value": "dockerd"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.language",
+        "Value": {
+          "Type": "STRING",
+          "Value": "go"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.name",
+        "Value": {
+          "Type": "STRING",
+          "Value": "opentelemetry"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.version",
+        "Value": {
+          "Type": "STRING",
+          "Value": "1.19.0"
+        }
+      }
+    ],
+    "InstrumentationLibrary": {
+      "Name": "go.opentelemetry.io/otel/sdk/tracer",
+      "Version": "",
+      "SchemaURL": ""
+    }
+  },
+  {
+    "Name": "moby.buildkit.v1.Control/Status",
+    "SpanContext": {
+      "TraceID": "0555ce1903feb85770d102846273073b",
+      "SpanID": "52ac568c2da201b8",
+      "TraceFlags": "00",
+      "TraceState": "",
+      "Remote": false
+    },
+    "Parent": {
+      "TraceID": "0555ce1903feb85770d102846273073b",
+      "SpanID": "d30eea87f2f0241d",
+      "TraceFlags": "00",
+      "TraceState": "",
+      "Remote": false
+    },
+    "SpanKind": 3,
+    "StartTime": "2024-01-12T14:57:40.39969566Z",
+    "EndTime": "2024-01-12T14:59:09.25901601Z",
+    "Attributes": [
+      {
+        "Key": "rpc.system",
+        "Value": {
+          "Type": "STRING",
+          "Value": "grpc"
+        }
+      },
+      {
+        "Key": "rpc.service",
+        "Value": {
+          "Type": "STRING",
+          "Value": "moby.buildkit.v1.Control"
+        }
+      },
+      {
+        "Key": "rpc.method",
+        "Value": {
+          "Type": "STRING",
+          "Value": "Status"
+        }
+      },
+      {
+        "Key": "rpc.grpc.status_code",
+        "Value": {
+          "Type": "INT64",
+          "Value": 0
+        }
+      }
+    ],
+    "Events": [
+      {
+        "Name": "message",
+        "Attributes": [
+          {
+            "Key": "message.type",
+            "Value": {
+              "Type": "STRING",
+              "Value": "SENT"
+            }
+          },
+          {
+            "Key": "message.id",
+            "Value": {
+              "Type": "INT64",
+              "Value": 1
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:40.399750984Z"
+      },
+      {
+        "Name": "message",
+        "Attributes": [
+          {
+            "Key": "message.type",
+            "Value": {
+              "Type": "STRING",
+              "Value": "RECEIVED"
+            }
+          },
+          {
+            "Key": "message.id",
+            "Value": {
+              "Type": "INT64",
+              "Value": 1
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:40.402465886Z"
+      },
+      {
+        "Name": "message",
+        "Attributes": [
+          {
+            "Key": "message.type",
+            "Value": {
+              "Type": "STRING",
+              "Value": "RECEIVED"
+            }
+          },
+          {
+            "Key": "message.id",
+            "Value": {
+              "Type": "INT64",
+              "Value": 2
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:40.40249445Z"
+      },
+      {
+        "Name": "message",
+        "Attributes": [
+          {
+            "Key": "message.type",
+            "Value": {
+              "Type": "STRING",
+              "Value": "RECEIVED"
+            }
+          },
+          {
+            "Key": "message.id",
+            "Value": {
+              "Type": "INT64",
+              "Value": 3
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:40.402504999Z"
+      },
+      {
+        "Name": "message",
+        "Attributes": [
+          {
+            "Key": "message.type",
+            "Value": {
+              "Type": "STRING",
+              "Value": "RECEIVED"
+            }
+          },
+          {
+            "Key": "message.id",
+            "Value": {
+              "Type": "INT64",
+              "Value": 4
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:40.40883372Z"
+      },
+      {
+        "Name": "message",
+        "Attributes": [
+          {
+            "Key": "message.type",
+            "Value": {
+              "Type": "STRING",
+              "Value": "RECEIVED"
+            }
+          },
+          {
+            "Key": "message.id",
+            "Value": {
+              "Type": "INT64",
+              "Value": 5
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:40.415916471Z"
+      },
+      {
+        "Name": "message",
+        "Attributes": [
+          {
+            "Key": "message.type",
+            "Value": {
+              "Type": "STRING",
+              "Value": "RECEIVED"
+            }
+          },
+          {
+            "Key": "message.id",
+            "Value": {
+              "Type": "INT64",
+              "Value": 6
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:40.419297628Z"
+      },
+      {
+        "Name": "message",
+        "Attributes": [
+          {
+            "Key": "message.type",
+            "Value": {
+              "Type": "STRING",
+              "Value": "RECEIVED"
+            }
+          },
+          {
+            "Key": "message.id",
+            "Value": {
+              "Type": "INT64",
+              "Value": 7
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:40.434004641Z"
+      },
+      {
+        "Name": "message",
+        "Attributes": [
+          {
+            "Key": "message.type",
+            "Value": {
+              "Type": "STRING",
+              "Value": "RECEIVED"
+            }
+          },
+          {
+            "Key": "message.id",
+            "Value": {
+              "Type": "INT64",
+              "Value": 8
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:40.440396651Z"
+      },
+      {
+        "Name": "message",
+        "Attributes": [
+          {
+            "Key": "message.type",
+            "Value": {
+              "Type": "STRING",
+              "Value": "RECEIVED"
+            }
+          },
+          {
+            "Key": "message.id",
+            "Value": {
+              "Type": "INT64",
+              "Value": 9
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:41.29852984Z"
+      },
+      {
+        "Name": "message",
+        "Attributes": [
+          {
+            "Key": "message.type",
+            "Value": {
+              "Type": "STRING",
+              "Value": "RECEIVED"
+            }
+          },
+          {
+            "Key": "message.id",
+            "Value": {
+              "Type": "INT64",
+              "Value": 10
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:41.298633422Z"
+      },
+      {
+        "Name": "message",
+        "Attributes": [
+          {
+            "Key": "message.type",
+            "Value": {
+              "Type": "STRING",
+              "Value": "RECEIVED"
+            }
+          },
+          {
+            "Key": "message.id",
+            "Value": {
+              "Type": "INT64",
+              "Value": 11
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:41.29874541Z"
+      },
+      {
+        "Name": "message",
+        "Attributes": [
+          {
+            "Key": "message.type",
+            "Value": {
+              "Type": "STRING",
+              "Value": "RECEIVED"
+            }
+          },
+          {
+            "Key": "message.id",
+            "Value": {
+              "Type": "INT64",
+              "Value": 12
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:41.298765538Z"
+      },
+      {
+        "Name": "message",
+        "Attributes": [
+          {
+            "Key": "message.type",
+            "Value": {
+              "Type": "STRING",
+              "Value": "RECEIVED"
+            }
+          },
+          {
+            "Key": "message.id",
+            "Value": {
+              "Type": "INT64",
+              "Value": 13
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:41.299220671Z"
+      },
+      {
+        "Name": "message",
+        "Attributes": [
+          {
+            "Key": "message.type",
+            "Value": {
+              "Type": "STRING",
+              "Value": "RECEIVED"
+            }
+          },
+          {
+            "Key": "message.id",
+            "Value": {
+              "Type": "INT64",
+              "Value": 14
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:41.450692733Z"
+      },
+      {
+        "Name": "message",
+        "Attributes": [
+          {
+            "Key": "message.type",
+            "Value": {
+              "Type": "STRING",
+              "Value": "RECEIVED"
+            }
+          },
+          {
+            "Key": "message.id",
+            "Value": {
+              "Type": "INT64",
+              "Value": 15
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:41.450718571Z"
+      },
+      {
+        "Name": "message",
+        "Attributes": [
+          {
+            "Key": "message.type",
+            "Value": {
+              "Type": "STRING",
+              "Value": "RECEIVED"
+            }
+          },
+          {
+            "Key": "message.id",
+            "Value": {
+              "Type": "INT64",
+              "Value": 16
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:41.450875766Z"
+      },
+      {
+        "Name": "message",
+        "Attributes": [
+          {
+            "Key": "message.type",
+            "Value": {
+              "Type": "STRING",
+              "Value": "RECEIVED"
+            }
+          },
+          {
+            "Key": "message.id",
+            "Value": {
+              "Type": "INT64",
+              "Value": 17
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:41.456368595Z"
+      },
+      {
+        "Name": "message",
+        "Attributes": [
+          {
+            "Key": "message.type",
+            "Value": {
+              "Type": "STRING",
+              "Value": "RECEIVED"
+            }
+          },
+          {
+            "Key": "message.id",
+            "Value": {
+              "Type": "INT64",
+              "Value": 18
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:41.464429855Z"
+      },
+      {
+        "Name": "message",
+        "Attributes": [
+          {
+            "Key": "message.type",
+            "Value": {
+              "Type": "STRING",
+              "Value": "RECEIVED"
+            }
+          },
+          {
+            "Key": "message.id",
+            "Value": {
+              "Type": "INT64",
+              "Value": 19
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:41.464736406Z"
+      },
+      {
+        "Name": "message",
+        "Attributes": [
+          {
+            "Key": "message.type",
+            "Value": {
+              "Type": "STRING",
+              "Value": "RECEIVED"
+            }
+          },
+          {
+            "Key": "message.id",
+            "Value": {
+              "Type": "INT64",
+              "Value": 20
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:41.480645857Z"
+      },
+      {
+        "Name": "message",
+        "Attributes": [
+          {
+            "Key": "message.type",
+            "Value": {
+              "Type": "STRING",
+              "Value": "RECEIVED"
+            }
+          },
+          {
+            "Key": "message.id",
+            "Value": {
+              "Type": "INT64",
+              "Value": 21
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:41.489618728Z"
+      },
+      {
+        "Name": "message",
+        "Attributes": [
+          {
+            "Key": "message.type",
+            "Value": {
+              "Type": "STRING",
+              "Value": "RECEIVED"
+            }
+          },
+          {
+            "Key": "message.id",
+            "Value": {
+              "Type": "INT64",
+              "Value": 22
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:42.247192558Z"
+      },
+      {
+        "Name": "message",
+        "Attributes": [
+          {
+            "Key": "message.type",
+            "Value": {
+              "Type": "STRING",
+              "Value": "RECEIVED"
+            }
+          },
+          {
+            "Key": "message.id",
+            "Value": {
+              "Type": "INT64",
+              "Value": 23
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:42.251291625Z"
+      },
+      {
+        "Name": "message",
+        "Attributes": [
+          {
+            "Key": "message.type",
+            "Value": {
+              "Type": "STRING",
+              "Value": "RECEIVED"
+            }
+          },
+          {
+            "Key": "message.id",
+            "Value": {
+              "Type": "INT64",
+              "Value": 24
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:42.287177131Z"
+      },
+      {
+        "Name": "message",
+        "Attributes": [
+          {
+            "Key": "message.type",
+            "Value": {
+              "Type": "STRING",
+              "Value": "RECEIVED"
+            }
+          },
+          {
+            "Key": "message.id",
+            "Value": {
+              "Type": "INT64",
+              "Value": 25
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:42.287291432Z"
+      },
+      {
+        "Name": "message",
+        "Attributes": [
+          {
+            "Key": "message.type",
+            "Value": {
+              "Type": "STRING",
+              "Value": "RECEIVED"
+            }
+          },
+          {
+            "Key": "message.id",
+            "Value": {
+              "Type": "INT64",
+              "Value": 26
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:42.287552016Z"
+      },
+      {
+        "Name": "message",
+        "Attributes": [
+          {
+            "Key": "message.type",
+            "Value": {
+              "Type": "STRING",
+              "Value": "RECEIVED"
+            }
+          },
+          {
+            "Key": "message.id",
+            "Value": {
+              "Type": "INT64",
+              "Value": 27
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:42.288074771Z"
+      },
+      {
+        "Name": "message",
+        "Attributes": [
+          {
+            "Key": "message.type",
+            "Value": {
+              "Type": "STRING",
+              "Value": "RECEIVED"
+            }
+          },
+          {
+            "Key": "message.id",
+            "Value": {
+              "Type": "INT64",
+              "Value": 28
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:42.293598414Z"
+      },
+      {
+        "Name": "message",
+        "Attributes": [
+          {
+            "Key": "message.type",
+            "Value": {
+              "Type": "STRING",
+              "Value": "RECEIVED"
+            }
+          },
+          {
+            "Key": "message.id",
+            "Value": {
+              "Type": "INT64",
+              "Value": 29
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:42.311730795Z"
+      },
+      {
+        "Name": "message",
+        "Attributes": [
+          {
+            "Key": "message.type",
+            "Value": {
+              "Type": "STRING",
+              "Value": "RECEIVED"
+            }
+          },
+          {
+            "Key": "message.id",
+            "Value": {
+              "Type": "INT64",
+              "Value": 30
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:42.369784713Z"
+      },
+      {
+        "Name": "message",
+        "Attributes": [
+          {
+            "Key": "message.type",
+            "Value": {
+              "Type": "STRING",
+              "Value": "RECEIVED"
+            }
+          },
+          {
+            "Key": "message.id",
+            "Value": {
+              "Type": "INT64",
+              "Value": 31
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:42.385847739Z"
+      },
+      {
+        "Name": "message",
+        "Attributes": [
+          {
+            "Key": "message.type",
+            "Value": {
+              "Type": "STRING",
+              "Value": "RECEIVED"
+            }
+          },
+          {
+            "Key": "message.id",
+            "Value": {
+              "Type": "INT64",
+              "Value": 32
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:42.390942548Z"
+      },
+      {
+        "Name": "message",
+        "Attributes": [
+          {
+            "Key": "message.type",
+            "Value": {
+              "Type": "STRING",
+              "Value": "RECEIVED"
+            }
+          },
+          {
+            "Key": "message.id",
+            "Value": {
+              "Type": "INT64",
+              "Value": 33
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:42.391212991Z"
+      },
+      {
+        "Name": "message",
+        "Attributes": [
+          {
+            "Key": "message.type",
+            "Value": {
+              "Type": "STRING",
+              "Value": "RECEIVED"
+            }
+          },
+          {
+            "Key": "message.id",
+            "Value": {
+              "Type": "INT64",
+              "Value": 34
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:42.456018882Z"
+      },
+      {
+        "Name": "message",
+        "Attributes": [
+          {
+            "Key": "message.type",
+            "Value": {
+              "Type": "STRING",
+              "Value": "RECEIVED"
+            }
+          },
+          {
+            "Key": "message.id",
+            "Value": {
+              "Type": "INT64",
+              "Value": 35
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:42.461238922Z"
+      },
+      {
+        "Name": "message",
+        "Attributes": [
+          {
+            "Key": "message.type",
+            "Value": {
+              "Type": "STRING",
+              "Value": "RECEIVED"
+            }
+          },
+          {
+            "Key": "message.id",
+            "Value": {
+              "Type": "INT64",
+              "Value": 36
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:42.503713777Z"
+      },
+      {
+        "Name": "message",
+        "Attributes": [
+          {
+            "Key": "message.type",
+            "Value": {
+              "Type": "STRING",
+              "Value": "RECEIVED"
+            }
+          },
+          {
+            "Key": "message.id",
+            "Value": {
+              "Type": "INT64",
+              "Value": 37
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:42.908249129Z"
+      },
+      {
+        "Name": "message",
+        "Attributes": [
+          {
+            "Key": "message.type",
+            "Value": {
+              "Type": "STRING",
+              "Value": "RECEIVED"
+            }
+          },
+          {
+            "Key": "message.id",
+            "Value": {
+              "Type": "INT64",
+              "Value": 38
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:42.959403226Z"
+      },
+      {
+        "Name": "message",
+        "Attributes": [
+          {
+            "Key": "message.type",
+            "Value": {
+              "Type": "STRING",
+              "Value": "RECEIVED"
+            }
+          },
+          {
+            "Key": "message.id",
+            "Value": {
+              "Type": "INT64",
+              "Value": 39
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:43.012411075Z"
+      },
+      {
+        "Name": "message",
+        "Attributes": [
+          {
+            "Key": "message.type",
+            "Value": {
+              "Type": "STRING",
+              "Value": "RECEIVED"
+            }
+          },
+          {
+            "Key": "message.id",
+            "Value": {
+              "Type": "INT64",
+              "Value": 40
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:43.019002879Z"
+      },
+      {
+        "Name": "message",
+        "Attributes": [
+          {
+            "Key": "message.type",
+            "Value": {
+              "Type": "STRING",
+              "Value": "RECEIVED"
+            }
+          },
+          {
+            "Key": "message.id",
+            "Value": {
+              "Type": "INT64",
+              "Value": 41
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:43.019028387Z"
+      },
+      {
+        "Name": "message",
+        "Attributes": [
+          {
+            "Key": "message.type",
+            "Value": {
+              "Type": "STRING",
+              "Value": "RECEIVED"
+            }
+          },
+          {
+            "Key": "message.id",
+            "Value": {
+              "Type": "INT64",
+              "Value": 42
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:43.08985268Z"
+      },
+      {
+        "Name": "message",
+        "Attributes": [
+          {
+            "Key": "message.type",
+            "Value": {
+              "Type": "STRING",
+              "Value": "RECEIVED"
+            }
+          },
+          {
+            "Key": "message.id",
+            "Value": {
+              "Type": "INT64",
+              "Value": 43
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:43.094980027Z"
+      },
+      {
+        "Name": "message",
+        "Attributes": [
+          {
+            "Key": "message.type",
+            "Value": {
+              "Type": "STRING",
+              "Value": "RECEIVED"
+            }
+          },
+          {
+            "Key": "message.id",
+            "Value": {
+              "Type": "INT64",
+              "Value": 44
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:43.183415678Z"
+      },
+      {
+        "Name": "message",
+        "Attributes": [
+          {
+            "Key": "message.type",
+            "Value": {
+              "Type": "STRING",
+              "Value": "RECEIVED"
+            }
+          },
+          {
+            "Key": "message.id",
+            "Value": {
+              "Type": "INT64",
+              "Value": 45
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:43.188233607Z"
+      },
+      {
+        "Name": "message",
+        "Attributes": [
+          {
+            "Key": "message.type",
+            "Value": {
+              "Type": "STRING",
+              "Value": "RECEIVED"
+            }
+          },
+          {
+            "Key": "message.id",
+            "Value": {
+              "Type": "INT64",
+              "Value": 46
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:43.28977037Z"
+      },
+      {
+        "Name": "message",
+        "Attributes": [
+          {
+            "Key": "message.type",
+            "Value": {
+              "Type": "STRING",
+              "Value": "RECEIVED"
+            }
+          },
+          {
+            "Key": "message.id",
+            "Value": {
+              "Type": "INT64",
+              "Value": 47
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:43.297209719Z"
+      },
+      {
+        "Name": "message",
+        "Attributes": [
+          {
+            "Key": "message.type",
+            "Value": {
+              "Type": "STRING",
+              "Value": "RECEIVED"
+            }
+          },
+          {
+            "Key": "message.id",
+            "Value": {
+              "Type": "INT64",
+              "Value": 48
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:43.738226971Z"
+      },
+      {
+        "Name": "message",
+        "Attributes": [
+          {
+            "Key": "message.type",
+            "Value": {
+              "Type": "STRING",
+              "Value": "RECEIVED"
+            }
+          },
+          {
+            "Key": "message.id",
+            "Value": {
+              "Type": "INT64",
+              "Value": 49
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:51.968009255Z"
+      },
+      {
+        "Name": "message",
+        "Attributes": [
+          {
+            "Key": "message.type",
+            "Value": {
+              "Type": "STRING",
+              "Value": "RECEIVED"
+            }
+          },
+          {
+            "Key": "message.id",
+            "Value": {
+              "Type": "INT64",
+              "Value": 50
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:52.274741317Z"
+      },
+      {
+        "Name": "message",
+        "Attributes": [
+          {
+            "Key": "message.type",
+            "Value": {
+              "Type": "STRING",
+              "Value": "RECEIVED"
+            }
+          },
+          {
+            "Key": "message.id",
+            "Value": {
+              "Type": "INT64",
+              "Value": 51
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:52.366039619Z"
+      },
+      {
+        "Name": "message",
+        "Attributes": [
+          {
+            "Key": "message.type",
+            "Value": {
+              "Type": "STRING",
+              "Value": "RECEIVED"
+            }
+          },
+          {
+            "Key": "message.id",
+            "Value": {
+              "Type": "INT64",
+              "Value": 52
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:52.46341939Z"
+      },
+      {
+        "Name": "message",
+        "Attributes": [
+          {
+            "Key": "message.type",
+            "Value": {
+              "Type": "STRING",
+              "Value": "RECEIVED"
+            }
+          },
+          {
+            "Key": "message.id",
+            "Value": {
+              "Type": "INT64",
+              "Value": 53
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:52.564803533Z"
+      },
+      {
+        "Name": "message",
+        "Attributes": [
+          {
+            "Key": "message.type",
+            "Value": {
+              "Type": "STRING",
+              "Value": "RECEIVED"
+            }
+          },
+          {
+            "Key": "message.id",
+            "Value": {
+              "Type": "INT64",
+              "Value": 54
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:52.665774796Z"
+      },
+      {
+        "Name": "message",
+        "Attributes": [
+          {
+            "Key": "message.type",
+            "Value": {
+              "Type": "STRING",
+              "Value": "RECEIVED"
+            }
+          },
+          {
+            "Key": "message.id",
+            "Value": {
+              "Type": "INT64",
+              "Value": 55
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:52.980197331Z"
+      },
+      {
+        "Name": "message",
+        "Attributes": [
+          {
+            "Key": "message.type",
+            "Value": {
+              "Type": "STRING",
+              "Value": "RECEIVED"
+            }
+          },
+          {
+            "Key": "message.id",
+            "Value": {
+              "Type": "INT64",
+              "Value": 56
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:53.291337312Z"
+      },
+      {
+        "Name": "message",
+        "Attributes": [
+          {
+            "Key": "message.type",
+            "Value": {
+              "Type": "STRING",
+              "Value": "RECEIVED"
+            }
+          },
+          {
+            "Key": "message.id",
+            "Value": {
+              "Type": "INT64",
+              "Value": 57
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:53.586146639Z"
+      },
+      {
+        "Name": "message",
+        "Attributes": [
+          {
+            "Key": "message.type",
+            "Value": {
+              "Type": "STRING",
+              "Value": "RECEIVED"
+            }
+          },
+          {
+            "Key": "message.id",
+            "Value": {
+              "Type": "INT64",
+              "Value": 58
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:53.855079758Z"
+      },
+      {
+        "Name": "message",
+        "Attributes": [
+          {
+            "Key": "message.type",
+            "Value": {
+              "Type": "STRING",
+              "Value": "RECEIVED"
+            }
+          },
+          {
+            "Key": "message.id",
+            "Value": {
+              "Type": "INT64",
+              "Value": 59
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:53.939470899Z"
+      },
+      {
+        "Name": "message",
+        "Attributes": [
+          {
+            "Key": "message.type",
+            "Value": {
+              "Type": "STRING",
+              "Value": "RECEIVED"
+            }
+          },
+          {
+            "Key": "message.id",
+            "Value": {
+              "Type": "INT64",
+              "Value": 60
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:55.320568151Z"
+      },
+      {
+        "Name": "message",
+        "Attributes": [
+          {
+            "Key": "message.type",
+            "Value": {
+              "Type": "STRING",
+              "Value": "RECEIVED"
+            }
+          },
+          {
+            "Key": "message.id",
+            "Value": {
+              "Type": "INT64",
+              "Value": 61
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:56.655115777Z"
+      },
+      {
+        "Name": "message",
+        "Attributes": [
+          {
+            "Key": "message.type",
+            "Value": {
+              "Type": "STRING",
+              "Value": "RECEIVED"
+            }
+          },
+          {
+            "Key": "message.id",
+            "Value": {
+              "Type": "INT64",
+              "Value": 62
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:56.914980485Z"
+      },
+      {
+        "Name": "message",
+        "Attributes": [
+          {
+            "Key": "message.type",
+            "Value": {
+              "Type": "STRING",
+              "Value": "RECEIVED"
+            }
+          },
+          {
+            "Key": "message.id",
+            "Value": {
+              "Type": "INT64",
+              "Value": 63
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:57.186993379Z"
+      },
+      {
+        "Name": "message",
+        "Attributes": [
+          {
+            "Key": "message.type",
+            "Value": {
+              "Type": "STRING",
+              "Value": "RECEIVED"
+            }
+          },
+          {
+            "Key": "message.id",
+            "Value": {
+              "Type": "INT64",
+              "Value": 64
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:57.270160346Z"
+      },
+      {
+        "Name": "message",
+        "Attributes": [
+          {
+            "Key": "message.type",
+            "Value": {
+              "Type": "STRING",
+              "Value": "RECEIVED"
+            }
+          },
+          {
+            "Key": "message.id",
+            "Value": {
+              "Type": "INT64",
+              "Value": 65
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:57.550595907Z"
+      },
+      {
+        "Name": "message",
+        "Attributes": [
+          {
+            "Key": "message.type",
+            "Value": {
+              "Type": "STRING",
+              "Value": "RECEIVED"
+            }
+          },
+          {
+            "Key": "message.id",
+            "Value": {
+              "Type": "INT64",
+              "Value": 66
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:57.636707033Z"
+      },
+      {
+        "Name": "message",
+        "Attributes": [
+          {
+            "Key": "message.type",
+            "Value": {
+              "Type": "STRING",
+              "Value": "RECEIVED"
+            }
+          },
+          {
+            "Key": "message.id",
+            "Value": {
+              "Type": "INT64",
+              "Value": 67
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:57.910932291Z"
+      },
+      {
+        "Name": "message",
+        "Attributes": [
+          {
+            "Key": "message.type",
+            "Value": {
+              "Type": "STRING",
+              "Value": "RECEIVED"
+            }
+          },
+          {
+            "Key": "message.id",
+            "Value": {
+              "Type": "INT64",
+              "Value": 68
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:57.996460488Z"
+      },
+      {
+        "Name": "message",
+        "Attributes": [
+          {
+            "Key": "message.type",
+            "Value": {
+              "Type": "STRING",
+              "Value": "RECEIVED"
+            }
+          },
+          {
+            "Key": "message.id",
+            "Value": {
+              "Type": "INT64",
+              "Value": 69
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:58.273025208Z"
+      },
+      {
+        "Name": "message",
+        "Attributes": [
+          {
+            "Key": "message.type",
+            "Value": {
+              "Type": "STRING",
+              "Value": "RECEIVED"
+            }
+          },
+          {
+            "Key": "message.id",
+            "Value": {
+              "Type": "INT64",
+              "Value": 70
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:58.357401192Z"
+      },
+      {
+        "Name": "message",
+        "Attributes": [
+          {
+            "Key": "message.type",
+            "Value": {
+              "Type": "STRING",
+              "Value": "RECEIVED"
+            }
+          },
+          {
+            "Key": "message.id",
+            "Value": {
+              "Type": "INT64",
+              "Value": 71
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:58:01.127991199Z"
+      },
+      {
+        "Name": "message",
+        "Attributes": [
+          {
+            "Key": "message.type",
+            "Value": {
+              "Type": "STRING",
+              "Value": "RECEIVED"
+            }
+          },
+          {
+            "Key": "message.id",
+            "Value": {
+              "Type": "INT64",
+              "Value": 72
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:58:01.658083457Z"
+      },
+      {
+        "Name": "message",
+        "Attributes": [
+          {
+            "Key": "message.type",
+            "Value": {
+              "Type": "STRING",
+              "Value": "RECEIVED"
+            }
+          },
+          {
+            "Key": "message.id",
+            "Value": {
+              "Type": "INT64",
+              "Value": 73
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:58:02.656773361Z"
+      },
+      {
+        "Name": "message",
+        "Attributes": [
+          {
+            "Key": "message.type",
+            "Value": {
+              "Type": "STRING",
+              "Value": "RECEIVED"
+            }
+          },
+          {
+            "Key": "message.id",
+            "Value": {
+              "Type": "INT64",
+              "Value": 74
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:58:02.786421303Z"
+      },
+      {
+        "Name": "message",
+        "Attributes": [
+          {
+            "Key": "message.type",
+            "Value": {
+              "Type": "STRING",
+              "Value": "RECEIVED"
+            }
+          },
+          {
+            "Key": "message.id",
+            "Value": {
+              "Type": "INT64",
+              "Value": 75
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:58:05.354449314Z"
+      },
+      {
+        "Name": "message",
+        "Attributes": [
+          {
+            "Key": "message.type",
+            "Value": {
+              "Type": "STRING",
+              "Value": "RECEIVED"
+            }
+          },
+          {
+            "Key": "message.id",
+            "Value": {
+              "Type": "INT64",
+              "Value": 76
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:58:07.858012601Z"
+      },
+      {
+        "Name": "message",
+        "Attributes": [
+          {
+            "Key": "message.type",
+            "Value": {
+              "Type": "STRING",
+              "Value": "RECEIVED"
+            }
+          },
+          {
+            "Key": "message.id",
+            "Value": {
+              "Type": "INT64",
+              "Value": 77
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:58:10.082719475Z"
+      },
+      {
+        "Name": "message",
+        "Attributes": [
+          {
+            "Key": "message.type",
+            "Value": {
+              "Type": "STRING",
+              "Value": "RECEIVED"
+            }
+          },
+          {
+            "Key": "message.id",
+            "Value": {
+              "Type": "INT64",
+              "Value": 78
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:58:12.291732174Z"
+      },
+      {
+        "Name": "message",
+        "Attributes": [
+          {
+            "Key": "message.type",
+            "Value": {
+              "Type": "STRING",
+              "Value": "RECEIVED"
+            }
+          },
+          {
+            "Key": "message.id",
+            "Value": {
+              "Type": "INT64",
+              "Value": 79
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:58:14.601733662Z"
+      },
+      {
+        "Name": "message",
+        "Attributes": [
+          {
+            "Key": "message.type",
+            "Value": {
+              "Type": "STRING",
+              "Value": "RECEIVED"
+            }
+          },
+          {
+            "Key": "message.id",
+            "Value": {
+              "Type": "INT64",
+              "Value": 80
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:58:17.071072788Z"
+      },
+      {
+        "Name": "message",
+        "Attributes": [
+          {
+            "Key": "message.type",
+            "Value": {
+              "Type": "STRING",
+              "Value": "RECEIVED"
+            }
+          },
+          {
+            "Key": "message.id",
+            "Value": {
+              "Type": "INT64",
+              "Value": 81
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:58:19.619476413Z"
+      },
+      {
+        "Name": "message",
+        "Attributes": [
+          {
+            "Key": "message.type",
+            "Value": {
+              "Type": "STRING",
+              "Value": "RECEIVED"
+            }
+          },
+          {
+            "Key": "message.id",
+            "Value": {
+              "Type": "INT64",
+              "Value": 82
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:58:21.927248622Z"
+      },
+      {
+        "Name": "message",
+        "Attributes": [
+          {
+            "Key": "message.type",
+            "Value": {
+              "Type": "STRING",
+              "Value": "RECEIVED"
+            }
+          },
+          {
+            "Key": "message.id",
+            "Value": {
+              "Type": "INT64",
+              "Value": 83
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:58:24.270083074Z"
+      },
+      {
+        "Name": "message",
+        "Attributes": [
+          {
+            "Key": "message.type",
+            "Value": {
+              "Type": "STRING",
+              "Value": "RECEIVED"
+            }
+          },
+          {
+            "Key": "message.id",
+            "Value": {
+              "Type": "INT64",
+              "Value": 84
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:58:26.55550237Z"
+      },
+      {
+        "Name": "message",
+        "Attributes": [
+          {
+            "Key": "message.type",
+            "Value": {
+              "Type": "STRING",
+              "Value": "RECEIVED"
+            }
+          },
+          {
+            "Key": "message.id",
+            "Value": {
+              "Type": "INT64",
+              "Value": 85
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:58:28.933118776Z"
+      },
+      {
+        "Name": "message",
+        "Attributes": [
+          {
+            "Key": "message.type",
+            "Value": {
+              "Type": "STRING",
+              "Value": "RECEIVED"
+            }
+          },
+          {
+            "Key": "message.id",
+            "Value": {
+              "Type": "INT64",
+              "Value": 86
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:58:31.230064451Z"
+      },
+      {
+        "Name": "message",
+        "Attributes": [
+          {
+            "Key": "message.type",
+            "Value": {
+              "Type": "STRING",
+              "Value": "RECEIVED"
+            }
+          },
+          {
+            "Key": "message.id",
+            "Value": {
+              "Type": "INT64",
+              "Value": 87
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:58:33.83601258Z"
+      },
+      {
+        "Name": "message",
+        "Attributes": [
+          {
+            "Key": "message.type",
+            "Value": {
+              "Type": "STRING",
+              "Value": "RECEIVED"
+            }
+          },
+          {
+            "Key": "message.id",
+            "Value": {
+              "Type": "INT64",
+              "Value": 88
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:58:35.658402552Z"
+      },
+      {
+        "Name": "message",
+        "Attributes": [
+          {
+            "Key": "message.type",
+            "Value": {
+              "Type": "STRING",
+              "Value": "RECEIVED"
+            }
+          },
+          {
+            "Key": "message.id",
+            "Value": {
+              "Type": "INT64",
+              "Value": 89
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:58:36.076400316Z"
+      },
+      {
+        "Name": "message",
+        "Attributes": [
+          {
+            "Key": "message.type",
+            "Value": {
+              "Type": "STRING",
+              "Value": "RECEIVED"
+            }
+          },
+          {
+            "Key": "message.id",
+            "Value": {
+              "Type": "INT64",
+              "Value": 90
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:58:41.083074542Z"
+      },
+      {
+        "Name": "message",
+        "Attributes": [
+          {
+            "Key": "message.type",
+            "Value": {
+              "Type": "STRING",
+              "Value": "RECEIVED"
+            }
+          },
+          {
+            "Key": "message.id",
+            "Value": {
+              "Type": "INT64",
+              "Value": 91
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:58:45.608812981Z"
+      },
+      {
+        "Name": "message",
+        "Attributes": [
+          {
+            "Key": "message.type",
+            "Value": {
+              "Type": "STRING",
+              "Value": "RECEIVED"
+            }
+          },
+          {
+            "Key": "message.id",
+            "Value": {
+              "Type": "INT64",
+              "Value": 92
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:58:51.177282893Z"
+      },
+      {
+        "Name": "message",
+        "Attributes": [
+          {
+            "Key": "message.type",
+            "Value": {
+              "Type": "STRING",
+              "Value": "RECEIVED"
+            }
+          },
+          {
+            "Key": "message.id",
+            "Value": {
+              "Type": "INT64",
+              "Value": 93
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:58:56.545055422Z"
+      },
+      {
+        "Name": "message",
+        "Attributes": [
+          {
+            "Key": "message.type",
+            "Value": {
+              "Type": "STRING",
+              "Value": "RECEIVED"
+            }
+          },
+          {
+            "Key": "message.id",
+            "Value": {
+              "Type": "INT64",
+              "Value": 94
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:59:01.977370353Z"
+      },
+      {
+        "Name": "message",
+        "Attributes": [
+          {
+            "Key": "message.type",
+            "Value": {
+              "Type": "STRING",
+              "Value": "RECEIVED"
+            }
+          },
+          {
+            "Key": "message.id",
+            "Value": {
+              "Type": "INT64",
+              "Value": 95
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:59:05.368902631Z"
+      },
+      {
+        "Name": "message",
+        "Attributes": [
+          {
+            "Key": "message.type",
+            "Value": {
+              "Type": "STRING",
+              "Value": "RECEIVED"
+            }
+          },
+          {
+            "Key": "message.id",
+            "Value": {
+              "Type": "INT64",
+              "Value": 96
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:59:05.433713443Z"
+      },
+      {
+        "Name": "message",
+        "Attributes": [
+          {
+            "Key": "message.type",
+            "Value": {
+              "Type": "STRING",
+              "Value": "RECEIVED"
+            }
+          },
+          {
+            "Key": "message.id",
+            "Value": {
+              "Type": "INT64",
+              "Value": 97
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:59:09.127357014Z"
+      },
+      {
+        "Name": "message",
+        "Attributes": [
+          {
+            "Key": "message.type",
+            "Value": {
+              "Type": "STRING",
+              "Value": "RECEIVED"
+            }
+          },
+          {
+            "Key": "message.id",
+            "Value": {
+              "Type": "INT64",
+              "Value": 98
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:59:09.127620501Z"
+      },
+      {
+        "Name": "message",
+        "Attributes": [
+          {
+            "Key": "message.type",
+            "Value": {
+              "Type": "STRING",
+              "Value": "RECEIVED"
+            }
+          },
+          {
+            "Key": "message.id",
+            "Value": {
+              "Type": "INT64",
+              "Value": 99
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:59:09.253104626Z"
+      }
+    ],
+    "Links": null,
+    "Status": {
+      "Code": "Ok",
+      "Description": ""
+    },
+    "DroppedAttributes": 0,
+    "DroppedEvents": 0,
+    "DroppedLinks": 0,
+    "ChildSpanCount": 0,
+    "Resource": [
+      {
+        "Key": "service.name",
+        "Value": {
+          "Type": "STRING",
+          "Value": "buildx"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.language",
+        "Value": {
+          "Type": "STRING",
+          "Value": "go"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.name",
+        "Value": {
+          "Type": "STRING",
+          "Value": "opentelemetry"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.version",
+        "Value": {
+          "Type": "STRING",
+          "Value": "1.14.0"
+        }
+      }
+    ],
+    "InstrumentationLibrary": {
+      "Name": "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc",
+      "Version": "semver:0.40.0",
+      "SchemaURL": ""
+    }
+  },
+  {
+    "Name": "moby.buildkit.v1.Control/Solve",
+    "SpanContext": {
+      "TraceID": "0555ce1903feb85770d102846273073b",
+      "SpanID": "4e11b66472979a18",
+      "TraceFlags": "00",
+      "TraceState": "",
+      "Remote": false
+    },
+    "Parent": {
+      "TraceID": "0555ce1903feb85770d102846273073b",
+      "SpanID": "d30eea87f2f0241d",
+      "TraceFlags": "00",
+      "TraceState": "",
+      "Remote": false
+    },
+    "SpanKind": 3,
+    "StartTime": "2024-01-12T14:57:40.399715629Z",
+    "EndTime": "2024-01-12T14:59:09.343026371Z",
+    "Attributes": [
+      {
+        "Key": "rpc.system",
+        "Value": {
+          "Type": "STRING",
+          "Value": "grpc"
+        }
+      },
+      {
+        "Key": "rpc.service",
+        "Value": {
+          "Type": "STRING",
+          "Value": "moby.buildkit.v1.Control"
+        }
+      },
+      {
+        "Key": "rpc.method",
+        "Value": {
+          "Type": "STRING",
+          "Value": "Solve"
+        }
+      },
+      {
+        "Key": "rpc.grpc.status_code",
+        "Value": {
+          "Type": "INT64",
+          "Value": 0
+        }
+      }
+    ],
+    "Events": [
+      {
+        "Name": "message",
+        "Attributes": [
+          {
+            "Key": "message.type",
+            "Value": {
+              "Type": "STRING",
+              "Value": "SENT"
+            }
+          },
+          {
+            "Key": "message.id",
+            "Value": {
+              "Type": "INT64",
+              "Value": 1
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:40.399725737Z"
+      },
+      {
+        "Name": "message",
+        "Attributes": [
+          {
+            "Key": "message.type",
+            "Value": {
+              "Type": "STRING",
+              "Value": "RECEIVED"
+            }
+          },
+          {
+            "Key": "message.id",
+            "Value": {
+              "Type": "INT64",
+              "Value": 1
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:59:09.342194877Z"
+      }
+    ],
+    "Links": null,
+    "Status": {
+      "Code": "Ok",
+      "Description": ""
+    },
+    "DroppedAttributes": 0,
+    "DroppedEvents": 0,
+    "DroppedLinks": 0,
+    "ChildSpanCount": 0,
+    "Resource": [
+      {
+        "Key": "service.name",
+        "Value": {
+          "Type": "STRING",
+          "Value": "buildx"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.language",
+        "Value": {
+          "Type": "STRING",
+          "Value": "go"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.name",
+        "Value": {
+          "Type": "STRING",
+          "Value": "opentelemetry"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.version",
+        "Value": {
+          "Type": "STRING",
+          "Value": "1.14.0"
+        }
+      }
+    ],
+    "InstrumentationLibrary": {
+      "Name": "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc",
+      "Version": "semver:0.40.0",
+      "SchemaURL": ""
+    }
+  },
+  {
+    "Name": "moby.buildkit.v1.Control/ListenBuildHistory",
+    "SpanContext": {
+      "TraceID": "0555ce1903feb85770d102846273073b",
+      "SpanID": "65de56cee7833b9c",
+      "TraceFlags": "00",
+      "TraceState": "",
+      "Remote": false
+    },
+    "Parent": {
+      "TraceID": "0555ce1903feb85770d102846273073b",
+      "SpanID": "d30eea87f2f0241d",
+      "TraceFlags": "00",
+      "TraceState": "",
+      "Remote": false
+    },
+    "SpanKind": 3,
+    "StartTime": "2024-01-12T14:59:09.342399118Z",
+    "EndTime": "2024-01-12T14:59:09.343277766Z",
+    "Attributes": [
+      {
+        "Key": "rpc.system",
+        "Value": {
+          "Type": "STRING",
+          "Value": "grpc"
+        }
+      },
+      {
+        "Key": "rpc.service",
+        "Value": {
+          "Type": "STRING",
+          "Value": "moby.buildkit.v1.Control"
+        }
+      },
+      {
+        "Key": "rpc.method",
+        "Value": {
+          "Type": "STRING",
+          "Value": "ListenBuildHistory"
+        }
+      },
+      {
+        "Key": "rpc.grpc.status_code",
+        "Value": {
+          "Type": "INT64",
+          "Value": 0
+        }
+      }
+    ],
+    "Events": [
+      {
+        "Name": "message",
+        "Attributes": [
+          {
+            "Key": "message.type",
+            "Value": {
+              "Type": "STRING",
+              "Value": "SENT"
+            }
+          },
+          {
+            "Key": "message.id",
+            "Value": {
+              "Type": "INT64",
+              "Value": 1
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:59:09.342530045Z"
+      }
+    ],
+    "Links": null,
+    "Status": {
+      "Code": "Ok",
+      "Description": ""
+    },
+    "DroppedAttributes": 0,
+    "DroppedEvents": 0,
+    "DroppedLinks": 0,
+    "ChildSpanCount": 0,
+    "Resource": [
+      {
+        "Key": "service.name",
+        "Value": {
+          "Type": "STRING",
+          "Value": "buildx"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.language",
+        "Value": {
+          "Type": "STRING",
+          "Value": "go"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.name",
+        "Value": {
+          "Type": "STRING",
+          "Value": "opentelemetry"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.version",
+        "Value": {
+          "Type": "STRING",
+          "Value": "1.14.0"
+        }
+      }
+    ],
+    "InstrumentationLibrary": {
+      "Name": "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc",
+      "Version": "semver:0.40.0",
+      "SchemaURL": ""
+    }
+  },
+  {
+    "Name": "bake",
+    "SpanContext": {
+      "TraceID": "0555ce1903feb85770d102846273073b",
+      "SpanID": "d30eea87f2f0241d",
+      "TraceFlags": "00",
+      "TraceState": "",
+      "Remote": false
+    },
+    "Parent": {
+      "TraceID": "00000000000000000000000000000000",
+      "SpanID": "0000000000000000",
+      "TraceFlags": "00",
+      "TraceState": "",
+      "Remote": false
+    },
+    "SpanKind": 1,
+    "StartTime": "2024-01-12T14:57:39.717700035Z",
+    "EndTime": "2024-01-12T14:59:09.3441603Z",
+    "Attributes": [
+      {
+        "Key": "command",
+        "Value": {
+          "Type": "STRING",
+          "Value": "/usr/local/lib/docker/cli-plugins/docker-buildx buildx bake --set *.args.TEST_BASE_TYPE=alpine test-go"
+        }
+      }
+    ],
+    "Events": null,
+    "Links": null,
+    "Status": {
+      "Code": "Ok",
+      "Description": ""
+    },
+    "DroppedAttributes": 0,
+    "DroppedEvents": 0,
+    "DroppedLinks": 0,
+    "ChildSpanCount": 0,
+    "Resource": [
+      {
+        "Key": "service.name",
+        "Value": {
+          "Type": "STRING",
+          "Value": "buildx"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.language",
+        "Value": {
+          "Type": "STRING",
+          "Value": "go"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.name",
+        "Value": {
+          "Type": "STRING",
+          "Value": "opentelemetry"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.version",
+        "Value": {
+          "Type": "STRING",
+          "Value": "1.14.0"
+        }
+      }
+    ],
+    "InstrumentationLibrary": {
+      "Name": "go.opentelemetry.io/otel/sdk/tracer",
+      "Version": "",
+      "SchemaURL": ""
+    }
+  },
+  {
+    "Name": "[test-go 2/2] RUN --mount=type=cache,target=/pkg-cache,sharing=locked --mount=type=cache,target=/root/.cache ./test-go.bats",
+    "SpanContext": {
+      "TraceID": "0555ce1903feb85770d102846273073b",
+      "SpanID": "3bc0c8dfc74903a6",
+      "TraceFlags": "01",
+      "TraceState": "",
+      "Remote": false
+    },
+    "Parent": {
+      "TraceID": "0555ce1903feb85770d102846273073b",
+      "SpanID": "f1bb978313184255",
+      "TraceFlags": "01",
+      "TraceState": "",
+      "Remote": false
+    },
+    "SpanKind": 1,
+    "StartTime": "2024-01-12T14:57:43.296744167Z",
+    "EndTime": "2024-01-12T14:59:09.253431509Z",
+    "Attributes": [
+      {
+        "Key": "vertex",
+        "Value": {
+          "Type": "STRING",
+          "Value": "sha256:0b3c6531279269810071b85e6f8ffba4ae61f6573d281bd79e63c6456daca816"
+        }
+      }
+    ],
+    "Events": [
+      {
+        "Name": "ExecOp started",
+        "Attributes": null,
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:43.296759245Z"
+      },
+      {
+        "Name": "Container created",
+        "Attributes": null,
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:43.396638353Z"
+      },
+      {
+        "Name": "Container started",
+        "Attributes": null,
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:57:43.397050883Z"
+      },
+      {
+        "Name": "Container exited",
+        "Attributes": [
+          {
+            "Key": "exit.code",
+            "Value": {
+              "Type": "INT64",
+              "Value": 0
+            }
+          }
+        ],
+        "DroppedAttributeCount": 0,
+        "Time": "2024-01-12T14:59:09.134015538Z"
+      }
+    ],
+    "Links": null,
+    "Status": {
+      "Code": "Unset",
+      "Description": ""
+    },
+    "DroppedAttributes": 0,
+    "DroppedEvents": 0,
+    "DroppedLinks": 0,
+    "ChildSpanCount": 0,
+    "Resource": [
+      {
+        "Key": "service.name",
+        "Value": {
+          "Type": "STRING",
+          "Value": "dockerd"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.language",
+        "Value": {
+          "Type": "STRING",
+          "Value": "go"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.name",
+        "Value": {
+          "Type": "STRING",
+          "Value": "opentelemetry"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.version",
+        "Value": {
+          "Type": "STRING",
+          "Value": "1.19.0"
+        }
+      }
+    ],
+    "InstrumentationLibrary": {
+      "Name": "go.opentelemetry.io/otel/sdk/tracer",
+      "Version": "",
+      "SchemaURL": ""
+    }
+  },
+  {
+    "Name": "moby.buildkit.v1.Control/Status",
+    "SpanContext": {
+      "TraceID": "0555ce1903feb85770d102846273073b",
+      "SpanID": "b6791e17d0cf1c4e",
+      "TraceFlags": "01",
+      "TraceState": "",
+      "Remote": false
+    },
+    "Parent": {
+      "TraceID": "0555ce1903feb85770d102846273073b",
+      "SpanID": "52ac568c2da201b8",
+      "TraceFlags": "01",
+      "TraceState": "",
+      "Remote": true
+    },
+    "SpanKind": 2,
+    "StartTime": "2024-01-12T14:57:40.400083576Z",
+    "EndTime": "2024-01-12T14:59:09.258626543Z",
+    "Attributes": [
+      {
+        "Key": "rpc.system",
+        "Value": {
+          "Type": "STRING",
+          "Value": "grpc"
+        }
+      },
+      {
+        "Key": "rpc.service",
+        "Value": {
+          "Type": "STRING",
+          "Value": "moby.buildkit.v1.Control"
+        }
+      },
+      {
+        "Key": "rpc.method",
+        "Value": {
+          "Type": "STRING",
+          "Value": "Status"
+        }
+      },
+      {
+        "Key": "rpc.grpc.status_code",
+        "Value": {
+          "Type": "INT64",
+          "Value": 0
+        }
+      }
+    ],
+    "Events": null,
+    "Links": null,
+    "Status": {
+      "Code": "Unset",
+      "Description": ""
+    },
+    "DroppedAttributes": 0,
+    "DroppedEvents": 0,
+    "DroppedLinks": 0,
+    "ChildSpanCount": 0,
+    "Resource": [
+      {
+        "Key": "service.name",
+        "Value": {
+          "Type": "STRING",
+          "Value": "dockerd"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.language",
+        "Value": {
+          "Type": "STRING",
+          "Value": "go"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.name",
+        "Value": {
+          "Type": "STRING",
+          "Value": "opentelemetry"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.version",
+        "Value": {
+          "Type": "STRING",
+          "Value": "1.19.0"
+        }
+      }
+    ],
+    "InstrumentationLibrary": {
+      "Name": "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc",
+      "Version": "0.45.0",
+      "SchemaURL": ""
+    }
+  },
+  {
+    "Name": "moby.buildkit.v1.Control/Solve",
+    "SpanContext": {
+      "TraceID": "0555ce1903feb85770d102846273073b",
+      "SpanID": "f1bb978313184255",
+      "TraceFlags": "01",
+      "TraceState": "",
+      "Remote": false
+    },
+    "Parent": {
+      "TraceID": "0555ce1903feb85770d102846273073b",
+      "SpanID": "4e11b66472979a18",
+      "TraceFlags": "01",
+      "TraceState": "",
+      "Remote": true
+    },
+    "SpanKind": 2,
+    "StartTime": "2024-01-12T14:57:40.40011856Z",
+    "EndTime": "2024-01-12T14:59:09.342576396Z",
+    "Attributes": [
+      {
+        "Key": "rpc.system",
+        "Value": {
+          "Type": "STRING",
+          "Value": "grpc"
+        }
+      },
+      {
+        "Key": "rpc.service",
+        "Value": {
+          "Type": "STRING",
+          "Value": "moby.buildkit.v1.Control"
+        }
+      },
+      {
+        "Key": "rpc.method",
+        "Value": {
+          "Type": "STRING",
+          "Value": "Solve"
+        }
+      },
+      {
+        "Key": "rpc.grpc.status_code",
+        "Value": {
+          "Type": "INT64",
+          "Value": 0
+        }
+      }
+    ],
+    "Events": null,
+    "Links": null,
+    "Status": {
+      "Code": "Unset",
+      "Description": ""
+    },
+    "DroppedAttributes": 0,
+    "DroppedEvents": 0,
+    "DroppedLinks": 0,
+    "ChildSpanCount": 0,
+    "Resource": [
+      {
+        "Key": "service.name",
+        "Value": {
+          "Type": "STRING",
+          "Value": "dockerd"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.language",
+        "Value": {
+          "Type": "STRING",
+          "Value": "go"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.name",
+        "Value": {
+          "Type": "STRING",
+          "Value": "opentelemetry"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.version",
+        "Value": {
+          "Type": "STRING",
+          "Value": "1.19.0"
+        }
+      }
+    ],
+    "InstrumentationLibrary": {
+      "Name": "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc",
+      "Version": "0.45.0",
+      "SchemaURL": ""
+    }
+  },
+  {
+    "Name": "moby.buildkit.v1.Control/ListenBuildHistory",
+    "SpanContext": {
+      "TraceID": "0555ce1903feb85770d102846273073b",
+      "SpanID": "9fbd0e803dc50ab6",
+      "TraceFlags": "01",
+      "TraceState": "",
+      "Remote": false
+    },
+    "Parent": {
+      "TraceID": "0555ce1903feb85770d102846273073b",
+      "SpanID": "65de56cee7833b9c",
+      "TraceFlags": "01",
+      "TraceState": "",
+      "Remote": true
+    },
+    "SpanKind": 2,
+    "StartTime": "2024-01-12T14:59:09.342907117Z",
+    "EndTime": "2024-01-12T14:59:09.342945573Z",
+    "Attributes": [
+      {
+        "Key": "rpc.system",
+        "Value": {
+          "Type": "STRING",
+          "Value": "grpc"
+        }
+      },
+      {
+        "Key": "rpc.service",
+        "Value": {
+          "Type": "STRING",
+          "Value": "moby.buildkit.v1.Control"
+        }
+      },
+      {
+        "Key": "rpc.method",
+        "Value": {
+          "Type": "STRING",
+          "Value": "ListenBuildHistory"
+        }
+      },
+      {
+        "Key": "rpc.grpc.status_code",
+        "Value": {
+          "Type": "INT64",
+          "Value": 0
+        }
+      }
+    ],
+    "Events": null,
+    "Links": null,
+    "Status": {
+      "Code": "Unset",
+      "Description": ""
+    },
+    "DroppedAttributes": 0,
+    "DroppedEvents": 0,
+    "DroppedLinks": 0,
+    "ChildSpanCount": 0,
+    "Resource": [
+      {
+        "Key": "service.name",
+        "Value": {
+          "Type": "STRING",
+          "Value": "dockerd"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.language",
+        "Value": {
+          "Type": "STRING",
+          "Value": "go"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.name",
+        "Value": {
+          "Type": "STRING",
+          "Value": "opentelemetry"
+        }
+      },
+      {
+        "Key": "telemetry.sdk.version",
+        "Value": {
+          "Type": "STRING",
+          "Value": "1.19.0"
+        }
+      }
+    ],
+    "InstrumentationLibrary": {
+      "Name": "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc",
+      "Version": "0.45.0",
+      "SchemaURL": ""
+    }
+  }
+]
\ No newline at end of file
diff -pruN 0.19.3+ds1-4/util/otelutil/jaeger/convert.go 0.21.3-0ubuntu1/util/otelutil/jaeger/convert.go
--- 0.19.3+ds1-4/util/otelutil/jaeger/convert.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/util/otelutil/jaeger/convert.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,224 @@
+package jaeger
+
+import (
+	"encoding/json"
+	"time"
+
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/codes"
+	"go.opentelemetry.io/otel/sdk/resource"
+	tracesdk "go.opentelemetry.io/otel/sdk/trace"
+	"go.opentelemetry.io/otel/trace"
+)
+
+const (
+	keyInstrumentationLibraryName    = "otel.library.name"
+	keyInstrumentationLibraryVersion = "otel.library.version"
+	keyError                         = "error"
+	keySpanKind                      = "span.kind"
+	keyStatusCode                    = "otel.status_code"
+	keyStatusMessage                 = "otel.status_description"
+	keyDroppedAttributeCount         = "otel.event.dropped_attributes_count"
+	keyEventName                     = "event"
+)
+
+func ResourceToProcess(res *resource.Resource, defaultServiceName string) Process {
+	var process Process
+	var serviceName attribute.KeyValue
+	if res != nil {
+		for iter := res.Iter(); iter.Next(); {
+			if iter.Attribute().Key == attribute.Key("service.name") {
+				serviceName = iter.Attribute()
+				// Don't convert service.name into tag.
+				continue
+			}
+			if tag := keyValueToJaegerTag(iter.Attribute()); tag != nil {
+				process.Tags = append(process.Tags, *tag)
+			}
+		}
+	}
+
+	// If no service.name is contained in a Span's Resource,
+	// that field MUST be populated from the default Resource.
+	if serviceName.Value.AsString() == "" {
+		serviceName = attribute.Key("service.version").String(defaultServiceName)
+	}
+	process.ServiceName = serviceName.Value.AsString()
+
+	return process
+}
+
+func ConvertSpan(ss tracesdk.ReadOnlySpan) Span {
+	attr := ss.Attributes()
+	tags := make([]KeyValue, 0, len(attr))
+	for _, kv := range attr {
+		tag := keyValueToJaegerTag(kv)
+		if tag != nil {
+			tags = append(tags, *tag)
+		}
+	}
+
+	if is := ss.InstrumentationScope(); is.Name != "" {
+		tags = append(tags, getStringTag(keyInstrumentationLibraryName, is.Name))
+		if is.Version != "" {
+			tags = append(tags, getStringTag(keyInstrumentationLibraryVersion, is.Version))
+		}
+	}
+
+	if ss.SpanKind() != trace.SpanKindInternal {
+		tags = append(tags,
+			getStringTag(keySpanKind, ss.SpanKind().String()),
+		)
+	}
+
+	if ss.Status().Code != codes.Unset {
+		switch ss.Status().Code {
+		case codes.Ok:
+			tags = append(tags, getStringTag(keyStatusCode, "OK"))
+		case codes.Error:
+			tags = append(tags, getBoolTag(keyError, true))
+			tags = append(tags, getStringTag(keyStatusCode, "ERROR"))
+		}
+		if ss.Status().Description != "" {
+			tags = append(tags, getStringTag(keyStatusMessage, ss.Status().Description))
+		}
+	}
+
+	var logs []Log
+	for _, a := range ss.Events() {
+		nTags := len(a.Attributes)
+		if a.Name != "" {
+			nTags++
+		}
+		if a.DroppedAttributeCount != 0 {
+			nTags++
+		}
+		fields := make([]KeyValue, 0, nTags)
+		if a.Name != "" {
+			// If an event contains an attribute with the same key, it needs
+			// to be given precedence and overwrite this.
+			fields = append(fields, getStringTag(keyEventName, a.Name))
+		}
+		for _, kv := range a.Attributes {
+			tag := keyValueToJaegerTag(kv)
+			if tag != nil {
+				fields = append(fields, *tag)
+			}
+		}
+		if a.DroppedAttributeCount != 0 {
+			fields = append(fields, getInt64Tag(keyDroppedAttributeCount, int64(a.DroppedAttributeCount)))
+		}
+		logs = append(logs, Log{
+			Timestamp: timeAsEpochMicroseconds(a.Time),
+			Fields:    fields,
+		})
+	}
+
+	var refs []Reference
+	for _, link := range ss.Links() {
+		refs = append(refs, Reference{
+			RefType: FollowsFrom,
+			TraceID: TraceID(link.SpanContext.TraceID().String()),
+			SpanID:  SpanID(link.SpanContext.SpanID().String()),
+		})
+	}
+	refs = append(refs, Reference{
+		RefType: ChildOf,
+		TraceID: TraceID(ss.Parent().TraceID().String()),
+		SpanID:  SpanID(ss.Parent().SpanID().String()),
+	})
+
+	return Span{
+		TraceID:       TraceID(ss.SpanContext().TraceID().String()),
+		SpanID:        SpanID(ss.SpanContext().SpanID().String()),
+		Flags:         uint32(ss.SpanContext().TraceFlags()),
+		OperationName: ss.Name(),
+		References:    refs,
+		StartTime:     timeAsEpochMicroseconds(ss.StartTime()),
+		Duration:      durationAsMicroseconds(ss.EndTime().Sub(ss.StartTime())),
+		Tags:          tags,
+		Logs:          logs,
+	}
+}
+
+func keyValueToJaegerTag(keyValue attribute.KeyValue) *KeyValue {
+	var tag *KeyValue
+	switch keyValue.Value.Type() {
+	case attribute.STRING:
+		s := keyValue.Value.AsString()
+		tag = &KeyValue{
+			Key:   string(keyValue.Key),
+			Type:  StringType,
+			Value: s,
+		}
+	case attribute.BOOL:
+		b := keyValue.Value.AsBool()
+		tag = &KeyValue{
+			Key:   string(keyValue.Key),
+			Type:  BoolType,
+			Value: b,
+		}
+	case attribute.INT64:
+		i := keyValue.Value.AsInt64()
+		tag = &KeyValue{
+			Key:   string(keyValue.Key),
+			Type:  Int64Type,
+			Value: i,
+		}
+	case attribute.FLOAT64:
+		f := keyValue.Value.AsFloat64()
+		tag = &KeyValue{
+			Key:   string(keyValue.Key),
+			Type:  Float64Type,
+			Value: f,
+		}
+	case attribute.BOOLSLICE,
+		attribute.INT64SLICE,
+		attribute.FLOAT64SLICE,
+		attribute.STRINGSLICE:
+		data, _ := json.Marshal(keyValue.Value.AsInterface())
+		a := (string)(data)
+		tag = &KeyValue{
+			Key:   string(keyValue.Key),
+			Type:  StringType,
+			Value: a,
+		}
+	}
+	return tag
+}
+
+func getInt64Tag(k string, i int64) KeyValue {
+	return KeyValue{
+		Key:   k,
+		Type:  Int64Type,
+		Value: i,
+	}
+}
+
+func getStringTag(k, s string) KeyValue {
+	return KeyValue{
+		Key:   k,
+		Type:  StringType,
+		Value: s,
+	}
+}
+
+func getBoolTag(k string, b bool) KeyValue {
+	return KeyValue{
+		Key:   k,
+		Type:  BoolType,
+		Value: b,
+	}
+}
+
+// timeAsEpochMicroseconds converts time.Time to microseconds since epoch,
+// which is the format the StartTime field is stored in the Span.
+func timeAsEpochMicroseconds(t time.Time) uint64 {
+	return uint64(t.UnixNano() / 1000)
+}
+
+// durationAsMicroseconds converts time.Duration to microseconds,
+// which is the format the Duration field is stored in the Span.
+func durationAsMicroseconds(d time.Duration) uint64 {
+	return uint64(d.Nanoseconds() / 1000)
+}
diff -pruN 0.19.3+ds1-4/util/otelutil/jaeger/model.go 0.21.3-0ubuntu1/util/otelutil/jaeger/model.go
--- 0.19.3+ds1-4/util/otelutil/jaeger/model.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/util/otelutil/jaeger/model.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,102 @@
+package jaeger
+
+// ReferenceType is the reference type of one span to another
+type ReferenceType string
+
+// TraceID is the shared trace ID of all spans in the trace.
+type TraceID string
+
+// SpanID is the id of a span
+type SpanID string
+
+// ProcessID is a hashed value of the Process struct that is unique within the trace.
+type ProcessID string
+
+// ValueType is the type of a value stored in KeyValue struct.
+type ValueType string
+
+const (
+	// ChildOf means a span is the child of another span
+	ChildOf ReferenceType = "CHILD_OF"
+	// FollowsFrom means a span follows from another span
+	FollowsFrom ReferenceType = "FOLLOWS_FROM"
+
+	// StringType indicates a string value stored in KeyValue
+	StringType ValueType = "string"
+	// BoolType indicates a Boolean value stored in KeyValue
+	BoolType ValueType = "bool"
+	// Int64Type indicates a 64bit signed integer value stored in KeyValue
+	Int64Type ValueType = "int64"
+	// Float64Type indicates a 64bit float value stored in KeyValue
+	Float64Type ValueType = "float64"
+	// BinaryType indicates an arbitrary byte array stored in KeyValue
+	BinaryType ValueType = "binary"
+)
+
+// Trace is a list of spans
+type Trace struct {
+	TraceID   TraceID               `json:"traceID"`
+	Spans     []Span                `json:"spans"`
+	Processes map[ProcessID]Process `json:"processes"`
+	Warnings  []string              `json:"warnings"`
+}
+
+// Span is a span denoting a piece of work in some infrastructure
+// When converting to UI model, ParentSpanID and Process should be dereferenced into
+// References and ProcessID, respectively.
+// When converting to ES model, ProcessID and Warnings should be omitted. Even if
+// included, ES with dynamic settings off will automatically ignore unneeded fields.
+type Span struct {
+	TraceID       TraceID     `json:"traceID"`
+	SpanID        SpanID      `json:"spanID"`
+	ParentSpanID  SpanID      `json:"parentSpanID,omitempty"` // deprecated
+	Flags         uint32      `json:"flags,omitempty"`
+	OperationName string      `json:"operationName"`
+	References    []Reference `json:"references"`
+	StartTime     uint64      `json:"startTime"` // microseconds since Unix epoch
+	Duration      uint64      `json:"duration"`  // microseconds
+	Tags          []KeyValue  `json:"tags"`
+	Logs          []Log       `json:"logs"`
+	ProcessID     ProcessID   `json:"processID,omitempty"`
+	Process       *Process    `json:"process,omitempty"`
+	Warnings      []string    `json:"warnings"`
+}
+
+// Reference is a reference from one span to another
+type Reference struct {
+	RefType ReferenceType `json:"refType"`
+	TraceID TraceID       `json:"traceID"`
+	SpanID  SpanID        `json:"spanID"`
+}
+
+// Process is the process emitting a set of spans
+type Process struct {
+	ServiceName string     `json:"serviceName"`
+	Tags        []KeyValue `json:"tags"`
+}
+
+// Log is a log emitted in a span
+type Log struct {
+	Timestamp uint64     `json:"timestamp"`
+	Fields    []KeyValue `json:"fields"`
+}
+
+// KeyValue is a key-value pair with typed value.
+type KeyValue struct {
+	Key   string      `json:"key"`
+	Type  ValueType   `json:"type,omitempty"`
+	Value interface{} `json:"value"`
+}
+
+// DependencyLink shows dependencies between services
+type DependencyLink struct {
+	Parent    string `json:"parent"`
+	Child     string `json:"child"`
+	CallCount uint64 `json:"callCount"`
+}
+
+// Operation defines the data in the operation response when query operation by service and span kind
+type Operation struct {
+	Name     string `json:"name"`
+	SpanKind string `json:"spanKind"`
+}
diff -pruN 0.19.3+ds1-4/util/otelutil/jaeger.go 0.21.3-0ubuntu1/util/otelutil/jaeger.go
--- 0.19.3+ds1-4/util/otelutil/jaeger.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/util/otelutil/jaeger.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,45 @@
+package otelutil
+
+import (
+	"fmt"
+
+	"github.com/docker/buildx/util/otelutil/jaeger"
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/sdk/resource"
+)
+
+type JaegerData struct {
+	Data []jaeger.Trace `json:"data"`
+}
+
+// JaegerData return Jaeger data compatible with ui import feature.
+// https://github.com/jaegertracing/jaeger-ui/issues/381#issuecomment-494150826
+func (s Spans) JaegerData() JaegerData {
+	roSpans := s.ReadOnlySpans()
+
+	// fetch default service.name from default resource for backup
+	var defaultServiceName string
+	defaultResource := resource.Default()
+	if value, exists := defaultResource.Set().Value(attribute.Key("service.name")); exists {
+		defaultServiceName = value.AsString()
+	}
+
+	data := jaeger.Trace{
+		TraceID:   jaeger.TraceID(roSpans[0].SpanContext().TraceID().String()),
+		Processes: make(map[jaeger.ProcessID]jaeger.Process),
+		Spans:     []jaeger.Span{},
+	}
+	for i := range roSpans {
+		ss := roSpans[i]
+		pid := jaeger.ProcessID(fmt.Sprintf("p%d", i))
+		data.Processes[pid] = jaeger.ResourceToProcess(ss.Resource(), defaultServiceName)
+		span := jaeger.ConvertSpan(ss)
+		span.Process = nil
+		span.ProcessID = pid
+		data.Spans = append(data.Spans, span)
+	}
+
+	return JaegerData{
+		Data: []jaeger.Trace{data},
+	}
+}
diff -pruN 0.19.3+ds1-4/util/otelutil/jaeger_test.go 0.21.3-0ubuntu1/util/otelutil/jaeger_test.go
--- 0.19.3+ds1-4/util/otelutil/jaeger_test.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/util/otelutil/jaeger_test.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,27 @@
+package otelutil
+
+import (
+	"bytes"
+	"encoding/json"
+	"os"
+	"testing"
+
+	"github.com/stretchr/testify/require"
+)
+
+const jaegerFixture = "./fixtures/jaeger.json"
+
+func TestJaegerData(t *testing.T) {
+	dt, err := os.ReadFile(bktracesFixture)
+	require.NoError(t, err)
+
+	spanStubs, err := ParseSpanStubs(bytes.NewReader(dt))
+	require.NoError(t, err)
+
+	trace := spanStubs.JaegerData()
+	dtJaegerTrace, err := json.MarshalIndent(trace, "", "  ")
+	require.NoError(t, err)
+	dtJaeger, err := os.ReadFile(jaegerFixture)
+	require.NoError(t, err)
+	require.Equal(t, string(dtJaeger), string(dtJaegerTrace))
+}
diff -pruN 0.19.3+ds1-4/util/otelutil/span.go 0.21.3-0ubuntu1/util/otelutil/span.go
--- 0.19.3+ds1-4/util/otelutil/span.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/util/otelutil/span.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,491 @@
+package otelutil
+
+import (
+	"bytes"
+	"encoding/hex"
+	"encoding/json"
+	"fmt"
+	"io"
+	"reflect"
+	"time"
+
+	"github.com/pkg/errors"
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/sdk/instrumentation"
+	"go.opentelemetry.io/otel/sdk/resource"
+	tracesdk "go.opentelemetry.io/otel/sdk/trace"
+	"go.opentelemetry.io/otel/trace"
+)
+
+// Span is a type similar to otel's SpanStub, but with the correct types needed
+// for handling marshaling and unmarshalling.
+type Span struct {
+	// Name is the name of a specific span
+	Name string
+	// SpanContext is the unique SpanContext that identifies the span
+	SpanContext trace.SpanContext
+	// Parent is the unique SpanContext that identifies the parent of the span.
+	// If the span has no parent, this span context will be invalid.
+	Parent trace.SpanContext
+	// SpanKind is the role the span plays in a Trace
+	SpanKind trace.SpanKind
+	// StartTime is the time the span started recording
+	StartTime time.Time
+	// EndTime returns the time the span stopped recording
+	EndTime time.Time
+	// Attributes are the defining attributes of a span
+	Attributes []attribute.KeyValue
+	// Events are all the events that occurred within the span
+	Events []tracesdk.Event
+	// Links are all the links the span has to other spans
+	Links []tracesdk.Link
+	// Status is the span status
+	Status tracesdk.Status
+	// DroppedAttributes is the number of attributes dropped by the span due to
+	// a limit being reached
+	DroppedAttributes int
+	// DroppedEvents is the number of attributes dropped by the span due to a
+	// limit being reached
+	DroppedEvents int
+	// DroppedLinks is the number of links dropped by the span due to a limit
+	// being reached
+	DroppedLinks int
+	// ChildSpanCount is the count of spans that consider the span a direct
+	// parent
+	ChildSpanCount int
+	// Resource is the information about the entity that produced the span
+	// We have to change this type from the otel type to make this struct
+	// marshallable
+	Resource []attribute.KeyValue
+	// InstrumentationLibrary is information about the library that produced
+	// the span
+	//nolint:staticcheck
+	InstrumentationLibrary instrumentation.Library
+}
+
+type Spans []Span
+
+// Len returns the length of the Spans.
+func (s Spans) Len() int {
+	return len(s)
+}
+
+// ReadOnlySpans returns a list of tracesdk.ReadOnlySpan from span stubs.
+func (s Spans) ReadOnlySpans() []tracesdk.ReadOnlySpan {
+	roSpans := make([]tracesdk.ReadOnlySpan, len(s))
+	for i := range s {
+		roSpans[i] = s[i].Snapshot()
+	}
+	return roSpans
+}
+
+// ParseSpanStubs parses BuildKit trace data into a list of SpanStubs.
+func ParseSpanStubs(rdr io.Reader) (Spans, error) {
+	var spanStubs []Span
+	decoder := json.NewDecoder(rdr)
+	for {
+		var span Span
+		if err := decoder.Decode(&span); err == io.EOF {
+			break
+		} else if err != nil {
+			return nil, errors.Wrapf(err, "error decoding JSON")
+		}
+		spanStubs = append(spanStubs, span)
+	}
+	return spanStubs, nil
+}
+
+// spanData is data that we need to unmarshal in custom ways.
+type spanData struct {
+	Name              string
+	SpanContext       spanContext
+	Parent            spanContext
+	SpanKind          trace.SpanKind
+	StartTime         time.Time
+	EndTime           time.Time
+	Attributes        []keyValue
+	Events            []event
+	Links             []link
+	Status            tracesdk.Status
+	DroppedAttributes int
+	DroppedEvents     int
+	DroppedLinks      int
+	ChildSpanCount    int
+	Resource          []keyValue // change this type from the otel type to make this struct marshallable
+	//nolint:staticcheck
+	InstrumentationLibrary instrumentation.Library
+}
+
+// spanContext is a custom type used to unmarshal otel SpanContext correctly.
+type spanContext struct {
+	TraceID    string
+	SpanID     string
+	TraceFlags string
+	TraceState string // TODO: implement, currently dropped
+	Remote     bool
+}
+
+// event is a custom type used to unmarshal otel Event correctly.
+type event struct {
+	Name                  string
+	Attributes            []keyValue
+	DroppedAttributeCount int
+	Time                  time.Time
+}
+
+// link is a custom type used to unmarshal otel Link correctly.
+type link struct {
+	SpanContext           spanContext
+	Attributes            []keyValue
+	DroppedAttributeCount int
+}
+
+// keyValue is a custom type used to unmarshal otel KeyValue correctly.
+type keyValue struct {
+	Key   string
+	Value value
+}
+
+// value is a custom type used to unmarshal otel Value correctly.
+type value struct {
+	Type  string
+	Value interface{}
+}
+
+// UnmarshalJSON implements json.Unmarshaler for Span which allows correctly
+// retrieving attribute.KeyValue values.
+func (s *Span) UnmarshalJSON(data []byte) error {
+	var sd spanData
+	if err := json.NewDecoder(bytes.NewReader(data)).Decode(&sd); err != nil {
+		return errors.Wrap(err, "unable to decode to spanData")
+	}
+
+	s.Name = sd.Name
+	s.SpanKind = sd.SpanKind
+	s.StartTime = sd.StartTime
+	s.EndTime = sd.EndTime
+	s.Status = sd.Status
+	s.DroppedAttributes = sd.DroppedAttributes
+	s.DroppedEvents = sd.DroppedEvents
+	s.DroppedLinks = sd.DroppedLinks
+	s.ChildSpanCount = sd.ChildSpanCount
+	s.InstrumentationLibrary = sd.InstrumentationLibrary
+
+	spanCtx, err := sd.SpanContext.asTraceSpanContext()
+	if err != nil {
+		return errors.Wrap(err, "unable to decode spanCtx")
+	}
+	s.SpanContext = spanCtx
+
+	parent, err := sd.Parent.asTraceSpanContext()
+	if err != nil {
+		return errors.Wrap(err, "unable to decode parent")
+	}
+	s.Parent = parent
+
+	var attributes []attribute.KeyValue
+	for _, a := range sd.Attributes {
+		kv, err := a.asAttributeKeyValue()
+		if err != nil {
+			return errors.Wrapf(err, "unable to decode attribute (%s)", a.Key)
+		}
+		attributes = append(attributes, kv)
+	}
+	s.Attributes = attributes
+
+	var events []tracesdk.Event
+	for _, e := range sd.Events {
+		var eventAttributes []attribute.KeyValue
+		for _, a := range e.Attributes {
+			kv, err := a.asAttributeKeyValue()
+			if err != nil {
+				return errors.Wrapf(err, "unable to decode event attribute (%s)", a.Key)
+			}
+			eventAttributes = append(eventAttributes, kv)
+		}
+		events = append(events, tracesdk.Event{
+			Name:                  e.Name,
+			Attributes:            eventAttributes,
+			DroppedAttributeCount: e.DroppedAttributeCount,
+			Time:                  e.Time,
+		})
+	}
+	s.Events = events
+
+	var links []tracesdk.Link
+	for _, l := range sd.Links {
+		linkSpanCtx, err := l.SpanContext.asTraceSpanContext()
+		if err != nil {
+			return errors.Wrap(err, "unable to decode linkSpanCtx")
+		}
+		var linkAttributes []attribute.KeyValue
+		for _, a := range l.Attributes {
+			kv, err := a.asAttributeKeyValue()
+			if err != nil {
+				return errors.Wrapf(err, "unable to decode link attribute (%s)", a.Key)
+			}
+			linkAttributes = append(linkAttributes, kv)
+		}
+		links = append(links, tracesdk.Link{
+			SpanContext:           linkSpanCtx,
+			Attributes:            linkAttributes,
+			DroppedAttributeCount: l.DroppedAttributeCount,
+		})
+	}
+	s.Links = links
+
+	var resources []attribute.KeyValue
+	for _, r := range sd.Resource {
+		kv, err := r.asAttributeKeyValue()
+		if err != nil {
+			return errors.Wrapf(err, "unable to decode resource (%s)", r.Key)
+		}
+		resources = append(resources, kv)
+	}
+	s.Resource = resources
+
+	return nil
+}
+
+// asTraceSpanContext converts the internal spanContext representation to an
+// otel one.
+func (sc *spanContext) asTraceSpanContext() (trace.SpanContext, error) {
+	traceID, err := traceIDFromHex(sc.TraceID)
+	if err != nil {
+		return trace.SpanContext{}, errors.Wrap(err, "unable to parse trace id")
+	}
+	spanID, err := spanIDFromHex(sc.SpanID)
+	if err != nil {
+		return trace.SpanContext{}, errors.Wrap(err, "unable to parse span id")
+	}
+	traceFlags := trace.TraceFlags(0x00)
+	if sc.TraceFlags == "01" {
+		traceFlags = trace.TraceFlags(0x01)
+	}
+	config := trace.SpanContextConfig{
+		TraceID:    traceID,
+		SpanID:     spanID,
+		TraceFlags: traceFlags,
+		Remote:     sc.Remote,
+	}
+	return trace.NewSpanContext(config), nil
+}
+
+// asAttributeKeyValue converts the internal keyValue representation to an
+// otel one.
+func (kv *keyValue) asAttributeKeyValue() (attribute.KeyValue, error) {
+	// value types get encoded as string
+	switch kv.Value.Type {
+	case attribute.INVALID.String():
+		return attribute.KeyValue{}, errors.New("invalid value type")
+	case attribute.BOOL.String():
+		return attribute.Bool(kv.Key, kv.Value.Value.(bool)), nil
+	case attribute.INT64.String():
+		// value could be int64 or float64, so handle both cases (float64 comes
+		// from json unmarshal)
+		var v int64
+		switch i := kv.Value.Value.(type) {
+		case int64:
+			v = i
+		case float64:
+			v = int64(i)
+		}
+		return attribute.Int64(kv.Key, v), nil
+	case attribute.FLOAT64.String():
+		return attribute.Float64(kv.Key, kv.Value.Value.(float64)), nil
+	case attribute.STRING.String():
+		return attribute.String(kv.Key, kv.Value.Value.(string)), nil
+	case attribute.BOOLSLICE.String():
+		return attribute.BoolSlice(kv.Key, kv.Value.Value.([]bool)), nil
+	case attribute.INT64SLICE.String():
+		// handle both float64 and int64 (float64 comes from json unmarshal)
+		var v []int64
+		switch sli := kv.Value.Value.(type) {
+		case []int64:
+			v = sli
+		case []float64:
+			for i := range sli {
+				v = append(v, int64(sli[i]))
+			}
+		}
+		return attribute.Int64Slice(kv.Key, v), nil
+	case attribute.FLOAT64SLICE.String():
+		return attribute.Float64Slice(kv.Key, kv.Value.Value.([]float64)), nil
+	case attribute.STRINGSLICE.String():
+		var strSli []string
+		// sometimes we can get an []interface{} instead of a []string, so
+		// always cast to []string if that happens.
+		switch sli := kv.Value.Value.(type) {
+		case []string:
+			strSli = sli
+		case []interface{}:
+			for i := range sli {
+				var v string
+				// best case we have a string, otherwise, cast it using
+				// fmt.Sprintf
+				if str, ok := sli[i].(string); ok {
+					v = str
+				} else {
+					v = fmt.Sprintf("%v", sli[i])
+				}
+				// add the string to the slice
+				strSli = append(strSli, v)
+			}
+		default:
+			return attribute.KeyValue{}, errors.Errorf("got unsupported type %q for %s", reflect.ValueOf(kv.Value.Value).Kind(), attribute.STRINGSLICE.String())
+		}
+		return attribute.StringSlice(kv.Key, strSli), nil
+	default:
+		return attribute.KeyValue{}, errors.Errorf("unknown value type %s", kv.Value.Type)
+	}
+}
+
+// traceIDFromHex returns a TraceID from a hex string if it is compliant with
+// the W3C trace-context specification and removes the validity check.
+// https://www.w3.org/TR/trace-context/#trace-id
+func traceIDFromHex(h string) (trace.TraceID, error) {
+	t := trace.TraceID{}
+	if len(h) != 32 {
+		return t, errors.New("unable to parse trace id")
+	}
+	if err := decodeHex(h, t[:]); err != nil {
+		return t, err
+	}
+	return t, nil
+}
+
+// spanIDFromHex returns a SpanID from a hex string if it is compliant with the
+// W3C trace-context specification and removes the validity check.
+// https://www.w3.org/TR/trace-context/#parent-id
+func spanIDFromHex(h string) (trace.SpanID, error) {
+	s := trace.SpanID{}
+	if len(h) != 16 {
+		return s, errors.New("unable to parse span id of length: %d")
+	}
+	if err := decodeHex(h, s[:]); err != nil {
+		return s, err
+	}
+	return s, nil
+}
+
+// decodeHex decodes hex in a manner compliant with otel.
+func decodeHex(h string, b []byte) error {
+	for _, r := range h {
+		switch {
+		case 'a' <= r && r <= 'f':
+			continue
+		case '0' <= r && r <= '9':
+			continue
+		default:
+			return errors.New("unable to parse hex id")
+		}
+	}
+	decoded, err := hex.DecodeString(h)
+	if err != nil {
+		return err
+	}
+	copy(b, decoded)
+	return nil
+}
+
+// Snapshot turns a Span into a ReadOnlySpan which is exportable by otel.
+func (s *Span) Snapshot() tracesdk.ReadOnlySpan {
+	return spanSnapshot{
+		name:                 s.Name,
+		spanContext:          s.SpanContext,
+		parent:               s.Parent,
+		spanKind:             s.SpanKind,
+		startTime:            s.StartTime,
+		endTime:              s.EndTime,
+		attributes:           s.Attributes,
+		events:               s.Events,
+		links:                s.Links,
+		status:               s.Status,
+		droppedAttributes:    s.DroppedAttributes,
+		droppedEvents:        s.DroppedEvents,
+		droppedLinks:         s.DroppedLinks,
+		childSpanCount:       s.ChildSpanCount,
+		resource:             resource.NewSchemaless(s.Resource...),
+		instrumentationScope: s.InstrumentationLibrary,
+	}
+}
+
+// spanSnapshot is a helper type for transforming a Span into a ReadOnlySpan.
+type spanSnapshot struct {
+	// Embed the interface to implement the private method.
+	tracesdk.ReadOnlySpan
+
+	name                 string
+	spanContext          trace.SpanContext
+	parent               trace.SpanContext
+	spanKind             trace.SpanKind
+	startTime            time.Time
+	endTime              time.Time
+	attributes           []attribute.KeyValue
+	events               []tracesdk.Event
+	links                []tracesdk.Link
+	status               tracesdk.Status
+	droppedAttributes    int
+	droppedEvents        int
+	droppedLinks         int
+	childSpanCount       int
+	resource             *resource.Resource
+	instrumentationScope instrumentation.Scope
+}
+
+// Name returns the Name of the snapshot
+func (s spanSnapshot) Name() string { return s.name }
+
+// SpanContext returns the SpanContext of the snapshot
+func (s spanSnapshot) SpanContext() trace.SpanContext { return s.spanContext }
+
+// Parent returns the Parent of the snapshot
+func (s spanSnapshot) Parent() trace.SpanContext { return s.parent }
+
+// SpanKind returns the SpanKind of the snapshot
+func (s spanSnapshot) SpanKind() trace.SpanKind { return s.spanKind }
+
+// StartTime returns the StartTime of the snapshot
+func (s spanSnapshot) StartTime() time.Time { return s.startTime }
+
+// EndTime returns the EndTime of the snapshot
+func (s spanSnapshot) EndTime() time.Time { return s.endTime }
+
+// Attributes returns the Attributes of the snapshot
+func (s spanSnapshot) Attributes() []attribute.KeyValue { return s.attributes }
+
+// Links returns the Links of the snapshot
+func (s spanSnapshot) Links() []tracesdk.Link { return s.links }
+
+// Events returns the Events of the snapshot
+func (s spanSnapshot) Events() []tracesdk.Event { return s.events }
+
+// Status returns the Status of the snapshot
+func (s spanSnapshot) Status() tracesdk.Status { return s.status }
+
+// DroppedAttributes returns the DroppedAttributes of the snapshot
+func (s spanSnapshot) DroppedAttributes() int { return s.droppedAttributes }
+
+// DroppedLinks returns the DroppedLinks of the snapshot
+func (s spanSnapshot) DroppedLinks() int { return s.droppedLinks }
+
+// DroppedEvents returns the DroppedEvents of the snapshot
+func (s spanSnapshot) DroppedEvents() int { return s.droppedEvents }
+
+// ChildSpanCount returns the ChildSpanCount of the snapshot
+func (s spanSnapshot) ChildSpanCount() int { return s.childSpanCount }
+
+// Resource returns the Resource of the snapshot
+func (s spanSnapshot) Resource() *resource.Resource { return s.resource }
+
+// InstrumentationScope returns the InstrumentationScope of the snapshot
+func (s spanSnapshot) InstrumentationScope() instrumentation.Scope {
+	return s.instrumentationScope
+}
+
+// InstrumentationLibrary returns the InstrumentationLibrary of the snapshot
+//
+//nolint:staticcheck
+func (s spanSnapshot) InstrumentationLibrary() instrumentation.Library {
+	return s.instrumentationScope
+}
diff -pruN 0.19.3+ds1-4/util/otelutil/span_test.go 0.21.3-0ubuntu1/util/otelutil/span_test.go
--- 0.19.3+ds1-4/util/otelutil/span_test.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/util/otelutil/span_test.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,159 @@
+package otelutil
+
+import (
+	"bytes"
+	"context"
+	"encoding/json"
+	"os"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/exporters/stdout/stdouttrace"
+)
+
+// curl -s --unix-socket /tmp/docker-desktop-build-dev.sock http://localhost/blobs/default/default?digest=sha256:3103104e9fa908087bd47572da6ad9a5a7bf973608f736536d18d635a7da0140 -X GET > ./fixtures/bktraces.json
+const bktracesFixture = "./fixtures/bktraces.json"
+
+const otlpFixture = "./fixtures/otlp.json"
+
+func TestParseSpanStubs(t *testing.T) {
+	dt, err := os.ReadFile(bktracesFixture)
+	require.NoError(t, err)
+
+	spanStubs, err := ParseSpanStubs(bytes.NewReader(dt))
+	require.NoError(t, err)
+	require.Equal(t, 73, len(spanStubs))
+
+	dtSpanStubs, err := json.MarshalIndent(spanStubs, "", "  ")
+	require.NoError(t, err)
+	dtotel, err := os.ReadFile(otlpFixture)
+	require.NoError(t, err)
+	require.Equal(t, string(dtotel), string(dtSpanStubs))
+
+	exp, err := stdouttrace.New(stdouttrace.WithPrettyPrint())
+	require.NoError(t, err)
+	require.NoError(t, exp.ExportSpans(context.Background(), spanStubs.ReadOnlySpans()))
+}
+
+func TestAsAttributeKeyValue(t *testing.T) {
+	type args struct {
+		Type  string
+		value any
+	}
+	tests := []struct {
+		name string
+		args args
+		want attribute.KeyValue
+	}{
+		{
+			name: "string",
+			args: args{
+				Type:  attribute.STRING.String(),
+				value: "value",
+			},
+			want: attribute.String("key", "value"),
+		},
+		{
+			name: "int64 (int64)",
+			args: args{
+				Type:  attribute.INT64.String(),
+				value: int64(1),
+			},
+			want: attribute.Int64("key", 1),
+		},
+		{
+			name: "int64 (float64)",
+			args: args{
+				Type:  attribute.INT64.String(),
+				value: float64(1.0),
+			},
+			want: attribute.Int64("key", 1),
+		},
+		{
+			name: "bool",
+			args: args{
+				Type:  attribute.BOOL.String(),
+				value: true,
+			},
+			want: attribute.Bool("key", true),
+		},
+		{
+			name: "float64",
+			args: args{
+				Type:  attribute.FLOAT64.String(),
+				value: float64(1.0),
+			},
+			want: attribute.Float64("key", 1.0),
+		},
+		{
+			name: "float64slice",
+			args: args{
+				Type:  attribute.FLOAT64SLICE.String(),
+				value: []float64{1.0, 2.0},
+			},
+			want: attribute.Float64Slice("key", []float64{1.0, 2.0}),
+		},
+		{
+			name: "int64slice (int64)",
+			args: args{
+				Type:  attribute.INT64SLICE.String(),
+				value: []int64{1, 2},
+			},
+			want: attribute.Int64Slice("key", []int64{1, 2}),
+		},
+		{
+			name: "int64slice (float64)",
+			args: args{
+				Type:  attribute.INT64SLICE.String(),
+				value: []float64{1.0, 2.0},
+			},
+			want: attribute.Int64Slice("key", []int64{1, 2}),
+		},
+		{
+			name: "boolslice",
+			args: args{
+				Type:  attribute.BOOLSLICE.String(),
+				value: []bool{true, false},
+			},
+			want: attribute.BoolSlice("key", []bool{true, false}),
+		},
+		{
+			name: "stringslice (strings)",
+			args: args{
+				Type:  attribute.STRINGSLICE.String(),
+				value: []string{"value1", "value2"},
+			},
+			want: attribute.StringSlice("key", []string{"value1", "value2"}),
+		},
+		{
+			name: "stringslice (interface of string)",
+			args: args{
+				Type:  attribute.STRINGSLICE.String(),
+				value: []interface{}{"value1", "value2"},
+			},
+			want: attribute.StringSlice("key", []string{"value1", "value2"}),
+		},
+		{
+			name: "stringslice (interface mixed)",
+			args: args{
+				Type:  attribute.STRINGSLICE.String(),
+				value: []interface{}{"value1", 2},
+			},
+			want: attribute.StringSlice("key", []string{"value1", "2"}),
+		},
+	}
+	for _, tt := range tests {
+		tt := tt
+		t.Run(tt.name, func(t *testing.T) {
+			kv := keyValue{
+				Key:   "key",
+				Value: value{Type: tt.args.Type, Value: tt.args.value},
+			}
+			attr, err := kv.asAttributeKeyValue()
+			require.NoError(t, err, "failed to convert key value to attribute key value")
+			assert.Equal(t, tt.want, attr, "attribute key value mismatch")
+		})
+	}
+}
diff -pruN 0.19.3+ds1-4/util/progress/printer.go 0.21.3-0ubuntu1/util/progress/printer.go
--- 0.19.3+ds1-4/util/progress/printer.go	2024-12-17 11:25:19.000000000 +0000
+++ 0.21.3-0ubuntu1/util/progress/printer.go	2025-03-17 16:14:25.000000000 +0000
@@ -122,6 +122,7 @@ func NewPrinter(ctx context.Context, out
 		for {
 			pw.status = make(chan *client.SolveStatus)
 			pw.done = make(chan struct{})
+			pw.closeOnce = sync.Once{}
 
 			pw.logMu.Lock()
 			pw.logSourceMap = map[digest.Digest]interface{}{}
diff -pruN 0.19.3+ds1-4/util/resolver/resolver.go 0.21.3-0ubuntu1/util/resolver/resolver.go
--- 0.19.3+ds1-4/util/resolver/resolver.go	2024-12-17 11:25:19.000000000 +0000
+++ 0.21.3-0ubuntu1/util/resolver/resolver.go	2025-03-17 16:14:25.000000000 +0000
@@ -8,7 +8,7 @@ import (
 	"runtime"
 	"time"
 
-	"github.com/containerd/containerd/remotes/docker"
+	"github.com/containerd/containerd/v2/core/remotes/docker"
 	"github.com/moby/buildkit/util/tracing"
 	"github.com/pkg/errors"
 )
diff -pruN 0.19.3+ds1-4/util/tracing/trace.go 0.21.3-0ubuntu1/util/tracing/trace.go
--- 0.19.3+ds1-4/util/tracing/trace.go	2024-12-17 11:25:19.000000000 +0000
+++ 0.21.3-0ubuntu1/util/tracing/trace.go	2025-03-17 16:14:25.000000000 +0000
@@ -2,7 +2,6 @@ package tracing
 
 import (
 	"context"
-	"os"
 	"strings"
 
 	"github.com/moby/buildkit/util/tracing/delegated"
@@ -13,7 +12,7 @@ import (
 	"go.opentelemetry.io/otel/trace"
 )
 
-func TraceCurrentCommand(ctx context.Context, name string) (context.Context, func(error), error) {
+func TraceCurrentCommand(ctx context.Context, args []string, attrs ...attribute.KeyValue) (context.Context, func(error), error) {
 	opts := []sdktrace.TracerProviderOption{
 		sdktrace.WithResource(detect.Resource()),
 		sdktrace.WithBatcher(delegated.DefaultExporter),
@@ -25,8 +24,8 @@ func TraceCurrentCommand(ctx context.Con
 	}
 
 	tp := sdktrace.NewTracerProvider(opts...)
-	ctx, span := tp.Tracer("").Start(ctx, name, trace.WithAttributes(
-		attribute.String("command", strings.Join(os.Args, " ")),
+	ctx, span := tp.Tracer("").Start(ctx, strings.Join(args, " "), trace.WithAttributes(
+		attrs...,
 	))
 
 	return ctx, func(err error) {
diff -pruN 0.19.3+ds1-4/vendor/github.com/AdaLogics/go-fuzz-headers/LICENSE 0.21.3-0ubuntu1/vendor/github.com/AdaLogics/go-fuzz-headers/LICENSE
--- 0.19.3+ds1-4/vendor/github.com/AdaLogics/go-fuzz-headers/LICENSE	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/AdaLogics/go-fuzz-headers/LICENSE	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,201 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff -pruN 0.19.3+ds1-4/vendor/github.com/AdaLogics/go-fuzz-headers/README.md 0.21.3-0ubuntu1/vendor/github.com/AdaLogics/go-fuzz-headers/README.md
--- 0.19.3+ds1-4/vendor/github.com/AdaLogics/go-fuzz-headers/README.md	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/AdaLogics/go-fuzz-headers/README.md	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,93 @@
+# go-fuzz-headers
+This repository contains various helper functions for go fuzzing. It is mostly used in combination with [go-fuzz](https://github.com/dvyukov/go-fuzz), but compatibility with fuzzing in the standard library will also be supported. Any coverage guided fuzzing engine that provides an array or slice of bytes can be used with go-fuzz-headers.
+
+
+## Usage
+Using go-fuzz-headers is easy. First create a new consumer with the bytes provided by the fuzzing engine:
+
+```go
+import (
+	fuzz "github.com/AdaLogics/go-fuzz-headers"
+)
+data := []byte{'R', 'a', 'n', 'd', 'o', 'm'}
+f := fuzz.NewConsumer(data)
+
+```
+
+This creates a `Consumer` that consumes the bytes of the input as it uses them to fuzz different types.
+
+After that, `f` can be used to easily create fuzzed instances of different types. Below are some examples:
+
+### Structs
+One of the most useful features of go-fuzz-headers is its ability to fill structs with the data provided by the fuzzing engine. This is done with a single line:
+```go
+type Person struct {
+    Name string
+    Age  int
+}
+p := Person{}
+// Fill p with values based on the data provided by the fuzzing engine:
+err := f.GenerateStruct(&p)
+```
+
+This includes nested structs too. In this example, the fuzz Consumer will also insert values in `p.BestFriend`:
+```go
+type PersonI struct {
+    Name       string
+    Age        int
+    BestFriend PersonII
+}
+type PersonII struct {
+    Name string
+    Age  int
+}
+p := PersonI{}
+err := f.GenerateStruct(&p)
+```
+
+If the consumer should insert values for unexported fields as well as exported, this can be enabled with:
+
+```go
+f.AllowUnexportedFields()
+```
+
+...and disabled with:
+
+```go
+f.DisallowUnexportedFields()
+```
+
+### Other types:
+
+Other useful APIs:
+
+```go
+createdString, err := f.GetString() // Gets a string
+createdInt, err := f.GetInt() // Gets an integer
+createdByte, err := f.GetByte() // Gets a byte
+createdBytes, err := f.GetBytes() // Gets a byte slice
+createdBool, err := f.GetBool() // Gets a boolean
+err := f.FuzzMap(target_map) // Fills a map
+createdTarBytes, err := f.TarBytes() // Gets bytes of a valid tar archive
+err := f.CreateFiles(inThisDir) // Fills inThisDir with files
+createdString, err := f.GetStringFrom("anyCharInThisString", ofThisLength) // Gets a string that consists of chars from "anyCharInThisString" and has the exact length "ofThisLength"
+```
+
+Most APIs are added as they are needed.
+
+## Projects that use go-fuzz-headers
+- [runC](https://github.com/opencontainers/runc)
+- [Istio](https://github.com/istio/istio)
+- [Vitess](https://github.com/vitessio/vitess)
+- [Containerd](https://github.com/containerd/containerd)
+
+Feel free to add your own project to the list, if you use go-fuzz-headers to fuzz it.
+
+
+ 
+
+## Status
+The project is under development and will be updated regularly.
+
+## References
+go-fuzz-headers' approach to fuzzing structs is strongly inspired by [gofuzz](https://github.com/google/gofuzz).
\ No newline at end of file
diff -pruN 0.19.3+ds1-4/vendor/github.com/AdaLogics/go-fuzz-headers/consumer.go 0.21.3-0ubuntu1/vendor/github.com/AdaLogics/go-fuzz-headers/consumer.go
--- 0.19.3+ds1-4/vendor/github.com/AdaLogics/go-fuzz-headers/consumer.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/AdaLogics/go-fuzz-headers/consumer.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,960 @@
+// Copyright 2023 The go-fuzz-headers Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package gofuzzheaders
+
+import (
+	"archive/tar"
+	"bytes"
+	"encoding/binary"
+	"errors"
+	"fmt"
+	"io"
+	"math"
+	"os"
+	"path/filepath"
+	"reflect"
+	"strconv"
+	"strings"
+	"time"
+	"unsafe"
+)
+
+var (
+	MaxTotalLen uint32 = 2000000
+	maxDepth           = 100
+)
+
+func SetMaxTotalLen(newLen uint32) {
+	MaxTotalLen = newLen
+}
+
+type ConsumeFuzzer struct {
+	data                 []byte
+	dataTotal            uint32
+	CommandPart          []byte
+	RestOfArray          []byte
+	NumberOfCalls        int
+	position             uint32
+	fuzzUnexportedFields bool
+	forceUTF8Strings     bool
+	curDepth             int
+	Funcs                map[reflect.Type]reflect.Value
+}
+
+func IsDivisibleBy(n int, divisibleby int) bool {
+	return (n % divisibleby) == 0
+}
+
+func NewConsumer(fuzzData []byte) *ConsumeFuzzer {
+	return &ConsumeFuzzer{
+		data:      fuzzData,
+		dataTotal: uint32(len(fuzzData)),
+		Funcs:     make(map[reflect.Type]reflect.Value),
+		curDepth:  0,
+	}
+}
+
+func (f *ConsumeFuzzer) Split(minCalls, maxCalls int) error {
+	if f.dataTotal == 0 {
+		return errors.New("could not split")
+	}
+	numberOfCalls := int(f.data[0])
+	if numberOfCalls < minCalls || numberOfCalls > maxCalls {
+		return errors.New("bad number of calls")
+	}
+	if int(f.dataTotal) < numberOfCalls+numberOfCalls+1 {
+		return errors.New("length of data does not match required parameters")
+	}
+
+	// Define part 2 and 3 of the data array
+	commandPart := f.data[1 : numberOfCalls+1]
+	restOfArray := f.data[numberOfCalls+1:]
+
+	// Just a small check. It is necessary
+	if len(commandPart) != numberOfCalls {
+		return errors.New("length of commandPart does not match number of calls")
+	}
+
+	// Check if restOfArray is divisible by numberOfCalls
+	if !IsDivisibleBy(len(restOfArray), numberOfCalls) {
+		return errors.New("length of commandPart does not match number of calls")
+	}
+	f.CommandPart = commandPart
+	f.RestOfArray = restOfArray
+	f.NumberOfCalls = numberOfCalls
+	return nil
+}
+
+func (f *ConsumeFuzzer) AllowUnexportedFields() {
+	f.fuzzUnexportedFields = true
+}
+
+func (f *ConsumeFuzzer) DisallowUnexportedFields() {
+	f.fuzzUnexportedFields = false
+}
+
+func (f *ConsumeFuzzer) AllowNonUTF8Strings() {
+	f.forceUTF8Strings = false
+}
+
+func (f *ConsumeFuzzer) DisallowNonUTF8Strings() {
+	f.forceUTF8Strings = true
+}
+
+func (f *ConsumeFuzzer) GenerateStruct(targetStruct interface{}) error {
+	e := reflect.ValueOf(targetStruct).Elem()
+	return f.fuzzStruct(e, false)
+}
+
+func (f *ConsumeFuzzer) setCustom(v reflect.Value) error {
+	// First: see if we have a fuzz function for it.
+	doCustom, ok := f.Funcs[v.Type()]
+	if !ok {
+		return fmt.Errorf("could not find a custom function")
+	}
+
+	switch v.Kind() {
+	case reflect.Ptr:
+		if v.IsNil() {
+			if !v.CanSet() {
+				return fmt.Errorf("could not use a custom function")
+			}
+			v.Set(reflect.New(v.Type().Elem()))
+		}
+	case reflect.Map:
+		if v.IsNil() {
+			if !v.CanSet() {
+				return fmt.Errorf("could not use a custom function")
+			}
+			v.Set(reflect.MakeMap(v.Type()))
+		}
+	default:
+		return fmt.Errorf("could not use a custom function")
+	}
+
+	verr := doCustom.Call([]reflect.Value{v, reflect.ValueOf(Continue{
+		F: f,
+	})})
+
+	// check if we return an error
+	if verr[0].IsNil() {
+		return nil
+	}
+	return fmt.Errorf("could not use a custom function")
+}
+
+func (f *ConsumeFuzzer) fuzzStruct(e reflect.Value, customFunctions bool) error {
+	if f.curDepth >= maxDepth {
+		// return err or nil here?
+		return nil
+	}
+	f.curDepth++
+	defer func() { f.curDepth-- }()
+
+	// We check if we should check for custom functions
+	if customFunctions && e.IsValid() && e.CanAddr() {
+		err := f.setCustom(e.Addr())
+		if err != nil {
+			return err
+		}
+	}
+
+	switch e.Kind() {
+	case reflect.Struct:
+		for i := 0; i < e.NumField(); i++ {
+			var v reflect.Value
+			if !e.Field(i).CanSet() {
+				if f.fuzzUnexportedFields {
+					v = reflect.NewAt(e.Field(i).Type(), unsafe.Pointer(e.Field(i).UnsafeAddr())).Elem()
+				}
+				if err := f.fuzzStruct(v, customFunctions); err != nil {
+					return err
+				}
+			} else {
+				v = e.Field(i)
+				if err := f.fuzzStruct(v, customFunctions); err != nil {
+					return err
+				}
+			}
+		}
+	case reflect.String:
+		str, err := f.GetString()
+		if err != nil {
+			return err
+		}
+		if e.CanSet() {
+			e.SetString(str)
+		}
+	case reflect.Slice:
+		var maxElements uint32
+		// Byte slices should not be restricted
+		if e.Type().String() == "[]uint8" {
+			maxElements = 10000000
+		} else {
+			maxElements = 50
+		}
+
+		randQty, err := f.GetUint32()
+		if err != nil {
+			return err
+		}
+		numOfElements := randQty % maxElements
+		if (f.dataTotal - f.position) < numOfElements {
+			numOfElements = f.dataTotal - f.position
+		}
+
+		uu := reflect.MakeSlice(e.Type(), int(numOfElements), int(numOfElements))
+
+		for i := 0; i < int(numOfElements); i++ {
+			// If we have more than 10, then we can proceed with that.
+			if err := f.fuzzStruct(uu.Index(i), customFunctions); err != nil {
+				if i >= 10 {
+					if e.CanSet() {
+						e.Set(uu)
+					}
+					return nil
+				} else {
+					return err
+				}
+			}
+		}
+		if e.CanSet() {
+			e.Set(uu)
+		}
+	case reflect.Uint:
+		newInt, err := f.GetUint()
+		if err != nil {
+			return err
+		}
+		if e.CanSet() {
+			e.SetUint(uint64(newInt))
+		}
+	case reflect.Uint16:
+		newInt, err := f.GetUint16()
+		if err != nil {
+			return err
+		}
+		if e.CanSet() {
+			e.SetUint(uint64(newInt))
+		}
+	case reflect.Uint32:
+		newInt, err := f.GetUint32()
+		if err != nil {
+			return err
+		}
+		if e.CanSet() {
+			e.SetUint(uint64(newInt))
+		}
+	case reflect.Uint64:
+		newInt, err := f.GetInt()
+		if err != nil {
+			return err
+		}
+		if e.CanSet() {
+			e.SetUint(uint64(newInt))
+		}
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		newInt, err := f.GetInt()
+		if err != nil {
+			return err
+		}
+		if e.CanSet() {
+			e.SetInt(int64(newInt))
+		}
+	case reflect.Float32:
+		newFloat, err := f.GetFloat32()
+		if err != nil {
+			return err
+		}
+		if e.CanSet() {
+			e.SetFloat(float64(newFloat))
+		}
+	case reflect.Float64:
+		newFloat, err := f.GetFloat64()
+		if err != nil {
+			return err
+		}
+		if e.CanSet() {
+			e.SetFloat(float64(newFloat))
+		}
+	case reflect.Map:
+		if e.CanSet() {
+			e.Set(reflect.MakeMap(e.Type()))
+			const maxElements = 50
+			randQty, err := f.GetInt()
+			if err != nil {
+				return err
+			}
+			numOfElements := randQty % maxElements
+			for i := 0; i < numOfElements; i++ {
+				key := reflect.New(e.Type().Key()).Elem()
+				if err := f.fuzzStruct(key, customFunctions); err != nil {
+					return err
+				}
+				val := reflect.New(e.Type().Elem()).Elem()
+				if err = f.fuzzStruct(val, customFunctions); err != nil {
+					return err
+				}
+				e.SetMapIndex(key, val)
+			}
+		}
+	case reflect.Ptr:
+		if e.CanSet() {
+			e.Set(reflect.New(e.Type().Elem()))
+			if err := f.fuzzStruct(e.Elem(), customFunctions); err != nil {
+				return err
+			}
+			return nil
+		}
+	case reflect.Uint8:
+		b, err := f.GetByte()
+		if err != nil {
+			return err
+		}
+		if e.CanSet() {
+			e.SetUint(uint64(b))
+		}
+	case reflect.Bool:
+		b, err := f.GetBool()
+		if err != nil {
+			return err
+		}
+		if e.CanSet() {
+			e.SetBool(b)
+		}
+	}
+	return nil
+}
+
+func (f *ConsumeFuzzer) GetStringArray() (reflect.Value, error) {
+	// The max size of the array:
+	const max uint32 = 20
+
+	arraySize := f.position
+	if arraySize > max {
+		arraySize = max
+	}
+	stringArray := reflect.MakeSlice(reflect.SliceOf(reflect.TypeOf("string")), int(arraySize), int(arraySize))
+	if f.position+arraySize >= f.dataTotal {
+		return stringArray, errors.New("could not make string array")
+	}
+
+	for i := 0; i < int(arraySize); i++ {
+		stringSize := uint32(f.data[f.position])
+		if f.position+stringSize >= f.dataTotal {
+			return stringArray, nil
+		}
+		stringToAppend := string(f.data[f.position : f.position+stringSize])
+		strVal := reflect.ValueOf(stringToAppend)
+		stringArray = reflect.Append(stringArray, strVal)
+		f.position += stringSize
+	}
+	return stringArray, nil
+}
+
+func (f *ConsumeFuzzer) GetInt() (int, error) {
+	if f.position >= f.dataTotal {
+		return 0, errors.New("not enough bytes to create int")
+	}
+	returnInt := int(f.data[f.position])
+	f.position++
+	return returnInt, nil
+}
+
+func (f *ConsumeFuzzer) GetByte() (byte, error) {
+	if f.position >= f.dataTotal {
+		return 0x00, errors.New("not enough bytes to get byte")
+	}
+	returnByte := f.data[f.position]
+	f.position++
+	return returnByte, nil
+}
+
+func (f *ConsumeFuzzer) GetNBytes(numberOfBytes int) ([]byte, error) {
+	if f.position >= f.dataTotal {
+		return nil, errors.New("not enough bytes to get byte")
+	}
+	returnBytes := make([]byte, 0, numberOfBytes)
+	for i := 0; i < numberOfBytes; i++ {
+		newByte, err := f.GetByte()
+		if err != nil {
+			return nil, err
+		}
+		returnBytes = append(returnBytes, newByte)
+	}
+	return returnBytes, nil
+}
+
+func (f *ConsumeFuzzer) GetUint16() (uint16, error) {
+	u16, err := f.GetNBytes(2)
+	if err != nil {
+		return 0, err
+	}
+	littleEndian, err := f.GetBool()
+	if err != nil {
+		return 0, err
+	}
+	if littleEndian {
+		return binary.LittleEndian.Uint16(u16), nil
+	}
+	return binary.BigEndian.Uint16(u16), nil
+}
+
+func (f *ConsumeFuzzer) GetUint32() (uint32, error) {
+	u32, err := f.GetNBytes(4)
+	if err != nil {
+		return 0, err
+	}
+	return binary.BigEndian.Uint32(u32), nil
+}
+
+func (f *ConsumeFuzzer) GetUint64() (uint64, error) {
+	u64, err := f.GetNBytes(8)
+	if err != nil {
+		return 0, err
+	}
+	littleEndian, err := f.GetBool()
+	if err != nil {
+		return 0, err
+	}
+	if littleEndian {
+		return binary.LittleEndian.Uint64(u64), nil
+	}
+	return binary.BigEndian.Uint64(u64), nil
+}
+
+func (f *ConsumeFuzzer) GetUint() (uint, error) {
+	var zero uint
+	size := int(unsafe.Sizeof(zero))
+	if size == 8 {
+		u64, err := f.GetUint64()
+		if err != nil {
+			return 0, err
+		}
+		return uint(u64), nil
+	}
+	u32, err := f.GetUint32()
+	if err != nil {
+		return 0, err
+	}
+	return uint(u32), nil
+}
+
+func (f *ConsumeFuzzer) GetBytes() ([]byte, error) {
+	var length uint32
+	var err error
+	length, err = f.GetUint32()
+	if err != nil {
+		return nil, errors.New("not enough bytes to create byte array")
+	}
+
+	if length == 0 {
+		length = 30
+	}
+	bytesLeft := f.dataTotal - f.position
+	if bytesLeft <= 0 {
+		return nil, errors.New("not enough bytes to create byte array")
+	}
+
+	// If the length is the same as bytes left, we will not overflow
+	// the remaining bytes.
+	if length != bytesLeft {
+		length = length % bytesLeft
+	}
+	byteBegin := f.position
+	if byteBegin+length < byteBegin {
+		return nil, errors.New("numbers overflow")
+	}
+	f.position = byteBegin + length
+	return f.data[byteBegin:f.position], nil
+}
+
+func (f *ConsumeFuzzer) GetString() (string, error) {
+	if f.position >= f.dataTotal {
+		return "nil", errors.New("not enough bytes to create string")
+	}
+	length, err := f.GetUint32()
+	if err != nil {
+		return "nil", errors.New("not enough bytes to create string")
+	}
+	if f.position > MaxTotalLen {
+		return "nil", errors.New("created too large a string")
+	}
+	byteBegin := f.position
+	if byteBegin >= f.dataTotal {
+		return "nil", errors.New("not enough bytes to create string")
+	}
+	if byteBegin+length > f.dataTotal {
+		return "nil", errors.New("not enough bytes to create string")
+	}
+	if byteBegin > byteBegin+length {
+		return "nil", errors.New("numbers overflow")
+	}
+	f.position = byteBegin + length
+	s := string(f.data[byteBegin:f.position])
+	if f.forceUTF8Strings {
+		s = strings.ToValidUTF8(s, "")
+	}
+	return s, nil
+}
+
+func (f *ConsumeFuzzer) GetBool() (bool, error) {
+	if f.position >= f.dataTotal {
+		return false, errors.New("not enough bytes to create bool")
+	}
+	if IsDivisibleBy(int(f.data[f.position]), 2) {
+		f.position++
+		return true, nil
+	} else {
+		f.position++
+		return false, nil
+	}
+}
+
+func (f *ConsumeFuzzer) FuzzMap(m interface{}) error {
+	return f.GenerateStruct(m)
+}
+
+func returnTarBytes(buf []byte) ([]byte, error) {
+	return buf, nil
+	// Count files
+	var fileCounter int
+	tr := tar.NewReader(bytes.NewReader(buf))
+	for {
+		_, err := tr.Next()
+		if err == io.EOF {
+			break
+		}
+		if err != nil {
+			return nil, err
+		}
+		fileCounter++
+	}
+	if fileCounter >= 1 {
+		return buf, nil
+	}
+	return nil, fmt.Errorf("not enough files were created\n")
+}
+
+func setTarHeaderFormat(hdr *tar.Header, f *ConsumeFuzzer) error {
+	ind, err := f.GetInt()
+	if err != nil {
+		hdr.Format = tar.FormatGNU
+		//return nil
+	}
+	switch ind % 4 {
+	case 0:
+		hdr.Format = tar.FormatUnknown
+	case 1:
+		hdr.Format = tar.FormatUSTAR
+	case 2:
+		hdr.Format = tar.FormatPAX
+	case 3:
+		hdr.Format = tar.FormatGNU
+	}
+	return nil
+}
+
+func setTarHeaderTypeflag(hdr *tar.Header, f *ConsumeFuzzer) error {
+	ind, err := f.GetInt()
+	if err != nil {
+		return err
+	}
+	switch ind % 13 {
+	case 0:
+		hdr.Typeflag = tar.TypeReg
+	case 1:
+		hdr.Typeflag = tar.TypeLink
+		linkname, err := f.GetString()
+		if err != nil {
+			return err
+		}
+		hdr.Linkname = linkname
+	case 2:
+		hdr.Typeflag = tar.TypeSymlink
+		linkname, err := f.GetString()
+		if err != nil {
+			return err
+		}
+		hdr.Linkname = linkname
+	case 3:
+		hdr.Typeflag = tar.TypeChar
+	case 4:
+		hdr.Typeflag = tar.TypeBlock
+	case 5:
+		hdr.Typeflag = tar.TypeDir
+	case 6:
+		hdr.Typeflag = tar.TypeFifo
+	case 7:
+		hdr.Typeflag = tar.TypeCont
+	case 8:
+		hdr.Typeflag = tar.TypeXHeader
+	case 9:
+		hdr.Typeflag = tar.TypeXGlobalHeader
+	case 10:
+		hdr.Typeflag = tar.TypeGNUSparse
+	case 11:
+		hdr.Typeflag = tar.TypeGNULongName
+	case 12:
+		hdr.Typeflag = tar.TypeGNULongLink
+	}
+	return nil
+}
+
+func (f *ConsumeFuzzer) createTarFileBody() ([]byte, error) {
+	return f.GetBytes()
+	/*length, err := f.GetUint32()
+	if err != nil {
+		return nil, errors.New("not enough bytes to create byte array")
+	}
+
+	// A bit of optimization to attempt to create a file body
+	// when we don't have as many bytes left as "length"
+	remainingBytes := f.dataTotal - f.position
+	if remainingBytes <= 0 {
+		return nil, errors.New("created too large a string")
+	}
+	if f.position+length > MaxTotalLen {
+		return nil, errors.New("created too large a string")
+	}
+	byteBegin := f.position
+	if byteBegin >= f.dataTotal {
+		return nil, errors.New("not enough bytes to create byte array")
+	}
+	if length == 0 {
+		return nil, errors.New("zero-length is not supported")
+	}
+	if byteBegin+length >= f.dataTotal {
+		return nil, errors.New("not enough bytes to create byte array")
+	}
+	if byteBegin+length < byteBegin {
+		return nil, errors.New("numbers overflow")
+	}
+	f.position = byteBegin + length
+	return f.data[byteBegin:f.position], nil*/
+}
+
+// getTarFileName is similar to GetString(), but creates string based
+// on the length of f.data to reduce the likelihood of overflowing
+// f.data.
+func (f *ConsumeFuzzer) getTarFilename() (string, error) {
+	return f.GetString()
+	/*length, err := f.GetUint32()
+	if err != nil {
+		return "nil", errors.New("not enough bytes to create string")
+	}
+
+	// A bit of optimization to attempt to create a file name
+	// when we don't have as many bytes left as "length"
+	remainingBytes := f.dataTotal - f.position
+	if remainingBytes <= 0 {
+		return "nil", errors.New("created too large a string")
+	}
+	if f.position > MaxTotalLen {
+		return "nil", errors.New("created too large a string")
+	}
+	byteBegin := f.position
+	if byteBegin >= f.dataTotal {
+		return "nil", errors.New("not enough bytes to create string")
+	}
+	if byteBegin+length > f.dataTotal {
+		return "nil", errors.New("not enough bytes to create string")
+	}
+	if byteBegin > byteBegin+length {
+		return "nil", errors.New("numbers overflow")
+	}
+	f.position = byteBegin + length
+	return string(f.data[byteBegin:f.position]), nil*/
+}
+
+type TarFile struct {
+	Hdr  *tar.Header
+	Body []byte
+}
+
+// TarBytes returns valid bytes for a tar archive
+func (f *ConsumeFuzzer) TarBytes() ([]byte, error) {
+	numberOfFiles, err := f.GetInt()
+	if err != nil {
+		return nil, err
+	}
+	var tarFiles []*TarFile
+	tarFiles = make([]*TarFile, 0)
+
+	const maxNoOfFiles = 100
+	for i := 0; i < numberOfFiles%maxNoOfFiles; i++ {
+		var filename string
+		var filebody []byte
+		var sec, nsec int
+		var err error
+
+		filename, err = f.getTarFilename()
+		if err != nil {
+			var sb strings.Builder
+			sb.WriteString("file-")
+			sb.WriteString(strconv.Itoa(i))
+			filename = sb.String()
+		}
+		filebody, err = f.createTarFileBody()
+		if err != nil {
+			var sb strings.Builder
+			sb.WriteString("filebody-")
+			sb.WriteString(strconv.Itoa(i))
+			filebody = []byte(sb.String())
+		}
+
+		sec, err = f.GetInt()
+		if err != nil {
+			sec = 1672531200 // beginning of 2023
+		}
+		nsec, err = f.GetInt()
+		if err != nil {
+			nsec = 1703980800 // end of 2023
+		}
+
+		hdr := &tar.Header{
+			Name:    filename,
+			Size:    int64(len(filebody)),
+			Mode:    0o600,
+			ModTime: time.Unix(int64(sec), int64(nsec)),
+		}
+		if err := setTarHeaderTypeflag(hdr, f); err != nil {
+			return []byte(""), err
+		}
+		if err := setTarHeaderFormat(hdr, f); err != nil {
+			return []byte(""), err
+		}
+		tf := &TarFile{
+			Hdr:  hdr,
+			Body: filebody,
+		}
+		tarFiles = append(tarFiles, tf)
+	}
+
+	var buf bytes.Buffer
+	tw := tar.NewWriter(&buf)
+	defer tw.Close()
+
+	for _, tf := range tarFiles {
+		tw.WriteHeader(tf.Hdr)
+		tw.Write(tf.Body)
+	}
+	return buf.Bytes(), nil
+}
+
+// This is similar to TarBytes, but it returns a series of
+// files instead of raw tar bytes. The advantage of this
+// api is that it is cheaper in terms of cpu power to
+// modify or check the files in the fuzzer with TarFiles()
+// because it avoids creating a tar reader.
+func (f *ConsumeFuzzer) TarFiles() ([]*TarFile, error) {
+	numberOfFiles, err := f.GetInt()
+	if err != nil {
+		return nil, err
+	}
+	var tarFiles []*TarFile
+	tarFiles = make([]*TarFile, 0)
+
+	const maxNoOfFiles = 100
+	for i := 0; i < numberOfFiles%maxNoOfFiles; i++ {
+		filename, err := f.getTarFilename()
+		if err != nil {
+			return tarFiles, err
+		}
+		filebody, err := f.createTarFileBody()
+		if err != nil {
+			return tarFiles, err
+		}
+
+		sec, err := f.GetInt()
+		if err != nil {
+			return tarFiles, err
+		}
+		nsec, err := f.GetInt()
+		if err != nil {
+			return tarFiles, err
+		}
+
+		hdr := &tar.Header{
+			Name:    filename,
+			Size:    int64(len(filebody)),
+			Mode:    0o600,
+			ModTime: time.Unix(int64(sec), int64(nsec)),
+		}
+		if err := setTarHeaderTypeflag(hdr, f); err != nil {
+			hdr.Typeflag = tar.TypeReg
+		}
+		if err := setTarHeaderFormat(hdr, f); err != nil {
+			return tarFiles, err // should not happend
+		}
+		tf := &TarFile{
+			Hdr:  hdr,
+			Body: filebody,
+		}
+		tarFiles = append(tarFiles, tf)
+	}
+	return tarFiles, nil
+}
+
+// CreateFiles creates pseudo-random files in rootDir.
+// It creates subdirs and places the files there.
+// It is the callers responsibility to ensure that
+// rootDir exists.
+func (f *ConsumeFuzzer) CreateFiles(rootDir string) error {
+	numberOfFiles, err := f.GetInt()
+	if err != nil {
+		return err
+	}
+	maxNumberOfFiles := numberOfFiles % 4000 // This is completely arbitrary
+	if maxNumberOfFiles == 0 {
+		return errors.New("maxNumberOfFiles is nil")
+	}
+
+	var noOfCreatedFiles int
+	for i := 0; i < maxNumberOfFiles; i++ {
+		// The file to create:
+		fileName, err := f.GetString()
+		if err != nil {
+			if noOfCreatedFiles > 0 {
+				// If files have been created, we don't return an error.
+				break
+			} else {
+				return errors.New("could not get fileName")
+			}
+		}
+		if strings.Contains(fileName, "..") || (len(fileName) > 0 && fileName[0] == 47) || strings.Contains(fileName, "\\") {
+			continue
+		}
+		fullFilePath := filepath.Join(rootDir, fileName)
+
+		// Find the subdirectory of the file
+		if subDir := filepath.Dir(fileName); subDir != "" && subDir != "." {
+			// create the dir first; avoid going outside the root dir
+			if strings.Contains(subDir, "../") || (len(subDir) > 0 && subDir[0] == 47) || strings.Contains(subDir, "\\") {
+				continue
+			}
+			dirPath := filepath.Join(rootDir, subDir)
+			if _, err := os.Stat(dirPath); os.IsNotExist(err) {
+				err2 := os.MkdirAll(dirPath, 0o777)
+				if err2 != nil {
+					continue
+				}
+			}
+			fullFilePath = filepath.Join(dirPath, fileName)
+		} else {
+			// Create symlink
+			createSymlink, err := f.GetBool()
+			if err != nil {
+				if noOfCreatedFiles > 0 {
+					break
+				} else {
+					return errors.New("could not create the symlink")
+				}
+			}
+			if createSymlink {
+				symlinkTarget, err := f.GetString()
+				if err != nil {
+					return err
+				}
+				err = os.Symlink(symlinkTarget, fullFilePath)
+				if err != nil {
+					return err
+				}
+				// stop loop here, since a symlink needs no further action
+				noOfCreatedFiles++
+				continue
+			}
+			// We create a normal file
+			fileContents, err := f.GetBytes()
+			if err != nil {
+				if noOfCreatedFiles > 0 {
+					break
+				} else {
+					return errors.New("could not create the file")
+				}
+			}
+			err = os.WriteFile(fullFilePath, fileContents, 0o666)
+			if err != nil {
+				continue
+			}
+			noOfCreatedFiles++
+		}
+	}
+	return nil
+}
+
+// GetStringFrom returns a string that can only consist of characters
+// included in possibleChars. It returns an error if the created string
+// does not have the specified length.
+func (f *ConsumeFuzzer) GetStringFrom(possibleChars string, length int) (string, error) {
+	if (f.dataTotal - f.position) < uint32(length) {
+		return "", errors.New("not enough bytes to create a string")
+	}
+	output := make([]byte, 0, length)
+	for i := 0; i < length; i++ {
+		charIndex, err := f.GetInt()
+		if err != nil {
+			return string(output), err
+		}
+		output = append(output, possibleChars[charIndex%len(possibleChars)])
+	}
+	return string(output), nil
+}
+
+func (f *ConsumeFuzzer) GetRune() ([]rune, error) {
+	stringToConvert, err := f.GetString()
+	if err != nil {
+		return []rune("nil"), err
+	}
+	return []rune(stringToConvert), nil
+}
+
+func (f *ConsumeFuzzer) GetFloat32() (float32, error) {
+	u32, err := f.GetNBytes(4)
+	if err != nil {
+		return 0, err
+	}
+	littleEndian, err := f.GetBool()
+	if err != nil {
+		return 0, err
+	}
+	if littleEndian {
+		u32LE := binary.LittleEndian.Uint32(u32)
+		return math.Float32frombits(u32LE), nil
+	}
+	u32BE := binary.BigEndian.Uint32(u32)
+	return math.Float32frombits(u32BE), nil
+}
+
+func (f *ConsumeFuzzer) GetFloat64() (float64, error) {
+	u64, err := f.GetNBytes(8)
+	if err != nil {
+		return 0, err
+	}
+	littleEndian, err := f.GetBool()
+	if err != nil {
+		return 0, err
+	}
+	if littleEndian {
+		u64LE := binary.LittleEndian.Uint64(u64)
+		return math.Float64frombits(u64LE), nil
+	}
+	u64BE := binary.BigEndian.Uint64(u64)
+	return math.Float64frombits(u64BE), nil
+}
+
+func (f *ConsumeFuzzer) CreateSlice(targetSlice interface{}) error {
+	return f.GenerateStruct(targetSlice)
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/AdaLogics/go-fuzz-headers/funcs.go 0.21.3-0ubuntu1/vendor/github.com/AdaLogics/go-fuzz-headers/funcs.go
--- 0.19.3+ds1-4/vendor/github.com/AdaLogics/go-fuzz-headers/funcs.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/AdaLogics/go-fuzz-headers/funcs.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,62 @@
+// Copyright 2023 The go-fuzz-headers Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package gofuzzheaders
+
+import (
+	"fmt"
+	"reflect"
+)
+
+type Continue struct {
+	F *ConsumeFuzzer
+}
+
+func (f *ConsumeFuzzer) AddFuncs(fuzzFuncs []interface{}) {
+	for i := range fuzzFuncs {
+		v := reflect.ValueOf(fuzzFuncs[i])
+		if v.Kind() != reflect.Func {
+			panic("Need only funcs!")
+		}
+		t := v.Type()
+		if t.NumIn() != 2 || t.NumOut() != 1 {
+			fmt.Println(t.NumIn(), t.NumOut())
+
+			panic("Need 2 in and 1 out params. In must be the type. Out must be an error")
+		}
+		argT := t.In(0)
+		switch argT.Kind() {
+		case reflect.Ptr, reflect.Map:
+		default:
+			panic("fuzzFunc must take pointer or map type")
+		}
+		if t.In(1) != reflect.TypeOf(Continue{}) {
+			panic("fuzzFunc's second parameter must be type Continue")
+		}
+		f.Funcs[argT] = v
+	}
+}
+
+func (f *ConsumeFuzzer) GenerateWithCustom(targetStruct interface{}) error {
+	e := reflect.ValueOf(targetStruct).Elem()
+	return f.fuzzStruct(e, true)
+}
+
+func (c Continue) GenerateStruct(targetStruct interface{}) error {
+	return c.F.GenerateStruct(targetStruct)
+}
+
+func (c Continue) GenerateStructWithCustom(targetStruct interface{}) error {
+	return c.F.GenerateWithCustom(targetStruct)
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/AdaLogics/go-fuzz-headers/sql.go 0.21.3-0ubuntu1/vendor/github.com/AdaLogics/go-fuzz-headers/sql.go
--- 0.19.3+ds1-4/vendor/github.com/AdaLogics/go-fuzz-headers/sql.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/AdaLogics/go-fuzz-headers/sql.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,556 @@
+// Copyright 2023 The go-fuzz-headers Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package gofuzzheaders
+
+import (
+	"fmt"
+	"strings"
+)
+
+// returns a keyword by index
+func getKeyword(f *ConsumeFuzzer) (string, error) {
+	index, err := f.GetInt()
+	if err != nil {
+		return keywords[0], err
+	}
+	for i, k := range keywords {
+		if i == index {
+			return k, nil
+		}
+	}
+	return keywords[0], fmt.Errorf("could not get a kw")
+}
+
+// Simple utility function to check if a string
+// slice contains a string.
+func containsString(s []string, e string) bool {
+	for _, a := range s {
+		if a == e {
+			return true
+		}
+	}
+	return false
+}
+
+// These keywords are used specifically for fuzzing Vitess
+var keywords = []string{
+	"accessible", "action", "add", "after", "against", "algorithm",
+	"all", "alter", "always", "analyze", "and", "as", "asc", "asensitive",
+	"auto_increment", "avg_row_length", "before", "begin", "between",
+	"bigint", "binary", "_binary", "_utf8mb4", "_utf8", "_latin1", "bit",
+	"blob", "bool", "boolean", "both", "by", "call", "cancel", "cascade",
+	"cascaded", "case", "cast", "channel", "change", "char", "character",
+	"charset", "check", "checksum", "coalesce", "code", "collate", "collation",
+	"column", "columns", "comment", "committed", "commit", "compact", "complete",
+	"compressed", "compression", "condition", "connection", "constraint", "continue",
+	"convert", "copy", "cume_dist", "substr", "substring", "create", "cross",
+	"csv", "current_date", "current_time", "current_timestamp", "current_user",
+	"cursor", "data", "database", "databases", "day", "day_hour", "day_microsecond",
+	"day_minute", "day_second", "date", "datetime", "dec", "decimal", "declare",
+	"default", "definer", "delay_key_write", "delayed", "delete", "dense_rank",
+	"desc", "describe", "deterministic", "directory", "disable", "discard",
+	"disk", "distinct", "distinctrow", "div", "double", "do", "drop", "dumpfile",
+	"duplicate", "dynamic", "each", "else", "elseif", "empty", "enable",
+	"enclosed", "encryption", "end", "enforced", "engine", "engines", "enum",
+	"error", "escape", "escaped", "event", "exchange", "exclusive", "exists",
+	"exit", "explain", "expansion", "export", "extended", "extract", "false",
+	"fetch", "fields", "first", "first_value", "fixed", "float", "float4",
+	"float8", "flush", "for", "force", "foreign", "format", "from", "full",
+	"fulltext", "function", "general", "generated", "geometry", "geometrycollection",
+	"get", "global", "gtid_executed", "grant", "group", "grouping", "groups",
+	"group_concat", "having", "header", "high_priority", "hosts", "hour", "hour_microsecond",
+	"hour_minute", "hour_second", "if", "ignore", "import", "in", "index", "indexes",
+	"infile", "inout", "inner", "inplace", "insensitive", "insert", "insert_method",
+	"int", "int1", "int2", "int3", "int4", "int8", "integer", "interval",
+	"into", "io_after_gtids", "is", "isolation", "iterate", "invoker", "join",
+	"json", "json_table", "key", "keys", "keyspaces", "key_block_size", "kill", "lag",
+	"language", "last", "last_value", "last_insert_id", "lateral", "lead", "leading",
+	"leave", "left", "less", "level", "like", "limit", "linear", "lines",
+	"linestring", "load", "local", "localtime", "localtimestamp", "lock", "logs",
+	"long", "longblob", "longtext", "loop", "low_priority", "manifest",
+	"master_bind", "match", "max_rows", "maxvalue", "mediumblob", "mediumint",
+	"mediumtext", "memory", "merge", "microsecond", "middleint", "min_rows", "minute",
+	"minute_microsecond", "minute_second", "mod", "mode", "modify", "modifies",
+	"multilinestring", "multipoint", "multipolygon", "month", "name",
+	"names", "natural", "nchar", "next", "no", "none", "not", "no_write_to_binlog",
+	"nth_value", "ntile", "null", "numeric", "of", "off", "offset", "on",
+	"only", "open", "optimize", "optimizer_costs", "option", "optionally",
+	"or", "order", "out", "outer", "outfile", "over", "overwrite", "pack_keys",
+	"parser", "partition", "partitioning", "password", "percent_rank", "plugins",
+	"point", "polygon", "precision", "primary", "privileges", "processlist",
+	"procedure", "query", "quarter", "range", "rank", "read", "reads", "read_write",
+	"real", "rebuild", "recursive", "redundant", "references", "regexp", "relay",
+	"release", "remove", "rename", "reorganize", "repair", "repeat", "repeatable",
+	"replace", "require", "resignal", "restrict", "return", "retry", "revert",
+	"revoke", "right", "rlike", "rollback", "row", "row_format", "row_number",
+	"rows", "s3", "savepoint", "schema", "schemas", "second", "second_microsecond",
+	"security", "select", "sensitive", "separator", "sequence", "serializable",
+	"session", "set", "share", "shared", "show", "signal", "signed", "slow",
+	"smallint", "spatial", "specific", "sql", "sqlexception", "sqlstate",
+	"sqlwarning", "sql_big_result", "sql_cache", "sql_calc_found_rows",
+	"sql_no_cache", "sql_small_result", "ssl", "start", "starting",
+	"stats_auto_recalc", "stats_persistent", "stats_sample_pages", "status",
+	"storage", "stored", "straight_join", "stream", "system", "vstream",
+	"table", "tables", "tablespace", "temporary", "temptable", "terminated",
+	"text", "than", "then", "time", "timestamp", "timestampadd", "timestampdiff",
+	"tinyblob", "tinyint", "tinytext", "to", "trailing", "transaction", "tree",
+	"traditional", "trigger", "triggers", "true", "truncate", "uncommitted",
+	"undefined", "undo", "union", "unique", "unlock", "unsigned", "update",
+	"upgrade", "usage", "use", "user", "user_resources", "using", "utc_date",
+	"utc_time", "utc_timestamp", "validation", "values", "variables", "varbinary",
+	"varchar", "varcharacter", "varying", "vgtid_executed", "virtual", "vindex",
+	"vindexes", "view", "vitess", "vitess_keyspaces", "vitess_metadata",
+	"vitess_migration", "vitess_migrations", "vitess_replication_status",
+	"vitess_shards", "vitess_tablets", "vschema", "warnings", "when",
+	"where", "while", "window", "with", "without", "work", "write", "xor",
+	"year", "year_month", "zerofill",
+}
+
+// Keywords that could get an additional keyword
+var needCustomString = []string{
+	"DISTINCTROW", "FROM", // Select keywords:
+	"GROUP BY", "HAVING", "WINDOW",
+	"FOR",
+	"ORDER BY", "LIMIT",
+	"INTO", "PARTITION", "AS", // Insert Keywords:
+	"ON DUPLICATE KEY UPDATE",
+	"WHERE", "LIMIT", // Delete keywords
+	"INFILE", "INTO TABLE", "CHARACTER SET", // Load keywords
+	"TERMINATED BY", "ENCLOSED BY",
+	"ESCAPED BY", "STARTING BY",
+	"TERMINATED BY", "STARTING BY",
+	"IGNORE",
+	"VALUE", "VALUES", // Replace tokens
+	"SET",                                   // Update tokens
+	"ENGINE =",                              // Drop tokens
+	"DEFINER =", "ON SCHEDULE", "RENAME TO", // Alter tokens
+	"COMMENT", "DO", "INITIAL_SIZE = ", "OPTIONS",
+}
+
+var alterTableTokens = [][]string{
+	{"CUSTOM_FUZZ_STRING"},
+	{"CUSTOM_ALTTER_TABLE_OPTIONS"},
+	{"PARTITION_OPTIONS_FOR_ALTER_TABLE"},
+}
+
+var alterTokens = [][]string{
+	{
+		"DATABASE", "SCHEMA", "DEFINER = ", "EVENT", "FUNCTION", "INSTANCE",
+		"LOGFILE GROUP", "PROCEDURE", "SERVER",
+	},
+	{"CUSTOM_FUZZ_STRING"},
+	{
+		"ON SCHEDULE", "ON COMPLETION PRESERVE", "ON COMPLETION NOT PRESERVE",
+		"ADD UNDOFILE", "OPTIONS",
+	},
+	{"RENAME TO", "INITIAL_SIZE = "},
+	{"ENABLE", "DISABLE", "DISABLE ON SLAVE", "ENGINE"},
+	{"COMMENT"},
+	{"DO"},
+}
+
+var setTokens = [][]string{
+	{"CHARACTER SET", "CHARSET", "CUSTOM_FUZZ_STRING", "NAMES"},
+	{"CUSTOM_FUZZ_STRING", "DEFAULT", "="},
+	{"CUSTOM_FUZZ_STRING"},
+}
+
+var dropTokens = [][]string{
+	{"TEMPORARY", "UNDO"},
+	{
+		"DATABASE", "SCHEMA", "EVENT", "INDEX", "LOGFILE GROUP",
+		"PROCEDURE", "FUNCTION", "SERVER", "SPATIAL REFERENCE SYSTEM",
+		"TABLE", "TABLESPACE", "TRIGGER", "VIEW",
+	},
+	{"IF EXISTS"},
+	{"CUSTOM_FUZZ_STRING"},
+	{"ON", "ENGINE = ", "RESTRICT", "CASCADE"},
+}
+
+var renameTokens = [][]string{
+	{"TABLE"},
+	{"CUSTOM_FUZZ_STRING"},
+	{"TO"},
+	{"CUSTOM_FUZZ_STRING"},
+}
+
+var truncateTokens = [][]string{
+	{"TABLE"},
+	{"CUSTOM_FUZZ_STRING"},
+}
+
+var createTokens = [][]string{
+	{"OR REPLACE", "TEMPORARY", "UNDO"}, // For create spatial reference system
+	{
+		"UNIQUE", "FULLTEXT", "SPATIAL", "ALGORITHM = UNDEFINED", "ALGORITHM = MERGE",
+		"ALGORITHM = TEMPTABLE",
+	},
+	{
+		"DATABASE", "SCHEMA", "EVENT", "FUNCTION", "INDEX", "LOGFILE GROUP",
+		"PROCEDURE", "SERVER", "SPATIAL REFERENCE SYSTEM", "TABLE", "TABLESPACE",
+		"TRIGGER", "VIEW",
+	},
+	{"IF NOT EXISTS"},
+	{"CUSTOM_FUZZ_STRING"},
+}
+
+/*
+// For future use.
+var updateTokens = [][]string{
+	{"LOW_PRIORITY"},
+	{"IGNORE"},
+	{"SET"},
+	{"WHERE"},
+	{"ORDER BY"},
+	{"LIMIT"},
+}
+*/
+
+var replaceTokens = [][]string{
+	{"LOW_PRIORITY", "DELAYED"},
+	{"INTO"},
+	{"PARTITION"},
+	{"CUSTOM_FUZZ_STRING"},
+	{"VALUES", "VALUE"},
+}
+
+var loadTokens = [][]string{
+	{"DATA"},
+	{"LOW_PRIORITY", "CONCURRENT", "LOCAL"},
+	{"INFILE"},
+	{"REPLACE", "IGNORE"},
+	{"INTO TABLE"},
+	{"PARTITION"},
+	{"CHARACTER SET"},
+	{"FIELDS", "COLUMNS"},
+	{"TERMINATED BY"},
+	{"OPTIONALLY"},
+	{"ENCLOSED BY"},
+	{"ESCAPED BY"},
+	{"LINES"},
+	{"STARTING BY"},
+	{"TERMINATED BY"},
+	{"IGNORE"},
+	{"LINES", "ROWS"},
+	{"CUSTOM_FUZZ_STRING"},
+}
+
+// These Are everything that comes after "INSERT"
+var insertTokens = [][]string{
+	{"LOW_PRIORITY", "DELAYED", "HIGH_PRIORITY", "IGNORE"},
+	{"INTO"},
+	{"PARTITION"},
+	{"CUSTOM_FUZZ_STRING"},
+	{"AS"},
+	{"ON DUPLICATE KEY UPDATE"},
+}
+
+// These are everything that comes after "SELECT"
+var selectTokens = [][]string{
+	{"*", "CUSTOM_FUZZ_STRING", "DISTINCTROW"},
+	{"HIGH_PRIORITY"},
+	{"STRAIGHT_JOIN"},
+	{"SQL_SMALL_RESULT", "SQL_BIG_RESULT", "SQL_BUFFER_RESULT"},
+	{"SQL_NO_CACHE", "SQL_CALC_FOUND_ROWS"},
+	{"CUSTOM_FUZZ_STRING"},
+	{"FROM"},
+	{"WHERE"},
+	{"GROUP BY"},
+	{"HAVING"},
+	{"WINDOW"},
+	{"ORDER BY"},
+	{"LIMIT"},
+	{"CUSTOM_FUZZ_STRING"},
+	{"FOR"},
+}
+
+// These are everything that comes after "DELETE"
+var deleteTokens = [][]string{
+	{"LOW_PRIORITY", "QUICK", "IGNORE", "FROM", "AS"},
+	{"PARTITION"},
+	{"WHERE"},
+	{"ORDER BY"},
+	{"LIMIT"},
+}
+
+var alter_table_options = []string{
+	"ADD", "COLUMN", "FIRST", "AFTER", "INDEX", "KEY", "FULLTEXT", "SPATIAL",
+	"CONSTRAINT", "UNIQUE", "FOREIGN KEY", "CHECK", "ENFORCED", "DROP", "ALTER",
+	"NOT", "INPLACE", "COPY", "SET", "VISIBLE", "INVISIBLE", "DEFAULT", "CHANGE",
+	"CHARACTER SET", "COLLATE", "DISABLE", "ENABLE", "KEYS", "TABLESPACE", "LOCK",
+	"FORCE", "MODIFY", "SHARED", "EXCLUSIVE", "NONE", "ORDER BY", "RENAME COLUMN",
+	"AS", "=", "ASC", "DESC", "WITH", "WITHOUT", "VALIDATION", "ADD PARTITION",
+	"DROP PARTITION", "DISCARD PARTITION", "IMPORT PARTITION", "TRUNCATE PARTITION",
+	"COALESCE PARTITION", "REORGANIZE PARTITION", "EXCHANGE PARTITION",
+	"ANALYZE PARTITION", "CHECK PARTITION", "OPTIMIZE PARTITION", "REBUILD PARTITION",
+	"REPAIR PARTITION", "REMOVE PARTITIONING", "USING", "BTREE", "HASH", "COMMENT",
+	"KEY_BLOCK_SIZE", "WITH PARSER", "AUTOEXTEND_SIZE", "AUTO_INCREMENT", "AVG_ROW_LENGTH",
+	"CHECKSUM", "INSERT_METHOD", "ROW_FORMAT", "DYNAMIC", "FIXED", "COMPRESSED", "REDUNDANT",
+	"COMPACT", "SECONDARY_ENGINE_ATTRIBUTE", "STATS_AUTO_RECALC", "STATS_PERSISTENT",
+	"STATS_SAMPLE_PAGES", "ZLIB", "LZ4", "ENGINE_ATTRIBUTE", "KEY_BLOCK_SIZE", "MAX_ROWS",
+	"MIN_ROWS", "PACK_KEYS", "PASSWORD", "COMPRESSION", "CONNECTION", "DIRECTORY",
+	"DELAY_KEY_WRITE", "ENCRYPTION", "STORAGE", "DISK", "MEMORY", "UNION",
+}
+
+// Creates an 'alter table' statement. 'alter table' is an exception
+// in that it has its own function. The majority of statements
+// are created by 'createStmt()'.
+func createAlterTableStmt(f *ConsumeFuzzer) (string, error) {
+	maxArgs, err := f.GetInt()
+	if err != nil {
+		return "", err
+	}
+	maxArgs = maxArgs % 30
+	if maxArgs == 0 {
+		return "", fmt.Errorf("could not create alter table stmt")
+	}
+
+	var stmt strings.Builder
+	stmt.WriteString("ALTER TABLE ")
+	for i := 0; i < maxArgs; i++ {
+		// Calculate if we get existing token or custom string
+		tokenType, err := f.GetInt()
+		if err != nil {
+			return "", err
+		}
+		if tokenType%4 == 1 {
+			customString, err := f.GetString()
+			if err != nil {
+				return "", err
+			}
+			stmt.WriteString(" " + customString)
+		} else {
+			tokenIndex, err := f.GetInt()
+			if err != nil {
+				return "", err
+			}
+			stmt.WriteString(" " + alter_table_options[tokenIndex%len(alter_table_options)])
+		}
+	}
+	return stmt.String(), nil
+}
+
+func chooseToken(tokens []string, f *ConsumeFuzzer) (string, error) {
+	index, err := f.GetInt()
+	if err != nil {
+		return "", err
+	}
+	var token strings.Builder
+	token.WriteString(tokens[index%len(tokens)])
+	if token.String() == "CUSTOM_FUZZ_STRING" {
+		customFuzzString, err := f.GetString()
+		if err != nil {
+			return "", err
+		}
+		return customFuzzString, nil
+	}
+
+	// Check if token requires an argument
+	if containsString(needCustomString, token.String()) {
+		customFuzzString, err := f.GetString()
+		if err != nil {
+			return "", err
+		}
+		token.WriteString(" " + customFuzzString)
+	}
+	return token.String(), nil
+}
+
+var stmtTypes = map[string][][]string{
+	"DELETE":      deleteTokens,
+	"INSERT":      insertTokens,
+	"SELECT":      selectTokens,
+	"LOAD":        loadTokens,
+	"REPLACE":     replaceTokens,
+	"CREATE":      createTokens,
+	"DROP":        dropTokens,
+	"RENAME":      renameTokens,
+	"TRUNCATE":    truncateTokens,
+	"SET":         setTokens,
+	"ALTER":       alterTokens,
+	"ALTER TABLE": alterTableTokens, // ALTER TABLE has its own set of tokens
+}
+
+var stmtTypeEnum = map[int]string{
+	0:  "DELETE",
+	1:  "INSERT",
+	2:  "SELECT",
+	3:  "LOAD",
+	4:  "REPLACE",
+	5:  "CREATE",
+	6:  "DROP",
+	7:  "RENAME",
+	8:  "TRUNCATE",
+	9:  "SET",
+	10: "ALTER",
+	11: "ALTER TABLE",
+}
+
+func createStmt(f *ConsumeFuzzer) (string, error) {
+	stmtIndex, err := f.GetInt()
+	if err != nil {
+		return "", err
+	}
+	stmtIndex = stmtIndex % len(stmtTypes)
+
+	queryType := stmtTypeEnum[stmtIndex]
+	tokens := stmtTypes[queryType]
+
+	// We have custom creator for ALTER TABLE
+	if queryType == "ALTER TABLE" {
+		query, err := createAlterTableStmt(f)
+		if err != nil {
+			return "", err
+		}
+		return query, nil
+	}
+
+	// Here we are creating a query that is not
+	// an 'alter table' query. For available
+	// queries, see "stmtTypes"
+
+	// First specify the first query keyword:
+	var query strings.Builder
+	query.WriteString(queryType)
+
+	// Next create the args for the
+	queryArgs, err := createStmtArgs(tokens, f)
+	if err != nil {
+		return "", err
+	}
+	query.WriteString(" " + queryArgs)
+	return query.String(), nil
+}
+
+// Creates the arguments of a statements. In a select statement
+// that would be everything after "select".
+func createStmtArgs(tokenslice [][]string, f *ConsumeFuzzer) (string, error) {
+	var query, token strings.Builder
+
+	// We go through the tokens in the tokenslice,
+	// create the respective token and add it to
+	// "query"
+	for _, tokens := range tokenslice {
+		// For extra randomization, the fuzzer can
+		// choose to not include this token.
+		includeThisToken, err := f.GetBool()
+		if err != nil {
+			return "", err
+		}
+		if !includeThisToken {
+			continue
+		}
+
+		// There may be several tokens to choose from:
+		if len(tokens) > 1 {
+			chosenToken, err := chooseToken(tokens, f)
+			if err != nil {
+				return "", err
+			}
+			query.WriteString(" " + chosenToken)
+		} else {
+			token.WriteString(tokens[0])
+
+			// In case the token is "CUSTOM_FUZZ_STRING"
+			// we will then create a non-structured string
+			if token.String() == "CUSTOM_FUZZ_STRING" {
+				customFuzzString, err := f.GetString()
+				if err != nil {
+					return "", err
+				}
+				query.WriteString(" " + customFuzzString)
+				continue
+			}
+
+			// Check if token requires an argument.
+			// Tokens that take an argument can be found
+			// in 'needCustomString'. If so, we add a
+			// non-structured string to the token.
+			if containsString(needCustomString, token.String()) {
+				customFuzzString, err := f.GetString()
+				if err != nil {
+					return "", err
+				}
+				token.WriteString(fmt.Sprintf(" %s", customFuzzString))
+			}
+			query.WriteString(fmt.Sprintf(" %s", token.String()))
+		}
+	}
+	return query.String(), nil
+}
+
+// Creates a semi-structured query. It creates a string
+// that is a combination of the keywords and random strings.
+func createQuery(f *ConsumeFuzzer) (string, error) {
+	queryLen, err := f.GetInt()
+	if err != nil {
+		return "", err
+	}
+	maxLen := queryLen % 60
+	if maxLen == 0 {
+		return "", fmt.Errorf("could not create a query")
+	}
+	var query strings.Builder
+	for i := 0; i < maxLen; i++ {
+		// Get a new token:
+		useKeyword, err := f.GetBool()
+		if err != nil {
+			return "", err
+		}
+		if useKeyword {
+			keyword, err := getKeyword(f)
+			if err != nil {
+				return "", err
+			}
+			query.WriteString(" " + keyword)
+		} else {
+			customString, err := f.GetString()
+			if err != nil {
+				return "", err
+			}
+			query.WriteString(" " + customString)
+		}
+	}
+	if query.String() == "" {
+		return "", fmt.Errorf("could not create a query")
+	}
+	return query.String(), nil
+}
+
+// GetSQLString is the API that users interact with.
+//
+// Usage:
+//
+//	f := NewConsumer(data)
+//	sqlString, err := f.GetSQLString()
+func (f *ConsumeFuzzer) GetSQLString() (string, error) {
+	var query string
+	veryStructured, err := f.GetBool()
+	if err != nil {
+		return "", err
+	}
+	if veryStructured {
+		query, err = createStmt(f)
+		if err != nil {
+			return "", err
+		}
+	} else {
+		query, err = createQuery(f)
+		if err != nil {
+			return "", err
+		}
+	}
+	return query, nil
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/Azure/go-ansiterm/LICENSE 0.21.3-0ubuntu1/vendor/github.com/Azure/go-ansiterm/LICENSE
--- 0.19.3+ds1-4/vendor/github.com/Azure/go-ansiterm/LICENSE	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/Azure/go-ansiterm/LICENSE	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2015 Microsoft Corporation
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff -pruN 0.19.3+ds1-4/vendor/github.com/Azure/go-ansiterm/README.md 0.21.3-0ubuntu1/vendor/github.com/Azure/go-ansiterm/README.md
--- 0.19.3+ds1-4/vendor/github.com/Azure/go-ansiterm/README.md	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/Azure/go-ansiterm/README.md	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,12 @@
+# go-ansiterm
+
+This is a cross platform Ansi Terminal Emulation library.  It reads a stream of Ansi characters and produces the appropriate function calls.  The results of the function calls are platform dependent.
+
+For example the parser might receive "ESC, [, A" as a stream of three characters.  This is the code for Cursor Up (http://www.vt100.net/docs/vt510-rm/CUU).  The parser then calls the cursor up function (CUU()) on an event handler.  The event handler determines what platform specific work must be done to cause the cursor to move up one position.
+
+The parser (parser.go) is a partial implementation of this state machine (http://vt100.net/emu/vt500_parser.png).  There are also two event handler implementations, one for tests (test_event_handler.go) to validate that the expected events are being produced and called, the other is a Windows implementation (winterm/win_event_handler.go).
+
+See parser_test.go for examples exercising the state machine and generating appropriate function calls.
+
+-----
+This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments.
diff -pruN 0.19.3+ds1-4/vendor/github.com/Azure/go-ansiterm/SECURITY.md 0.21.3-0ubuntu1/vendor/github.com/Azure/go-ansiterm/SECURITY.md
--- 0.19.3+ds1-4/vendor/github.com/Azure/go-ansiterm/SECURITY.md	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/Azure/go-ansiterm/SECURITY.md	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,41 @@
+<!-- BEGIN MICROSOFT SECURITY.MD V0.0.8 BLOCK -->
+
+## Security
+
+Microsoft takes the security of our software products and services seriously, which includes all source code repositories managed through our GitHub organizations, which include [Microsoft](https://github.com/microsoft), [Azure](https://github.com/Azure), [DotNet](https://github.com/dotnet), [AspNet](https://github.com/aspnet), [Xamarin](https://github.com/xamarin), and [our GitHub organizations](https://opensource.microsoft.com/).
+
+If you believe you have found a security vulnerability in any Microsoft-owned repository that meets [Microsoft's definition of a security vulnerability](https://aka.ms/opensource/security/definition), please report it to us as described below.
+
+## Reporting Security Issues
+
+**Please do not report security vulnerabilities through public GitHub issues.**
+
+Instead, please report them to the Microsoft Security Response Center (MSRC) at [https://msrc.microsoft.com/create-report](https://aka.ms/opensource/security/create-report).
+
+If you prefer to submit without logging in, send email to [secure@microsoft.com](mailto:secure@microsoft.com).  If possible, encrypt your message with our PGP key; please download it from the [Microsoft Security Response Center PGP Key page](https://aka.ms/opensource/security/pgpkey).
+
+You should receive a response within 24 hours. If for some reason you do not, please follow up via email to ensure we received your original message. Additional information can be found at [microsoft.com/msrc](https://aka.ms/opensource/security/msrc). 
+
+Please include the requested information listed below (as much as you can provide) to help us better understand the nature and scope of the possible issue:
+
+  * Type of issue (e.g. buffer overflow, SQL injection, cross-site scripting, etc.)
+  * Full paths of source file(s) related to the manifestation of the issue
+  * The location of the affected source code (tag/branch/commit or direct URL)
+  * Any special configuration required to reproduce the issue
+  * Step-by-step instructions to reproduce the issue
+  * Proof-of-concept or exploit code (if possible)
+  * Impact of the issue, including how an attacker might exploit the issue
+
+This information will help us triage your report more quickly.
+
+If you are reporting for a bug bounty, more complete reports can contribute to a higher bounty award. Please visit our [Microsoft Bug Bounty Program](https://aka.ms/opensource/security/bounty) page for more details about our active programs.
+
+## Preferred Languages
+
+We prefer all communications to be in English.
+
+## Policy
+
+Microsoft follows the principle of [Coordinated Vulnerability Disclosure](https://aka.ms/opensource/security/cvd).
+
+<!-- END MICROSOFT SECURITY.MD BLOCK -->
diff -pruN 0.19.3+ds1-4/vendor/github.com/Azure/go-ansiterm/constants.go 0.21.3-0ubuntu1/vendor/github.com/Azure/go-ansiterm/constants.go
--- 0.19.3+ds1-4/vendor/github.com/Azure/go-ansiterm/constants.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/Azure/go-ansiterm/constants.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,188 @@
+package ansiterm
+
+const LogEnv = "DEBUG_TERMINAL"
+
+// ANSI constants
+// References:
+// -- http://www.ecma-international.org/publications/standards/Ecma-048.htm
+// -- http://man7.org/linux/man-pages/man4/console_codes.4.html
+// -- http://manpages.ubuntu.com/manpages/intrepid/man4/console_codes.4.html
+// -- http://en.wikipedia.org/wiki/ANSI_escape_code
+// -- http://vt100.net/emu/dec_ansi_parser
+// -- http://vt100.net/emu/vt500_parser.svg
+// -- http://invisible-island.net/xterm/ctlseqs/ctlseqs.html
+// -- http://www.inwap.com/pdp10/ansicode.txt
+const (
+	// ECMA-48 Set Graphics Rendition
+	// Note:
+	// -- Constants leading with an underscore (e.g., _ANSI_xxx) are unsupported or reserved
+	// -- Fonts could possibly be supported via SetCurrentConsoleFontEx
+	// -- Windows does not expose the per-window cursor (i.e., caret) blink times
+	ANSI_SGR_RESET              = 0
+	ANSI_SGR_BOLD               = 1
+	ANSI_SGR_DIM                = 2
+	_ANSI_SGR_ITALIC            = 3
+	ANSI_SGR_UNDERLINE          = 4
+	_ANSI_SGR_BLINKSLOW         = 5
+	_ANSI_SGR_BLINKFAST         = 6
+	ANSI_SGR_REVERSE            = 7
+	_ANSI_SGR_INVISIBLE         = 8
+	_ANSI_SGR_LINETHROUGH       = 9
+	_ANSI_SGR_FONT_00           = 10
+	_ANSI_SGR_FONT_01           = 11
+	_ANSI_SGR_FONT_02           = 12
+	_ANSI_SGR_FONT_03           = 13
+	_ANSI_SGR_FONT_04           = 14
+	_ANSI_SGR_FONT_05           = 15
+	_ANSI_SGR_FONT_06           = 16
+	_ANSI_SGR_FONT_07           = 17
+	_ANSI_SGR_FONT_08           = 18
+	_ANSI_SGR_FONT_09           = 19
+	_ANSI_SGR_FONT_10           = 20
+	_ANSI_SGR_DOUBLEUNDERLINE   = 21
+	ANSI_SGR_BOLD_DIM_OFF       = 22
+	_ANSI_SGR_ITALIC_OFF        = 23
+	ANSI_SGR_UNDERLINE_OFF      = 24
+	_ANSI_SGR_BLINK_OFF         = 25
+	_ANSI_SGR_RESERVED_00       = 26
+	ANSI_SGR_REVERSE_OFF        = 27
+	_ANSI_SGR_INVISIBLE_OFF     = 28
+	_ANSI_SGR_LINETHROUGH_OFF   = 29
+	ANSI_SGR_FOREGROUND_BLACK   = 30
+	ANSI_SGR_FOREGROUND_RED     = 31
+	ANSI_SGR_FOREGROUND_GREEN   = 32
+	ANSI_SGR_FOREGROUND_YELLOW  = 33
+	ANSI_SGR_FOREGROUND_BLUE    = 34
+	ANSI_SGR_FOREGROUND_MAGENTA = 35
+	ANSI_SGR_FOREGROUND_CYAN    = 36
+	ANSI_SGR_FOREGROUND_WHITE   = 37
+	_ANSI_SGR_RESERVED_01       = 38
+	ANSI_SGR_FOREGROUND_DEFAULT = 39
+	ANSI_SGR_BACKGROUND_BLACK   = 40
+	ANSI_SGR_BACKGROUND_RED     = 41
+	ANSI_SGR_BACKGROUND_GREEN   = 42
+	ANSI_SGR_BACKGROUND_YELLOW  = 43
+	ANSI_SGR_BACKGROUND_BLUE    = 44
+	ANSI_SGR_BACKGROUND_MAGENTA = 45
+	ANSI_SGR_BACKGROUND_CYAN    = 46
+	ANSI_SGR_BACKGROUND_WHITE   = 47
+	_ANSI_SGR_RESERVED_02       = 48
+	ANSI_SGR_BACKGROUND_DEFAULT = 49
+	// 50 - 65: Unsupported
+
+	ANSI_MAX_CMD_LENGTH = 4096
+
+	MAX_INPUT_EVENTS = 128
+	DEFAULT_WIDTH    = 80
+	DEFAULT_HEIGHT   = 24
+
+	ANSI_BEL              = 0x07
+	ANSI_BACKSPACE        = 0x08
+	ANSI_TAB              = 0x09
+	ANSI_LINE_FEED        = 0x0A
+	ANSI_VERTICAL_TAB     = 0x0B
+	ANSI_FORM_FEED        = 0x0C
+	ANSI_CARRIAGE_RETURN  = 0x0D
+	ANSI_ESCAPE_PRIMARY   = 0x1B
+	ANSI_ESCAPE_SECONDARY = 0x5B
+	ANSI_OSC_STRING_ENTRY = 0x5D
+	ANSI_COMMAND_FIRST    = 0x40
+	ANSI_COMMAND_LAST     = 0x7E
+	DCS_ENTRY             = 0x90
+	CSI_ENTRY             = 0x9B
+	OSC_STRING            = 0x9D
+	ANSI_PARAMETER_SEP    = ";"
+	ANSI_CMD_G0           = '('
+	ANSI_CMD_G1           = ')'
+	ANSI_CMD_G2           = '*'
+	ANSI_CMD_G3           = '+'
+	ANSI_CMD_DECPNM       = '>'
+	ANSI_CMD_DECPAM       = '='
+	ANSI_CMD_OSC          = ']'
+	ANSI_CMD_STR_TERM     = '\\'
+
+	KEY_CONTROL_PARAM_2 = ";2"
+	KEY_CONTROL_PARAM_3 = ";3"
+	KEY_CONTROL_PARAM_4 = ";4"
+	KEY_CONTROL_PARAM_5 = ";5"
+	KEY_CONTROL_PARAM_6 = ";6"
+	KEY_CONTROL_PARAM_7 = ";7"
+	KEY_CONTROL_PARAM_8 = ";8"
+	KEY_ESC_CSI         = "\x1B["
+	KEY_ESC_N           = "\x1BN"
+	KEY_ESC_O           = "\x1BO"
+
+	FILL_CHARACTER = ' '
+)
+
+func getByteRange(start byte, end byte) []byte {
+	bytes := make([]byte, 0, 32)
+	for i := start; i <= end; i++ {
+		bytes = append(bytes, byte(i))
+	}
+
+	return bytes
+}
+
+var toGroundBytes = getToGroundBytes()
+var executors = getExecuteBytes()
+
+// SPACE		  20+A0 hex  Always and everywhere a blank space
+// Intermediate	  20-2F hex   !"#$%&'()*+,-./
+var intermeds = getByteRange(0x20, 0x2F)
+
+// Parameters	  30-3F hex  0123456789:;<=>?
+// CSI Parameters 30-39, 3B hex 0123456789;
+var csiParams = getByteRange(0x30, 0x3F)
+
+var csiCollectables = append(getByteRange(0x30, 0x39), getByteRange(0x3B, 0x3F)...)
+
+// Uppercase	  40-5F hex  @ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_
+var upperCase = getByteRange(0x40, 0x5F)
+
+// Lowercase	  60-7E hex  `abcdefghijlkmnopqrstuvwxyz{|}~
+var lowerCase = getByteRange(0x60, 0x7E)
+
+// Alphabetics	  40-7E hex  (all of upper and lower case)
+var alphabetics = append(upperCase, lowerCase...)
+
+var printables = getByteRange(0x20, 0x7F)
+
+var escapeIntermediateToGroundBytes = getByteRange(0x30, 0x7E)
+var escapeToGroundBytes = getEscapeToGroundBytes()
+
+// See http://www.vt100.net/emu/vt500_parser.png for description of the complex
+// byte ranges below
+
+func getEscapeToGroundBytes() []byte {
+	escapeToGroundBytes := getByteRange(0x30, 0x4F)
+	escapeToGroundBytes = append(escapeToGroundBytes, getByteRange(0x51, 0x57)...)
+	escapeToGroundBytes = append(escapeToGroundBytes, 0x59)
+	escapeToGroundBytes = append(escapeToGroundBytes, 0x5A)
+	escapeToGroundBytes = append(escapeToGroundBytes, 0x5C)
+	escapeToGroundBytes = append(escapeToGroundBytes, getByteRange(0x60, 0x7E)...)
+	return escapeToGroundBytes
+}
+
+func getExecuteBytes() []byte {
+	executeBytes := getByteRange(0x00, 0x17)
+	executeBytes = append(executeBytes, 0x19)
+	executeBytes = append(executeBytes, getByteRange(0x1C, 0x1F)...)
+	return executeBytes
+}
+
+func getToGroundBytes() []byte {
+	groundBytes := []byte{0x18}
+	groundBytes = append(groundBytes, 0x1A)
+	groundBytes = append(groundBytes, getByteRange(0x80, 0x8F)...)
+	groundBytes = append(groundBytes, getByteRange(0x91, 0x97)...)
+	groundBytes = append(groundBytes, 0x99)
+	groundBytes = append(groundBytes, 0x9A)
+	groundBytes = append(groundBytes, 0x9C)
+	return groundBytes
+}
+
+// Delete		     7F hex  Always and everywhere ignored
+// C1 Control	  80-9F hex  32 additional control characters
+// G1 Displayable A1-FE hex  94 additional displayable characters
+// Special		  A0+FF hex  Same as SPACE and DELETE
diff -pruN 0.19.3+ds1-4/vendor/github.com/Azure/go-ansiterm/context.go 0.21.3-0ubuntu1/vendor/github.com/Azure/go-ansiterm/context.go
--- 0.19.3+ds1-4/vendor/github.com/Azure/go-ansiterm/context.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/Azure/go-ansiterm/context.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,7 @@
+package ansiterm
+
+type ansiContext struct {
+	currentChar byte
+	paramBuffer []byte
+	interBuffer []byte
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/Azure/go-ansiterm/csi_entry_state.go 0.21.3-0ubuntu1/vendor/github.com/Azure/go-ansiterm/csi_entry_state.go
--- 0.19.3+ds1-4/vendor/github.com/Azure/go-ansiterm/csi_entry_state.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/Azure/go-ansiterm/csi_entry_state.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,49 @@
+package ansiterm
+
+type csiEntryState struct {
+	baseState
+}
+
+func (csiState csiEntryState) Handle(b byte) (s state, e error) {
+	csiState.parser.logf("CsiEntry::Handle %#x", b)
+
+	nextState, err := csiState.baseState.Handle(b)
+	if nextState != nil || err != nil {
+		return nextState, err
+	}
+
+	switch {
+	case sliceContains(alphabetics, b):
+		return csiState.parser.ground, nil
+	case sliceContains(csiCollectables, b):
+		return csiState.parser.csiParam, nil
+	case sliceContains(executors, b):
+		return csiState, csiState.parser.execute()
+	}
+
+	return csiState, nil
+}
+
+func (csiState csiEntryState) Transition(s state) error {
+	csiState.parser.logf("CsiEntry::Transition %s --> %s", csiState.Name(), s.Name())
+	csiState.baseState.Transition(s)
+
+	switch s {
+	case csiState.parser.ground:
+		return csiState.parser.csiDispatch()
+	case csiState.parser.csiParam:
+		switch {
+		case sliceContains(csiParams, csiState.parser.context.currentChar):
+			csiState.parser.collectParam()
+		case sliceContains(intermeds, csiState.parser.context.currentChar):
+			csiState.parser.collectInter()
+		}
+	}
+
+	return nil
+}
+
+func (csiState csiEntryState) Enter() error {
+	csiState.parser.clear()
+	return nil
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/Azure/go-ansiterm/csi_param_state.go 0.21.3-0ubuntu1/vendor/github.com/Azure/go-ansiterm/csi_param_state.go
--- 0.19.3+ds1-4/vendor/github.com/Azure/go-ansiterm/csi_param_state.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/Azure/go-ansiterm/csi_param_state.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,38 @@
+package ansiterm
+
+type csiParamState struct {
+	baseState
+}
+
+func (csiState csiParamState) Handle(b byte) (s state, e error) {
+	csiState.parser.logf("CsiParam::Handle %#x", b)
+
+	nextState, err := csiState.baseState.Handle(b)
+	if nextState != nil || err != nil {
+		return nextState, err
+	}
+
+	switch {
+	case sliceContains(alphabetics, b):
+		return csiState.parser.ground, nil
+	case sliceContains(csiCollectables, b):
+		csiState.parser.collectParam()
+		return csiState, nil
+	case sliceContains(executors, b):
+		return csiState, csiState.parser.execute()
+	}
+
+	return csiState, nil
+}
+
+func (csiState csiParamState) Transition(s state) error {
+	csiState.parser.logf("CsiParam::Transition %s --> %s", csiState.Name(), s.Name())
+	csiState.baseState.Transition(s)
+
+	switch s {
+	case csiState.parser.ground:
+		return csiState.parser.csiDispatch()
+	}
+
+	return nil
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/Azure/go-ansiterm/escape_intermediate_state.go 0.21.3-0ubuntu1/vendor/github.com/Azure/go-ansiterm/escape_intermediate_state.go
--- 0.19.3+ds1-4/vendor/github.com/Azure/go-ansiterm/escape_intermediate_state.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/Azure/go-ansiterm/escape_intermediate_state.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,36 @@
+package ansiterm
+
+type escapeIntermediateState struct {
+	baseState
+}
+
+func (escState escapeIntermediateState) Handle(b byte) (s state, e error) {
+	escState.parser.logf("escapeIntermediateState::Handle %#x", b)
+	nextState, err := escState.baseState.Handle(b)
+	if nextState != nil || err != nil {
+		return nextState, err
+	}
+
+	switch {
+	case sliceContains(intermeds, b):
+		return escState, escState.parser.collectInter()
+	case sliceContains(executors, b):
+		return escState, escState.parser.execute()
+	case sliceContains(escapeIntermediateToGroundBytes, b):
+		return escState.parser.ground, nil
+	}
+
+	return escState, nil
+}
+
+func (escState escapeIntermediateState) Transition(s state) error {
+	escState.parser.logf("escapeIntermediateState::Transition %s --> %s", escState.Name(), s.Name())
+	escState.baseState.Transition(s)
+
+	switch s {
+	case escState.parser.ground:
+		return escState.parser.escDispatch()
+	}
+
+	return nil
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/Azure/go-ansiterm/escape_state.go 0.21.3-0ubuntu1/vendor/github.com/Azure/go-ansiterm/escape_state.go
--- 0.19.3+ds1-4/vendor/github.com/Azure/go-ansiterm/escape_state.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/Azure/go-ansiterm/escape_state.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,47 @@
+package ansiterm
+
+type escapeState struct {
+	baseState
+}
+
+func (escState escapeState) Handle(b byte) (s state, e error) {
+	escState.parser.logf("escapeState::Handle %#x", b)
+	nextState, err := escState.baseState.Handle(b)
+	if nextState != nil || err != nil {
+		return nextState, err
+	}
+
+	switch {
+	case b == ANSI_ESCAPE_SECONDARY:
+		return escState.parser.csiEntry, nil
+	case b == ANSI_OSC_STRING_ENTRY:
+		return escState.parser.oscString, nil
+	case sliceContains(executors, b):
+		return escState, escState.parser.execute()
+	case sliceContains(escapeToGroundBytes, b):
+		return escState.parser.ground, nil
+	case sliceContains(intermeds, b):
+		return escState.parser.escapeIntermediate, nil
+	}
+
+	return escState, nil
+}
+
+func (escState escapeState) Transition(s state) error {
+	escState.parser.logf("Escape::Transition %s --> %s", escState.Name(), s.Name())
+	escState.baseState.Transition(s)
+
+	switch s {
+	case escState.parser.ground:
+		return escState.parser.escDispatch()
+	case escState.parser.escapeIntermediate:
+		return escState.parser.collectInter()
+	}
+
+	return nil
+}
+
+func (escState escapeState) Enter() error {
+	escState.parser.clear()
+	return nil
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/Azure/go-ansiterm/event_handler.go 0.21.3-0ubuntu1/vendor/github.com/Azure/go-ansiterm/event_handler.go
--- 0.19.3+ds1-4/vendor/github.com/Azure/go-ansiterm/event_handler.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/Azure/go-ansiterm/event_handler.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,90 @@
+package ansiterm
+
+type AnsiEventHandler interface {
+	// Print
+	Print(b byte) error
+
+	// Execute C0 commands
+	Execute(b byte) error
+
+	// CUrsor Up
+	CUU(int) error
+
+	// CUrsor Down
+	CUD(int) error
+
+	// CUrsor Forward
+	CUF(int) error
+
+	// CUrsor Backward
+	CUB(int) error
+
+	// Cursor to Next Line
+	CNL(int) error
+
+	// Cursor to Previous Line
+	CPL(int) error
+
+	// Cursor Horizontal position Absolute
+	CHA(int) error
+
+	// Vertical line Position Absolute
+	VPA(int) error
+
+	// CUrsor Position
+	CUP(int, int) error
+
+	// Horizontal and Vertical Position (depends on PUM)
+	HVP(int, int) error
+
+	// Text Cursor Enable Mode
+	DECTCEM(bool) error
+
+	// Origin Mode
+	DECOM(bool) error
+
+	// 132 Column Mode
+	DECCOLM(bool) error
+
+	// Erase in Display
+	ED(int) error
+
+	// Erase in Line
+	EL(int) error
+
+	// Insert Line
+	IL(int) error
+
+	// Delete Line
+	DL(int) error
+
+	// Insert Character
+	ICH(int) error
+
+	// Delete Character
+	DCH(int) error
+
+	// Set Graphics Rendition
+	SGR([]int) error
+
+	// Pan Down
+	SU(int) error
+
+	// Pan Up
+	SD(int) error
+
+	// Device Attributes
+	DA([]string) error
+
+	// Set Top and Bottom Margins
+	DECSTBM(int, int) error
+
+	// Index
+	IND() error
+
+	// Reverse Index
+	RI() error
+
+	// Flush updates from previous commands
+	Flush() error
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/Azure/go-ansiterm/ground_state.go 0.21.3-0ubuntu1/vendor/github.com/Azure/go-ansiterm/ground_state.go
--- 0.19.3+ds1-4/vendor/github.com/Azure/go-ansiterm/ground_state.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/Azure/go-ansiterm/ground_state.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,24 @@
+package ansiterm
+
+type groundState struct {
+	baseState
+}
+
+func (gs groundState) Handle(b byte) (s state, e error) {
+	gs.parser.context.currentChar = b
+
+	nextState, err := gs.baseState.Handle(b)
+	if nextState != nil || err != nil {
+		return nextState, err
+	}
+
+	switch {
+	case sliceContains(printables, b):
+		return gs, gs.parser.print()
+
+	case sliceContains(executors, b):
+		return gs, gs.parser.execute()
+	}
+
+	return gs, nil
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/Azure/go-ansiterm/osc_string_state.go 0.21.3-0ubuntu1/vendor/github.com/Azure/go-ansiterm/osc_string_state.go
--- 0.19.3+ds1-4/vendor/github.com/Azure/go-ansiterm/osc_string_state.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/Azure/go-ansiterm/osc_string_state.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,23 @@
+package ansiterm
+
+type oscStringState struct {
+	baseState
+}
+
+func (oscState oscStringState) Handle(b byte) (s state, e error) {
+	oscState.parser.logf("OscString::Handle %#x", b)
+	nextState, err := oscState.baseState.Handle(b)
+	if nextState != nil || err != nil {
+		return nextState, err
+	}
+
+	// There are several control characters and sequences which can
+	// terminate an OSC string. Most of them are handled by the baseState
+	// handler. The ANSI_BEL character is a special case which behaves as a
+	// terminator only for an OSC string.
+	if b == ANSI_BEL {
+		return oscState.parser.ground, nil
+	}
+
+	return oscState, nil
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/Azure/go-ansiterm/parser.go 0.21.3-0ubuntu1/vendor/github.com/Azure/go-ansiterm/parser.go
--- 0.19.3+ds1-4/vendor/github.com/Azure/go-ansiterm/parser.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/Azure/go-ansiterm/parser.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,151 @@
+package ansiterm
+
+import (
+	"errors"
+	"log"
+	"os"
+)
+
+type AnsiParser struct {
+	currState          state
+	eventHandler       AnsiEventHandler
+	context            *ansiContext
+	csiEntry           state
+	csiParam           state
+	dcsEntry           state
+	escape             state
+	escapeIntermediate state
+	error              state
+	ground             state
+	oscString          state
+	stateMap           []state
+
+	logf func(string, ...interface{})
+}
+
+type Option func(*AnsiParser)
+
+func WithLogf(f func(string, ...interface{})) Option {
+	return func(ap *AnsiParser) {
+		ap.logf = f
+	}
+}
+
+func CreateParser(initialState string, evtHandler AnsiEventHandler, opts ...Option) *AnsiParser {
+	ap := &AnsiParser{
+		eventHandler: evtHandler,
+		context:      &ansiContext{},
+	}
+	for _, o := range opts {
+		o(ap)
+	}
+
+	if isDebugEnv := os.Getenv(LogEnv); isDebugEnv == "1" {
+		logFile, _ := os.Create("ansiParser.log")
+		logger := log.New(logFile, "", log.LstdFlags)
+		if ap.logf != nil {
+			l := ap.logf
+			ap.logf = func(s string, v ...interface{}) {
+				l(s, v...)
+				logger.Printf(s, v...)
+			}
+		} else {
+			ap.logf = logger.Printf
+		}
+	}
+
+	if ap.logf == nil {
+		ap.logf = func(string, ...interface{}) {}
+	}
+
+	ap.csiEntry = csiEntryState{baseState{name: "CsiEntry", parser: ap}}
+	ap.csiParam = csiParamState{baseState{name: "CsiParam", parser: ap}}
+	ap.dcsEntry = dcsEntryState{baseState{name: "DcsEntry", parser: ap}}
+	ap.escape = escapeState{baseState{name: "Escape", parser: ap}}
+	ap.escapeIntermediate = escapeIntermediateState{baseState{name: "EscapeIntermediate", parser: ap}}
+	ap.error = errorState{baseState{name: "Error", parser: ap}}
+	ap.ground = groundState{baseState{name: "Ground", parser: ap}}
+	ap.oscString = oscStringState{baseState{name: "OscString", parser: ap}}
+
+	ap.stateMap = []state{
+		ap.csiEntry,
+		ap.csiParam,
+		ap.dcsEntry,
+		ap.escape,
+		ap.escapeIntermediate,
+		ap.error,
+		ap.ground,
+		ap.oscString,
+	}
+
+	ap.currState = getState(initialState, ap.stateMap)
+
+	ap.logf("CreateParser: parser %p", ap)
+	return ap
+}
+
+func getState(name string, states []state) state {
+	for _, el := range states {
+		if el.Name() == name {
+			return el
+		}
+	}
+
+	return nil
+}
+
+func (ap *AnsiParser) Parse(bytes []byte) (int, error) {
+	for i, b := range bytes {
+		if err := ap.handle(b); err != nil {
+			return i, err
+		}
+	}
+
+	return len(bytes), ap.eventHandler.Flush()
+}
+
+func (ap *AnsiParser) handle(b byte) error {
+	ap.context.currentChar = b
+	newState, err := ap.currState.Handle(b)
+	if err != nil {
+		return err
+	}
+
+	if newState == nil {
+		ap.logf("WARNING: newState is nil")
+		return errors.New("New state of 'nil' is invalid.")
+	}
+
+	if newState != ap.currState {
+		if err := ap.changeState(newState); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (ap *AnsiParser) changeState(newState state) error {
+	ap.logf("ChangeState %s --> %s", ap.currState.Name(), newState.Name())
+
+	// Exit old state
+	if err := ap.currState.Exit(); err != nil {
+		ap.logf("Exit state '%s' failed with : '%v'", ap.currState.Name(), err)
+		return err
+	}
+
+	// Perform transition action
+	if err := ap.currState.Transition(newState); err != nil {
+		ap.logf("Transition from '%s' to '%s' failed with: '%v'", ap.currState.Name(), newState.Name(), err)
+		return err
+	}
+
+	// Enter new state
+	if err := newState.Enter(); err != nil {
+		ap.logf("Enter state '%s' failed with: '%v'", newState.Name(), err)
+		return err
+	}
+
+	ap.currState = newState
+	return nil
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/Azure/go-ansiterm/parser_action_helpers.go 0.21.3-0ubuntu1/vendor/github.com/Azure/go-ansiterm/parser_action_helpers.go
--- 0.19.3+ds1-4/vendor/github.com/Azure/go-ansiterm/parser_action_helpers.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/Azure/go-ansiterm/parser_action_helpers.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,99 @@
+package ansiterm
+
+import (
+	"strconv"
+)
+
+func parseParams(bytes []byte) ([]string, error) {
+	paramBuff := make([]byte, 0, 0)
+	params := []string{}
+
+	for _, v := range bytes {
+		if v == ';' {
+			if len(paramBuff) > 0 {
+				// Completed parameter, append it to the list
+				s := string(paramBuff)
+				params = append(params, s)
+				paramBuff = make([]byte, 0, 0)
+			}
+		} else {
+			paramBuff = append(paramBuff, v)
+		}
+	}
+
+	// Last parameter may not be terminated with ';'
+	if len(paramBuff) > 0 {
+		s := string(paramBuff)
+		params = append(params, s)
+	}
+
+	return params, nil
+}
+
+func parseCmd(context ansiContext) (string, error) {
+	return string(context.currentChar), nil
+}
+
+func getInt(params []string, dflt int) int {
+	i := getInts(params, 1, dflt)[0]
+	return i
+}
+
+func getInts(params []string, minCount int, dflt int) []int {
+	ints := []int{}
+
+	for _, v := range params {
+		i, _ := strconv.Atoi(v)
+		// Zero is mapped to the default value in VT100.
+		if i == 0 {
+			i = dflt
+		}
+		ints = append(ints, i)
+	}
+
+	if len(ints) < minCount {
+		remaining := minCount - len(ints)
+		for i := 0; i < remaining; i++ {
+			ints = append(ints, dflt)
+		}
+	}
+
+	return ints
+}
+
+func (ap *AnsiParser) modeDispatch(param string, set bool) error {
+	switch param {
+	case "?3":
+		return ap.eventHandler.DECCOLM(set)
+	case "?6":
+		return ap.eventHandler.DECOM(set)
+	case "?25":
+		return ap.eventHandler.DECTCEM(set)
+	}
+	return nil
+}
+
+func (ap *AnsiParser) hDispatch(params []string) error {
+	if len(params) == 1 {
+		return ap.modeDispatch(params[0], true)
+	}
+
+	return nil
+}
+
+func (ap *AnsiParser) lDispatch(params []string) error {
+	if len(params) == 1 {
+		return ap.modeDispatch(params[0], false)
+	}
+
+	return nil
+}
+
+func getEraseParam(params []string) int {
+	param := getInt(params, 0)
+	if param < 0 || 3 < param {
+		param = 0
+	}
+
+	return param
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/Azure/go-ansiterm/parser_actions.go 0.21.3-0ubuntu1/vendor/github.com/Azure/go-ansiterm/parser_actions.go
--- 0.19.3+ds1-4/vendor/github.com/Azure/go-ansiterm/parser_actions.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/Azure/go-ansiterm/parser_actions.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,119 @@
+package ansiterm
+
+func (ap *AnsiParser) collectParam() error {
+	currChar := ap.context.currentChar
+	ap.logf("collectParam %#x", currChar)
+	ap.context.paramBuffer = append(ap.context.paramBuffer, currChar)
+	return nil
+}
+
+func (ap *AnsiParser) collectInter() error {
+	currChar := ap.context.currentChar
+	ap.logf("collectInter %#x", currChar)
+	ap.context.interBuffer = append(ap.context.interBuffer, currChar)
+	return nil
+}
+
+func (ap *AnsiParser) escDispatch() error {
+	cmd, _ := parseCmd(*ap.context)
+	intermeds := ap.context.interBuffer
+	ap.logf("escDispatch currentChar: %#x", ap.context.currentChar)
+	ap.logf("escDispatch: %v(%v)", cmd, intermeds)
+
+	switch cmd {
+	case "D": // IND
+		return ap.eventHandler.IND()
+	case "E": // NEL, equivalent to CRLF
+		err := ap.eventHandler.Execute(ANSI_CARRIAGE_RETURN)
+		if err == nil {
+			err = ap.eventHandler.Execute(ANSI_LINE_FEED)
+		}
+		return err
+	case "M": // RI
+		return ap.eventHandler.RI()
+	}
+
+	return nil
+}
+
+func (ap *AnsiParser) csiDispatch() error {
+	cmd, _ := parseCmd(*ap.context)
+	params, _ := parseParams(ap.context.paramBuffer)
+	ap.logf("Parsed params: %v with length: %d", params, len(params))
+
+	ap.logf("csiDispatch: %v(%v)", cmd, params)
+
+	switch cmd {
+	case "@":
+		return ap.eventHandler.ICH(getInt(params, 1))
+	case "A":
+		return ap.eventHandler.CUU(getInt(params, 1))
+	case "B":
+		return ap.eventHandler.CUD(getInt(params, 1))
+	case "C":
+		return ap.eventHandler.CUF(getInt(params, 1))
+	case "D":
+		return ap.eventHandler.CUB(getInt(params, 1))
+	case "E":
+		return ap.eventHandler.CNL(getInt(params, 1))
+	case "F":
+		return ap.eventHandler.CPL(getInt(params, 1))
+	case "G":
+		return ap.eventHandler.CHA(getInt(params, 1))
+	case "H":
+		ints := getInts(params, 2, 1)
+		x, y := ints[0], ints[1]
+		return ap.eventHandler.CUP(x, y)
+	case "J":
+		param := getEraseParam(params)
+		return ap.eventHandler.ED(param)
+	case "K":
+		param := getEraseParam(params)
+		return ap.eventHandler.EL(param)
+	case "L":
+		return ap.eventHandler.IL(getInt(params, 1))
+	case "M":
+		return ap.eventHandler.DL(getInt(params, 1))
+	case "P":
+		return ap.eventHandler.DCH(getInt(params, 1))
+	case "S":
+		return ap.eventHandler.SU(getInt(params, 1))
+	case "T":
+		return ap.eventHandler.SD(getInt(params, 1))
+	case "c":
+		return ap.eventHandler.DA(params)
+	case "d":
+		return ap.eventHandler.VPA(getInt(params, 1))
+	case "f":
+		ints := getInts(params, 2, 1)
+		x, y := ints[0], ints[1]
+		return ap.eventHandler.HVP(x, y)
+	case "h":
+		return ap.hDispatch(params)
+	case "l":
+		return ap.lDispatch(params)
+	case "m":
+		return ap.eventHandler.SGR(getInts(params, 1, 0))
+	case "r":
+		ints := getInts(params, 2, 1)
+		top, bottom := ints[0], ints[1]
+		return ap.eventHandler.DECSTBM(top, bottom)
+	default:
+		ap.logf("ERROR: Unsupported CSI command: '%s', with full context:  %v", cmd, ap.context)
+		return nil
+	}
+
+}
+
+func (ap *AnsiParser) print() error {
+	return ap.eventHandler.Print(ap.context.currentChar)
+}
+
+func (ap *AnsiParser) clear() error {
+	ap.context = &ansiContext{}
+	return nil
+}
+
+func (ap *AnsiParser) execute() error {
+	return ap.eventHandler.Execute(ap.context.currentChar)
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/Azure/go-ansiterm/states.go 0.21.3-0ubuntu1/vendor/github.com/Azure/go-ansiterm/states.go
--- 0.19.3+ds1-4/vendor/github.com/Azure/go-ansiterm/states.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/Azure/go-ansiterm/states.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,71 @@
+package ansiterm
+
+type stateID int
+
+type state interface {
+	Enter() error
+	Exit() error
+	Handle(byte) (state, error)
+	Name() string
+	Transition(state) error
+}
+
+type baseState struct {
+	name   string
+	parser *AnsiParser
+}
+
+func (base baseState) Enter() error {
+	return nil
+}
+
+func (base baseState) Exit() error {
+	return nil
+}
+
+func (base baseState) Handle(b byte) (s state, e error) {
+
+	switch {
+	case b == CSI_ENTRY:
+		return base.parser.csiEntry, nil
+	case b == DCS_ENTRY:
+		return base.parser.dcsEntry, nil
+	case b == ANSI_ESCAPE_PRIMARY:
+		return base.parser.escape, nil
+	case b == OSC_STRING:
+		return base.parser.oscString, nil
+	case sliceContains(toGroundBytes, b):
+		return base.parser.ground, nil
+	}
+
+	return nil, nil
+}
+
+func (base baseState) Name() string {
+	return base.name
+}
+
+func (base baseState) Transition(s state) error {
+	if s == base.parser.ground {
+		execBytes := []byte{0x18}
+		execBytes = append(execBytes, 0x1A)
+		execBytes = append(execBytes, getByteRange(0x80, 0x8F)...)
+		execBytes = append(execBytes, getByteRange(0x91, 0x97)...)
+		execBytes = append(execBytes, 0x99)
+		execBytes = append(execBytes, 0x9A)
+
+		if sliceContains(execBytes, base.parser.context.currentChar) {
+			return base.parser.execute()
+		}
+	}
+
+	return nil
+}
+
+type dcsEntryState struct {
+	baseState
+}
+
+type errorState struct {
+	baseState
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/Azure/go-ansiterm/utilities.go 0.21.3-0ubuntu1/vendor/github.com/Azure/go-ansiterm/utilities.go
--- 0.19.3+ds1-4/vendor/github.com/Azure/go-ansiterm/utilities.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/Azure/go-ansiterm/utilities.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,21 @@
+package ansiterm
+
+import (
+	"strconv"
+)
+
+func sliceContains(bytes []byte, b byte) bool {
+	for _, v := range bytes {
+		if v == b {
+			return true
+		}
+	}
+
+	return false
+}
+
+func convertBytesToInteger(bytes []byte) int {
+	s := string(bytes)
+	i, _ := strconv.Atoi(s)
+	return i
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/Azure/go-ansiterm/winterm/ansi.go 0.21.3-0ubuntu1/vendor/github.com/Azure/go-ansiterm/winterm/ansi.go
--- 0.19.3+ds1-4/vendor/github.com/Azure/go-ansiterm/winterm/ansi.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/Azure/go-ansiterm/winterm/ansi.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,196 @@
+// +build windows
+
+package winterm
+
+import (
+	"fmt"
+	"os"
+	"strconv"
+	"strings"
+	"syscall"
+
+	"github.com/Azure/go-ansiterm"
+	windows "golang.org/x/sys/windows"
+)
+
+// Windows keyboard constants
+// See https://msdn.microsoft.com/en-us/library/windows/desktop/dd375731(v=vs.85).aspx.
+const (
+	VK_PRIOR    = 0x21 // PAGE UP key
+	VK_NEXT     = 0x22 // PAGE DOWN key
+	VK_END      = 0x23 // END key
+	VK_HOME     = 0x24 // HOME key
+	VK_LEFT     = 0x25 // LEFT ARROW key
+	VK_UP       = 0x26 // UP ARROW key
+	VK_RIGHT    = 0x27 // RIGHT ARROW key
+	VK_DOWN     = 0x28 // DOWN ARROW key
+	VK_SELECT   = 0x29 // SELECT key
+	VK_PRINT    = 0x2A // PRINT key
+	VK_EXECUTE  = 0x2B // EXECUTE key
+	VK_SNAPSHOT = 0x2C // PRINT SCREEN key
+	VK_INSERT   = 0x2D // INS key
+	VK_DELETE   = 0x2E // DEL key
+	VK_HELP     = 0x2F // HELP key
+	VK_F1       = 0x70 // F1 key
+	VK_F2       = 0x71 // F2 key
+	VK_F3       = 0x72 // F3 key
+	VK_F4       = 0x73 // F4 key
+	VK_F5       = 0x74 // F5 key
+	VK_F6       = 0x75 // F6 key
+	VK_F7       = 0x76 // F7 key
+	VK_F8       = 0x77 // F8 key
+	VK_F9       = 0x78 // F9 key
+	VK_F10      = 0x79 // F10 key
+	VK_F11      = 0x7A // F11 key
+	VK_F12      = 0x7B // F12 key
+
+	RIGHT_ALT_PRESSED  = 0x0001
+	LEFT_ALT_PRESSED   = 0x0002
+	RIGHT_CTRL_PRESSED = 0x0004
+	LEFT_CTRL_PRESSED  = 0x0008
+	SHIFT_PRESSED      = 0x0010
+	NUMLOCK_ON         = 0x0020
+	SCROLLLOCK_ON      = 0x0040
+	CAPSLOCK_ON        = 0x0080
+	ENHANCED_KEY       = 0x0100
+)
+
+type ansiCommand struct {
+	CommandBytes []byte
+	Command      string
+	Parameters   []string
+	IsSpecial    bool
+}
+
+func newAnsiCommand(command []byte) *ansiCommand {
+
+	if isCharacterSelectionCmdChar(command[1]) {
+		// Is Character Set Selection commands
+		return &ansiCommand{
+			CommandBytes: command,
+			Command:      string(command),
+			IsSpecial:    true,
+		}
+	}
+
+	// last char is command character
+	lastCharIndex := len(command) - 1
+
+	ac := &ansiCommand{
+		CommandBytes: command,
+		Command:      string(command[lastCharIndex]),
+		IsSpecial:    false,
+	}
+
+	// more than a single escape
+	if lastCharIndex != 0 {
+		start := 1
+		// skip if double char escape sequence
+		if command[0] == ansiterm.ANSI_ESCAPE_PRIMARY && command[1] == ansiterm.ANSI_ESCAPE_SECONDARY {
+			start++
+		}
+		// convert this to GetNextParam method
+		ac.Parameters = strings.Split(string(command[start:lastCharIndex]), ansiterm.ANSI_PARAMETER_SEP)
+	}
+
+	return ac
+}
+
+func (ac *ansiCommand) paramAsSHORT(index int, defaultValue int16) int16 {
+	if index < 0 || index >= len(ac.Parameters) {
+		return defaultValue
+	}
+
+	param, err := strconv.ParseInt(ac.Parameters[index], 10, 16)
+	if err != nil {
+		return defaultValue
+	}
+
+	return int16(param)
+}
+
+func (ac *ansiCommand) String() string {
+	return fmt.Sprintf("0x%v \"%v\" (\"%v\")",
+		bytesToHex(ac.CommandBytes),
+		ac.Command,
+		strings.Join(ac.Parameters, "\",\""))
+}
+
+// isAnsiCommandChar returns true if the passed byte falls within the range of ANSI commands.
+// See http://manpages.ubuntu.com/manpages/intrepid/man4/console_codes.4.html.
+func isAnsiCommandChar(b byte) bool {
+	switch {
+	case ansiterm.ANSI_COMMAND_FIRST <= b && b <= ansiterm.ANSI_COMMAND_LAST && b != ansiterm.ANSI_ESCAPE_SECONDARY:
+		return true
+	case b == ansiterm.ANSI_CMD_G1 || b == ansiterm.ANSI_CMD_OSC || b == ansiterm.ANSI_CMD_DECPAM || b == ansiterm.ANSI_CMD_DECPNM:
+		// non-CSI escape sequence terminator
+		return true
+	case b == ansiterm.ANSI_CMD_STR_TERM || b == ansiterm.ANSI_BEL:
+		// String escape sequence terminator
+		return true
+	}
+	return false
+}
+
+func isXtermOscSequence(command []byte, current byte) bool {
+	return (len(command) >= 2 && command[0] == ansiterm.ANSI_ESCAPE_PRIMARY && command[1] == ansiterm.ANSI_CMD_OSC && current != ansiterm.ANSI_BEL)
+}
+
+func isCharacterSelectionCmdChar(b byte) bool {
+	return (b == ansiterm.ANSI_CMD_G0 || b == ansiterm.ANSI_CMD_G1 || b == ansiterm.ANSI_CMD_G2 || b == ansiterm.ANSI_CMD_G3)
+}
+
+// bytesToHex converts a slice of bytes to a human-readable string.
+func bytesToHex(b []byte) string {
+	hex := make([]string, len(b))
+	for i, ch := range b {
+		hex[i] = fmt.Sprintf("%X", ch)
+	}
+	return strings.Join(hex, "")
+}
+
+// ensureInRange adjusts the passed value, if necessary, to ensure it is within
+// the passed min / max range.
+func ensureInRange(n int16, min int16, max int16) int16 {
+	if n < min {
+		return min
+	} else if n > max {
+		return max
+	} else {
+		return n
+	}
+}
+
+func GetStdFile(nFile int) (*os.File, uintptr) {
+	var file *os.File
+
+	// syscall uses negative numbers
+	// windows package uses very big uint32
+	// Keep these switches split so we don't have to convert ints too much.
+	switch uint32(nFile) {
+	case windows.STD_INPUT_HANDLE:
+		file = os.Stdin
+	case windows.STD_OUTPUT_HANDLE:
+		file = os.Stdout
+	case windows.STD_ERROR_HANDLE:
+		file = os.Stderr
+	default:
+		switch nFile {
+		case syscall.STD_INPUT_HANDLE:
+			file = os.Stdin
+		case syscall.STD_OUTPUT_HANDLE:
+			file = os.Stdout
+		case syscall.STD_ERROR_HANDLE:
+			file = os.Stderr
+		default:
+			panic(fmt.Errorf("Invalid standard handle identifier: %v", nFile))
+		}
+	}
+
+	fd, err := syscall.GetStdHandle(nFile)
+	if err != nil {
+		panic(fmt.Errorf("Invalid standard handle identifier: %v -- %v", nFile, err))
+	}
+
+	return file, uintptr(fd)
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/Azure/go-ansiterm/winterm/api.go 0.21.3-0ubuntu1/vendor/github.com/Azure/go-ansiterm/winterm/api.go
--- 0.19.3+ds1-4/vendor/github.com/Azure/go-ansiterm/winterm/api.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/Azure/go-ansiterm/winterm/api.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,327 @@
+// +build windows
+
+package winterm
+
+import (
+	"fmt"
+	"syscall"
+	"unsafe"
+)
+
+//===========================================================================================================
+// IMPORTANT NOTE:
+//
+//	The methods below make extensive use of the "unsafe" package to obtain the required pointers.
+//	Beginning in Go 1.3, the garbage collector may release local variables (e.g., incoming arguments, stack
+//	variables) the pointers reference *before* the API completes.
+//
+//  As a result, in those cases, the code must hint that the variables remain active by invoking the
+//	dummy method "use" (see below). Newer versions of Go are planned to change the mechanism to no longer
+//	require unsafe pointers.
+//
+//	If you add or modify methods, ENSURE protection of local variables through the "use" builtin to inform
+//	the garbage collector the variables remain in use if:
+//
+//	-- The value is not a pointer (e.g., int32, struct)
+//	-- The value is not referenced by the method after passing the pointer to Windows
+//
+//	See http://golang.org/doc/go1.3.
+//===========================================================================================================
+
+var (
+	kernel32DLL = syscall.NewLazyDLL("kernel32.dll")
+
+	getConsoleCursorInfoProc       = kernel32DLL.NewProc("GetConsoleCursorInfo")
+	setConsoleCursorInfoProc       = kernel32DLL.NewProc("SetConsoleCursorInfo")
+	setConsoleCursorPositionProc   = kernel32DLL.NewProc("SetConsoleCursorPosition")
+	setConsoleModeProc             = kernel32DLL.NewProc("SetConsoleMode")
+	getConsoleScreenBufferInfoProc = kernel32DLL.NewProc("GetConsoleScreenBufferInfo")
+	setConsoleScreenBufferSizeProc = kernel32DLL.NewProc("SetConsoleScreenBufferSize")
+	scrollConsoleScreenBufferProc  = kernel32DLL.NewProc("ScrollConsoleScreenBufferA") // ANSI ("A") variant, unlike the wide ("W") input/output procs below
+	setConsoleTextAttributeProc    = kernel32DLL.NewProc("SetConsoleTextAttribute")
+	setConsoleWindowInfoProc       = kernel32DLL.NewProc("SetConsoleWindowInfo")
+	writeConsoleOutputProc         = kernel32DLL.NewProc("WriteConsoleOutputW")
+	readConsoleInputProc           = kernel32DLL.NewProc("ReadConsoleInputW")
+	waitForSingleObjectProc        = kernel32DLL.NewProc("WaitForSingleObject")
+)
+
+// Windows Console constants
+const (
+	// Console modes
+	// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx.
+	ENABLE_PROCESSED_INPUT        = 0x0001
+	ENABLE_LINE_INPUT             = 0x0002
+	ENABLE_ECHO_INPUT             = 0x0004
+	ENABLE_WINDOW_INPUT           = 0x0008
+	ENABLE_MOUSE_INPUT            = 0x0010
+	ENABLE_INSERT_MODE            = 0x0020
+	ENABLE_QUICK_EDIT_MODE        = 0x0040
+	ENABLE_EXTENDED_FLAGS         = 0x0080
+	ENABLE_AUTO_POSITION          = 0x0100
+	ENABLE_VIRTUAL_TERMINAL_INPUT = 0x0200
+
+	ENABLE_PROCESSED_OUTPUT            = 0x0001
+	ENABLE_WRAP_AT_EOL_OUTPUT          = 0x0002
+	ENABLE_VIRTUAL_TERMINAL_PROCESSING = 0x0004
+	DISABLE_NEWLINE_AUTO_RETURN        = 0x0008
+	ENABLE_LVB_GRID_WORLDWIDE          = 0x0010
+
+	// Character attributes
+	// Note:
+	// -- The attributes are combined to produce various colors (e.g., Blue + Green will create Cyan).
+	//    Clearing all foreground or background colors results in black; setting all creates white.
+	// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms682088(v=vs.85).aspx#_win32_character_attributes.
+	FOREGROUND_BLUE      uint16 = 0x0001
+	FOREGROUND_GREEN     uint16 = 0x0002
+	FOREGROUND_RED       uint16 = 0x0004
+	FOREGROUND_INTENSITY uint16 = 0x0008
+	FOREGROUND_MASK      uint16 = 0x000F
+
+	BACKGROUND_BLUE      uint16 = 0x0010
+	BACKGROUND_GREEN     uint16 = 0x0020
+	BACKGROUND_RED       uint16 = 0x0040
+	BACKGROUND_INTENSITY uint16 = 0x0080
+	BACKGROUND_MASK      uint16 = 0x00F0
+
+	COMMON_LVB_MASK          uint16 = 0xFF00
+	COMMON_LVB_REVERSE_VIDEO uint16 = 0x4000
+	COMMON_LVB_UNDERSCORE    uint16 = 0x8000
+
+	// Input event types
+	// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms683499(v=vs.85).aspx.
+	KEY_EVENT                = 0x0001
+	MOUSE_EVENT              = 0x0002
+	WINDOW_BUFFER_SIZE_EVENT = 0x0004
+	MENU_EVENT               = 0x0008
+	FOCUS_EVENT              = 0x0010
+
+	// WaitForSingleObject return codes
+	WAIT_ABANDONED = 0x00000080
+	WAIT_FAILED    = 0xFFFFFFFF
+	WAIT_SIGNALED  = 0x0000000 // 0 == WAIT_OBJECT_0; literal has only 7 digits but the value is correct
+	WAIT_TIMEOUT   = 0x00000102
+
+	// WaitForSingleObject wait duration
+	WAIT_INFINITE       = 0xFFFFFFFF
+	WAIT_ONE_SECOND     = 1000 // durations are in milliseconds
+	WAIT_HALF_SECOND    = 500
+	WAIT_QUARTER_SECOND = 250
+)
+
+// Windows API Console types
+// -- See https://msdn.microsoft.com/en-us/library/windows/desktop/ms682101(v=vs.85).aspx for Console specific types (e.g., COORD)
+// -- See https://msdn.microsoft.com/en-us/library/aa296569(v=vs.60).aspx for comments on alignment
+type (
+	CHAR_INFO struct {
+		UnicodeChar uint16
+		Attributes  uint16
+	}
+
+	CONSOLE_CURSOR_INFO struct {
+		Size    uint32
+		Visible int32 // Windows BOOL: non-zero means visible
+	}
+
+	CONSOLE_SCREEN_BUFFER_INFO struct {
+		Size              COORD
+		CursorPosition    COORD
+		Attributes        uint16
+		Window            SMALL_RECT
+		MaximumWindowSize COORD
+	}
+
+	COORD struct {
+		X int16
+		Y int16
+	}
+
+	SMALL_RECT struct {
+		Left   int16
+		Top    int16
+		Right  int16
+		Bottom int16
+	}
+
+	// INPUT_RECORD is a C/C++ union of which KEY_EVENT_RECORD is one case, it is also the largest
+	// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms683499(v=vs.85).aspx.
+	INPUT_RECORD struct {
+		EventType uint16
+		KeyEvent  KEY_EVENT_RECORD
+	}
+
+	KEY_EVENT_RECORD struct {
+		KeyDown         int32 // Windows BOOL: non-zero for key-down events
+		RepeatCount     uint16
+		VirtualKeyCode  uint16
+		VirtualScanCode uint16
+		UnicodeChar     uint16
+		ControlKeyState uint32
+	}
+
+	WINDOW_BUFFER_SIZE struct {
+		Size COORD
+	}
+)
+
+// boolToBOOL converts a Go bool into a Windows int32.
+func boolToBOOL(f bool) int32 {
+	if f {
+		return int32(1)
+	} else {
+		return int32(0)
+	}
+}
+
+// GetConsoleCursorInfo retrieves information about the size and visibility of the console cursor.
+// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms683163(v=vs.85).aspx.
+func GetConsoleCursorInfo(handle uintptr, cursorInfo *CONSOLE_CURSOR_INFO) error {
+	r1, r2, err := getConsoleCursorInfoProc.Call(handle, uintptr(unsafe.Pointer(cursorInfo)), 0)
+	return checkError(r1, r2, err)
+}
+
+// SetConsoleCursorInfo sets the size and visibility of the console cursor.
+// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms686019(v=vs.85).aspx.
+func SetConsoleCursorInfo(handle uintptr, cursorInfo *CONSOLE_CURSOR_INFO) error {
+	r1, r2, err := setConsoleCursorInfoProc.Call(handle, uintptr(unsafe.Pointer(cursorInfo)), 0)
+	return checkError(r1, r2, err)
+}
+
+// SetConsoleCursorPosition sets the location of the console cursor.
+// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms686025(v=vs.85).aspx.
+func SetConsoleCursorPosition(handle uintptr, coord COORD) error {
+	r1, r2, err := setConsoleCursorPositionProc.Call(handle, coordToPointer(coord))
+	use(coord)
+	return checkError(r1, r2, err)
+}
+
+// GetConsoleMode gets the console mode for given file descriptor
+// See http://msdn.microsoft.com/en-us/library/windows/desktop/ms683167(v=vs.85).aspx.
+func GetConsoleMode(handle uintptr) (mode uint32, err error) {
+	err = syscall.GetConsoleMode(syscall.Handle(handle), &mode)
+	return mode, err
+}
+
+// SetConsoleMode sets the console mode for given file descriptor
+// See http://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx.
+func SetConsoleMode(handle uintptr, mode uint32) error {
+	r1, r2, err := setConsoleModeProc.Call(handle, uintptr(mode), 0)
+	use(mode)
+	return checkError(r1, r2, err)
+}
+
+// GetConsoleScreenBufferInfo retrieves information about the specified console screen buffer.
+// See http://msdn.microsoft.com/en-us/library/windows/desktop/ms683171(v=vs.85).aspx.
+func GetConsoleScreenBufferInfo(handle uintptr) (*CONSOLE_SCREEN_BUFFER_INFO, error) {
+	info := CONSOLE_SCREEN_BUFFER_INFO{}
+	err := checkError(getConsoleScreenBufferInfoProc.Call(handle, uintptr(unsafe.Pointer(&info)), 0))
+	if err != nil {
+		return nil, err
+	}
+	return &info, nil
+}
+
+func ScrollConsoleScreenBuffer(handle uintptr, scrollRect SMALL_RECT, clipRect SMALL_RECT, destOrigin COORD, char CHAR_INFO) error { // moves scrollRect (clipped to clipRect) to destOrigin, filling vacated cells with char
+	r1, r2, err := scrollConsoleScreenBufferProc.Call(handle, uintptr(unsafe.Pointer(&scrollRect)), uintptr(unsafe.Pointer(&clipRect)), coordToPointer(destOrigin), uintptr(unsafe.Pointer(&char)))
+	use(scrollRect)
+	use(clipRect)
+	use(destOrigin)
+	use(char)
+	return checkError(r1, r2, err)
+}
+
+// SetConsoleScreenBufferSize sets the size of the console screen buffer.
+// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms686044(v=vs.85).aspx.
+func SetConsoleScreenBufferSize(handle uintptr, coord COORD) error {
+	r1, r2, err := setConsoleScreenBufferSizeProc.Call(handle, coordToPointer(coord))
+	use(coord)
+	return checkError(r1, r2, err)
+}
+
+// SetConsoleTextAttribute sets the attributes of characters written to the
+// console screen buffer by the WriteFile or WriteConsole function.
+// See http://msdn.microsoft.com/en-us/library/windows/desktop/ms686047(v=vs.85).aspx.
+func SetConsoleTextAttribute(handle uintptr, attribute uint16) error {
+	r1, r2, err := setConsoleTextAttributeProc.Call(handle, uintptr(attribute), 0)
+	use(attribute)
+	return checkError(r1, r2, err)
+}
+
+// SetConsoleWindowInfo sets the size and position of the console screen buffer's window.
+// Note that the size and location must be within and no larger than the backing console screen buffer.
+// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms686125(v=vs.85).aspx.
+func SetConsoleWindowInfo(handle uintptr, isAbsolute bool, rect SMALL_RECT) error {
+	r1, r2, err := setConsoleWindowInfoProc.Call(handle, uintptr(boolToBOOL(isAbsolute)), uintptr(unsafe.Pointer(&rect)))
+	use(isAbsolute)
+	use(rect)
+	return checkError(r1, r2, err)
+}
+
+// WriteConsoleOutput writes the CHAR_INFOs from the provided buffer to the active console buffer.
+// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms687404(v=vs.85).aspx.
+func WriteConsoleOutput(handle uintptr, buffer []CHAR_INFO, bufferSize COORD, bufferCoord COORD, writeRegion *SMALL_RECT) error {
+	r1, r2, err := writeConsoleOutputProc.Call(handle, uintptr(unsafe.Pointer(&buffer[0])), coordToPointer(bufferSize), coordToPointer(bufferCoord), uintptr(unsafe.Pointer(writeRegion))) // buffer must be non-empty; &buffer[0] panics on a zero-length slice
+	use(buffer)
+	use(bufferSize)
+	use(bufferCoord)
+	return checkError(r1, r2, err)
+}
+
+// ReadConsoleInput reads (and removes) data from the console input buffer.
+// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms684961(v=vs.85).aspx.
+func ReadConsoleInput(handle uintptr, buffer []INPUT_RECORD, count *uint32) error {
+	r1, r2, err := readConsoleInputProc.Call(handle, uintptr(unsafe.Pointer(&buffer[0])), uintptr(len(buffer)), uintptr(unsafe.Pointer(count))) // buffer must be non-empty; &buffer[0] panics on a zero-length slice
+	use(buffer)
+	return checkError(r1, r2, err)
+}
+
+// WaitForSingleObject waits for the passed handle to be signaled.
+// It returns true if the handle was signaled; false otherwise.
+// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms687032(v=vs.85).aspx.
+func WaitForSingleObject(handle uintptr, msWait uint32) (bool, error) {
+	r1, _, err := waitForSingleObjectProc.Call(handle, uintptr(uint32(msWait)))
+	switch r1 {
+	case WAIT_ABANDONED, WAIT_TIMEOUT:
+		return false, nil
+	case WAIT_SIGNALED:
+		return true, nil
+	}
+	use(msWait) // reached only for unmatched return codes (e.g. WAIT_FAILED)
+	return false, err
+}
+
+// String helpers
+func (info CONSOLE_SCREEN_BUFFER_INFO) String() string {
+	return fmt.Sprintf("Size(%v) Cursor(%v) Window(%v) Max(%v)", info.Size, info.CursorPosition, info.Window, info.MaximumWindowSize)
+}
+
+func (coord COORD) String() string {
+	return fmt.Sprintf("%v,%v", coord.X, coord.Y)
+}
+
+func (rect SMALL_RECT) String() string {
+	return fmt.Sprintf("(%v,%v),(%v,%v)", rect.Left, rect.Top, rect.Right, rect.Bottom)
+}
+
+// checkError evaluates the results of a Windows API call and returns the error if it failed.
+func checkError(r1, r2 uintptr, err error) error {
+	// Windows APIs return non-zero to indicate success
+	if r1 != 0 {
+		return nil
+	}
+
+	// Return the error if provided, otherwise default to EINVAL
+	if err != nil {
+		return err
+	}
+	return syscall.EINVAL
+}
+
+// coordToPointer converts a COORD into a uintptr (by fooling the type system).
+func coordToPointer(c COORD) uintptr {
+	// Note: This code assumes the two SHORTs are correctly laid out; the "cast" to uint32 is just to get a pointer to pass.
+	return uintptr(*((*uint32)(unsafe.Pointer(&c))))
+}
+
+// use is a no-op, but the compiler cannot see that it is.
+// Calling use(p) ensures that p is kept live until that point.
+func use(p interface{}) {}
diff -pruN 0.19.3+ds1-4/vendor/github.com/Azure/go-ansiterm/winterm/attr_translation.go 0.21.3-0ubuntu1/vendor/github.com/Azure/go-ansiterm/winterm/attr_translation.go
--- 0.19.3+ds1-4/vendor/github.com/Azure/go-ansiterm/winterm/attr_translation.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/Azure/go-ansiterm/winterm/attr_translation.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,100 @@
+// +build windows
+
+package winterm
+
+import "github.com/Azure/go-ansiterm"
+
+const (
+	FOREGROUND_COLOR_MASK = FOREGROUND_RED | FOREGROUND_GREEN | FOREGROUND_BLUE // color bits only, excludes FOREGROUND_INTENSITY
+	BACKGROUND_COLOR_MASK = BACKGROUND_RED | BACKGROUND_GREEN | BACKGROUND_BLUE // color bits only, excludes BACKGROUND_INTENSITY
+)
+
+// collectAnsiIntoWindowsAttributes modifies the passed Windows text mode flags to reflect the
+// request represented by the passed ANSI mode.
+func collectAnsiIntoWindowsAttributes(windowsMode uint16, inverted bool, baseMode uint16, ansiMode int16) (uint16, bool) {
+	switch ansiMode {
+
+	// Mode styles
+	case ansiterm.ANSI_SGR_BOLD:
+		windowsMode = windowsMode | FOREGROUND_INTENSITY
+
+	case ansiterm.ANSI_SGR_DIM, ansiterm.ANSI_SGR_BOLD_DIM_OFF:
+		windowsMode &^= FOREGROUND_INTENSITY
+
+	case ansiterm.ANSI_SGR_UNDERLINE:
+		windowsMode = windowsMode | COMMON_LVB_UNDERSCORE
+
+	case ansiterm.ANSI_SGR_REVERSE:
+		inverted = true
+
+	case ansiterm.ANSI_SGR_REVERSE_OFF:
+		inverted = false
+
+	case ansiterm.ANSI_SGR_UNDERLINE_OFF:
+		windowsMode &^= COMMON_LVB_UNDERSCORE
+
+	// Foreground colors
+	case ansiterm.ANSI_SGR_FOREGROUND_DEFAULT:
+		windowsMode = (windowsMode &^ FOREGROUND_MASK) | (baseMode & FOREGROUND_MASK)
+
+	case ansiterm.ANSI_SGR_FOREGROUND_BLACK:
+		windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK)
+
+	case ansiterm.ANSI_SGR_FOREGROUND_RED:
+		windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_RED
+
+	case ansiterm.ANSI_SGR_FOREGROUND_GREEN:
+		windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_GREEN
+
+	case ansiterm.ANSI_SGR_FOREGROUND_YELLOW:
+		windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_RED | FOREGROUND_GREEN
+
+	case ansiterm.ANSI_SGR_FOREGROUND_BLUE:
+		windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_BLUE
+
+	case ansiterm.ANSI_SGR_FOREGROUND_MAGENTA:
+		windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_RED | FOREGROUND_BLUE
+
+	case ansiterm.ANSI_SGR_FOREGROUND_CYAN:
+		windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_GREEN | FOREGROUND_BLUE
+
+	case ansiterm.ANSI_SGR_FOREGROUND_WHITE:
+		windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_RED | FOREGROUND_GREEN | FOREGROUND_BLUE
+
+	// Background colors
+	case ansiterm.ANSI_SGR_BACKGROUND_DEFAULT:
+		// Restore the base (default) background bits, including intensity.
+		windowsMode = (windowsMode &^ BACKGROUND_MASK) | (baseMode & BACKGROUND_MASK)
+
+	case ansiterm.ANSI_SGR_BACKGROUND_BLACK:
+		windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK)
+
+	case ansiterm.ANSI_SGR_BACKGROUND_RED:
+		windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_RED
+
+	case ansiterm.ANSI_SGR_BACKGROUND_GREEN:
+		windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_GREEN
+
+	case ansiterm.ANSI_SGR_BACKGROUND_YELLOW:
+		windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_RED | BACKGROUND_GREEN
+
+	case ansiterm.ANSI_SGR_BACKGROUND_BLUE:
+		windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_BLUE
+
+	case ansiterm.ANSI_SGR_BACKGROUND_MAGENTA:
+		windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_RED | BACKGROUND_BLUE
+
+	case ansiterm.ANSI_SGR_BACKGROUND_CYAN:
+		windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_GREEN | BACKGROUND_BLUE
+
+	case ansiterm.ANSI_SGR_BACKGROUND_WHITE:
+		windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_RED | BACKGROUND_GREEN | BACKGROUND_BLUE
+	}
+
+	return windowsMode, inverted
+}
+
+// invertAttributes inverts the foreground and background colors of a Windows attributes value
+func invertAttributes(windowsMode uint16) uint16 {
+	return (COMMON_LVB_MASK & windowsMode) | ((FOREGROUND_MASK & windowsMode) << 4) | ((BACKGROUND_MASK & windowsMode) >> 4)
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/Azure/go-ansiterm/winterm/cursor_helpers.go 0.21.3-0ubuntu1/vendor/github.com/Azure/go-ansiterm/winterm/cursor_helpers.go
--- 0.19.3+ds1-4/vendor/github.com/Azure/go-ansiterm/winterm/cursor_helpers.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/Azure/go-ansiterm/winterm/cursor_helpers.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,101 @@
+// +build windows
+
+package winterm
+
+const (
+	horizontal = iota // movement axes for moveCursor: horizontal adjusts X
+	vertical          // vertical adjusts Y
+)
+
+func (h *windowsAnsiEventHandler) getCursorWindow(info *CONSOLE_SCREEN_BUFFER_INFO) SMALL_RECT { // rect bounding cursor movement: the scroll region when originMode is set, else the window
+	if h.originMode {
+		sr := h.effectiveSr(info.Window)
+		return SMALL_RECT{
+			Top:    sr.top,
+			Bottom: sr.bottom,
+			Left:   0,
+			Right:  info.Size.X - 1,
+		}
+	} else {
+		return SMALL_RECT{
+			Top:    info.Window.Top,
+			Bottom: info.Window.Bottom,
+			Left:   0,
+			Right:  info.Size.X - 1,
+		}
+	}
+}
+
+// setCursorPosition sets the cursor to the specified position, bounded to the screen size
+func (h *windowsAnsiEventHandler) setCursorPosition(position COORD, window SMALL_RECT) error {
+	position.X = ensureInRange(position.X, window.Left, window.Right)
+	position.Y = ensureInRange(position.Y, window.Top, window.Bottom)
+	err := SetConsoleCursorPosition(h.fd, position)
+	if err != nil {
+		return err
+	}
+	h.logf("Cursor position set: (%d, %d)", position.X, position.Y)
+	return err
+}
+
+func (h *windowsAnsiEventHandler) moveCursorVertical(param int) error {
+	return h.moveCursor(vertical, param)
+}
+
+func (h *windowsAnsiEventHandler) moveCursorHorizontal(param int) error {
+	return h.moveCursor(horizontal, param)
+}
+
+func (h *windowsAnsiEventHandler) moveCursor(moveMode int, param int) error { // moves the cursor by param cells along the given axis, clamped to the cursor window
+	info, err := GetConsoleScreenBufferInfo(h.fd)
+	if err != nil {
+		return err
+	}
+
+	position := info.CursorPosition
+	switch moveMode {
+	case horizontal:
+		position.X += int16(param)
+	case vertical:
+		position.Y += int16(param)
+	}
+
+	if err = h.setCursorPosition(position, h.getCursorWindow(info)); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (h *windowsAnsiEventHandler) moveCursorLine(param int) error { // moves the cursor param lines down (negative: up) and to column 0
+	info, err := GetConsoleScreenBufferInfo(h.fd)
+	if err != nil {
+		return err
+	}
+
+	position := info.CursorPosition
+	position.X = 0
+	position.Y += int16(param)
+
+	if err = h.setCursorPosition(position, h.getCursorWindow(info)); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (h *windowsAnsiEventHandler) moveCursorColumn(param int) error { // moves the cursor to an absolute column on the current line
+	info, err := GetConsoleScreenBufferInfo(h.fd)
+	if err != nil {
+		return err
+	}
+
+	position := info.CursorPosition
+	position.X = int16(param) - 1 // ANSI columns are 1-based; X is 0-based
+
+	if err = h.setCursorPosition(position, h.getCursorWindow(info)); err != nil {
+		return err
+	}
+
+	return nil
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/Azure/go-ansiterm/winterm/erase_helpers.go 0.21.3-0ubuntu1/vendor/github.com/Azure/go-ansiterm/winterm/erase_helpers.go
--- 0.19.3+ds1-4/vendor/github.com/Azure/go-ansiterm/winterm/erase_helpers.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/Azure/go-ansiterm/winterm/erase_helpers.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,84 @@
+// +build windows
+
+package winterm
+
+import "github.com/Azure/go-ansiterm"
+
+func (h *windowsAnsiEventHandler) clearRange(attributes uint16, fromCoord COORD, toCoord COORD) error { // clears [fromCoord, toCoord] in reading order: partial first line, middle rectangle, partial last line
+	// Ignore an invalid (negative area) request
+	if toCoord.Y < fromCoord.Y {
+		return nil
+	}
+
+	var err error
+
+	var coordStart = COORD{}
+	var coordEnd = COORD{}
+
+	xCurrent, yCurrent := fromCoord.X, fromCoord.Y
+	xEnd, yEnd := toCoord.X, toCoord.Y
+
+	// Clear any partial initial line
+	if xCurrent > 0 {
+		coordStart.X, coordStart.Y = xCurrent, yCurrent
+		coordEnd.X, coordEnd.Y = xEnd, yCurrent
+
+		err = h.clearRect(attributes, coordStart, coordEnd)
+		if err != nil {
+			return err
+		}
+
+		xCurrent = 0
+		yCurrent += 1
+	}
+
+	// Clear intervening rectangular section
+	if yCurrent < yEnd {
+		coordStart.X, coordStart.Y = xCurrent, yCurrent
+		coordEnd.X, coordEnd.Y = xEnd, yEnd-1
+
+		err = h.clearRect(attributes, coordStart, coordEnd)
+		if err != nil {
+			return err
+		}
+
+		xCurrent = 0
+		yCurrent = yEnd
+	}
+
+	// Clear remaining partial ending line
+	coordStart.X, coordStart.Y = xCurrent, yCurrent
+	coordEnd.X, coordEnd.Y = xEnd, yEnd
+
+	err = h.clearRect(attributes, coordStart, coordEnd)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (h *windowsAnsiEventHandler) clearRect(attributes uint16, fromCoord COORD, toCoord COORD) error { // fills the rectangle [fromCoord, toCoord] with FILL_CHARACTER in the given attributes
+	region := SMALL_RECT{Top: fromCoord.Y, Left: fromCoord.X, Bottom: toCoord.Y, Right: toCoord.X}
+	width := toCoord.X - fromCoord.X + 1
+	height := toCoord.Y - fromCoord.Y + 1
+	size := uint32(width) * uint32(height)
+
+	if size <= 0 { // NOTE(review): size is uint32 and can never be negative; inverted coords wrap to a huge value — TODO confirm callers always pass ordered coords
+		return nil
+	}
+
+	buffer := make([]CHAR_INFO, size)
+
+	char := CHAR_INFO{ansiterm.FILL_CHARACTER, attributes}
+	for i := 0; i < int(size); i++ {
+		buffer[i] = char
+	}
+
+	err := WriteConsoleOutput(h.fd, buffer, COORD{X: width, Y: height}, COORD{X: 0, Y: 0}, &region)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/Azure/go-ansiterm/winterm/scroll_helper.go 0.21.3-0ubuntu1/vendor/github.com/Azure/go-ansiterm/winterm/scroll_helper.go
--- 0.19.3+ds1-4/vendor/github.com/Azure/go-ansiterm/winterm/scroll_helper.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/Azure/go-ansiterm/winterm/scroll_helper.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,118 @@
+// +build windows
+
+package winterm
+
+// effectiveSr gets the current effective scroll region in buffer coordinates
+func (h *windowsAnsiEventHandler) effectiveSr(window SMALL_RECT) scrollRegion {
+	top := addInRange(window.Top, h.sr.top, window.Top, window.Bottom)
+	bottom := addInRange(window.Top, h.sr.bottom, window.Top, window.Bottom)
+	if top >= bottom { // empty or inverted region: fall back to the full window
+		top = window.Top
+		bottom = window.Bottom
+	}
+	return scrollRegion{top: top, bottom: bottom}
+}
+
+func (h *windowsAnsiEventHandler) scrollUp(param int) error { // scrolls the effective scroll region up by param lines
+	info, err := GetConsoleScreenBufferInfo(h.fd)
+	if err != nil {
+		return err
+	}
+
+	sr := h.effectiveSr(info.Window)
+	return h.scroll(param, sr, info)
+}
+
+func (h *windowsAnsiEventHandler) scrollDown(param int) error {
+	return h.scrollUp(-param) // a downward scroll is a negative upward scroll
+}
+
+func (h *windowsAnsiEventHandler) deleteLines(param int) error { // deletes param lines at the cursor row by scrolling the region below it
+	info, err := GetConsoleScreenBufferInfo(h.fd)
+	if err != nil {
+		return err
+	}
+
+	start := info.CursorPosition.Y
+	sr := h.effectiveSr(info.Window)
+	// Lines cannot be inserted or deleted outside the scrolling region.
+	if start >= sr.top && start <= sr.bottom {
+		sr.top = start
+		return h.scroll(param, sr, info)
+	} else {
+		return nil
+	}
+}
+
+func (h *windowsAnsiEventHandler) insertLines(param int) error {
+	return h.deleteLines(-param) // inserting lines is a negative delete
+}
+
+// scroll scrolls the provided scroll region by param lines. The scroll region is in buffer coordinates.
+func (h *windowsAnsiEventHandler) scroll(param int, sr scrollRegion, info *CONSOLE_SCREEN_BUFFER_INFO) error {
+	h.logf("scroll: scrollTop: %d, scrollBottom: %d", sr.top, sr.bottom)
+	h.logf("scroll: windowTop: %d, windowBottom: %d", info.Window.Top, info.Window.Bottom)
+
+	// Copy from and clip to the scroll region (full buffer width)
+	scrollRect := SMALL_RECT{
+		Top:    sr.top,
+		Bottom: sr.bottom,
+		Left:   0,
+		Right:  info.Size.X - 1,
+	}
+
+	// Origin to which area should be copied
+	destOrigin := COORD{
+		X: 0,
+		Y: sr.top - int16(param), // positive param shifts content up
+	}
+
+	char := CHAR_INFO{
+		UnicodeChar: ' ', // vacated lines are filled with blanks in the current attributes
+		Attributes:  h.attributes,
+	}
+
+	if err := ScrollConsoleScreenBuffer(h.fd, scrollRect, scrollRect, destOrigin, char); err != nil {
+		return err
+	}
+	return nil
+}
+
+func (h *windowsAnsiEventHandler) deleteCharacters(param int) error { // deletes param characters at the cursor by shifting the rest of the line left
+	info, err := GetConsoleScreenBufferInfo(h.fd)
+	if err != nil {
+		return err
+	}
+	return h.scrollLine(param, info.CursorPosition, info)
+}
+
+func (h *windowsAnsiEventHandler) insertCharacters(param int) error {
+	return h.deleteCharacters(-param) // inserting characters is a negative delete
+}
+
+// scrollLine scrolls a line horizontally starting at the provided position by a number of columns.
+func (h *windowsAnsiEventHandler) scrollLine(columns int, position COORD, info *CONSOLE_SCREEN_BUFFER_INFO) error {
+	// Copy from and clip to the scroll region (full buffer width)
+	scrollRect := SMALL_RECT{
+		Top:    position.Y,
+		Bottom: position.Y,
+		Left:   position.X,
+		Right:  info.Size.X - 1,
+	}
+
+	// Origin to which area should be copied
+	destOrigin := COORD{
+		X: position.X - int16(columns), // positive columns shift the line tail left (delete)
+		Y: position.Y,
+	}
+
+	char := CHAR_INFO{
+		UnicodeChar: ' ',
+		Attributes:  h.attributes,
+	}
+
+	if err := ScrollConsoleScreenBuffer(h.fd, scrollRect, scrollRect, destOrigin, char); err != nil {
+		return err
+	}
+	return nil
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/Azure/go-ansiterm/winterm/utilities.go 0.21.3-0ubuntu1/vendor/github.com/Azure/go-ansiterm/winterm/utilities.go
--- 0.19.3+ds1-4/vendor/github.com/Azure/go-ansiterm/winterm/utilities.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/Azure/go-ansiterm/winterm/utilities.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,9 @@
+// +build windows
+
+package winterm
+
+// addInRange increments a value by the passed quantity while ensuring the values
+// always remain within the supplied min / max range.
+func addInRange(n int16, increment int16, min int16, max int16) int16 {
+	return ensureInRange(n+increment, min, max)
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/Azure/go-ansiterm/winterm/win_event_handler.go 0.21.3-0ubuntu1/vendor/github.com/Azure/go-ansiterm/winterm/win_event_handler.go
--- 0.19.3+ds1-4/vendor/github.com/Azure/go-ansiterm/winterm/win_event_handler.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/Azure/go-ansiterm/winterm/win_event_handler.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,743 @@
+// +build windows
+
+package winterm
+
+import (
+	"bytes"
+	"log"
+	"os"
+	"strconv"
+
+	"github.com/Azure/go-ansiterm"
+)
+
+type windowsAnsiEventHandler struct {
+	fd             uintptr
+	file           *os.File
+	infoReset      *CONSOLE_SCREEN_BUFFER_INFO // console state captured at creation
+	sr             scrollRegion
+	buffer         bytes.Buffer // pending output, written to the console on Flush
+	attributes     uint16
+	inverted       bool
+	wrapNext       bool // a byte is held at the right margin; the next print wraps first
+	drewMarginByte bool
+	originMode     bool
+	marginByte     byte // the byte held back while wrapNext is set
+	curInfo        *CONSOLE_SCREEN_BUFFER_INFO
+	curPos         COORD
+	logf           func(string, ...interface{})
+}
+
+type Option func(*windowsAnsiEventHandler)
+
+func WithLogf(f func(string, ...interface{})) Option { // installs a caller-supplied logging function
+	return func(w *windowsAnsiEventHandler) {
+		w.logf = f
+	}
+}
+
+func CreateWinEventHandler(fd uintptr, file *os.File, opts ...Option) ansiterm.AnsiEventHandler { // builds a handler for the given console; returns nil if the console query fails
+	infoReset, err := GetConsoleScreenBufferInfo(fd)
+	if err != nil {
+		return nil // NOTE: callers receive a nil handler on failure
+	}
+
+	h := &windowsAnsiEventHandler{
+		fd:         fd,
+		file:       file,
+		infoReset:  infoReset,
+		attributes: infoReset.Attributes,
+	}
+	for _, o := range opts {
+		o(h)
+	}
+
+	if isDebugEnv := os.Getenv(ansiterm.LogEnv); isDebugEnv == "1" {
+		logFile, _ := os.Create("winEventHandler.log") // error ignored; logging is best-effort
+		logger := log.New(logFile, "", log.LstdFlags)
+		if h.logf != nil {
+			l := h.logf
+			h.logf = func(s string, v ...interface{}) { // fan out to both the supplied logger and the debug file
+				l(s, v...)
+				logger.Printf(s, v...)
+			}
+		} else {
+			h.logf = logger.Printf
+		}
+	}
+
+	if h.logf == nil {
+		h.logf = func(string, ...interface{}) {} // no-op logger so call sites need no nil checks
+	}
+
+	return h
+}
+
+type scrollRegion struct {
+	top    int16
+	bottom int16
+}
+
+// simulateLF simulates a LF or CR+LF by scrolling if necessary to handle the
+// current cursor position and scroll region settings, in which case it returns
+// true. If no special handling is necessary, then it does nothing and returns
+// false.
+//
+// In the false case, the caller should ensure that a carriage return
+// and line feed are inserted or that the text is otherwise wrapped.
+func (h *windowsAnsiEventHandler) simulateLF(includeCR bool) (bool, error) {
+	if h.wrapNext {
+		if err := h.Flush(); err != nil {
+			return false, err
+		}
+		h.clearWrap()
+	}
+	pos, info, err := h.getCurrentInfo()
+	if err != nil {
+		return false, err
+	}
+	sr := h.effectiveSr(info.Window)
+	if pos.Y == sr.bottom {
+		// Scrolling is necessary. Let Windows automatically scroll if the scrolling region
+		// is the full window.
+		if sr.top == info.Window.Top && sr.bottom == info.Window.Bottom {
+			if includeCR {
+				pos.X = 0
+				h.updatePos(pos)
+			}
+			return false, nil
+		}
+
+		// A custom scroll region is active. Scroll the window manually to simulate
+		// the LF.
+		if err := h.Flush(); err != nil {
+			return false, err
+		}
+		h.logf("Simulating LF inside scroll region")
+		if err := h.scrollUp(1); err != nil {
+			return false, err
+		}
+		if includeCR {
+			pos.X = 0
+			if err := SetConsoleCursorPosition(h.fd, pos); err != nil {
+				return false, err
+			}
+		}
+		return true, nil
+
+	} else if pos.Y < info.Window.Bottom {
+		// Let Windows handle the LF.
+		pos.Y++
+		if includeCR {
+			pos.X = 0
+		}
+		h.updatePos(pos)
+		return false, nil
+	} else {
+		// The cursor is at the bottom of the screen but outside the scroll
+		// region. Skip the LF.
+		h.logf("Simulating LF outside scroll region")
+		if includeCR {
+			if err := h.Flush(); err != nil {
+				return false, err
+			}
+			pos.X = 0
+			if err := SetConsoleCursorPosition(h.fd, pos); err != nil {
+				return false, err
+			}
+		}
+		return true, nil
+	}
+}
+
+// executeLF executes a LF without a CR.
+func (h *windowsAnsiEventHandler) executeLF() error {
+	handled, err := h.simulateLF(false)
+	if err != nil {
+		return err
+	}
+	if !handled {
+		// Windows LF will reset the cursor column position. Write the LF
+		// and restore the cursor position.
+		pos, _, err := h.getCurrentInfo()
+		if err != nil {
+			return err
+		}
+		h.buffer.WriteByte(ansiterm.ANSI_LINE_FEED)
+		if pos.X != 0 {
+			if err := h.Flush(); err != nil {
+				return err
+			}
+			h.logf("Resetting cursor position for LF without CR")
+			if err := SetConsoleCursorPosition(h.fd, pos); err != nil {
+				return err
+			}
+		}
+	}
+	return nil
+}
+
+func (h *windowsAnsiEventHandler) Print(b byte) error { // buffers a printable byte, deferring the byte at the right margin until wrap is decided
+	if h.wrapNext {
+		h.buffer.WriteByte(h.marginByte) // emit the previously deferred margin byte
+		h.clearWrap()
+		if _, err := h.simulateLF(true); err != nil {
+			return err
+		}
+	}
+	pos, info, err := h.getCurrentInfo()
+	if err != nil {
+		return err
+	}
+	if pos.X == info.Size.X-1 {
+		h.wrapNext = true
+		h.marginByte = b // hold the byte until we know whether the line wraps
+	} else {
+		pos.X++
+		h.updatePos(pos)
+		h.buffer.WriteByte(b)
+	}
+	return nil
+}
+
+func (h *windowsAnsiEventHandler) Execute(b byte) error { // handles a C0 control byte (TAB, BEL, BS, VT, FF, LF, CR); others are ignored
+	switch b {
+	case ansiterm.ANSI_TAB:
+		h.logf("Execute(TAB)")
+		// Move to the next tab stop, but preserve auto-wrap if already set.
+		if !h.wrapNext {
+			pos, info, err := h.getCurrentInfo()
+			if err != nil {
+				return err
+			}
+			pos.X = (pos.X + 8) - pos.X%8 // advance to the next multiple-of-8 tab stop
+			if pos.X >= info.Size.X {
+				pos.X = info.Size.X - 1 // clamp to the last column
+			}
+			if err := h.Flush(); err != nil {
+				return err
+			}
+			if err := SetConsoleCursorPosition(h.fd, pos); err != nil {
+				return err
+			}
+		}
+		return nil
+
+	case ansiterm.ANSI_BEL:
+		h.buffer.WriteByte(ansiterm.ANSI_BEL) // pass the bell through unchanged
+		return nil
+
+	case ansiterm.ANSI_BACKSPACE:
+		if h.wrapNext {
+			if err := h.Flush(); err != nil {
+				return err
+			}
+			h.clearWrap()
+		}
+		pos, _, err := h.getCurrentInfo()
+		if err != nil {
+			return err
+		}
+		if pos.X > 0 { // backspace never moves past the start of the line
+			pos.X--
+			h.updatePos(pos)
+			h.buffer.WriteByte(ansiterm.ANSI_BACKSPACE)
+		}
+		return nil
+
+	case ansiterm.ANSI_VERTICAL_TAB, ansiterm.ANSI_FORM_FEED:
+		// Treat as true LF.
+		return h.executeLF()
+
+	case ansiterm.ANSI_LINE_FEED:
+		// Simulate a CR and LF for now since there is no way in go-ansiterm
+		// to tell if the LF should include CR (and more things break when it's
+		// missing than when it's incorrectly added).
+		handled, err := h.simulateLF(true)
+		if handled || err != nil {
+			return err
+		}
+		return h.buffer.WriteByte(ansiterm.ANSI_LINE_FEED)
+
+	case ansiterm.ANSI_CARRIAGE_RETURN:
+		if h.wrapNext {
+			if err := h.Flush(); err != nil {
+				return err
+			}
+			h.clearWrap()
+		}
+		pos, _, err := h.getCurrentInfo()
+		if err != nil {
+			return err
+		}
+		if pos.X != 0 { // CR at column 0 is a no-op
+			pos.X = 0
+			h.updatePos(pos)
+			h.buffer.WriteByte(ansiterm.ANSI_CARRIAGE_RETURN)
+		}
+		return nil
+
+	default:
+		return nil // ignore other control bytes
+	}
+}
+
+func (h *windowsAnsiEventHandler) CUU(param int) error { // Cursor Up: move the cursor up param lines
+	if err := h.Flush(); err != nil {
+		return err
+	}
+	h.logf("CUU: [%v]", []string{strconv.Itoa(param)})
+	h.clearWrap()
+	return h.moveCursorVertical(-param)
+}
+
+func (h *windowsAnsiEventHandler) CUD(param int) error { // Cursor Down: move the cursor down param lines
+	if err := h.Flush(); err != nil {
+		return err
+	}
+	h.logf("CUD: [%v]", []string{strconv.Itoa(param)})
+	h.clearWrap()
+	return h.moveCursorVertical(param)
+}
+
+func (h *windowsAnsiEventHandler) CUF(param int) error {
+	if err := h.Flush(); err != nil {
+		return err
+	}
+	h.logf("CUF: [%v]", []string{strconv.Itoa(param)})
+	h.clearWrap()
+	return h.moveCursorHorizontal(param)
+}
+
+func (h *windowsAnsiEventHandler) CUB(param int) error {
+	if err := h.Flush(); err != nil {
+		return err
+	}
+	h.logf("CUB: [%v]", []string{strconv.Itoa(param)})
+	h.clearWrap()
+	return h.moveCursorHorizontal(-param)
+}
+
+func (h *windowsAnsiEventHandler) CNL(param int) error {
+	if err := h.Flush(); err != nil {
+		return err
+	}
+	h.logf("CNL: [%v]", []string{strconv.Itoa(param)})
+	h.clearWrap()
+	return h.moveCursorLine(param)
+}
+
+func (h *windowsAnsiEventHandler) CPL(param int) error {
+	if err := h.Flush(); err != nil {
+		return err
+	}
+	h.logf("CPL: [%v]", []string{strconv.Itoa(param)})
+	h.clearWrap()
+	return h.moveCursorLine(-param)
+}
+
+func (h *windowsAnsiEventHandler) CHA(param int) error {
+	if err := h.Flush(); err != nil {
+		return err
+	}
+	h.logf("CHA: [%v]", []string{strconv.Itoa(param)})
+	h.clearWrap()
+	return h.moveCursorColumn(param)
+}
+
+func (h *windowsAnsiEventHandler) VPA(param int) error {
+	if err := h.Flush(); err != nil {
+		return err
+	}
+	h.logf("VPA: [[%d]]", param)
+	h.clearWrap()
+	info, err := GetConsoleScreenBufferInfo(h.fd)
+	if err != nil {
+		return err
+	}
+	window := h.getCursorWindow(info)
+	position := info.CursorPosition
+	position.Y = window.Top + int16(param) - 1
+	return h.setCursorPosition(position, window)
+}
+
+func (h *windowsAnsiEventHandler) CUP(row int, col int) error {
+	if err := h.Flush(); err != nil {
+		return err
+	}
+	h.logf("CUP: [[%d %d]]", row, col)
+	h.clearWrap()
+	info, err := GetConsoleScreenBufferInfo(h.fd)
+	if err != nil {
+		return err
+	}
+
+	window := h.getCursorWindow(info)
+	position := COORD{window.Left + int16(col) - 1, window.Top + int16(row) - 1}
+	return h.setCursorPosition(position, window)
+}
+
+func (h *windowsAnsiEventHandler) HVP(row int, col int) error {
+	if err := h.Flush(); err != nil {
+		return err
+	}
+	h.logf("HVP: [[%d %d]]", row, col)
+	h.clearWrap()
+	return h.CUP(row, col)
+}
+
+func (h *windowsAnsiEventHandler) DECTCEM(visible bool) error {
+	if err := h.Flush(); err != nil {
+		return err
+	}
+	h.logf("DECTCEM: [%v]", []string{strconv.FormatBool(visible)})
+	h.clearWrap()
+	return nil
+}
+
+func (h *windowsAnsiEventHandler) DECOM(enable bool) error {
+	if err := h.Flush(); err != nil {
+		return err
+	}
+	h.logf("DECOM: [%v]", []string{strconv.FormatBool(enable)})
+	h.clearWrap()
+	h.originMode = enable
+	return h.CUP(1, 1)
+}
+
+func (h *windowsAnsiEventHandler) DECCOLM(use132 bool) error {
+	if err := h.Flush(); err != nil {
+		return err
+	}
+	h.logf("DECCOLM: [%v]", []string{strconv.FormatBool(use132)})
+	h.clearWrap()
+	if err := h.ED(2); err != nil {
+		return err
+	}
+	info, err := GetConsoleScreenBufferInfo(h.fd)
+	if err != nil {
+		return err
+	}
+	targetWidth := int16(80)
+	if use132 {
+		targetWidth = 132
+	}
+	if info.Size.X < targetWidth {
+		if err := SetConsoleScreenBufferSize(h.fd, COORD{targetWidth, info.Size.Y}); err != nil {
+			h.logf("set buffer failed: %v", err)
+			return err
+		}
+	}
+	window := info.Window
+	window.Left = 0
+	window.Right = targetWidth - 1
+	if err := SetConsoleWindowInfo(h.fd, true, window); err != nil {
+		h.logf("set window failed: %v", err)
+		return err
+	}
+	if info.Size.X > targetWidth {
+		if err := SetConsoleScreenBufferSize(h.fd, COORD{targetWidth, info.Size.Y}); err != nil {
+			h.logf("set buffer failed: %v", err)
+			return err
+		}
+	}
+	return SetConsoleCursorPosition(h.fd, COORD{0, 0})
+}
+
+func (h *windowsAnsiEventHandler) ED(param int) error {
+	if err := h.Flush(); err != nil {
+		return err
+	}
+	h.logf("ED: [%v]", []string{strconv.Itoa(param)})
+	h.clearWrap()
+
+	// [J  -- Erases from the cursor to the end of the screen, including the cursor position.
+	// [1J -- Erases from the beginning of the screen to the cursor, including the cursor position.
+	// [2J -- Erases the complete display. The cursor does not move.
+	// Notes:
+	// -- Clearing the entire buffer, versus just the Window, works best for Windows Consoles
+
+	info, err := GetConsoleScreenBufferInfo(h.fd)
+	if err != nil {
+		return err
+	}
+
+	var start COORD
+	var end COORD
+
+	switch param {
+	case 0:
+		start = info.CursorPosition
+		end = COORD{info.Size.X - 1, info.Size.Y - 1}
+
+	case 1:
+		start = COORD{0, 0}
+		end = info.CursorPosition
+
+	case 2:
+		start = COORD{0, 0}
+		end = COORD{info.Size.X - 1, info.Size.Y - 1}
+	}
+
+	err = h.clearRange(h.attributes, start, end)
+	if err != nil {
+		return err
+	}
+
+	// If the whole buffer was cleared, move the window to the top while preserving
+	// the window-relative cursor position.
+	if param == 2 {
+		pos := info.CursorPosition
+		window := info.Window
+		pos.Y -= window.Top
+		window.Bottom -= window.Top
+		window.Top = 0
+		if err := SetConsoleCursorPosition(h.fd, pos); err != nil {
+			return err
+		}
+		if err := SetConsoleWindowInfo(h.fd, true, window); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (h *windowsAnsiEventHandler) EL(param int) error {
+	if err := h.Flush(); err != nil {
+		return err
+	}
+	h.logf("EL: [%v]", strconv.Itoa(param))
+	h.clearWrap()
+
+	// [K  -- Erases from the cursor to the end of the line, including the cursor position.
+	// [1K -- Erases from the beginning of the line to the cursor, including the cursor position.
+	// [2K -- Erases the complete line.
+
+	info, err := GetConsoleScreenBufferInfo(h.fd)
+	if err != nil {
+		return err
+	}
+
+	var start COORD
+	var end COORD
+
+	switch param {
+	case 0:
+		start = info.CursorPosition
+		end = COORD{info.Size.X, info.CursorPosition.Y}
+
+	case 1:
+		start = COORD{0, info.CursorPosition.Y}
+		end = info.CursorPosition
+
+	case 2:
+		start = COORD{0, info.CursorPosition.Y}
+		end = COORD{info.Size.X, info.CursorPosition.Y}
+	}
+
+	err = h.clearRange(h.attributes, start, end)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (h *windowsAnsiEventHandler) IL(param int) error {
+	if err := h.Flush(); err != nil {
+		return err
+	}
+	h.logf("IL: [%v]", strconv.Itoa(param))
+	h.clearWrap()
+	return h.insertLines(param)
+}
+
+func (h *windowsAnsiEventHandler) DL(param int) error {
+	if err := h.Flush(); err != nil {
+		return err
+	}
+	h.logf("DL: [%v]", strconv.Itoa(param))
+	h.clearWrap()
+	return h.deleteLines(param)
+}
+
+func (h *windowsAnsiEventHandler) ICH(param int) error {
+	if err := h.Flush(); err != nil {
+		return err
+	}
+	h.logf("ICH: [%v]", strconv.Itoa(param))
+	h.clearWrap()
+	return h.insertCharacters(param)
+}
+
+func (h *windowsAnsiEventHandler) DCH(param int) error {
+	if err := h.Flush(); err != nil {
+		return err
+	}
+	h.logf("DCH: [%v]", strconv.Itoa(param))
+	h.clearWrap()
+	return h.deleteCharacters(param)
+}
+
+func (h *windowsAnsiEventHandler) SGR(params []int) error {
+	if err := h.Flush(); err != nil {
+		return err
+	}
+	strings := []string{}
+	for _, v := range params {
+		strings = append(strings, strconv.Itoa(v))
+	}
+
+	h.logf("SGR: [%v]", strings)
+
+	if len(params) <= 0 {
+		h.attributes = h.infoReset.Attributes
+		h.inverted = false
+	} else {
+		for _, attr := range params {
+
+			if attr == ansiterm.ANSI_SGR_RESET {
+				h.attributes = h.infoReset.Attributes
+				h.inverted = false
+				continue
+			}
+
+			h.attributes, h.inverted = collectAnsiIntoWindowsAttributes(h.attributes, h.inverted, h.infoReset.Attributes, int16(attr))
+		}
+	}
+
+	attributes := h.attributes
+	if h.inverted {
+		attributes = invertAttributes(attributes)
+	}
+	err := SetConsoleTextAttribute(h.fd, attributes)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (h *windowsAnsiEventHandler) SU(param int) error {
+	if err := h.Flush(); err != nil {
+		return err
+	}
+	h.logf("SU: [%v]", []string{strconv.Itoa(param)})
+	h.clearWrap()
+	return h.scrollUp(param)
+}
+
+func (h *windowsAnsiEventHandler) SD(param int) error {
+	if err := h.Flush(); err != nil {
+		return err
+	}
+	h.logf("SD: [%v]", []string{strconv.Itoa(param)})
+	h.clearWrap()
+	return h.scrollDown(param)
+}
+
+func (h *windowsAnsiEventHandler) DA(params []string) error {
+	h.logf("DA: [%v]", params)
+	// DA cannot be implemented because it must send data on the VT100 input stream,
+	// which is not available to go-ansiterm.
+	return nil
+}
+
+func (h *windowsAnsiEventHandler) DECSTBM(top int, bottom int) error {
+	if err := h.Flush(); err != nil {
+		return err
+	}
+	h.logf("DECSTBM: [%d, %d]", top, bottom)
+
+	// Windows is 0 indexed, Linux is 1 indexed
+	h.sr.top = int16(top - 1)
+	h.sr.bottom = int16(bottom - 1)
+
+	// This command also moves the cursor to the origin.
+	h.clearWrap()
+	return h.CUP(1, 1)
+}
+
+func (h *windowsAnsiEventHandler) RI() error {
+	if err := h.Flush(); err != nil {
+		return err
+	}
+	h.logf("RI: []")
+	h.clearWrap()
+
+	info, err := GetConsoleScreenBufferInfo(h.fd)
+	if err != nil {
+		return err
+	}
+
+	sr := h.effectiveSr(info.Window)
+	if info.CursorPosition.Y == sr.top {
+		return h.scrollDown(1)
+	}
+
+	return h.moveCursorVertical(-1)
+}
+
+func (h *windowsAnsiEventHandler) IND() error {
+	h.logf("IND: []")
+	return h.executeLF()
+}
+
+func (h *windowsAnsiEventHandler) Flush() error {
+	h.curInfo = nil
+	if h.buffer.Len() > 0 {
+		h.logf("Flush: [%s]", h.buffer.Bytes())
+		if _, err := h.buffer.WriteTo(h.file); err != nil {
+			return err
+		}
+	}
+
+	if h.wrapNext && !h.drewMarginByte {
+		h.logf("Flush: drawing margin byte '%c'", h.marginByte)
+
+		info, err := GetConsoleScreenBufferInfo(h.fd)
+		if err != nil {
+			return err
+		}
+
+		charInfo := []CHAR_INFO{{UnicodeChar: uint16(h.marginByte), Attributes: info.Attributes}}
+		size := COORD{1, 1}
+		position := COORD{0, 0}
+		region := SMALL_RECT{Left: info.CursorPosition.X, Top: info.CursorPosition.Y, Right: info.CursorPosition.X, Bottom: info.CursorPosition.Y}
+		if err := WriteConsoleOutput(h.fd, charInfo, size, position, &region); err != nil {
+			return err
+		}
+		h.drewMarginByte = true
+	}
+	return nil
+}
+
+// getCurrentInfo ensures that the current console screen information has been queried
+// since the last call to Flush(). It must be called before accessing h.curInfo or h.curPos.
+func (h *windowsAnsiEventHandler) getCurrentInfo() (COORD, *CONSOLE_SCREEN_BUFFER_INFO, error) {
+	if h.curInfo == nil {
+		info, err := GetConsoleScreenBufferInfo(h.fd)
+		if err != nil {
+			return COORD{}, nil, err
+		}
+		h.curInfo = info
+		h.curPos = info.CursorPosition
+	}
+	return h.curPos, h.curInfo, nil
+}
+
+func (h *windowsAnsiEventHandler) updatePos(pos COORD) {
+	if h.curInfo == nil {
+		panic("failed to call getCurrentInfo before calling updatePos")
+	}
+	h.curPos = pos
+}
+
+// clearWrap clears the state where the cursor is in the margin
+// waiting for the next character before wrapping the line. This must
+// be done before most operations that act on the cursor.
+func (h *windowsAnsiEventHandler) clearWrap() {
+	h.wrapNext = false
+	h.drewMarginByte = false
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/Masterminds/semver/v3/.gitignore 0.21.3-0ubuntu1/vendor/github.com/Masterminds/semver/v3/.gitignore
--- 0.19.3+ds1-4/vendor/github.com/Masterminds/semver/v3/.gitignore	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/Masterminds/semver/v3/.gitignore	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1 @@
+_fuzz/
\ No newline at end of file
diff -pruN 0.19.3+ds1-4/vendor/github.com/Masterminds/semver/v3/.golangci.yml 0.21.3-0ubuntu1/vendor/github.com/Masterminds/semver/v3/.golangci.yml
--- 0.19.3+ds1-4/vendor/github.com/Masterminds/semver/v3/.golangci.yml	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/Masterminds/semver/v3/.golangci.yml	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,27 @@
+run:
+  deadline: 2m
+
+linters:
+  disable-all: true
+  enable:
+    - misspell
+    - govet
+    - staticcheck
+    - errcheck
+    - unparam
+    - ineffassign
+    - nakedret
+    - gocyclo
+    - dupl
+    - goimports
+    - revive
+    - gosec
+    - gosimple
+    - typecheck
+    - unused
+
+linters-settings:
+  gofmt:
+    simplify: true
+  dupl:
+    threshold: 600
diff -pruN 0.19.3+ds1-4/vendor/github.com/Masterminds/semver/v3/CHANGELOG.md 0.21.3-0ubuntu1/vendor/github.com/Masterminds/semver/v3/CHANGELOG.md
--- 0.19.3+ds1-4/vendor/github.com/Masterminds/semver/v3/CHANGELOG.md	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/Masterminds/semver/v3/CHANGELOG.md	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,214 @@
+# Changelog
+
+## 3.2.0 (2022-11-28)
+
+### Added
+
+- #190: Added text marshaling and unmarshaling
+- #167: Added JSON marshalling for constraints (thanks @SimonTheLeg)
+- #173: Implement encoding.TextMarshaler and encoding.TextUnmarshaler on Version (thanks @MarkRosemaker)
+- #179: Added New() version constructor (thanks @kazhuravlev)
+
+### Changed
+
+- #182/#183: Updated CI testing setup
+
+### Fixed
+
+- #186: Fixing issue where validation of constraint section gave false positives
+- #176: Fix constraints check with *-0 (thanks @mtt0)
+- #181: Fixed Caret operator (^) gives unexpected results when the minor version in constraint is 0 (thanks @arshchimni)
+- #161: Fixed godoc (thanks @afirth)
+
+## 3.1.1 (2020-11-23)
+
+### Fixed
+
+- #158: Fixed issue with generated regex operation order that could cause problem
+
+## 3.1.0 (2020-04-15)
+
+### Added
+
+- #131: Add support for serializing/deserializing SQL (thanks @ryancurrah)
+
+### Changed
+
+- #148: More accurate validation messages on constraints
+
+## 3.0.3 (2019-12-13)
+
+### Fixed
+
+- #141: Fixed issue with <= comparison
+
+## 3.0.2 (2019-11-14)
+
+### Fixed
+
+- #134: Fixed broken constraint checking with ^0.0 (thanks @krmichelos)
+
+## 3.0.1 (2019-09-13)
+
+### Fixed
+
+- #125: Fixes issue with module path for v3
+
+## 3.0.0 (2019-09-12)
+
+This is a major release of the semver package which includes API changes. The Go
+API is compatible with ^1. The Go API was not changed because many people are using
+`go get` without Go modules for their applications and API breaking changes cause
+errors which we have or would need to support.
+
+The changes in this release are the handling based on the data passed into the
+functions. These are described in the added and changed sections below.
+
+### Added
+
+- StrictNewVersion function. This is similar to NewVersion but will return an
+  error if the version passed in is not a strict semantic version. For example,
+  1.2.3 would pass but v1.2.3 or 1.2 would fail because they are not strictly
+  speaking semantic versions. This function is faster, performs fewer operations,
+  and uses fewer allocations than NewVersion.
+- Fuzzing has been performed on NewVersion, StrictNewVersion, and NewConstraint.
+  The Makefile contains the operations used. For more information on you can start
+  on Wikipedia at https://en.wikipedia.org/wiki/Fuzzing
+- Now using Go modules
+
+### Changed
+
+- NewVersion has proper prerelease and metadata validation with error messages
+  to signal an issue with either of them
+- ^ now operates using a similar set of rules to npm/js and Rust/Cargo. If the
+  version is >=1 the ^ ranges works the same as v1. For major versions of 0 the
+  rules have changed. The minor version is treated as the stable version unless
+  a patch is specified and then it is equivalent to =. One difference from npm/js
+  is that prereleases there are only to a specific version (e.g. 1.2.3).
+  Prereleases here look over multiple versions and follow semantic version
+  ordering rules. This pattern now follows along with the expected and requested
+  handling of this packaged by numerous users.
+
+## 1.5.0 (2019-09-11)
+
+### Added
+
+- #103: Add basic fuzzing for `NewVersion()` (thanks @jesse-c)
+
+### Changed
+
+- #82: Clarify wildcard meaning in range constraints and update tests for it (thanks @greysteil)
+- #83: Clarify caret operator range for pre-1.0.0 dependencies (thanks @greysteil)
+- #72: Adding docs comment pointing to vert for a cli
+- #71: Update the docs on pre-release comparator handling
+- #89: Test with new go versions (thanks @thedevsaddam)
+- #87: Added $ to ValidPrerelease for better validation (thanks @jeremycarroll)
+
+### Fixed
+
+- #78: Fix unchecked error in example code (thanks @ravron)
+- #70: Fix the handling of pre-releases and the 0.0.0 release edge case
+- #97: Fixed copyright file for proper display on GitHub
+- #107: Fix handling prerelease when sorting alphanum and num 
+- #109: Fixed where Validate sometimes returns wrong message on error
+
+## 1.4.2 (2018-04-10)
+
+### Changed
+
+- #72: Updated the docs to point to vert for a console application
+- #71: Update the docs on pre-release comparator handling
+
+### Fixed
+
+- #70: Fix the handling of pre-releases and the 0.0.0 release edge case
+
+## 1.4.1 (2018-04-02)
+
+### Fixed
+
+- Fixed #64: Fix pre-release precedence issue (thanks @uudashr)
+
+## 1.4.0 (2017-10-04)
+
+### Changed
+
+- #61: Update NewVersion to parse ints with a 64bit int size (thanks @zknill)
+
+## 1.3.1 (2017-07-10)
+
+### Fixed
+
+- Fixed #57: number comparisons in prerelease sometimes inaccurate
+
+## 1.3.0 (2017-05-02)
+
+### Added
+
+- #45: Added json (un)marshaling support (thanks @mh-cbon)
+- Stability marker. See https://masterminds.github.io/stability/
+
+### Fixed
+
+- #51: Fix handling of single digit tilde constraint (thanks @dgodd)
+
+### Changed
+
+- #55: The godoc icon moved from png to svg
+
+## 1.2.3 (2017-04-03)
+
+### Fixed
+
+- #46: Fixed 0.x.x and 0.0.x in constraints being treated as *
+
+## Release 1.2.2 (2016-12-13)
+
+### Fixed
+
+- #34: Fixed issue where hyphen range was not working with pre-release parsing.
+
+## Release 1.2.1 (2016-11-28)
+
+### Fixed
+
+- #24: Fixed edge case issue where constraint "> 0" does not handle "0.0.1-alpha"
+  properly.
+
+## Release 1.2.0 (2016-11-04)
+
+### Added
+
+- #20: Added MustParse function for versions (thanks @adamreese)
+- #15: Added increment methods on versions (thanks @mh-cbon)
+
+### Fixed
+
+- Issue #21: Per the SemVer spec (section 9) a pre-release is unstable and
+  might not satisfy the intended compatibility. The change here ignores pre-releases
+  on constraint checks (e.g., ~ or ^) when a pre-release is not part of the
+  constraint. For example, `^1.2.3` will ignore pre-releases while
+  `^1.2.3-alpha` will include them.
+
+## Release 1.1.1 (2016-06-30)
+
+### Changed
+
+- Issue #9: Speed up version comparison performance (thanks @sdboyer)
+- Issue #8: Added benchmarks (thanks @sdboyer)
+- Updated Go Report Card URL to new location
+- Updated Readme to add code snippet formatting (thanks @mh-cbon)
+- Updating tagging to v[SemVer] structure for compatibility with other tools.
+
+## Release 1.1.0 (2016-03-11)
+
+- Issue #2: Implemented validation to provide reasons a versions failed a
+  constraint.
+
+## Release 1.0.1 (2015-12-31)
+
+- Fixed #1: * constraint failing on valid versions.
+
+## Release 1.0.0 (2015-10-20)
+
+- Initial release
diff -pruN 0.19.3+ds1-4/vendor/github.com/Masterminds/semver/v3/LICENSE.txt 0.21.3-0ubuntu1/vendor/github.com/Masterminds/semver/v3/LICENSE.txt
--- 0.19.3+ds1-4/vendor/github.com/Masterminds/semver/v3/LICENSE.txt	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/Masterminds/semver/v3/LICENSE.txt	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,19 @@
+Copyright (C) 2014-2019, Matt Butcher and Matt Farina
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff -pruN 0.19.3+ds1-4/vendor/github.com/Masterminds/semver/v3/Makefile 0.21.3-0ubuntu1/vendor/github.com/Masterminds/semver/v3/Makefile
--- 0.19.3+ds1-4/vendor/github.com/Masterminds/semver/v3/Makefile	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/Masterminds/semver/v3/Makefile	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,30 @@
+GOPATH=$(shell go env GOPATH)
+GOLANGCI_LINT=$(GOPATH)/bin/golangci-lint
+
+.PHONY: lint
+lint: $(GOLANGCI_LINT)
+	@echo "==> Linting codebase"
+	@$(GOLANGCI_LINT) run
+
+.PHONY: test
+test:
+	@echo "==> Running tests"
+	GO111MODULE=on go test -v
+
+.PHONY: test-cover
+test-cover:
+	@echo "==> Running Tests with coverage"
+	GO111MODULE=on go test -cover .
+
+.PHONY: fuzz
+fuzz:
+	@echo "==> Running Fuzz Tests"
+	go test -fuzz=FuzzNewVersion -fuzztime=15s .
+	go test -fuzz=FuzzStrictNewVersion -fuzztime=15s .
+	go test -fuzz=FuzzNewConstraint -fuzztime=15s .
+
+$(GOLANGCI_LINT):
+	# Install golangci-lint. The configuration for it is in the .golangci.yml
+	# file in the root of the repository
+	echo ${GOPATH}
+	curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $(GOPATH)/bin v1.17.1
diff -pruN 0.19.3+ds1-4/vendor/github.com/Masterminds/semver/v3/README.md 0.21.3-0ubuntu1/vendor/github.com/Masterminds/semver/v3/README.md
--- 0.19.3+ds1-4/vendor/github.com/Masterminds/semver/v3/README.md	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/Masterminds/semver/v3/README.md	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,258 @@
+# SemVer
+
+The `semver` package provides the ability to work with [Semantic Versions](http://semver.org) in Go. Specifically it provides the ability to:
+
+* Parse semantic versions
+* Sort semantic versions
+* Check if a semantic version fits within a set of constraints
+* Optionally work with a `v` prefix
+
+[![Stability:
+Active](https://masterminds.github.io/stability/active.svg)](https://masterminds.github.io/stability/active.html)
+[![](https://github.com/Masterminds/semver/workflows/Tests/badge.svg)](https://github.com/Masterminds/semver/actions)
+[![GoDoc](https://img.shields.io/static/v1?label=godoc&message=reference&color=blue)](https://pkg.go.dev/github.com/Masterminds/semver/v3)
+[![Go Report Card](https://goreportcard.com/badge/github.com/Masterminds/semver)](https://goreportcard.com/report/github.com/Masterminds/semver)
+
+If you are looking for a command line tool for version comparisons please see
+[vert](https://github.com/Masterminds/vert) which uses this library.
+
+## Package Versions
+
+Note, import `github.com/Masterminds/semver/v3` to use the latest version.
+
+There are three major versions of the `semver` package.
+
+* 3.x.x is the stable and active version. This version is focused on constraint
+  compatibility for range handling in other tools from other languages. It has
+  a similar API to the v1 releases. The development of this version is on the master
+  branch. The documentation for this version is below.
+* 2.x was developed primarily for [dep](https://github.com/golang/dep). There are
+  no tagged releases and the development was performed by [@sdboyer](https://github.com/sdboyer).
+  There are API breaking changes from v1. This version lives on the [2.x branch](https://github.com/Masterminds/semver/tree/2.x).
+* 1.x.x is the original release. It is no longer maintained. You should use the
+  v3 release instead. You can read the documentation for the 1.x.x release
+  [here](https://github.com/Masterminds/semver/blob/release-1/README.md).
+
+## Parsing Semantic Versions
+
+There are two functions that can parse semantic versions. The `StrictNewVersion`
+function only parses valid version 2 semantic versions as outlined in the
+specification. The `NewVersion` function attempts to coerce a version into a
+semantic version and parse it. For example, if there is a leading v or a version
+listed without all 3 parts (e.g. `v1.2`) it will attempt to coerce it into a valid
+semantic version (e.g., 1.2.0). In both cases a `Version` object is returned
+that can be sorted, compared, and used in constraints.
+
+When parsing a version an error is returned if there is an issue parsing the
+version. For example,
+
+    v, err := semver.NewVersion("1.2.3-beta.1+build345")
+
+The version object has methods to get the parts of the version, compare it to
+other versions, convert the version back into a string, and get the original
+string. Getting the original string is useful if the semantic version was coerced
+into a valid form.
+
+## Sorting Semantic Versions
+
+A set of versions can be sorted using the `sort` package from the standard library.
+For example,
+
+```go
+raw := []string{"1.2.3", "1.0", "1.3", "2", "0.4.2",}
+vs := make([]*semver.Version, len(raw))
+for i, r := range raw {
+    v, err := semver.NewVersion(r)
+    if err != nil {
+        t.Errorf("Error parsing version: %s", err)
+    }
+
+    vs[i] = v
+}
+
+sort.Sort(semver.Collection(vs))
+```
+
+## Checking Version Constraints
+
+There are two methods for comparing versions. One uses comparison methods on
+`Version` instances and the other uses `Constraints`. There are some important
+differences to note between these two methods of comparison.
+
+1. When two versions are compared using functions such as `Compare`, `LessThan`,
+   and others it will follow the specification and always include prereleases
+   within the comparison. It will provide an answer that is valid with the
+   comparison section of the spec at https://semver.org/#spec-item-11
+2. When constraint checking is used for checks or validation it will follow a
+   different set of rules that are common for ranges with tools like npm/js
+   and Rust/Cargo. This includes considering prereleases to be invalid if the
+   ranges does not include one. If you want to have it include pre-releases a
+   simple solution is to include `-0` in your range.
+3. Constraint ranges can have some complex rules including the shorthand use of
+   ~ and ^. For more details on those see the options below.
+
+There are differences between the two methods of checking versions because the
+comparison methods on `Version` follow the specification while comparison ranges
+are not part of the specification. Different packages and tools have taken it
+upon themselves to come up with range rules. This has resulted in differences.
+For example, npm/js and Cargo/Rust follow similar patterns while PHP has a
+different pattern for ^. The comparison features in this package follow the
+npm/js and Cargo/Rust lead because applications using it have followed similar
+patterns with their versions.
+
+Checking a version against version constraints is one of the most featureful
+parts of the package.
+
+```go
+c, err := semver.NewConstraint(">= 1.2.3")
+if err != nil {
+    // Handle constraint not being parsable.
+}
+
+v, err := semver.NewVersion("1.3")
+if err != nil {
+    // Handle version not being parsable.
+}
+// Check if the version meets the constraints. The a variable will be true.
+a := c.Check(v)
+```
+
+### Basic Comparisons
+
+There are two elements to the comparisons. First, a comparison string is a list
+of space or comma separated AND comparisons. These are then separated by || (OR)
+comparisons. For example, `">= 1.2 < 3.0.0 || >= 4.2.3"` is looking for a
+comparison that's greater than or equal to 1.2 and less than 3.0.0 or is
+greater than or equal to 4.2.3.
+
+The basic comparisons are:
+
+* `=`: equal (aliased to no operator)
+* `!=`: not equal
+* `>`: greater than
+* `<`: less than
+* `>=`: greater than or equal to
+* `<=`: less than or equal to
+
+### Working With Prerelease Versions
+
+Pre-releases, for those not familiar with them, are used for software releases
+prior to stable or generally available releases. Examples of prereleases include
+development, alpha, beta, and release candidate releases. A prerelease may be
+a version such as `1.2.3-beta.1` while the stable release would be `1.2.3`. In the
+order of precedence, prereleases come before their associated releases. In this
+example `1.2.3-beta.1 < 1.2.3`.
+
+According to the Semantic Version specification prereleases may not be
+API compliant with their release counterpart. It says,
+
+> A pre-release version indicates that the version is unstable and might not satisfy the intended compatibility requirements as denoted by its associated normal version.
+
+SemVer comparisons using constraints without a prerelease comparator will skip
+prerelease versions. For example, `>=1.2.3` will skip prereleases when looking
+at a list of releases while `>=1.2.3-0` will evaluate and find prereleases.
+
+The reason for the `0` as a pre-release version in the example comparison is
+because pre-releases can only contain ASCII alphanumerics and hyphens (along with
+`.` separators), per the spec. Sorting happens in ASCII sort order, again per the
+spec. The lowest character is a `0` in ASCII sort order
+(see an [ASCII Table](http://www.asciitable.com/))
+
+Understanding ASCII sort ordering is important because A-Z comes before a-z. That
+means `>=1.2.3-BETA` will return `1.2.3-alpha`. What you might expect from case
+sensitivity doesn't apply here. This is due to ASCII sort ordering which is what
+the spec specifies.
+
+### Hyphen Range Comparisons
+
+There are multiple methods to handle ranges and the first is hyphens ranges.
+These look like:
+
+* `1.2 - 1.4.5` which is equivalent to `>= 1.2 <= 1.4.5`
+* `2.3.4 - 4.5` which is equivalent to `>= 2.3.4 <= 4.5`
+
+### Wildcards In Comparisons
+
+The `x`, `X`, and `*` characters can be used as a wildcard character. This works
+for all comparison operators. When used on the `=` operator it falls
+back to the patch level comparison (see tilde below). For example,
+
+* `1.2.x` is equivalent to `>= 1.2.0, < 1.3.0`
+* `>= 1.2.x` is equivalent to `>= 1.2.0`
+* `<= 2.x` is equivalent to `< 3`
+* `*` is equivalent to `>= 0.0.0`
+
+### Tilde Range Comparisons (Patch)
+
+The tilde (`~`) comparison operator is for patch level ranges when a minor
+version is specified and major level changes when the minor number is missing.
+For example,
+
+* `~1.2.3` is equivalent to `>= 1.2.3, < 1.3.0`
+* `~1` is equivalent to `>= 1, < 2`
+* `~2.3` is equivalent to `>= 2.3, < 2.4`
+* `~1.2.x` is equivalent to `>= 1.2.0, < 1.3.0`
+* `~1.x` is equivalent to `>= 1, < 2`
+
+### Caret Range Comparisons (Major)
+
+The caret (`^`) comparison operator is for major level changes once a stable
+(1.0.0) release has occurred. Prior to a 1.0.0 release the minor version acts
+as the API stability level. This is useful when comparisons of API versions as a
+major change is API breaking. For example,
+
+* `^1.2.3` is equivalent to `>= 1.2.3, < 2.0.0`
+* `^1.2.x` is equivalent to `>= 1.2.0, < 2.0.0`
+* `^2.3` is equivalent to `>= 2.3, < 3`
+* `^2.x` is equivalent to `>= 2.0.0, < 3`
+* `^0.2.3` is equivalent to `>=0.2.3 <0.3.0`
+* `^0.2` is equivalent to `>=0.2.0 <0.3.0`
+* `^0.0.3` is equivalent to `>=0.0.3 <0.0.4`
+* `^0.0` is equivalent to `>=0.0.0 <0.1.0`
+* `^0` is equivalent to `>=0.0.0 <1.0.0`
+
+## Validation
+
+In addition to testing a version against a constraint, a version can be validated
+against a constraint. When validation fails a slice of errors containing why a
+version didn't meet the constraint is returned. For example,
+
+```go
+c, err := semver.NewConstraint("<= 1.2.3, >= 1.4")
+if err != nil {
+    // Handle constraint not being parseable.
+}
+
+v, err := semver.NewVersion("1.3")
+if err != nil {
+    // Handle version not being parseable.
+}
+
+// Validate a version against a constraint.
+a, msgs := c.Validate(v)
+// a is false
+for _, m := range msgs {
+    fmt.Println(m)
+
+    // Loops over the errors which would read
+    // "1.3 is greater than 1.2.3"
+    // "1.3 is less than 1.4"
+}
+```
+
+## Contribute
+
+If you find an issue or want to contribute please file an [issue](https://github.com/Masterminds/semver/issues)
+or [create a pull request](https://github.com/Masterminds/semver/pulls).
+
+## Security
+
+Security is an important consideration for this project. The project currently
+uses the following tools to help discover security issues:
+
+* [CodeQL](https://github.com/Masterminds/semver)
+* [gosec](https://github.com/securego/gosec)
+* Daily Fuzz testing
+
+If you believe you have found a security vulnerability you can privately disclose
+it through the [GitHub security page](https://github.com/Masterminds/semver/security).
diff -pruN 0.19.3+ds1-4/vendor/github.com/Masterminds/semver/v3/SECURITY.md 0.21.3-0ubuntu1/vendor/github.com/Masterminds/semver/v3/SECURITY.md
--- 0.19.3+ds1-4/vendor/github.com/Masterminds/semver/v3/SECURITY.md	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/Masterminds/semver/v3/SECURITY.md	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,19 @@
+# Security Policy
+
+## Supported Versions
+
+The following versions of semver are currently supported:
+
+| Version | Supported          |
+| ------- | ------------------ |
+| 3.x     | :white_check_mark: |
+| 2.x     | :x:                |
+| 1.x     | :x:                |
+
+Fixes are only released for the latest minor version in the form of a patch release.
+
+## Reporting a Vulnerability
+
+You can privately disclose a vulnerability through GitHub's
+[private vulnerability reporting](https://github.com/Masterminds/semver/security/advisories)
+mechanism.
diff -pruN 0.19.3+ds1-4/vendor/github.com/Masterminds/semver/v3/collection.go 0.21.3-0ubuntu1/vendor/github.com/Masterminds/semver/v3/collection.go
--- 0.19.3+ds1-4/vendor/github.com/Masterminds/semver/v3/collection.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/Masterminds/semver/v3/collection.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,24 @@
+package semver
+
+// Collection is a collection of Version instances and implements the sort
+// interface. See the sort package for more details.
+// https://golang.org/pkg/sort/
+type Collection []*Version
+
+// Len returns the length of a collection. The number of Version instances
+// on the slice.
+func (c Collection) Len() int {
+	return len(c)
+}
+
+// Less is needed for the sort interface to compare two Version objects on the
+// slice. It checks if one is less than the other.
+func (c Collection) Less(i, j int) bool {
+	return c[i].LessThan(c[j])
+}
+
+// Swap is needed for the sort interface to replace the Version objects
+// at two different positions in the slice.
+func (c Collection) Swap(i, j int) {
+	c[i], c[j] = c[j], c[i]
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/Masterminds/semver/v3/constraints.go 0.21.3-0ubuntu1/vendor/github.com/Masterminds/semver/v3/constraints.go
--- 0.19.3+ds1-4/vendor/github.com/Masterminds/semver/v3/constraints.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/Masterminds/semver/v3/constraints.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,594 @@
+package semver
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"regexp"
+	"strings"
+)
+
+// Constraints is one or more constraint that a semantic version can be
+// checked against.
+type Constraints struct {
+	constraints [][]*constraint
+}
+
+// NewConstraint returns a Constraints instance that a Version instance can
+// be checked against. If there is a parse error it will be returned.
+func NewConstraint(c string) (*Constraints, error) {
+
+	// Rewrite - ranges into a comparison operation.
+	c = rewriteRange(c)
+
+	ors := strings.Split(c, "||")
+	or := make([][]*constraint, len(ors))
+	for k, v := range ors {
+
+		// TODO: Find a way to validate and fetch all the constraints in a simpler form
+
+		// Validate the segment
+		if !validConstraintRegex.MatchString(v) {
+			return nil, fmt.Errorf("improper constraint: %s", v)
+		}
+
+		cs := findConstraintRegex.FindAllString(v, -1)
+		if cs == nil {
+			cs = append(cs, v)
+		}
+		result := make([]*constraint, len(cs))
+		for i, s := range cs {
+			pc, err := parseConstraint(s)
+			if err != nil {
+				return nil, err
+			}
+
+			result[i] = pc
+		}
+		or[k] = result
+	}
+
+	o := &Constraints{constraints: or}
+	return o, nil
+}
+
+// Check tests if a version satisfies the constraints.
+func (cs Constraints) Check(v *Version) bool {
+	// TODO(mattfarina): For v4 of this library consolidate the Check and Validate
+	// functions as the underlying functions make that possible now.
+	// loop over the ORs and check the inner ANDs
+	for _, o := range cs.constraints {
+		joy := true
+		for _, c := range o {
+			if check, _ := c.check(v); !check {
+				joy = false
+				break
+			}
+		}
+
+		if joy {
+			return true
+		}
+	}
+
+	return false
+}
+
+// Validate checks if a version satisfies a constraint. If not a slice of
+// reasons for the failure are returned in addition to a bool.
+func (cs Constraints) Validate(v *Version) (bool, []error) {
+	// loop over the ORs and check the inner ANDs
+	var e []error
+
+	// Capture the prerelease message only once. When it happens the first time
+	// this var is marked
+	var prerelesase bool
+	for _, o := range cs.constraints {
+		joy := true
+		for _, c := range o {
+			// Before running the check handle the case there the version is
+			// a prerelease and the check is not searching for prereleases.
+			if c.con.pre == "" && v.pre != "" {
+				if !prerelesase {
+					em := fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v)
+					e = append(e, em)
+					prerelesase = true
+				}
+				joy = false
+
+			} else {
+
+				if _, err := c.check(v); err != nil {
+					e = append(e, err)
+					joy = false
+				}
+			}
+		}
+
+		if joy {
+			return true, []error{}
+		}
+	}
+
+	return false, e
+}
+
+func (cs Constraints) String() string {
+	buf := make([]string, len(cs.constraints))
+	var tmp bytes.Buffer
+
+	for k, v := range cs.constraints {
+		tmp.Reset()
+		vlen := len(v)
+		for kk, c := range v {
+			tmp.WriteString(c.string())
+
+			// Space separate the AND conditions
+			if vlen > 1 && kk < vlen-1 {
+				tmp.WriteString(" ")
+			}
+		}
+		buf[k] = tmp.String()
+	}
+
+	return strings.Join(buf, " || ")
+}
+
+// UnmarshalText implements the encoding.TextUnmarshaler interface.
+func (cs *Constraints) UnmarshalText(text []byte) error {
+	temp, err := NewConstraint(string(text))
+	if err != nil {
+		return err
+	}
+
+	*cs = *temp
+
+	return nil
+}
+
+// MarshalText implements the encoding.TextMarshaler interface.
+func (cs Constraints) MarshalText() ([]byte, error) {
+	return []byte(cs.String()), nil
+}
+
+var constraintOps map[string]cfunc
+var constraintRegex *regexp.Regexp
+var constraintRangeRegex *regexp.Regexp
+
+// Used to find individual constraints within a multi-constraint string
+var findConstraintRegex *regexp.Regexp
+
+// Used to validate a segment of ANDs is valid
+var validConstraintRegex *regexp.Regexp
+
+const cvRegex string = `v?([0-9|x|X|\*]+)(\.[0-9|x|X|\*]+)?(\.[0-9|x|X|\*]+)?` +
+	`(-([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` +
+	`(\+([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?`
+
+func init() {
+	constraintOps = map[string]cfunc{
+		"":   constraintTildeOrEqual,
+		"=":  constraintTildeOrEqual,
+		"!=": constraintNotEqual,
+		">":  constraintGreaterThan,
+		"<":  constraintLessThan,
+		">=": constraintGreaterThanEqual,
+		"=>": constraintGreaterThanEqual,
+		"<=": constraintLessThanEqual,
+		"=<": constraintLessThanEqual,
+		"~":  constraintTilde,
+		"~>": constraintTilde,
+		"^":  constraintCaret,
+	}
+
+	ops := `=||!=|>|<|>=|=>|<=|=<|~|~>|\^`
+
+	constraintRegex = regexp.MustCompile(fmt.Sprintf(
+		`^\s*(%s)\s*(%s)\s*$`,
+		ops,
+		cvRegex))
+
+	constraintRangeRegex = regexp.MustCompile(fmt.Sprintf(
+		`\s*(%s)\s+-\s+(%s)\s*`,
+		cvRegex, cvRegex))
+
+	findConstraintRegex = regexp.MustCompile(fmt.Sprintf(
+		`(%s)\s*(%s)`,
+		ops,
+		cvRegex))
+
+	// The first time a constraint shows up will look slightly different from
+	// future times it shows up due to a leading space or comma in a given
+	// string.
+	validConstraintRegex = regexp.MustCompile(fmt.Sprintf(
+		`^(\s*(%s)\s*(%s)\s*)((?:\s+|,\s*)(%s)\s*(%s)\s*)*$`,
+		ops,
+		cvRegex,
+		ops,
+		cvRegex))
+}
+
+// An individual constraint
+type constraint struct {
+	// The version used in the constraint check. For example, if a constraint
+	// is '<= 2.0.0' the con a version instance representing 2.0.0.
+	con *Version
+
+	// The original parsed version (e.g., 4.x from != 4.x)
+	orig string
+
+	// The original operator for the constraint
+	origfunc string
+
+	// When an x is used as part of the version (e.g., 1.x)
+	minorDirty bool
+	dirty      bool
+	patchDirty bool
+}
+
+// Check if a version meets the constraint
+func (c *constraint) check(v *Version) (bool, error) {
+	return constraintOps[c.origfunc](v, c)
+}
+
+// String prints an individual constraint into a string
+func (c *constraint) string() string {
+	return c.origfunc + c.orig
+}
+
+type cfunc func(v *Version, c *constraint) (bool, error)
+
+func parseConstraint(c string) (*constraint, error) {
+	if len(c) > 0 {
+		m := constraintRegex.FindStringSubmatch(c)
+		if m == nil {
+			return nil, fmt.Errorf("improper constraint: %s", c)
+		}
+
+		cs := &constraint{
+			orig:     m[2],
+			origfunc: m[1],
+		}
+
+		ver := m[2]
+		minorDirty := false
+		patchDirty := false
+		dirty := false
+		if isX(m[3]) || m[3] == "" {
+			ver = fmt.Sprintf("0.0.0%s", m[6])
+			dirty = true
+		} else if isX(strings.TrimPrefix(m[4], ".")) || m[4] == "" {
+			minorDirty = true
+			dirty = true
+			ver = fmt.Sprintf("%s.0.0%s", m[3], m[6])
+		} else if isX(strings.TrimPrefix(m[5], ".")) || m[5] == "" {
+			dirty = true
+			patchDirty = true
+			ver = fmt.Sprintf("%s%s.0%s", m[3], m[4], m[6])
+		}
+
+		con, err := NewVersion(ver)
+		if err != nil {
+
+			// The constraintRegex should catch any regex parsing errors. So,
+			// we should never get here.
+			return nil, errors.New("constraint Parser Error")
+		}
+
+		cs.con = con
+		cs.minorDirty = minorDirty
+		cs.patchDirty = patchDirty
+		cs.dirty = dirty
+
+		return cs, nil
+	}
+
+	// The rest is the special case where an empty string was passed in which
+	// is equivalent to * or >=0.0.0
+	con, err := StrictNewVersion("0.0.0")
+	if err != nil {
+
+		// The constraintRegex should catch any regex parsing errors. So,
+		// we should never get here.
+		return nil, errors.New("constraint Parser Error")
+	}
+
+	cs := &constraint{
+		con:        con,
+		orig:       c,
+		origfunc:   "",
+		minorDirty: false,
+		patchDirty: false,
+		dirty:      true,
+	}
+	return cs, nil
+}
+
+// Constraint functions
+func constraintNotEqual(v *Version, c *constraint) (bool, error) {
+	if c.dirty {
+
+		// If there is a pre-release on the version but the constraint isn't looking
+		// for them assume that pre-releases are not compatible. See issue 21 for
+		// more details.
+		if v.Prerelease() != "" && c.con.Prerelease() == "" {
+			return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v)
+		}
+
+		if c.con.Major() != v.Major() {
+			return true, nil
+		}
+		if c.con.Minor() != v.Minor() && !c.minorDirty {
+			return true, nil
+		} else if c.minorDirty {
+			return false, fmt.Errorf("%s is equal to %s", v, c.orig)
+		} else if c.con.Patch() != v.Patch() && !c.patchDirty {
+			return true, nil
+		} else if c.patchDirty {
+			// Need to handle prereleases if present
+			if v.Prerelease() != "" || c.con.Prerelease() != "" {
+				eq := comparePrerelease(v.Prerelease(), c.con.Prerelease()) != 0
+				if eq {
+					return true, nil
+				}
+				return false, fmt.Errorf("%s is equal to %s", v, c.orig)
+			}
+			return false, fmt.Errorf("%s is equal to %s", v, c.orig)
+		}
+	}
+
+	eq := v.Equal(c.con)
+	if eq {
+		return false, fmt.Errorf("%s is equal to %s", v, c.orig)
+	}
+
+	return true, nil
+}
+
+func constraintGreaterThan(v *Version, c *constraint) (bool, error) {
+
+	// If there is a pre-release on the version but the constraint isn't looking
+	// for them assume that pre-releases are not compatible. See issue 21 for
+	// more details.
+	if v.Prerelease() != "" && c.con.Prerelease() == "" {
+		return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v)
+	}
+
+	var eq bool
+
+	if !c.dirty {
+		eq = v.Compare(c.con) == 1
+		if eq {
+			return true, nil
+		}
+		return false, fmt.Errorf("%s is less than or equal to %s", v, c.orig)
+	}
+
+	if v.Major() > c.con.Major() {
+		return true, nil
+	} else if v.Major() < c.con.Major() {
+		return false, fmt.Errorf("%s is less than or equal to %s", v, c.orig)
+	} else if c.minorDirty {
+		// This is a range case such as >11. When the version is something like
+		// 11.1.0 is it not > 11. For that we would need 12 or higher
+		return false, fmt.Errorf("%s is less than or equal to %s", v, c.orig)
+	} else if c.patchDirty {
+		// This is for ranges such as >11.1. A version of 11.1.1 is not greater
+		// which one of 11.2.1 is greater
+		eq = v.Minor() > c.con.Minor()
+		if eq {
+			return true, nil
+		}
+		return false, fmt.Errorf("%s is less than or equal to %s", v, c.orig)
+	}
+
+	// If we have gotten here we are not comparing pre-preleases and can use the
+	// Compare function to accomplish that.
+	eq = v.Compare(c.con) == 1
+	if eq {
+		return true, nil
+	}
+	return false, fmt.Errorf("%s is less than or equal to %s", v, c.orig)
+}
+
+func constraintLessThan(v *Version, c *constraint) (bool, error) {
+	// If there is a pre-release on the version but the constraint isn't looking
+	// for them assume that pre-releases are not compatible. See issue 21 for
+	// more details.
+	if v.Prerelease() != "" && c.con.Prerelease() == "" {
+		return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v)
+	}
+
+	eq := v.Compare(c.con) < 0
+	if eq {
+		return true, nil
+	}
+	return false, fmt.Errorf("%s is greater than or equal to %s", v, c.orig)
+}
+
+func constraintGreaterThanEqual(v *Version, c *constraint) (bool, error) {
+
+	// If there is a pre-release on the version but the constraint isn't looking
+	// for them assume that pre-releases are not compatible. See issue 21 for
+	// more details.
+	if v.Prerelease() != "" && c.con.Prerelease() == "" {
+		return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v)
+	}
+
+	eq := v.Compare(c.con) >= 0
+	if eq {
+		return true, nil
+	}
+	return false, fmt.Errorf("%s is less than %s", v, c.orig)
+}
+
+func constraintLessThanEqual(v *Version, c *constraint) (bool, error) {
+	// If there is a pre-release on the version but the constraint isn't looking
+	// for them assume that pre-releases are not compatible. See issue 21 for
+	// more details.
+	if v.Prerelease() != "" && c.con.Prerelease() == "" {
+		return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v)
+	}
+
+	var eq bool
+
+	if !c.dirty {
+		eq = v.Compare(c.con) <= 0
+		if eq {
+			return true, nil
+		}
+		return false, fmt.Errorf("%s is greater than %s", v, c.orig)
+	}
+
+	if v.Major() > c.con.Major() {
+		return false, fmt.Errorf("%s is greater than %s", v, c.orig)
+	} else if v.Major() == c.con.Major() && v.Minor() > c.con.Minor() && !c.minorDirty {
+		return false, fmt.Errorf("%s is greater than %s", v, c.orig)
+	}
+
+	return true, nil
+}
+
+// ~*, ~>* --> >= 0.0.0 (any)
+// ~2, ~2.x, ~2.x.x, ~>2, ~>2.x ~>2.x.x --> >=2.0.0, <3.0.0
+// ~2.0, ~2.0.x, ~>2.0, ~>2.0.x --> >=2.0.0, <2.1.0
+// ~1.2, ~1.2.x, ~>1.2, ~>1.2.x --> >=1.2.0, <1.3.0
+// ~1.2.3, ~>1.2.3 --> >=1.2.3, <1.3.0
+// ~1.2.0, ~>1.2.0 --> >=1.2.0, <1.3.0
+func constraintTilde(v *Version, c *constraint) (bool, error) {
+	// If there is a pre-release on the version but the constraint isn't looking
+	// for them assume that pre-releases are not compatible. See issue 21 for
+	// more details.
+	if v.Prerelease() != "" && c.con.Prerelease() == "" {
+		return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v)
+	}
+
+	if v.LessThan(c.con) {
+		return false, fmt.Errorf("%s is less than %s", v, c.orig)
+	}
+
+	// ~0.0.0 is a special case where all constraints are accepted. It's
+	// equivalent to >= 0.0.0.
+	if c.con.Major() == 0 && c.con.Minor() == 0 && c.con.Patch() == 0 &&
+		!c.minorDirty && !c.patchDirty {
+		return true, nil
+	}
+
+	if v.Major() != c.con.Major() {
+		return false, fmt.Errorf("%s does not have same major version as %s", v, c.orig)
+	}
+
+	if v.Minor() != c.con.Minor() && !c.minorDirty {
+		return false, fmt.Errorf("%s does not have same major and minor version as %s", v, c.orig)
+	}
+
+	return true, nil
+}
+
+// When there is a .x (dirty) status it automatically opts in to ~. Otherwise
+// it's a straight =
+func constraintTildeOrEqual(v *Version, c *constraint) (bool, error) {
+	// If there is a pre-release on the version but the constraint isn't looking
+	// for them assume that pre-releases are not compatible. See issue 21 for
+	// more details.
+	if v.Prerelease() != "" && c.con.Prerelease() == "" {
+		return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v)
+	}
+
+	if c.dirty {
+		return constraintTilde(v, c)
+	}
+
+	eq := v.Equal(c.con)
+	if eq {
+		return true, nil
+	}
+
+	return false, fmt.Errorf("%s is not equal to %s", v, c.orig)
+}
+
+// ^*      -->  (any)
+// ^1.2.3  -->  >=1.2.3 <2.0.0
+// ^1.2    -->  >=1.2.0 <2.0.0
+// ^1      -->  >=1.0.0 <2.0.0
+// ^0.2.3  -->  >=0.2.3 <0.3.0
+// ^0.2    -->  >=0.2.0 <0.3.0
+// ^0.0.3  -->  >=0.0.3 <0.0.4
+// ^0.0    -->  >=0.0.0 <0.1.0
+// ^0      -->  >=0.0.0 <1.0.0
+func constraintCaret(v *Version, c *constraint) (bool, error) {
+	// If there is a pre-release on the version but the constraint isn't looking
+	// for them assume that pre-releases are not compatible. See issue 21 for
+	// more details.
+	if v.Prerelease() != "" && c.con.Prerelease() == "" {
+		return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v)
+	}
+
+	// This less than handles prereleases
+	if v.LessThan(c.con) {
+		return false, fmt.Errorf("%s is less than %s", v, c.orig)
+	}
+
+	var eq bool
+
+	// ^ when the major > 0 is >=x.y.z < x+1
+	if c.con.Major() > 0 || c.minorDirty {
+
+		// ^ has to be within a major range for > 0. Everything less than was
+		// filtered out with the LessThan call above. This filters out those
+		// that greater but not within the same major range.
+		eq = v.Major() == c.con.Major()
+		if eq {
+			return true, nil
+		}
+		return false, fmt.Errorf("%s does not have same major version as %s", v, c.orig)
+	}
+
+	// ^ when the major is 0 and minor > 0 is >=0.y.z < 0.y+1
+	if c.con.Major() == 0 && v.Major() > 0 {
+		return false, fmt.Errorf("%s does not have same major version as %s", v, c.orig)
+	}
+	// If the con Minor is > 0 it is not dirty
+	if c.con.Minor() > 0 || c.patchDirty {
+		eq = v.Minor() == c.con.Minor()
+		if eq {
+			return true, nil
+		}
+		return false, fmt.Errorf("%s does not have same minor version as %s. Expected minor versions to match when constraint major version is 0", v, c.orig)
+	}
+	// ^ when the minor is 0 and minor > 0 is =0.0.z
+	if c.con.Minor() == 0 && v.Minor() > 0 {
+		return false, fmt.Errorf("%s does not have same minor version as %s", v, c.orig)
+	}
+
+	// At this point the major is 0 and the minor is 0 and not dirty. The patch
+	// is not dirty so we need to check if they are equal. If they are not equal
+	eq = c.con.Patch() == v.Patch()
+	if eq {
+		return true, nil
+	}
+	return false, fmt.Errorf("%s does not equal %s. Expect version and constraint to equal when major and minor versions are 0", v, c.orig)
+}
+
+func isX(x string) bool {
+	switch x {
+	case "x", "*", "X":
+		return true
+	default:
+		return false
+	}
+}
+
+func rewriteRange(i string) string {
+	m := constraintRangeRegex.FindAllStringSubmatch(i, -1)
+	if m == nil {
+		return i
+	}
+	o := i
+	for _, v := range m {
+		t := fmt.Sprintf(">= %s, <= %s ", v[1], v[11])
+		o = strings.Replace(o, v[0], t, 1)
+	}
+
+	return o
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/Masterminds/semver/v3/doc.go 0.21.3-0ubuntu1/vendor/github.com/Masterminds/semver/v3/doc.go
--- 0.19.3+ds1-4/vendor/github.com/Masterminds/semver/v3/doc.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/Masterminds/semver/v3/doc.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,184 @@
+/*
+Package semver provides the ability to work with Semantic Versions (http://semver.org) in Go.
+
+Specifically it provides the ability to:
+
+  - Parse semantic versions
+  - Sort semantic versions
+  - Check if a semantic version fits within a set of constraints
+  - Optionally work with a `v` prefix
+
+# Parsing Semantic Versions
+
+There are two functions that can parse semantic versions. The `StrictNewVersion`
+function only parses valid version 2 semantic versions as outlined in the
+specification. The `NewVersion` function attempts to coerce a version into a
+semantic version and parse it. For example, if there is a leading v or a version
+listed without all 3 parts (e.g. 1.2) it will attempt to coerce it into a valid
+semantic version (e.g., 1.2.0). In both cases a `Version` object is returned
+that can be sorted, compared, and used in constraints.
+
+When parsing a version an optional error can be returned if there is an issue
+parsing the version. For example,
+
+	v, err := semver.NewVersion("1.2.3-beta.1+b345")
+
+The version object has methods to get the parts of the version, compare it to
+other versions, convert the version back into a string, and get the original
+string. For more details please see the documentation
+at https://godoc.org/github.com/Masterminds/semver.
+
+# Sorting Semantic Versions
+
+A set of versions can be sorted using the `sort` package from the standard library.
+For example,
+
+	    raw := []string{"1.2.3", "1.0", "1.3", "2", "0.4.2",}
+	    vs := make([]*semver.Version, len(raw))
+		for i, r := range raw {
+			v, err := semver.NewVersion(r)
+			if err != nil {
+				t.Errorf("Error parsing version: %s", err)
+			}
+
+			vs[i] = v
+		}
+
+		sort.Sort(semver.Collection(vs))
+
+# Checking Version Constraints and Comparing Versions
+
+There are two methods for comparing versions. One uses comparison methods on
+`Version` instances and the other is using Constraints. There are some important
+differences to note between these two methods of comparison.
+
+ 1. When two versions are compared using functions such as `Compare`, `LessThan`,
+    and others it will follow the specification and always include prereleases
+    within the comparison. It will provide an answer valid with the comparison
+    spec section at https://semver.org/#spec-item-11
+ 2. When constraint checking is used for checks or validation it will follow a
+    different set of rules that are common for ranges with tools like npm/js
+    and Rust/Cargo. This includes considering prereleases to be invalid if the
+    range does not include one. If you want to have it include pre-releases a
+    simple solution is to include `-0` in your range.
+ 3. Constraint ranges can have some complex rules including the shorthand use of
+    ~ and ^. For more details on those see the options below.
+
+There are differences between the two methods of checking versions because the
+comparison methods on `Version` follow the specification while comparison ranges
+are not part of the specification. Different packages and tools have taken it
+upon themselves to come up with range rules. This has resulted in differences.
+For example, npm/js and Cargo/Rust follow similar patterns while PHP has a
+different pattern for ^. The comparison features in this package follow the
+npm/js and Cargo/Rust lead because applications using it have followed similar
+patterns with their versions.
+
+Checking a version against version constraints is one of the most featureful
+parts of the package.
+
+	c, err := semver.NewConstraint(">= 1.2.3")
+	if err != nil {
+	    // Handle constraint not being parsable.
+	}
+
+	v, err := semver.NewVersion("1.3")
+	if err != nil {
+	    // Handle version not being parsable.
+	}
+	// Check if the version meets the constraints. The a variable will be true.
+	a := c.Check(v)
+
+# Basic Comparisons
+
+There are two elements to the comparisons. First, a comparison string is a list
+of comma or space separated AND comparisons. These are then separated by || (OR)
+comparisons. For example, `">= 1.2 < 3.0.0 || >= 4.2.3"` is looking for a
+comparison that's greater than or equal to 1.2 and less than 3.0.0 or is
+greater than or equal to 4.2.3. This can also be written as
+`">= 1.2, < 3.0.0 || >= 4.2.3"`
+
+The basic comparisons are:
+
+  - `=`: equal (aliased to no operator)
+  - `!=`: not equal
+  - `>`: greater than
+  - `<`: less than
+  - `>=`: greater than or equal to
+  - `<=`: less than or equal to
+
+# Hyphen Range Comparisons
+
+There are multiple methods to handle ranges and the first is hyphens ranges.
+These look like:
+
+  - `1.2 - 1.4.5` which is equivalent to `>= 1.2, <= 1.4.5`
+  - `2.3.4 - 4.5` which is equivalent to `>= 2.3.4 <= 4.5`
+
+# Wildcards In Comparisons
+
+The `x`, `X`, and `*` characters can be used as a wildcard character. This works
+for all comparison operators. When used on the `=` operator it falls
+back to the tilde operation. For example,
+
+  - `1.2.x` is equivalent to `>= 1.2.0 < 1.3.0`
+  - `>= 1.2.x` is equivalent to `>= 1.2.0`
+  - `<= 2.x` is equivalent to `< 3`
+  - `*` is equivalent to `>= 0.0.0`
+
+Tilde Range Comparisons (Patch)
+
+The tilde (`~`) comparison operator is for patch level ranges when a minor
+version is specified and major level changes when the minor number is missing.
+For example,
+
+  - `~1.2.3` is equivalent to `>= 1.2.3 < 1.3.0`
+  - `~1` is equivalent to `>= 1, < 2`
+  - `~2.3` is equivalent to `>= 2.3 < 2.4`
+  - `~1.2.x` is equivalent to `>= 1.2.0 < 1.3.0`
+  - `~1.x` is equivalent to `>= 1 < 2`
+
+Caret Range Comparisons (Major)
+
+The caret (`^`) comparison operator is for major level changes once a stable
+(1.0.0) release has occurred. Prior to a 1.0.0 release the minor version acts
+as the API stability level. This is useful when comparisons of API versions as a
+major change is API breaking. For example,
+
+  - `^1.2.3` is equivalent to `>= 1.2.3, < 2.0.0`
+  - `^1.2.x` is equivalent to `>= 1.2.0, < 2.0.0`
+  - `^2.3` is equivalent to `>= 2.3, < 3`
+  - `^2.x` is equivalent to `>= 2.0.0, < 3`
+  - `^0.2.3` is equivalent to `>=0.2.3 <0.3.0`
+  - `^0.2` is equivalent to `>=0.2.0 <0.3.0`
+  - `^0.0.3` is equivalent to `>=0.0.3 <0.0.4`
+  - `^0.0` is equivalent to `>=0.0.0 <0.1.0`
+  - `^0` is equivalent to `>=0.0.0 <1.0.0`
+
+# Validation
+
+In addition to testing a version against a constraint, a version can be validated
+against a constraint. When validation fails a slice of errors containing why a
+version didn't meet the constraint is returned. For example,
+
+	c, err := semver.NewConstraint("<= 1.2.3, >= 1.4")
+	if err != nil {
+	    // Handle constraint not being parseable.
+	}
+
+	v, err := semver.NewVersion("1.3")
+	if err != nil {
+	    // Handle version not being parseable.
+	}
+
+	// Validate a version against a constraint.
+	a, msgs := c.Validate(v)
+	// a is false
+	for _, m := range msgs {
+	    fmt.Println(m)
+
+	    // Loops over the errors which would read
+	    // "1.3 is greater than 1.2.3"
+	    // "1.3 is less than 1.4"
+	}
+*/
+package semver
diff -pruN 0.19.3+ds1-4/vendor/github.com/Masterminds/semver/v3/version.go 0.21.3-0ubuntu1/vendor/github.com/Masterminds/semver/v3/version.go
--- 0.19.3+ds1-4/vendor/github.com/Masterminds/semver/v3/version.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/Masterminds/semver/v3/version.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,639 @@
+package semver
+
+import (
+	"bytes"
+	"database/sql/driver"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"regexp"
+	"strconv"
+	"strings"
+)
+
+// The compiled version of the regex created at init() is cached here so it
+// only needs to be created once.
+var versionRegex *regexp.Regexp
+
+var (
+	// ErrInvalidSemVer is returned a version is found to be invalid when
+	// being parsed.
+	ErrInvalidSemVer = errors.New("Invalid Semantic Version")
+
+	// ErrEmptyString is returned when an empty string is passed in for parsing.
+	ErrEmptyString = errors.New("Version string empty")
+
+	// ErrInvalidCharacters is returned when invalid characters are found as
+	// part of a version
+	ErrInvalidCharacters = errors.New("Invalid characters in version")
+
+	// ErrSegmentStartsZero is returned when a version segment starts with 0.
+	// This is invalid in SemVer.
+	ErrSegmentStartsZero = errors.New("Version segment starts with 0")
+
+	// ErrInvalidMetadata is returned when the metadata is an invalid format
+	ErrInvalidMetadata = errors.New("Invalid Metadata string")
+
+	// ErrInvalidPrerelease is returned when the pre-release is an invalid format
+	ErrInvalidPrerelease = errors.New("Invalid Prerelease string")
+)
+
+// semVerRegex is the regular expression used to parse a semantic version.
+const semVerRegex string = `v?([0-9]+)(\.[0-9]+)?(\.[0-9]+)?` +
+	`(-([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` +
+	`(\+([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?`
+
+// Version represents a single semantic version.
+type Version struct {
+	major, minor, patch uint64
+	pre                 string
+	metadata            string
+	original            string
+}
+
+func init() {
+	versionRegex = regexp.MustCompile("^" + semVerRegex + "$")
+}
+
+const (
+	num     string = "0123456789"
+	allowed string = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ-" + num
+)
+
+// StrictNewVersion parses a given version and returns an instance of Version or
+// an error if unable to parse the version. Only parses valid semantic versions.
+// Performs checking that can find errors within the version.
+// If you want to coerce a version such as 1 or 1.2 and parse it as the 1.x
+// releases of semver did, use the NewVersion() function.
+func StrictNewVersion(v string) (*Version, error) {
+	// Parsing here does not use RegEx in order to increase performance and reduce
+	// allocations.
+
+	if len(v) == 0 {
+		return nil, ErrEmptyString
+	}
+
+	// Split the parts into [0]major, [1]minor, and [2]patch,prerelease,build
+	parts := strings.SplitN(v, ".", 3)
+	if len(parts) != 3 {
+		return nil, ErrInvalidSemVer
+	}
+
+	sv := &Version{
+		original: v,
+	}
+
+	// check for prerelease or build metadata
+	var extra []string
+	if strings.ContainsAny(parts[2], "-+") {
+		// Start with the build metadata first as it needs to be on the right
+		extra = strings.SplitN(parts[2], "+", 2)
+		if len(extra) > 1 {
+			// build metadata found
+			sv.metadata = extra[1]
+			parts[2] = extra[0]
+		}
+
+		extra = strings.SplitN(parts[2], "-", 2)
+		if len(extra) > 1 {
+			// prerelease found
+			sv.pre = extra[1]
+			parts[2] = extra[0]
+		}
+	}
+
+	// Validate the number segments are valid. This includes only having positive
+	// numbers and no leading 0's.
+	for _, p := range parts {
+		if !containsOnly(p, num) {
+			return nil, ErrInvalidCharacters
+		}
+
+		if len(p) > 1 && p[0] == '0' {
+			return nil, ErrSegmentStartsZero
+		}
+	}
+
+	// Extract the major, minor, and patch elements onto the returned Version
+	var err error
+	sv.major, err = strconv.ParseUint(parts[0], 10, 64)
+	if err != nil {
+		return nil, err
+	}
+
+	sv.minor, err = strconv.ParseUint(parts[1], 10, 64)
+	if err != nil {
+		return nil, err
+	}
+
+	sv.patch, err = strconv.ParseUint(parts[2], 10, 64)
+	if err != nil {
+		return nil, err
+	}
+
+	// No prerelease or build metadata found so returning now as a fastpath.
+	if sv.pre == "" && sv.metadata == "" {
+		return sv, nil
+	}
+
+	if sv.pre != "" {
+		if err = validatePrerelease(sv.pre); err != nil {
+			return nil, err
+		}
+	}
+
+	if sv.metadata != "" {
+		if err = validateMetadata(sv.metadata); err != nil {
+			return nil, err
+		}
+	}
+
+	return sv, nil
+}
+
+// NewVersion parses a given version and returns an instance of Version or
+// an error if unable to parse the version. If the version is SemVer-ish it
+// attempts to convert it to SemVer. If you want  to validate it was a strict
+// semantic version at parse time see StrictNewVersion().
+func NewVersion(v string) (*Version, error) {
+	m := versionRegex.FindStringSubmatch(v)
+	if m == nil {
+		return nil, ErrInvalidSemVer
+	}
+
+	sv := &Version{
+		metadata: m[8],
+		pre:      m[5],
+		original: v,
+	}
+
+	var err error
+	sv.major, err = strconv.ParseUint(m[1], 10, 64)
+	if err != nil {
+		return nil, fmt.Errorf("Error parsing version segment: %s", err)
+	}
+
+	if m[2] != "" {
+		sv.minor, err = strconv.ParseUint(strings.TrimPrefix(m[2], "."), 10, 64)
+		if err != nil {
+			return nil, fmt.Errorf("Error parsing version segment: %s", err)
+		}
+	} else {
+		sv.minor = 0
+	}
+
+	if m[3] != "" {
+		sv.patch, err = strconv.ParseUint(strings.TrimPrefix(m[3], "."), 10, 64)
+		if err != nil {
+			return nil, fmt.Errorf("Error parsing version segment: %s", err)
+		}
+	} else {
+		sv.patch = 0
+	}
+
+	// Perform some basic due diligence on the extra parts to ensure they are
+	// valid.
+
+	if sv.pre != "" {
+		if err = validatePrerelease(sv.pre); err != nil {
+			return nil, err
+		}
+	}
+
+	if sv.metadata != "" {
+		if err = validateMetadata(sv.metadata); err != nil {
+			return nil, err
+		}
+	}
+
+	return sv, nil
+}
+
+// New creates a new instance of Version with each of the parts passed in as
+// arguments instead of parsing a version string.
+func New(major, minor, patch uint64, pre, metadata string) *Version {
+	v := Version{
+		major:    major,
+		minor:    minor,
+		patch:    patch,
+		pre:      pre,
+		metadata: metadata,
+		original: "",
+	}
+
+	v.original = v.String()
+
+	return &v
+}
+
+// MustParse parses a given version and panics on error.
+func MustParse(v string) *Version {
+	sv, err := NewVersion(v)
+	if err != nil {
+		panic(err)
+	}
+	return sv
+}
+
+// String converts a Version object to a string.
+// Note, if the original version contained a leading v this version will not.
+// See the Original() method to retrieve the original value. Semantic Versions
+// don't contain a leading v per the spec. Instead it's optional on
+// implementation.
+func (v Version) String() string {
+	var buf bytes.Buffer
+
+	fmt.Fprintf(&buf, "%d.%d.%d", v.major, v.minor, v.patch)
+	if v.pre != "" {
+		fmt.Fprintf(&buf, "-%s", v.pre)
+	}
+	if v.metadata != "" {
+		fmt.Fprintf(&buf, "+%s", v.metadata)
+	}
+
+	return buf.String()
+}
+
+// Original returns the original value passed in to be parsed.
+func (v *Version) Original() string {
+	return v.original
+}
+
+// Major returns the major version.
+func (v Version) Major() uint64 {
+	return v.major
+}
+
+// Minor returns the minor version.
+func (v Version) Minor() uint64 {
+	return v.minor
+}
+
+// Patch returns the patch version.
+func (v Version) Patch() uint64 {
+	return v.patch
+}
+
+// Prerelease returns the pre-release version.
+func (v Version) Prerelease() string {
+	return v.pre
+}
+
+// Metadata returns the metadata on the version.
+func (v Version) Metadata() string {
+	return v.metadata
+}
+
+// originalVPrefix returns the original 'v' prefix if any.
+func (v Version) originalVPrefix() string {
+	// Note, only lowercase v is supported as a prefix by the parser.
+	if v.original != "" && v.original[:1] == "v" {
+		return v.original[:1]
+	}
+	return ""
+}
+
+// IncPatch produces the next patch version.
+// If the current version does not have prerelease/metadata information,
+// it unsets metadata and prerelease values, increments patch number.
+// If the current version has any of prerelease or metadata information,
+// it unsets both values and keeps current patch value
+func (v Version) IncPatch() Version {
+	vNext := v
+	// according to http://semver.org/#spec-item-9
+	// Pre-release versions have a lower precedence than the associated normal version.
+	// according to http://semver.org/#spec-item-10
+	// Build metadata SHOULD be ignored when determining version precedence.
+	if v.pre != "" {
+		vNext.metadata = ""
+		vNext.pre = ""
+	} else {
+		vNext.metadata = ""
+		vNext.pre = ""
+		vNext.patch = v.patch + 1
+	}
+	vNext.original = v.originalVPrefix() + "" + vNext.String()
+	return vNext
+}
+
+// IncMinor produces the next minor version.
+// Sets patch to 0.
+// Increments minor number.
+// Unsets metadata.
+// Unsets prerelease status.
+func (v Version) IncMinor() Version {
+	vNext := v
+	vNext.metadata = ""
+	vNext.pre = ""
+	vNext.patch = 0
+	vNext.minor = v.minor + 1
+	vNext.original = v.originalVPrefix() + "" + vNext.String()
+	return vNext
+}
+
+// IncMajor produces the next major version.
+// Sets patch to 0.
+// Sets minor to 0.
+// Increments major number.
+// Unsets metadata.
+// Unsets prerelease status.
+func (v Version) IncMajor() Version {
+	vNext := v
+	vNext.metadata = ""
+	vNext.pre = ""
+	vNext.patch = 0
+	vNext.minor = 0
+	vNext.major = v.major + 1
+	vNext.original = v.originalVPrefix() + "" + vNext.String()
+	return vNext
+}
+
+// SetPrerelease defines the prerelease value.
+// Value must not include the required 'hyphen' prefix.
+func (v Version) SetPrerelease(prerelease string) (Version, error) {
+	vNext := v
+	if len(prerelease) > 0 {
+		if err := validatePrerelease(prerelease); err != nil {
+			return vNext, err
+		}
+	}
+	vNext.pre = prerelease
+	vNext.original = v.originalVPrefix() + "" + vNext.String()
+	return vNext, nil
+}
+
+// SetMetadata defines metadata value.
+// Value must not include the required 'plus' prefix.
+func (v Version) SetMetadata(metadata string) (Version, error) {
+	vNext := v
+	if len(metadata) > 0 {
+		if err := validateMetadata(metadata); err != nil {
+			return vNext, err
+		}
+	}
+	vNext.metadata = metadata
+	vNext.original = v.originalVPrefix() + "" + vNext.String()
+	return vNext, nil
+}
+
+// LessThan tests if one version is less than another one.
+func (v *Version) LessThan(o *Version) bool {
+	return v.Compare(o) < 0
+}
+
+// GreaterThan tests if one version is greater than another one.
+func (v *Version) GreaterThan(o *Version) bool {
+	return v.Compare(o) > 0
+}
+
+// Equal tests if two versions are equal to each other.
+// Note, versions can be equal with different metadata since metadata
+// is not considered part of the comparable version.
+func (v *Version) Equal(o *Version) bool {
+	return v.Compare(o) == 0
+}
+
+// Compare compares this version to another one. It returns -1, 0, or 1 if
+// the version smaller, equal, or larger than the other version.
+//
+// Versions are compared by X.Y.Z. Build metadata is ignored. Prerelease is
+// lower than the version without a prerelease. Compare always takes into account
+// prereleases. If you want to work with ranges using typical range syntaxes that
+// skip prereleases if the range is not looking for them use constraints.
+func (v *Version) Compare(o *Version) int {
+	// Compare the major, minor, and patch version for differences. If a
+	// difference is found return the comparison.
+	if d := compareSegment(v.Major(), o.Major()); d != 0 {
+		return d
+	}
+	if d := compareSegment(v.Minor(), o.Minor()); d != 0 {
+		return d
+	}
+	if d := compareSegment(v.Patch(), o.Patch()); d != 0 {
+		return d
+	}
+
+	// At this point the major, minor, and patch versions are the same.
+	ps := v.pre
+	po := o.Prerelease()
+
+	if ps == "" && po == "" {
+		return 0
+	}
+	if ps == "" {
+		return 1
+	}
+	if po == "" {
+		return -1
+	}
+
+	return comparePrerelease(ps, po)
+}
+
+// UnmarshalJSON implements JSON.Unmarshaler interface.
+func (v *Version) UnmarshalJSON(b []byte) error {
+	var s string
+	if err := json.Unmarshal(b, &s); err != nil {
+		return err
+	}
+	temp, err := NewVersion(s)
+	if err != nil {
+		return err
+	}
+	v.major = temp.major
+	v.minor = temp.minor
+	v.patch = temp.patch
+	v.pre = temp.pre
+	v.metadata = temp.metadata
+	v.original = temp.original
+	return nil
+}
+
+// MarshalJSON implements JSON.Marshaler interface.
+func (v Version) MarshalJSON() ([]byte, error) {
+	return json.Marshal(v.String())
+}
+
+// UnmarshalText implements the encoding.TextUnmarshaler interface.
+func (v *Version) UnmarshalText(text []byte) error {
+	temp, err := NewVersion(string(text))
+	if err != nil {
+		return err
+	}
+
+	*v = *temp
+
+	return nil
+}
+
+// MarshalText implements the encoding.TextMarshaler interface.
+func (v Version) MarshalText() ([]byte, error) {
+	return []byte(v.String()), nil
+}
+
+// Scan implements the SQL.Scanner interface.
+func (v *Version) Scan(value interface{}) error {
+	var s string
+	s, _ = value.(string)
+	temp, err := NewVersion(s)
+	if err != nil {
+		return err
+	}
+	v.major = temp.major
+	v.minor = temp.minor
+	v.patch = temp.patch
+	v.pre = temp.pre
+	v.metadata = temp.metadata
+	v.original = temp.original
+	return nil
+}
+
+// Value implements the Driver.Valuer interface.
+func (v Version) Value() (driver.Value, error) {
+	return v.String(), nil
+}
+
+func compareSegment(v, o uint64) int {
+	if v < o {
+		return -1
+	}
+	if v > o {
+		return 1
+	}
+
+	return 0
+}
+
+func comparePrerelease(v, o string) int {
+	// split the prelease versions by their part. The separator, per the spec,
+	// is a .
+	sparts := strings.Split(v, ".")
+	oparts := strings.Split(o, ".")
+
+	// Find the longer length of the parts to know how many loop iterations to
+	// go through.
+	slen := len(sparts)
+	olen := len(oparts)
+
+	l := slen
+	if olen > slen {
+		l = olen
+	}
+
+	// Iterate over each part of the prereleases to compare the differences.
+	for i := 0; i < l; i++ {
+		// Since the lentgh of the parts can be different we need to create
+		// a placeholder. This is to avoid out of bounds issues.
+		stemp := ""
+		if i < slen {
+			stemp = sparts[i]
+		}
+
+		otemp := ""
+		if i < olen {
+			otemp = oparts[i]
+		}
+
+		d := comparePrePart(stemp, otemp)
+		if d != 0 {
+			return d
+		}
+	}
+
+	// Reaching here means two versions are of equal value but have different
+	// metadata (the part following a +). They are not identical in string form
+	// but the version comparison finds them to be equal.
+	return 0
+}
+
+func comparePrePart(s, o string) int {
+	// Fastpath if they are equal
+	if s == o {
+		return 0
+	}
+
+	// When s or o are empty we can use the other in an attempt to determine
+	// the response.
+	if s == "" {
+		if o != "" {
+			return -1
+		}
+		return 1
+	}
+
+	if o == "" {
+		if s != "" {
+			return 1
+		}
+		return -1
+	}
+
+	// When comparing strings "99" is greater than "103". To handle
+	// cases like this we need to detect numbers and compare them. According
+	// to the semver spec, numbers are always positive. If there is a - at the
+	// start like -99 this is to be evaluated as an alphanum. numbers always
+	// have precedence over alphanum. Parsing as Uints because negative numbers
+	// are ignored.
+
+	oi, n1 := strconv.ParseUint(o, 10, 64)
+	si, n2 := strconv.ParseUint(s, 10, 64)
+
+	// The case where both are strings compare the strings
+	if n1 != nil && n2 != nil {
+		if s > o {
+			return 1
+		}
+		return -1
+	} else if n1 != nil {
+		// o is a string and s is a number
+		return -1
+	} else if n2 != nil {
+		// s is a string and o is a number
+		return 1
+	}
+	// Both are numbers
+	if si > oi {
+		return 1
+	}
+	return -1
+}
+
+// Like strings.ContainsAny but does an only instead of any.
+func containsOnly(s string, comp string) bool {
+	return strings.IndexFunc(s, func(r rune) bool {
+		return !strings.ContainsRune(comp, r)
+	}) == -1
+}
+
+// From the spec, "Identifiers MUST comprise only
+// ASCII alphanumerics and hyphen [0-9A-Za-z-]. Identifiers MUST NOT be empty.
+// Numeric identifiers MUST NOT include leading zeroes.". These segments can
+// be dot separated.
+func validatePrerelease(p string) error {
+	eparts := strings.Split(p, ".")
+	for _, p := range eparts {
+		if containsOnly(p, num) {
+			if len(p) > 1 && p[0] == '0' {
+				return ErrSegmentStartsZero
+			}
+		} else if !containsOnly(p, allowed) {
+			return ErrInvalidPrerelease
+		}
+	}
+
+	return nil
+}
+
+// From the spec, "Build metadata MAY be denoted by
+// appending a plus sign and a series of dot separated identifiers immediately
+// following the patch or pre-release version. Identifiers MUST comprise only
+// ASCII alphanumerics and hyphen [0-9A-Za-z-]. Identifiers MUST NOT be empty."
+func validateMetadata(m string) error {
+	eparts := strings.Split(m, ".")
+	for _, p := range eparts {
+		if !containsOnly(p, allowed) {
+			return ErrInvalidMetadata
+		}
+	}
+	return nil
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/Microsoft/go-winio/.gitattributes 0.21.3-0ubuntu1/vendor/github.com/Microsoft/go-winio/.gitattributes
--- 0.19.3+ds1-4/vendor/github.com/Microsoft/go-winio/.gitattributes	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/Microsoft/go-winio/.gitattributes	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1 @@
+* text=auto eol=lf
\ No newline at end of file
diff -pruN 0.19.3+ds1-4/vendor/github.com/Microsoft/go-winio/.gitignore 0.21.3-0ubuntu1/vendor/github.com/Microsoft/go-winio/.gitignore
--- 0.19.3+ds1-4/vendor/github.com/Microsoft/go-winio/.gitignore	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/Microsoft/go-winio/.gitignore	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,10 @@
+.vscode/
+
+*.exe
+
+# testing
+testdata
+
+# go workspaces
+go.work
+go.work.sum
diff -pruN 0.19.3+ds1-4/vendor/github.com/Microsoft/go-winio/.golangci.yml 0.21.3-0ubuntu1/vendor/github.com/Microsoft/go-winio/.golangci.yml
--- 0.19.3+ds1-4/vendor/github.com/Microsoft/go-winio/.golangci.yml	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/Microsoft/go-winio/.golangci.yml	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,147 @@
+linters:
+  enable:
+    # style
+    - containedctx # struct contains a context
+    - dupl # duplicate code
+    - errname # erorrs are named correctly
+    - nolintlint # "//nolint" directives are properly explained
+    - revive # golint replacement
+    - unconvert # unnecessary conversions
+    - wastedassign
+
+    # bugs, performance, unused, etc ...
+    - contextcheck # function uses a non-inherited context
+    - errorlint # errors not wrapped for 1.13
+    - exhaustive # check exhaustiveness of enum switch statements
+    - gofmt # files are gofmt'ed
+    - gosec # security
+    - nilerr # returns nil even with non-nil error
+    - thelper #  test helpers without t.Helper()
+    - unparam # unused function params
+
+issues:
+  exclude-dirs:
+    - pkg/etw/sample
+
+  exclude-rules:
+    # err is very often shadowed in nested scopes
+    - linters:
+        - govet
+      text: '^shadow: declaration of "err" shadows declaration'
+
+    # ignore long lines for skip autogen directives
+    - linters:
+        - revive
+      text: "^line-length-limit: "
+      source: "^//(go:generate|sys) "
+
+    #TODO: remove after upgrading to go1.18
+    # ignore comment spacing for nolint and sys directives
+    - linters:
+        - revive
+      text: "^comment-spacings: no space between comment delimiter and comment text"
+      source: "//(cspell:|nolint:|sys |todo)"
+
+    # not on go 1.18 yet, so no any
+    - linters:
+        - revive
+      text: "^use-any: since GO 1.18 'interface{}' can be replaced by 'any'"
+
+    # allow unjustified ignores of error checks in defer statements
+    - linters:
+        - nolintlint
+      text: "^directive `//nolint:errcheck` should provide explanation"
+      source: '^\s*defer '
+
+    # allow unjustified ignores of error lints for io.EOF
+    - linters:
+        - nolintlint
+      text: "^directive `//nolint:errorlint` should provide explanation"
+      source: '[=|!]= io.EOF'
+
+
+linters-settings:
+  exhaustive:
+    default-signifies-exhaustive: true
+  govet:
+    enable-all: true
+    disable:
+      # struct order is often for Win32 compat
+      # also, ignore pointer bytes/GC issues for now until performance becomes an issue
+      - fieldalignment
+  nolintlint:
+    require-explanation: true
+    require-specific: true
+  revive:
+    # revive is more configurable than static check, so likely the preferred alternative to static-check
+    # (once the perf issue is solved: https://github.com/golangci/golangci-lint/issues/2997)
+    enable-all-rules:
+      true
+      # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md
+    rules:
+      # rules with required arguments
+      - name: argument-limit
+        disabled: true
+      - name: banned-characters
+        disabled: true
+      - name: cognitive-complexity
+        disabled: true
+      - name: cyclomatic
+        disabled: true
+      - name: file-header
+        disabled: true
+      - name: function-length
+        disabled: true
+      - name: function-result-limit
+        disabled: true
+      - name: max-public-structs
+        disabled: true
+      # geneally annoying rules
+      - name: add-constant # complains about any and all strings and integers
+        disabled: true
+      - name: confusing-naming # we frequently use "Foo()" and "foo()" together
+        disabled: true
+      - name: flag-parameter # excessive, and a common idiom we use
+        disabled: true
+      - name: unhandled-error # warns over common fmt.Print* and io.Close; rely on errcheck instead
+        disabled: true
+      # general config
+      - name: line-length-limit
+        arguments:
+          - 140
+      - name: var-naming
+        arguments:
+          - []
+          - - CID
+            - CRI
+            - CTRD
+            - DACL
+            - DLL
+            - DOS
+            - ETW
+            - FSCTL
+            - GCS
+            - GMSA
+            - HCS
+            - HV
+            - IO
+            - LCOW
+            - LDAP
+            - LPAC
+            - LTSC
+            - MMIO
+            - NT
+            - OCI
+            - PMEM
+            - PWSH
+            - RX
+            - SACl
+            - SID
+            - SMB
+            - TX
+            - VHD
+            - VHDX
+            - VMID
+            - VPCI
+            - WCOW
+            - WIM
diff -pruN 0.19.3+ds1-4/vendor/github.com/Microsoft/go-winio/CODEOWNERS 0.21.3-0ubuntu1/vendor/github.com/Microsoft/go-winio/CODEOWNERS
--- 0.19.3+ds1-4/vendor/github.com/Microsoft/go-winio/CODEOWNERS	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/Microsoft/go-winio/CODEOWNERS	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1 @@
+  * @microsoft/containerplat
diff -pruN 0.19.3+ds1-4/vendor/github.com/Microsoft/go-winio/LICENSE 0.21.3-0ubuntu1/vendor/github.com/Microsoft/go-winio/LICENSE
--- 0.19.3+ds1-4/vendor/github.com/Microsoft/go-winio/LICENSE	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/Microsoft/go-winio/LICENSE	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,22 @@
+The MIT License (MIT)
+
+Copyright (c) 2015 Microsoft
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
diff -pruN 0.19.3+ds1-4/vendor/github.com/Microsoft/go-winio/README.md 0.21.3-0ubuntu1/vendor/github.com/Microsoft/go-winio/README.md
--- 0.19.3+ds1-4/vendor/github.com/Microsoft/go-winio/README.md	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/Microsoft/go-winio/README.md	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,89 @@
+# go-winio [![Build Status](https://github.com/microsoft/go-winio/actions/workflows/ci.yml/badge.svg)](https://github.com/microsoft/go-winio/actions/workflows/ci.yml)
+
+This repository contains utilities for efficiently performing Win32 IO operations in
+Go. Currently, this is focused on accessing named pipes and other file handles, and
+for using named pipes as a net transport.
+
+This code relies on IO completion ports to avoid blocking IO on system threads, allowing Go
+to reuse the thread to schedule another goroutine. This limits support to Windows Vista and
+newer operating systems. This is similar to the implementation of network sockets in Go's net
+package.
+
+Please see the LICENSE file for licensing information.
+
+## Contributing
+
+This project welcomes contributions and suggestions.
+Most contributions require you to agree to a Contributor License Agreement (CLA) declaring that
+you have the right to, and actually do, grant us the rights to use your contribution.
+For details, visit [Microsoft CLA](https://cla.microsoft.com).
+
+When you submit a pull request, a CLA-bot will automatically determine whether you need to
+provide a CLA and decorate the PR appropriately (e.g., label, comment).
+Simply follow the instructions provided by the bot.
+You will only need to do this once across all repos using our CLA.
+
+Additionally, the pull request pipeline requires the following steps to be performed before
+mergining.
+
+### Code Sign-Off
+
+We require that contributors sign their commits using [`git commit --signoff`][git-commit-s]
+to certify they either authored the work themselves or otherwise have permission to use it in this project.
+
+A range of commits can be signed off using [`git rebase --signoff`][git-rebase-s].
+
+Please see [the developer certificate](https://developercertificate.org) for more info,
+as well as to make sure that you can attest to the rules listed.
+Our CI uses the DCO Github app to ensure that all commits in a given PR are signed-off.
+
+### Linting
+
+Code must pass a linting stage, which uses [`golangci-lint`][lint].
+The linting settings are stored in [`.golangci.yaml`](./.golangci.yaml), and can be run
+automatically with VSCode by adding the following to your workspace or folder settings:
+
+```json
+    "go.lintTool": "golangci-lint",
+    "go.lintOnSave": "package",
+```
+
+Additional editor [integrations options are also available][lint-ide].
+
+Alternatively, `golangci-lint` can be [installed locally][lint-install] and run from the repo root:
+
+```shell
+# use . or specify a path to only lint a package
+# to show all lint errors, use flags "--max-issues-per-linter=0 --max-same-issues=0"
+> golangci-lint run ./...
+```
+
+### Go Generate
+
+The pipeline checks that auto-generated code, via `go generate`, are up to date.
+
+This can be done for the entire repo:
+
+```shell
+> go generate ./...
+```
+
+## Code of Conduct
+
+This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/).
+For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or
+contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments.
+
+## Special Thanks
+
+Thanks to [natefinch][natefinch] for the inspiration for this library.
+See [npipe](https://github.com/natefinch/npipe) for another named pipe implementation.
+
+[lint]: https://golangci-lint.run/
+[lint-ide]: https://golangci-lint.run/usage/integrations/#editor-integration
+[lint-install]: https://golangci-lint.run/usage/install/#local-installation
+
+[git-commit-s]: https://git-scm.com/docs/git-commit#Documentation/git-commit.txt--s
+[git-rebase-s]: https://git-scm.com/docs/git-rebase#Documentation/git-rebase.txt---signoff
+
+[natefinch]: https://github.com/natefinch
diff -pruN 0.19.3+ds1-4/vendor/github.com/Microsoft/go-winio/SECURITY.md 0.21.3-0ubuntu1/vendor/github.com/Microsoft/go-winio/SECURITY.md
--- 0.19.3+ds1-4/vendor/github.com/Microsoft/go-winio/SECURITY.md	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/Microsoft/go-winio/SECURITY.md	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,41 @@
+<!-- BEGIN MICROSOFT SECURITY.MD V0.0.7 BLOCK -->
+
+## Security
+
+Microsoft takes the security of our software products and services seriously, which includes all source code repositories managed through our GitHub organizations, which include [Microsoft](https://github.com/Microsoft), [Azure](https://github.com/Azure), [DotNet](https://github.com/dotnet), [AspNet](https://github.com/aspnet), [Xamarin](https://github.com/xamarin), and [our GitHub organizations](https://opensource.microsoft.com/).
+
+If you believe you have found a security vulnerability in any Microsoft-owned repository that meets [Microsoft's definition of a security vulnerability](https://aka.ms/opensource/security/definition), please report it to us as described below.
+
+## Reporting Security Issues
+
+**Please do not report security vulnerabilities through public GitHub issues.**
+
+Instead, please report them to the Microsoft Security Response Center (MSRC) at [https://msrc.microsoft.com/create-report](https://aka.ms/opensource/security/create-report).
+
+If you prefer to submit without logging in, send email to [secure@microsoft.com](mailto:secure@microsoft.com).  If possible, encrypt your message with our PGP key; please download it from the [Microsoft Security Response Center PGP Key page](https://aka.ms/opensource/security/pgpkey).
+
+You should receive a response within 24 hours. If for some reason you do not, please follow up via email to ensure we received your original message. Additional information can be found at [microsoft.com/msrc](https://aka.ms/opensource/security/msrc). 
+
+Please include the requested information listed below (as much as you can provide) to help us better understand the nature and scope of the possible issue:
+
+  * Type of issue (e.g. buffer overflow, SQL injection, cross-site scripting, etc.)
+  * Full paths of source file(s) related to the manifestation of the issue
+  * The location of the affected source code (tag/branch/commit or direct URL)
+  * Any special configuration required to reproduce the issue
+  * Step-by-step instructions to reproduce the issue
+  * Proof-of-concept or exploit code (if possible)
+  * Impact of the issue, including how an attacker might exploit the issue
+
+This information will help us triage your report more quickly.
+
+If you are reporting for a bug bounty, more complete reports can contribute to a higher bounty award. Please visit our [Microsoft Bug Bounty Program](https://aka.ms/opensource/security/bounty) page for more details about our active programs.
+
+## Preferred Languages
+
+We prefer all communications to be in English.
+
+## Policy
+
+Microsoft follows the principle of [Coordinated Vulnerability Disclosure](https://aka.ms/opensource/security/cvd).
+
+<!-- END MICROSOFT SECURITY.MD BLOCK -->
diff -pruN 0.19.3+ds1-4/vendor/github.com/Microsoft/go-winio/backup.go 0.21.3-0ubuntu1/vendor/github.com/Microsoft/go-winio/backup.go
--- 0.19.3+ds1-4/vendor/github.com/Microsoft/go-winio/backup.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/Microsoft/go-winio/backup.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,287 @@
+//go:build windows
+// +build windows
+
+package winio
+
+import (
+	"encoding/binary"
+	"errors"
+	"fmt"
+	"io"
+	"os"
+	"runtime"
+	"unicode/utf16"
+
+	"github.com/Microsoft/go-winio/internal/fs"
+	"golang.org/x/sys/windows"
+)
+
+//sys backupRead(h windows.Handle, b []byte, bytesRead *uint32, abort bool, processSecurity bool, context *uintptr) (err error) = BackupRead
+//sys backupWrite(h windows.Handle, b []byte, bytesWritten *uint32, abort bool, processSecurity bool, context *uintptr) (err error) = BackupWrite
+
+const (
+	BackupData = uint32(iota + 1)
+	BackupEaData
+	BackupSecurity
+	BackupAlternateData
+	BackupLink
+	BackupPropertyData
+	BackupObjectId //revive:disable-line:var-naming ID, not Id
+	BackupReparseData
+	BackupSparseBlock
+	BackupTxfsData
+)
+
+const (
+	StreamSparseAttributes = uint32(8)
+)
+
+//nolint:revive // var-naming: ALL_CAPS
+const (
+	WRITE_DAC              = windows.WRITE_DAC
+	WRITE_OWNER            = windows.WRITE_OWNER
+	ACCESS_SYSTEM_SECURITY = windows.ACCESS_SYSTEM_SECURITY
+)
+
+// BackupHeader represents a backup stream of a file.
+type BackupHeader struct {
+	//revive:disable-next-line:var-naming ID, not Id
+	Id         uint32 // The backup stream ID
+	Attributes uint32 // Stream attributes
+	Size       int64  // The size of the stream in bytes
+	Name       string // The name of the stream (for BackupAlternateData only).
+	Offset     int64  // The offset of the stream in the file (for BackupSparseBlock only).
+}
+
+type win32StreamID struct {
+	StreamID   uint32
+	Attributes uint32
+	Size       uint64
+	NameSize   uint32
+}
+
+// BackupStreamReader reads from a stream produced by the BackupRead Win32 API and produces a series
+// of BackupHeader values.
+type BackupStreamReader struct {
+	r         io.Reader
+	bytesLeft int64
+}
+
+// NewBackupStreamReader produces a BackupStreamReader from any io.Reader.
+func NewBackupStreamReader(r io.Reader) *BackupStreamReader {
+	return &BackupStreamReader{r, 0}
+}
+
+// Next returns the next backup stream and prepares for calls to Read(). It skips the remainder of the current stream if
+// it was not completely read.
+func (r *BackupStreamReader) Next() (*BackupHeader, error) {
+	if r.bytesLeft > 0 { //nolint:nestif // todo: flatten this
+		if s, ok := r.r.(io.Seeker); ok {
+			// Make sure Seek on io.SeekCurrent sometimes succeeds
+			// before trying the actual seek.
+			if _, err := s.Seek(0, io.SeekCurrent); err == nil {
+				if _, err = s.Seek(r.bytesLeft, io.SeekCurrent); err != nil {
+					return nil, err
+				}
+				r.bytesLeft = 0
+			}
+		}
+		if _, err := io.Copy(io.Discard, r); err != nil {
+			return nil, err
+		}
+	}
+	var wsi win32StreamID
+	if err := binary.Read(r.r, binary.LittleEndian, &wsi); err != nil {
+		return nil, err
+	}
+	hdr := &BackupHeader{
+		Id:         wsi.StreamID,
+		Attributes: wsi.Attributes,
+		Size:       int64(wsi.Size),
+	}
+	if wsi.NameSize != 0 {
+		name := make([]uint16, int(wsi.NameSize/2))
+		if err := binary.Read(r.r, binary.LittleEndian, name); err != nil {
+			return nil, err
+		}
+		hdr.Name = windows.UTF16ToString(name)
+	}
+	if wsi.StreamID == BackupSparseBlock {
+		if err := binary.Read(r.r, binary.LittleEndian, &hdr.Offset); err != nil {
+			return nil, err
+		}
+		hdr.Size -= 8
+	}
+	r.bytesLeft = hdr.Size
+	return hdr, nil
+}
+
+// Read reads from the current backup stream.
+func (r *BackupStreamReader) Read(b []byte) (int, error) {
+	if r.bytesLeft == 0 {
+		return 0, io.EOF
+	}
+	if int64(len(b)) > r.bytesLeft {
+		b = b[:r.bytesLeft]
+	}
+	n, err := r.r.Read(b)
+	r.bytesLeft -= int64(n)
+	if err == io.EOF {
+		err = io.ErrUnexpectedEOF
+	} else if r.bytesLeft == 0 && err == nil {
+		err = io.EOF
+	}
+	return n, err
+}
+
+// BackupStreamWriter writes a stream compatible with the BackupWrite Win32 API.
+type BackupStreamWriter struct {
+	w         io.Writer
+	bytesLeft int64
+}
+
+// NewBackupStreamWriter produces a BackupStreamWriter on top of an io.Writer.
+func NewBackupStreamWriter(w io.Writer) *BackupStreamWriter {
+	return &BackupStreamWriter{w, 0}
+}
+
+// WriteHeader writes the next backup stream header and prepares for calls to Write().
+func (w *BackupStreamWriter) WriteHeader(hdr *BackupHeader) error {
+	if w.bytesLeft != 0 {
+		return fmt.Errorf("missing %d bytes", w.bytesLeft)
+	}
+	name := utf16.Encode([]rune(hdr.Name))
+	wsi := win32StreamID{
+		StreamID:   hdr.Id,
+		Attributes: hdr.Attributes,
+		Size:       uint64(hdr.Size),
+		NameSize:   uint32(len(name) * 2),
+	}
+	if hdr.Id == BackupSparseBlock {
+		// Include space for the int64 block offset
+		wsi.Size += 8
+	}
+	if err := binary.Write(w.w, binary.LittleEndian, &wsi); err != nil {
+		return err
+	}
+	if len(name) != 0 {
+		if err := binary.Write(w.w, binary.LittleEndian, name); err != nil {
+			return err
+		}
+	}
+	if hdr.Id == BackupSparseBlock {
+		if err := binary.Write(w.w, binary.LittleEndian, hdr.Offset); err != nil {
+			return err
+		}
+	}
+	w.bytesLeft = hdr.Size
+	return nil
+}
+
+// Write writes to the current backup stream.
+func (w *BackupStreamWriter) Write(b []byte) (int, error) {
+	if w.bytesLeft < int64(len(b)) {
+		return 0, fmt.Errorf("too many bytes by %d", int64(len(b))-w.bytesLeft)
+	}
+	n, err := w.w.Write(b)
+	w.bytesLeft -= int64(n)
+	return n, err
+}
+
+// BackupFileReader provides an io.ReadCloser interface on top of the BackupRead Win32 API.
+type BackupFileReader struct {
+	f               *os.File
+	includeSecurity bool
+	ctx             uintptr
+}
+
+// NewBackupFileReader returns a new BackupFileReader from a file handle. If includeSecurity is true,
+// Read will attempt to read the security descriptor of the file.
+func NewBackupFileReader(f *os.File, includeSecurity bool) *BackupFileReader {
+	r := &BackupFileReader{f, includeSecurity, 0}
+	return r
+}
+
+// Read reads a backup stream from the file by calling the Win32 API BackupRead().
+func (r *BackupFileReader) Read(b []byte) (int, error) {
+	var bytesRead uint32
+	err := backupRead(windows.Handle(r.f.Fd()), b, &bytesRead, false, r.includeSecurity, &r.ctx)
+	if err != nil {
+		return 0, &os.PathError{Op: "BackupRead", Path: r.f.Name(), Err: err}
+	}
+	runtime.KeepAlive(r.f)
+	if bytesRead == 0 {
+		return 0, io.EOF
+	}
+	return int(bytesRead), nil
+}
+
+// Close frees Win32 resources associated with the BackupFileReader. It does not close
+// the underlying file.
+func (r *BackupFileReader) Close() error {
+	if r.ctx != 0 {
+		_ = backupRead(windows.Handle(r.f.Fd()), nil, nil, true, false, &r.ctx)
+		runtime.KeepAlive(r.f)
+		r.ctx = 0
+	}
+	return nil
+}
+
+// BackupFileWriter provides an io.WriteCloser interface on top of the BackupWrite Win32 API.
+type BackupFileWriter struct {
+	f               *os.File
+	includeSecurity bool
+	ctx             uintptr
+}
+
+// NewBackupFileWriter returns a new BackupFileWriter from a file handle. If includeSecurity is true,
+// Write() will attempt to restore the security descriptor from the stream.
+func NewBackupFileWriter(f *os.File, includeSecurity bool) *BackupFileWriter {
+	w := &BackupFileWriter{f, includeSecurity, 0}
+	return w
+}
+
+// Write restores a portion of the file using the provided backup stream.
+func (w *BackupFileWriter) Write(b []byte) (int, error) {
+	var bytesWritten uint32
+	err := backupWrite(windows.Handle(w.f.Fd()), b, &bytesWritten, false, w.includeSecurity, &w.ctx)
+	if err != nil {
+		return 0, &os.PathError{Op: "BackupWrite", Path: w.f.Name(), Err: err}
+	}
+	runtime.KeepAlive(w.f)
+	if int(bytesWritten) != len(b) {
+		return int(bytesWritten), errors.New("not all bytes could be written")
+	}
+	return len(b), nil
+}
+
+// Close frees Win32 resources associated with the BackupFileWriter. It does not
+// close the underlying file.
+func (w *BackupFileWriter) Close() error {
+	if w.ctx != 0 {
+		_ = backupWrite(windows.Handle(w.f.Fd()), nil, nil, true, false, &w.ctx)
+		runtime.KeepAlive(w.f)
+		w.ctx = 0
+	}
+	return nil
+}
+
+// OpenForBackup opens a file or directory, potentially skipping access checks if the backup
+// or restore privileges have been acquired.
+//
+// If the file opened was a directory, it cannot be used with Readdir().
+func OpenForBackup(path string, access uint32, share uint32, createmode uint32) (*os.File, error) {
+	h, err := fs.CreateFile(path,
+		fs.AccessMask(access),
+		fs.FileShareMode(share),
+		nil,
+		fs.FileCreationDisposition(createmode),
+		fs.FILE_FLAG_BACKUP_SEMANTICS|fs.FILE_FLAG_OPEN_REPARSE_POINT,
+		0,
+	)
+	if err != nil {
+		err = &os.PathError{Op: "open", Path: path, Err: err}
+		return nil, err
+	}
+	return os.NewFile(uintptr(h), path), nil
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/Microsoft/go-winio/doc.go 0.21.3-0ubuntu1/vendor/github.com/Microsoft/go-winio/doc.go
--- 0.19.3+ds1-4/vendor/github.com/Microsoft/go-winio/doc.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/Microsoft/go-winio/doc.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,22 @@
+// This package provides utilities for efficiently performing Win32 IO operations in Go.
+// Currently, this package provides support for general IO and management of
+//   - named pipes
+//   - files
+//   - [Hyper-V sockets]
+//
+// This code is similar to Go's [net] package, and uses IO completion ports to avoid
+// blocking IO on system threads, allowing Go to reuse the thread to schedule other goroutines.
+//
+// This limits support to Windows Vista and newer operating systems.
+//
+// Additionally, this package provides support for:
+//   - creating and managing GUIDs
+//   - writing to [ETW]
+//   - opening and managing VHDs
+//   - parsing [Windows Image files]
+//   - auto-generating Win32 API code
+//
+// [Hyper-V sockets]: https://docs.microsoft.com/en-us/virtualization/hyper-v-on-windows/user-guide/make-integration-service
+// [ETW]: https://docs.microsoft.com/en-us/windows-hardware/drivers/devtest/event-tracing-for-windows--etw-
+// [Windows Image files]: https://docs.microsoft.com/en-us/windows-hardware/manufacture/desktop/work-with-windows-images
+package winio
diff -pruN 0.19.3+ds1-4/vendor/github.com/Microsoft/go-winio/ea.go 0.21.3-0ubuntu1/vendor/github.com/Microsoft/go-winio/ea.go
--- 0.19.3+ds1-4/vendor/github.com/Microsoft/go-winio/ea.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/Microsoft/go-winio/ea.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,137 @@
+package winio
+
+import (
+	"bytes"
+	"encoding/binary"
+	"errors"
+)
+
+type fileFullEaInformation struct {
+	NextEntryOffset uint32
+	Flags           uint8
+	NameLength      uint8
+	ValueLength     uint16
+}
+
+var (
+	fileFullEaInformationSize = binary.Size(&fileFullEaInformation{})
+
+	errInvalidEaBuffer = errors.New("invalid extended attribute buffer")
+	errEaNameTooLarge  = errors.New("extended attribute name too large")
+	errEaValueTooLarge = errors.New("extended attribute value too large")
+)
+
+// ExtendedAttribute represents a single Windows EA.
+type ExtendedAttribute struct {
+	Name  string
+	Value []byte
+	Flags uint8
+}
+
+func parseEa(b []byte) (ea ExtendedAttribute, nb []byte, err error) {
+	var info fileFullEaInformation
+	err = binary.Read(bytes.NewReader(b), binary.LittleEndian, &info)
+	if err != nil {
+		err = errInvalidEaBuffer
+		return ea, nb, err
+	}
+
+	nameOffset := fileFullEaInformationSize
+	nameLen := int(info.NameLength)
+	valueOffset := nameOffset + int(info.NameLength) + 1
+	valueLen := int(info.ValueLength)
+	nextOffset := int(info.NextEntryOffset)
+	if valueLen+valueOffset > len(b) || nextOffset < 0 || nextOffset > len(b) {
+		err = errInvalidEaBuffer
+		return ea, nb, err
+	}
+
+	ea.Name = string(b[nameOffset : nameOffset+nameLen])
+	ea.Value = b[valueOffset : valueOffset+valueLen]
+	ea.Flags = info.Flags
+	if info.NextEntryOffset != 0 {
+		nb = b[info.NextEntryOffset:]
+	}
+	return ea, nb, err
+}
+
+// DecodeExtendedAttributes decodes a list of EAs from a FILE_FULL_EA_INFORMATION
+// buffer retrieved from BackupRead, ZwQueryEaFile, etc.
+func DecodeExtendedAttributes(b []byte) (eas []ExtendedAttribute, err error) {
+	for len(b) != 0 {
+		ea, nb, err := parseEa(b)
+		if err != nil {
+			return nil, err
+		}
+
+		eas = append(eas, ea)
+		b = nb
+	}
+	return eas, err
+}
+
+func writeEa(buf *bytes.Buffer, ea *ExtendedAttribute, last bool) error {
+	if int(uint8(len(ea.Name))) != len(ea.Name) {
+		return errEaNameTooLarge
+	}
+	if int(uint16(len(ea.Value))) != len(ea.Value) {
+		return errEaValueTooLarge
+	}
+	entrySize := uint32(fileFullEaInformationSize + len(ea.Name) + 1 + len(ea.Value))
+	withPadding := (entrySize + 3) &^ 3
+	nextOffset := uint32(0)
+	if !last {
+		nextOffset = withPadding
+	}
+	info := fileFullEaInformation{
+		NextEntryOffset: nextOffset,
+		Flags:           ea.Flags,
+		NameLength:      uint8(len(ea.Name)),
+		ValueLength:     uint16(len(ea.Value)),
+	}
+
+	err := binary.Write(buf, binary.LittleEndian, &info)
+	if err != nil {
+		return err
+	}
+
+	_, err = buf.Write([]byte(ea.Name))
+	if err != nil {
+		return err
+	}
+
+	err = buf.WriteByte(0)
+	if err != nil {
+		return err
+	}
+
+	_, err = buf.Write(ea.Value)
+	if err != nil {
+		return err
+	}
+
+	_, err = buf.Write([]byte{0, 0, 0}[0 : withPadding-entrySize])
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// EncodeExtendedAttributes encodes a list of EAs into a FILE_FULL_EA_INFORMATION
+// buffer for use with BackupWrite, ZwSetEaFile, etc.
+func EncodeExtendedAttributes(eas []ExtendedAttribute) ([]byte, error) {
+	var buf bytes.Buffer
+	for i := range eas {
+		last := false
+		if i == len(eas)-1 {
+			last = true
+		}
+
+		err := writeEa(&buf, &eas[i], last)
+		if err != nil {
+			return nil, err
+		}
+	}
+	return buf.Bytes(), nil
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/Microsoft/go-winio/file.go 0.21.3-0ubuntu1/vendor/github.com/Microsoft/go-winio/file.go
--- 0.19.3+ds1-4/vendor/github.com/Microsoft/go-winio/file.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/Microsoft/go-winio/file.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,320 @@
+//go:build windows
+// +build windows
+
+package winio
+
+import (
+	"errors"
+	"io"
+	"runtime"
+	"sync"
+	"sync/atomic"
+	"syscall"
+	"time"
+
+	"golang.org/x/sys/windows"
+)
+
+//sys cancelIoEx(file windows.Handle, o *windows.Overlapped) (err error) = CancelIoEx
+//sys createIoCompletionPort(file windows.Handle, port windows.Handle, key uintptr, threadCount uint32) (newport windows.Handle, err error) = CreateIoCompletionPort
+//sys getQueuedCompletionStatus(port windows.Handle, bytes *uint32, key *uintptr, o **ioOperation, timeout uint32) (err error) = GetQueuedCompletionStatus
+//sys setFileCompletionNotificationModes(h windows.Handle, flags uint8) (err error) = SetFileCompletionNotificationModes
+//sys wsaGetOverlappedResult(h windows.Handle, o *windows.Overlapped, bytes *uint32, wait bool, flags *uint32) (err error) = ws2_32.WSAGetOverlappedResult
+
+var (
+	ErrFileClosed = errors.New("file has already been closed")
+	ErrTimeout    = &timeoutError{}
+)
+
+type timeoutError struct{}
+
+func (*timeoutError) Error() string   { return "i/o timeout" }
+func (*timeoutError) Timeout() bool   { return true }
+func (*timeoutError) Temporary() bool { return true }
+
+type timeoutChan chan struct{}
+
+var ioInitOnce sync.Once
+var ioCompletionPort windows.Handle
+
+// ioResult contains the result of an asynchronous IO operation.
+type ioResult struct {
+	bytes uint32
+	err   error
+}
+
+// ioOperation represents an outstanding asynchronous Win32 IO.
+type ioOperation struct {
+	o  windows.Overlapped
+	ch chan ioResult
+}
+
+func initIO() {
+	h, err := createIoCompletionPort(windows.InvalidHandle, 0, 0, 0xffffffff)
+	if err != nil {
+		panic(err)
+	}
+	ioCompletionPort = h
+	go ioCompletionProcessor(h)
+}
+
+// win32File implements Reader, Writer, and Closer on a Win32 handle without blocking in a syscall.
+// It takes ownership of this handle and will close it if it is garbage collected.
+type win32File struct {
+	handle        windows.Handle
+	wg            sync.WaitGroup
+	wgLock        sync.RWMutex
+	closing       atomic.Bool
+	socket        bool
+	readDeadline  deadlineHandler
+	writeDeadline deadlineHandler
+}
+
+type deadlineHandler struct {
+	setLock     sync.Mutex
+	channel     timeoutChan
+	channelLock sync.RWMutex
+	timer       *time.Timer
+	timedout    atomic.Bool
+}
+
+// makeWin32File makes a new win32File from an existing file handle.
+func makeWin32File(h windows.Handle) (*win32File, error) {
+	f := &win32File{handle: h}
+	ioInitOnce.Do(initIO)
+	_, err := createIoCompletionPort(h, ioCompletionPort, 0, 0xffffffff)
+	if err != nil {
+		return nil, err
+	}
+	err = setFileCompletionNotificationModes(h, windows.FILE_SKIP_COMPLETION_PORT_ON_SUCCESS|windows.FILE_SKIP_SET_EVENT_ON_HANDLE)
+	if err != nil {
+		return nil, err
+	}
+	f.readDeadline.channel = make(timeoutChan)
+	f.writeDeadline.channel = make(timeoutChan)
+	return f, nil
+}
+
+// Deprecated: use NewOpenFile instead.
+func MakeOpenFile(h syscall.Handle) (io.ReadWriteCloser, error) {
+	return NewOpenFile(windows.Handle(h))
+}
+
+func NewOpenFile(h windows.Handle) (io.ReadWriteCloser, error) {
+	// If we return the result of makeWin32File directly, it can result in an
+	// interface-wrapped nil, rather than a nil interface value.
+	f, err := makeWin32File(h)
+	if err != nil {
+		return nil, err
+	}
+	return f, nil
+}
+
+// closeHandle closes the resources associated with a Win32 handle.
+func (f *win32File) closeHandle() {
+	f.wgLock.Lock()
+	// Atomically set that we are closing, releasing the resources only once.
+	if !f.closing.Swap(true) {
+		f.wgLock.Unlock()
+		// cancel all IO and wait for it to complete
+		_ = cancelIoEx(f.handle, nil)
+		f.wg.Wait()
+		// at this point, no new IO can start
+		windows.Close(f.handle)
+		f.handle = 0
+	} else {
+		f.wgLock.Unlock()
+	}
+}
+
+// Close closes a win32File.
+func (f *win32File) Close() error {
+	f.closeHandle()
+	return nil
+}
+
+// IsClosed checks if the file has been closed.
+func (f *win32File) IsClosed() bool {
+	return f.closing.Load()
+}
+
+// prepareIO prepares for a new IO operation.
+// The caller must call f.wg.Done() when the IO is finished, prior to Close() returning.
+func (f *win32File) prepareIO() (*ioOperation, error) {
+	f.wgLock.RLock()
+	if f.closing.Load() {
+		f.wgLock.RUnlock()
+		return nil, ErrFileClosed
+	}
+	f.wg.Add(1)
+	f.wgLock.RUnlock()
+	c := &ioOperation{}
+	c.ch = make(chan ioResult)
+	return c, nil
+}
+
+// ioCompletionProcessor processes completed async IOs forever.
+func ioCompletionProcessor(h windows.Handle) {
+	for {
+		var bytes uint32
+		var key uintptr
+		var op *ioOperation
+		err := getQueuedCompletionStatus(h, &bytes, &key, &op, windows.INFINITE)
+		if op == nil {
+			panic(err)
+		}
+		op.ch <- ioResult{bytes, err}
+	}
+}
+
+// todo: helsaawy - create an asyncIO version that takes a context
+
+// asyncIO processes the return value from ReadFile or WriteFile, blocking until
+// the operation has actually completed.
+func (f *win32File) asyncIO(c *ioOperation, d *deadlineHandler, bytes uint32, err error) (int, error) {
+	if err != windows.ERROR_IO_PENDING { //nolint:errorlint // err is Errno
+		return int(bytes), err
+	}
+
+	if f.closing.Load() {
+		_ = cancelIoEx(f.handle, &c.o)
+	}
+
+	var timeout timeoutChan
+	if d != nil {
+		d.channelLock.Lock()
+		timeout = d.channel
+		d.channelLock.Unlock()
+	}
+
+	var r ioResult
+	select {
+	case r = <-c.ch:
+		err = r.err
+		if err == windows.ERROR_OPERATION_ABORTED { //nolint:errorlint // err is Errno
+			if f.closing.Load() {
+				err = ErrFileClosed
+			}
+		} else if err != nil && f.socket {
+			// err is from Win32. Query the overlapped structure to get the winsock error.
+			var bytes, flags uint32
+			err = wsaGetOverlappedResult(f.handle, &c.o, &bytes, false, &flags)
+		}
+	case <-timeout:
+		_ = cancelIoEx(f.handle, &c.o)
+		r = <-c.ch
+		err = r.err
+		if err == windows.ERROR_OPERATION_ABORTED { //nolint:errorlint // err is Errno
+			err = ErrTimeout
+		}
+	}
+
+	// runtime.KeepAlive is needed, as c is passed via native
+	// code to ioCompletionProcessor, c must remain alive
+	// until the channel read is complete.
+	// todo: (de)allocate *ioOperation via win32 heap functions, instead of needing to KeepAlive?
+	runtime.KeepAlive(c)
+	return int(r.bytes), err
+}
+
+// Read reads from a file handle.
+func (f *win32File) Read(b []byte) (int, error) {
+	c, err := f.prepareIO()
+	if err != nil {
+		return 0, err
+	}
+	defer f.wg.Done()
+
+	if f.readDeadline.timedout.Load() {
+		return 0, ErrTimeout
+	}
+
+	var bytes uint32
+	err = windows.ReadFile(f.handle, b, &bytes, &c.o)
+	n, err := f.asyncIO(c, &f.readDeadline, bytes, err)
+	runtime.KeepAlive(b)
+
+	// Handle EOF conditions.
+	if err == nil && n == 0 && len(b) != 0 {
+		return 0, io.EOF
+	} else if err == windows.ERROR_BROKEN_PIPE { //nolint:errorlint // err is Errno
+		return 0, io.EOF
+	}
+	return n, err
+}
+
+// Write writes to a file handle.
+func (f *win32File) Write(b []byte) (int, error) {
+	c, err := f.prepareIO()
+	if err != nil {
+		return 0, err
+	}
+	defer f.wg.Done()
+
+	if f.writeDeadline.timedout.Load() {
+		return 0, ErrTimeout
+	}
+
+	var bytes uint32
+	err = windows.WriteFile(f.handle, b, &bytes, &c.o)
+	n, err := f.asyncIO(c, &f.writeDeadline, bytes, err)
+	runtime.KeepAlive(b)
+	return n, err
+}
+
+func (f *win32File) SetReadDeadline(deadline time.Time) error {
+	return f.readDeadline.set(deadline)
+}
+
+func (f *win32File) SetWriteDeadline(deadline time.Time) error {
+	return f.writeDeadline.set(deadline)
+}
+
+func (f *win32File) Flush() error {
+	return windows.FlushFileBuffers(f.handle)
+}
+
+func (f *win32File) Fd() uintptr {
+	return uintptr(f.handle)
+}
+
+func (d *deadlineHandler) set(deadline time.Time) error {
+	d.setLock.Lock()
+	defer d.setLock.Unlock()
+
+	if d.timer != nil {
+		if !d.timer.Stop() {
+			<-d.channel
+		}
+		d.timer = nil
+	}
+	d.timedout.Store(false)
+
+	select {
+	case <-d.channel:
+		d.channelLock.Lock()
+		d.channel = make(chan struct{})
+		d.channelLock.Unlock()
+	default:
+	}
+
+	if deadline.IsZero() {
+		return nil
+	}
+
+	timeoutIO := func() {
+		d.timedout.Store(true)
+		close(d.channel)
+	}
+
+	now := time.Now()
+	duration := deadline.Sub(now)
+	if deadline.After(now) {
+		// Deadline is in the future, set a timer to wait
+		d.timer = time.AfterFunc(duration, timeoutIO)
+	} else {
+		// Deadline is in the past. Cancel all pending IO now.
+		timeoutIO()
+	}
+	return nil
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/Microsoft/go-winio/fileinfo.go 0.21.3-0ubuntu1/vendor/github.com/Microsoft/go-winio/fileinfo.go
--- 0.19.3+ds1-4/vendor/github.com/Microsoft/go-winio/fileinfo.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/Microsoft/go-winio/fileinfo.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,106 @@
+//go:build windows
+// +build windows
+
+package winio
+
+import (
+	"os"
+	"runtime"
+	"unsafe"
+
+	"golang.org/x/sys/windows"
+)
+
+// FileBasicInfo contains file access time and file attributes information.
+type FileBasicInfo struct {
+	CreationTime, LastAccessTime, LastWriteTime, ChangeTime windows.Filetime
+	FileAttributes                                          uint32
+	_                                                       uint32 // padding
+}
+
+// alignedFileBasicInfo is a FileBasicInfo, but aligned to uint64 by containing
+// uint64 rather than windows.Filetime. Filetime contains two uint32s. uint64
+// alignment is necessary to pass this as FILE_BASIC_INFO.
+type alignedFileBasicInfo struct {
+	CreationTime, LastAccessTime, LastWriteTime, ChangeTime uint64
+	FileAttributes                                          uint32
+	_                                                       uint32 // padding
+}
+
+// GetFileBasicInfo retrieves times and attributes for a file.
+func GetFileBasicInfo(f *os.File) (*FileBasicInfo, error) {
+	bi := &alignedFileBasicInfo{}
+	if err := windows.GetFileInformationByHandleEx(
+		windows.Handle(f.Fd()),
+		windows.FileBasicInfo,
+		(*byte)(unsafe.Pointer(bi)),
+		uint32(unsafe.Sizeof(*bi)),
+	); err != nil {
+		return nil, &os.PathError{Op: "GetFileInformationByHandleEx", Path: f.Name(), Err: err}
+	}
+	runtime.KeepAlive(f)
+	// Reinterpret the alignedFileBasicInfo as a FileBasicInfo so it matches the
+	// public API of this module. The data may be unnecessarily aligned.
+	return (*FileBasicInfo)(unsafe.Pointer(bi)), nil
+}
+
+// SetFileBasicInfo sets times and attributes for a file.
+func SetFileBasicInfo(f *os.File, bi *FileBasicInfo) error {
+	// Create an alignedFileBasicInfo based on a FileBasicInfo. The copy is
+	// suitable to pass to SetFileInformationByHandle.
+	biAligned := *(*alignedFileBasicInfo)(unsafe.Pointer(bi))
+	if err := windows.SetFileInformationByHandle(
+		windows.Handle(f.Fd()),
+		windows.FileBasicInfo,
+		(*byte)(unsafe.Pointer(&biAligned)),
+		uint32(unsafe.Sizeof(biAligned)),
+	); err != nil {
+		return &os.PathError{Op: "SetFileInformationByHandle", Path: f.Name(), Err: err}
+	}
+	runtime.KeepAlive(f)
+	return nil
+}
+
+// FileStandardInfo contains extended information for the file.
+// FILE_STANDARD_INFO in WinBase.h
+// https://docs.microsoft.com/en-us/windows/win32/api/winbase/ns-winbase-file_standard_info
+type FileStandardInfo struct {
+	AllocationSize, EndOfFile int64
+	NumberOfLinks             uint32
+	DeletePending, Directory  bool
+}
+
+// GetFileStandardInfo retrieves extended information for the file.
+func GetFileStandardInfo(f *os.File) (*FileStandardInfo, error) {
+	si := &FileStandardInfo{}
+	if err := windows.GetFileInformationByHandleEx(windows.Handle(f.Fd()),
+		windows.FileStandardInfo,
+		(*byte)(unsafe.Pointer(si)),
+		uint32(unsafe.Sizeof(*si))); err != nil {
+		return nil, &os.PathError{Op: "GetFileInformationByHandleEx", Path: f.Name(), Err: err}
+	}
+	runtime.KeepAlive(f)
+	return si, nil
+}
+
+// FileIDInfo contains the volume serial number and file ID for a file. This pair should be
+// unique on a system.
+type FileIDInfo struct {
+	VolumeSerialNumber uint64
+	FileID             [16]byte
+}
+
+// GetFileID retrieves the unique (volume, file ID) pair for a file.
+func GetFileID(f *os.File) (*FileIDInfo, error) {
+	fileID := &FileIDInfo{}
+	if err := windows.GetFileInformationByHandleEx(
+		windows.Handle(f.Fd()),
+		windows.FileIdInfo,
+		(*byte)(unsafe.Pointer(fileID)),
+		uint32(unsafe.Sizeof(*fileID)),
+	); err != nil {
+		return nil, &os.PathError{Op: "GetFileInformationByHandleEx", Path: f.Name(), Err: err}
+	}
+	runtime.KeepAlive(f)
+	return fileID, nil
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/Microsoft/go-winio/hvsock.go 0.21.3-0ubuntu1/vendor/github.com/Microsoft/go-winio/hvsock.go
--- 0.19.3+ds1-4/vendor/github.com/Microsoft/go-winio/hvsock.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/Microsoft/go-winio/hvsock.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,582 @@
+//go:build windows
+// +build windows
+
+package winio
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"io"
+	"net"
+	"os"
+	"time"
+	"unsafe"
+
+	"golang.org/x/sys/windows"
+
+	"github.com/Microsoft/go-winio/internal/socket"
+	"github.com/Microsoft/go-winio/pkg/guid"
+)
+
+const afHVSock = 34 // AF_HYPERV
+
+// Well known Service and VM IDs
+// https://docs.microsoft.com/en-us/virtualization/hyper-v-on-windows/user-guide/make-integration-service#vmid-wildcards
+
+// HvsockGUIDWildcard is the wildcard VmId for accepting connections from all partitions.
+func HvsockGUIDWildcard() guid.GUID { // 00000000-0000-0000-0000-000000000000
+	return guid.GUID{}
+}
+
+// HvsockGUIDBroadcast is the wildcard VmId for broadcasting sends to all partitions.
+func HvsockGUIDBroadcast() guid.GUID { // ffffffff-ffff-ffff-ffff-ffffffffffff
+	return guid.GUID{
+		Data1: 0xffffffff,
+		Data2: 0xffff,
+		Data3: 0xffff,
+		Data4: [8]uint8{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff},
+	}
+}
+
+// HvsockGUIDLoopback is the Loopback VmId for accepting connections to the same partition as the connector.
+func HvsockGUIDLoopback() guid.GUID { // e0e16197-dd56-4a10-9195-5ee7a155a838
+	return guid.GUID{
+		Data1: 0xe0e16197,
+		Data2: 0xdd56,
+		Data3: 0x4a10,
+		Data4: [8]uint8{0x91, 0x95, 0x5e, 0xe7, 0xa1, 0x55, 0xa8, 0x38},
+	}
+}
+
+// HvsockGUIDSiloHost is the address of a silo's host partition:
+//   - The silo host of a hosted silo is the utility VM.
+//   - The silo host of a silo on a physical host is the physical host.
+func HvsockGUIDSiloHost() guid.GUID { // 36bd0c5c-7276-4223-88ba-7d03b654c568
+	return guid.GUID{
+		Data1: 0x36bd0c5c,
+		Data2: 0x7276,
+		Data3: 0x4223,
+		Data4: [8]byte{0x88, 0xba, 0x7d, 0x03, 0xb6, 0x54, 0xc5, 0x68},
+	}
+}
+
+// HvsockGUIDChildren is the wildcard VmId for accepting connections from the connector's child partitions.
+func HvsockGUIDChildren() guid.GUID { // 90db8b89-0d35-4f79-8ce9-49ea0ac8b7cd
+	return guid.GUID{
+		Data1: 0x90db8b89,
+		Data2: 0xd35,
+		Data3: 0x4f79,
+		Data4: [8]uint8{0x8c, 0xe9, 0x49, 0xea, 0xa, 0xc8, 0xb7, 0xcd},
+	}
+}
+
+// HvsockGUIDParent is the wildcard VmId for accepting connections from the connector's parent partition.
+// Listening on this VmId accepts connection from:
+//   - Inside silos: silo host partition.
+//   - Inside hosted silo: host of the VM.
+//   - Inside VM: VM host.
+//   - Physical host: Not supported.
+func HvsockGUIDParent() guid.GUID { // a42e7cda-d03f-480c-9cc2-a4de20abb878
+	return guid.GUID{
+		Data1: 0xa42e7cda,
+		Data2: 0xd03f,
+		Data3: 0x480c,
+		Data4: [8]uint8{0x9c, 0xc2, 0xa4, 0xde, 0x20, 0xab, 0xb8, 0x78},
+	}
+}
+
+// hvsockVsockServiceTemplate is the Service GUID used for the VSOCK protocol.
+func hvsockVsockServiceTemplate() guid.GUID { // 00000000-facb-11e6-bd58-64006a7986d3
+	return guid.GUID{
+		Data2: 0xfacb,
+		Data3: 0x11e6,
+		Data4: [8]uint8{0xbd, 0x58, 0x64, 0x00, 0x6a, 0x79, 0x86, 0xd3},
+	}
+}
+
+// An HvsockAddr is an address for a AF_HYPERV socket.
+type HvsockAddr struct {
+	VMID      guid.GUID
+	ServiceID guid.GUID
+}
+
+type rawHvsockAddr struct {
+	Family    uint16
+	_         uint16
+	VMID      guid.GUID
+	ServiceID guid.GUID
+}
+
+var _ socket.RawSockaddr = &rawHvsockAddr{}
+
+// Network returns the address's network name, "hvsock".
+func (*HvsockAddr) Network() string {
+	return "hvsock"
+}
+
+func (addr *HvsockAddr) String() string {
+	return fmt.Sprintf("%s:%s", &addr.VMID, &addr.ServiceID)
+}
+
+// VsockServiceID returns an hvsock service ID corresponding to the specified AF_VSOCK port.
+func VsockServiceID(port uint32) guid.GUID {
+	g := hvsockVsockServiceTemplate() // make a copy
+	g.Data1 = port
+	return g
+}
+
+func (addr *HvsockAddr) raw() rawHvsockAddr {
+	return rawHvsockAddr{
+		Family:    afHVSock,
+		VMID:      addr.VMID,
+		ServiceID: addr.ServiceID,
+	}
+}
+
+func (addr *HvsockAddr) fromRaw(raw *rawHvsockAddr) {
+	addr.VMID = raw.VMID
+	addr.ServiceID = raw.ServiceID
+}
+
+// Sockaddr returns a pointer to and the size of this struct.
+//
+// Implements the [socket.RawSockaddr] interface, and allows use in
+// [socket.Bind] and [socket.ConnectEx].
+func (r *rawHvsockAddr) Sockaddr() (unsafe.Pointer, int32, error) {
+	return unsafe.Pointer(r), int32(unsafe.Sizeof(rawHvsockAddr{})), nil
+}
+
+// Sockaddr interface allows use with `sockets.Bind()` and `.ConnectEx()`.
+func (r *rawHvsockAddr) FromBytes(b []byte) error {
+	n := int(unsafe.Sizeof(rawHvsockAddr{}))
+
+	if len(b) < n {
+		return fmt.Errorf("got %d, want %d: %w", len(b), n, socket.ErrBufferSize)
+	}
+
+	copy(unsafe.Slice((*byte)(unsafe.Pointer(r)), n), b[:n])
+	if r.Family != afHVSock {
+		return fmt.Errorf("got %d, want %d: %w", r.Family, afHVSock, socket.ErrAddrFamily)
+	}
+
+	return nil
+}
+
+// HvsockListener is a socket listener for the AF_HYPERV address family.
+type HvsockListener struct {
+	sock *win32File
+	addr HvsockAddr
+}
+
+var _ net.Listener = &HvsockListener{}
+
+// HvsockConn is a connected socket of the AF_HYPERV address family.
+type HvsockConn struct {
+	sock          *win32File
+	local, remote HvsockAddr
+}
+
+var _ net.Conn = &HvsockConn{}
+
+func newHVSocket() (*win32File, error) {
+	fd, err := windows.Socket(afHVSock, windows.SOCK_STREAM, 1)
+	if err != nil {
+		return nil, os.NewSyscallError("socket", err)
+	}
+	f, err := makeWin32File(fd)
+	if err != nil {
+		windows.Close(fd)
+		return nil, err
+	}
+	f.socket = true
+	return f, nil
+}
+
+// ListenHvsock listens for connections on the specified hvsock address.
+func ListenHvsock(addr *HvsockAddr) (_ *HvsockListener, err error) {
+	l := &HvsockListener{addr: *addr}
+
+	var sock *win32File
+	sock, err = newHVSocket()
+	if err != nil {
+		return nil, l.opErr("listen", err)
+	}
+	defer func() {
+		if err != nil {
+			_ = sock.Close()
+		}
+	}()
+
+	sa := addr.raw()
+	err = socket.Bind(sock.handle, &sa)
+	if err != nil {
+		return nil, l.opErr("listen", os.NewSyscallError("socket", err))
+	}
+	err = windows.Listen(sock.handle, 16)
+	if err != nil {
+		return nil, l.opErr("listen", os.NewSyscallError("listen", err))
+	}
+	return &HvsockListener{sock: sock, addr: *addr}, nil
+}
+
+func (l *HvsockListener) opErr(op string, err error) error {
+	return &net.OpError{Op: op, Net: "hvsock", Addr: &l.addr, Err: err}
+}
+
+// Addr returns the listener's network address.
+func (l *HvsockListener) Addr() net.Addr {
+	return &l.addr
+}
+
+// Accept waits for the next connection and returns it.
+func (l *HvsockListener) Accept() (_ net.Conn, err error) {
+	sock, err := newHVSocket()
+	if err != nil {
+		return nil, l.opErr("accept", err)
+	}
+	defer func() {
+		if sock != nil {
+			sock.Close()
+		}
+	}()
+	c, err := l.sock.prepareIO()
+	if err != nil {
+		return nil, l.opErr("accept", err)
+	}
+	defer l.sock.wg.Done()
+
+	// AcceptEx, per documentation, requires an extra 16 bytes per address.
+	//
+	// https://docs.microsoft.com/en-us/windows/win32/api/mswsock/nf-mswsock-acceptex
+	const addrlen = uint32(16 + unsafe.Sizeof(rawHvsockAddr{}))
+	var addrbuf [addrlen * 2]byte
+
+	var bytes uint32
+	err = windows.AcceptEx(l.sock.handle, sock.handle, &addrbuf[0], 0 /* rxdatalen */, addrlen, addrlen, &bytes, &c.o)
+	if _, err = l.sock.asyncIO(c, nil, bytes, err); err != nil {
+		return nil, l.opErr("accept", os.NewSyscallError("acceptex", err))
+	}
+
+	conn := &HvsockConn{
+		sock: sock,
+	}
+	// The local address returned in the AcceptEx buffer is the same as the Listener socket's
+	// address. However, the service GUID reported by GetSockName is different from the Listener's
+	// socket, and is sometimes the same as the local address of the socket that dialed the
+	// address, with the service GUID.Data1 incremented, but other times is different.
+	// todo: does the local address matter? is the listener's address or the actual address appropriate?
+	conn.local.fromRaw((*rawHvsockAddr)(unsafe.Pointer(&addrbuf[0])))
+	conn.remote.fromRaw((*rawHvsockAddr)(unsafe.Pointer(&addrbuf[addrlen])))
+
+	// initialize the accepted socket and update its properties with those of the listening socket
+	if err = windows.Setsockopt(sock.handle,
+		windows.SOL_SOCKET, windows.SO_UPDATE_ACCEPT_CONTEXT,
+		(*byte)(unsafe.Pointer(&l.sock.handle)), int32(unsafe.Sizeof(l.sock.handle))); err != nil {
+		return nil, conn.opErr("accept", os.NewSyscallError("setsockopt", err))
+	}
+
+	sock = nil
+	return conn, nil
+}
+
+// Close closes the listener, causing any pending Accept calls to fail.
+func (l *HvsockListener) Close() error {
+	return l.sock.Close()
+}
+
+// HvsockDialer configures and dials a Hyper-V Socket (ie, [HvsockConn]).
+type HvsockDialer struct {
+	// Deadline is the time the Dial operation must connect before erroring.
+	Deadline time.Time
+
+	// Retries is the number of additional connects to try if the connection times out, is refused,
+	// or the host is unreachable
+	Retries uint
+
+	// RetryWait is the time to wait after a connection error to retry
+	RetryWait time.Duration
+
+	rt *time.Timer // redial wait timer
+}
+
+// Dial the Hyper-V socket at addr.
+//
+// See [HvsockDialer.Dial] for more information.
+func Dial(ctx context.Context, addr *HvsockAddr) (conn *HvsockConn, err error) {
+	return (&HvsockDialer{}).Dial(ctx, addr)
+}
+
+// Dial attempts to connect to the Hyper-V socket at addr, and returns a connection if successful.
+// Will attempt (HvsockDialer).Retries if dialing fails, waiting (HvsockDialer).RetryWait between
+// retries.
+//
+// Dialing can be cancelled either by providing (HvsockDialer).Deadline, or cancelling ctx.
+func (d *HvsockDialer) Dial(ctx context.Context, addr *HvsockAddr) (conn *HvsockConn, err error) {
+	op := "dial"
+	// create the conn early to use opErr()
+	conn = &HvsockConn{
+		remote: *addr,
+	}
+
+	if !d.Deadline.IsZero() {
+		var cancel context.CancelFunc
+		ctx, cancel = context.WithDeadline(ctx, d.Deadline)
+		defer cancel()
+	}
+
+	// preemptive timeout/cancellation check
+	if err = ctx.Err(); err != nil {
+		return nil, conn.opErr(op, err)
+	}
+
+	sock, err := newHVSocket()
+	if err != nil {
+		return nil, conn.opErr(op, err)
+	}
+	defer func() {
+		if sock != nil {
+			sock.Close()
+		}
+	}()
+
+	sa := addr.raw()
+	err = socket.Bind(sock.handle, &sa)
+	if err != nil {
+		return nil, conn.opErr(op, os.NewSyscallError("bind", err))
+	}
+
+	c, err := sock.prepareIO()
+	if err != nil {
+		return nil, conn.opErr(op, err)
+	}
+	defer sock.wg.Done()
+	var bytes uint32
+	for i := uint(0); i <= d.Retries; i++ {
+		err = socket.ConnectEx(
+			sock.handle,
+			&sa,
+			nil, // sendBuf
+			0,   // sendDataLen
+			&bytes,
+			(*windows.Overlapped)(unsafe.Pointer(&c.o)))
+		_, err = sock.asyncIO(c, nil, bytes, err)
+		if i < d.Retries && canRedial(err) {
+			if err = d.redialWait(ctx); err == nil {
+				continue
+			}
+		}
+		break
+	}
+	if err != nil {
+		return nil, conn.opErr(op, os.NewSyscallError("connectex", err))
+	}
+
+	// update the connection properties, so shutdown can be used
+	if err = windows.Setsockopt(
+		sock.handle,
+		windows.SOL_SOCKET,
+		windows.SO_UPDATE_CONNECT_CONTEXT,
+		nil, // optvalue
+		0,   // optlen
+	); err != nil {
+		return nil, conn.opErr(op, os.NewSyscallError("setsockopt", err))
+	}
+
+	// get the local name
+	var sal rawHvsockAddr
+	err = socket.GetSockName(sock.handle, &sal)
+	if err != nil {
+		return nil, conn.opErr(op, os.NewSyscallError("getsockname", err))
+	}
+	conn.local.fromRaw(&sal)
+
+	// one last check for timeout, since asyncIO doesn't check the context
+	if err = ctx.Err(); err != nil {
+		return nil, conn.opErr(op, err)
+	}
+
+	conn.sock = sock
+	sock = nil
+
+	return conn, nil
+}
+
+// redialWait waits before attempting to redial, resetting the timer as appropriate.
+func (d *HvsockDialer) redialWait(ctx context.Context) (err error) {
+	if d.RetryWait == 0 {
+		return nil
+	}
+
+	if d.rt == nil {
+		d.rt = time.NewTimer(d.RetryWait)
+	} else {
+		// should already be stopped and drained
+		d.rt.Reset(d.RetryWait)
+	}
+
+	select {
+	case <-ctx.Done():
+	case <-d.rt.C:
+		return nil
+	}
+
+	// stop and drain the timer
+	if !d.rt.Stop() {
+		<-d.rt.C
+	}
+	return ctx.Err()
+}
+
+// assumes error is a plain, unwrapped windows.Errno provided by direct syscall.
+func canRedial(err error) bool {
+	//nolint:errorlint // guaranteed to be an Errno
+	switch err {
+	case windows.WSAECONNREFUSED, windows.WSAENETUNREACH, windows.WSAETIMEDOUT,
+		windows.ERROR_CONNECTION_REFUSED, windows.ERROR_CONNECTION_UNAVAIL:
+		return true
+	default:
+		return false
+	}
+}
+
+func (conn *HvsockConn) opErr(op string, err error) error {
+	// translate from "file closed" to "socket closed"
+	if errors.Is(err, ErrFileClosed) {
+		err = socket.ErrSocketClosed
+	}
+	return &net.OpError{Op: op, Net: "hvsock", Source: &conn.local, Addr: &conn.remote, Err: err}
+}
+
+func (conn *HvsockConn) Read(b []byte) (int, error) {
+	c, err := conn.sock.prepareIO()
+	if err != nil {
+		return 0, conn.opErr("read", err)
+	}
+	defer conn.sock.wg.Done()
+	buf := windows.WSABuf{Buf: &b[0], Len: uint32(len(b))}
+	var flags, bytes uint32
+	err = windows.WSARecv(conn.sock.handle, &buf, 1, &bytes, &flags, &c.o, nil)
+	n, err := conn.sock.asyncIO(c, &conn.sock.readDeadline, bytes, err)
+	if err != nil {
+		var eno windows.Errno
+		if errors.As(err, &eno) {
+			err = os.NewSyscallError("wsarecv", eno)
+		}
+		return 0, conn.opErr("read", err)
+	} else if n == 0 {
+		err = io.EOF
+	}
+	return n, err
+}
+
+func (conn *HvsockConn) Write(b []byte) (int, error) {
+	t := 0
+	for len(b) != 0 {
+		n, err := conn.write(b)
+		if err != nil {
+			return t + n, err
+		}
+		t += n
+		b = b[n:]
+	}
+	return t, nil
+}
+
+func (conn *HvsockConn) write(b []byte) (int, error) {
+	c, err := conn.sock.prepareIO()
+	if err != nil {
+		return 0, conn.opErr("write", err)
+	}
+	defer conn.sock.wg.Done()
+	buf := windows.WSABuf{Buf: &b[0], Len: uint32(len(b))}
+	var bytes uint32
+	err = windows.WSASend(conn.sock.handle, &buf, 1, &bytes, 0, &c.o, nil)
+	n, err := conn.sock.asyncIO(c, &conn.sock.writeDeadline, bytes, err)
+	if err != nil {
+		var eno windows.Errno
+		if errors.As(err, &eno) {
+			err = os.NewSyscallError("wsasend", eno)
+		}
+		return 0, conn.opErr("write", err)
+	}
+	return n, err
+}
+
+// Close closes the socket connection, failing any pending read or write calls.
+func (conn *HvsockConn) Close() error {
+	return conn.sock.Close()
+}
+
+func (conn *HvsockConn) IsClosed() bool {
+	return conn.sock.IsClosed()
+}
+
+// shutdown disables sending or receiving on a socket.
+func (conn *HvsockConn) shutdown(how int) error {
+	if conn.IsClosed() {
+		return socket.ErrSocketClosed
+	}
+
+	err := windows.Shutdown(conn.sock.handle, how)
+	if err != nil {
+		// If the connection was closed, shutdowns fail with "not connected"
+		if errors.Is(err, windows.WSAENOTCONN) ||
+			errors.Is(err, windows.WSAESHUTDOWN) {
+			err = socket.ErrSocketClosed
+		}
+		return os.NewSyscallError("shutdown", err)
+	}
+	return nil
+}
+
+// CloseRead shuts down the read end of the socket, preventing future read operations.
+func (conn *HvsockConn) CloseRead() error {
+	err := conn.shutdown(windows.SHUT_RD)
+	if err != nil {
+		return conn.opErr("closeread", err)
+	}
+	return nil
+}
+
+// CloseWrite shuts down the write end of the socket, preventing future write operations and
+// notifying the other endpoint that no more data will be written.
+func (conn *HvsockConn) CloseWrite() error {
+	err := conn.shutdown(windows.SHUT_WR)
+	if err != nil {
+		return conn.opErr("closewrite", err)
+	}
+	return nil
+}
+
+// LocalAddr returns the local address of the connection.
+func (conn *HvsockConn) LocalAddr() net.Addr {
+	return &conn.local
+}
+
+// RemoteAddr returns the remote address of the connection.
+func (conn *HvsockConn) RemoteAddr() net.Addr {
+	return &conn.remote
+}
+
+// SetDeadline implements the net.Conn SetDeadline method.
+func (conn *HvsockConn) SetDeadline(t time.Time) error {
+	// todo: implement `SetDeadline` for `win32File`
+	if err := conn.SetReadDeadline(t); err != nil {
+		return fmt.Errorf("set read deadline: %w", err)
+	}
+	if err := conn.SetWriteDeadline(t); err != nil {
+		return fmt.Errorf("set write deadline: %w", err)
+	}
+	return nil
+}
+
+// SetReadDeadline implements the net.Conn SetReadDeadline method.
+func (conn *HvsockConn) SetReadDeadline(t time.Time) error {
+	return conn.sock.SetReadDeadline(t)
+}
+
+// SetWriteDeadline implements the net.Conn SetWriteDeadline method.
+func (conn *HvsockConn) SetWriteDeadline(t time.Time) error {
+	return conn.sock.SetWriteDeadline(t)
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/Microsoft/go-winio/internal/fs/doc.go 0.21.3-0ubuntu1/vendor/github.com/Microsoft/go-winio/internal/fs/doc.go
--- 0.19.3+ds1-4/vendor/github.com/Microsoft/go-winio/internal/fs/doc.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/Microsoft/go-winio/internal/fs/doc.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,2 @@
+// This package contains Win32 filesystem functionality.
+package fs
diff -pruN 0.19.3+ds1-4/vendor/github.com/Microsoft/go-winio/internal/fs/fs.go 0.21.3-0ubuntu1/vendor/github.com/Microsoft/go-winio/internal/fs/fs.go
--- 0.19.3+ds1-4/vendor/github.com/Microsoft/go-winio/internal/fs/fs.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/Microsoft/go-winio/internal/fs/fs.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,262 @@
+//go:build windows
+
+package fs
+
+import (
+	"golang.org/x/sys/windows"
+
+	"github.com/Microsoft/go-winio/internal/stringbuffer"
+)
+
+//go:generate go run github.com/Microsoft/go-winio/tools/mkwinsyscall -output zsyscall_windows.go fs.go
+
+// https://learn.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-createfilew
+//sys CreateFile(name string, access AccessMask, mode FileShareMode, sa *windows.SecurityAttributes, createmode FileCreationDisposition, attrs FileFlagOrAttribute, templatefile windows.Handle) (handle windows.Handle, err error) [failretval==windows.InvalidHandle] = CreateFileW
+
+const NullHandle windows.Handle = 0
+
+// AccessMask defines standard, specific, and generic rights.
+//
+// Used with CreateFile and NtCreateFile (and co.).
+//
+//	Bitmask:
+//	 3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
+//	 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
+//	+---------------+---------------+-------------------------------+
+//	|G|G|G|G|Resvd|A| StandardRights|         SpecificRights        |
+//	|R|W|E|A|     |S|               |                               |
+//	+-+-------------+---------------+-------------------------------+
+//
+//	GR     Generic Read
+//	GW     Generic Write
+//	GE     Generic Execute
+//	GA     Generic All
+//	Resvd  Reserved
+//	AS     Access Security System
+//
+// https://learn.microsoft.com/en-us/windows/win32/secauthz/access-mask
+//
+// https://learn.microsoft.com/en-us/windows/win32/secauthz/generic-access-rights
+//
+// https://learn.microsoft.com/en-us/windows/win32/fileio/file-access-rights-constants
+type AccessMask = windows.ACCESS_MASK
+
+//nolint:revive // SNAKE_CASE is not idiomatic in Go, but aligned with Win32 API.
+const (
+	// Not actually any.
+	//
+	// For CreateFile: "query certain metadata such as file, directory, or device attributes without accessing that file or device"
+	// https://learn.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-createfilew#parameters
+	FILE_ANY_ACCESS AccessMask = 0
+
+	GENERIC_READ           AccessMask = 0x8000_0000
+	GENERIC_WRITE          AccessMask = 0x4000_0000
+	GENERIC_EXECUTE        AccessMask = 0x2000_0000
+	GENERIC_ALL            AccessMask = 0x1000_0000
+	ACCESS_SYSTEM_SECURITY AccessMask = 0x0100_0000
+
+	// Specific Object Access
+	// from ntioapi.h
+
+	FILE_READ_DATA      AccessMask = (0x0001) // file & pipe
+	FILE_LIST_DIRECTORY AccessMask = (0x0001) // directory
+
+	FILE_WRITE_DATA AccessMask = (0x0002) // file & pipe
+	FILE_ADD_FILE   AccessMask = (0x0002) // directory
+
+	FILE_APPEND_DATA          AccessMask = (0x0004) // file
+	FILE_ADD_SUBDIRECTORY     AccessMask = (0x0004) // directory
+	FILE_CREATE_PIPE_INSTANCE AccessMask = (0x0004) // named pipe
+
+	FILE_READ_EA         AccessMask = (0x0008) // file & directory
+	FILE_READ_PROPERTIES AccessMask = FILE_READ_EA
+
+	FILE_WRITE_EA         AccessMask = (0x0010) // file & directory
+	FILE_WRITE_PROPERTIES AccessMask = FILE_WRITE_EA
+
+	FILE_EXECUTE  AccessMask = (0x0020) // file
+	FILE_TRAVERSE AccessMask = (0x0020) // directory
+
+	FILE_DELETE_CHILD AccessMask = (0x0040) // directory
+
+	FILE_READ_ATTRIBUTES AccessMask = (0x0080) // all
+
+	FILE_WRITE_ATTRIBUTES AccessMask = (0x0100) // all
+
+	FILE_ALL_ACCESS      AccessMask = (STANDARD_RIGHTS_REQUIRED | SYNCHRONIZE | 0x1FF)
+	FILE_GENERIC_READ    AccessMask = (STANDARD_RIGHTS_READ | FILE_READ_DATA | FILE_READ_ATTRIBUTES | FILE_READ_EA | SYNCHRONIZE)
+	FILE_GENERIC_WRITE   AccessMask = (STANDARD_RIGHTS_WRITE | FILE_WRITE_DATA | FILE_WRITE_ATTRIBUTES | FILE_WRITE_EA | FILE_APPEND_DATA | SYNCHRONIZE)
+	FILE_GENERIC_EXECUTE AccessMask = (STANDARD_RIGHTS_EXECUTE | FILE_READ_ATTRIBUTES | FILE_EXECUTE | SYNCHRONIZE)
+
+	SPECIFIC_RIGHTS_ALL AccessMask = 0x0000FFFF
+
+	// Standard Access
+	// from ntseapi.h
+
+	DELETE       AccessMask = 0x0001_0000
+	READ_CONTROL AccessMask = 0x0002_0000
+	WRITE_DAC    AccessMask = 0x0004_0000
+	WRITE_OWNER  AccessMask = 0x0008_0000
+	SYNCHRONIZE  AccessMask = 0x0010_0000
+
+	STANDARD_RIGHTS_REQUIRED AccessMask = 0x000F_0000
+
+	STANDARD_RIGHTS_READ    AccessMask = READ_CONTROL
+	STANDARD_RIGHTS_WRITE   AccessMask = READ_CONTROL
+	STANDARD_RIGHTS_EXECUTE AccessMask = READ_CONTROL
+
+	STANDARD_RIGHTS_ALL AccessMask = 0x001F_0000
+)
+
+type FileShareMode uint32
+
+//nolint:revive // SNAKE_CASE is not idiomatic in Go, but aligned with Win32 API.
+const (
+	FILE_SHARE_NONE        FileShareMode = 0x00
+	FILE_SHARE_READ        FileShareMode = 0x01
+	FILE_SHARE_WRITE       FileShareMode = 0x02
+	FILE_SHARE_DELETE      FileShareMode = 0x04
+	FILE_SHARE_VALID_FLAGS FileShareMode = 0x07
+)
+
+type FileCreationDisposition uint32
+
+//nolint:revive // SNAKE_CASE is not idiomatic in Go, but aligned with Win32 API.
+const (
+	// from winbase.h
+
+	CREATE_NEW        FileCreationDisposition = 0x01
+	CREATE_ALWAYS     FileCreationDisposition = 0x02
+	OPEN_EXISTING     FileCreationDisposition = 0x03
+	OPEN_ALWAYS       FileCreationDisposition = 0x04
+	TRUNCATE_EXISTING FileCreationDisposition = 0x05
+)
+
+// Create disposition values for NtCreate*
+type NTFileCreationDisposition uint32
+
+//nolint:revive // SNAKE_CASE is not idiomatic in Go, but aligned with Win32 API.
+const (
+	// From ntioapi.h
+
+	FILE_SUPERSEDE           NTFileCreationDisposition = 0x00
+	FILE_OPEN                NTFileCreationDisposition = 0x01
+	FILE_CREATE              NTFileCreationDisposition = 0x02
+	FILE_OPEN_IF             NTFileCreationDisposition = 0x03
+	FILE_OVERWRITE           NTFileCreationDisposition = 0x04
+	FILE_OVERWRITE_IF        NTFileCreationDisposition = 0x05
+	FILE_MAXIMUM_DISPOSITION NTFileCreationDisposition = 0x05
+)
+
+// CreateFile and co. take flags or attributes together as one parameter.
+// Define alias until we can use generics to allow both
+//
+// https://learn.microsoft.com/en-us/windows/win32/fileio/file-attribute-constants
+type FileFlagOrAttribute uint32
+
+//nolint:revive // SNAKE_CASE is not idiomatic in Go, but aligned with Win32 API.
+const (
+	// from winnt.h
+
+	FILE_FLAG_WRITE_THROUGH       FileFlagOrAttribute = 0x8000_0000
+	FILE_FLAG_OVERLAPPED          FileFlagOrAttribute = 0x4000_0000
+	FILE_FLAG_NO_BUFFERING        FileFlagOrAttribute = 0x2000_0000
+	FILE_FLAG_RANDOM_ACCESS       FileFlagOrAttribute = 0x1000_0000
+	FILE_FLAG_SEQUENTIAL_SCAN     FileFlagOrAttribute = 0x0800_0000
+	FILE_FLAG_DELETE_ON_CLOSE     FileFlagOrAttribute = 0x0400_0000
+	FILE_FLAG_BACKUP_SEMANTICS    FileFlagOrAttribute = 0x0200_0000
+	FILE_FLAG_POSIX_SEMANTICS     FileFlagOrAttribute = 0x0100_0000
+	FILE_FLAG_OPEN_REPARSE_POINT  FileFlagOrAttribute = 0x0020_0000
+	FILE_FLAG_OPEN_NO_RECALL      FileFlagOrAttribute = 0x0010_0000
+	FILE_FLAG_FIRST_PIPE_INSTANCE FileFlagOrAttribute = 0x0008_0000
+)
+
+// NtCreate* functions take a dedicated CreateOptions parameter.
+//
+// https://learn.microsoft.com/en-us/windows/win32/api/Winternl/nf-winternl-ntcreatefile
+//
+// https://learn.microsoft.com/en-us/windows/win32/devnotes/nt-create-named-pipe-file
+type NTCreateOptions uint32
+
+//nolint:revive // SNAKE_CASE is not idiomatic in Go, but aligned with Win32 API.
+const (
+	// From ntioapi.h
+
+	FILE_DIRECTORY_FILE            NTCreateOptions = 0x0000_0001
+	FILE_WRITE_THROUGH             NTCreateOptions = 0x0000_0002
+	FILE_SEQUENTIAL_ONLY           NTCreateOptions = 0x0000_0004
+	FILE_NO_INTERMEDIATE_BUFFERING NTCreateOptions = 0x0000_0008
+
+	FILE_SYNCHRONOUS_IO_ALERT    NTCreateOptions = 0x0000_0010
+	FILE_SYNCHRONOUS_IO_NONALERT NTCreateOptions = 0x0000_0020
+	FILE_NON_DIRECTORY_FILE      NTCreateOptions = 0x0000_0040
+	FILE_CREATE_TREE_CONNECTION  NTCreateOptions = 0x0000_0080
+
+	FILE_COMPLETE_IF_OPLOCKED NTCreateOptions = 0x0000_0100
+	FILE_NO_EA_KNOWLEDGE      NTCreateOptions = 0x0000_0200
+	FILE_DISABLE_TUNNELING    NTCreateOptions = 0x0000_0400
+	FILE_RANDOM_ACCESS        NTCreateOptions = 0x0000_0800
+
+	FILE_DELETE_ON_CLOSE        NTCreateOptions = 0x0000_1000
+	FILE_OPEN_BY_FILE_ID        NTCreateOptions = 0x0000_2000
+	FILE_OPEN_FOR_BACKUP_INTENT NTCreateOptions = 0x0000_4000
+	FILE_NO_COMPRESSION         NTCreateOptions = 0x0000_8000
+)
+
+type FileSQSFlag = FileFlagOrAttribute
+
+//nolint:revive // SNAKE_CASE is not idiomatic in Go, but aligned with Win32 API.
+const (
+	// from winbase.h
+
+	SECURITY_ANONYMOUS      FileSQSFlag = FileSQSFlag(SecurityAnonymous << 16)
+	SECURITY_IDENTIFICATION FileSQSFlag = FileSQSFlag(SecurityIdentification << 16)
+	SECURITY_IMPERSONATION  FileSQSFlag = FileSQSFlag(SecurityImpersonation << 16)
+	SECURITY_DELEGATION     FileSQSFlag = FileSQSFlag(SecurityDelegation << 16)
+
+	SECURITY_SQOS_PRESENT     FileSQSFlag = 0x0010_0000
+	SECURITY_VALID_SQOS_FLAGS FileSQSFlag = 0x001F_0000
+)
+
+// GetFinalPathNameByHandle flags
+//
+// https://learn.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-getfinalpathnamebyhandlew#parameters
+type GetFinalPathFlag uint32
+
+//nolint:revive // SNAKE_CASE is not idiomatic in Go, but aligned with Win32 API.
+const (
+	GetFinalPathDefaultFlag GetFinalPathFlag = 0x0
+
+	FILE_NAME_NORMALIZED GetFinalPathFlag = 0x0
+	FILE_NAME_OPENED     GetFinalPathFlag = 0x8
+
+	VOLUME_NAME_DOS  GetFinalPathFlag = 0x0
+	VOLUME_NAME_GUID GetFinalPathFlag = 0x1
+	VOLUME_NAME_NT   GetFinalPathFlag = 0x2
+	VOLUME_NAME_NONE GetFinalPathFlag = 0x4
+)
+
+// getFinalPathNameByHandle facilitates calling the Windows API GetFinalPathNameByHandle
+// with the given handle and flags. It transparently takes care of creating a buffer of the
+// correct size for the call.
+//
+// https://learn.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-getfinalpathnamebyhandlew
+func GetFinalPathNameByHandle(h windows.Handle, flags GetFinalPathFlag) (string, error) {
+	b := stringbuffer.NewWString()
+	//TODO: can loop infinitely if Win32 keeps returning the same (or a larger) n?
+	for {
+		n, err := windows.GetFinalPathNameByHandle(h, b.Pointer(), b.Cap(), uint32(flags))
+		if err != nil {
+			return "", err
+		}
+		// If the buffer wasn't large enough, n will be the total size needed (including null terminator).
+		// Resize and try again.
+		if n > b.Cap() {
+			b.ResizeTo(n)
+			continue
+		}
+		// If the buffer is large enough, n will be the size not including the null terminator.
+		// Convert to a Go string and return.
+		return b.String(), nil
+	}
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/Microsoft/go-winio/internal/fs/security.go 0.21.3-0ubuntu1/vendor/github.com/Microsoft/go-winio/internal/fs/security.go
--- 0.19.3+ds1-4/vendor/github.com/Microsoft/go-winio/internal/fs/security.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/Microsoft/go-winio/internal/fs/security.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,12 @@
+package fs
+
+// https://learn.microsoft.com/en-us/windows/win32/api/winnt/ne-winnt-security_impersonation_level
+type SecurityImpersonationLevel int32 // C default enums underlying type is `int`, which is Go `int32`
+
+// Impersonation levels
+const (
+	SecurityAnonymous      SecurityImpersonationLevel = 0
+	SecurityIdentification SecurityImpersonationLevel = 1
+	SecurityImpersonation  SecurityImpersonationLevel = 2
+	SecurityDelegation     SecurityImpersonationLevel = 3
+)
diff -pruN 0.19.3+ds1-4/vendor/github.com/Microsoft/go-winio/internal/fs/zsyscall_windows.go 0.21.3-0ubuntu1/vendor/github.com/Microsoft/go-winio/internal/fs/zsyscall_windows.go
--- 0.19.3+ds1-4/vendor/github.com/Microsoft/go-winio/internal/fs/zsyscall_windows.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/Microsoft/go-winio/internal/fs/zsyscall_windows.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,61 @@
+//go:build windows
+
+// Code generated by 'go generate' using "github.com/Microsoft/go-winio/tools/mkwinsyscall"; DO NOT EDIT.
+
+package fs
+
+import (
+	"syscall"
+	"unsafe"
+
+	"golang.org/x/sys/windows"
+)
+
+var _ unsafe.Pointer
+
+// Do the interface allocations only once for common
+// Errno values.
+const (
+	errnoERROR_IO_PENDING = 997
+)
+
+var (
+	errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING)
+	errERROR_EINVAL     error = syscall.EINVAL
+)
+
+// errnoErr returns common boxed Errno values, to prevent
+// allocations at runtime.
+func errnoErr(e syscall.Errno) error {
+	switch e {
+	case 0:
+		return errERROR_EINVAL
+	case errnoERROR_IO_PENDING:
+		return errERROR_IO_PENDING
+	}
+	return e
+}
+
+var (
+	modkernel32 = windows.NewLazySystemDLL("kernel32.dll")
+
+	procCreateFileW = modkernel32.NewProc("CreateFileW")
+)
+
+func CreateFile(name string, access AccessMask, mode FileShareMode, sa *windows.SecurityAttributes, createmode FileCreationDisposition, attrs FileFlagOrAttribute, templatefile windows.Handle) (handle windows.Handle, err error) {
+	var _p0 *uint16
+	_p0, err = syscall.UTF16PtrFromString(name)
+	if err != nil {
+		return
+	}
+	return _CreateFile(_p0, access, mode, sa, createmode, attrs, templatefile)
+}
+
+func _CreateFile(name *uint16, access AccessMask, mode FileShareMode, sa *windows.SecurityAttributes, createmode FileCreationDisposition, attrs FileFlagOrAttribute, templatefile windows.Handle) (handle windows.Handle, err error) {
+	r0, _, e1 := syscall.SyscallN(procCreateFileW.Addr(), uintptr(unsafe.Pointer(name)), uintptr(access), uintptr(mode), uintptr(unsafe.Pointer(sa)), uintptr(createmode), uintptr(attrs), uintptr(templatefile))
+	handle = windows.Handle(r0)
+	if handle == windows.InvalidHandle {
+		err = errnoErr(e1)
+	}
+	return
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/Microsoft/go-winio/internal/socket/rawaddr.go 0.21.3-0ubuntu1/vendor/github.com/Microsoft/go-winio/internal/socket/rawaddr.go
--- 0.19.3+ds1-4/vendor/github.com/Microsoft/go-winio/internal/socket/rawaddr.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/Microsoft/go-winio/internal/socket/rawaddr.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,20 @@
+package socket
+
+import (
+	"unsafe"
+)
+
+// RawSockaddr allows structs to be used with [Bind] and [ConnectEx]. The
+// struct must meet the Win32 sockaddr requirements specified here:
+// https://docs.microsoft.com/en-us/windows/win32/winsock/sockaddr-2
+//
+// Specifically, the struct size must be at least larger than an int16 (unsigned short)
+// for the address family.
+type RawSockaddr interface {
+	// Sockaddr returns a pointer to the RawSockaddr and its struct size, allowing
+	// for the RawSockaddr's data to be overwritten by syscalls (if necessary).
+	//
+	// It is the caller's responsibility to validate that the values are valid; invalid
+	// pointers or size can cause a panic.
+	Sockaddr() (unsafe.Pointer, int32, error)
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/Microsoft/go-winio/internal/socket/socket.go 0.21.3-0ubuntu1/vendor/github.com/Microsoft/go-winio/internal/socket/socket.go
--- 0.19.3+ds1-4/vendor/github.com/Microsoft/go-winio/internal/socket/socket.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/Microsoft/go-winio/internal/socket/socket.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,177 @@
+//go:build windows
+
+package socket
+
+import (
+	"errors"
+	"fmt"
+	"net"
+	"sync"
+	"syscall"
+	"unsafe"
+
+	"github.com/Microsoft/go-winio/pkg/guid"
+	"golang.org/x/sys/windows"
+)
+
+//go:generate go run github.com/Microsoft/go-winio/tools/mkwinsyscall -output zsyscall_windows.go socket.go
+
+//sys getsockname(s windows.Handle, name unsafe.Pointer, namelen *int32) (err error) [failretval==socketError] = ws2_32.getsockname
+//sys getpeername(s windows.Handle, name unsafe.Pointer, namelen *int32) (err error) [failretval==socketError] = ws2_32.getpeername
+//sys bind(s windows.Handle, name unsafe.Pointer, namelen int32) (err error) [failretval==socketError] = ws2_32.bind
+
+const socketError = uintptr(^uint32(0))
+
+var (
+	// todo(helsaawy): create custom error types to store the desired vs actual size and addr family?
+
+	ErrBufferSize     = errors.New("buffer size")
+	ErrAddrFamily     = errors.New("address family")
+	ErrInvalidPointer = errors.New("invalid pointer")
+	ErrSocketClosed   = fmt.Errorf("socket closed: %w", net.ErrClosed)
+)
+
+// todo(helsaawy): replace these with generics, ie: GetSockName[S RawSockaddr](s windows.Handle) (S, error)
+
+// GetSockName writes the local address of socket s to the [RawSockaddr] rsa.
+// If rsa is not large enough, the [windows.WSAEFAULT] is returned.
+func GetSockName(s windows.Handle, rsa RawSockaddr) error {
+	ptr, l, err := rsa.Sockaddr()
+	if err != nil {
+		return fmt.Errorf("could not retrieve socket pointer and size: %w", err)
+	}
+
+	// although getsockname returns WSAEFAULT if the buffer is too small, it does not set
+	// &l to the correct size, so--apart from doubling the buffer repeatedly--there is no remedy
+	return getsockname(s, ptr, &l)
+}
+
+// GetPeerName returns the remote address the socket is connected to.
+//
+// See [GetSockName] for more information.
+func GetPeerName(s windows.Handle, rsa RawSockaddr) error {
+	ptr, l, err := rsa.Sockaddr()
+	if err != nil {
+		return fmt.Errorf("could not retrieve socket pointer and size: %w", err)
+	}
+
+	return getpeername(s, ptr, &l)
+}
+
+func Bind(s windows.Handle, rsa RawSockaddr) (err error) {
+	ptr, l, err := rsa.Sockaddr()
+	if err != nil {
+		return fmt.Errorf("could not retrieve socket pointer and size: %w", err)
+	}
+
+	return bind(s, ptr, l)
+}
+
+// "golang.org/x/sys/windows".ConnectEx and .Bind only accept internal implementations of the
+// their sockaddr interface, so they cannot be used with HvsockAddr
+// Replicate functionality here from
+// https://cs.opensource.google/go/x/sys/+/master:windows/syscall_windows.go
+
+// The function pointers to `AcceptEx`, `ConnectEx` and `GetAcceptExSockaddrs` must be loaded at
+// runtime via a WSAIoctl call:
+// https://docs.microsoft.com/en-us/windows/win32/api/Mswsock/nc-mswsock-lpfn_connectex#remarks
+
+type runtimeFunc struct {
+	id   guid.GUID
+	once sync.Once
+	addr uintptr
+	err  error
+}
+
+func (f *runtimeFunc) Load() error {
+	f.once.Do(func() {
+		var s windows.Handle
+		s, f.err = windows.Socket(windows.AF_INET, windows.SOCK_STREAM, windows.IPPROTO_TCP)
+		if f.err != nil {
+			return
+		}
+		defer windows.CloseHandle(s) //nolint:errcheck
+
+		var n uint32
+		f.err = windows.WSAIoctl(s,
+			windows.SIO_GET_EXTENSION_FUNCTION_POINTER,
+			(*byte)(unsafe.Pointer(&f.id)),
+			uint32(unsafe.Sizeof(f.id)),
+			(*byte)(unsafe.Pointer(&f.addr)),
+			uint32(unsafe.Sizeof(f.addr)),
+			&n,
+			nil, // overlapped
+			0,   // completionRoutine
+		)
+	})
+	return f.err
+}
+
+var (
+	// todo: add `AcceptEx` and `GetAcceptExSockaddrs`
+	WSAID_CONNECTEX = guid.GUID{ //revive:disable-line:var-naming ALL_CAPS
+		Data1: 0x25a207b9,
+		Data2: 0xddf3,
+		Data3: 0x4660,
+		Data4: [8]byte{0x8e, 0xe9, 0x76, 0xe5, 0x8c, 0x74, 0x06, 0x3e},
+	}
+
+	connectExFunc = runtimeFunc{id: WSAID_CONNECTEX}
+)
+
+func ConnectEx(
+	fd windows.Handle,
+	rsa RawSockaddr,
+	sendBuf *byte,
+	sendDataLen uint32,
+	bytesSent *uint32,
+	overlapped *windows.Overlapped,
+) error {
+	if err := connectExFunc.Load(); err != nil {
+		return fmt.Errorf("failed to load ConnectEx function pointer: %w", err)
+	}
+	ptr, n, err := rsa.Sockaddr()
+	if err != nil {
+		return err
+	}
+	return connectEx(fd, ptr, n, sendBuf, sendDataLen, bytesSent, overlapped)
+}
+
+// BOOL LpfnConnectex(
+//   [in]           SOCKET s,
+//   [in]           const sockaddr *name,
+//   [in]           int namelen,
+//   [in, optional] PVOID lpSendBuffer,
+//   [in]           DWORD dwSendDataLength,
+//   [out]          LPDWORD lpdwBytesSent,
+//   [in]           LPOVERLAPPED lpOverlapped
+// )
+
+func connectEx(
+	s windows.Handle,
+	name unsafe.Pointer,
+	namelen int32,
+	sendBuf *byte,
+	sendDataLen uint32,
+	bytesSent *uint32,
+	overlapped *windows.Overlapped,
+) (err error) {
+	r1, _, e1 := syscall.SyscallN(connectExFunc.addr,
+		uintptr(s),
+		uintptr(name),
+		uintptr(namelen),
+		uintptr(unsafe.Pointer(sendBuf)),
+		uintptr(sendDataLen),
+		uintptr(unsafe.Pointer(bytesSent)),
+		uintptr(unsafe.Pointer(overlapped)),
+	)
+
+	if r1 == 0 {
+		if e1 != 0 {
+			err = error(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return err
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/Microsoft/go-winio/internal/socket/zsyscall_windows.go 0.21.3-0ubuntu1/vendor/github.com/Microsoft/go-winio/internal/socket/zsyscall_windows.go
--- 0.19.3+ds1-4/vendor/github.com/Microsoft/go-winio/internal/socket/zsyscall_windows.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/Microsoft/go-winio/internal/socket/zsyscall_windows.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,69 @@
+//go:build windows
+
+// Code generated by 'go generate' using "github.com/Microsoft/go-winio/tools/mkwinsyscall"; DO NOT EDIT.
+
+package socket
+
+import (
+	"syscall"
+	"unsafe"
+
+	"golang.org/x/sys/windows"
+)
+
+var _ unsafe.Pointer
+
+// Do the interface allocations only once for common
+// Errno values.
+const (
+	errnoERROR_IO_PENDING = 997
+)
+
+var (
+	errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING)
+	errERROR_EINVAL     error = syscall.EINVAL
+)
+
+// errnoErr returns common boxed Errno values, to prevent
+// allocations at runtime.
+func errnoErr(e syscall.Errno) error {
+	switch e {
+	case 0:
+		return errERROR_EINVAL
+	case errnoERROR_IO_PENDING:
+		return errERROR_IO_PENDING
+	}
+	return e
+}
+
+var (
+	modws2_32 = windows.NewLazySystemDLL("ws2_32.dll")
+
+	procbind        = modws2_32.NewProc("bind")
+	procgetpeername = modws2_32.NewProc("getpeername")
+	procgetsockname = modws2_32.NewProc("getsockname")
+)
+
+func bind(s windows.Handle, name unsafe.Pointer, namelen int32) (err error) {
+	r1, _, e1 := syscall.SyscallN(procbind.Addr(), uintptr(s), uintptr(name), uintptr(namelen))
+	if r1 == socketError {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func getpeername(s windows.Handle, name unsafe.Pointer, namelen *int32) (err error) {
+	r1, _, e1 := syscall.SyscallN(procgetpeername.Addr(), uintptr(s), uintptr(name), uintptr(unsafe.Pointer(namelen)))
+	if r1 == socketError {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func getsockname(s windows.Handle, name unsafe.Pointer, namelen *int32) (err error) {
+	r1, _, e1 := syscall.SyscallN(procgetsockname.Addr(), uintptr(s), uintptr(name), uintptr(unsafe.Pointer(namelen)))
+	if r1 == socketError {
+		err = errnoErr(e1)
+	}
+	return
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/Microsoft/go-winio/internal/stringbuffer/wstring.go 0.21.3-0ubuntu1/vendor/github.com/Microsoft/go-winio/internal/stringbuffer/wstring.go
--- 0.19.3+ds1-4/vendor/github.com/Microsoft/go-winio/internal/stringbuffer/wstring.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/Microsoft/go-winio/internal/stringbuffer/wstring.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,132 @@
+package stringbuffer
+
+import (
+	"sync"
+	"unicode/utf16"
+)
+
+// TODO: worth exporting and using in mkwinsyscall?
+
+// Uint16BufferSize is the buffer size in the pool, chosen somewhat arbitrarily to accommodate
+// large path strings:
+// MAX_PATH (260) + size of volume GUID prefix (49) + null terminator = 310.
+const MinWStringCap = 310
+
+// use *[]uint16 since []uint16 creates an extra allocation where the slice header
+// is copied to heap and then referenced via pointer in the interface header that sync.Pool
+// stores.
+var pathPool = sync.Pool{ // if go1.18+ adds Pool[T], use that to store []uint16 directly
+	New: func() interface{} {
+		b := make([]uint16, MinWStringCap)
+		return &b
+	},
+}
+
+func newBuffer() []uint16 { return *(pathPool.Get().(*[]uint16)) }
+
+// freeBuffer copies the slice header data, and puts a pointer to that in the pool.
+// This avoids taking a pointer to the slice header in WString, which can be set to nil.
+func freeBuffer(b []uint16) { pathPool.Put(&b) }
+
+// WString is a wide string buffer ([]uint16) meant for storing UTF-16 encoded strings
+// for interacting with Win32 APIs.
+// Sizes are specified as uint32 and not int.
+//
+// It is not thread safe.
+type WString struct {
+	// type-def allows casting to []uint16 directly, use struct to prevent that and allow adding fields in the future.
+
+	// raw buffer
+	b []uint16
+}
+
+// NewWString returns a [WString] allocated from a shared pool with an
+// initial capacity of at least [MinWStringCap].
+// Since the buffer may have been previously used, its contents are not guaranteed to be empty.
+//
+// The buffer should be freed via [WString.Free]
+func NewWString() *WString {
+	return &WString{
+		b: newBuffer(),
+	}
+}
+
+func (b *WString) Free() {
+	if b.empty() {
+		return
+	}
+	freeBuffer(b.b)
+	b.b = nil
+}
+
+// ResizeTo grows the buffer to at least c and returns the new capacity, freeing the
+// previous buffer back into pool.
+func (b *WString) ResizeTo(c uint32) uint32 {
+	// already sufficient (or n is 0)
+	if c <= b.Cap() {
+		return b.Cap()
+	}
+
+	if c <= MinWStringCap {
+		c = MinWStringCap
+	}
+	// allocate at-least double buffer size, as is done in [bytes.Buffer] and other places
+	if c <= 2*b.Cap() {
+		c = 2 * b.Cap()
+	}
+
+	b2 := make([]uint16, c)
+	if !b.empty() {
+		copy(b2, b.b)
+		freeBuffer(b.b)
+	}
+	b.b = b2
+	return c
+}
+
+// Buffer returns the underlying []uint16 buffer.
+func (b *WString) Buffer() []uint16 {
+	if b.empty() {
+		return nil
+	}
+	return b.b
+}
+
+// Pointer returns a pointer to the first uint16 in the buffer.
+// If the [WString.Free] has already been called, the pointer will be nil.
+func (b *WString) Pointer() *uint16 {
+	if b.empty() {
+		return nil
+	}
+	return &b.b[0]
+}
+
+// String returns the returns the UTF-8 encoding of the UTF-16 string in the buffer.
+//
+// It assumes that the data is null-terminated.
+func (b *WString) String() string {
+	// Using [windows.UTF16ToString] would require importing "golang.org/x/sys/windows"
+	// and would make this code Windows-only, which makes no sense.
+	// So copy UTF16ToString code into here.
+	// If other windows-specific code is added, switch to [windows.UTF16ToString]
+
+	s := b.b
+	for i, v := range s {
+		if v == 0 {
+			s = s[:i]
+			break
+		}
+	}
+	return string(utf16.Decode(s))
+}
+
+// Cap returns the underlying buffer capacity.
+func (b *WString) Cap() uint32 {
+	if b.empty() {
+		return 0
+	}
+	return b.cap()
+}
+
+func (b *WString) cap() uint32 { return uint32(cap(b.b)) }
+func (b *WString) empty() bool { return b == nil || b.cap() == 0 }
diff -pruN 0.19.3+ds1-4/vendor/github.com/Microsoft/go-winio/pipe.go 0.21.3-0ubuntu1/vendor/github.com/Microsoft/go-winio/pipe.go
--- 0.19.3+ds1-4/vendor/github.com/Microsoft/go-winio/pipe.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/Microsoft/go-winio/pipe.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,586 @@
+//go:build windows
+// +build windows
+
+package winio
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"io"
+	"net"
+	"os"
+	"runtime"
+	"time"
+	"unsafe"
+
+	"golang.org/x/sys/windows"
+
+	"github.com/Microsoft/go-winio/internal/fs"
+)
+
+//sys connectNamedPipe(pipe windows.Handle, o *windows.Overlapped) (err error) = ConnectNamedPipe
+//sys createNamedPipe(name string, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *windows.SecurityAttributes) (handle windows.Handle, err error)  [failretval==windows.InvalidHandle] = CreateNamedPipeW
+//sys disconnectNamedPipe(pipe windows.Handle) (err error) = DisconnectNamedPipe
+//sys getNamedPipeInfo(pipe windows.Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) = GetNamedPipeInfo
+//sys getNamedPipeHandleState(pipe windows.Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) = GetNamedPipeHandleStateW
+//sys ntCreateNamedPipeFile(pipe *windows.Handle, access ntAccessMask, oa *objectAttributes, iosb *ioStatusBlock, share ntFileShareMode, disposition ntFileCreationDisposition, options ntFileOptions, typ uint32, readMode uint32, completionMode uint32, maxInstances uint32, inboundQuota uint32, outputQuota uint32, timeout *int64) (status ntStatus) = ntdll.NtCreateNamedPipeFile
+//sys rtlNtStatusToDosError(status ntStatus) (winerr error) = ntdll.RtlNtStatusToDosErrorNoTeb
+//sys rtlDosPathNameToNtPathName(name *uint16, ntName *unicodeString, filePart uintptr, reserved uintptr) (status ntStatus) = ntdll.RtlDosPathNameToNtPathName_U
+//sys rtlDefaultNpAcl(dacl *uintptr) (status ntStatus) = ntdll.RtlDefaultNpAcl
+
+type PipeConn interface {
+	net.Conn
+	Disconnect() error
+	Flush() error
+}
+
+// type aliases for mkwinsyscall code
+type (
+	ntAccessMask              = fs.AccessMask
+	ntFileShareMode           = fs.FileShareMode
+	ntFileCreationDisposition = fs.NTFileCreationDisposition
+	ntFileOptions             = fs.NTCreateOptions
+)
+
+type ioStatusBlock struct {
+	Status, Information uintptr
+}
+
+//	typedef struct _OBJECT_ATTRIBUTES {
+//	  ULONG           Length;
+//	  HANDLE          RootDirectory;
+//	  PUNICODE_STRING ObjectName;
+//	  ULONG           Attributes;
+//	  PVOID           SecurityDescriptor;
+//	  PVOID           SecurityQualityOfService;
+//	} OBJECT_ATTRIBUTES;
+//
+// https://learn.microsoft.com/en-us/windows/win32/api/ntdef/ns-ntdef-_object_attributes
+type objectAttributes struct {
+	Length             uintptr
+	RootDirectory      uintptr
+	ObjectName         *unicodeString
+	Attributes         uintptr
+	SecurityDescriptor *securityDescriptor
+	SecurityQoS        uintptr
+}
+
+type unicodeString struct {
+	Length        uint16
+	MaximumLength uint16
+	Buffer        uintptr
+}
+
+//	typedef struct _SECURITY_DESCRIPTOR {
+//	  BYTE                        Revision;
+//	  BYTE                        Sbz1;
+//	  SECURITY_DESCRIPTOR_CONTROL Control;
+//	  PSID                        Owner;
+//	  PSID                        Group;
+//	  PACL                        Sacl;
+//	  PACL                        Dacl;
+//	} SECURITY_DESCRIPTOR, *PISECURITY_DESCRIPTOR;
+//
+// https://learn.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-security_descriptor
+type securityDescriptor struct {
+	Revision byte
+	Sbz1     byte
+	Control  uint16
+	Owner    uintptr
+	Group    uintptr
+	Sacl     uintptr //revive:disable-line:var-naming SACL, not Sacl
+	Dacl     uintptr //revive:disable-line:var-naming DACL, not Dacl
+}
+
+type ntStatus int32
+
+func (status ntStatus) Err() error {
+	if status >= 0 {
+		return nil
+	}
+	return rtlNtStatusToDosError(status)
+}
+
+var (
+	// ErrPipeListenerClosed is returned for pipe operations on listeners that have been closed.
+	ErrPipeListenerClosed = net.ErrClosed
+
+	errPipeWriteClosed = errors.New("pipe has been closed for write")
+)
+
+type win32Pipe struct {
+	*win32File
+	path string
+}
+
+var _ PipeConn = (*win32Pipe)(nil)
+
+type win32MessageBytePipe struct {
+	win32Pipe
+	writeClosed bool
+	readEOF     bool
+}
+
+type pipeAddress string
+
+func (f *win32Pipe) LocalAddr() net.Addr {
+	return pipeAddress(f.path)
+}
+
+func (f *win32Pipe) RemoteAddr() net.Addr {
+	return pipeAddress(f.path)
+}
+
+func (f *win32Pipe) SetDeadline(t time.Time) error {
+	if err := f.SetReadDeadline(t); err != nil {
+		return err
+	}
+	return f.SetWriteDeadline(t)
+}
+
+func (f *win32Pipe) Disconnect() error {
+	return disconnectNamedPipe(f.win32File.handle)
+}
+
+// CloseWrite closes the write side of a message pipe in byte mode.
+func (f *win32MessageBytePipe) CloseWrite() error {
+	if f.writeClosed {
+		return errPipeWriteClosed
+	}
+	err := f.win32File.Flush()
+	if err != nil {
+		return err
+	}
+	_, err = f.win32File.Write(nil)
+	if err != nil {
+		return err
+	}
+	f.writeClosed = true
+	return nil
+}
+
+// Write writes bytes to a message pipe in byte mode. Zero-byte writes are ignored, since
+// they are used to implement CloseWrite().
+func (f *win32MessageBytePipe) Write(b []byte) (int, error) {
+	if f.writeClosed {
+		return 0, errPipeWriteClosed
+	}
+	if len(b) == 0 {
+		return 0, nil
+	}
+	return f.win32File.Write(b)
+}
+
+// Read reads bytes from a message pipe in byte mode. A read of a zero-byte message on a message
+// mode pipe will return io.EOF, as will all subsequent reads.
+func (f *win32MessageBytePipe) Read(b []byte) (int, error) {
+	if f.readEOF {
+		return 0, io.EOF
+	}
+	n, err := f.win32File.Read(b)
+	if err == io.EOF { //nolint:errorlint
+		// If this was the result of a zero-byte read, then
+		// it is possible that the read was due to a zero-size
+		// message. Since we are simulating CloseWrite with a
+		// zero-byte message, ensure that all future Read() calls
+		// also return EOF.
+		f.readEOF = true
+	} else if err == windows.ERROR_MORE_DATA { //nolint:errorlint // err is Errno
+		// ERROR_MORE_DATA indicates that the pipe's read mode is message mode
+		// and the message still has more bytes. Treat this as a success, since
+		// this package presents all named pipes as byte streams.
+		err = nil
+	}
+	return n, err
+}
+
+func (pipeAddress) Network() string {
+	return "pipe"
+}
+
+func (s pipeAddress) String() string {
+	return string(s)
+}
+
+// tryDialPipe attempts to dial the pipe at `path` until `ctx` cancellation or timeout.
+func tryDialPipe(ctx context.Context, path *string, access fs.AccessMask, impLevel PipeImpLevel) (windows.Handle, error) {
+	for {
+		select {
+		case <-ctx.Done():
+			return windows.Handle(0), ctx.Err()
+		default:
+			h, err := fs.CreateFile(*path,
+				access,
+				0,   // mode
+				nil, // security attributes
+				fs.OPEN_EXISTING,
+				fs.FILE_FLAG_OVERLAPPED|fs.SECURITY_SQOS_PRESENT|fs.FileSQSFlag(impLevel),
+				0, // template file handle
+			)
+			if err == nil {
+				return h, nil
+			}
+			if err != windows.ERROR_PIPE_BUSY { //nolint:errorlint // err is Errno
+				return h, &os.PathError{Err: err, Op: "open", Path: *path}
+			}
+			// Wait 10 msec and try again. This is a rather simplistic
+			// view, as we always try each 10 milliseconds.
+			time.Sleep(10 * time.Millisecond)
+		}
+	}
+}
+
+// DialPipe connects to a named pipe by path, timing out if the connection
+// takes longer than the specified duration. If timeout is nil, then we use
+// a default timeout of 2 seconds.  (We do not use WaitNamedPipe.)
+func DialPipe(path string, timeout *time.Duration) (net.Conn, error) {
+	var absTimeout time.Time
+	if timeout != nil {
+		absTimeout = time.Now().Add(*timeout)
+	} else {
+		absTimeout = time.Now().Add(2 * time.Second)
+	}
+	ctx, cancel := context.WithDeadline(context.Background(), absTimeout)
+	defer cancel()
+	conn, err := DialPipeContext(ctx, path)
+	if errors.Is(err, context.DeadlineExceeded) {
+		return nil, ErrTimeout
+	}
+	return conn, err
+}
+
+// DialPipeContext attempts to connect to a named pipe by `path` until `ctx`
+// cancellation or timeout.
+func DialPipeContext(ctx context.Context, path string) (net.Conn, error) {
+	return DialPipeAccess(ctx, path, uint32(fs.GENERIC_READ|fs.GENERIC_WRITE))
+}
+
+// PipeImpLevel is an enumeration of impersonation levels that may be set
+// when calling DialPipeAccessImpersonation.
+type PipeImpLevel uint32
+
+const (
+	PipeImpLevelAnonymous      = PipeImpLevel(fs.SECURITY_ANONYMOUS)
+	PipeImpLevelIdentification = PipeImpLevel(fs.SECURITY_IDENTIFICATION)
+	PipeImpLevelImpersonation  = PipeImpLevel(fs.SECURITY_IMPERSONATION)
+	PipeImpLevelDelegation     = PipeImpLevel(fs.SECURITY_DELEGATION)
+)
+
+// DialPipeAccess attempts to connect to a named pipe by `path` with `access` until `ctx`
+// cancellation or timeout.
+func DialPipeAccess(ctx context.Context, path string, access uint32) (net.Conn, error) {
+	return DialPipeAccessImpLevel(ctx, path, access, PipeImpLevelAnonymous)
+}
+
+// DialPipeAccessImpLevel attempts to connect to a named pipe by `path` with
+// `access` at `impLevel` until `ctx` cancellation or timeout. The other
+// DialPipe* implementations use PipeImpLevelAnonymous.
+func DialPipeAccessImpLevel(ctx context.Context, path string, access uint32, impLevel PipeImpLevel) (net.Conn, error) {
+	var err error
+	var h windows.Handle
+	h, err = tryDialPipe(ctx, &path, fs.AccessMask(access), impLevel)
+	if err != nil {
+		return nil, err
+	}
+
+	var flags uint32
+	err = getNamedPipeInfo(h, &flags, nil, nil, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	f, err := makeWin32File(h)
+	if err != nil {
+		windows.Close(h)
+		return nil, err
+	}
+
+	// If the pipe is in message mode, return a message byte pipe, which
+	// supports CloseWrite().
+	if flags&windows.PIPE_TYPE_MESSAGE != 0 {
+		return &win32MessageBytePipe{
+			win32Pipe: win32Pipe{win32File: f, path: path},
+		}, nil
+	}
+	return &win32Pipe{win32File: f, path: path}, nil
+}
+
+type acceptResponse struct {
+	f   *win32File
+	err error
+}
+
+type win32PipeListener struct {
+	firstHandle windows.Handle
+	path        string
+	config      PipeConfig
+	acceptCh    chan (chan acceptResponse)
+	closeCh     chan int
+	doneCh      chan int
+}
+
+func makeServerPipeHandle(path string, sd []byte, c *PipeConfig, first bool) (windows.Handle, error) {
+	path16, err := windows.UTF16FromString(path)
+	if err != nil {
+		return 0, &os.PathError{Op: "open", Path: path, Err: err}
+	}
+
+	var oa objectAttributes
+	oa.Length = unsafe.Sizeof(oa)
+
+	var ntPath unicodeString
+	if err := rtlDosPathNameToNtPathName(&path16[0],
+		&ntPath,
+		0,
+		0,
+	).Err(); err != nil {
+		return 0, &os.PathError{Op: "open", Path: path, Err: err}
+	}
+	defer windows.LocalFree(windows.Handle(ntPath.Buffer)) //nolint:errcheck
+	oa.ObjectName = &ntPath
+	oa.Attributes = windows.OBJ_CASE_INSENSITIVE
+
+	// The security descriptor is only needed for the first pipe.
+	if first {
+		if sd != nil {
+			//todo: does `sdb` need to be allocated on the heap, or can go allocate it?
+			l := uint32(len(sd))
+			sdb, err := windows.LocalAlloc(0, l)
+			if err != nil {
+				return 0, fmt.Errorf("LocalAlloc for security descriptor with of length %d: %w", l, err)
+			}
+			defer windows.LocalFree(windows.Handle(sdb)) //nolint:errcheck
+			copy((*[0xffff]byte)(unsafe.Pointer(sdb))[:], sd)
+			oa.SecurityDescriptor = (*securityDescriptor)(unsafe.Pointer(sdb))
+		} else {
+			// Construct the default named pipe security descriptor.
+			var dacl uintptr
+			if err := rtlDefaultNpAcl(&dacl).Err(); err != nil {
+				return 0, fmt.Errorf("getting default named pipe ACL: %w", err)
+			}
+			defer windows.LocalFree(windows.Handle(dacl)) //nolint:errcheck
+
+			sdb := &securityDescriptor{
+				Revision: 1,
+				Control:  windows.SE_DACL_PRESENT,
+				Dacl:     dacl,
+			}
+			oa.SecurityDescriptor = sdb
+		}
+	}
+
+	typ := uint32(windows.FILE_PIPE_REJECT_REMOTE_CLIENTS)
+	if c.MessageMode {
+		typ |= windows.FILE_PIPE_MESSAGE_TYPE
+	}
+
+	disposition := fs.FILE_OPEN
+	access := fs.GENERIC_READ | fs.GENERIC_WRITE | fs.SYNCHRONIZE
+	if first {
+		disposition = fs.FILE_CREATE
+		// By not asking for read or write access, the named pipe file system
+		// will put this pipe into an initially disconnected state, blocking
+		// client connections until the next call with first == false.
+		access = fs.SYNCHRONIZE
+	}
+
+	timeout := int64(-50 * 10000) // 50ms
+
+	var (
+		h    windows.Handle
+		iosb ioStatusBlock
+	)
+	err = ntCreateNamedPipeFile(&h,
+		access,
+		&oa,
+		&iosb,
+		fs.FILE_SHARE_READ|fs.FILE_SHARE_WRITE,
+		disposition,
+		0,
+		typ,
+		0,
+		0,
+		0xffffffff,
+		uint32(c.InputBufferSize),
+		uint32(c.OutputBufferSize),
+		&timeout).Err()
+	if err != nil {
+		return 0, &os.PathError{Op: "open", Path: path, Err: err}
+	}
+
+	runtime.KeepAlive(ntPath)
+	return h, nil
+}
+
+func (l *win32PipeListener) makeServerPipe() (*win32File, error) {
+	h, err := makeServerPipeHandle(l.path, nil, &l.config, false)
+	if err != nil {
+		return nil, err
+	}
+	f, err := makeWin32File(h)
+	if err != nil {
+		windows.Close(h)
+		return nil, err
+	}
+	return f, nil
+}
+
+func (l *win32PipeListener) makeConnectedServerPipe() (*win32File, error) {
+	p, err := l.makeServerPipe()
+	if err != nil {
+		return nil, err
+	}
+
+	// Wait for the client to connect.
+	ch := make(chan error)
+	go func(p *win32File) {
+		ch <- connectPipe(p)
+	}(p)
+
+	select {
+	case err = <-ch:
+		if err != nil {
+			p.Close()
+			p = nil
+		}
+	case <-l.closeCh:
+		// Abort the connect request by closing the handle.
+		p.Close()
+		p = nil
+		err = <-ch
+		if err == nil || err == ErrFileClosed { //nolint:errorlint // err is Errno
+			err = ErrPipeListenerClosed
+		}
+	}
+	return p, err
+}
+
+func (l *win32PipeListener) listenerRoutine() {
+	closed := false
+	for !closed {
+		select {
+		case <-l.closeCh:
+			closed = true
+		case responseCh := <-l.acceptCh:
+			var (
+				p   *win32File
+				err error
+			)
+			for {
+				p, err = l.makeConnectedServerPipe()
+				// If the connection was immediately closed by the client, try
+				// again.
+				if err != windows.ERROR_NO_DATA { //nolint:errorlint // err is Errno
+					break
+				}
+			}
+			responseCh <- acceptResponse{p, err}
+			closed = err == ErrPipeListenerClosed //nolint:errorlint // err is Errno
+		}
+	}
+	windows.Close(l.firstHandle)
+	l.firstHandle = 0
+	// Notify Close() and Accept() callers that the handle has been closed.
+	close(l.doneCh)
+}
+
+// PipeConfig contain configuration for the pipe listener.
+type PipeConfig struct {
+	// SecurityDescriptor contains a Windows security descriptor in SDDL format.
+	SecurityDescriptor string
+
+	// MessageMode determines whether the pipe is in byte or message mode. In either
+	// case the pipe is read in byte mode by default. The only practical difference in
+	// this implementation is that CloseWrite() is only supported for message mode pipes;
+	// CloseWrite() is implemented as a zero-byte write, but zero-byte writes are only
+	// transferred to the reader (and returned as io.EOF in this implementation)
+	// when the pipe is in message mode.
+	MessageMode bool
+
+	// InputBufferSize specifies the size of the input buffer, in bytes.
+	InputBufferSize int32
+
+	// OutputBufferSize specifies the size of the output buffer, in bytes.
+	OutputBufferSize int32
+}
+
+// ListenPipe creates a listener on a Windows named pipe path, e.g. \\.\pipe\mypipe.
+// The pipe must not already exist.
+func ListenPipe(path string, c *PipeConfig) (net.Listener, error) {
+	var (
+		sd  []byte
+		err error
+	)
+	if c == nil {
+		c = &PipeConfig{}
+	}
+	if c.SecurityDescriptor != "" {
+		sd, err = SddlToSecurityDescriptor(c.SecurityDescriptor)
+		if err != nil {
+			return nil, err
+		}
+	}
+	h, err := makeServerPipeHandle(path, sd, c, true)
+	if err != nil {
+		return nil, err
+	}
+	l := &win32PipeListener{
+		firstHandle: h,
+		path:        path,
+		config:      *c,
+		acceptCh:    make(chan (chan acceptResponse)),
+		closeCh:     make(chan int),
+		doneCh:      make(chan int),
+	}
+	go l.listenerRoutine()
+	return l, nil
+}
+
+func connectPipe(p *win32File) error {
+	c, err := p.prepareIO()
+	if err != nil {
+		return err
+	}
+	defer p.wg.Done()
+
+	err = connectNamedPipe(p.handle, &c.o)
+	_, err = p.asyncIO(c, nil, 0, err)
+	if err != nil && err != windows.ERROR_PIPE_CONNECTED { //nolint:errorlint // err is Errno
+		return err
+	}
+	return nil
+}
+
+func (l *win32PipeListener) Accept() (net.Conn, error) {
+	ch := make(chan acceptResponse)
+	select {
+	case l.acceptCh <- ch:
+		response := <-ch
+		err := response.err
+		if err != nil {
+			return nil, err
+		}
+		if l.config.MessageMode {
+			return &win32MessageBytePipe{
+				win32Pipe: win32Pipe{win32File: response.f, path: l.path},
+			}, nil
+		}
+		return &win32Pipe{win32File: response.f, path: l.path}, nil
+	case <-l.doneCh:
+		return nil, ErrPipeListenerClosed
+	}
+}
+
+func (l *win32PipeListener) Close() error {
+	select {
+	case l.closeCh <- 1:
+		<-l.doneCh
+	case <-l.doneCh:
+	}
+	return nil
+}
+
+func (l *win32PipeListener) Addr() net.Addr {
+	return pipeAddress(l.path)
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/Microsoft/go-winio/pkg/guid/guid.go 0.21.3-0ubuntu1/vendor/github.com/Microsoft/go-winio/pkg/guid/guid.go
--- 0.19.3+ds1-4/vendor/github.com/Microsoft/go-winio/pkg/guid/guid.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/Microsoft/go-winio/pkg/guid/guid.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,232 @@
+// Package guid provides a GUID type. The backing structure for a GUID is
+// identical to that used by the golang.org/x/sys/windows GUID type.
+// There are two main binary encodings used for a GUID, the big-endian encoding,
+// and the Windows (mixed-endian) encoding. See here for details:
+// https://en.wikipedia.org/wiki/Universally_unique_identifier#Encoding
+package guid
+
+import (
+	"crypto/rand"
+	"crypto/sha1" //nolint:gosec // not used for secure application
+	"encoding"
+	"encoding/binary"
+	"fmt"
+	"strconv"
+)
+
+//go:generate go run golang.org/x/tools/cmd/stringer -type=Variant -trimprefix=Variant -linecomment
+
+// Variant specifies which GUID variant (or "type") of the GUID. It determines
+// how the entirety of the rest of the GUID is interpreted.
+type Variant uint8
+
+// The variants specified by RFC 4122 section 4.1.1.
+const (
+	// VariantUnknown specifies a GUID variant which does not conform to one of
+	// the variant encodings specified in RFC 4122.
+	VariantUnknown Variant = iota
+	VariantNCS       // high bit of Data4[0] is 0 (0xxxxxxx)
+	VariantRFC4122   // RFC 4122 — top bits of Data4[0] are 10xxxxxx
+	VariantMicrosoft // top bits of Data4[0] are 110xxxxx
+	VariantFuture    // top bits of Data4[0] are 111xxxxx
+)
+
+// Version specifies how the bits in the GUID were generated. For instance, a
+// version 4 GUID is randomly generated, and a version 5 is generated from the
+// hash of an input string.
+type Version uint8
+
+func (v Version) String() string {
+	return strconv.FormatUint(uint64(v), 10) // versions render as plain decimal, e.g. "4"
+}
+
+var _ = (encoding.TextMarshaler)(GUID{})    // compile-time interface conformance checks
+var _ = (encoding.TextUnmarshaler)(&GUID{})
+
+// NewV4 returns a new version 4 (pseudorandom) GUID, as defined by RFC 4122.
+func NewV4() (GUID, error) {
+	var b [16]byte
+	if _, err := rand.Read(b[:]); err != nil { // crypto/rand; fails only if the OS entropy source does
+		return GUID{}, err
+	}
+
+	g := FromArray(b)
+	g.setVersion(4) // Version 4 means randomly generated.
+	g.setVariant(VariantRFC4122) // stamp the RFC 4122 variant bits into Data4[0]
+
+	return g, nil
+}
+
+// NewV5 returns a new version 5 (generated from a string via SHA-1 hashing)
+// GUID, as defined by RFC 4122. The RFC is unclear on the encoding of the name,
+// and the sample code treats it as a series of bytes, so we do the same here.
+//
+// Some implementations, such as those found on Windows, treat the name as a
+// big-endian UTF16 stream of bytes. If that is desired, the string can be
+// encoded as such before being passed to this function.
+func NewV5(namespace GUID, name []byte) (GUID, error) {
+	b := sha1.New() //nolint:gosec // not used for secure application
+	namespaceBytes := namespace.ToArray() // big-endian encoding, per RFC 4122
+	b.Write(namespaceBytes[:])
+	b.Write(name) // hash input is namespace bytes followed by the raw name bytes
+
+	a := [16]byte{}
+	copy(a[:], b.Sum(nil)) // SHA-1 yields 20 bytes; keep the first 16
+
+	g := FromArray(a)
+	g.setVersion(5) // Version 5 means generated from a string.
+	g.setVariant(VariantRFC4122)
+
+	return g, nil
+}
+
+func fromArray(b [16]byte, order binary.ByteOrder) GUID { // order selects big-endian (RFC) vs little-endian (Windows) field layout
+	var g GUID
+	g.Data1 = order.Uint32(b[0:4])
+	g.Data2 = order.Uint16(b[4:6])
+	g.Data3 = order.Uint16(b[6:8])
+	copy(g.Data4[:], b[8:16]) // Data4 is a plain byte array; byte order does not apply
+	return g
+}
+
+func (g GUID) toArray(order binary.ByteOrder) [16]byte { // inverse of fromArray
+	b := [16]byte{}
+	order.PutUint32(b[0:4], g.Data1)
+	order.PutUint16(b[4:6], g.Data2)
+	order.PutUint16(b[6:8], g.Data3)
+	copy(b[8:16], g.Data4[:])
+	return b
+}
+
+// FromArray constructs a GUID from a big-endian encoding array of 16 bytes.
+func FromArray(b [16]byte) GUID {
+	return fromArray(b, binary.BigEndian)
+}
+
+// ToArray returns an array of 16 bytes representing the GUID in big-endian
+// encoding.
+func (g GUID) ToArray() [16]byte {
+	return g.toArray(binary.BigEndian)
+}
+
+// FromWindowsArray constructs a GUID from a Windows encoding array of bytes.
+func FromWindowsArray(b [16]byte) GUID {
+	return fromArray(b, binary.LittleEndian) // Windows uses the mixed-endian layout (see package doc)
+}
+
+// ToWindowsArray returns an array of 16 bytes representing the GUID in Windows
+// encoding.
+func (g GUID) ToWindowsArray() [16]byte {
+	return g.toArray(binary.LittleEndian)
+}
+
+func (g GUID) String() string { // String renders the canonical lowercase 8-4-4-4-12 hex form.
+	return fmt.Sprintf(
+		"%08x-%04x-%04x-%04x-%012x",
+		g.Data1,
+		g.Data2,
+		g.Data3,
+		g.Data4[:2], // fourth group: first two Data4 bytes
+		g.Data4[2:]) // final group: remaining six Data4 bytes
+}
+
+// FromString parses a string containing a GUID and returns the GUID. The only
+// format currently supported is the `xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx`
+// format.
+func FromString(s string) (GUID, error) {
+	if len(s) != 36 { // 32 hex digits + 4 dashes
+		return GUID{}, fmt.Errorf("invalid GUID %q", s)
+	}
+	if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' { // dashes at the fixed canonical positions
+		return GUID{}, fmt.Errorf("invalid GUID %q", s)
+	}
+
+	var g GUID
+
+	data1, err := strconv.ParseUint(s[0:8], 16, 32)
+	if err != nil {
+		return GUID{}, fmt.Errorf("invalid GUID %q", s)
+	}
+	g.Data1 = uint32(data1)
+
+	data2, err := strconv.ParseUint(s[9:13], 16, 16)
+	if err != nil {
+		return GUID{}, fmt.Errorf("invalid GUID %q", s)
+	}
+	g.Data2 = uint16(data2)
+
+	data3, err := strconv.ParseUint(s[14:18], 16, 16)
+	if err != nil {
+		return GUID{}, fmt.Errorf("invalid GUID %q", s)
+	}
+	g.Data3 = uint16(data3)
+
+	for i, x := range []int{19, 21, 24, 26, 28, 30, 32, 34} { // byte offsets of the last two groups, skipping the dash at s[23]
+		v, err := strconv.ParseUint(s[x:x+2], 16, 8)
+		if err != nil {
+			return GUID{}, fmt.Errorf("invalid GUID %q", s)
+		}
+		g.Data4[i] = uint8(v)
+	}
+
+	return g, nil
+}
+
+func (g *GUID) setVariant(v Variant) { // setVariant stamps the variant bit pattern into the high bits of Data4[0].
+	d := g.Data4[0]
+	switch v {
+	case VariantNCS:
+		d = (d & 0x7f) // 0xxxxxxx
+	case VariantRFC4122:
+		d = (d & 0x3f) | 0x80 // 10xxxxxx
+	case VariantMicrosoft:
+		d = (d & 0x1f) | 0xc0 // 110xxxxx
+	case VariantFuture:
+		d = (d & 0x0f) | 0xe0 // 111xxxxx
+	case VariantUnknown:
+		fallthrough
+	default:
+		panic(fmt.Sprintf("invalid variant: %d", v)) // programmer error: only the RFC 4122 variants are settable
+	}
+	g.Data4[0] = d
+}
+
+// Variant returns the GUID variant, as defined in RFC 4122.
+func (g GUID) Variant() Variant {
+	b := g.Data4[0]
+	if b&0x80 == 0 { // checks mirror the bit patterns written by setVariant
+		return VariantNCS
+	} else if b&0xc0 == 0x80 {
+		return VariantRFC4122
+	} else if b&0xe0 == 0xc0 {
+		return VariantMicrosoft
+	} else if b&0xe0 == 0xe0 {
+		return VariantFuture
+	}
+	return VariantUnknown
+}
+
+func (g *GUID) setVersion(v Version) { // the version occupies the top 4 bits of Data3
+	g.Data3 = (g.Data3 & 0x0fff) | (uint16(v) << 12)
+}
+
+// Version returns the GUID version, as defined in RFC 4122.
+func (g GUID) Version() Version {
+	return Version((g.Data3 & 0xF000) >> 12) // inverse of setVersion
+}
+
+// MarshalText returns the textual representation of the GUID.
+func (g GUID) MarshalText() ([]byte, error) { // implements encoding.TextMarshaler; never returns an error
+	return []byte(g.String()), nil
+}
+
+// UnmarshalText takes the textual representation of a GUID, and unmarshals it
+// into this GUID.
+func (g *GUID) UnmarshalText(text []byte) error { // implements encoding.TextUnmarshaler
+	g2, err := FromString(string(text))
+	if err != nil {
+		return err
+	}
+	*g = g2 // only assign on success, leaving g untouched on error
+	return nil
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/Microsoft/go-winio/pkg/guid/guid_nonwindows.go 0.21.3-0ubuntu1/vendor/github.com/Microsoft/go-winio/pkg/guid/guid_nonwindows.go
--- 0.19.3+ds1-4/vendor/github.com/Microsoft/go-winio/pkg/guid/guid_nonwindows.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/Microsoft/go-winio/pkg/guid/guid_nonwindows.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,16 @@
+//go:build !windows
+// +build !windows
+
+package guid
+
+// GUID represents a GUID/UUID. It has the same structure as
+// golang.org/x/sys/windows.GUID so that it can be used with functions expecting
+// that type. It is defined as its own type as that is only available to builds
+// targeted at `windows`. The representation matches that used by native Windows
+// code.
+type GUID struct {
+	Data1 uint32
+	Data2 uint16
+	Data3 uint16  // top 4 bits carry the version (see setVersion in guid.go)
+	Data4 [8]byte // Data4[0] carries the variant bits (see setVariant in guid.go)
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/Microsoft/go-winio/pkg/guid/guid_windows.go 0.21.3-0ubuntu1/vendor/github.com/Microsoft/go-winio/pkg/guid/guid_windows.go
--- 0.19.3+ds1-4/vendor/github.com/Microsoft/go-winio/pkg/guid/guid_windows.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/Microsoft/go-winio/pkg/guid/guid_windows.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,13 @@
+//go:build windows
+// +build windows
+
+package guid
+
+import "golang.org/x/sys/windows"
+
+// GUID represents a GUID/UUID. It has the same structure as
+// golang.org/x/sys/windows.GUID so that it can be used with functions expecting
+// that type. It is defined as its own type so that stringification and
+// marshaling can be supported. The representation matches that used by native
+// Windows code.
+type GUID windows.GUID // named type so this package's methods attach to it; convertible to/from windows.GUID
diff -pruN 0.19.3+ds1-4/vendor/github.com/Microsoft/go-winio/pkg/guid/variant_string.go 0.21.3-0ubuntu1/vendor/github.com/Microsoft/go-winio/pkg/guid/variant_string.go
--- 0.19.3+ds1-4/vendor/github.com/Microsoft/go-winio/pkg/guid/variant_string.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/Microsoft/go-winio/pkg/guid/variant_string.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,27 @@
+// Code generated by "stringer -type=Variant -trimprefix=Variant -linecomment"; DO NOT EDIT.
+
+package guid
+
+import "strconv"
+
+func _() {
+	// An "invalid array index" compiler error signifies that the constant values have changed.
+	// Re-run the stringer command to generate them again.
+	var x [1]struct{}
+	_ = x[VariantUnknown-0]
+	_ = x[VariantNCS-1]
+	_ = x[VariantRFC4122-2]
+	_ = x[VariantMicrosoft-3]
+	_ = x[VariantFuture-4]
+}
+
+const _Variant_name = "UnknownNCSRFC 4122MicrosoftFuture"
+
+var _Variant_index = [...]uint8{0, 7, 10, 18, 27, 33}
+
+func (i Variant) String() string {
+	if i >= Variant(len(_Variant_index)-1) {
+		return "Variant(" + strconv.FormatInt(int64(i), 10) + ")"
+	}
+	return _Variant_name[_Variant_index[i]:_Variant_index[i+1]]
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/Microsoft/go-winio/privilege.go 0.21.3-0ubuntu1/vendor/github.com/Microsoft/go-winio/privilege.go
--- 0.19.3+ds1-4/vendor/github.com/Microsoft/go-winio/privilege.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/Microsoft/go-winio/privilege.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,196 @@
+//go:build windows
+// +build windows
+
+package winio
+
+import (
+	"bytes"
+	"encoding/binary"
+	"fmt"
+	"runtime"
+	"sync"
+	"unicode/utf16"
+
+	"golang.org/x/sys/windows"
+)
+
+//sys adjustTokenPrivileges(token windows.Token, releaseAll bool, input *byte, outputSize uint32, output *byte, requiredSize *uint32) (success bool, err error) [true] = advapi32.AdjustTokenPrivileges
+//sys impersonateSelf(level uint32) (err error) = advapi32.ImpersonateSelf
+//sys revertToSelf() (err error) = advapi32.RevertToSelf
+//sys openThreadToken(thread windows.Handle, accessMask uint32, openAsSelf bool, token *windows.Token) (err error) = advapi32.OpenThreadToken
+//sys getCurrentThread() (h windows.Handle) = GetCurrentThread
+//sys lookupPrivilegeValue(systemName string, name string, luid *uint64) (err error) = advapi32.LookupPrivilegeValueW
+//sys lookupPrivilegeName(systemName string, luid *uint64, buffer *uint16, size *uint32) (err error) = advapi32.LookupPrivilegeNameW
+//sys lookupPrivilegeDisplayName(systemName string, name *uint16, buffer *uint16, size *uint32, languageId *uint32) (err error) = advapi32.LookupPrivilegeDisplayNameW
+
+const (
+	//revive:disable-next-line:var-naming ALL_CAPS
+	SE_PRIVILEGE_ENABLED = windows.SE_PRIVILEGE_ENABLED
+
+	//revive:disable-next-line:var-naming ALL_CAPS
+	ERROR_NOT_ALL_ASSIGNED windows.Errno = windows.ERROR_NOT_ALL_ASSIGNED
+
+	SeBackupPrivilege   = "SeBackupPrivilege"
+	SeRestorePrivilege  = "SeRestorePrivilege"
+	SeSecurityPrivilege = "SeSecurityPrivilege"
+)
+
+var (
+	privNames     = make(map[string]uint64)
+	privNameMutex sync.Mutex
+)
+
+// PrivilegeError represents an error enabling privileges.
+type PrivilegeError struct {
+	privileges []uint64 // LUIDs of the privileges that could not be enabled
+}
+
+func (e *PrivilegeError) Error() string {
+	s := "Could not enable privilege "
+	if len(e.privileges) > 1 {
+		s = "Could not enable privileges " // pluralized form
+	}
+	for i, p := range e.privileges {
+		if i != 0 {
+			s += ", "
+		}
+		s += `"`
+		s += getPrivilegeName(p) // best-effort LUID -> human-readable name
+		s += `"`
+	}
+	return s
+}
+
+// RunWithPrivilege enables a single privilege for a function call.
+func RunWithPrivilege(name string, fn func() error) error {
+	return RunWithPrivileges([]string{name}, fn)
+}
+
+// RunWithPrivileges enables privileges for a function call.
+func RunWithPrivileges(names []string, fn func() error) error {
+	privileges, err := mapPrivileges(names) // resolve privilege names to LUIDs first
+	if err != nil {
+		return err
+	}
+	runtime.LockOSThread() // the impersonation token is per OS thread, so pin the goroutine
+	defer runtime.UnlockOSThread()
+	token, err := newThreadToken()
+	if err != nil {
+		return err
+	}
+	defer releaseThreadToken(token) // reverts impersonation and closes the token
+	err = adjustPrivileges(token, privileges, SE_PRIVILEGE_ENABLED)
+	if err != nil {
+		return err
+	}
+	return fn() // run the callback with the privileges enabled on this thread
+}
+
+func mapPrivileges(names []string) ([]uint64, error) { // mapPrivileges resolves privilege names to LUIDs, caching results.
+	privileges := make([]uint64, 0, len(names))
+	privNameMutex.Lock() // guards the shared privNames cache
+	defer privNameMutex.Unlock()
+	for _, name := range names {
+		p, ok := privNames[name]
+		if !ok {
+			err := lookupPrivilegeValue("", name, &p) // "" = look up on the local system
+			if err != nil {
+				return nil, err
+			}
+			privNames[name] = p // cache for subsequent calls
+		}
+		privileges = append(privileges, p)
+	}
+	return privileges, nil
+}
+
+// EnableProcessPrivileges enables privileges globally for the process.
+func EnableProcessPrivileges(names []string) error {
+	return enableDisableProcessPrivilege(names, SE_PRIVILEGE_ENABLED)
+}
+
+// DisableProcessPrivileges disables privileges globally for the process.
+func DisableProcessPrivileges(names []string) error {
+	return enableDisableProcessPrivilege(names, 0) // attribute 0 = disabled
+}
+
+func enableDisableProcessPrivilege(names []string, action uint32) error { // action is the privilege attribute: SE_PRIVILEGE_ENABLED or 0
+	privileges, err := mapPrivileges(names)
+	if err != nil {
+		return err
+	}
+
+	p := windows.CurrentProcess() // pseudo-handle; does not need closing
+	var token windows.Token
+	err = windows.OpenProcessToken(p, windows.TOKEN_ADJUST_PRIVILEGES|windows.TOKEN_QUERY, &token)
+	if err != nil {
+		return err
+	}
+
+	defer token.Close()
+	return adjustPrivileges(token, privileges, action)
+}
+
+func adjustPrivileges(token windows.Token, privileges []uint64, action uint32) error { // serializes a TOKEN_PRIVILEGES struct and calls AdjustTokenPrivileges
+	var b bytes.Buffer
+	_ = binary.Write(&b, binary.LittleEndian, uint32(len(privileges))) // PrivilegeCount
+	for _, p := range privileges {
+		_ = binary.Write(&b, binary.LittleEndian, p)      // LUID
+		_ = binary.Write(&b, binary.LittleEndian, action) // Attributes
+	}
+	prevState := make([]byte, b.Len()) // previous-state buffer, same size as the input
+	reqSize := uint32(0)
+	success, err := adjustTokenPrivileges(token, false, &b.Bytes()[0], uint32(len(prevState)), &prevState[0], &reqSize)
+	if !success {
+		return err
+	}
+	if err == ERROR_NOT_ALL_ASSIGNED { //nolint:errorlint // err is Errno; the call can "succeed" yet report that some privileges were not granted
+		return &PrivilegeError{privileges}
+	}
+	return nil
+}
+
+func getPrivilegeName(luid uint64) string { // getPrivilegeName maps a LUID to a display name, degrading gracefully on failure.
+	var nameBuffer [256]uint16 // sizes are in UTF-16 code units
+	bufSize := uint32(len(nameBuffer))
+	err := lookupPrivilegeName("", &luid, &nameBuffer[0], &bufSize)
+	if err != nil {
+		return fmt.Sprintf("<unknown privilege %d>", luid) // fall back to the raw LUID
+	}
+
+	var displayNameBuffer [256]uint16
+	displayBufSize := uint32(len(displayNameBuffer))
+	var langID uint32
+	err = lookupPrivilegeDisplayName("", &nameBuffer[0], &displayNameBuffer[0], &displayBufSize, &langID)
+	if err != nil {
+		return fmt.Sprintf("<unknown privilege %s>", string(utf16.Decode(nameBuffer[:bufSize]))) // fall back to the internal name
+	}
+
+	return string(utf16.Decode(displayNameBuffer[:displayBufSize]))
+}
+
+func newThreadToken() (windows.Token, error) { // newThreadToken impersonates self and opens the resulting thread token.
+	err := impersonateSelf(windows.SecurityImpersonation)
+	if err != nil {
+		return 0, err
+	}
+
+	var token windows.Token
+	err = openThreadToken(getCurrentThread(), windows.TOKEN_ADJUST_PRIVILEGES|windows.TOKEN_QUERY, false, &token)
+	if err != nil {
+		rerr := revertToSelf() // undo the impersonation before reporting the open failure
+		if rerr != nil {
+			panic(rerr) // cannot safely continue while still impersonating
+		}
+		return 0, err
+	}
+	return token, nil
+}
+
+func releaseThreadToken(h windows.Token) { // counterpart of newThreadToken: revert impersonation and close the token.
+	err := revertToSelf()
+	if err != nil {
+		panic(err) // failing to revert leaves the thread impersonating; unrecoverable
+	}
+	h.Close()
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/Microsoft/go-winio/reparse.go 0.21.3-0ubuntu1/vendor/github.com/Microsoft/go-winio/reparse.go
--- 0.19.3+ds1-4/vendor/github.com/Microsoft/go-winio/reparse.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/Microsoft/go-winio/reparse.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,131 @@
+//go:build windows
+// +build windows
+
+package winio
+
+import (
+	"bytes"
+	"encoding/binary"
+	"fmt"
+	"strings"
+	"unicode/utf16"
+	"unsafe"
+)
+
+const (
+	reparseTagMountPoint = 0xA0000003 // IO_REPARSE_TAG_MOUNT_POINT
+	reparseTagSymlink    = 0xA000000C // IO_REPARSE_TAG_SYMLINK
+)
+
+type reparseDataBuffer struct { // fixed-size header of the Win32 REPARSE_DATA_BUFFER (path data follows)
+	ReparseTag           uint32
+	ReparseDataLength    uint16 // length of the data after Reserved
+	Reserved             uint16
+	SubstituteNameOffset uint16 // offsets/lengths below are in bytes, relative to the path buffer
+	SubstituteNameLength uint16
+	PrintNameOffset      uint16
+	PrintNameLength      uint16
+}
+
+// ReparsePoint describes a Win32 symlink or mount point.
+type ReparsePoint struct {
+	Target       string // link target path
+	IsMountPoint bool   // true for a mount point (junction), false for a symlink
+}
+
+// UnsupportedReparsePointError is returned when trying to decode a non-symlink or
+// mount point reparse point.
+type UnsupportedReparsePointError struct {
+	Tag uint32 // the unrecognized reparse tag
+}
+
+func (e *UnsupportedReparsePointError) Error() string {
+	return fmt.Sprintf("unsupported reparse point %x", e.Tag)
+}
+
+// DecodeReparsePoint decodes a Win32 REPARSE_DATA_BUFFER structure containing either a symlink
+// or a mount point.
+func DecodeReparsePoint(b []byte) (*ReparsePoint, error) {
+	tag := binary.LittleEndian.Uint32(b[0:4])
+	return DecodeReparsePointData(tag, b[8:]) // skip tag (4) + data length (2) + reserved (2)
+}
+
+func DecodeReparsePointData(tag uint32, b []byte) (*ReparsePoint, error) { // b is the reparse data after the 8-byte header (see DecodeReparsePoint)
+	isMountPoint := false
+	switch tag {
+	case reparseTagMountPoint:
+		isMountPoint = true
+	case reparseTagSymlink:
+	default:
+		return nil, &UnsupportedReparsePointError{tag} // only symlinks and mount points are supported
+	}
+	nameOffset := 8 + binary.LittleEndian.Uint16(b[4:6]) // 8 = the four offset/length fields; b[4:6] is SubstituteNameOffset
+	if !isMountPoint {
+		nameOffset += 4 // symlink buffers have a 4-byte Flags field before the path data (see EncodeReparsePoint)
+	}
+	nameLength := binary.LittleEndian.Uint16(b[6:8]) // SubstituteNameLength, in bytes
+	name := make([]uint16, nameLength/2)
+	err := binary.Read(bytes.NewReader(b[nameOffset:nameOffset+nameLength]), binary.LittleEndian, &name)
+	if err != nil {
+		return nil, err
+	}
+	return &ReparsePoint{string(utf16.Decode(name)), isMountPoint}, nil
+}
+
+func isDriveLetter(c byte) bool { // ASCII letters only, matching Windows drive naming
+	return (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z')
+}
+
+// EncodeReparsePoint encodes a Win32 REPARSE_DATA_BUFFER structure describing a symlink or
+// mount point.
+func EncodeReparsePoint(rp *ReparsePoint) []byte {
+	// Generate an NT path and determine if this is a relative path.
+	var ntTarget string
+	relative := false
+	if strings.HasPrefix(rp.Target, `\\?\`) {
+		ntTarget = `\??\` + rp.Target[4:] // extended-length Win32 path -> NT namespace
+	} else if strings.HasPrefix(rp.Target, `\\`) {
+		ntTarget = `\??\UNC\` + rp.Target[2:] // UNC share path
+	} else if len(rp.Target) >= 2 && isDriveLetter(rp.Target[0]) && rp.Target[1] == ':' {
+		ntTarget = `\??\` + rp.Target // drive-absolute path
+	} else {
+		ntTarget = rp.Target // anything else is treated as relative
+		relative = true
+	}
+
+	// The paths must be NUL-terminated even though they are counted strings.
+	target16 := utf16.Encode([]rune(rp.Target + "\x00"))
+	ntTarget16 := utf16.Encode([]rune(ntTarget + "\x00"))
+
+	size := int(unsafe.Sizeof(reparseDataBuffer{})) - 8 // ReparseDataLength excludes tag/length/reserved
+	size += len(ntTarget16)*2 + len(target16)*2         // UTF-16 code units -> bytes
+
+	tag := uint32(reparseTagMountPoint)
+	if !rp.IsMountPoint {
+		tag = reparseTagSymlink
+		size += 4 // Add room for symlink flags
+	}
+
+	data := reparseDataBuffer{
+		ReparseTag:           tag,
+		ReparseDataLength:    uint16(size),
+		SubstituteNameOffset: 0, // substitute (NT) name first in the path buffer
+		SubstituteNameLength: uint16((len(ntTarget16) - 1) * 2), // lengths exclude the NUL terminator
+		PrintNameOffset:      uint16(len(ntTarget16) * 2),       // print name follows, offset includes the NUL
+		PrintNameLength:      uint16((len(target16) - 1) * 2),
+	}
+
+	var b bytes.Buffer
+	_ = binary.Write(&b, binary.LittleEndian, &data)
+	if !rp.IsMountPoint {
+		flags := uint32(0)
+		if relative {
+			flags |= 1 // SYMLINK_FLAG_RELATIVE
+		}
+		_ = binary.Write(&b, binary.LittleEndian, flags)
+	}
+
+	_ = binary.Write(&b, binary.LittleEndian, ntTarget16) // substitute name
+	_ = binary.Write(&b, binary.LittleEndian, target16)   // print name
+	return b.Bytes()
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/Microsoft/go-winio/sd.go 0.21.3-0ubuntu1/vendor/github.com/Microsoft/go-winio/sd.go
--- 0.19.3+ds1-4/vendor/github.com/Microsoft/go-winio/sd.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/Microsoft/go-winio/sd.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,133 @@
+//go:build windows
+// +build windows
+
+package winio
+
+import (
+	"errors"
+	"fmt"
+	"unsafe"
+
+	"golang.org/x/sys/windows"
+)
+
+//sys lookupAccountName(systemName *uint16, accountName string, sid *byte, sidSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) = advapi32.LookupAccountNameW
+//sys lookupAccountSid(systemName *uint16, sid *byte, name *uint16, nameSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) = advapi32.LookupAccountSidW
+//sys convertSidToStringSid(sid *byte, str **uint16) (err error) = advapi32.ConvertSidToStringSidW
+//sys convertStringSidToSid(str *uint16, sid **byte) (err error) = advapi32.ConvertStringSidToSidW
+
+type AccountLookupError struct { // AccountLookupError wraps a failure to resolve an account name or SID.
+	Name string // account name or SID string that was being looked up
+	Err  error
+}
+
+func (e *AccountLookupError) Error() string {
+	if e.Name == "" {
+		return "lookup account: empty account name specified"
+	}
+	var s string
+	switch {
+	case errors.Is(e.Err, windows.ERROR_INVALID_SID):
+		s = "the security ID structure is invalid"
+	case errors.Is(e.Err, windows.ERROR_NONE_MAPPED):
+		s = "not found"
+	default:
+		s = e.Err.Error() // pass through any other underlying error text
+	}
+	return "lookup account " + e.Name + ": " + s
+}
+
+func (e *AccountLookupError) Unwrap() error { return e.Err } // supports errors.Is/As
+
+type SddlConversionError struct { // SddlConversionError wraps a failure to convert an SDDL string.
+	Sddl string
+	Err  error
+}
+
+func (e *SddlConversionError) Error() string {
+	return "convert " + e.Sddl + ": " + e.Err.Error()
+}
+
+func (e *SddlConversionError) Unwrap() error { return e.Err } // supports errors.Is/As
+
+// LookupSidByName looks up the SID of an account by name
+//
+//revive:disable-next-line:var-naming SID, not Sid
+func LookupSidByName(name string) (sid string, err error) {
+	if name == "" {
+		return "", &AccountLookupError{name, windows.ERROR_NONE_MAPPED}
+	}
+
+	var sidSize, sidNameUse, refDomainSize uint32
+	err = lookupAccountName(nil, name, nil, &sidSize, nil, &refDomainSize, &sidNameUse) // first call with nil buffers just queries required sizes
+	if err != nil && err != windows.ERROR_INSUFFICIENT_BUFFER { //nolint:errorlint // err is Errno; INSUFFICIENT_BUFFER is expected on the size query
+		return "", &AccountLookupError{name, err}
+	}
+	sidBuffer := make([]byte, sidSize)
+	refDomainBuffer := make([]uint16, refDomainSize)
+	err = lookupAccountName(nil, name, &sidBuffer[0], &sidSize, &refDomainBuffer[0], &refDomainSize, &sidNameUse) // second call fills the buffers
+	if err != nil {
+		return "", &AccountLookupError{name, err}
+	}
+	var strBuffer *uint16
+	err = convertSidToStringSid(&sidBuffer[0], &strBuffer) // system allocates the string
+	if err != nil {
+		return "", &AccountLookupError{name, err}
+	}
+	sid = windows.UTF16ToString((*[0xffff]uint16)(unsafe.Pointer(strBuffer))[:])
+	_, _ = windows.LocalFree(windows.Handle(unsafe.Pointer(strBuffer))) // free the system-allocated string
+	return sid, nil
+}
+
+// LookupNameBySid looks up the name of an account by SID
+//
+//revive:disable-next-line:var-naming SID, not Sid
+func LookupNameBySid(sid string) (name string, err error) {
+	if sid == "" {
+		return "", &AccountLookupError{sid, windows.ERROR_NONE_MAPPED}
+	}
+
+	sidBuffer, err := windows.UTF16PtrFromString(sid)
+	if err != nil {
+		return "", &AccountLookupError{sid, err}
+	}
+
+	var sidPtr *byte
+	if err = convertStringSidToSid(sidBuffer, &sidPtr); err != nil { // string SID -> system-allocated binary SID
+		return "", &AccountLookupError{sid, err}
+	}
+	defer windows.LocalFree(windows.Handle(unsafe.Pointer(sidPtr))) //nolint:errcheck
+
+	var nameSize, refDomainSize, sidNameUse uint32
+	err = lookupAccountSid(nil, sidPtr, nil, &nameSize, nil, &refDomainSize, &sidNameUse) // size-query call, mirrors LookupSidByName
+	if err != nil && err != windows.ERROR_INSUFFICIENT_BUFFER { //nolint:errorlint // err is Errno; INSUFFICIENT_BUFFER is expected here
+		return "", &AccountLookupError{sid, err}
+	}
+
+	nameBuffer := make([]uint16, nameSize)
+	refDomainBuffer := make([]uint16, refDomainSize)
+	err = lookupAccountSid(nil, sidPtr, &nameBuffer[0], &nameSize, &refDomainBuffer[0], &refDomainSize, &sidNameUse)
+	if err != nil {
+		return "", &AccountLookupError{sid, err}
+	}
+
+	name = windows.UTF16ToString(nameBuffer)
+	return name, nil
+}
+
+func SddlToSecurityDescriptor(sddl string) ([]byte, error) { // SddlToSecurityDescriptor converts an SDDL string to a self-relative binary security descriptor.
+	sd, err := windows.SecurityDescriptorFromString(sddl)
+	if err != nil {
+		return nil, &SddlConversionError{Sddl: sddl, Err: err}
+	}
+	b := unsafe.Slice((*byte)(unsafe.Pointer(sd)), sd.Length()) // view the descriptor's bytes without copying
+	return b, nil
+}
+
+func SecurityDescriptorToSddl(sd []byte) (string, error) { // SecurityDescriptorToSddl is the inverse: binary descriptor -> SDDL string.
+	if l := int(unsafe.Sizeof(windows.SECURITY_DESCRIPTOR{})); len(sd) < l { // guard before the unsafe cast below
+		return "", fmt.Errorf("SecurityDescriptor (%d) smaller than expected (%d): %w", len(sd), l, windows.ERROR_INCORRECT_SIZE)
+	}
+	s := (*windows.SECURITY_DESCRIPTOR)(unsafe.Pointer(&sd[0]))
+	return s.String(), nil
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/Microsoft/go-winio/syscall.go 0.21.3-0ubuntu1/vendor/github.com/Microsoft/go-winio/syscall.go
--- 0.19.3+ds1-4/vendor/github.com/Microsoft/go-winio/syscall.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/Microsoft/go-winio/syscall.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,5 @@
+//go:build windows
+
+package winio
+
+//go:generate go run github.com/Microsoft/go-winio/tools/mkwinsyscall -output zsyscall_windows.go ./*.go
diff -pruN 0.19.3+ds1-4/vendor/github.com/Microsoft/go-winio/zsyscall_windows.go 0.21.3-0ubuntu1/vendor/github.com/Microsoft/go-winio/zsyscall_windows.go
--- 0.19.3+ds1-4/vendor/github.com/Microsoft/go-winio/zsyscall_windows.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/Microsoft/go-winio/zsyscall_windows.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,378 @@
+//go:build windows
+
+// Code generated by 'go generate' using "github.com/Microsoft/go-winio/tools/mkwinsyscall"; DO NOT EDIT.
+
+package winio
+
+import (
+	"syscall"
+	"unsafe"
+
+	"golang.org/x/sys/windows"
+)
+
+var _ unsafe.Pointer
+
+// Do the interface allocations only once for common
+// Errno values.
+const (
+	errnoERROR_IO_PENDING = 997
+)
+
+var (
+	errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING)
+	errERROR_EINVAL     error = syscall.EINVAL
+)
+
+// errnoErr returns common boxed Errno values, to prevent
+// allocations at runtime.
+func errnoErr(e syscall.Errno) error {
+	switch e {
+	case 0:
+		return errERROR_EINVAL
+	case errnoERROR_IO_PENDING:
+		return errERROR_IO_PENDING
+	}
+	return e
+}
+
+var (
+	modadvapi32 = windows.NewLazySystemDLL("advapi32.dll")
+	modkernel32 = windows.NewLazySystemDLL("kernel32.dll")
+	modntdll    = windows.NewLazySystemDLL("ntdll.dll")
+	modws2_32   = windows.NewLazySystemDLL("ws2_32.dll")
+
+	procAdjustTokenPrivileges              = modadvapi32.NewProc("AdjustTokenPrivileges")
+	procConvertSidToStringSidW             = modadvapi32.NewProc("ConvertSidToStringSidW")
+	procConvertStringSidToSidW             = modadvapi32.NewProc("ConvertStringSidToSidW")
+	procImpersonateSelf                    = modadvapi32.NewProc("ImpersonateSelf")
+	procLookupAccountNameW                 = modadvapi32.NewProc("LookupAccountNameW")
+	procLookupAccountSidW                  = modadvapi32.NewProc("LookupAccountSidW")
+	procLookupPrivilegeDisplayNameW        = modadvapi32.NewProc("LookupPrivilegeDisplayNameW")
+	procLookupPrivilegeNameW               = modadvapi32.NewProc("LookupPrivilegeNameW")
+	procLookupPrivilegeValueW              = modadvapi32.NewProc("LookupPrivilegeValueW")
+	procOpenThreadToken                    = modadvapi32.NewProc("OpenThreadToken")
+	procRevertToSelf                       = modadvapi32.NewProc("RevertToSelf")
+	procBackupRead                         = modkernel32.NewProc("BackupRead")
+	procBackupWrite                        = modkernel32.NewProc("BackupWrite")
+	procCancelIoEx                         = modkernel32.NewProc("CancelIoEx")
+	procConnectNamedPipe                   = modkernel32.NewProc("ConnectNamedPipe")
+	procCreateIoCompletionPort             = modkernel32.NewProc("CreateIoCompletionPort")
+	procCreateNamedPipeW                   = modkernel32.NewProc("CreateNamedPipeW")
+	procDisconnectNamedPipe                = modkernel32.NewProc("DisconnectNamedPipe")
+	procGetCurrentThread                   = modkernel32.NewProc("GetCurrentThread")
+	procGetNamedPipeHandleStateW           = modkernel32.NewProc("GetNamedPipeHandleStateW")
+	procGetNamedPipeInfo                   = modkernel32.NewProc("GetNamedPipeInfo")
+	procGetQueuedCompletionStatus          = modkernel32.NewProc("GetQueuedCompletionStatus")
+	procSetFileCompletionNotificationModes = modkernel32.NewProc("SetFileCompletionNotificationModes")
+	procNtCreateNamedPipeFile              = modntdll.NewProc("NtCreateNamedPipeFile")
+	procRtlDefaultNpAcl                    = modntdll.NewProc("RtlDefaultNpAcl")
+	procRtlDosPathNameToNtPathName_U       = modntdll.NewProc("RtlDosPathNameToNtPathName_U")
+	procRtlNtStatusToDosErrorNoTeb         = modntdll.NewProc("RtlNtStatusToDosErrorNoTeb")
+	procWSAGetOverlappedResult             = modws2_32.NewProc("WSAGetOverlappedResult")
+)
+
+func adjustTokenPrivileges(token windows.Token, releaseAll bool, input *byte, outputSize uint32, output *byte, requiredSize *uint32) (success bool, err error) {
+	var _p0 uint32
+	if releaseAll {
+		_p0 = 1
+	}
+	r0, _, e1 := syscall.SyscallN(procAdjustTokenPrivileges.Addr(), uintptr(token), uintptr(_p0), uintptr(unsafe.Pointer(input)), uintptr(outputSize), uintptr(unsafe.Pointer(output)), uintptr(unsafe.Pointer(requiredSize)))
+	success = r0 != 0
+	if true {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func convertSidToStringSid(sid *byte, str **uint16) (err error) {
+	r1, _, e1 := syscall.SyscallN(procConvertSidToStringSidW.Addr(), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(str)))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func convertStringSidToSid(str *uint16, sid **byte) (err error) {
+	r1, _, e1 := syscall.SyscallN(procConvertStringSidToSidW.Addr(), uintptr(unsafe.Pointer(str)), uintptr(unsafe.Pointer(sid)))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func impersonateSelf(level uint32) (err error) {
+	r1, _, e1 := syscall.SyscallN(procImpersonateSelf.Addr(), uintptr(level))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func lookupAccountName(systemName *uint16, accountName string, sid *byte, sidSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) {
+	var _p0 *uint16
+	_p0, err = syscall.UTF16PtrFromString(accountName)
+	if err != nil {
+		return
+	}
+	return _lookupAccountName(systemName, _p0, sid, sidSize, refDomain, refDomainSize, sidNameUse)
+}
+
+func _lookupAccountName(systemName *uint16, accountName *uint16, sid *byte, sidSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) {
+	r1, _, e1 := syscall.SyscallN(procLookupAccountNameW.Addr(), uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(accountName)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(sidSize)), uintptr(unsafe.Pointer(refDomain)), uintptr(unsafe.Pointer(refDomainSize)), uintptr(unsafe.Pointer(sidNameUse)))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func lookupAccountSid(systemName *uint16, sid *byte, name *uint16, nameSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) {
+	r1, _, e1 := syscall.SyscallN(procLookupAccountSidW.Addr(), uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(nameSize)), uintptr(unsafe.Pointer(refDomain)), uintptr(unsafe.Pointer(refDomainSize)), uintptr(unsafe.Pointer(sidNameUse)))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func lookupPrivilegeDisplayName(systemName string, name *uint16, buffer *uint16, size *uint32, languageId *uint32) (err error) {
+	var _p0 *uint16
+	_p0, err = syscall.UTF16PtrFromString(systemName)
+	if err != nil {
+		return
+	}
+	return _lookupPrivilegeDisplayName(_p0, name, buffer, size, languageId)
+}
+
+func _lookupPrivilegeDisplayName(systemName *uint16, name *uint16, buffer *uint16, size *uint32, languageId *uint32) (err error) {
+	r1, _, e1 := syscall.SyscallN(procLookupPrivilegeDisplayNameW.Addr(), uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(buffer)), uintptr(unsafe.Pointer(size)), uintptr(unsafe.Pointer(languageId)))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func lookupPrivilegeName(systemName string, luid *uint64, buffer *uint16, size *uint32) (err error) {
+	var _p0 *uint16
+	_p0, err = syscall.UTF16PtrFromString(systemName)
+	if err != nil {
+		return
+	}
+	return _lookupPrivilegeName(_p0, luid, buffer, size)
+}
+
+func _lookupPrivilegeName(systemName *uint16, luid *uint64, buffer *uint16, size *uint32) (err error) {
+	r1, _, e1 := syscall.SyscallN(procLookupPrivilegeNameW.Addr(), uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(luid)), uintptr(unsafe.Pointer(buffer)), uintptr(unsafe.Pointer(size)))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func lookupPrivilegeValue(systemName string, name string, luid *uint64) (err error) {
+	var _p0 *uint16
+	_p0, err = syscall.UTF16PtrFromString(systemName)
+	if err != nil {
+		return
+	}
+	var _p1 *uint16
+	_p1, err = syscall.UTF16PtrFromString(name)
+	if err != nil {
+		return
+	}
+	return _lookupPrivilegeValue(_p0, _p1, luid)
+}
+
+func _lookupPrivilegeValue(systemName *uint16, name *uint16, luid *uint64) (err error) {
+	r1, _, e1 := syscall.SyscallN(procLookupPrivilegeValueW.Addr(), uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(luid)))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func openThreadToken(thread windows.Handle, accessMask uint32, openAsSelf bool, token *windows.Token) (err error) {
+	var _p0 uint32
+	if openAsSelf {
+		_p0 = 1
+	}
+	r1, _, e1 := syscall.SyscallN(procOpenThreadToken.Addr(), uintptr(thread), uintptr(accessMask), uintptr(_p0), uintptr(unsafe.Pointer(token)))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func revertToSelf() (err error) {
+	r1, _, e1 := syscall.SyscallN(procRevertToSelf.Addr())
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func backupRead(h windows.Handle, b []byte, bytesRead *uint32, abort bool, processSecurity bool, context *uintptr) (err error) {
+	var _p0 *byte
+	if len(b) > 0 {
+		_p0 = &b[0]
+	}
+	var _p1 uint32
+	if abort {
+		_p1 = 1
+	}
+	var _p2 uint32
+	if processSecurity {
+		_p2 = 1
+	}
+	r1, _, e1 := syscall.SyscallN(procBackupRead.Addr(), uintptr(h), uintptr(unsafe.Pointer(_p0)), uintptr(len(b)), uintptr(unsafe.Pointer(bytesRead)), uintptr(_p1), uintptr(_p2), uintptr(unsafe.Pointer(context)))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func backupWrite(h windows.Handle, b []byte, bytesWritten *uint32, abort bool, processSecurity bool, context *uintptr) (err error) {
+	var _p0 *byte
+	if len(b) > 0 {
+		_p0 = &b[0]
+	}
+	var _p1 uint32
+	if abort {
+		_p1 = 1
+	}
+	var _p2 uint32
+	if processSecurity {
+		_p2 = 1
+	}
+	r1, _, e1 := syscall.SyscallN(procBackupWrite.Addr(), uintptr(h), uintptr(unsafe.Pointer(_p0)), uintptr(len(b)), uintptr(unsafe.Pointer(bytesWritten)), uintptr(_p1), uintptr(_p2), uintptr(unsafe.Pointer(context)))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func cancelIoEx(file windows.Handle, o *windows.Overlapped) (err error) {
+	r1, _, e1 := syscall.SyscallN(procCancelIoEx.Addr(), uintptr(file), uintptr(unsafe.Pointer(o)))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func connectNamedPipe(pipe windows.Handle, o *windows.Overlapped) (err error) {
+	r1, _, e1 := syscall.SyscallN(procConnectNamedPipe.Addr(), uintptr(pipe), uintptr(unsafe.Pointer(o)))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func createIoCompletionPort(file windows.Handle, port windows.Handle, key uintptr, threadCount uint32) (newport windows.Handle, err error) {
+	r0, _, e1 := syscall.SyscallN(procCreateIoCompletionPort.Addr(), uintptr(file), uintptr(port), uintptr(key), uintptr(threadCount))
+	newport = windows.Handle(r0)
+	if newport == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func createNamedPipe(name string, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *windows.SecurityAttributes) (handle windows.Handle, err error) {
+	var _p0 *uint16
+	_p0, err = syscall.UTF16PtrFromString(name)
+	if err != nil {
+		return
+	}
+	return _createNamedPipe(_p0, flags, pipeMode, maxInstances, outSize, inSize, defaultTimeout, sa)
+}
+
+func _createNamedPipe(name *uint16, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *windows.SecurityAttributes) (handle windows.Handle, err error) {
+	r0, _, e1 := syscall.SyscallN(procCreateNamedPipeW.Addr(), uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(pipeMode), uintptr(maxInstances), uintptr(outSize), uintptr(inSize), uintptr(defaultTimeout), uintptr(unsafe.Pointer(sa)))
+	handle = windows.Handle(r0)
+	if handle == windows.InvalidHandle {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func disconnectNamedPipe(pipe windows.Handle) (err error) {
+	r1, _, e1 := syscall.SyscallN(procDisconnectNamedPipe.Addr(), uintptr(pipe))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func getCurrentThread() (h windows.Handle) {
+	r0, _, _ := syscall.SyscallN(procGetCurrentThread.Addr())
+	h = windows.Handle(r0)
+	return
+}
+
+func getNamedPipeHandleState(pipe windows.Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) {
+	r1, _, e1 := syscall.SyscallN(procGetNamedPipeHandleStateW.Addr(), uintptr(pipe), uintptr(unsafe.Pointer(state)), uintptr(unsafe.Pointer(curInstances)), uintptr(unsafe.Pointer(maxCollectionCount)), uintptr(unsafe.Pointer(collectDataTimeout)), uintptr(unsafe.Pointer(userName)), uintptr(maxUserNameSize))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func getNamedPipeInfo(pipe windows.Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) {
+	r1, _, e1 := syscall.SyscallN(procGetNamedPipeInfo.Addr(), uintptr(pipe), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(outSize)), uintptr(unsafe.Pointer(inSize)), uintptr(unsafe.Pointer(maxInstances)))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func getQueuedCompletionStatus(port windows.Handle, bytes *uint32, key *uintptr, o **ioOperation, timeout uint32) (err error) {
+	r1, _, e1 := syscall.SyscallN(procGetQueuedCompletionStatus.Addr(), uintptr(port), uintptr(unsafe.Pointer(bytes)), uintptr(unsafe.Pointer(key)), uintptr(unsafe.Pointer(o)), uintptr(timeout))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func setFileCompletionNotificationModes(h windows.Handle, flags uint8) (err error) {
+	r1, _, e1 := syscall.SyscallN(procSetFileCompletionNotificationModes.Addr(), uintptr(h), uintptr(flags))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func ntCreateNamedPipeFile(pipe *windows.Handle, access ntAccessMask, oa *objectAttributes, iosb *ioStatusBlock, share ntFileShareMode, disposition ntFileCreationDisposition, options ntFileOptions, typ uint32, readMode uint32, completionMode uint32, maxInstances uint32, inboundQuota uint32, outputQuota uint32, timeout *int64) (status ntStatus) {
+	r0, _, _ := syscall.SyscallN(procNtCreateNamedPipeFile.Addr(), uintptr(unsafe.Pointer(pipe)), uintptr(access), uintptr(unsafe.Pointer(oa)), uintptr(unsafe.Pointer(iosb)), uintptr(share), uintptr(disposition), uintptr(options), uintptr(typ), uintptr(readMode), uintptr(completionMode), uintptr(maxInstances), uintptr(inboundQuota), uintptr(outputQuota), uintptr(unsafe.Pointer(timeout)))
+	status = ntStatus(r0)
+	return
+}
+
+func rtlDefaultNpAcl(dacl *uintptr) (status ntStatus) {
+	r0, _, _ := syscall.SyscallN(procRtlDefaultNpAcl.Addr(), uintptr(unsafe.Pointer(dacl)))
+	status = ntStatus(r0)
+	return
+}
+
+func rtlDosPathNameToNtPathName(name *uint16, ntName *unicodeString, filePart uintptr, reserved uintptr) (status ntStatus) {
+	r0, _, _ := syscall.SyscallN(procRtlDosPathNameToNtPathName_U.Addr(), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(ntName)), uintptr(filePart), uintptr(reserved))
+	status = ntStatus(r0)
+	return
+}
+
+func rtlNtStatusToDosError(status ntStatus) (winerr error) {
+	r0, _, _ := syscall.SyscallN(procRtlNtStatusToDosErrorNoTeb.Addr(), uintptr(status))
+	if r0 != 0 {
+		winerr = syscall.Errno(r0)
+	}
+	return
+}
+
+func wsaGetOverlappedResult(h windows.Handle, o *windows.Overlapped, bytes *uint32, wait bool, flags *uint32) (err error) {
+	var _p0 uint32
+	if wait {
+		_p0 = 1
+	}
+	r1, _, e1 := syscall.SyscallN(procWSAGetOverlappedResult.Addr(), uintptr(h), uintptr(unsafe.Pointer(o)), uintptr(unsafe.Pointer(bytes)), uintptr(_p0), uintptr(unsafe.Pointer(flags)))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/agext/levenshtein/.gitignore 0.21.3-0ubuntu1/vendor/github.com/agext/levenshtein/.gitignore
--- 0.19.3+ds1-4/vendor/github.com/agext/levenshtein/.gitignore	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/agext/levenshtein/.gitignore	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,53 @@
+# Ignore docs files
+_gh_pages
+_site
+
+# Ignore temporary files
+README.html
+coverage.out
+.tmp
+
+# Numerous always-ignore extensions
+*.diff
+*.err
+*.log
+*.orig
+*.rej
+*.swo
+*.swp
+*.vi
+*.zip
+*~
+
+# OS or Editor folders
+._*
+.cache
+.DS_Store
+.idea
+.project
+.settings
+.tmproj
+*.esproj
+*.sublime-project
+*.sublime-workspace
+nbproject
+Thumbs.db
+
+# Komodo
+.komodotools
+*.komodoproject
+
+# SCSS-Lint
+scss-lint-report.xml
+
+# grunt-contrib-sass cache
+.sass-cache
+
+# Jekyll metadata
+docs/.jekyll-metadata
+
+# Folders to ignore
+.build
+.test
+bower_components
+node_modules
diff -pruN 0.19.3+ds1-4/vendor/github.com/agext/levenshtein/.travis.yml 0.21.3-0ubuntu1/vendor/github.com/agext/levenshtein/.travis.yml
--- 0.19.3+ds1-4/vendor/github.com/agext/levenshtein/.travis.yml	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/agext/levenshtein/.travis.yml	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,30 @@
+language: go
+sudo: false
+matrix:
+  fast_finish: true
+  include:
+    - go: 1.14.x
+      env: TEST_METHOD=goveralls
+    - go: 1.13.x
+    - go: 1.12.x
+    - go: 1.11.x
+    - go: 1.10.x
+    - go: tip
+    - go: 1.9.x
+    - go: 1.8.x
+    - go: 1.7.x
+    - go: 1.6.x
+    - go: 1.5.x
+  allow_failures:
+    - go: tip
+    - go: 1.11.x
+    - go: 1.10.x
+    - go: 1.9.x
+    - go: 1.8.x
+    - go: 1.7.x
+    - go: 1.6.x
+    - go: 1.5.x
+script: ./test.sh $TEST_METHOD
+notifications:
+  email:
+    on_success: never
diff -pruN 0.19.3+ds1-4/vendor/github.com/agext/levenshtein/DCO 0.21.3-0ubuntu1/vendor/github.com/agext/levenshtein/DCO
--- 0.19.3+ds1-4/vendor/github.com/agext/levenshtein/DCO	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/agext/levenshtein/DCO	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,36 @@
+Developer Certificate of Origin
+Version 1.1
+
+Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
+660 York Street, Suite 102,
+San Francisco, CA 94110 USA
+
+Everyone is permitted to copy and distribute verbatim copies of this
+license document, but changing it is not allowed.
+
+
+Developer's Certificate of Origin 1.1
+
+By making a contribution to this project, I certify that:
+
+(a) The contribution was created in whole or in part by me and I
+    have the right to submit it under the open source license
+    indicated in the file; or
+
+(b) The contribution is based upon previous work that, to the best
+    of my knowledge, is covered under an appropriate open source
+    license and I have the right under that license to submit that
+    work with modifications, whether created in whole or in part
+    by me, under the same open source license (unless I am
+    permitted to submit under a different license), as indicated
+    in the file; or
+
+(c) The contribution was provided directly to me by some other
+    person who certified (a), (b) or (c) and I have not modified
+    it.
+
+(d) I understand and agree that this project and the contribution
+    are public and that a record of the contribution (including all
+    personal information I submit with it, including my sign-off) is
+    maintained indefinitely and may be redistributed consistent with
+    this project or the open source license(s) involved.
diff -pruN 0.19.3+ds1-4/vendor/github.com/agext/levenshtein/LICENSE 0.21.3-0ubuntu1/vendor/github.com/agext/levenshtein/LICENSE
--- 0.19.3+ds1-4/vendor/github.com/agext/levenshtein/LICENSE	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/agext/levenshtein/LICENSE	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,201 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff -pruN 0.19.3+ds1-4/vendor/github.com/agext/levenshtein/MAINTAINERS 0.21.3-0ubuntu1/vendor/github.com/agext/levenshtein/MAINTAINERS
--- 0.19.3+ds1-4/vendor/github.com/agext/levenshtein/MAINTAINERS	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/agext/levenshtein/MAINTAINERS	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1 @@
+Alex Bucataru <alex@alrux.com> (@AlexBucataru)
diff -pruN 0.19.3+ds1-4/vendor/github.com/agext/levenshtein/NOTICE 0.21.3-0ubuntu1/vendor/github.com/agext/levenshtein/NOTICE
--- 0.19.3+ds1-4/vendor/github.com/agext/levenshtein/NOTICE	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/agext/levenshtein/NOTICE	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,5 @@
+Alrux Go EXTensions (AGExt) - package levenshtein
+Copyright 2016 ALRUX Inc.
+
+This product includes software developed at ALRUX Inc.
+(http://www.alrux.com/).
diff -pruN 0.19.3+ds1-4/vendor/github.com/agext/levenshtein/README.md 0.21.3-0ubuntu1/vendor/github.com/agext/levenshtein/README.md
--- 0.19.3+ds1-4/vendor/github.com/agext/levenshtein/README.md	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/agext/levenshtein/README.md	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,38 @@
+# A Go package for calculating the Levenshtein distance between two strings
+
+[![Release](https://img.shields.io/github/release/agext/levenshtein.svg?style=flat)](https://github.com/agext/levenshtein/releases/latest)
+[![GoDoc](https://img.shields.io/badge/godoc-reference-blue.svg?style=flat)](https://godoc.org/github.com/agext/levenshtein) 
+[![Build Status](https://travis-ci.org/agext/levenshtein.svg?branch=master&style=flat)](https://travis-ci.org/agext/levenshtein)
+[![Coverage Status](https://coveralls.io/repos/github/agext/levenshtein/badge.svg?style=flat)](https://coveralls.io/github/agext/levenshtein)
+[![Go Report Card](https://goreportcard.com/badge/github.com/agext/levenshtein?style=flat)](https://goreportcard.com/report/github.com/agext/levenshtein)
+
+
+This package implements distance and similarity metrics for strings, based on the Levenshtein measure, in [Go](http://golang.org).
+
+## Project Status
+
+v1.2.3 Stable: Guaranteed no breaking changes to the API in future v1.x releases. Probably safe to use in production, though provided on "AS IS" basis.
+
+This package is being actively maintained. If you encounter any problems or have any suggestions for improvement, please [open an issue](https://github.com/agext/levenshtein/issues). Pull requests are welcome.
+
+## Overview
+
+The Levenshtein `Distance` between two strings is the minimum total cost of edits that would convert the first string into the second. The allowed edit operations are insertions, deletions, and substitutions, all at character (one UTF-8 code point) level. Each operation has a default cost of 1, but each can be assigned its own cost equal to or greater than 0.
+
+A `Distance` of 0 means the two strings are identical, and the higher the value the more different the strings. Since in practice we are interested in finding if the two strings are "close enough", it often does not make sense to continue the calculation once the result is mathematically guaranteed to exceed a desired threshold. Providing this value to the `Distance` function allows it to take a shortcut and return a lower bound instead of an exact cost when the threshold is exceeded.
+
+The `Similarity` function calculates the distance, then converts it into a normalized metric within the range 0..1, with 1 meaning the strings are identical, and 0 that they have nothing in common. A minimum similarity threshold can be provided to speed up the calculation of the metric for strings that are far too dissimilar for the purpose at hand. All values under this threshold are rounded down to 0.
+
+The `Match` function provides a similarity metric, with the same range and meaning as `Similarity`, but with a bonus for string pairs that share a common prefix and have a similarity above a "bonus threshold". It uses the same method as proposed by Winkler for the Jaro distance, and the reasoning behind it is that these string pairs are very likely spelling variations or errors, and they are more closely linked than the edit distance alone would suggest.
+
+The underlying `Calculate` function is also exported, to allow the building of other derivative metrics, if needed.
+
+## Installation
+
+```
+go get github.com/agext/levenshtein
+```
+
+## License
+
+Package levenshtein is released under the Apache 2.0 license. See the [LICENSE](LICENSE) file for details.
diff -pruN 0.19.3+ds1-4/vendor/github.com/agext/levenshtein/levenshtein.go 0.21.3-0ubuntu1/vendor/github.com/agext/levenshtein/levenshtein.go
--- 0.19.3+ds1-4/vendor/github.com/agext/levenshtein/levenshtein.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/agext/levenshtein/levenshtein.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,290 @@
+// Copyright 2016 ALRUX Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/*
+Package levenshtein implements distance and similarity metrics for strings, based on the Levenshtein measure.
+
+The Levenshtein `Distance` between two strings is the minimum total cost of edits that would convert the first string into the second. The allowed edit operations are insertions, deletions, and substitutions, all at character (one UTF-8 code point) level. Each operation has a default cost of 1, but each can be assigned its own cost equal to or greater than 0.
+
+A `Distance` of 0 means the two strings are identical, and the higher the value the more different the strings. Since in practice we are interested in finding if the two strings are "close enough", it often does not make sense to continue the calculation once the result is mathematically guaranteed to exceed a desired threshold. Providing this value to the `Distance` function allows it to take a shortcut and return a lower bound instead of an exact cost when the threshold is exceeded.
+
+The `Similarity` function calculates the distance, then converts it into a normalized metric within the range 0..1, with 1 meaning the strings are identical, and 0 that they have nothing in common. A minimum similarity threshold can be provided to speed up the calculation of the metric for strings that are far too dissimilar for the purpose at hand. All values under this threshold are rounded down to 0.
+
+The `Match` function provides a similarity metric, with the same range and meaning as `Similarity`, but with a bonus for string pairs that share a common prefix and have a similarity above a "bonus threshold". It uses the same method as proposed by Winkler for the Jaro distance, and the reasoning behind it is that these string pairs are very likely spelling variations or errors, and they are more closely linked than the edit distance alone would suggest.
+
+The underlying `Calculate` function is also exported, to allow the building of other derivative metrics, if needed.
+*/
+package levenshtein
+
+// Calculate determines the Levenshtein distance between two strings, using
+// the given costs for each edit operation. It returns the distance along with
+// the lengths of the longest common prefix and suffix.
+//
+// If maxCost is non-zero, the calculation stops as soon as the distance is determined
+// to be greater than maxCost. Therefore, any return value higher than maxCost is a
+// lower bound for the actual distance.
+func Calculate(str1, str2 []rune, maxCost, insCost, subCost, delCost int) (dist, prefixLen, suffixLen int) {
+	l1, l2 := len(str1), len(str2)
+	// trim common prefix, if any, as it doesn't affect the distance
+	for ; prefixLen < l1 && prefixLen < l2; prefixLen++ {
+		if str1[prefixLen] != str2[prefixLen] {
+			break
+		}
+	}
+	str1, str2 = str1[prefixLen:], str2[prefixLen:]
+	l1 -= prefixLen
+	l2 -= prefixLen
+	// trim common suffix, if any, as it doesn't affect the distance
+	for 0 < l1 && 0 < l2 {
+		if str1[l1-1] != str2[l2-1] {
+			str1, str2 = str1[:l1], str2[:l2]
+			break
+		}
+		l1--
+		l2--
+		suffixLen++
+	}
+	// if the first string is empty, the distance is the length of the second string times the cost of insertion
+	if l1 == 0 {
+		dist = l2 * insCost
+		return
+	}
+	// if the second string is empty, the distance is the length of the first string times the cost of deletion
+	if l2 == 0 {
+		dist = l1 * delCost
+		return
+	}
+
+	// variables used in inner "for" loops
+	var y, dy, c, l int
+
+	// if maxCost is greater than or equal to the maximum possible distance, it's equivalent to 'unlimited'
+	if maxCost > 0 {
+		if subCost < delCost+insCost {
+			if maxCost >= l1*subCost+(l2-l1)*insCost {
+				maxCost = 0
+			}
+		} else {
+			if maxCost >= l1*delCost+l2*insCost {
+				maxCost = 0
+			}
+		}
+	}
+
+	if maxCost > 0 {
+		// prefer the longer string first, to minimize time;
+		// a swap also transposes the meanings of insertion and deletion.
+		if l1 < l2 {
+			str1, str2, l1, l2, insCost, delCost = str2, str1, l2, l1, delCost, insCost
+		}
+
+		// the length differential times cost of deletion is a lower bound for the cost;
+		// if it is higher than the maxCost, there is no point going into the main calculation.
+		if dist = (l1 - l2) * delCost; dist > maxCost {
+			return
+		}
+
+		d := make([]int, l1+1)
+
+		// offset and length of d in the current row
+		doff, dlen := 0, 1
+		for y, dy = 1, delCost; y <= l1 && dy <= maxCost; dlen++ {
+			d[y] = dy
+			y++
+			dy = y * delCost
+		}
+		// fmt.Printf("%q -> %q: init doff=%d dlen=%d d[%d:%d]=%v\n", str1, str2, doff, dlen, doff, doff+dlen, d[doff:doff+dlen])
+
+		for x := 0; x < l2; x++ {
+			dy, d[doff] = d[doff], d[doff]+insCost
+			for doff < l1 && d[doff] > maxCost && dlen > 0 {
+				if str1[doff] != str2[x] {
+					dy += subCost
+				}
+				doff++
+				dlen--
+				if c = d[doff] + insCost; c < dy {
+					dy = c
+				}
+				dy, d[doff] = d[doff], dy
+			}
+			for y, l = doff, doff+dlen-1; y < l; dy, d[y] = d[y], dy {
+				if str1[y] != str2[x] {
+					dy += subCost
+				}
+				if c = d[y] + delCost; c < dy {
+					dy = c
+				}
+				y++
+				if c = d[y] + insCost; c < dy {
+					dy = c
+				}
+			}
+			if y < l1 {
+				if str1[y] != str2[x] {
+					dy += subCost
+				}
+				if c = d[y] + delCost; c < dy {
+					dy = c
+				}
+				for ; dy <= maxCost && y < l1; dy, d[y] = dy+delCost, dy {
+					y++
+					dlen++
+				}
+			}
+			// fmt.Printf("%q -> %q: x=%d doff=%d dlen=%d d[%d:%d]=%v\n", str1, str2, x, doff, dlen, doff, doff+dlen, d[doff:doff+dlen])
+			if dlen == 0 {
+				dist = maxCost + 1
+				return
+			}
+		}
+		if doff+dlen-1 < l1 {
+			dist = maxCost + 1
+			return
+		}
+		dist = d[l1]
+	} else {
+		// ToDo: This is O(l1*l2) time and O(min(l1,l2)) space; investigate if it is
+		// worth to implement diagonal approach - O(l1*(1+dist)) time, up to O(l1*l2) space
+		// http://www.csse.monash.edu.au/~lloyd/tildeStrings/Alignment/92.IPL.html
+
+		// prefer the shorter string first, to minimize space; time is O(l1*l2) anyway;
+		// a swap also transposes the meanings of insertion and deletion.
+		if l1 > l2 {
+			str1, str2, l1, l2, insCost, delCost = str2, str1, l2, l1, delCost, insCost
+		}
+		d := make([]int, l1+1)
+
+		for y = 1; y <= l1; y++ {
+			d[y] = y * delCost
+		}
+		for x := 0; x < l2; x++ {
+			dy, d[0] = d[0], d[0]+insCost
+			for y = 0; y < l1; dy, d[y] = d[y], dy {
+				if str1[y] != str2[x] {
+					dy += subCost
+				}
+				if c = d[y] + delCost; c < dy {
+					dy = c
+				}
+				y++
+				if c = d[y] + insCost; c < dy {
+					dy = c
+				}
+			}
+		}
+		dist = d[l1]
+	}
+
+	return
+}
+
+// Distance returns the Levenshtein distance between str1 and str2, using the
+// default or provided cost values. Pass nil for the third argument to use the
+// default cost of 1 for all three operations, with no maximum.
+func Distance(str1, str2 string, p *Params) int {
+	if p == nil {
+		p = defaultParams
+	}
+	dist, _, _ := Calculate([]rune(str1), []rune(str2), p.maxCost, p.insCost, p.subCost, p.delCost)
+	return dist
+}
+
+// Similarity returns a score in the range of 0..1 for how similar the two strings are.
+// A score of 1 means the strings are identical, and 0 means they have nothing in common.
+//
+// A nil third argument uses the default cost of 1 for all three operations.
+//
+// If a non-zero MinScore value is provided in the parameters, scores lower than it
+// will be returned as 0.
+func Similarity(str1, str2 string, p *Params) float64 {
+	return Match(str1, str2, p.Clone().BonusThreshold(1.1)) // guaranteed no bonus
+}
+
+// Match returns a similarity score adjusted by the same method as proposed by Winkler for
+// the Jaro distance - giving a bonus to string pairs that share a common prefix, only if their
+// similarity score is already over a threshold.
+//
+// The score is in the range of 0..1, with 1 meaning the strings are identical,
+// and 0 meaning they have nothing in common.
+//
+// A nil third argument uses the default cost of 1 for all three operations, maximum length of
+// common prefix to consider for bonus of 4, scaling factor of 0.1, and bonus threshold of 0.7.
+//
+// If a non-zero MinScore value is provided in the parameters, scores lower than it
+// will be returned as 0.
+func Match(str1, str2 string, p *Params) float64 {
+	s1, s2 := []rune(str1), []rune(str2)
+	l1, l2 := len(s1), len(s2)
+	// two empty strings are identical; shortcut also avoids divByZero issues later on.
+	if l1 == 0 && l2 == 0 {
+		return 1
+	}
+
+	if p == nil {
+		p = defaultParams
+	}
+
+	// a min over 1 can never be satisfied, so the score is 0.
+	if p.minScore > 1 {
+		return 0
+	}
+
+	insCost, delCost, maxDist, max := p.insCost, p.delCost, 0, 0
+	if l1 > l2 {
+		l1, l2, insCost, delCost = l2, l1, delCost, insCost
+	}
+
+	if p.subCost < delCost+insCost {
+		maxDist = l1*p.subCost + (l2-l1)*insCost
+	} else {
+		maxDist = l1*delCost + l2*insCost
+	}
+
+	// a zero min is always satisfied, so no need to set a max cost.
+	if p.minScore > 0 {
+		// if p.minScore is lower than p.bonusThreshold, we can use a simplified formula
+		// for the max cost, because a sim score below min cannot receive a bonus.
+		if p.minScore < p.bonusThreshold {
+			// round down the max - a cost equal to a rounded up max would already be under min.
+			max = int((1 - p.minScore) * float64(maxDist))
+		} else {
+			// p.minScore <= sim + p.bonusPrefix*p.bonusScale*(1-sim)
+			// p.minScore <= (1-dist/maxDist) + p.bonusPrefix*p.bonusScale*(1-(1-dist/maxDist))
+			// p.minScore <= 1 - dist/maxDist + p.bonusPrefix*p.bonusScale*dist/maxDist
+			// 1 - p.minScore >= dist/maxDist - p.bonusPrefix*p.bonusScale*dist/maxDist
+			// (1-p.minScore)*maxDist/(1-p.bonusPrefix*p.bonusScale) >= dist
+			max = int((1 - p.minScore) * float64(maxDist) / (1 - float64(p.bonusPrefix)*p.bonusScale))
+		}
+	}
+
+	dist, pl, _ := Calculate(s1, s2, max, p.insCost, p.subCost, p.delCost)
+	if max > 0 && dist > max {
+		return 0
+	}
+	sim := 1 - float64(dist)/float64(maxDist)
+
+	if sim >= p.bonusThreshold && sim < 1 && p.bonusPrefix > 0 && p.bonusScale > 0 {
+		if pl > p.bonusPrefix {
+			pl = p.bonusPrefix
+		}
+		sim += float64(pl) * p.bonusScale * (1 - sim)
+	}
+
+	if sim < p.minScore {
+		return 0
+	}
+
+	return sim
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/agext/levenshtein/params.go 0.21.3-0ubuntu1/vendor/github.com/agext/levenshtein/params.go
--- 0.19.3+ds1-4/vendor/github.com/agext/levenshtein/params.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/agext/levenshtein/params.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,152 @@
+// Copyright 2016 ALRUX Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package levenshtein
+
+// Params represents a set of parameter values for the various formulas involved
+// in the calculation of the Levenshtein string metrics.
+type Params struct {
+	insCost        int
+	subCost        int
+	delCost        int
+	maxCost        int
+	minScore       float64
+	bonusPrefix    int
+	bonusScale     float64
+	bonusThreshold float64
+}
+
+var (
+	defaultParams = NewParams()
+)
+
+// NewParams creates a new set of parameters and initializes it with the default values.
+func NewParams() *Params {
+	return &Params{
+		insCost:        1,
+		subCost:        1,
+		delCost:        1,
+		maxCost:        0,
+		minScore:       0,
+		bonusPrefix:    4,
+		bonusScale:     .1,
+		bonusThreshold: .7,
+	}
+}
+
+// Clone returns a pointer to a copy of the receiver parameter set, or of a new
+// default parameter set if the receiver is nil.
+func (p *Params) Clone() *Params {
+	if p == nil {
+		return NewParams()
+	}
+	return &Params{
+		insCost:        p.insCost,
+		subCost:        p.subCost,
+		delCost:        p.delCost,
+		maxCost:        p.maxCost,
+		minScore:       p.minScore,
+		bonusPrefix:    p.bonusPrefix,
+		bonusScale:     p.bonusScale,
+		bonusThreshold: p.bonusThreshold,
+	}
+}
+
+// InsCost overrides the default value of 1 for the cost of insertion.
+// The new value must be zero or positive.
+func (p *Params) InsCost(v int) *Params {
+	if v >= 0 {
+		p.insCost = v
+	}
+	return p
+}
+
+// SubCost overrides the default value of 1 for the cost of substitution.
+// The new value must be zero or positive.
+func (p *Params) SubCost(v int) *Params {
+	if v >= 0 {
+		p.subCost = v
+	}
+	return p
+}
+
+// DelCost overrides the default value of 1 for the cost of deletion.
+// The new value must be zero or positive.
+func (p *Params) DelCost(v int) *Params {
+	if v >= 0 {
+		p.delCost = v
+	}
+	return p
+}
+
+// MaxCost overrides the default value of 0 (meaning unlimited) for the maximum cost.
+// The calculation of Distance() stops when the result is guaranteed to exceed
+// this maximum, returning a lower-bound rather than exact value.
+// The new value must be zero or positive.
+func (p *Params) MaxCost(v int) *Params {
+	if v >= 0 {
+		p.maxCost = v
+	}
+	return p
+}
+
+// MinScore overrides the default value of 0 for the minimum similarity score.
+// Scores below this threshold are returned as 0 by Similarity() and Match().
+// The new value must be zero or positive. Note that a minimum greater than 1
+// can never be satisfied, resulting in a score of 0 for any pair of strings.
+func (p *Params) MinScore(v float64) *Params {
+	if v >= 0 {
+		p.minScore = v
+	}
+	return p
+}
+
+// BonusPrefix overrides the default value for the maximum length of
+// common prefix to be considered for bonus by Match().
+// The new value must be zero or positive.
+func (p *Params) BonusPrefix(v int) *Params {
+	if v >= 0 {
+		p.bonusPrefix = v
+	}
+	return p
+}
+
+// BonusScale overrides the default value for the scaling factor used by Match()
+// in calculating the bonus.
+// The new value must be zero or positive. To guarantee that the similarity score
+// remains in the interval 0..1, this scaling factor is not allowed to exceed
+// 1 / BonusPrefix.
+func (p *Params) BonusScale(v float64) *Params {
+	if v >= 0 {
+		p.bonusScale = v
+	}
+
+	// the bonus cannot exceed (1-sim), or the score may become greater than 1.
+	if float64(p.bonusPrefix)*p.bonusScale > 1 {
+		p.bonusScale = 1 / float64(p.bonusPrefix)
+	}
+
+	return p
+}
+
+// BonusThreshold overrides the default value for the minimum similarity score
+// for which Match() can assign a bonus.
+// The new value must be zero or positive. Note that a threshold greater than 1
+// effectively makes Match() become the equivalent of Similarity().
+func (p *Params) BonusThreshold(v float64) *Params {
+	if v >= 0 {
+		p.bonusThreshold = v
+	}
+	return p
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/agext/levenshtein/test.sh 0.21.3-0ubuntu1/vendor/github.com/agext/levenshtein/test.sh
--- 0.19.3+ds1-4/vendor/github.com/agext/levenshtein/test.sh	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/agext/levenshtein/test.sh	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,10 @@
+set -ev
+
+if [[ "$1" == "goveralls" ]]; then
+	echo "Testing with goveralls..."
+	go get github.com/mattn/goveralls
+	$HOME/gopath/bin/goveralls -service=travis-ci
+else
+	echo "Testing with go test..."
+	go test -v ./...
+fi
diff -pruN 0.19.3+ds1-4/vendor/github.com/apparentlymart/go-cidr/LICENSE 0.21.3-0ubuntu1/vendor/github.com/apparentlymart/go-cidr/LICENSE
--- 0.19.3+ds1-4/vendor/github.com/apparentlymart/go-cidr/LICENSE	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/apparentlymart/go-cidr/LICENSE	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,19 @@
+Copyright (c) 2015 Martin Atkins
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff -pruN 0.19.3+ds1-4/vendor/github.com/apparentlymart/go-cidr/cidr/cidr.go 0.21.3-0ubuntu1/vendor/github.com/apparentlymart/go-cidr/cidr/cidr.go
--- 0.19.3+ds1-4/vendor/github.com/apparentlymart/go-cidr/cidr/cidr.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/apparentlymart/go-cidr/cidr/cidr.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,218 @@
+// Package cidr is a collection of assorted utilities for computing
+// network and host addresses within network ranges.
+//
+// It expects a CIDR-type address structure where addresses are divided into
+// some number of prefix bits representing the network and then the remaining
+// suffix bits represent the host.
+//
+// For example, it can help to calculate addresses for sub-networks of a
+// parent network, or to calculate host addresses within a particular prefix.
+//
+// At present this package is prioritizing simplicity of implementation and
+// de-prioritizing speed and memory usage. Thus caution is advised before
+// using this package in performance-critical applications or hot codepaths.
+// Patches to improve the speed and memory usage may be accepted as long as
+// they do not result in a significant increase in code complexity.
+package cidr
+
+import (
+	"fmt"
+	"math/big"
+	"net"
+)
+
+// Subnet takes a parent CIDR range and creates a subnet within it
+// with the given number of additional prefix bits and the given
+// network number.
+//
+// For example, 10.3.0.0/16, extended by 8 bits, with a network number
+// of 5, becomes 10.3.5.0/24 .
+func Subnet(base *net.IPNet, newBits int, num int) (*net.IPNet, error) {
+	ip := base.IP
+	mask := base.Mask
+
+	parentLen, addrLen := mask.Size()
+	newPrefixLen := parentLen + newBits
+
+	if newPrefixLen > addrLen {
+		return nil, fmt.Errorf("insufficient address space to extend prefix of %d by %d", parentLen, newBits)
+	}
+
+	maxNetNum := uint64(1<<uint64(newBits)) - 1
+	if uint64(num) > maxNetNum {
+		return nil, fmt.Errorf("prefix extension of %d does not accommodate a subnet numbered %d", newBits, num)
+	}
+
+	return &net.IPNet{
+		IP:   insertNumIntoIP(ip, big.NewInt(int64(num)), newPrefixLen),
+		Mask: net.CIDRMask(newPrefixLen, addrLen),
+	}, nil
+}
+
+// Host takes a parent CIDR range and turns it into a host IP address with
+// the given host number.
+//
+// For example, 10.3.0.0/16 with a host number of 2 gives 10.3.0.2.
+func Host(base *net.IPNet, num int) (net.IP, error) {
+	ip := base.IP
+	mask := base.Mask
+	bigNum := big.NewInt(int64(num))
+
+	parentLen, addrLen := mask.Size()
+	hostLen := addrLen - parentLen
+
+	maxHostNum := big.NewInt(int64(1))
+	maxHostNum.Lsh(maxHostNum, uint(hostLen))
+	maxHostNum.Sub(maxHostNum, big.NewInt(1))
+
+	numUint64 := big.NewInt(int64(bigNum.Uint64()))
+	if bigNum.Cmp(big.NewInt(0)) == -1 {
+		numUint64.Neg(bigNum)
+		numUint64.Sub(numUint64, big.NewInt(int64(1)))
+		bigNum.Sub(maxHostNum, numUint64)
+	}
+
+	if numUint64.Cmp(maxHostNum) == 1 {
+		return nil, fmt.Errorf("prefix of %d does not accommodate a host numbered %d", parentLen, num)
+	}
+	var bitlength int
+	if ip.To4() != nil {
+		bitlength = 32
+	} else {
+		bitlength = 128
+	}
+	return insertNumIntoIP(ip, bigNum, bitlength), nil
+}
+
+// AddressRange returns the first and last addresses in the given CIDR range.
+func AddressRange(network *net.IPNet) (net.IP, net.IP) {
+	// the first IP is easy
+	firstIP := network.IP
+
+	// the last IP is the network address OR NOT the mask address
+	prefixLen, bits := network.Mask.Size()
+	if prefixLen == bits {
+		// Easy!
+		// But make sure that our two slices are distinct, since they
+		// would be in all other cases.
+		lastIP := make([]byte, len(firstIP))
+		copy(lastIP, firstIP)
+		return firstIP, lastIP
+	}
+
+	firstIPInt, bits := ipToInt(firstIP)
+	hostLen := uint(bits) - uint(prefixLen)
+	lastIPInt := big.NewInt(1)
+	lastIPInt.Lsh(lastIPInt, hostLen)
+	lastIPInt.Sub(lastIPInt, big.NewInt(1))
+	lastIPInt.Or(lastIPInt, firstIPInt)
+
+	return firstIP, intToIP(lastIPInt, bits)
+}
+
+// AddressCount returns the number of distinct host addresses within the given
+// CIDR range.
+//
+// Since the result is a uint64, this function returns meaningful information
+// only for IPv4 ranges and IPv6 ranges with a prefix size of at least 65.
+func AddressCount(network *net.IPNet) uint64 {
+	prefixLen, bits := network.Mask.Size()
+	return 1 << (uint64(bits) - uint64(prefixLen))
+}
+
+//VerifyNoOverlap takes a list subnets and supernet (CIDRBlock) and verifies
+//none of the subnets overlap and all subnets are in the supernet
+//it returns an error if any of those conditions are not satisfied
+func VerifyNoOverlap(subnets []*net.IPNet, CIDRBlock *net.IPNet) error {
+	firstLastIP := make([][]net.IP, len(subnets))
+	for i, s := range subnets {
+		first, last := AddressRange(s)
+		firstLastIP[i] = []net.IP{first, last}
+	}
+	for i, s := range subnets {
+		if !CIDRBlock.Contains(firstLastIP[i][0]) || !CIDRBlock.Contains(firstLastIP[i][1]) {
+			return fmt.Errorf("%s does not fully contain %s", CIDRBlock.String(), s.String())
+		}
+		for j := 0; j < len(subnets); j++ {
+			if i == j {
+				continue
+			}
+
+			first := firstLastIP[j][0]
+			last := firstLastIP[j][1]
+			if s.Contains(first) || s.Contains(last) {
+				return fmt.Errorf("%s overlaps with %s", subnets[j].String(), s.String())
+			}
+		}
+	}
+	return nil
+}
+
+// PreviousSubnet returns the subnet of the desired mask in the IP space
+// just lower than the start of IPNet provided. If the IP space rolls over
+// then the second return value is true
+func PreviousSubnet(network *net.IPNet, prefixLen int) (*net.IPNet, bool) {
+	startIP := checkIPv4(network.IP)
+	previousIP := make(net.IP, len(startIP))
+	copy(previousIP, startIP)
+	cMask := net.CIDRMask(prefixLen, 8*len(previousIP))
+	previousIP = Dec(previousIP)
+	previous := &net.IPNet{IP: previousIP.Mask(cMask), Mask: cMask}
+	if startIP.Equal(net.IPv4zero) || startIP.Equal(net.IPv6zero) {
+		return previous, true
+	}
+	return previous, false
+}
+
+// NextSubnet returns the next available subnet of the desired mask size
+// starting for the maximum IP of the offset subnet
+// If the IP exceeds the maxium IP then the second return value is true
+func NextSubnet(network *net.IPNet, prefixLen int) (*net.IPNet, bool) {
+	_, currentLast := AddressRange(network)
+	mask := net.CIDRMask(prefixLen, 8*len(currentLast))
+	currentSubnet := &net.IPNet{IP: currentLast.Mask(mask), Mask: mask}
+	_, last := AddressRange(currentSubnet)
+	last = Inc(last)
+	next := &net.IPNet{IP: last.Mask(mask), Mask: mask}
+	if last.Equal(net.IPv4zero) || last.Equal(net.IPv6zero) {
+		return next, true
+	}
+	return next, false
+}
+
+//Inc increases the IP by one this returns a new []byte for the IP
+func Inc(IP net.IP) net.IP {
+	IP = checkIPv4(IP)
+	incIP := make([]byte, len(IP))
+	copy(incIP, IP)
+	for j := len(incIP) - 1; j >= 0; j-- {
+		incIP[j]++
+		if incIP[j] > 0 {
+			break
+		}
+	}
+	return incIP
+}
+
+//Dec decreases the IP by one this returns a new []byte for the IP
+func Dec(IP net.IP) net.IP {
+	IP = checkIPv4(IP)
+	decIP := make([]byte, len(IP))
+	copy(decIP, IP)
+	decIP = checkIPv4(decIP)
+	for j := len(decIP) - 1; j >= 0; j-- {
+		decIP[j]--
+		if decIP[j] < 255 {
+			break
+		}
+	}
+	return decIP
+}
+
+func checkIPv4(ip net.IP) net.IP {
+	// Go for some reason allocs IPv6len for IPv4 so we have to correct it
+	if v4 := ip.To4(); v4 != nil {
+		return v4
+	}
+	return ip
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/apparentlymart/go-cidr/cidr/wrangling.go 0.21.3-0ubuntu1/vendor/github.com/apparentlymart/go-cidr/cidr/wrangling.go
--- 0.19.3+ds1-4/vendor/github.com/apparentlymart/go-cidr/cidr/wrangling.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/apparentlymart/go-cidr/cidr/wrangling.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,37 @@
+package cidr
+
+import (
+	"fmt"
+	"math/big"
+	"net"
+)
+
+func ipToInt(ip net.IP) (*big.Int, int) {
+	val := &big.Int{}
+	val.SetBytes([]byte(ip))
+	if len(ip) == net.IPv4len {
+		return val, 32
+	} else if len(ip) == net.IPv6len {
+		return val, 128
+	} else {
+		panic(fmt.Errorf("Unsupported address length %d", len(ip)))
+	}
+}
+
+func intToIP(ipInt *big.Int, bits int) net.IP {
+	ipBytes := ipInt.Bytes()
+	ret := make([]byte, bits/8)
+	// Pack our IP bytes into the end of the return array,
+	// since big.Int.Bytes() removes front zero padding.
+	for i := 1; i <= len(ipBytes); i++ {
+		ret[len(ret)-i] = ipBytes[len(ipBytes)-i]
+	}
+	return net.IP(ret)
+}
+
+func insertNumIntoIP(ip net.IP, bigNum *big.Int, prefixLen int) net.IP {
+	ipInt, totalBits := ipToInt(ip)
+	bigNum.Lsh(bigNum, uint(totalBits-prefixLen))
+	ipInt.Or(ipInt, bigNum)
+	return intToIP(ipInt, totalBits)
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/apparentlymart/go-textseg/v15/LICENSE 0.21.3-0ubuntu1/vendor/github.com/apparentlymart/go-textseg/v15/LICENSE
--- 0.19.3+ds1-4/vendor/github.com/apparentlymart/go-textseg/v15/LICENSE	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/apparentlymart/go-textseg/v15/LICENSE	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,95 @@
+Copyright (c) 2017 Martin Atkins
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
+---------
+
+Unicode table generation programs are under a separate copyright and license:
+
+Copyright (c) 2014 Couchbase, Inc.
+Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
+except in compliance with the License. You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software distributed under the
+License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
+either express or implied. See the License for the specific language governing permissions
+and limitations under the License.
+
+---------
+
+Grapheme break data is provided as part of the Unicode character database,
+copright 2016 Unicode, Inc, which is provided with the following license:
+
+Unicode Data Files include all data files under the directories
+http://www.unicode.org/Public/, http://www.unicode.org/reports/,
+http://www.unicode.org/cldr/data/, http://source.icu-project.org/repos/icu/, and
+http://www.unicode.org/utility/trac/browser/.
+
+Unicode Data Files do not include PDF online code charts under the
+directory http://www.unicode.org/Public/.
+
+Software includes any source code published in the Unicode Standard
+or under the directories
+http://www.unicode.org/Public/, http://www.unicode.org/reports/,
+http://www.unicode.org/cldr/data/, http://source.icu-project.org/repos/icu/, and
+http://www.unicode.org/utility/trac/browser/.
+
+NOTICE TO USER: Carefully read the following legal agreement.
+BY DOWNLOADING, INSTALLING, COPYING OR OTHERWISE USING UNICODE INC.'S
+DATA FILES ("DATA FILES"), AND/OR SOFTWARE ("SOFTWARE"),
+YOU UNEQUIVOCALLY ACCEPT, AND AGREE TO BE BOUND BY, ALL OF THE
+TERMS AND CONDITIONS OF THIS AGREEMENT.
+IF YOU DO NOT AGREE, DO NOT DOWNLOAD, INSTALL, COPY, DISTRIBUTE OR USE
+THE DATA FILES OR SOFTWARE.
+
+COPYRIGHT AND PERMISSION NOTICE
+
+Copyright © 1991-2017 Unicode, Inc. All rights reserved.
+Distributed under the Terms of Use in http://www.unicode.org/copyright.html.
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of the Unicode data files and any associated documentation
+(the "Data Files") or Unicode software and any associated documentation
+(the "Software") to deal in the Data Files or Software
+without restriction, including without limitation the rights to use,
+copy, modify, merge, publish, distribute, and/or sell copies of
+the Data Files or Software, and to permit persons to whom the Data Files
+or Software are furnished to do so, provided that either
+(a) this copyright and permission notice appear with all copies
+of the Data Files or Software, or
+(b) this copyright and permission notice appear in associated
+Documentation.
+
+THE DATA FILES AND SOFTWARE ARE PROVIDED "AS IS", WITHOUT WARRANTY OF
+ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
+WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT OF THIRD PARTY RIGHTS.
+IN NO EVENT SHALL THE COPYRIGHT HOLDER OR HOLDERS INCLUDED IN THIS
+NOTICE BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL
+DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+PERFORMANCE OF THE DATA FILES OR SOFTWARE.
+
+Except as contained in this notice, the name of a copyright holder
+shall not be used in advertising or otherwise to promote the sale,
+use or other dealings in these Data Files or Software without prior
+written authorization of the copyright holder.
diff -pruN 0.19.3+ds1-4/vendor/github.com/apparentlymart/go-textseg/v15/textseg/all_tokens.go 0.21.3-0ubuntu1/vendor/github.com/apparentlymart/go-textseg/v15/textseg/all_tokens.go
--- 0.19.3+ds1-4/vendor/github.com/apparentlymart/go-textseg/v15/textseg/all_tokens.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/apparentlymart/go-textseg/v15/textseg/all_tokens.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,30 @@
+package textseg
+
+import (
+	"bufio"
+	"bytes"
+)
+
+// AllTokens is a utility that uses a bufio.SplitFunc to produce a slice of
+// all of the recognized tokens in the given buffer.
+func AllTokens(buf []byte, splitFunc bufio.SplitFunc) ([][]byte, error) {
+	scanner := bufio.NewScanner(bytes.NewReader(buf))
+	scanner.Split(splitFunc)
+	var ret [][]byte
+	for scanner.Scan() {
+		ret = append(ret, scanner.Bytes())
+	}
+	return ret, scanner.Err()
+}
+
+// TokenCount is a utility that uses a bufio.SplitFunc to count the number of
+// recognized tokens in the given buffer.
+func TokenCount(buf []byte, splitFunc bufio.SplitFunc) (int, error) {
+	scanner := bufio.NewScanner(bytes.NewReader(buf))
+	scanner.Split(splitFunc)
+	var ret int
+	for scanner.Scan() {
+		ret++
+	}
+	return ret, scanner.Err()
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/apparentlymart/go-textseg/v15/textseg/emoji_table.rl 0.21.3-0ubuntu1/vendor/github.com/apparentlymart/go-textseg/v15/textseg/emoji_table.rl
--- 0.19.3+ds1-4/vendor/github.com/apparentlymart/go-textseg/v15/textseg/emoji_table.rl	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/apparentlymart/go-textseg/v15/textseg/emoji_table.rl	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,545 @@
+# The following Ragel file was autogenerated with unicode2ragel.rb 
+# from: https://www.unicode.org/Public/15.0.0/ucd/emoji/emoji-data.txt
+#
+# It defines ["Extended_Pictographic"].
+#
+# To use this, make sure that your alphtype is set to byte,
+# and that your input is in utf8.
+
+%%{
+    machine Emoji;
+    
+    Extended_Pictographic = 
+        0xC2 0xA9               #E0.6   [1] (©️)       copyright
+      | 0xC2 0xAE               #E0.6   [1] (®️)       registered
+      | 0xE2 0x80 0xBC          #E0.6   [1] (‼️)       double exclamation mark
+      | 0xE2 0x81 0x89          #E0.6   [1] (⁉️)       exclamation question ...
+      | 0xE2 0x84 0xA2          #E0.6   [1] (™️)       trade mark
+      | 0xE2 0x84 0xB9          #E0.6   [1] (ℹ️)       information
+      | 0xE2 0x86 0x94..0x99    #E0.6   [6] (↔️..↙️)    left-right arrow..do...
+      | 0xE2 0x86 0xA9..0xAA    #E0.6   [2] (↩️..↪️)    right arrow curving ...
+      | 0xE2 0x8C 0x9A..0x9B    #E0.6   [2] (⌚..⌛)    watch..hourglass done
+      | 0xE2 0x8C 0xA8          #E1.0   [1] (⌨️)       keyboard
+      | 0xE2 0x8E 0x88          #E0.0   [1] (⎈)       HELM SYMBOL
+      | 0xE2 0x8F 0x8F          #E1.0   [1] (⏏️)       eject button
+      | 0xE2 0x8F 0xA9..0xAC    #E0.6   [4] (⏩..⏬)    fast-forward button..f...
+      | 0xE2 0x8F 0xAD..0xAE    #E0.7   [2] (⏭️..⏮️)    next track button..l...
+      | 0xE2 0x8F 0xAF          #E1.0   [1] (⏯️)       play or pause button
+      | 0xE2 0x8F 0xB0          #E0.6   [1] (⏰)       alarm clock
+      | 0xE2 0x8F 0xB1..0xB2    #E1.0   [2] (⏱️..⏲️)    stopwatch..timer clock
+      | 0xE2 0x8F 0xB3          #E0.6   [1] (⏳)       hourglass not done
+      | 0xE2 0x8F 0xB8..0xBA    #E0.7   [3] (⏸️..⏺️)    pause button..record...
+      | 0xE2 0x93 0x82          #E0.6   [1] (Ⓜ️)       circled M
+      | 0xE2 0x96 0xAA..0xAB    #E0.6   [2] (▪️..▫️)    black small square.....
+      | 0xE2 0x96 0xB6          #E0.6   [1] (▶️)       play button
+      | 0xE2 0x97 0x80          #E0.6   [1] (◀️)       reverse button
+      | 0xE2 0x97 0xBB..0xBE    #E0.6   [4] (◻️..◾)    white medium square.....
+      | 0xE2 0x98 0x80..0x81    #E0.6   [2] (☀️..☁️)    sun..cloud
+      | 0xE2 0x98 0x82..0x83    #E0.7   [2] (☂️..☃️)    umbrella..snowman
+      | 0xE2 0x98 0x84          #E1.0   [1] (☄️)       comet
+      | 0xE2 0x98 0x85          #E0.0   [1] (★)       BLACK STAR
+      | 0xE2 0x98 0x87..0x8D    #E0.0   [7] (☇..☍)    LIGHTNING..OPPOSITION
+      | 0xE2 0x98 0x8E          #E0.6   [1] (☎️)       telephone
+      | 0xE2 0x98 0x8F..0x90    #E0.0   [2] (☏..☐)    WHITE TELEPHONE..BALLO...
+      | 0xE2 0x98 0x91          #E0.6   [1] (☑️)       check box with check
+      | 0xE2 0x98 0x92          #E0.0   [1] (☒)       BALLOT BOX WITH X
+      | 0xE2 0x98 0x94..0x95    #E0.6   [2] (☔..☕)    umbrella with rain dro...
+      | 0xE2 0x98 0x96..0x97    #E0.0   [2] (☖..☗)    WHITE SHOGI PIECE..BLA...
+      | 0xE2 0x98 0x98          #E1.0   [1] (☘️)       shamrock
+      | 0xE2 0x98 0x99..0x9C    #E0.0   [4] (☙..☜)    REVERSED ROTATED FLORA...
+      | 0xE2 0x98 0x9D          #E0.6   [1] (☝️)       index pointing up
+      | 0xE2 0x98 0x9E..0x9F    #E0.0   [2] (☞..☟)    WHITE RIGHT POINTING I...
+      | 0xE2 0x98 0xA0          #E1.0   [1] (☠️)       skull and crossbones
+      | 0xE2 0x98 0xA1          #E0.0   [1] (☡)       CAUTION SIGN
+      | 0xE2 0x98 0xA2..0xA3    #E1.0   [2] (☢️..☣️)    radioactive..biohazard
+      | 0xE2 0x98 0xA4..0xA5    #E0.0   [2] (☤..☥)    CADUCEUS..ANKH
+      | 0xE2 0x98 0xA6          #E1.0   [1] (☦️)       orthodox cross
+      | 0xE2 0x98 0xA7..0xA9    #E0.0   [3] (☧..☩)    CHI RHO..CROSS OF JERU...
+      | 0xE2 0x98 0xAA          #E0.7   [1] (☪️)       star and crescent
+      | 0xE2 0x98 0xAB..0xAD    #E0.0   [3] (☫..☭)    FARSI SYMBOL..HAMMER A...
+      | 0xE2 0x98 0xAE          #E1.0   [1] (☮️)       peace symbol
+      | 0xE2 0x98 0xAF          #E0.7   [1] (☯️)       yin yang
+      | 0xE2 0x98 0xB0..0xB7    #E0.0   [8] (☰..☷)    TRIGRAM FOR HEAVEN..TR...
+      | 0xE2 0x98 0xB8..0xB9    #E0.7   [2] (☸️..☹️)    wheel of dharma..fro...
+      | 0xE2 0x98 0xBA          #E0.6   [1] (☺️)       smiling face
+      | 0xE2 0x98 0xBB..0xBF    #E0.0   [5] (☻..☿)    BLACK SMILING FACE..ME...
+      | 0xE2 0x99 0x80          #E4.0   [1] (♀️)       female sign
+      | 0xE2 0x99 0x81          #E0.0   [1] (♁)       EARTH
+      | 0xE2 0x99 0x82          #E4.0   [1] (♂️)       male sign
+      | 0xE2 0x99 0x83..0x87    #E0.0   [5] (♃..♇)    JUPITER..PLUTO
+      | 0xE2 0x99 0x88..0x93    #E0.6  [12] (♈..♓)    Aries..Pisces
+      | 0xE2 0x99 0x94..0x9E    #E0.0  [11] (♔..♞)    WHITE CHESS KING..BLAC...
+      | 0xE2 0x99 0x9F          #E11.0  [1] (♟️)       chess pawn
+      | 0xE2 0x99 0xA0          #E0.6   [1] (♠️)       spade suit
+      | 0xE2 0x99 0xA1..0xA2    #E0.0   [2] (♡..♢)    WHITE HEART SUIT..WHIT...
+      | 0xE2 0x99 0xA3          #E0.6   [1] (♣️)       club suit
+      | 0xE2 0x99 0xA4          #E0.0   [1] (♤)       WHITE SPADE SUIT
+      | 0xE2 0x99 0xA5..0xA6    #E0.6   [2] (♥️..♦️)    heart suit..diamond ...
+      | 0xE2 0x99 0xA7          #E0.0   [1] (♧)       WHITE CLUB SUIT
+      | 0xE2 0x99 0xA8          #E0.6   [1] (♨️)       hot springs
+      | 0xE2 0x99 0xA9..0xBA    #E0.0  [18] (♩..♺)    QUARTER NOTE..RECYCLIN...
+      | 0xE2 0x99 0xBB          #E0.6   [1] (♻️)       recycling symbol
+      | 0xE2 0x99 0xBC..0xBD    #E0.0   [2] (♼..♽)    RECYCLED PAPER SYMBOL....
+      | 0xE2 0x99 0xBE          #E11.0  [1] (♾️)       infinity
+      | 0xE2 0x99 0xBF          #E0.6   [1] (♿)       wheelchair symbol
+      | 0xE2 0x9A 0x80..0x85    #E0.0   [6] (⚀..⚅)    DIE FACE-1..DIE FACE-6
+      | 0xE2 0x9A 0x90..0x91    #E0.0   [2] (⚐..⚑)    WHITE FLAG..BLACK FLAG
+      | 0xE2 0x9A 0x92          #E1.0   [1] (⚒️)       hammer and pick
+      | 0xE2 0x9A 0x93          #E0.6   [1] (⚓)       anchor
+      | 0xE2 0x9A 0x94          #E1.0   [1] (⚔️)       crossed swords
+      | 0xE2 0x9A 0x95          #E4.0   [1] (⚕️)       medical symbol
+      | 0xE2 0x9A 0x96..0x97    #E1.0   [2] (⚖️..⚗️)    balance scale..alembic
+      | 0xE2 0x9A 0x98          #E0.0   [1] (⚘)       FLOWER
+      | 0xE2 0x9A 0x99          #E1.0   [1] (⚙️)       gear
+      | 0xE2 0x9A 0x9A          #E0.0   [1] (⚚)       STAFF OF HERMES
+      | 0xE2 0x9A 0x9B..0x9C    #E1.0   [2] (⚛️..⚜️)    atom symbol..fleur-d...
+      | 0xE2 0x9A 0x9D..0x9F    #E0.0   [3] (⚝..⚟)    OUTLINED WHITE STAR..T...
+      | 0xE2 0x9A 0xA0..0xA1    #E0.6   [2] (⚠️..⚡)    warning..high voltage
+      | 0xE2 0x9A 0xA2..0xA6    #E0.0   [5] (⚢..⚦)    DOUBLED FEMALE SIGN..M...
+      | 0xE2 0x9A 0xA7          #E13.0  [1] (⚧️)       transgender symbol
+      | 0xE2 0x9A 0xA8..0xA9    #E0.0   [2] (⚨..⚩)    VERTICAL MALE WITH STR...
+      | 0xE2 0x9A 0xAA..0xAB    #E0.6   [2] (⚪..⚫)    white circle..black ci...
+      | 0xE2 0x9A 0xAC..0xAF    #E0.0   [4] (⚬..⚯)    MEDIUM SMALL WHITE CIR...
+      | 0xE2 0x9A 0xB0..0xB1    #E1.0   [2] (⚰️..⚱️)    coffin..funeral urn
+      | 0xE2 0x9A 0xB2..0xBC    #E0.0  [11] (⚲..⚼)    NEUTER..SESQUIQUADRATE
+      | 0xE2 0x9A 0xBD..0xBE    #E0.6   [2] (⚽..⚾)    soccer ball..baseball
+      | 0xE2 0x9A 0xBF..0xFF    #E0.0   [5] (⚿..⛃)    SQUARED KEY..BLACK DRA...
+      | 0xE2 0x9B 0x00..0x83    #
+      | 0xE2 0x9B 0x84..0x85    #E0.6   [2] (⛄..⛅)    snowman without snow.....
+      | 0xE2 0x9B 0x86..0x87    #E0.0   [2] (⛆..⛇)    RAIN..BLACK SNOWMAN
+      | 0xE2 0x9B 0x88          #E0.7   [1] (⛈️)       cloud with lightning ...
+      | 0xE2 0x9B 0x89..0x8D    #E0.0   [5] (⛉..⛍)    TURNED WHITE SHOGI PIE...
+      | 0xE2 0x9B 0x8E          #E0.6   [1] (⛎)       Ophiuchus
+      | 0xE2 0x9B 0x8F          #E0.7   [1] (⛏️)       pick
+      | 0xE2 0x9B 0x90          #E0.0   [1] (⛐)       CAR SLIDING
+      | 0xE2 0x9B 0x91          #E0.7   [1] (⛑️)       rescue worker’s helmet
+      | 0xE2 0x9B 0x92          #E0.0   [1] (⛒)       CIRCLED CROSSING LANES
+      | 0xE2 0x9B 0x93          #E0.7   [1] (⛓️)       chains
+      | 0xE2 0x9B 0x94          #E0.6   [1] (⛔)       no entry
+      | 0xE2 0x9B 0x95..0xA8    #E0.0  [20] (⛕..⛨)    ALTERNATE ONE-WAY LEFT...
+      | 0xE2 0x9B 0xA9          #E0.7   [1] (⛩️)       shinto shrine
+      | 0xE2 0x9B 0xAA          #E0.6   [1] (⛪)       church
+      | 0xE2 0x9B 0xAB..0xAF    #E0.0   [5] (⛫..⛯)    CASTLE..MAP SYMBOL FOR...
+      | 0xE2 0x9B 0xB0..0xB1    #E0.7   [2] (⛰️..⛱️)    mountain..umbrella o...
+      | 0xE2 0x9B 0xB2..0xB3    #E0.6   [2] (⛲..⛳)    fountain..flag in hole
+      | 0xE2 0x9B 0xB4          #E0.7   [1] (⛴️)       ferry
+      | 0xE2 0x9B 0xB5          #E0.6   [1] (⛵)       sailboat
+      | 0xE2 0x9B 0xB6          #E0.0   [1] (⛶)       SQUARE FOUR CORNERS
+      | 0xE2 0x9B 0xB7..0xB9    #E0.7   [3] (⛷️..⛹️)    skier..person bounci...
+      | 0xE2 0x9B 0xBA          #E0.6   [1] (⛺)       tent
+      | 0xE2 0x9B 0xBB..0xBC    #E0.0   [2] (⛻..⛼)    JAPANESE BANK SYMBOL.....
+      | 0xE2 0x9B 0xBD          #E0.6   [1] (⛽)       fuel pump
+      | 0xE2 0x9B 0xBE..0xFF    #E0.0   [4] (⛾..✁)    CUP ON BLACK SQUARE..U...
+      | 0xE2 0x9C 0x00..0x81    #
+      | 0xE2 0x9C 0x82          #E0.6   [1] (✂️)       scissors
+      | 0xE2 0x9C 0x83..0x84    #E0.0   [2] (✃..✄)    LOWER BLADE SCISSORS.....
+      | 0xE2 0x9C 0x85          #E0.6   [1] (✅)       check mark button
+      | 0xE2 0x9C 0x88..0x8C    #E0.6   [5] (✈️..✌️)    airplane..victory hand
+      | 0xE2 0x9C 0x8D          #E0.7   [1] (✍️)       writing hand
+      | 0xE2 0x9C 0x8E          #E0.0   [1] (✎)       LOWER RIGHT PENCIL
+      | 0xE2 0x9C 0x8F          #E0.6   [1] (✏️)       pencil
+      | 0xE2 0x9C 0x90..0x91    #E0.0   [2] (✐..✑)    UPPER RIGHT PENCIL..WH...
+      | 0xE2 0x9C 0x92          #E0.6   [1] (✒️)       black nib
+      | 0xE2 0x9C 0x94          #E0.6   [1] (✔️)       check mark
+      | 0xE2 0x9C 0x96          #E0.6   [1] (✖️)       multiply
+      | 0xE2 0x9C 0x9D          #E0.7   [1] (✝️)       latin cross
+      | 0xE2 0x9C 0xA1          #E0.7   [1] (✡️)       star of David
+      | 0xE2 0x9C 0xA8          #E0.6   [1] (✨)       sparkles
+      | 0xE2 0x9C 0xB3..0xB4    #E0.6   [2] (✳️..✴️)    eight-spoked asteris...
+      | 0xE2 0x9D 0x84          #E0.6   [1] (❄️)       snowflake
+      | 0xE2 0x9D 0x87          #E0.6   [1] (❇️)       sparkle
+      | 0xE2 0x9D 0x8C          #E0.6   [1] (❌)       cross mark
+      | 0xE2 0x9D 0x8E          #E0.6   [1] (❎)       cross mark button
+      | 0xE2 0x9D 0x93..0x95    #E0.6   [3] (❓..❕)    red question mark..whi...
+      | 0xE2 0x9D 0x97          #E0.6   [1] (❗)       red exclamation mark
+      | 0xE2 0x9D 0xA3          #E1.0   [1] (❣️)       heart exclamation
+      | 0xE2 0x9D 0xA4          #E0.6   [1] (❤️)       red heart
+      | 0xE2 0x9D 0xA5..0xA7    #E0.0   [3] (❥..❧)    ROTATED HEAVY BLACK HE...
+      | 0xE2 0x9E 0x95..0x97    #E0.6   [3] (➕..➗)    plus..divide
+      | 0xE2 0x9E 0xA1          #E0.6   [1] (➡️)       right arrow
+      | 0xE2 0x9E 0xB0          #E0.6   [1] (➰)       curly loop
+      | 0xE2 0x9E 0xBF          #E1.0   [1] (➿)       double curly loop
+      | 0xE2 0xA4 0xB4..0xB5    #E0.6   [2] (⤴️..⤵️)    right arrow curving ...
+      | 0xE2 0xAC 0x85..0x87    #E0.6   [3] (⬅️..⬇️)    left arrow..down arrow
+      | 0xE2 0xAC 0x9B..0x9C    #E0.6   [2] (⬛..⬜)    black large square..wh...
+      | 0xE2 0xAD 0x90          #E0.6   [1] (⭐)       star
+      | 0xE2 0xAD 0x95          #E0.6   [1] (⭕)       hollow red circle
+      | 0xE3 0x80 0xB0          #E0.6   [1] (〰️)       wavy dash
+      | 0xE3 0x80 0xBD          #E0.6   [1] (〽️)       part alternation mark
+      | 0xE3 0x8A 0x97          #E0.6   [1] (㊗️)       Japanese “congratulat...
+      | 0xE3 0x8A 0x99          #E0.6   [1] (㊙️)       Japanese “secret” button
+      | 0xF0 0x9F 0x80 0x80..0x83  #E0.0   [4] (🀀..🀃)    MAHJONG TILE EAST W...
+      | 0xF0 0x9F 0x80 0x84     #E0.6   [1] (🀄)       mahjong red dragon
+      | 0xF0 0x9F 0x80 0x85..0xFF        #E0.0 [202] (🀅..🃎)    MAHJONG TILE ...
+      | 0xF0 0x9F 0x81..0x82 0x00..0xFF  #
+      | 0xF0 0x9F 0x83 0x00..0x8E        #
+      | 0xF0 0x9F 0x83 0x8F     #E0.6   [1] (🃏)       joker
+      | 0xF0 0x9F 0x83 0x90..0xBF  #E0.0  [48] (🃐..🃿)    <reserved-1F0D0>..<...
+      | 0xF0 0x9F 0x84 0x8D..0x8F  #E0.0   [3] (🄍..🄏)    CIRCLED ZERO WITH S...
+      | 0xF0 0x9F 0x84 0xAF     #E0.0   [1] (🄯)       COPYLEFT SYMBOL
+      | 0xF0 0x9F 0x85 0xAC..0xAF  #E0.0   [4] (🅬..🅯)    RAISED MR SIGN..CIR...
+      | 0xF0 0x9F 0x85 0xB0..0xB1  #E0.6   [2] (🅰️..🅱️)    A button (blood t...
+      | 0xF0 0x9F 0x85 0xBE..0xBF  #E0.6   [2] (🅾️..🅿️)    O button (blood t...
+      | 0xF0 0x9F 0x86 0x8E     #E0.6   [1] (🆎)       AB button (blood type)
+      | 0xF0 0x9F 0x86 0x91..0x9A  #E0.6  [10] (🆑..🆚)    CL button..VS button
+      | 0xF0 0x9F 0x86 0xAD..0xFF  #E0.0  [57] (🆭..🇥)    MASK WORK SYMBOL..<...
+      | 0xF0 0x9F 0x87 0x00..0xA5  #
+      | 0xF0 0x9F 0x88 0x81..0x82  #E0.6   [2] (🈁..🈂️)    Japanese “here” bu...
+      | 0xF0 0x9F 0x88 0x83..0x8F  #E0.0  [13] (🈃..🈏)    <reserved-1F203>..<...
+      | 0xF0 0x9F 0x88 0x9A     #E0.6   [1] (🈚)       Japanese “free of char...
+      | 0xF0 0x9F 0x88 0xAF     #E0.6   [1] (🈯)       Japanese “reserved” bu...
+      | 0xF0 0x9F 0x88 0xB2..0xBA  #E0.6   [9] (🈲..🈺)    Japanese “prohibite...
+      | 0xF0 0x9F 0x88 0xBC..0xBF  #E0.0   [4] (🈼..🈿)    <reserved-1F23C>..<...
+      | 0xF0 0x9F 0x89 0x89..0x8F  #E0.0   [7] (🉉..🉏)    <reserved-1F249>..<...
+      | 0xF0 0x9F 0x89 0x90..0x91  #E0.6   [2] (🉐..🉑)    Japanese “bargain” ...
+      | 0xF0 0x9F 0x89 0x92..0xFF        #E0.0 [174] (🉒..🋿)    <reserved-1F2...
+      | 0xF0 0x9F 0x8A..0x8A 0x00..0xFF  #
+      | 0xF0 0x9F 0x8B 0x00..0xBF        #
+      | 0xF0 0x9F 0x8C 0x80..0x8C  #E0.6  [13] (🌀..🌌)    cyclone..milky way
+      | 0xF0 0x9F 0x8C 0x8D..0x8E  #E0.7   [2] (🌍..🌎)    globe showing Europ...
+      | 0xF0 0x9F 0x8C 0x8F     #E0.6   [1] (🌏)       globe showing Asia-Aus...
+      | 0xF0 0x9F 0x8C 0x90     #E1.0   [1] (🌐)       globe with meridians
+      | 0xF0 0x9F 0x8C 0x91     #E0.6   [1] (🌑)       new moon
+      | 0xF0 0x9F 0x8C 0x92     #E1.0   [1] (🌒)       waxing crescent moon
+      | 0xF0 0x9F 0x8C 0x93..0x95  #E0.6   [3] (🌓..🌕)    first quarter moon....
+      | 0xF0 0x9F 0x8C 0x96..0x98  #E1.0   [3] (🌖..🌘)    waning gibbous moon...
+      | 0xF0 0x9F 0x8C 0x99     #E0.6   [1] (🌙)       crescent moon
+      | 0xF0 0x9F 0x8C 0x9A     #E1.0   [1] (🌚)       new moon face
+      | 0xF0 0x9F 0x8C 0x9B     #E0.6   [1] (🌛)       first quarter moon face
+      | 0xF0 0x9F 0x8C 0x9C     #E0.7   [1] (🌜)       last quarter moon face
+      | 0xF0 0x9F 0x8C 0x9D..0x9E  #E1.0   [2] (🌝..🌞)    full moon face..sun...
+      | 0xF0 0x9F 0x8C 0x9F..0xA0  #E0.6   [2] (🌟..🌠)    glowing star..shoot...
+      | 0xF0 0x9F 0x8C 0xA1     #E0.7   [1] (🌡️)       thermometer
+      | 0xF0 0x9F 0x8C 0xA2..0xA3  #E0.0   [2] (🌢..🌣)    BLACK DROPLET..WHIT...
+      | 0xF0 0x9F 0x8C 0xA4..0xAC  #E0.7   [9] (🌤️..🌬️)    sun behind small ...
+      | 0xF0 0x9F 0x8C 0xAD..0xAF  #E1.0   [3] (🌭..🌯)    hot dog..burrito
+      | 0xF0 0x9F 0x8C 0xB0..0xB1  #E0.6   [2] (🌰..🌱)    chestnut..seedling
+      | 0xF0 0x9F 0x8C 0xB2..0xB3  #E1.0   [2] (🌲..🌳)    evergreen tree..dec...
+      | 0xF0 0x9F 0x8C 0xB4..0xB5  #E0.6   [2] (🌴..🌵)    palm tree..cactus
+      | 0xF0 0x9F 0x8C 0xB6     #E0.7   [1] (🌶️)       hot pepper
+      | 0xF0 0x9F 0x8C 0xB7..0xFF  #E0.6  [20] (🌷..🍊)    tulip..tangerine
+      | 0xF0 0x9F 0x8D 0x00..0x8A  #
+      | 0xF0 0x9F 0x8D 0x8B     #E1.0   [1] (🍋)       lemon
+      | 0xF0 0x9F 0x8D 0x8C..0x8F  #E0.6   [4] (🍌..🍏)    banana..green apple
+      | 0xF0 0x9F 0x8D 0x90     #E1.0   [1] (🍐)       pear
+      | 0xF0 0x9F 0x8D 0x91..0xBB  #E0.6  [43] (🍑..🍻)    peach..clinking bee...
+      | 0xF0 0x9F 0x8D 0xBC     #E1.0   [1] (🍼)       baby bottle
+      | 0xF0 0x9F 0x8D 0xBD     #E0.7   [1] (🍽️)       fork and knife with p...
+      | 0xF0 0x9F 0x8D 0xBE..0xBF  #E1.0   [2] (🍾..🍿)    bottle with popping...
+      | 0xF0 0x9F 0x8E 0x80..0x93  #E0.6  [20] (🎀..🎓)    ribbon..graduation cap
+      | 0xF0 0x9F 0x8E 0x94..0x95  #E0.0   [2] (🎔..🎕)    HEART WITH TIP ON T...
+      | 0xF0 0x9F 0x8E 0x96..0x97  #E0.7   [2] (🎖️..🎗️)    military medal..r...
+      | 0xF0 0x9F 0x8E 0x98     #E0.0   [1] (🎘)       MUSICAL KEYBOARD WITH ...
+      | 0xF0 0x9F 0x8E 0x99..0x9B  #E0.7   [3] (🎙️..🎛️)    studio microphone...
+      | 0xF0 0x9F 0x8E 0x9C..0x9D  #E0.0   [2] (🎜..🎝)    BEAMED ASCENDING MU...
+      | 0xF0 0x9F 0x8E 0x9E..0x9F  #E0.7   [2] (🎞️..🎟️)    film frames..admi...
+      | 0xF0 0x9F 0x8E 0xA0..0xFF  #E0.6  [37] (🎠..🏄)    carousel horse..per...
+      | 0xF0 0x9F 0x8F 0x00..0x84  #
+      | 0xF0 0x9F 0x8F 0x85     #E1.0   [1] (🏅)       sports medal
+      | 0xF0 0x9F 0x8F 0x86     #E0.6   [1] (🏆)       trophy
+      | 0xF0 0x9F 0x8F 0x87     #E1.0   [1] (🏇)       horse racing
+      | 0xF0 0x9F 0x8F 0x88     #E0.6   [1] (🏈)       american football
+      | 0xF0 0x9F 0x8F 0x89     #E1.0   [1] (🏉)       rugby football
+      | 0xF0 0x9F 0x8F 0x8A     #E0.6   [1] (🏊)       person swimming
+      | 0xF0 0x9F 0x8F 0x8B..0x8E  #E0.7   [4] (🏋️..🏎️)    person lifting we...
+      | 0xF0 0x9F 0x8F 0x8F..0x93  #E1.0   [5] (🏏..🏓)    cricket game..ping ...
+      | 0xF0 0x9F 0x8F 0x94..0x9F  #E0.7  [12] (🏔️..🏟️)    snow-capped mount...
+      | 0xF0 0x9F 0x8F 0xA0..0xA3  #E0.6   [4] (🏠..🏣)    house..Japanese pos...
+      | 0xF0 0x9F 0x8F 0xA4     #E1.0   [1] (🏤)       post office
+      | 0xF0 0x9F 0x8F 0xA5..0xB0  #E0.6  [12] (🏥..🏰)    hospital..castle
+      | 0xF0 0x9F 0x8F 0xB1..0xB2  #E0.0   [2] (🏱..🏲)    WHITE PENNANT..BLAC...
+      | 0xF0 0x9F 0x8F 0xB3     #E0.7   [1] (🏳️)       white flag
+      | 0xF0 0x9F 0x8F 0xB4     #E1.0   [1] (🏴)       black flag
+      | 0xF0 0x9F 0x8F 0xB5     #E0.7   [1] (🏵️)       rosette
+      | 0xF0 0x9F 0x8F 0xB6     #E0.0   [1] (🏶)       BLACK ROSETTE
+      | 0xF0 0x9F 0x8F 0xB7     #E0.7   [1] (🏷️)       label
+      | 0xF0 0x9F 0x8F 0xB8..0xBA  #E1.0   [3] (🏸..🏺)    badminton..amphora
+      | 0xF0 0x9F 0x90 0x80..0x87  #E1.0   [8] (🐀..🐇)    rat..rabbit
+      | 0xF0 0x9F 0x90 0x88     #E0.7   [1] (🐈)       cat
+      | 0xF0 0x9F 0x90 0x89..0x8B  #E1.0   [3] (🐉..🐋)    dragon..whale
+      | 0xF0 0x9F 0x90 0x8C..0x8E  #E0.6   [3] (🐌..🐎)    snail..horse
+      | 0xF0 0x9F 0x90 0x8F..0x90  #E1.0   [2] (🐏..🐐)    ram..goat
+      | 0xF0 0x9F 0x90 0x91..0x92  #E0.6   [2] (🐑..🐒)    ewe..monkey
+      | 0xF0 0x9F 0x90 0x93     #E1.0   [1] (🐓)       rooster
+      | 0xF0 0x9F 0x90 0x94     #E0.6   [1] (🐔)       chicken
+      | 0xF0 0x9F 0x90 0x95     #E0.7   [1] (🐕)       dog
+      | 0xF0 0x9F 0x90 0x96     #E1.0   [1] (🐖)       pig
+      | 0xF0 0x9F 0x90 0x97..0xA9  #E0.6  [19] (🐗..🐩)    boar..poodle
+      | 0xF0 0x9F 0x90 0xAA     #E1.0   [1] (🐪)       camel
+      | 0xF0 0x9F 0x90 0xAB..0xBE  #E0.6  [20] (🐫..🐾)    two-hump camel..paw...
+      | 0xF0 0x9F 0x90 0xBF     #E0.7   [1] (🐿️)       chipmunk
+      | 0xF0 0x9F 0x91 0x80     #E0.6   [1] (👀)       eyes
+      | 0xF0 0x9F 0x91 0x81     #E0.7   [1] (👁️)       eye
+      | 0xF0 0x9F 0x91 0x82..0xA4  #E0.6  [35] (👂..👤)    ear..bust in silhou...
+      | 0xF0 0x9F 0x91 0xA5     #E1.0   [1] (👥)       busts in silhouette
+      | 0xF0 0x9F 0x91 0xA6..0xAB  #E0.6   [6] (👦..👫)    boy..woman and man ...
+      | 0xF0 0x9F 0x91 0xAC..0xAD  #E1.0   [2] (👬..👭)    men holding hands.....
+      | 0xF0 0x9F 0x91 0xAE..0xFF  #E0.6  [63] (👮..💬)    police officer..spe...
+      | 0xF0 0x9F 0x92 0x00..0xAC  #
+      | 0xF0 0x9F 0x92 0xAD     #E1.0   [1] (💭)       thought balloon
+      | 0xF0 0x9F 0x92 0xAE..0xB5  #E0.6   [8] (💮..💵)    white flower..dolla...
+      | 0xF0 0x9F 0x92 0xB6..0xB7  #E1.0   [2] (💶..💷)    euro banknote..poun...
+      | 0xF0 0x9F 0x92 0xB8..0xFF  #E0.6  [52] (💸..📫)    money with wings..c...
+      | 0xF0 0x9F 0x93 0x00..0xAB  #
+      | 0xF0 0x9F 0x93 0xAC..0xAD  #E0.7   [2] (📬..📭)    open mailbox with r...
+      | 0xF0 0x9F 0x93 0xAE     #E0.6   [1] (📮)       postbox
+      | 0xF0 0x9F 0x93 0xAF     #E1.0   [1] (📯)       postal horn
+      | 0xF0 0x9F 0x93 0xB0..0xB4  #E0.6   [5] (📰..📴)    newspaper..mobile p...
+      | 0xF0 0x9F 0x93 0xB5     #E1.0   [1] (📵)       no mobile phones
+      | 0xF0 0x9F 0x93 0xB6..0xB7  #E0.6   [2] (📶..📷)    antenna bars..camera
+      | 0xF0 0x9F 0x93 0xB8     #E1.0   [1] (📸)       camera with flash
+      | 0xF0 0x9F 0x93 0xB9..0xBC  #E0.6   [4] (📹..📼)    video camera..video...
+      | 0xF0 0x9F 0x93 0xBD     #E0.7   [1] (📽️)       film projector
+      | 0xF0 0x9F 0x93 0xBE     #E0.0   [1] (📾)       PORTABLE STEREO
+      | 0xF0 0x9F 0x93 0xBF..0xFF  #E1.0   [4] (📿..🔂)    prayer beads..repea...
+      | 0xF0 0x9F 0x94 0x00..0x82  #
+      | 0xF0 0x9F 0x94 0x83     #E0.6   [1] (🔃)       clockwise vertical arrows
+      | 0xF0 0x9F 0x94 0x84..0x87  #E1.0   [4] (🔄..🔇)    counterclockwise ar...
+      | 0xF0 0x9F 0x94 0x88     #E0.7   [1] (🔈)       speaker low volume
+      | 0xF0 0x9F 0x94 0x89     #E1.0   [1] (🔉)       speaker medium volume
+      | 0xF0 0x9F 0x94 0x8A..0x94  #E0.6  [11] (🔊..🔔)    speaker high volume...
+      | 0xF0 0x9F 0x94 0x95     #E1.0   [1] (🔕)       bell with slash
+      | 0xF0 0x9F 0x94 0x96..0xAB  #E0.6  [22] (🔖..🔫)    bookmark..water pistol
+      | 0xF0 0x9F 0x94 0xAC..0xAD  #E1.0   [2] (🔬..🔭)    microscope..telescope
+      | 0xF0 0x9F 0x94 0xAE..0xBD  #E0.6  [16] (🔮..🔽)    crystal ball..downw...
+      | 0xF0 0x9F 0x95 0x86..0x88  #E0.0   [3] (🕆..🕈)    WHITE LATIN CROSS.....
+      | 0xF0 0x9F 0x95 0x89..0x8A  #E0.7   [2] (🕉️..🕊️)    om..dove
+      | 0xF0 0x9F 0x95 0x8B..0x8E  #E1.0   [4] (🕋..🕎)    kaaba..menorah
+      | 0xF0 0x9F 0x95 0x8F     #E0.0   [1] (🕏)       BOWL OF HYGIEIA
+      | 0xF0 0x9F 0x95 0x90..0x9B  #E0.6  [12] (🕐..🕛)    one o’clock..twelve...
+      | 0xF0 0x9F 0x95 0x9C..0xA7  #E0.7  [12] (🕜..🕧)    one-thirty..twelve-...
+      | 0xF0 0x9F 0x95 0xA8..0xAE  #E0.0   [7] (🕨..🕮)    RIGHT SPEAKER..BOOK
+      | 0xF0 0x9F 0x95 0xAF..0xB0  #E0.7   [2] (🕯️..🕰️)    candle..mantelpie...
+      | 0xF0 0x9F 0x95 0xB1..0xB2  #E0.0   [2] (🕱..🕲)    BLACK SKULL AND CRO...
+      | 0xF0 0x9F 0x95 0xB3..0xB9  #E0.7   [7] (🕳️..🕹️)    hole..joystick
+      | 0xF0 0x9F 0x95 0xBA     #E3.0   [1] (🕺)       man dancing
+      | 0xF0 0x9F 0x95 0xBB..0xFF  #E0.0  [12] (🕻..🖆)    LEFT HAND TELEPHONE...
+      | 0xF0 0x9F 0x96 0x00..0x86  #
+      | 0xF0 0x9F 0x96 0x87     #E0.7   [1] (🖇️)       linked paperclips
+      | 0xF0 0x9F 0x96 0x88..0x89  #E0.0   [2] (🖈..🖉)    BLACK PUSHPIN..LOWE...
+      | 0xF0 0x9F 0x96 0x8A..0x8D  #E0.7   [4] (🖊️..🖍️)    pen..crayon
+      | 0xF0 0x9F 0x96 0x8E..0x8F  #E0.0   [2] (🖎..🖏)    LEFT WRITING HAND.....
+      | 0xF0 0x9F 0x96 0x90     #E0.7   [1] (🖐️)       hand with fingers spl...
+      | 0xF0 0x9F 0x96 0x91..0x94  #E0.0   [4] (🖑..🖔)    REVERSED RAISED HAN...
+      | 0xF0 0x9F 0x96 0x95..0x96  #E1.0   [2] (🖕..🖖)    middle finger..vulc...
+      | 0xF0 0x9F 0x96 0x97..0xA3  #E0.0  [13] (🖗..🖣)    WHITE DOWN POINTING...
+      | 0xF0 0x9F 0x96 0xA4     #E3.0   [1] (🖤)       black heart
+      | 0xF0 0x9F 0x96 0xA5     #E0.7   [1] (🖥️)       desktop computer
+      | 0xF0 0x9F 0x96 0xA6..0xA7  #E0.0   [2] (🖦..🖧)    KEYBOARD AND MOUSE....
+      | 0xF0 0x9F 0x96 0xA8     #E0.7   [1] (🖨️)       printer
+      | 0xF0 0x9F 0x96 0xA9..0xB0  #E0.0   [8] (🖩..🖰)    POCKET CALCULATOR.....
+      | 0xF0 0x9F 0x96 0xB1..0xB2  #E0.7   [2] (🖱️..🖲️)    computer mouse..t...
+      | 0xF0 0x9F 0x96 0xB3..0xBB  #E0.0   [9] (🖳..🖻)    OLD PERSONAL COMPUT...
+      | 0xF0 0x9F 0x96 0xBC     #E0.7   [1] (🖼️)       framed picture
+      | 0xF0 0x9F 0x96 0xBD..0xFF  #E0.0   [5] (🖽..🗁)    FRAME WITH TILES..O...
+      | 0xF0 0x9F 0x97 0x00..0x81  #
+      | 0xF0 0x9F 0x97 0x82..0x84  #E0.7   [3] (🗂️..🗄️)    card index divide...
+      | 0xF0 0x9F 0x97 0x85..0x90  #E0.0  [12] (🗅..🗐)    EMPTY NOTE..PAGES
+      | 0xF0 0x9F 0x97 0x91..0x93  #E0.7   [3] (🗑️..🗓️)    wastebasket..spir...
+      | 0xF0 0x9F 0x97 0x94..0x9B  #E0.0   [8] (🗔..🗛)    DESKTOP WINDOW..DEC...
+      | 0xF0 0x9F 0x97 0x9C..0x9E  #E0.7   [3] (🗜️..🗞️)    clamp..rolled-up ...
+      | 0xF0 0x9F 0x97 0x9F..0xA0  #E0.0   [2] (🗟..🗠)    PAGE WITH CIRCLED T...
+      | 0xF0 0x9F 0x97 0xA1     #E0.7   [1] (🗡️)       dagger
+      | 0xF0 0x9F 0x97 0xA2     #E0.0   [1] (🗢)       LIPS
+      | 0xF0 0x9F 0x97 0xA3     #E0.7   [1] (🗣️)       speaking head
+      | 0xF0 0x9F 0x97 0xA4..0xA7  #E0.0   [4] (🗤..🗧)    THREE RAYS ABOVE..T...
+      | 0xF0 0x9F 0x97 0xA8     #E2.0   [1] (🗨️)       left speech bubble
+      | 0xF0 0x9F 0x97 0xA9..0xAE  #E0.0   [6] (🗩..🗮)    RIGHT SPEECH BUBBLE...
+      | 0xF0 0x9F 0x97 0xAF     #E0.7   [1] (🗯️)       right anger bubble
+      | 0xF0 0x9F 0x97 0xB0..0xB2  #E0.0   [3] (🗰..🗲)    MOOD BUBBLE..LIGHTN...
+      | 0xF0 0x9F 0x97 0xB3     #E0.7   [1] (🗳️)       ballot box with ballot
+      | 0xF0 0x9F 0x97 0xB4..0xB9  #E0.0   [6] (🗴..🗹)    BALLOT SCRIPT X..BA...
+      | 0xF0 0x9F 0x97 0xBA     #E0.7   [1] (🗺️)       world map
+      | 0xF0 0x9F 0x97 0xBB..0xBF  #E0.6   [5] (🗻..🗿)    mount fuji..moai
+      | 0xF0 0x9F 0x98 0x80     #E1.0   [1] (😀)       grinning face
+      | 0xF0 0x9F 0x98 0x81..0x86  #E0.6   [6] (😁..😆)    beaming face with s...
+      | 0xF0 0x9F 0x98 0x87..0x88  #E1.0   [2] (😇..😈)    smiling face with h...
+      | 0xF0 0x9F 0x98 0x89..0x8D  #E0.6   [5] (😉..😍)    winking face..smili...
+      | 0xF0 0x9F 0x98 0x8E     #E1.0   [1] (😎)       smiling face with sung...
+      | 0xF0 0x9F 0x98 0x8F     #E0.6   [1] (😏)       smirking face
+      | 0xF0 0x9F 0x98 0x90     #E0.7   [1] (😐)       neutral face
+      | 0xF0 0x9F 0x98 0x91     #E1.0   [1] (😑)       expressionless face
+      | 0xF0 0x9F 0x98 0x92..0x94  #E0.6   [3] (😒..😔)    unamused face..pens...
+      | 0xF0 0x9F 0x98 0x95     #E1.0   [1] (😕)       confused face
+      | 0xF0 0x9F 0x98 0x96     #E0.6   [1] (😖)       confounded face
+      | 0xF0 0x9F 0x98 0x97     #E1.0   [1] (😗)       kissing face
+      | 0xF0 0x9F 0x98 0x98     #E0.6   [1] (😘)       face blowing a kiss
+      | 0xF0 0x9F 0x98 0x99     #E1.0   [1] (😙)       kissing face with smil...
+      | 0xF0 0x9F 0x98 0x9A     #E0.6   [1] (😚)       kissing face with clos...
+      | 0xF0 0x9F 0x98 0x9B     #E1.0   [1] (😛)       face with tongue
+      | 0xF0 0x9F 0x98 0x9C..0x9E  #E0.6   [3] (😜..😞)    winking face with t...
+      | 0xF0 0x9F 0x98 0x9F     #E1.0   [1] (😟)       worried face
+      | 0xF0 0x9F 0x98 0xA0..0xA5  #E0.6   [6] (😠..😥)    angry face..sad but...
+      | 0xF0 0x9F 0x98 0xA6..0xA7  #E1.0   [2] (😦..😧)    frowning face with ...
+      | 0xF0 0x9F 0x98 0xA8..0xAB  #E0.6   [4] (😨..😫)    fearful face..tired...
+      | 0xF0 0x9F 0x98 0xAC     #E1.0   [1] (😬)       grimacing face
+      | 0xF0 0x9F 0x98 0xAD     #E0.6   [1] (😭)       loudly crying face
+      | 0xF0 0x9F 0x98 0xAE..0xAF  #E1.0   [2] (😮..😯)    face with open mout...
+      | 0xF0 0x9F 0x98 0xB0..0xB3  #E0.6   [4] (😰..😳)    anxious face with s...
+      | 0xF0 0x9F 0x98 0xB4     #E1.0   [1] (😴)       sleeping face
+      | 0xF0 0x9F 0x98 0xB5     #E0.6   [1] (😵)       face with crossed-out ...
+      | 0xF0 0x9F 0x98 0xB6     #E1.0   [1] (😶)       face without mouth
+      | 0xF0 0x9F 0x98 0xB7..0xFF  #E0.6  [10] (😷..🙀)    face with medical m...
+      | 0xF0 0x9F 0x99 0x00..0x80  #
+      | 0xF0 0x9F 0x99 0x81..0x84  #E1.0   [4] (🙁..🙄)    slightly frowning f...
+      | 0xF0 0x9F 0x99 0x85..0x8F  #E0.6  [11] (🙅..🙏)    person gesturing NO...
+      | 0xF0 0x9F 0x9A 0x80     #E0.6   [1] (🚀)       rocket
+      | 0xF0 0x9F 0x9A 0x81..0x82  #E1.0   [2] (🚁..🚂)    helicopter..locomotive
+      | 0xF0 0x9F 0x9A 0x83..0x85  #E0.6   [3] (🚃..🚅)    railway car..bullet...
+      | 0xF0 0x9F 0x9A 0x86     #E1.0   [1] (🚆)       train
+      | 0xF0 0x9F 0x9A 0x87     #E0.6   [1] (🚇)       metro
+      | 0xF0 0x9F 0x9A 0x88     #E1.0   [1] (🚈)       light rail
+      | 0xF0 0x9F 0x9A 0x89     #E0.6   [1] (🚉)       station
+      | 0xF0 0x9F 0x9A 0x8A..0x8B  #E1.0   [2] (🚊..🚋)    tram..tram car
+      | 0xF0 0x9F 0x9A 0x8C     #E0.6   [1] (🚌)       bus
+      | 0xF0 0x9F 0x9A 0x8D     #E0.7   [1] (🚍)       oncoming bus
+      | 0xF0 0x9F 0x9A 0x8E     #E1.0   [1] (🚎)       trolleybus
+      | 0xF0 0x9F 0x9A 0x8F     #E0.6   [1] (🚏)       bus stop
+      | 0xF0 0x9F 0x9A 0x90     #E1.0   [1] (🚐)       minibus
+      | 0xF0 0x9F 0x9A 0x91..0x93  #E0.6   [3] (🚑..🚓)    ambulance..police car
+      | 0xF0 0x9F 0x9A 0x94     #E0.7   [1] (🚔)       oncoming police car
+      | 0xF0 0x9F 0x9A 0x95     #E0.6   [1] (🚕)       taxi
+      | 0xF0 0x9F 0x9A 0x96     #E1.0   [1] (🚖)       oncoming taxi
+      | 0xF0 0x9F 0x9A 0x97     #E0.6   [1] (🚗)       automobile
+      | 0xF0 0x9F 0x9A 0x98     #E0.7   [1] (🚘)       oncoming automobile
+      | 0xF0 0x9F 0x9A 0x99..0x9A  #E0.6   [2] (🚙..🚚)    sport utility vehic...
+      | 0xF0 0x9F 0x9A 0x9B..0xA1  #E1.0   [7] (🚛..🚡)    articulated lorry.....
+      | 0xF0 0x9F 0x9A 0xA2     #E0.6   [1] (🚢)       ship
+      | 0xF0 0x9F 0x9A 0xA3     #E1.0   [1] (🚣)       person rowing boat
+      | 0xF0 0x9F 0x9A 0xA4..0xA5  #E0.6   [2] (🚤..🚥)    speedboat..horizont...
+      | 0xF0 0x9F 0x9A 0xA6     #E1.0   [1] (🚦)       vertical traffic light
+      | 0xF0 0x9F 0x9A 0xA7..0xAD  #E0.6   [7] (🚧..🚭)    construction..no sm...
+      | 0xF0 0x9F 0x9A 0xAE..0xB1  #E1.0   [4] (🚮..🚱)    litter in bin sign....
+      | 0xF0 0x9F 0x9A 0xB2     #E0.6   [1] (🚲)       bicycle
+      | 0xF0 0x9F 0x9A 0xB3..0xB5  #E1.0   [3] (🚳..🚵)    no bicycles..person...
+      | 0xF0 0x9F 0x9A 0xB6     #E0.6   [1] (🚶)       person walking
+      | 0xF0 0x9F 0x9A 0xB7..0xB8  #E1.0   [2] (🚷..🚸)    no pedestrians..chi...
+      | 0xF0 0x9F 0x9A 0xB9..0xBE  #E0.6   [6] (🚹..🚾)    men’s room..water c...
+      | 0xF0 0x9F 0x9A 0xBF     #E1.0   [1] (🚿)       shower
+      | 0xF0 0x9F 0x9B 0x80     #E0.6   [1] (🛀)       person taking bath
+      | 0xF0 0x9F 0x9B 0x81..0x85  #E1.0   [5] (🛁..🛅)    bathtub..left luggage
+      | 0xF0 0x9F 0x9B 0x86..0x8A  #E0.0   [5] (🛆..🛊)    TRIANGLE WITH ROUND...
+      | 0xF0 0x9F 0x9B 0x8B     #E0.7   [1] (🛋️)       couch and lamp
+      | 0xF0 0x9F 0x9B 0x8C     #E1.0   [1] (🛌)       person in bed
+      | 0xF0 0x9F 0x9B 0x8D..0x8F  #E0.7   [3] (🛍️..🛏️)    shopping bags..bed
+      | 0xF0 0x9F 0x9B 0x90     #E1.0   [1] (🛐)       place of worship
+      | 0xF0 0x9F 0x9B 0x91..0x92  #E3.0   [2] (🛑..🛒)    stop sign..shopping...
+      | 0xF0 0x9F 0x9B 0x93..0x94  #E0.0   [2] (🛓..🛔)    STUPA..PAGODA
+      | 0xF0 0x9F 0x9B 0x95     #E12.0  [1] (🛕)       hindu temple
+      | 0xF0 0x9F 0x9B 0x96..0x97  #E13.0  [2] (🛖..🛗)    hut..elevator
+      | 0xF0 0x9F 0x9B 0x98..0x9B  #E0.0   [4] (🛘..🛛)    <reserved-1F6D8>..<...
+      | 0xF0 0x9F 0x9B 0x9C     #E15.0  [1] (🛜)       wireless
+      | 0xF0 0x9F 0x9B 0x9D..0x9F  #E14.0  [3] (🛝..🛟)    playground slide..r...
+      | 0xF0 0x9F 0x9B 0xA0..0xA5  #E0.7   [6] (🛠️..🛥️)    hammer and wrench...
+      | 0xF0 0x9F 0x9B 0xA6..0xA8  #E0.0   [3] (🛦..🛨)    UP-POINTING MILITAR...
+      | 0xF0 0x9F 0x9B 0xA9     #E0.7   [1] (🛩️)       small airplane
+      | 0xF0 0x9F 0x9B 0xAA     #E0.0   [1] (🛪)       NORTHEAST-POINTING AIR...
+      | 0xF0 0x9F 0x9B 0xAB..0xAC  #E1.0   [2] (🛫..🛬)    airplane departure....
+      | 0xF0 0x9F 0x9B 0xAD..0xAF  #E0.0   [3] (🛭..🛯)    <reserved-1F6ED>..<...
+      | 0xF0 0x9F 0x9B 0xB0     #E0.7   [1] (🛰️)       satellite
+      | 0xF0 0x9F 0x9B 0xB1..0xB2  #E0.0   [2] (🛱..🛲)    ONCOMING FIRE ENGIN...
+      | 0xF0 0x9F 0x9B 0xB3     #E0.7   [1] (🛳️)       passenger ship
+      | 0xF0 0x9F 0x9B 0xB4..0xB6  #E3.0   [3] (🛴..🛶)    kick scooter..canoe
+      | 0xF0 0x9F 0x9B 0xB7..0xB8  #E5.0   [2] (🛷..🛸)    sled..flying saucer
+      | 0xF0 0x9F 0x9B 0xB9     #E11.0  [1] (🛹)       skateboard
+      | 0xF0 0x9F 0x9B 0xBA     #E12.0  [1] (🛺)       auto rickshaw
+      | 0xF0 0x9F 0x9B 0xBB..0xBC  #E13.0  [2] (🛻..🛼)    pickup truck..rolle...
+      | 0xF0 0x9F 0x9B 0xBD..0xBF  #E0.0   [3] (🛽..🛿)    <reserved-1F6FD>..<...
+      | 0xF0 0x9F 0x9D 0xB4..0xBF  #E0.0  [12] (🝴..🝿)    LOT OF FORTUNE..ORCUS
+      | 0xF0 0x9F 0x9F 0x95..0x9F  #E0.0  [11] (🟕..🟟)    CIRCLED TRIANGLE..<...
+      | 0xF0 0x9F 0x9F 0xA0..0xAB  #E12.0 [12] (🟠..🟫)    orange circle..brow...
+      | 0xF0 0x9F 0x9F 0xAC..0xAF  #E0.0   [4] (🟬..🟯)    <reserved-1F7EC>..<...
+      | 0xF0 0x9F 0x9F 0xB0     #E14.0  [1] (🟰)       heavy equals sign
+      | 0xF0 0x9F 0x9F 0xB1..0xBF  #E0.0  [15] (🟱..🟿)    <reserved-1F7F1>..<...
+      | 0xF0 0x9F 0xA0 0x8C..0x8F  #E0.0   [4] (🠌..🠏)    <reserved-1F80C>..<...
+      | 0xF0 0x9F 0xA1 0x88..0x8F  #E0.0   [8] (🡈..🡏)    <reserved-1F848>..<...
+      | 0xF0 0x9F 0xA1 0x9A..0x9F  #E0.0   [6] (🡚..🡟)    <reserved-1F85A>..<...
+      | 0xF0 0x9F 0xA2 0x88..0x8F  #E0.0   [8] (🢈..🢏)    <reserved-1F888>..<...
+      | 0xF0 0x9F 0xA2 0xAE..0xFF  #E0.0  [82] (🢮..🣿)    <reserved-1F8AE>..<...
+      | 0xF0 0x9F 0xA3 0x00..0xBF  #
+      | 0xF0 0x9F 0xA4 0x8C     #E13.0  [1] (🤌)       pinched fingers
+      | 0xF0 0x9F 0xA4 0x8D..0x8F  #E12.0  [3] (🤍..🤏)    white heart..pinchi...
+      | 0xF0 0x9F 0xA4 0x90..0x98  #E1.0   [9] (🤐..🤘)    zipper-mouth face.....
+      | 0xF0 0x9F 0xA4 0x99..0x9E  #E3.0   [6] (🤙..🤞)    call me hand..cross...
+      | 0xF0 0x9F 0xA4 0x9F     #E5.0   [1] (🤟)       love-you gesture
+      | 0xF0 0x9F 0xA4 0xA0..0xA7  #E3.0   [8] (🤠..🤧)    cowboy hat face..sn...
+      | 0xF0 0x9F 0xA4 0xA8..0xAF  #E5.0   [8] (🤨..🤯)    face with raised ey...
+      | 0xF0 0x9F 0xA4 0xB0     #E3.0   [1] (🤰)       pregnant woman
+      | 0xF0 0x9F 0xA4 0xB1..0xB2  #E5.0   [2] (🤱..🤲)    breast-feeding..pal...
+      | 0xF0 0x9F 0xA4 0xB3..0xBA  #E3.0   [8] (🤳..🤺)    selfie..person fencing
+      | 0xF0 0x9F 0xA4 0xBC..0xBE  #E3.0   [3] (🤼..🤾)    people wrestling..p...
+      | 0xF0 0x9F 0xA4 0xBF     #E12.0  [1] (🤿)       diving mask
+      | 0xF0 0x9F 0xA5 0x80..0x85  #E3.0   [6] (🥀..🥅)    wilted flower..goal...
+      | 0xF0 0x9F 0xA5 0x87..0x8B  #E3.0   [5] (🥇..🥋)    1st place medal..ma...
+      | 0xF0 0x9F 0xA5 0x8C     #E5.0   [1] (🥌)       curling stone
+      | 0xF0 0x9F 0xA5 0x8D..0x8F  #E11.0  [3] (🥍..🥏)    lacrosse..flying disc
+      | 0xF0 0x9F 0xA5 0x90..0x9E  #E3.0  [15] (🥐..🥞)    croissant..pancakes
+      | 0xF0 0x9F 0xA5 0x9F..0xAB  #E5.0  [13] (🥟..🥫)    dumpling..canned food
+      | 0xF0 0x9F 0xA5 0xAC..0xB0  #E11.0  [5] (🥬..🥰)    leafy green..smilin...
+      | 0xF0 0x9F 0xA5 0xB1     #E12.0  [1] (🥱)       yawning face
+      | 0xF0 0x9F 0xA5 0xB2     #E13.0  [1] (🥲)       smiling face with tear
+      | 0xF0 0x9F 0xA5 0xB3..0xB6  #E11.0  [4] (🥳..🥶)    partying face..cold...
+      | 0xF0 0x9F 0xA5 0xB7..0xB8  #E13.0  [2] (🥷..🥸)    ninja..disguised face
+      | 0xF0 0x9F 0xA5 0xB9     #E14.0  [1] (🥹)       face holding back tears
+      | 0xF0 0x9F 0xA5 0xBA     #E11.0  [1] (🥺)       pleading face
+      | 0xF0 0x9F 0xA5 0xBB     #E12.0  [1] (🥻)       sari
+      | 0xF0 0x9F 0xA5 0xBC..0xBF  #E11.0  [4] (🥼..🥿)    lab coat..flat shoe
+      | 0xF0 0x9F 0xA6 0x80..0x84  #E1.0   [5] (🦀..🦄)    crab..unicorn
+      | 0xF0 0x9F 0xA6 0x85..0x91  #E3.0  [13] (🦅..🦑)    eagle..squid
+      | 0xF0 0x9F 0xA6 0x92..0x97  #E5.0   [6] (🦒..🦗)    giraffe..cricket
+      | 0xF0 0x9F 0xA6 0x98..0xA2  #E11.0 [11] (🦘..🦢)    kangaroo..swan
+      | 0xF0 0x9F 0xA6 0xA3..0xA4  #E13.0  [2] (🦣..🦤)    mammoth..dodo
+      | 0xF0 0x9F 0xA6 0xA5..0xAA  #E12.0  [6] (🦥..🦪)    sloth..oyster
+      | 0xF0 0x9F 0xA6 0xAB..0xAD  #E13.0  [3] (🦫..🦭)    beaver..seal
+      | 0xF0 0x9F 0xA6 0xAE..0xAF  #E12.0  [2] (🦮..🦯)    guide dog..white cane
+      | 0xF0 0x9F 0xA6 0xB0..0xB9  #E11.0 [10] (🦰..🦹)    red hair..supervillain
+      | 0xF0 0x9F 0xA6 0xBA..0xBF  #E12.0  [6] (🦺..🦿)    safety vest..mechan...
+      | 0xF0 0x9F 0xA7 0x80     #E1.0   [1] (🧀)       cheese wedge
+      | 0xF0 0x9F 0xA7 0x81..0x82  #E11.0  [2] (🧁..🧂)    cupcake..salt
+      | 0xF0 0x9F 0xA7 0x83..0x8A  #E12.0  [8] (🧃..🧊)    beverage box..ice
+      | 0xF0 0x9F 0xA7 0x8B     #E13.0  [1] (🧋)       bubble tea
+      | 0xF0 0x9F 0xA7 0x8C     #E14.0  [1] (🧌)       troll
+      | 0xF0 0x9F 0xA7 0x8D..0x8F  #E12.0  [3] (🧍..🧏)    person standing..de...
+      | 0xF0 0x9F 0xA7 0x90..0xA6  #E5.0  [23] (🧐..🧦)    face with monocle.....
+      | 0xF0 0x9F 0xA7 0xA7..0xBF  #E11.0 [25] (🧧..🧿)    red envelope..nazar...
+      | 0xF0 0x9F 0xA8 0x80..0xFF  #E0.0 [112] (🨀..🩯)    NEUTRAL CHESS KING....
+      | 0xF0 0x9F 0xA9 0x00..0xAF  #
+      | 0xF0 0x9F 0xA9 0xB0..0xB3  #E12.0  [4] (🩰..🩳)    ballet shoes..shorts
+      | 0xF0 0x9F 0xA9 0xB4     #E13.0  [1] (🩴)       thong sandal
+      | 0xF0 0x9F 0xA9 0xB5..0xB7  #E15.0  [3] (🩵..🩷)    light blue heart..p...
+      | 0xF0 0x9F 0xA9 0xB8..0xBA  #E12.0  [3] (🩸..🩺)    drop of blood..stet...
+      | 0xF0 0x9F 0xA9 0xBB..0xBC  #E14.0  [2] (🩻..🩼)    x-ray..crutch
+      | 0xF0 0x9F 0xA9 0xBD..0xBF  #E0.0   [3] (🩽..🩿)    <reserved-1FA7D>..<...
+      | 0xF0 0x9F 0xAA 0x80..0x82  #E12.0  [3] (🪀..🪂)    yo-yo..parachute
+      | 0xF0 0x9F 0xAA 0x83..0x86  #E13.0  [4] (🪃..🪆)    boomerang..nesting ...
+      | 0xF0 0x9F 0xAA 0x87..0x88  #E15.0  [2] (🪇..🪈)    maracas..flute
+      | 0xF0 0x9F 0xAA 0x89..0x8F  #E0.0   [7] (🪉..🪏)    <reserved-1FA89>..<...
+      | 0xF0 0x9F 0xAA 0x90..0x95  #E12.0  [6] (🪐..🪕)    ringed planet..banjo
+      | 0xF0 0x9F 0xAA 0x96..0xA8  #E13.0 [19] (🪖..🪨)    military helmet..rock
+      | 0xF0 0x9F 0xAA 0xA9..0xAC  #E14.0  [4] (🪩..🪬)    mirror ball..hamsa
+      | 0xF0 0x9F 0xAA 0xAD..0xAF  #E15.0  [3] (🪭..🪯)    folding hand fan..k...
+      | 0xF0 0x9F 0xAA 0xB0..0xB6  #E13.0  [7] (🪰..🪶)    fly..feather
+      | 0xF0 0x9F 0xAA 0xB7..0xBA  #E14.0  [4] (🪷..🪺)    lotus..nest with eggs
+      | 0xF0 0x9F 0xAA 0xBB..0xBD  #E15.0  [3] (🪻..🪽)    hyacinth..wing
+      | 0xF0 0x9F 0xAA 0xBE     #E0.0   [1] (🪾)       <reserved-1FABE>
+      | 0xF0 0x9F 0xAA 0xBF     #E15.0  [1] (🪿)       goose
+      | 0xF0 0x9F 0xAB 0x80..0x82  #E13.0  [3] (🫀..🫂)    anatomical heart..p...
+      | 0xF0 0x9F 0xAB 0x83..0x85  #E14.0  [3] (🫃..🫅)    pregnant man..perso...
+      | 0xF0 0x9F 0xAB 0x86..0x8D  #E0.0   [8] (🫆..🫍)    <reserved-1FAC6>..<...
+      | 0xF0 0x9F 0xAB 0x8E..0x8F  #E15.0  [2] (🫎..🫏)    moose..donkey
+      | 0xF0 0x9F 0xAB 0x90..0x96  #E13.0  [7] (🫐..🫖)    blueberries..teapot
+      | 0xF0 0x9F 0xAB 0x97..0x99  #E14.0  [3] (🫗..🫙)    pouring liquid..jar
+      | 0xF0 0x9F 0xAB 0x9A..0x9B  #E15.0  [2] (🫚..🫛)    ginger root..pea pod
+      | 0xF0 0x9F 0xAB 0x9C..0x9F  #E0.0   [4] (🫜..🫟)    <reserved-1FADC>..<...
+      | 0xF0 0x9F 0xAB 0xA0..0xA7  #E14.0  [8] (🫠..🫧)    melting face..bubbles
+      | 0xF0 0x9F 0xAB 0xA8     #E15.0  [1] (🫨)       shaking face
+      | 0xF0 0x9F 0xAB 0xA9..0xAF  #E0.0   [7] (🫩..🫯)    <reserved-1FAE9>..<...
+      | 0xF0 0x9F 0xAB 0xB0..0xB6  #E14.0  [7] (🫰..🫶)    hand with index fin...
+      | 0xF0 0x9F 0xAB 0xB7..0xB8  #E15.0  [2] (🫷..🫸)    leftwards pushing h...
+      | 0xF0 0x9F 0xAB 0xB9..0xBF  #E0.0   [7] (🫹..🫿)    <reserved-1FAF9>..<...
+      | 0xF0 0x9F 0xB0 0x80..0xFF        #E0.0[1022] (🰀..🿽)    <reserved-1FC...
+      | 0xF0 0x9F 0xB1..0xBE 0x00..0xFF  #
+      | 0xF0 0x9F 0xBF 0x00..0xBD        #
+      ;
+
+}%%
diff -pruN 0.19.3+ds1-4/vendor/github.com/apparentlymart/go-textseg/v15/textseg/generate.go 0.21.3-0ubuntu1/vendor/github.com/apparentlymart/go-textseg/v15/textseg/generate.go
--- 0.19.3+ds1-4/vendor/github.com/apparentlymart/go-textseg/v15/textseg/generate.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/apparentlymart/go-textseg/v15/textseg/generate.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,8 @@
+package textseg
+
+//go:generate go run make_tables.go -output tables.go
+//go:generate go run make_test_tables.go -output tables_test.go
+//go:generate ruby unicode2ragel.rb --url=https://www.unicode.org/Public/15.0.0/ucd/auxiliary/GraphemeBreakProperty.txt -m GraphemeCluster -p "Prepend,CR,LF,Control,Extend,Regional_Indicator,SpacingMark,L,V,T,LV,LVT,ZWJ" -o grapheme_clusters_table.rl
+//go:generate ruby unicode2ragel.rb --url=https://www.unicode.org/Public/15.0.0/ucd/emoji/emoji-data.txt -m Emoji -p "Extended_Pictographic" -o emoji_table.rl
+//go:generate ragel -Z grapheme_clusters.rl
+//go:generate gofmt -w grapheme_clusters.go
diff -pruN 0.19.3+ds1-4/vendor/github.com/apparentlymart/go-textseg/v15/textseg/grapheme_clusters.go 0.21.3-0ubuntu1/vendor/github.com/apparentlymart/go-textseg/v15/textseg/grapheme_clusters.go
--- 0.19.3+ds1-4/vendor/github.com/apparentlymart/go-textseg/v15/textseg/grapheme_clusters.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/apparentlymart/go-textseg/v15/textseg/grapheme_clusters.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,4349 @@
+//line grapheme_clusters.rl:1
+package textseg
+
+import (
+	"errors"
+	"unicode/utf8"
+)
+
+// Generated from grapheme_clusters.rl. DO NOT EDIT
+
+//line grapheme_clusters.go:13
+var _graphclust_actions []byte = []byte{
+	0, 1, 0, 1, 4, 1, 11, 1, 12,
+	1, 13, 1, 14, 1, 15, 1, 16,
+	1, 17, 1, 18, 1, 19, 1, 20,
+	1, 21, 1, 22, 1, 23, 2, 1,
+	9, 2, 1, 10, 2, 2, 3, 2,
+	5, 1, 3, 0, 1, 10, 3, 5,
+	1, 6, 3, 5, 1, 7, 3, 5,
+	1, 8, 4, 5, 0, 1, 8,
+}
+
+var _graphclust_key_offsets []int16 = []int16{
+	0, 0, 1, 3, 5, 7, 10, 15,
+	17, 20, 28, 31, 33, 35, 38, 69,
+	77, 79, 81, 85, 88, 93, 98, 110,
+	122, 130, 135, 145, 148, 155, 160, 168,
+	179, 185, 193, 195, 203, 206, 208, 211,
+	213, 220, 222, 230, 231, 253, 257, 263,
+	268, 270, 274, 278, 280, 284, 287, 290,
+	294, 296, 303, 305, 307, 311, 315, 319,
+	321, 323, 331, 333, 338, 340, 342, 344,
+	345, 347, 349, 351, 353, 368, 372, 374,
+	376, 382, 386, 392, 394, 396, 400, 404,
+	406, 410, 417, 422, 426, 429, 430, 434,
+	443, 453, 454, 455, 457, 466, 468, 470,
+	472, 474, 476, 478, 516, 520, 526, 530,
+	531, 535, 538, 542, 547, 550, 551, 553,
+	559, 572, 574, 577, 579, 583, 587, 589,
+	591, 595, 597, 603, 606, 611, 617, 620,
+	622, 626, 630, 637, 640, 646, 648, 655,
+	657, 658, 661, 666, 668, 670, 673, 677,
+	680, 681, 683, 685, 689, 691, 697, 703,
+	709, 711, 715, 719, 724, 732, 742, 743,
+	744, 746, 748, 750, 751, 753, 754, 760,
+	762, 764, 764, 771, 773, 775, 777, 780,
+	785, 787, 790, 798, 801, 803, 805, 808,
+	839, 847, 849, 851, 855, 858, 863, 868,
+	880, 892, 900, 905, 915, 918, 925, 930,
+	938, 949, 955, 963, 965, 973, 976, 978,
+	981, 983, 990, 992, 1000, 1001, 1023, 1027,
+	1033, 1038, 1040, 1044, 1048, 1050, 1054, 1057,
+	1060, 1064, 1066, 1073, 1075, 1077, 1081, 1085,
+	1089, 1091, 1093, 1101, 1103, 1108, 1110, 1112,
+	1136, 1139, 1140, 1142, 1144, 1148, 1151, 1152,
+	1157, 1158, 1161, 1164, 1170, 1174, 1174, 1188,
+	1197, 1202, 1204, 1208, 1210, 1212, 1213, 1215,
+	1218, 1221, 1223, 1225, 1240, 1244, 1246, 1248,
+	1254, 1258, 1264, 1266, 1268, 1272, 1276, 1278,
+	1282, 1289, 1294, 1298, 1301, 1302, 1306, 1315,
+	1325, 1326, 1327, 1329, 1338, 1340, 1342, 1344,
+	1346, 1348, 1350, 1388, 1392, 1398, 1402, 1406,
+	1409, 1413, 1418, 1421, 1422, 1424, 1430, 1443,
+	1445, 1448, 1450, 1454, 1458, 1460, 1462, 1466,
+	1468, 1474, 1477, 1482, 1488, 1491, 1493, 1497,
+	1501, 1508, 1511, 1517, 1519, 1526, 1528, 1529,
+	1532, 1537, 1539, 1541, 1544, 1548, 1551, 1552,
+	1554, 1556, 1560, 1562, 1568, 1574, 1580, 1582,
+	1586, 1590, 1595, 1603, 1613, 1614, 1615, 1617,
+	1619, 1621, 1661, 1663, 1666, 1670, 1675, 1677,
+	1685, 1687, 1689, 1691, 1693, 1695, 1697, 1699,
+	1703, 1707, 1711, 1715, 1716, 1722, 1724, 1726,
+	1728, 1737, 1738, 1740, 1745, 1747, 1749, 1751,
+	1754, 1759, 1761, 1764, 1772, 1775, 1777, 1779,
+	1782, 1813, 1821, 1823, 1825, 1829, 1832, 1837,
+	1842, 1854, 1866, 1874, 1879, 1889, 1892, 1899,
+	1904, 1912, 1923, 1929, 1937, 1939, 1947, 1950,
+	1952, 1955, 1957, 1964, 1966, 1974, 1975, 1997,
+	2001, 2007, 2012, 2014, 2018, 2022, 2024, 2028,
+	2031, 2034, 2038, 2040, 2047, 2049, 2051, 2055,
+	2059, 2063, 2065, 2067, 2075, 2077, 2082, 2084,
+	2086, 2088, 2089, 2091, 2093, 2095, 2097, 2112,
+	2116, 2118, 2120, 2126, 2130, 2136, 2138, 2140,
+	2144, 2148, 2150, 2154, 2161, 2166, 2170, 2173,
+	2174, 2178, 2187, 2197, 2198, 2199, 2201, 2210,
+	2212, 2214, 2216, 2218, 2220, 2222, 2260, 2264,
+	2270, 2274, 2275, 2279, 2282, 2286, 2291, 2294,
+	2295, 2297, 2303, 2316, 2318, 2321, 2323, 2327,
+	2331, 2333, 2335, 2339, 2341, 2347, 2350, 2355,
+	2361, 2364, 2366, 2370, 2374, 2381, 2384, 2390,
+	2392, 2399, 2401, 2402, 2405, 2410, 2412, 2414,
+	2417, 2421, 2424, 2425, 2427, 2429, 2433, 2435,
+	2441, 2447, 2453, 2455, 2459, 2463, 2468, 2476,
+	2486, 2487, 2488, 2490, 2492, 2494, 2495, 2497,
+	2498, 2504, 2506, 2508, 2508, 2514, 2516, 2518,
+	2520, 2523, 2528, 2530, 2533, 2541, 2544, 2546,
+	2548, 2551, 2582, 2590, 2592, 2594, 2598, 2601,
+	2606, 2611, 2623, 2635, 2643, 2648, 2658, 2661,
+	2668, 2673, 2681, 2692, 2698, 2706, 2708, 2716,
+	2719, 2721, 2724, 2726, 2733, 2735, 2743, 2744,
+	2766, 2770, 2776, 2781, 2783, 2787, 2791, 2793,
+	2797, 2800, 2803, 2807, 2809, 2816, 2818, 2820,
+	2824, 2828, 2832, 2834, 2836, 2844, 2846, 2851,
+	2853, 2855, 2879, 2882, 2883, 2885, 2887, 2891,
+	2894, 2895, 2900, 2901, 2904, 2907, 2913, 2917,
+	2917, 2931, 2940, 2945, 2947, 2951, 2953, 2955,
+	2956, 2958, 2961, 2964, 2966, 2968, 2983, 2987,
+	2989, 2991, 2997, 3001, 3007, 3009, 3011, 3015,
+	3019, 3021, 3025, 3032, 3037, 3041, 3044, 3045,
+	3049, 3058, 3068, 3069, 3070, 3072, 3081, 3083,
+	3085, 3087, 3089, 3091, 3093, 3131, 3135, 3141,
+	3145, 3149, 3152, 3156, 3161, 3164, 3165, 3167,
+	3173, 3186, 3188, 3191, 3193, 3197, 3201, 3203,
+	3205, 3209, 3211, 3217, 3220, 3225, 3231, 3234,
+	3236, 3240, 3244, 3251, 3254, 3260, 3262, 3269,
+	3271, 3272, 3275, 3280, 3282, 3284, 3287, 3291,
+	3294, 3295, 3297, 3299, 3303, 3305, 3311, 3317,
+	3323, 3325, 3329, 3333, 3338, 3346, 3356, 3357,
+	3358, 3360, 3362, 3364, 3404, 3406, 3409, 3413,
+	3418, 3420, 3428, 3430, 3432, 3434, 3436, 3438,
+	3440, 3442, 3446, 3450, 3454, 3458, 3459, 3465,
+	3467, 3469, 3471, 3480, 3481, 3483, 3489, 3492,
+	3495, 3500, 3506, 3509, 3512, 3519, 3521, 3546,
+	3548, 3573, 3575, 3577, 3601, 3603, 3605, 3606,
+	3608, 3610, 3612, 3618, 3620, 3652, 3656, 3661,
+	3685, 3687, 3689, 3691, 3693, 3696, 3698, 3700,
+	3704, 3704, 3760, 3816, 3847, 3852, 3856, 3878,
+	3887, 3892, 3896, 3906, 3913, 3916, 3927, 3930,
+	3937, 3943, 3947, 3953, 3969, 3984, 3993, 3999,
+	4009, 4013, 4017, 4021, 4025, 4027, 4047, 4053,
+	4058, 4060, 4062, 4065, 4067, 4069, 4073, 4129,
+	4185, 4218, 4223, 4231, 4235, 4237, 4242, 4249,
+	4260, 4263, 4266, 4272, 4275, 4278, 4281, 4287,
+	4290, 4293, 4296, 4298, 4301, 4305, 4308, 4312,
+	4354, 4361, 4369, 4378, 4382, 4391, 4393, 4395,
+	4405, 4409, 4413, 4417, 4421, 4425, 4429, 4433,
+	4439, 4449, 4457, 4462, 4465, 4467, 4470, 4475,
+	4478, 4481, 4486, 4492, 4495, 4498, 4505, 4507,
+	4509, 4511, 4513, 4516, 4521, 4523, 4526, 4534,
+	4537, 4539, 4541, 4544, 4575, 4583, 4585, 4587,
+	4591, 4594, 4599, 4604, 4616, 4628, 4636, 4641,
+	4651, 4654, 4661, 4666, 4674, 4685, 4691, 4699,
+	4701, 4709, 4712, 4714, 4717, 4719, 4726, 4728,
+	4736, 4737, 4759, 4763, 4769, 4774, 4776, 4780,
+	4784, 4786, 4790, 4793, 4796, 4800, 4802, 4809,
+	4811, 4813, 4817, 4821, 4825, 4827, 4829, 4837,
+	4839, 4844, 4846, 4848, 4872, 4875, 4876, 4878,
+	4880, 4884, 4887, 4888, 4893, 4894, 4897, 4900,
+	4906, 4910, 4910, 4924, 4933, 4938, 4940, 4944,
+	4946, 4948, 4949, 4951, 4954, 4957, 4959, 4961,
+	4976, 4980, 4982, 4984, 4990, 4994, 5000, 5002,
+	5004, 5008, 5012, 5014, 5018, 5025, 5030, 5034,
+	5037, 5038, 5042, 5051, 5061, 5062, 5063, 5065,
+	5074, 5076, 5078, 5080, 5082, 5084, 5086, 5124,
+	5128, 5134, 5138, 5142, 5145, 5149, 5154, 5157,
+	5158, 5160, 5166, 5179, 5181, 5184, 5186, 5190,
+	5194, 5196, 5198, 5202, 5204, 5210, 5213, 5218,
+	5224, 5227, 5229, 5233, 5237, 5244, 5247, 5253,
+	5255, 5262, 5264, 5265, 5268, 5273, 5275, 5277,
+	5280, 5284, 5287, 5288, 5290, 5292, 5296, 5298,
+	5304, 5310, 5316, 5318, 5322, 5326, 5331, 5339,
+	5349, 5350, 5351, 5353, 5355, 5357, 5397, 5399,
+	5402, 5406, 5411, 5413, 5421, 5423, 5425, 5427,
+	5429, 5431, 5433, 5435, 5439, 5443, 5447, 5451,
+	5452, 5458, 5460, 5462, 5464, 5473, 5474, 5476,
+	5501, 5503, 5528, 5530, 5532, 5556, 5558, 5560,
+	5561, 5563, 5565, 5567, 5573, 5575, 5607, 5611,
+	5616, 5640, 5642, 5644, 5646, 5648, 5651, 5653,
+	5655, 5659, 5659, 5715, 5771, 5802, 5807, 5810,
+	5832, 5845, 5847, 5849, 5851, 5854, 5859, 5861,
+	5864, 5872, 5875, 5877, 5879, 5882, 5913, 5921,
+	5923, 5925, 5929, 5932, 5937, 5942, 5954, 5966,
+	5974, 5979, 5989, 5992, 5999, 6004, 6012, 6023,
+	6029, 6037, 6039, 6047, 6050, 6052, 6055, 6057,
+	6064, 6066, 6074, 6075, 6097, 6101, 6107, 6112,
+	6114, 6118, 6122, 6124, 6128, 6131, 6134, 6138,
+	6140, 6147, 6149, 6151, 6155, 6159, 6163, 6165,
+	6167, 6175, 6177, 6182, 6184, 6186, 6188, 6189,
+	6191, 6193, 6195, 6197, 6212, 6216, 6218, 6220,
+	6226, 6230, 6236, 6238, 6240, 6244, 6248, 6250,
+	6254, 6261, 6266, 6270, 6273, 6274, 6278, 6287,
+	6297, 6298, 6299, 6301, 6310, 6312, 6314, 6316,
+	6318, 6320, 6322, 6360, 6364, 6370, 6374, 6375,
+	6379, 6382, 6386, 6391, 6394, 6395, 6397, 6403,
+	6416, 6418, 6421, 6423, 6427, 6431, 6433, 6435,
+	6439, 6441, 6447, 6450, 6455, 6461, 6464, 6466,
+	6470, 6474, 6481, 6484, 6490, 6492, 6499, 6501,
+	6502, 6505, 6510, 6512, 6514, 6517, 6521, 6524,
+	6525, 6527, 6529, 6533, 6535, 6541, 6547, 6553,
+	6555, 6559, 6563, 6568, 6576, 6586, 6587, 6588,
+	6590, 6592, 6594, 6595, 6597, 6598, 6604, 6606,
+	6608, 6608, 6615, 6619, 6629, 6636, 6639, 6650,
+	6653, 6660, 6666, 6670, 6676, 6692, 6707, 6716,
+	6722, 6732, 6736, 6740, 6744, 6748, 6750, 6770,
+	6776, 6781, 6783, 6785, 6788, 6790, 6792, 6796,
+	6852, 6908, 6941, 6946, 6954, 6958, 6961, 6968,
+	6975, 6986, 6989, 6992, 6998, 7001, 7004, 7007,
+	7013, 7016, 7019, 7022, 7026, 7029, 7035, 7038,
+	7044, 7086, 7093, 7101, 7110, 7114, 7116, 7118,
+	7120, 7123, 7128, 7130, 7133, 7141, 7144, 7146,
+	7148, 7151, 7182, 7190, 7192, 7194, 7198, 7201,
+	7206, 7211, 7223, 7235, 7243, 7248, 7258, 7261,
+	7268, 7273, 7281, 7292, 7298, 7306, 7308, 7316,
+	7319, 7321, 7324, 7326, 7333, 7335, 7343, 7344,
+	7366, 7370, 7376, 7381, 7383, 7387, 7391, 7393,
+	7397, 7400, 7403, 7407, 7409, 7416, 7418, 7420,
+	7424, 7428, 7432, 7434, 7436, 7444, 7446, 7451,
+	7453, 7455, 7479, 7482, 7483, 7485, 7487, 7491,
+	7494, 7495, 7500, 7501, 7504, 7507, 7513, 7517,
+	7517, 7531, 7540, 7545, 7547, 7551, 7553, 7555,
+	7556, 7558, 7561, 7564, 7566, 7568, 7583, 7587,
+	7589, 7591, 7597, 7601, 7607, 7609, 7611, 7615,
+	7619, 7621, 7625, 7632, 7637, 7641, 7644, 7645,
+	7649, 7658, 7668, 7669, 7670, 7672, 7681, 7683,
+	7685, 7687, 7689, 7691, 7693, 7731, 7735, 7741,
+	7745, 7749, 7752, 7756, 7761, 7764, 7765, 7767,
+	7773, 7786, 7788, 7791, 7793, 7797, 7801, 7803,
+	7805, 7809, 7811, 7817, 7820, 7825, 7831, 7834,
+	7836, 7840, 7844, 7851, 7854, 7860, 7862, 7869,
+	7871, 7872, 7875, 7880, 7882, 7884, 7887, 7891,
+	7894, 7895, 7897, 7899, 7903, 7905, 7911, 7917,
+	7923, 7925, 7929, 7933, 7938, 7946, 7956, 7957,
+	7958, 7960, 7962, 7964, 8004, 8006, 8009, 8013,
+	8018, 8020, 8028, 8030, 8032, 8034, 8036, 8038,
+	8040, 8042, 8046, 8050, 8054, 8058, 8059, 8065,
+	8067, 8069, 8071, 8080, 8081, 8083, 8092, 8094,
+	8096, 8106, 8110, 8114, 8118, 8122, 8126, 8130,
+	8134, 8140, 8150, 8158, 8163, 8166, 8168, 8171,
+	8180, 8184, 8186, 8188, 8192, 8192, 8222, 8242,
+	8262, 8283, 8306, 8326, 8346, 8367, 8390, 8411,
+	8432, 8453, 8473, 8496, 8516, 8537, 8558, 8579,
+	8600, 8620, 8640, 8660,
+}
+
+var _graphclust_trans_keys []byte = []byte{
+	10, 128, 255, 176, 255, 131, 137, 191,
+	145, 189, 135, 129, 130, 132, 133, 144,
+	154, 176, 139, 159, 150, 156, 159, 164,
+	167, 168, 170, 173, 145, 176, 255, 139,
+	255, 166, 176, 189, 171, 179, 160, 161,
+	162, 163, 164, 165, 167, 169, 171, 173,
+	174, 175, 176, 177, 179, 180, 181, 182,
+	183, 184, 185, 186, 187, 188, 189, 190,
+	191, 166, 170, 172, 178, 150, 153, 155,
+	163, 165, 167, 169, 173, 153, 155, 152,
+	159, 138, 161, 163, 255, 189, 132, 185,
+	144, 152, 161, 164, 255, 188, 129, 131,
+	190, 255, 133, 134, 137, 138, 142, 150,
+	152, 161, 164, 189, 191, 255, 131, 134,
+	137, 138, 142, 144, 146, 175, 178, 180,
+	182, 255, 134, 138, 142, 161, 164, 185,
+	192, 255, 188, 129, 131, 190, 191, 128,
+	132, 135, 136, 139, 141, 149, 151, 162,
+	163, 130, 190, 191, 151, 128, 130, 134,
+	136, 138, 141, 188, 128, 132, 190, 255,
+	133, 137, 142, 148, 151, 161, 164, 255,
+	179, 128, 132, 134, 136, 138, 141, 149,
+	150, 162, 163, 128, 131, 187, 188, 190,
+	255, 133, 137, 142, 150, 152, 161, 164,
+	255, 129, 131, 138, 150, 143, 148, 152,
+	159, 178, 179, 177, 179, 186, 135, 142,
+	177, 179, 188, 136, 142, 181, 183, 185,
+	152, 153, 190, 191, 177, 191, 128, 132,
+	134, 135, 141, 151, 153, 188, 134, 128,
+	129, 130, 141, 156, 157, 158, 159, 160,
+	162, 164, 168, 169, 170, 171, 172, 173,
+	174, 175, 176, 179, 183, 173, 183, 185,
+	190, 150, 153, 158, 160, 177, 180, 130,
+	141, 157, 132, 134, 157, 159, 146, 149,
+	178, 180, 146, 147, 178, 179, 180, 255,
+	148, 156, 158, 255, 143, 139, 141, 169,
+	133, 134, 160, 171, 176, 187, 151, 155,
+	160, 162, 191, 149, 158, 165, 188, 176,
+	255, 143, 255, 128, 132, 180, 255, 133,
+	170, 180, 255, 128, 130, 161, 173, 166,
+	179, 164, 183, 173, 180, 144, 146, 148,
+	168, 183, 185, 128, 191, 128, 131, 179,
+	181, 183, 140, 141, 144, 176, 175, 177,
+	191, 160, 191, 128, 130, 170, 175, 153,
+	154, 153, 154, 155, 160, 162, 163, 164,
+	165, 166, 167, 168, 169, 170, 171, 175,
+	175, 178, 180, 189, 158, 159, 176, 177,
+	130, 134, 139, 172, 163, 167, 128, 129,
+	180, 255, 134, 159, 178, 190, 192, 255,
+	166, 173, 135, 147, 128, 131, 179, 255,
+	129, 164, 166, 255, 169, 182, 131, 188,
+	140, 141, 176, 178, 180, 183, 184, 190,
+	191, 129, 171, 175, 181, 182, 163, 170,
+	172, 173, 172, 184, 190, 158, 128, 143,
+	160, 175, 144, 145, 147, 150, 155, 156,
+	157, 158, 159, 135, 139, 141, 168, 171,
+	180, 186, 187, 189, 190, 189, 160, 182,
+	186, 191, 129, 131, 133, 134, 140, 143,
+	184, 186, 165, 166, 164, 167, 171, 172,
+	189, 191, 134, 144, 130, 133, 128, 129,
+	130, 131, 132, 133, 134, 135, 136, 137,
+	139, 140, 141, 144, 145, 146, 147, 150,
+	151, 152, 153, 154, 156, 160, 164, 165,
+	167, 168, 169, 170, 176, 178, 180, 181,
+	182, 187, 188, 189, 128, 130, 184, 255,
+	135, 175, 177, 178, 181, 190, 131, 175,
+	187, 255, 130, 128, 130, 167, 180, 179,
+	133, 134, 128, 130, 179, 255, 141, 129,
+	136, 144, 255, 190, 172, 183, 129, 159,
+	170, 128, 131, 187, 188, 190, 191, 151,
+	128, 132, 135, 136, 139, 141, 162, 163,
+	166, 172, 176, 180, 181, 191, 158, 128,
+	134, 132, 255, 175, 181, 184, 255, 129,
+	155, 158, 255, 129, 255, 171, 183, 157,
+	159, 162, 171, 172, 186, 176, 181, 183,
+	184, 187, 190, 128, 130, 131, 164, 145,
+	151, 154, 160, 129, 138, 179, 185, 187,
+	190, 135, 145, 155, 138, 153, 175, 182,
+	184, 191, 146, 167, 169, 182, 186, 177,
+	182, 188, 189, 191, 255, 134, 136, 255,
+	138, 142, 144, 145, 147, 151, 179, 182,
+	131, 128, 129, 180, 186, 190, 191, 128,
+	130, 145, 128, 135, 149, 171, 172, 189,
+	190, 191, 176, 180, 176, 182, 143, 145,
+	255, 136, 142, 147, 255, 164, 176, 177,
+	178, 157, 158, 188, 189, 128, 173, 176,
+	255, 135, 255, 133, 134, 137, 168, 169,
+	170, 165, 169, 173, 178, 187, 255, 131,
+	132, 140, 169, 174, 255, 130, 132, 128,
+	182, 187, 255, 173, 180, 182, 255, 132,
+	155, 159, 161, 175, 128, 130, 132, 138,
+	139, 147, 163, 165, 128, 134, 136, 152,
+	155, 161, 163, 164, 166, 170, 143, 174,
+	172, 175, 144, 150, 132, 138, 143, 187,
+	191, 160, 128, 129, 132, 135, 133, 134,
+	160, 255, 192, 255, 169, 173, 174, 128,
+	159, 160, 191, 0, 127, 176, 255, 131,
+	137, 191, 145, 189, 135, 129, 130, 132,
+	133, 144, 154, 176, 139, 159, 150, 156,
+	159, 164, 167, 168, 170, 173, 145, 176,
+	255, 139, 255, 166, 176, 189, 171, 179,
+	160, 161, 162, 163, 164, 165, 167, 169,
+	171, 173, 174, 175, 176, 177, 179, 180,
+	181, 182, 183, 184, 185, 186, 187, 188,
+	189, 190, 191, 166, 170, 172, 178, 150,
+	153, 155, 163, 165, 167, 169, 173, 153,
+	155, 152, 159, 138, 161, 163, 255, 189,
+	132, 185, 144, 152, 161, 164, 255, 188,
+	129, 131, 190, 255, 133, 134, 137, 138,
+	142, 150, 152, 161, 164, 189, 191, 255,
+	131, 134, 137, 138, 142, 144, 146, 175,
+	178, 180, 182, 255, 134, 138, 142, 161,
+	164, 185, 192, 255, 188, 129, 131, 190,
+	191, 128, 132, 135, 136, 139, 141, 149,
+	151, 162, 163, 130, 190, 191, 151, 128,
+	130, 134, 136, 138, 141, 188, 128, 132,
+	190, 255, 133, 137, 142, 148, 151, 161,
+	164, 255, 179, 128, 132, 134, 136, 138,
+	141, 149, 150, 162, 163, 128, 131, 187,
+	188, 190, 255, 133, 137, 142, 150, 152,
+	161, 164, 255, 129, 131, 138, 150, 143,
+	148, 152, 159, 178, 179, 177, 179, 186,
+	135, 142, 177, 179, 188, 136, 142, 181,
+	183, 185, 152, 153, 190, 191, 177, 191,
+	128, 132, 134, 135, 141, 151, 153, 188,
+	134, 128, 129, 130, 141, 156, 157, 158,
+	159, 160, 162, 164, 168, 169, 170, 171,
+	172, 173, 174, 175, 176, 179, 183, 173,
+	183, 185, 190, 150, 153, 158, 160, 177,
+	180, 130, 141, 157, 132, 134, 157, 159,
+	146, 149, 178, 180, 146, 147, 178, 179,
+	180, 255, 148, 156, 158, 255, 143, 139,
+	141, 169, 133, 134, 160, 171, 176, 187,
+	151, 155, 160, 162, 191, 149, 158, 165,
+	188, 176, 255, 143, 255, 128, 132, 180,
+	255, 133, 170, 180, 255, 128, 130, 161,
+	173, 166, 179, 164, 183, 173, 180, 144,
+	146, 148, 168, 183, 185, 128, 191, 128,
+	131, 179, 181, 183, 140, 141, 169, 174,
+	128, 129, 131, 132, 134, 140, 142, 143,
+	147, 150, 151, 152, 153, 154, 155, 156,
+	157, 158, 164, 172, 173, 179, 181, 183,
+	140, 141, 188, 137, 144, 176, 162, 185,
+	148, 153, 169, 170, 168, 154, 155, 136,
+	143, 169, 179, 184, 186, 130, 182, 170,
+	171, 128, 187, 190, 128, 133, 135, 146,
+	148, 191, 128, 133, 144, 255, 147, 149,
+	134, 135, 151, 156, 158, 160, 162, 167,
+	169, 178, 181, 255, 132, 135, 140, 142,
+	151, 147, 149, 163, 167, 161, 176, 191,
+	149, 151, 180, 181, 133, 135, 155, 156,
+	144, 149, 175, 177, 191, 160, 191, 128,
+	130, 138, 189, 170, 176, 153, 154, 151,
+	153, 153, 154, 155, 160, 162, 163, 164,
+	165, 166, 167, 168, 169, 170, 171, 175,
+	175, 178, 180, 189, 158, 159, 176, 177,
+	130, 134, 139, 172, 163, 167, 128, 129,
+	180, 255, 134, 159, 178, 190, 192, 255,
+	166, 173, 135, 147, 128, 131, 179, 255,
+	129, 164, 166, 255, 169, 182, 131, 188,
+	140, 141, 176, 178, 180, 183, 184, 190,
+	191, 129, 171, 175, 181, 182, 163, 170,
+	172, 173, 172, 184, 190, 158, 128, 143,
+	160, 175, 144, 145, 147, 150, 155, 156,
+	157, 158, 159, 135, 139, 141, 168, 171,
+	180, 186, 187, 189, 190, 189, 160, 182,
+	186, 191, 129, 131, 133, 134, 140, 143,
+	184, 186, 165, 166, 164, 167, 171, 172,
+	189, 191, 134, 144, 130, 133, 128, 129,
+	130, 131, 132, 133, 134, 135, 136, 137,
+	139, 140, 141, 144, 145, 146, 147, 150,
+	151, 152, 153, 154, 156, 160, 164, 165,
+	167, 168, 169, 170, 176, 178, 180, 181,
+	182, 187, 188, 189, 128, 130, 184, 255,
+	135, 175, 177, 178, 181, 190, 131, 175,
+	187, 255, 128, 130, 167, 180, 179, 133,
+	134, 128, 130, 179, 255, 141, 129, 136,
+	144, 255, 190, 172, 183, 129, 159, 170,
+	128, 131, 187, 188, 190, 191, 151, 128,
+	132, 135, 136, 139, 141, 162, 163, 166,
+	172, 176, 180, 181, 191, 158, 128, 134,
+	132, 255, 175, 181, 184, 255, 129, 155,
+	158, 255, 129, 255, 171, 183, 157, 159,
+	162, 171, 172, 186, 176, 181, 183, 184,
+	187, 190, 128, 130, 131, 164, 145, 151,
+	154, 160, 129, 138, 179, 185, 187, 190,
+	135, 145, 155, 138, 153, 175, 182, 184,
+	191, 146, 167, 169, 182, 186, 177, 182,
+	188, 189, 191, 255, 134, 136, 255, 138,
+	142, 144, 145, 147, 151, 179, 182, 131,
+	128, 129, 180, 186, 190, 191, 128, 130,
+	145, 128, 135, 149, 171, 172, 189, 190,
+	191, 176, 180, 176, 182, 143, 145, 255,
+	136, 142, 147, 255, 164, 176, 177, 178,
+	157, 158, 188, 189, 128, 173, 176, 255,
+	135, 255, 133, 134, 137, 168, 169, 170,
+	165, 169, 173, 178, 187, 255, 131, 132,
+	140, 169, 174, 255, 130, 132, 128, 182,
+	187, 255, 173, 180, 182, 255, 132, 155,
+	159, 161, 175, 128, 130, 132, 138, 139,
+	147, 163, 165, 128, 134, 136, 152, 155,
+	161, 163, 164, 166, 170, 143, 174, 172,
+	175, 144, 150, 132, 138, 128, 131, 132,
+	133, 134, 135, 136, 137, 139, 140, 141,
+	142, 143, 144, 145, 148, 149, 151, 152,
+	153, 157, 159, 160, 161, 162, 163, 164,
+	165, 168, 169, 176, 191, 129, 150, 154,
+	155, 166, 171, 177, 190, 192, 255, 175,
+	141, 143, 172, 177, 190, 191, 142, 145,
+	154, 173, 255, 166, 255, 154, 175, 129,
+	143, 178, 186, 188, 191, 137, 255, 190,
+	255, 134, 255, 144, 255, 180, 191, 149,
+	191, 140, 143, 136, 143, 154, 159, 136,
+	143, 174, 255, 140, 186, 188, 191, 128,
+	133, 135, 191, 160, 128, 129, 132, 135,
+	133, 134, 160, 255, 128, 130, 170, 175,
+	144, 145, 147, 150, 155, 156, 157, 158,
+	159, 143, 187, 191, 156, 128, 133, 134,
+	191, 128, 255, 176, 255, 131, 137, 191,
+	145, 189, 135, 129, 130, 132, 133, 144,
+	154, 176, 139, 159, 150, 156, 159, 164,
+	167, 168, 170, 173, 145, 176, 255, 139,
+	255, 166, 176, 189, 171, 179, 160, 161,
+	162, 163, 164, 165, 167, 169, 171, 173,
+	174, 175, 176, 177, 179, 180, 181, 182,
+	183, 184, 185, 186, 187, 188, 189, 190,
+	191, 166, 170, 172, 178, 150, 153, 155,
+	163, 165, 167, 169, 173, 153, 155, 152,
+	159, 138, 161, 163, 255, 189, 132, 185,
+	144, 152, 161, 164, 255, 188, 129, 131,
+	190, 255, 133, 134, 137, 138, 142, 150,
+	152, 161, 164, 189, 191, 255, 131, 134,
+	137, 138, 142, 144, 146, 175, 178, 180,
+	182, 255, 134, 138, 142, 161, 164, 185,
+	192, 255, 188, 129, 131, 190, 191, 128,
+	132, 135, 136, 139, 141, 149, 151, 162,
+	163, 130, 190, 191, 151, 128, 130, 134,
+	136, 138, 141, 188, 128, 132, 190, 255,
+	133, 137, 142, 148, 151, 161, 164, 255,
+	179, 128, 132, 134, 136, 138, 141, 149,
+	150, 162, 163, 128, 131, 187, 188, 190,
+	255, 133, 137, 142, 150, 152, 161, 164,
+	255, 129, 131, 138, 150, 143, 148, 152,
+	159, 178, 179, 177, 179, 186, 135, 142,
+	177, 179, 188, 136, 142, 181, 183, 185,
+	152, 153, 190, 191, 177, 191, 128, 132,
+	134, 135, 141, 151, 153, 188, 134, 128,
+	129, 130, 141, 156, 157, 158, 159, 160,
+	162, 164, 168, 169, 170, 171, 172, 173,
+	174, 175, 176, 179, 183, 173, 183, 185,
+	190, 150, 153, 158, 160, 177, 180, 130,
+	141, 157, 132, 134, 157, 159, 146, 149,
+	178, 180, 146, 147, 178, 179, 180, 255,
+	148, 156, 158, 255, 143, 139, 141, 169,
+	133, 134, 160, 171, 176, 187, 151, 155,
+	160, 162, 191, 149, 158, 165, 188, 176,
+	255, 143, 255, 128, 132, 180, 255, 133,
+	170, 180, 255, 128, 130, 161, 173, 166,
+	179, 164, 183, 173, 180, 144, 146, 148,
+	168, 183, 185, 128, 191, 128, 131, 179,
+	181, 183, 140, 141, 144, 176, 175, 177,
+	191, 160, 191, 128, 130, 170, 175, 153,
+	154, 153, 154, 155, 160, 162, 163, 164,
+	165, 166, 167, 168, 169, 170, 171, 175,
+	175, 178, 180, 189, 158, 159, 176, 177,
+	130, 134, 139, 172, 163, 167, 128, 129,
+	180, 255, 134, 159, 178, 190, 192, 255,
+	166, 173, 135, 147, 128, 131, 179, 255,
+	129, 164, 166, 255, 169, 182, 131, 188,
+	140, 141, 176, 178, 180, 183, 184, 190,
+	191, 129, 171, 175, 181, 182, 163, 170,
+	172, 173, 172, 184, 190, 158, 128, 143,
+	160, 175, 144, 145, 147, 150, 155, 156,
+	157, 158, 159, 135, 139, 141, 168, 171,
+	180, 186, 187, 189, 190, 189, 160, 182,
+	186, 191, 129, 131, 133, 134, 140, 143,
+	184, 186, 165, 166, 164, 167, 171, 172,
+	189, 191, 134, 144, 130, 133, 128, 129,
+	130, 131, 132, 133, 134, 135, 136, 137,
+	139, 140, 141, 144, 145, 146, 147, 150,
+	151, 152, 153, 154, 156, 160, 164, 165,
+	167, 168, 169, 170, 176, 178, 180, 181,
+	182, 187, 188, 189, 128, 130, 184, 255,
+	135, 175, 177, 178, 181, 190, 131, 175,
+	187, 255, 130, 128, 130, 167, 180, 179,
+	133, 134, 128, 130, 179, 255, 141, 129,
+	136, 144, 255, 190, 172, 183, 129, 159,
+	170, 128, 131, 187, 188, 190, 191, 151,
+	128, 132, 135, 136, 139, 141, 162, 163,
+	166, 172, 176, 180, 181, 191, 158, 128,
+	134, 132, 255, 175, 181, 184, 255, 129,
+	155, 158, 255, 129, 255, 171, 183, 157,
+	159, 162, 171, 172, 186, 176, 181, 183,
+	184, 187, 190, 128, 130, 131, 164, 145,
+	151, 154, 160, 129, 138, 179, 185, 187,
+	190, 135, 145, 155, 138, 153, 175, 182,
+	184, 191, 146, 167, 169, 182, 186, 177,
+	182, 188, 189, 191, 255, 134, 136, 255,
+	138, 142, 144, 145, 147, 151, 179, 182,
+	131, 128, 129, 180, 186, 190, 191, 128,
+	130, 145, 128, 135, 149, 171, 172, 189,
+	190, 191, 176, 180, 176, 182, 143, 145,
+	255, 136, 142, 147, 255, 164, 176, 177,
+	178, 157, 158, 188, 189, 128, 173, 176,
+	255, 135, 255, 133, 134, 137, 168, 169,
+	170, 165, 169, 173, 178, 187, 255, 131,
+	132, 140, 169, 174, 255, 130, 132, 128,
+	182, 187, 255, 173, 180, 182, 255, 132,
+	155, 159, 161, 175, 128, 130, 132, 138,
+	139, 147, 163, 165, 128, 134, 136, 152,
+	155, 161, 163, 164, 166, 170, 143, 174,
+	172, 175, 144, 150, 132, 138, 143, 187,
+	191, 160, 128, 129, 132, 135, 133, 134,
+	160, 255, 192, 255, 169, 174, 160, 172,
+	175, 191, 128, 255, 176, 255, 131, 137,
+	191, 145, 189, 135, 129, 130, 132, 133,
+	144, 154, 176, 139, 159, 150, 156, 159,
+	164, 167, 168, 170, 173, 145, 176, 255,
+	139, 255, 166, 176, 189, 171, 179, 160,
+	161, 162, 163, 164, 165, 167, 169, 171,
+	173, 174, 175, 176, 177, 179, 180, 181,
+	182, 183, 184, 185, 186, 187, 188, 189,
+	190, 191, 166, 170, 172, 178, 150, 153,
+	155, 163, 165, 167, 169, 173, 153, 155,
+	152, 159, 138, 161, 163, 255, 189, 132,
+	185, 144, 152, 161, 164, 255, 188, 129,
+	131, 190, 255, 133, 134, 137, 138, 142,
+	150, 152, 161, 164, 189, 191, 255, 131,
+	134, 137, 138, 142, 144, 146, 175, 178,
+	180, 182, 255, 134, 138, 142, 161, 164,
+	185, 192, 255, 188, 129, 131, 190, 191,
+	128, 132, 135, 136, 139, 141, 149, 151,
+	162, 163, 130, 190, 191, 151, 128, 130,
+	134, 136, 138, 141, 188, 128, 132, 190,
+	255, 133, 137, 142, 148, 151, 161, 164,
+	255, 179, 128, 132, 134, 136, 138, 141,
+	149, 150, 162, 163, 128, 131, 187, 188,
+	190, 255, 133, 137, 142, 150, 152, 161,
+	164, 255, 129, 131, 138, 150, 143, 148,
+	152, 159, 178, 179, 177, 179, 186, 135,
+	142, 177, 179, 188, 136, 142, 181, 183,
+	185, 152, 153, 190, 191, 177, 191, 128,
+	132, 134, 135, 141, 151, 153, 188, 134,
+	128, 129, 130, 141, 156, 157, 158, 159,
+	160, 162, 164, 168, 169, 170, 171, 172,
+	173, 174, 175, 176, 179, 183, 173, 183,
+	185, 190, 150, 153, 158, 160, 177, 180,
+	130, 141, 157, 132, 134, 157, 159, 146,
+	149, 178, 180, 146, 147, 178, 179, 180,
+	255, 148, 156, 158, 255, 143, 139, 141,
+	169, 133, 134, 160, 171, 176, 187, 151,
+	155, 160, 162, 191, 149, 158, 165, 188,
+	176, 255, 143, 255, 128, 132, 180, 255,
+	133, 170, 180, 255, 128, 130, 161, 173,
+	166, 179, 164, 183, 173, 180, 144, 146,
+	148, 168, 183, 185, 128, 191, 128, 131,
+	179, 181, 183, 140, 141, 169, 174, 128,
+	129, 131, 132, 134, 140, 142, 143, 147,
+	150, 151, 152, 153, 154, 155, 156, 157,
+	158, 164, 172, 173, 179, 181, 183, 140,
+	141, 188, 137, 144, 176, 162, 185, 148,
+	153, 169, 170, 168, 154, 155, 136, 143,
+	169, 179, 184, 186, 130, 182, 170, 171,
+	128, 187, 190, 128, 133, 135, 146, 148,
+	191, 128, 133, 144, 255, 147, 149, 134,
+	135, 151, 156, 158, 160, 162, 167, 169,
+	178, 181, 255, 132, 135, 140, 142, 151,
+	147, 149, 163, 167, 161, 176, 191, 149,
+	151, 180, 181, 133, 135, 155, 156, 144,
+	149, 175, 177, 191, 160, 191, 128, 130,
+	138, 189, 170, 176, 153, 154, 151, 153,
+	153, 154, 155, 160, 162, 163, 164, 165,
+	166, 167, 168, 169, 170, 171, 175, 175,
+	178, 180, 189, 158, 159, 176, 177, 130,
+	134, 139, 172, 163, 167, 128, 129, 180,
+	255, 134, 159, 178, 190, 192, 255, 166,
+	173, 135, 147, 128, 131, 179, 255, 129,
+	164, 166, 255, 169, 182, 131, 188, 140,
+	141, 176, 178, 180, 183, 184, 190, 191,
+	129, 171, 175, 181, 182, 163, 170, 172,
+	173, 172, 184, 190, 158, 128, 143, 160,
+	175, 144, 145, 147, 150, 155, 156, 157,
+	158, 159, 135, 139, 141, 168, 171, 180,
+	186, 187, 189, 190, 189, 160, 182, 186,
+	191, 129, 131, 133, 134, 140, 143, 184,
+	186, 165, 166, 164, 167, 171, 172, 189,
+	191, 134, 144, 130, 133, 128, 129, 130,
+	131, 132, 133, 134, 135, 136, 137, 139,
+	140, 141, 144, 145, 146, 147, 150, 151,
+	152, 153, 154, 156, 160, 164, 165, 167,
+	168, 169, 170, 176, 178, 180, 181, 182,
+	187, 188, 189, 128, 130, 184, 255, 135,
+	175, 177, 178, 181, 190, 131, 175, 187,
+	255, 128, 130, 167, 180, 179, 133, 134,
+	128, 130, 179, 255, 141, 129, 136, 144,
+	255, 190, 172, 183, 129, 159, 170, 128,
+	131, 187, 188, 190, 191, 151, 128, 132,
+	135, 136, 139, 141, 162, 163, 166, 172,
+	176, 180, 181, 191, 158, 128, 134, 132,
+	255, 175, 181, 184, 255, 129, 155, 158,
+	255, 129, 255, 171, 183, 157, 159, 162,
+	171, 172, 186, 176, 181, 183, 184, 187,
+	190, 128, 130, 131, 164, 145, 151, 154,
+	160, 129, 138, 179, 185, 187, 190, 135,
+	145, 155, 138, 153, 175, 182, 184, 191,
+	146, 167, 169, 182, 186, 177, 182, 188,
+	189, 191, 255, 134, 136, 255, 138, 142,
+	144, 145, 147, 151, 179, 182, 131, 128,
+	129, 180, 186, 190, 191, 128, 130, 145,
+	128, 135, 149, 171, 172, 189, 190, 191,
+	176, 180, 176, 182, 143, 145, 255, 136,
+	142, 147, 255, 164, 176, 177, 178, 157,
+	158, 188, 189, 128, 173, 176, 255, 135,
+	255, 133, 134, 137, 168, 169, 170, 165,
+	169, 173, 178, 187, 255, 131, 132, 140,
+	169, 174, 255, 130, 132, 128, 182, 187,
+	255, 173, 180, 182, 255, 132, 155, 159,
+	161, 175, 128, 130, 132, 138, 139, 147,
+	163, 165, 128, 134, 136, 152, 155, 161,
+	163, 164, 166, 170, 143, 174, 172, 175,
+	144, 150, 132, 138, 128, 131, 132, 133,
+	134, 135, 136, 137, 139, 140, 141, 142,
+	143, 144, 145, 148, 149, 151, 152, 153,
+	157, 159, 160, 161, 162, 163, 164, 165,
+	168, 169, 176, 191, 129, 150, 154, 155,
+	166, 171, 177, 190, 192, 255, 175, 141,
+	143, 172, 177, 190, 191, 142, 145, 154,
+	173, 255, 166, 255, 154, 175, 129, 143,
+	178, 186, 188, 191, 137, 255, 190, 255,
+	134, 255, 144, 255, 180, 191, 149, 191,
+	140, 143, 136, 143, 154, 159, 136, 143,
+	174, 255, 140, 186, 188, 191, 128, 133,
+	135, 191, 160, 128, 129, 132, 135, 133,
+	134, 160, 255, 128, 130, 170, 175, 144,
+	145, 147, 150, 155, 156, 157, 158, 159,
+	143, 187, 191, 128, 133, 134, 155, 157,
+	191, 157, 128, 191, 143, 128, 191, 162,
+	163, 181, 128, 191, 128, 143, 144, 145,
+	146, 191, 162, 128, 191, 142, 128, 191,
+	132, 133, 134, 135, 160, 128, 191, 128,
+	255, 128, 129, 130, 132, 133, 134, 141,
+	156, 157, 158, 159, 160, 162, 164, 168,
+	169, 170, 171, 172, 173, 174, 175, 176,
+	179, 183, 160, 255, 128, 129, 130, 133,
+	134, 135, 141, 156, 157, 158, 159, 160,
+	162, 164, 168, 169, 170, 171, 172, 173,
+	174, 175, 176, 179, 183, 160, 255, 168,
+	255, 128, 129, 130, 134, 135, 141, 156,
+	157, 158, 159, 160, 162, 164, 168, 169,
+	170, 171, 172, 173, 174, 175, 176, 179,
+	183, 168, 255, 192, 255, 159, 139, 187,
+	158, 159, 176, 255, 135, 138, 139, 187,
+	188, 255, 168, 255, 153, 154, 155, 160,
+	162, 163, 164, 165, 166, 167, 168, 169,
+	170, 171, 175, 177, 178, 179, 180, 181,
+	182, 184, 185, 186, 187, 188, 189, 191,
+	176, 190, 192, 255, 135, 147, 160, 188,
+	128, 156, 184, 129, 255, 128, 129, 130,
+	133, 134, 141, 156, 157, 158, 159, 160,
+	162, 164, 168, 169, 170, 171, 172, 173,
+	174, 175, 176, 179, 183, 158, 159, 135,
+	255, 148, 176, 140, 168, 132, 160, 188,
+	152, 180, 144, 172, 136, 164, 192, 255,
+	129, 130, 131, 132, 133, 134, 136, 137,
+	138, 139, 140, 141, 143, 144, 145, 146,
+	147, 148, 150, 151, 152, 153, 154, 155,
+	157, 158, 159, 160, 161, 162, 164, 165,
+	166, 167, 168, 169, 171, 172, 173, 174,
+	175, 176, 178, 179, 180, 181, 182, 183,
+	185, 186, 187, 188, 189, 190, 128, 191,
+	129, 130, 131, 132, 133, 134, 136, 137,
+	138, 139, 140, 141, 143, 144, 145, 146,
+	147, 148, 150, 151, 152, 153, 154, 155,
+	157, 158, 159, 160, 161, 162, 164, 165,
+	166, 167, 168, 169, 171, 172, 173, 174,
+	175, 176, 178, 179, 180, 181, 182, 183,
+	185, 186, 187, 188, 189, 190, 128, 191,
+	129, 130, 131, 132, 133, 134, 136, 137,
+	138, 139, 140, 141, 143, 144, 145, 146,
+	147, 148, 150, 151, 152, 153, 154, 155,
+	157, 158, 159, 128, 156, 160, 255, 136,
+	164, 175, 176, 255, 128, 141, 143, 191,
+	128, 129, 132, 134, 140, 142, 143, 147,
+	150, 151, 152, 153, 154, 155, 156, 157,
+	158, 164, 172, 173, 130, 191, 188, 128,
+	138, 140, 141, 144, 167, 175, 191, 137,
+	128, 159, 176, 191, 162, 185, 128, 191,
+	128, 147, 148, 153, 154, 168, 169, 170,
+	171, 191, 168, 128, 153, 154, 155, 156,
+	191, 136, 128, 191, 143, 128, 168, 169,
+	179, 180, 183, 184, 186, 187, 191, 130,
+	128, 191, 182, 128, 169, 170, 171, 172,
+	191, 128, 191, 129, 186, 187, 190, 134,
+	147, 128, 191, 128, 133, 134, 143, 144,
+	255, 147, 149, 134, 135, 151, 156, 158,
+	160, 162, 167, 169, 178, 181, 191, 192,
+	255, 132, 135, 140, 142, 150, 128, 146,
+	147, 151, 152, 162, 163, 167, 168, 191,
+	161, 176, 191, 128, 148, 149, 151, 152,
+	190, 128, 179, 180, 181, 182, 191, 128,
+	132, 133, 135, 136, 154, 155, 156, 157,
+	191, 144, 149, 128, 191, 128, 138, 129,
+	191, 176, 189, 128, 191, 151, 153, 128,
+	191, 128, 191, 165, 177, 178, 179, 180,
+	181, 182, 184, 185, 186, 187, 188, 189,
+	191, 128, 175, 176, 190, 192, 255, 128,
+	159, 160, 188, 189, 191, 128, 156, 184,
+	129, 255, 148, 176, 140, 168, 132, 160,
+	188, 152, 180, 144, 172, 136, 164, 192,
+	255, 129, 130, 131, 132, 133, 134, 136,
+	137, 138, 139, 140, 141, 143, 144, 145,
+	146, 147, 148, 150, 151, 152, 153, 154,
+	155, 157, 158, 159, 160, 161, 162, 164,
+	165, 166, 167, 168, 169, 171, 172, 173,
+	174, 175, 176, 178, 179, 180, 181, 182,
+	183, 185, 186, 187, 188, 189, 190, 128,
+	191, 129, 130, 131, 132, 133, 134, 136,
+	137, 138, 139, 140, 141, 143, 144, 145,
+	146, 147, 148, 150, 151, 152, 153, 154,
+	155, 157, 158, 159, 160, 161, 162, 164,
+	165, 166, 167, 168, 169, 171, 172, 173,
+	174, 175, 176, 178, 179, 180, 181, 182,
+	183, 185, 186, 187, 188, 189, 190, 128,
+	191, 129, 130, 131, 132, 133, 134, 136,
+	137, 138, 139, 140, 141, 143, 144, 145,
+	146, 147, 148, 150, 151, 152, 153, 154,
+	155, 157, 158, 159, 128, 156, 160, 191,
+	192, 255, 136, 164, 175, 176, 255, 135,
+	138, 139, 187, 188, 191, 192, 255, 187,
+	191, 128, 190, 128, 190, 188, 128, 175,
+	190, 191, 145, 147, 155, 157, 159, 128,
+	191, 130, 131, 135, 164, 165, 168, 170,
+	181, 188, 128, 191, 189, 128, 191, 141,
+	128, 191, 128, 129, 130, 131, 132, 191,
+	191, 128, 190, 129, 128, 191, 186, 128,
+	191, 128, 131, 132, 137, 138, 191, 134,
+	128, 191, 130, 128, 191, 144, 128, 191,
+	128, 175, 178, 128, 191, 128, 159, 164,
+	191, 133, 128, 191, 128, 178, 187, 191,
+	128, 131, 132, 133, 134, 135, 136, 137,
+	139, 140, 141, 142, 143, 144, 145, 148,
+	149, 151, 152, 153, 156, 157, 158, 159,
+	160, 161, 162, 163, 164, 165, 168, 169,
+	176, 191, 129, 150, 154, 171, 172, 175,
+	177, 190, 175, 128, 140, 141, 143, 144,
+	191, 128, 171, 172, 177, 178, 189, 190,
+	191, 142, 128, 144, 145, 154, 155, 172,
+	173, 255, 166, 191, 192, 255, 144, 145,
+	147, 150, 155, 156, 157, 158, 159, 135,
+	143, 166, 191, 128, 154, 175, 187, 129,
+	143, 144, 177, 178, 191, 128, 136, 137,
+	255, 187, 191, 192, 255, 190, 191, 192,
+	255, 128, 133, 134, 255, 144, 191, 192,
+	255, 128, 179, 180, 191, 128, 148, 149,
+	191, 128, 139, 140, 143, 144, 191, 128,
+	135, 136, 143, 144, 153, 154, 159, 160,
+	191, 128, 135, 136, 143, 144, 173, 174,
+	255, 187, 128, 139, 140, 191, 134, 128,
+	191, 128, 191, 160, 128, 191, 128, 129,
+	135, 132, 134, 157, 128, 191, 143, 128,
+	191, 162, 163, 181, 128, 191, 128, 143,
+	144, 145, 146, 191, 162, 128, 191, 142,
+	128, 191, 132, 133, 134, 135, 160, 128,
+	191, 0, 127, 128, 255, 176, 255, 131,
+	137, 191, 145, 189, 135, 129, 130, 132,
+	133, 144, 154, 176, 139, 159, 150, 156,
+	159, 164, 167, 168, 170, 173, 145, 176,
+	255, 139, 255, 166, 176, 189, 171, 179,
+	160, 161, 162, 163, 164, 165, 167, 169,
+	171, 173, 174, 175, 176, 177, 179, 180,
+	181, 182, 183, 184, 185, 186, 187, 188,
+	189, 190, 191, 166, 170, 172, 178, 150,
+	153, 155, 163, 165, 167, 169, 173, 153,
+	155, 152, 159, 138, 161, 163, 255, 189,
+	132, 185, 144, 152, 161, 164, 255, 188,
+	129, 131, 190, 255, 133, 134, 137, 138,
+	142, 150, 152, 161, 164, 189, 191, 255,
+	131, 134, 137, 138, 142, 144, 146, 175,
+	178, 180, 182, 255, 134, 138, 142, 161,
+	164, 185, 192, 255, 188, 129, 131, 190,
+	191, 128, 132, 135, 136, 139, 141, 149,
+	151, 162, 163, 130, 190, 191, 151, 128,
+	130, 134, 136, 138, 141, 188, 128, 132,
+	190, 255, 133, 137, 142, 148, 151, 161,
+	164, 255, 179, 128, 132, 134, 136, 138,
+	141, 149, 150, 162, 163, 128, 131, 187,
+	188, 190, 255, 133, 137, 142, 150, 152,
+	161, 164, 255, 129, 131, 138, 150, 143,
+	148, 152, 159, 178, 179, 177, 179, 186,
+	135, 142, 177, 179, 188, 136, 142, 181,
+	183, 185, 152, 153, 190, 191, 177, 191,
+	128, 132, 134, 135, 141, 151, 153, 188,
+	134, 128, 129, 130, 141, 156, 157, 158,
+	159, 160, 162, 164, 168, 169, 170, 171,
+	172, 173, 174, 175, 176, 179, 183, 173,
+	183, 185, 190, 150, 153, 158, 160, 177,
+	180, 130, 141, 157, 132, 134, 157, 159,
+	146, 149, 178, 180, 146, 147, 178, 179,
+	180, 255, 148, 156, 158, 255, 143, 139,
+	141, 169, 133, 134, 160, 171, 176, 187,
+	151, 155, 160, 162, 191, 149, 158, 165,
+	188, 176, 255, 143, 255, 128, 132, 180,
+	255, 133, 170, 180, 255, 128, 130, 161,
+	173, 166, 179, 164, 183, 173, 180, 144,
+	146, 148, 168, 183, 185, 128, 191, 128,
+	131, 179, 181, 183, 140, 141, 169, 174,
+	128, 129, 131, 132, 134, 140, 142, 143,
+	147, 150, 151, 152, 153, 154, 155, 156,
+	157, 158, 164, 172, 173, 179, 181, 183,
+	140, 141, 188, 137, 144, 176, 162, 185,
+	148, 153, 169, 170, 168, 154, 155, 136,
+	143, 169, 179, 184, 186, 130, 182, 170,
+	171, 128, 187, 190, 128, 133, 135, 146,
+	148, 191, 128, 133, 144, 255, 147, 149,
+	134, 135, 151, 156, 158, 160, 162, 167,
+	169, 178, 181, 255, 132, 135, 140, 142,
+	151, 147, 149, 163, 167, 161, 176, 191,
+	149, 151, 180, 181, 133, 135, 155, 156,
+	144, 149, 175, 177, 191, 160, 191, 128,
+	130, 138, 189, 170, 176, 153, 154, 151,
+	153, 153, 154, 155, 160, 162, 163, 164,
+	165, 166, 167, 168, 169, 170, 171, 175,
+	175, 178, 180, 189, 158, 159, 176, 177,
+	130, 134, 139, 172, 163, 167, 128, 129,
+	180, 255, 134, 159, 178, 190, 192, 255,
+	166, 173, 135, 147, 128, 131, 179, 255,
+	129, 164, 166, 255, 169, 182, 131, 188,
+	140, 141, 176, 178, 180, 183, 184, 190,
+	191, 129, 171, 175, 181, 182, 163, 170,
+	172, 173, 172, 184, 190, 158, 128, 143,
+	160, 175, 144, 145, 147, 150, 155, 156,
+	157, 158, 159, 135, 139, 141, 168, 171,
+	180, 186, 187, 189, 190, 189, 160, 182,
+	186, 191, 129, 131, 133, 134, 140, 143,
+	184, 186, 165, 166, 164, 167, 171, 172,
+	189, 191, 134, 144, 130, 133, 128, 129,
+	130, 131, 132, 133, 134, 135, 136, 137,
+	139, 140, 141, 144, 145, 146, 147, 150,
+	151, 152, 153, 154, 156, 160, 164, 165,
+	167, 168, 169, 170, 176, 178, 180, 181,
+	182, 187, 188, 189, 128, 130, 184, 255,
+	135, 175, 177, 178, 181, 190, 131, 175,
+	187, 255, 128, 130, 167, 180, 179, 133,
+	134, 128, 130, 179, 255, 141, 129, 136,
+	144, 255, 190, 172, 183, 129, 159, 170,
+	128, 131, 187, 188, 190, 191, 151, 128,
+	132, 135, 136, 139, 141, 162, 163, 166,
+	172, 176, 180, 181, 191, 158, 128, 134,
+	132, 255, 175, 181, 184, 255, 129, 155,
+	158, 255, 129, 255, 171, 183, 157, 159,
+	162, 171, 172, 186, 176, 181, 183, 184,
+	187, 190, 128, 130, 131, 164, 145, 151,
+	154, 160, 129, 138, 179, 185, 187, 190,
+	135, 145, 155, 138, 153, 175, 182, 184,
+	191, 146, 167, 169, 182, 186, 177, 182,
+	188, 189, 191, 255, 134, 136, 255, 138,
+	142, 144, 145, 147, 151, 179, 182, 131,
+	128, 129, 180, 186, 190, 191, 128, 130,
+	145, 128, 135, 149, 171, 172, 189, 190,
+	191, 176, 180, 176, 182, 143, 145, 255,
+	136, 142, 147, 255, 164, 176, 177, 178,
+	157, 158, 188, 189, 128, 173, 176, 255,
+	135, 255, 133, 134, 137, 168, 169, 170,
+	165, 169, 173, 178, 187, 255, 131, 132,
+	140, 169, 174, 255, 130, 132, 128, 182,
+	187, 255, 173, 180, 182, 255, 132, 155,
+	159, 161, 175, 128, 130, 132, 138, 139,
+	147, 163, 165, 128, 134, 136, 152, 155,
+	161, 163, 164, 166, 170, 143, 174, 172,
+	175, 144, 150, 132, 138, 128, 131, 132,
+	133, 134, 135, 136, 137, 139, 140, 141,
+	142, 143, 144, 145, 148, 149, 151, 152,
+	153, 157, 159, 160, 161, 162, 163, 164,
+	165, 168, 169, 176, 191, 129, 150, 154,
+	155, 166, 171, 177, 190, 192, 255, 175,
+	141, 143, 172, 177, 190, 191, 142, 145,
+	154, 173, 255, 166, 255, 154, 175, 129,
+	143, 178, 186, 188, 191, 137, 255, 190,
+	255, 134, 255, 144, 255, 180, 191, 149,
+	191, 140, 143, 136, 143, 154, 159, 136,
+	143, 174, 255, 140, 186, 188, 191, 128,
+	133, 135, 191, 160, 128, 129, 132, 135,
+	133, 134, 160, 255, 128, 130, 170, 175,
+	144, 145, 147, 150, 155, 156, 157, 158,
+	159, 143, 187, 191, 128, 129, 130, 132,
+	133, 134, 141, 156, 157, 158, 159, 160,
+	162, 164, 168, 169, 170, 171, 172, 173,
+	174, 175, 176, 179, 183, 160, 255, 128,
+	129, 130, 133, 134, 135, 141, 156, 157,
+	158, 159, 160, 162, 164, 168, 169, 170,
+	171, 172, 173, 174, 175, 176, 179, 183,
+	160, 255, 168, 255, 128, 129, 130, 134,
+	135, 141, 156, 157, 158, 159, 160, 162,
+	164, 168, 169, 170, 171, 172, 173, 174,
+	175, 176, 179, 183, 168, 255, 192, 255,
+	159, 139, 187, 158, 159, 176, 255, 135,
+	138, 139, 187, 188, 255, 168, 255, 153,
+	154, 155, 160, 162, 163, 164, 165, 166,
+	167, 168, 169, 170, 171, 175, 177, 178,
+	179, 180, 181, 182, 184, 185, 186, 187,
+	188, 189, 191, 176, 190, 192, 255, 135,
+	147, 160, 188, 128, 156, 184, 129, 255,
+	128, 129, 130, 133, 134, 141, 156, 157,
+	158, 159, 160, 162, 164, 168, 169, 170,
+	171, 172, 173, 174, 175, 176, 179, 183,
+	158, 159, 135, 255, 148, 176, 140, 168,
+	132, 160, 188, 152, 180, 144, 172, 136,
+	164, 192, 255, 129, 130, 131, 132, 133,
+	134, 136, 137, 138, 139, 140, 141, 143,
+	144, 145, 146, 147, 148, 150, 151, 152,
+	153, 154, 155, 157, 158, 159, 160, 161,
+	162, 164, 165, 166, 167, 168, 169, 171,
+	172, 173, 174, 175, 176, 178, 179, 180,
+	181, 182, 183, 185, 186, 187, 188, 189,
+	190, 128, 191, 129, 130, 131, 132, 133,
+	134, 136, 137, 138, 139, 140, 141, 143,
+	144, 145, 146, 147, 148, 150, 151, 152,
+	153, 154, 155, 157, 158, 159, 160, 161,
+	162, 164, 165, 166, 167, 168, 169, 171,
+	172, 173, 174, 175, 176, 178, 179, 180,
+	181, 182, 183, 185, 186, 187, 188, 189,
+	190, 128, 191, 129, 130, 131, 132, 133,
+	134, 136, 137, 138, 139, 140, 141, 143,
+	144, 145, 146, 147, 148, 150, 151, 152,
+	153, 154, 155, 157, 158, 159, 128, 156,
+	160, 255, 136, 164, 175, 176, 255, 142,
+	128, 191, 128, 129, 132, 134, 140, 142,
+	143, 147, 150, 151, 152, 153, 154, 155,
+	156, 157, 158, 164, 172, 173, 130, 191,
+	139, 141, 188, 128, 140, 142, 143, 144,
+	167, 168, 174, 175, 191, 128, 255, 176,
+	255, 131, 137, 191, 145, 189, 135, 129,
+	130, 132, 133, 144, 154, 176, 139, 159,
+	150, 156, 159, 164, 167, 168, 170, 173,
+	145, 176, 255, 139, 255, 166, 176, 189,
+	171, 179, 160, 161, 162, 163, 164, 165,
+	167, 169, 171, 173, 174, 175, 176, 177,
+	179, 180, 181, 182, 183, 184, 185, 186,
+	187, 188, 189, 190, 191, 166, 170, 172,
+	178, 150, 153, 155, 163, 165, 167, 169,
+	173, 153, 155, 152, 159, 138, 161, 163,
+	255, 189, 132, 185, 144, 152, 161, 164,
+	255, 188, 129, 131, 190, 255, 133, 134,
+	137, 138, 142, 150, 152, 161, 164, 189,
+	191, 255, 131, 134, 137, 138, 142, 144,
+	146, 175, 178, 180, 182, 255, 134, 138,
+	142, 161, 164, 185, 192, 255, 188, 129,
+	131, 190, 191, 128, 132, 135, 136, 139,
+	141, 149, 151, 162, 163, 130, 190, 191,
+	151, 128, 130, 134, 136, 138, 141, 188,
+	128, 132, 190, 255, 133, 137, 142, 148,
+	151, 161, 164, 255, 179, 128, 132, 134,
+	136, 138, 141, 149, 150, 162, 163, 128,
+	131, 187, 188, 190, 255, 133, 137, 142,
+	150, 152, 161, 164, 255, 129, 131, 138,
+	150, 143, 148, 152, 159, 178, 179, 177,
+	179, 186, 135, 142, 177, 179, 188, 136,
+	142, 181, 183, 185, 152, 153, 190, 191,
+	177, 191, 128, 132, 134, 135, 141, 151,
+	153, 188, 134, 128, 129, 130, 141, 156,
+	157, 158, 159, 160, 162, 164, 168, 169,
+	170, 171, 172, 173, 174, 175, 176, 179,
+	183, 173, 183, 185, 190, 150, 153, 158,
+	160, 177, 180, 130, 141, 157, 132, 134,
+	157, 159, 146, 149, 178, 180, 146, 147,
+	178, 179, 180, 255, 148, 156, 158, 255,
+	143, 139, 141, 169, 133, 134, 160, 171,
+	176, 187, 151, 155, 160, 162, 191, 149,
+	158, 165, 188, 176, 255, 143, 255, 128,
+	132, 180, 255, 133, 170, 180, 255, 128,
+	130, 161, 173, 166, 179, 164, 183, 173,
+	180, 144, 146, 148, 168, 183, 185, 128,
+	191, 128, 131, 179, 181, 183, 140, 141,
+	144, 176, 175, 177, 191, 160, 191, 128,
+	130, 170, 175, 153, 154, 153, 154, 155,
+	160, 162, 163, 164, 165, 166, 167, 168,
+	169, 170, 171, 175, 175, 178, 180, 189,
+	158, 159, 176, 177, 130, 134, 139, 172,
+	163, 167, 128, 129, 180, 255, 134, 159,
+	178, 190, 192, 255, 166, 173, 135, 147,
+	128, 131, 179, 255, 129, 164, 166, 255,
+	169, 182, 131, 188, 140, 141, 176, 178,
+	180, 183, 184, 190, 191, 129, 171, 175,
+	181, 182, 163, 170, 172, 173, 172, 184,
+	190, 158, 128, 143, 160, 175, 144, 145,
+	147, 150, 155, 156, 157, 158, 159, 135,
+	139, 141, 168, 171, 180, 186, 187, 189,
+	190, 189, 160, 182, 186, 191, 129, 131,
+	133, 134, 140, 143, 184, 186, 165, 166,
+	164, 167, 171, 172, 189, 191, 134, 144,
+	130, 133, 128, 129, 130, 131, 132, 133,
+	134, 135, 136, 137, 139, 140, 141, 144,
+	145, 146, 147, 150, 151, 152, 153, 154,
+	156, 160, 164, 165, 167, 168, 169, 170,
+	176, 178, 180, 181, 182, 187, 188, 189,
+	128, 130, 184, 255, 135, 175, 177, 178,
+	181, 190, 131, 175, 187, 255, 130, 128,
+	130, 167, 180, 179, 133, 134, 128, 130,
+	179, 255, 141, 129, 136, 144, 255, 190,
+	172, 183, 129, 159, 170, 128, 131, 187,
+	188, 190, 191, 151, 128, 132, 135, 136,
+	139, 141, 162, 163, 166, 172, 176, 180,
+	181, 191, 158, 128, 134, 132, 255, 175,
+	181, 184, 255, 129, 155, 158, 255, 129,
+	255, 171, 183, 157, 159, 162, 171, 172,
+	186, 176, 181, 183, 184, 187, 190, 128,
+	130, 131, 164, 145, 151, 154, 160, 129,
+	138, 179, 185, 187, 190, 135, 145, 155,
+	138, 153, 175, 182, 184, 191, 146, 167,
+	169, 182, 186, 177, 182, 188, 189, 191,
+	255, 134, 136, 255, 138, 142, 144, 145,
+	147, 151, 179, 182, 131, 128, 129, 180,
+	186, 190, 191, 128, 130, 145, 128, 135,
+	149, 171, 172, 189, 190, 191, 176, 180,
+	176, 182, 143, 145, 255, 136, 142, 147,
+	255, 164, 176, 177, 178, 157, 158, 188,
+	189, 128, 173, 176, 255, 135, 255, 133,
+	134, 137, 168, 169, 170, 165, 169, 173,
+	178, 187, 255, 131, 132, 140, 169, 174,
+	255, 130, 132, 128, 182, 187, 255, 173,
+	180, 182, 255, 132, 155, 159, 161, 175,
+	128, 130, 132, 138, 139, 147, 163, 165,
+	128, 134, 136, 152, 155, 161, 163, 164,
+	166, 170, 143, 174, 172, 175, 144, 150,
+	132, 138, 143, 187, 191, 160, 128, 129,
+	132, 135, 133, 134, 160, 255, 192, 255,
+	137, 128, 159, 160, 175, 176, 191, 162,
+	185, 128, 191, 128, 147, 148, 153, 154,
+	168, 169, 170, 171, 191, 168, 128, 153,
+	154, 155, 156, 191, 136, 128, 191, 143,
+	128, 168, 169, 179, 180, 183, 184, 186,
+	187, 191, 130, 128, 191, 182, 128, 169,
+	170, 171, 172, 191, 128, 191, 129, 186,
+	187, 190, 134, 147, 128, 191, 128, 133,
+	134, 143, 144, 255, 147, 149, 134, 135,
+	151, 156, 158, 160, 162, 167, 169, 178,
+	181, 191, 192, 255, 132, 135, 140, 142,
+	150, 128, 146, 147, 151, 152, 162, 163,
+	167, 168, 191, 161, 176, 191, 128, 148,
+	149, 151, 152, 190, 128, 179, 180, 181,
+	182, 191, 128, 132, 133, 135, 136, 154,
+	155, 156, 157, 191, 144, 149, 128, 191,
+	128, 138, 129, 191, 176, 189, 128, 191,
+	151, 153, 128, 191, 128, 191, 165, 177,
+	178, 179, 180, 181, 182, 184, 185, 186,
+	187, 188, 189, 191, 128, 175, 176, 190,
+	192, 255, 128, 159, 160, 188, 189, 191,
+	128, 156, 184, 129, 255, 148, 176, 140,
+	168, 132, 160, 188, 152, 180, 144, 172,
+	136, 164, 192, 255, 129, 130, 131, 132,
+	133, 134, 136, 137, 138, 139, 140, 141,
+	143, 144, 145, 146, 147, 148, 150, 151,
+	152, 153, 154, 155, 157, 158, 159, 160,
+	161, 162, 164, 165, 166, 167, 168, 169,
+	171, 172, 173, 174, 175, 176, 178, 179,
+	180, 181, 182, 183, 185, 186, 187, 188,
+	189, 190, 128, 191, 129, 130, 131, 132,
+	133, 134, 136, 137, 138, 139, 140, 141,
+	143, 144, 145, 146, 147, 148, 150, 151,
+	152, 153, 154, 155, 157, 158, 159, 160,
+	161, 162, 164, 165, 166, 167, 168, 169,
+	171, 172, 173, 174, 175, 176, 178, 179,
+	180, 181, 182, 183, 185, 186, 187, 188,
+	189, 190, 128, 191, 129, 130, 131, 132,
+	133, 134, 136, 137, 138, 139, 140, 141,
+	143, 144, 145, 146, 147, 148, 150, 151,
+	152, 153, 154, 155, 157, 158, 159, 128,
+	156, 160, 191, 192, 255, 136, 164, 175,
+	176, 255, 135, 138, 139, 187, 188, 191,
+	192, 255, 187, 191, 128, 190, 191, 128,
+	190, 188, 128, 175, 176, 189, 190, 191,
+	145, 147, 155, 157, 159, 128, 191, 130,
+	131, 135, 164, 165, 168, 170, 181, 188,
+	128, 191, 189, 128, 191, 141, 128, 191,
+	128, 129, 130, 131, 132, 191, 191, 128,
+	190, 129, 128, 191, 186, 128, 191, 128,
+	131, 132, 137, 138, 191, 134, 128, 191,
+	130, 128, 191, 144, 128, 191, 128, 175,
+	176, 191, 178, 128, 191, 128, 159, 160,
+	163, 164, 191, 133, 128, 191, 128, 178,
+	179, 186, 187, 191, 128, 131, 132, 133,
+	134, 135, 136, 137, 139, 140, 141, 142,
+	143, 144, 145, 148, 149, 151, 152, 153,
+	156, 157, 158, 159, 160, 161, 162, 163,
+	164, 165, 168, 169, 176, 191, 129, 150,
+	154, 171, 172, 175, 177, 190, 175, 128,
+	140, 141, 143, 144, 191, 128, 171, 172,
+	177, 178, 189, 190, 191, 142, 128, 144,
+	145, 154, 155, 172, 173, 255, 166, 191,
+	192, 255, 128, 255, 176, 255, 131, 137,
+	191, 145, 189, 135, 129, 130, 132, 133,
+	144, 154, 176, 139, 159, 150, 156, 159,
+	164, 167, 168, 170, 173, 145, 176, 255,
+	139, 255, 166, 176, 189, 171, 179, 160,
+	161, 162, 163, 164, 165, 167, 169, 171,
+	173, 174, 175, 176, 177, 179, 180, 181,
+	182, 183, 184, 185, 186, 187, 188, 189,
+	190, 191, 166, 170, 172, 178, 150, 153,
+	155, 163, 165, 167, 169, 173, 153, 155,
+	152, 159, 138, 161, 163, 255, 189, 132,
+	185, 144, 152, 161, 164, 255, 188, 129,
+	131, 190, 255, 133, 134, 137, 138, 142,
+	150, 152, 161, 164, 189, 191, 255, 131,
+	134, 137, 138, 142, 144, 146, 175, 178,
+	180, 182, 255, 134, 138, 142, 161, 164,
+	185, 192, 255, 188, 129, 131, 190, 191,
+	128, 132, 135, 136, 139, 141, 149, 151,
+	162, 163, 130, 190, 191, 151, 128, 130,
+	134, 136, 138, 141, 188, 128, 132, 190,
+	255, 133, 137, 142, 148, 151, 161, 164,
+	255, 179, 128, 132, 134, 136, 138, 141,
+	149, 150, 162, 163, 128, 131, 187, 188,
+	190, 255, 133, 137, 142, 150, 152, 161,
+	164, 255, 129, 131, 138, 150, 143, 148,
+	152, 159, 178, 179, 177, 179, 186, 135,
+	142, 177, 179, 188, 136, 142, 181, 183,
+	185, 152, 153, 190, 191, 177, 191, 128,
+	132, 134, 135, 141, 151, 153, 188, 134,
+	128, 129, 130, 141, 156, 157, 158, 159,
+	160, 162, 164, 168, 169, 170, 171, 172,
+	173, 174, 175, 176, 179, 183, 173, 183,
+	185, 190, 150, 153, 158, 160, 177, 180,
+	130, 141, 157, 132, 134, 157, 159, 146,
+	149, 178, 180, 146, 147, 178, 179, 180,
+	255, 148, 156, 158, 255, 143, 139, 141,
+	169, 133, 134, 160, 171, 176, 187, 151,
+	155, 160, 162, 191, 149, 158, 165, 188,
+	176, 255, 143, 255, 128, 132, 180, 255,
+	133, 170, 180, 255, 128, 130, 161, 173,
+	166, 179, 164, 183, 173, 180, 144, 146,
+	148, 168, 183, 185, 128, 191, 128, 131,
+	179, 181, 183, 140, 141, 169, 174, 128,
+	129, 131, 132, 134, 140, 142, 143, 147,
+	150, 151, 152, 153, 154, 155, 156, 157,
+	158, 164, 172, 173, 179, 181, 183, 140,
+	141, 188, 137, 144, 176, 162, 185, 148,
+	153, 169, 170, 168, 154, 155, 136, 143,
+	169, 179, 184, 186, 130, 182, 170, 171,
+	128, 187, 190, 128, 133, 135, 146, 148,
+	191, 128, 133, 144, 255, 147, 149, 134,
+	135, 151, 156, 158, 160, 162, 167, 169,
+	178, 181, 255, 132, 135, 140, 142, 151,
+	147, 149, 163, 167, 161, 176, 191, 149,
+	151, 180, 181, 133, 135, 155, 156, 144,
+	149, 175, 177, 191, 160, 191, 128, 130,
+	138, 189, 170, 176, 153, 154, 151, 153,
+	153, 154, 155, 160, 162, 163, 164, 165,
+	166, 167, 168, 169, 170, 171, 175, 175,
+	178, 180, 189, 158, 159, 176, 177, 130,
+	134, 139, 172, 163, 167, 128, 129, 180,
+	255, 134, 159, 178, 190, 192, 255, 166,
+	173, 135, 147, 128, 131, 179, 255, 129,
+	164, 166, 255, 169, 182, 131, 188, 140,
+	141, 176, 178, 180, 183, 184, 190, 191,
+	129, 171, 175, 181, 182, 163, 170, 172,
+	173, 172, 184, 190, 158, 128, 143, 160,
+	175, 144, 145, 147, 150, 155, 156, 157,
+	158, 159, 135, 139, 141, 168, 171, 180,
+	186, 187, 189, 190, 189, 160, 182, 186,
+	191, 129, 131, 133, 134, 140, 143, 184,
+	186, 165, 166, 164, 167, 171, 172, 189,
+	191, 134, 144, 130, 133, 128, 129, 130,
+	131, 132, 133, 134, 135, 136, 137, 139,
+	140, 141, 144, 145, 146, 147, 150, 151,
+	152, 153, 154, 156, 160, 164, 165, 167,
+	168, 169, 170, 176, 178, 180, 181, 182,
+	187, 188, 189, 128, 130, 184, 255, 135,
+	175, 177, 178, 181, 190, 131, 175, 187,
+	255, 128, 130, 167, 180, 179, 133, 134,
+	128, 130, 179, 255, 141, 129, 136, 144,
+	255, 190, 172, 183, 129, 159, 170, 128,
+	131, 187, 188, 190, 191, 151, 128, 132,
+	135, 136, 139, 141, 162, 163, 166, 172,
+	176, 180, 181, 191, 158, 128, 134, 132,
+	255, 175, 181, 184, 255, 129, 155, 158,
+	255, 129, 255, 171, 183, 157, 159, 162,
+	171, 172, 186, 176, 181, 183, 184, 187,
+	190, 128, 130, 131, 164, 145, 151, 154,
+	160, 129, 138, 179, 185, 187, 190, 135,
+	145, 155, 138, 153, 175, 182, 184, 191,
+	146, 167, 169, 182, 186, 177, 182, 188,
+	189, 191, 255, 134, 136, 255, 138, 142,
+	144, 145, 147, 151, 179, 182, 131, 128,
+	129, 180, 186, 190, 191, 128, 130, 145,
+	128, 135, 149, 171, 172, 189, 190, 191,
+	176, 180, 176, 182, 143, 145, 255, 136,
+	142, 147, 255, 164, 176, 177, 178, 157,
+	158, 188, 189, 128, 173, 176, 255, 135,
+	255, 133, 134, 137, 168, 169, 170, 165,
+	169, 173, 178, 187, 255, 131, 132, 140,
+	169, 174, 255, 130, 132, 128, 182, 187,
+	255, 173, 180, 182, 255, 132, 155, 159,
+	161, 175, 128, 130, 132, 138, 139, 147,
+	163, 165, 128, 134, 136, 152, 155, 161,
+	163, 164, 166, 170, 143, 174, 172, 175,
+	144, 150, 132, 138, 128, 131, 132, 133,
+	134, 135, 136, 137, 139, 140, 141, 142,
+	143, 144, 145, 148, 149, 151, 152, 153,
+	157, 159, 160, 161, 162, 163, 164, 165,
+	168, 169, 176, 191, 129, 150, 154, 155,
+	166, 171, 177, 190, 192, 255, 175, 141,
+	143, 172, 177, 190, 191, 142, 145, 154,
+	173, 255, 166, 255, 154, 175, 129, 143,
+	178, 186, 188, 191, 137, 255, 190, 255,
+	134, 255, 144, 255, 180, 191, 149, 191,
+	140, 143, 136, 143, 154, 159, 136, 143,
+	174, 255, 140, 186, 188, 191, 128, 133,
+	135, 191, 160, 128, 129, 132, 135, 133,
+	134, 160, 255, 128, 130, 170, 175, 144,
+	145, 147, 150, 155, 156, 157, 158, 159,
+	143, 187, 191, 144, 145, 147, 150, 155,
+	156, 157, 158, 159, 135, 143, 166, 191,
+	128, 154, 175, 187, 129, 143, 144, 177,
+	178, 191, 128, 136, 137, 255, 187, 191,
+	192, 255, 190, 191, 192, 255, 128, 133,
+	134, 255, 144, 191, 192, 255, 128, 179,
+	180, 191, 128, 148, 149, 191, 128, 139,
+	140, 143, 144, 191, 128, 135, 136, 143,
+	144, 153, 154, 159, 160, 191, 128, 135,
+	136, 143, 144, 173, 174, 255, 187, 128,
+	139, 140, 191, 134, 128, 191, 128, 191,
+	160, 128, 191, 128, 130, 131, 135, 191,
+	129, 134, 136, 190, 128, 159, 160, 191,
+	0, 127, 192, 255, 128, 175, 176, 255,
+	10, 13, 127, 194, 216, 219, 220, 224,
+	225, 226, 227, 234, 235, 236, 237, 239,
+	240, 243, 0, 31, 128, 191, 192, 223,
+	228, 238, 241, 247, 248, 255, 204, 205,
+	210, 214, 215, 216, 217, 219, 220, 221,
+	222, 223, 224, 225, 226, 227, 234, 239,
+	240, 243, 204, 205, 210, 214, 215, 216,
+	217, 219, 220, 221, 222, 223, 224, 225,
+	226, 227, 234, 239, 240, 243, 194, 204,
+	205, 210, 214, 215, 216, 217, 219, 220,
+	221, 222, 223, 224, 225, 226, 227, 234,
+	239, 240, 243, 194, 216, 219, 220, 224,
+	225, 226, 227, 234, 235, 236, 237, 239,
+	240, 243, 32, 126, 192, 223, 228, 238,
+	241, 247, 204, 205, 210, 214, 215, 216,
+	217, 219, 220, 221, 222, 223, 224, 225,
+	226, 227, 234, 239, 240, 243, 204, 205,
+	210, 214, 215, 216, 217, 219, 220, 221,
+	222, 223, 224, 225, 226, 227, 234, 239,
+	240, 243, 194, 204, 205, 210, 214, 215,
+	216, 217, 219, 220, 221, 222, 223, 224,
+	225, 226, 227, 234, 239, 240, 243, 204,
+	205, 210, 214, 215, 216, 217, 219, 220,
+	221, 222, 223, 224, 225, 226, 227, 234,
+	235, 236, 237, 239, 240, 243, 204, 205,
+	210, 214, 215, 216, 217, 219, 220, 221,
+	222, 223, 224, 225, 226, 227, 234, 237,
+	239, 240, 243, 204, 205, 210, 214, 215,
+	216, 217, 219, 220, 221, 222, 223, 224,
+	225, 226, 227, 234, 237, 239, 240, 243,
+	204, 205, 210, 214, 215, 216, 217, 219,
+	220, 221, 222, 223, 224, 225, 226, 227,
+	234, 237, 239, 240, 243, 204, 205, 210,
+	214, 215, 216, 217, 219, 220, 221, 222,
+	223, 224, 225, 226, 227, 234, 239, 240,
+	243, 204, 205, 210, 214, 215, 216, 217,
+	219, 220, 221, 222, 223, 224, 225, 226,
+	227, 234, 235, 236, 237, 239, 240, 243,
+	204, 205, 210, 214, 215, 216, 217, 219,
+	220, 221, 222, 223, 224, 225, 226, 227,
+	234, 239, 240, 243, 194, 204, 205, 210,
+	214, 215, 216, 217, 219, 220, 221, 222,
+	223, 224, 225, 226, 227, 234, 239, 240,
+	243, 204, 205, 210, 214, 215, 216, 217,
+	219, 220, 221, 222, 223, 224, 225, 226,
+	227, 234, 237, 239, 240, 243, 204, 205,
+	210, 214, 215, 216, 217, 219, 220, 221,
+	222, 223, 224, 225, 226, 227, 234, 237,
+	239, 240, 243, 204, 205, 210, 214, 215,
+	216, 217, 219, 220, 221, 222, 223, 224,
+	225, 226, 227, 234, 237, 239, 240, 243,
+	204, 205, 210, 214, 215, 216, 217, 219,
+	220, 221, 222, 223, 224, 225, 226, 227,
+	234, 239, 240, 243, 204, 205, 210, 214,
+	215, 216, 217, 219, 220, 221, 222, 223,
+	224, 225, 226, 227, 234, 239, 240, 243,
+	204, 205, 210, 214, 215, 216, 217, 219,
+	220, 221, 222, 223, 224, 225, 226, 227,
+	234, 239, 240, 243, 194, 204, 205, 210,
+	214, 215, 216, 217, 219, 220, 221, 222,
+	223, 224, 225, 226, 227, 234, 239, 240,
+	243,
+}
+
+var _graphclust_single_lengths []byte = []byte{
+	0, 1, 0, 0, 0, 1, 1, 0,
+	1, 0, 1, 0, 0, 1, 27, 0,
+	0, 0, 0, 1, 1, 1, 0, 0,
+	2, 1, 0, 1, 1, 1, 2, 1,
+	0, 2, 0, 2, 1, 0, 1, 0,
+	3, 0, 0, 1, 22, 0, 0, 3,
+	0, 0, 0, 0, 0, 1, 1, 0,
+	0, 3, 0, 0, 0, 0, 0, 0,
+	0, 2, 0, 5, 0, 0, 0, 1,
+	0, 2, 0, 0, 15, 0, 0, 0,
+	4, 0, 0, 0, 0, 0, 0, 0,
+	2, 1, 1, 0, 3, 1, 0, 9,
+	10, 1, 1, 0, 1, 0, 0, 0,
+	0, 0, 0, 38, 0, 0, 0, 1,
+	0, 1, 0, 1, 1, 1, 0, 0,
+	1, 0, 1, 0, 0, 0, 0, 0,
+	0, 0, 0, 1, 1, 0, 1, 0,
+	0, 0, 1, 1, 0, 0, 1, 0,
+	1, 1, 5, 0, 0, 1, 0, 1,
+	1, 0, 2, 0, 0, 6, 0, 0,
+	0, 0, 0, 1, 8, 0, 1, 1,
+	0, 0, 0, 1, 0, 1, 4, 0,
+	0, 0, 3, 0, 0, 0, 1, 1,
+	0, 1, 0, 1, 0, 0, 1, 27,
+	0, 0, 0, 0, 1, 1, 1, 0,
+	0, 2, 1, 0, 1, 1, 1, 2,
+	1, 0, 2, 0, 2, 1, 0, 1,
+	0, 3, 0, 0, 1, 22, 0, 0,
+	3, 0, 0, 0, 0, 0, 1, 1,
+	0, 0, 3, 0, 0, 0, 0, 0,
+	0, 0, 2, 0, 5, 2, 2, 24,
+	3, 1, 0, 2, 0, 1, 1, 1,
+	1, 1, 1, 0, 0, 0, 2, 5,
+	3, 0, 0, 2, 0, 1, 0, 3,
+	1, 0, 2, 15, 0, 0, 0, 4,
+	0, 0, 0, 0, 0, 0, 0, 2,
+	1, 1, 0, 3, 1, 0, 9, 10,
+	1, 1, 0, 1, 0, 0, 0, 0,
+	0, 0, 38, 0, 0, 0, 0, 1,
+	0, 1, 1, 1, 0, 0, 1, 0,
+	1, 0, 0, 0, 0, 0, 0, 0,
+	0, 1, 1, 0, 1, 0, 0, 0,
+	1, 1, 0, 0, 1, 0, 1, 1,
+	5, 0, 0, 1, 0, 1, 1, 0,
+	2, 0, 0, 6, 0, 0, 0, 0,
+	0, 1, 8, 0, 1, 1, 0, 0,
+	0, 32, 0, 1, 0, 1, 0, 2,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 1, 4, 0, 2, 0,
+	9, 1, 0, 1, 0, 0, 0, 1,
+	1, 0, 1, 0, 1, 0, 0, 1,
+	27, 0, 0, 0, 0, 1, 1, 1,
+	0, 0, 2, 1, 0, 1, 1, 1,
+	2, 1, 0, 2, 0, 2, 1, 0,
+	1, 0, 3, 0, 0, 1, 22, 0,
+	0, 3, 0, 0, 0, 0, 0, 1,
+	1, 0, 0, 3, 0, 0, 0, 0,
+	0, 0, 0, 2, 0, 5, 0, 0,
+	0, 1, 0, 2, 0, 0, 15, 0,
+	0, 0, 4, 0, 0, 0, 0, 0,
+	0, 0, 2, 1, 1, 0, 3, 1,
+	0, 9, 10, 1, 1, 0, 1, 0,
+	0, 0, 0, 0, 0, 38, 0, 0,
+	0, 1, 0, 1, 0, 1, 1, 1,
+	0, 0, 1, 0, 1, 0, 0, 0,
+	0, 0, 0, 0, 0, 1, 1, 0,
+	1, 0, 0, 0, 1, 1, 0, 0,
+	1, 0, 1, 1, 5, 0, 0, 1,
+	0, 1, 1, 0, 2, 0, 0, 6,
+	0, 0, 0, 0, 0, 1, 8, 0,
+	1, 1, 0, 0, 0, 1, 0, 1,
+	4, 0, 0, 0, 2, 0, 0, 0,
+	1, 1, 0, 1, 0, 1, 0, 0,
+	1, 27, 0, 0, 0, 0, 1, 1,
+	1, 0, 0, 2, 1, 0, 1, 1,
+	1, 2, 1, 0, 2, 0, 2, 1,
+	0, 1, 0, 3, 0, 0, 1, 22,
+	0, 0, 3, 0, 0, 0, 0, 0,
+	1, 1, 0, 0, 3, 0, 0, 0,
+	0, 0, 0, 0, 2, 0, 5, 2,
+	2, 24, 3, 1, 0, 2, 0, 1,
+	1, 1, 1, 1, 1, 0, 0, 0,
+	2, 5, 3, 0, 0, 2, 0, 1,
+	0, 3, 1, 0, 2, 15, 0, 0,
+	0, 4, 0, 0, 0, 0, 0, 0,
+	0, 2, 1, 1, 0, 3, 1, 0,
+	9, 10, 1, 1, 0, 1, 0, 0,
+	0, 0, 0, 0, 38, 0, 0, 0,
+	0, 1, 0, 1, 1, 1, 0, 0,
+	1, 0, 1, 0, 0, 0, 0, 0,
+	0, 0, 0, 1, 1, 0, 1, 0,
+	0, 0, 1, 1, 0, 0, 1, 0,
+	1, 1, 5, 0, 0, 1, 0, 1,
+	1, 0, 2, 0, 0, 6, 0, 0,
+	0, 0, 0, 1, 8, 0, 1, 1,
+	0, 0, 0, 32, 0, 1, 0, 1,
+	0, 2, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 1, 4, 0,
+	2, 0, 9, 1, 0, 0, 1, 1,
+	3, 0, 1, 1, 5, 0, 25, 0,
+	25, 0, 0, 24, 0, 0, 1, 0,
+	2, 0, 0, 0, 28, 0, 3, 24,
+	2, 0, 2, 2, 3, 2, 2, 2,
+	0, 54, 54, 27, 1, 0, 20, 1,
+	1, 2, 0, 1, 1, 1, 1, 1,
+	2, 2, 0, 2, 5, 3, 0, 0,
+	2, 2, 2, 2, 0, 14, 0, 3,
+	2, 2, 3, 2, 2, 2, 54, 54,
+	27, 1, 0, 2, 0, 1, 5, 9,
+	1, 1, 0, 1, 1, 1, 0, 1,
+	1, 1, 0, 1, 0, 1, 0, 34,
+	1, 0, 1, 0, 9, 2, 0, 4,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 1, 1, 0, 1, 3, 1,
+	1, 3, 0, 1, 1, 5, 0, 0,
+	0, 0, 1, 1, 0, 1, 0, 1,
+	0, 0, 1, 27, 0, 0, 0, 0,
+	1, 1, 1, 0, 0, 2, 1, 0,
+	1, 1, 1, 2, 1, 0, 2, 0,
+	2, 1, 0, 1, 0, 3, 0, 0,
+	1, 22, 0, 0, 3, 0, 0, 0,
+	0, 0, 1, 1, 0, 0, 3, 0,
+	0, 0, 0, 0, 0, 0, 2, 0,
+	5, 2, 2, 24, 3, 1, 0, 2,
+	0, 1, 1, 1, 1, 1, 1, 0,
+	0, 0, 2, 5, 3, 0, 0, 2,
+	0, 1, 0, 3, 1, 0, 2, 15,
+	0, 0, 0, 4, 0, 0, 0, 0,
+	0, 0, 0, 2, 1, 1, 0, 3,
+	1, 0, 9, 10, 1, 1, 0, 1,
+	0, 0, 0, 0, 0, 0, 38, 0,
+	0, 0, 0, 1, 0, 1, 1, 1,
+	0, 0, 1, 0, 1, 0, 0, 0,
+	0, 0, 0, 0, 0, 1, 1, 0,
+	1, 0, 0, 0, 1, 1, 0, 0,
+	1, 0, 1, 1, 5, 0, 0, 1,
+	0, 1, 1, 0, 2, 0, 0, 6,
+	0, 0, 0, 0, 0, 1, 8, 0,
+	1, 1, 0, 0, 0, 32, 0, 1,
+	0, 1, 0, 2, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 1,
+	4, 0, 2, 0, 9, 1, 0, 25,
+	0, 25, 0, 0, 24, 0, 0, 1,
+	0, 2, 0, 0, 0, 28, 0, 3,
+	24, 2, 0, 2, 2, 3, 2, 2,
+	2, 0, 54, 54, 27, 1, 1, 20,
+	3, 0, 0, 0, 1, 1, 0, 1,
+	0, 1, 0, 0, 1, 27, 0, 0,
+	0, 0, 1, 1, 1, 0, 0, 2,
+	1, 0, 1, 1, 1, 2, 1, 0,
+	2, 0, 2, 1, 0, 1, 0, 3,
+	0, 0, 1, 22, 0, 0, 3, 0,
+	0, 0, 0, 0, 1, 1, 0, 0,
+	3, 0, 0, 0, 0, 0, 0, 0,
+	2, 0, 5, 0, 0, 0, 1, 0,
+	2, 0, 0, 15, 0, 0, 0, 4,
+	0, 0, 0, 0, 0, 0, 0, 2,
+	1, 1, 0, 3, 1, 0, 9, 10,
+	1, 1, 0, 1, 0, 0, 0, 0,
+	0, 0, 38, 0, 0, 0, 1, 0,
+	1, 0, 1, 1, 1, 0, 0, 1,
+	0, 1, 0, 0, 0, 0, 0, 0,
+	0, 0, 1, 1, 0, 1, 0, 0,
+	0, 1, 1, 0, 0, 1, 0, 1,
+	1, 5, 0, 0, 1, 0, 1, 1,
+	0, 2, 0, 0, 6, 0, 0, 0,
+	0, 0, 1, 8, 0, 1, 1, 0,
+	0, 0, 1, 0, 1, 4, 0, 0,
+	0, 1, 2, 0, 1, 1, 1, 1,
+	1, 2, 2, 0, 2, 5, 3, 0,
+	0, 2, 2, 2, 2, 0, 14, 0,
+	3, 2, 2, 3, 2, 2, 2, 54,
+	54, 27, 1, 0, 2, 1, 1, 5,
+	9, 1, 1, 0, 1, 1, 1, 0,
+	1, 1, 1, 0, 1, 0, 1, 0,
+	34, 1, 0, 1, 0, 0, 0, 0,
+	1, 1, 0, 1, 0, 1, 0, 0,
+	1, 27, 0, 0, 0, 0, 1, 1,
+	1, 0, 0, 2, 1, 0, 1, 1,
+	1, 2, 1, 0, 2, 0, 2, 1,
+	0, 1, 0, 3, 0, 0, 1, 22,
+	0, 0, 3, 0, 0, 0, 0, 0,
+	1, 1, 0, 0, 3, 0, 0, 0,
+	0, 0, 0, 0, 2, 0, 5, 2,
+	2, 24, 3, 1, 0, 2, 0, 1,
+	1, 1, 1, 1, 1, 0, 0, 0,
+	2, 5, 3, 0, 0, 2, 0, 1,
+	0, 3, 1, 0, 2, 15, 0, 0,
+	0, 4, 0, 0, 0, 0, 0, 0,
+	0, 2, 1, 1, 0, 3, 1, 0,
+	9, 10, 1, 1, 0, 1, 0, 0,
+	0, 0, 0, 0, 38, 0, 0, 0,
+	0, 1, 0, 1, 1, 1, 0, 0,
+	1, 0, 1, 0, 0, 0, 0, 0,
+	0, 0, 0, 1, 1, 0, 1, 0,
+	0, 0, 1, 1, 0, 0, 1, 0,
+	1, 1, 5, 0, 0, 1, 0, 1,
+	1, 0, 2, 0, 0, 6, 0, 0,
+	0, 0, 0, 1, 8, 0, 1, 1,
+	0, 0, 0, 32, 0, 1, 0, 1,
+	0, 2, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 1, 4, 0,
+	2, 0, 9, 1, 0, 9, 2, 0,
+	4, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 1, 1, 0, 1, 5,
+	0, 0, 0, 0, 0, 18, 20, 20,
+	21, 15, 20, 20, 21, 23, 21, 21,
+	21, 20, 23, 20, 21, 21, 21, 21,
+	20, 20, 20, 21,
+}
+
+var _graphclust_range_lengths []byte = []byte{
+	0, 0, 1, 1, 1, 1, 2, 1,
+	1, 4, 1, 1, 1, 1, 2, 4,
+	1, 1, 2, 1, 2, 2, 6, 6,
+	3, 2, 5, 1, 3, 2, 3, 5,
+	3, 3, 1, 3, 1, 1, 1, 1,
+	2, 1, 4, 0, 0, 2, 3, 1,
+	1, 2, 2, 1, 2, 1, 1, 2,
+	1, 2, 1, 1, 2, 2, 2, 1,
+	1, 3, 1, 0, 1, 1, 1, 0,
+	1, 0, 1, 1, 0, 2, 1, 1,
+	1, 2, 3, 1, 1, 2, 2, 1,
+	1, 3, 2, 2, 0, 0, 2, 0,
+	0, 0, 0, 1, 4, 1, 1, 1,
+	1, 1, 1, 0, 2, 3, 2, 0,
+	2, 1, 2, 2, 1, 0, 1, 3,
+	6, 1, 1, 1, 2, 2, 1, 1,
+	2, 1, 3, 1, 2, 3, 1, 1,
+	2, 2, 3, 1, 3, 1, 3, 1,
+	0, 1, 0, 1, 1, 1, 2, 1,
+	0, 1, 0, 2, 1, 0, 3, 3,
+	1, 2, 2, 2, 0, 5, 0, 0,
+	1, 1, 1, 0, 1, 0, 1, 1,
+	1, 0, 2, 1, 1, 1, 1, 2,
+	1, 1, 4, 1, 1, 1, 1, 2,
+	4, 1, 1, 2, 1, 2, 2, 6,
+	6, 3, 2, 5, 1, 3, 2, 3,
+	5, 3, 3, 1, 3, 1, 1, 1,
+	1, 2, 1, 4, 0, 0, 2, 3,
+	1, 1, 2, 2, 1, 2, 1, 1,
+	2, 1, 2, 1, 1, 2, 2, 2,
+	1, 1, 3, 1, 0, 0, 0, 0,
+	0, 0, 1, 0, 2, 1, 0, 2,
+	0, 1, 1, 3, 2, 0, 6, 2,
+	1, 1, 2, 0, 1, 0, 1, 0,
+	1, 1, 0, 0, 2, 1, 1, 1,
+	2, 3, 1, 1, 2, 2, 1, 1,
+	3, 2, 2, 0, 0, 2, 0, 0,
+	0, 0, 1, 4, 1, 1, 1, 1,
+	1, 1, 0, 2, 3, 2, 2, 1,
+	2, 2, 1, 0, 1, 3, 6, 1,
+	1, 1, 2, 2, 1, 1, 2, 1,
+	3, 1, 2, 3, 1, 1, 2, 2,
+	3, 1, 3, 1, 3, 1, 0, 1,
+	0, 1, 1, 1, 2, 1, 0, 1,
+	0, 2, 1, 0, 3, 3, 1, 2,
+	2, 2, 0, 5, 0, 0, 1, 1,
+	1, 4, 1, 1, 2, 2, 1, 3,
+	1, 1, 1, 1, 1, 1, 1, 2,
+	2, 2, 2, 0, 1, 1, 0, 1,
+	0, 0, 1, 2, 1, 1, 1, 1,
+	2, 1, 1, 4, 1, 1, 1, 1,
+	2, 4, 1, 1, 2, 1, 2, 2,
+	6, 6, 3, 2, 5, 1, 3, 2,
+	3, 5, 3, 3, 1, 3, 1, 1,
+	1, 1, 2, 1, 4, 0, 0, 2,
+	3, 1, 1, 2, 2, 1, 2, 1,
+	1, 2, 1, 2, 1, 1, 2, 2,
+	2, 1, 1, 3, 1, 0, 1, 1,
+	1, 0, 1, 0, 1, 1, 0, 2,
+	1, 1, 1, 2, 3, 1, 1, 2,
+	2, 1, 1, 3, 2, 2, 0, 0,
+	2, 0, 0, 0, 0, 1, 4, 1,
+	1, 1, 1, 1, 1, 0, 2, 3,
+	2, 0, 2, 1, 2, 2, 1, 0,
+	1, 3, 6, 1, 1, 1, 2, 2,
+	1, 1, 2, 1, 3, 1, 2, 3,
+	1, 1, 2, 2, 3, 1, 3, 1,
+	3, 1, 0, 1, 0, 1, 1, 1,
+	2, 1, 0, 1, 0, 2, 1, 0,
+	3, 3, 1, 2, 2, 2, 0, 5,
+	0, 0, 1, 1, 1, 0, 1, 0,
+	1, 1, 1, 0, 2, 1, 1, 1,
+	1, 2, 1, 1, 4, 1, 1, 1,
+	1, 2, 4, 1, 1, 2, 1, 2,
+	2, 6, 6, 3, 2, 5, 1, 3,
+	2, 3, 5, 3, 3, 1, 3, 1,
+	1, 1, 1, 2, 1, 4, 0, 0,
+	2, 3, 1, 1, 2, 2, 1, 2,
+	1, 1, 2, 1, 2, 1, 1, 2,
+	2, 2, 1, 1, 3, 1, 0, 0,
+	0, 0, 0, 0, 1, 0, 2, 1,
+	0, 2, 0, 1, 1, 3, 2, 0,
+	6, 2, 1, 1, 2, 0, 1, 0,
+	1, 0, 1, 1, 0, 0, 2, 1,
+	1, 1, 2, 3, 1, 1, 2, 2,
+	1, 1, 3, 2, 2, 0, 0, 2,
+	0, 0, 0, 0, 1, 4, 1, 1,
+	1, 1, 1, 1, 0, 2, 3, 2,
+	2, 1, 2, 2, 1, 0, 1, 3,
+	6, 1, 1, 1, 2, 2, 1, 1,
+	2, 1, 3, 1, 2, 3, 1, 1,
+	2, 2, 3, 1, 3, 1, 3, 1,
+	0, 1, 0, 1, 1, 1, 2, 1,
+	0, 1, 0, 2, 1, 0, 3, 3,
+	1, 2, 2, 2, 0, 5, 0, 0,
+	1, 1, 1, 4, 1, 1, 2, 2,
+	1, 3, 1, 1, 1, 1, 1, 1,
+	1, 2, 2, 2, 2, 0, 1, 1,
+	0, 1, 0, 0, 1, 3, 1, 1,
+	1, 3, 1, 1, 1, 1, 0, 1,
+	0, 1, 1, 0, 1, 1, 0, 1,
+	0, 1, 3, 1, 2, 2, 1, 0,
+	0, 1, 0, 0, 0, 0, 0, 1,
+	0, 1, 1, 2, 2, 2, 1, 4,
+	2, 1, 5, 3, 1, 5, 1, 3,
+	2, 1, 3, 7, 5, 3, 3, 5,
+	1, 1, 1, 1, 1, 3, 3, 1,
+	0, 0, 0, 0, 0, 1, 1, 1,
+	3, 2, 4, 1, 1, 2, 1, 1,
+	1, 1, 3, 1, 1, 1, 3, 1,
+	1, 1, 1, 1, 2, 1, 2, 4,
+	3, 4, 4, 2, 0, 0, 1, 3,
+	2, 2, 2, 2, 2, 2, 2, 3,
+	5, 4, 2, 1, 1, 1, 1, 1,
+	1, 1, 3, 1, 1, 1, 1, 1,
+	1, 1, 1, 2, 1, 1, 4, 1,
+	1, 1, 1, 2, 4, 1, 1, 2,
+	1, 2, 2, 6, 6, 3, 2, 5,
+	1, 3, 2, 3, 5, 3, 3, 1,
+	3, 1, 1, 1, 1, 2, 1, 4,
+	0, 0, 2, 3, 1, 1, 2, 2,
+	1, 2, 1, 1, 2, 1, 2, 1,
+	1, 2, 2, 2, 1, 1, 3, 1,
+	0, 0, 0, 0, 0, 0, 1, 0,
+	2, 1, 0, 2, 0, 1, 1, 3,
+	2, 0, 6, 2, 1, 1, 2, 0,
+	1, 0, 1, 0, 1, 1, 0, 0,
+	2, 1, 1, 1, 2, 3, 1, 1,
+	2, 2, 1, 1, 3, 2, 2, 0,
+	0, 2, 0, 0, 0, 0, 1, 4,
+	1, 1, 1, 1, 1, 1, 0, 2,
+	3, 2, 2, 1, 2, 2, 1, 0,
+	1, 3, 6, 1, 1, 1, 2, 2,
+	1, 1, 2, 1, 3, 1, 2, 3,
+	1, 1, 2, 2, 3, 1, 3, 1,
+	3, 1, 0, 1, 0, 1, 1, 1,
+	2, 1, 0, 1, 0, 2, 1, 0,
+	3, 3, 1, 2, 2, 2, 0, 5,
+	0, 0, 1, 1, 1, 4, 1, 1,
+	2, 2, 1, 3, 1, 1, 1, 1,
+	1, 1, 1, 2, 2, 2, 2, 0,
+	1, 1, 0, 1, 0, 0, 1, 0,
+	1, 0, 1, 1, 0, 1, 1, 0,
+	1, 0, 1, 3, 1, 2, 2, 1,
+	0, 0, 1, 0, 0, 0, 0, 0,
+	1, 0, 1, 1, 2, 2, 1, 1,
+	5, 1, 1, 1, 1, 2, 1, 1,
+	4, 1, 1, 1, 1, 2, 4, 1,
+	1, 2, 1, 2, 2, 6, 6, 3,
+	2, 5, 1, 3, 2, 3, 5, 3,
+	3, 1, 3, 1, 1, 1, 1, 2,
+	1, 4, 0, 0, 2, 3, 1, 1,
+	2, 2, 1, 2, 1, 1, 2, 1,
+	2, 1, 1, 2, 2, 2, 1, 1,
+	3, 1, 0, 1, 1, 1, 0, 1,
+	0, 1, 1, 0, 2, 1, 1, 1,
+	2, 3, 1, 1, 2, 2, 1, 1,
+	3, 2, 2, 0, 0, 2, 0, 0,
+	0, 0, 1, 4, 1, 1, 1, 1,
+	1, 1, 0, 2, 3, 2, 0, 2,
+	1, 2, 2, 1, 0, 1, 3, 6,
+	1, 1, 1, 2, 2, 1, 1, 2,
+	1, 3, 1, 2, 3, 1, 1, 2,
+	2, 3, 1, 3, 1, 3, 1, 0,
+	1, 0, 1, 1, 1, 2, 1, 0,
+	1, 0, 2, 1, 0, 3, 3, 1,
+	2, 2, 2, 0, 5, 0, 0, 1,
+	1, 1, 0, 1, 0, 1, 1, 1,
+	0, 3, 1, 5, 3, 1, 5, 1,
+	3, 2, 1, 3, 7, 5, 3, 3,
+	5, 1, 1, 1, 1, 1, 3, 3,
+	1, 0, 0, 0, 0, 0, 1, 1,
+	1, 3, 2, 4, 1, 1, 3, 1,
+	1, 1, 1, 3, 1, 1, 1, 3,
+	1, 1, 1, 2, 1, 3, 1, 3,
+	4, 3, 4, 4, 2, 1, 1, 1,
+	1, 2, 1, 1, 4, 1, 1, 1,
+	1, 2, 4, 1, 1, 2, 1, 2,
+	2, 6, 6, 3, 2, 5, 1, 3,
+	2, 3, 5, 3, 3, 1, 3, 1,
+	1, 1, 1, 2, 1, 4, 0, 0,
+	2, 3, 1, 1, 2, 2, 1, 2,
+	1, 1, 2, 1, 2, 1, 1, 2,
+	2, 2, 1, 1, 3, 1, 0, 0,
+	0, 0, 0, 0, 1, 0, 2, 1,
+	0, 2, 0, 1, 1, 3, 2, 0,
+	6, 2, 1, 1, 2, 0, 1, 0,
+	1, 0, 1, 1, 0, 0, 2, 1,
+	1, 1, 2, 3, 1, 1, 2, 2,
+	1, 1, 3, 2, 2, 0, 0, 2,
+	0, 0, 0, 0, 1, 4, 1, 1,
+	1, 1, 1, 1, 0, 2, 3, 2,
+	2, 1, 2, 2, 1, 0, 1, 3,
+	6, 1, 1, 1, 2, 2, 1, 1,
+	2, 1, 3, 1, 2, 3, 1, 1,
+	2, 2, 3, 1, 3, 1, 3, 1,
+	0, 1, 0, 1, 1, 1, 2, 1,
+	0, 1, 0, 2, 1, 0, 3, 3,
+	1, 2, 2, 2, 0, 5, 0, 0,
+	1, 1, 1, 4, 1, 1, 2, 2,
+	1, 3, 1, 1, 1, 1, 1, 1,
+	1, 2, 2, 2, 2, 0, 1, 1,
+	0, 1, 0, 0, 1, 0, 0, 1,
+	3, 2, 2, 2, 2, 2, 2, 2,
+	3, 5, 4, 2, 1, 1, 1, 2,
+	2, 1, 1, 2, 0, 6, 0, 0,
+	0, 4, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0,
+}
+
+var _graphclust_index_offsets []int16 = []int16{
+	0, 0, 2, 4, 6, 8, 11, 15,
+	17, 20, 25, 28, 30, 32, 35, 65,
+	70, 72, 74, 77, 80, 84, 88, 95,
+	102, 108, 112, 118, 121, 126, 130, 136,
+	143, 147, 153, 155, 161, 164, 166, 169,
+	171, 177, 179, 184, 186, 209, 212, 216,
+	221, 223, 226, 229, 231, 234, 237, 240,
+	243, 245, 251, 253, 255, 258, 261, 264,
+	266, 268, 274, 276, 282, 284, 286, 288,
+	290, 292, 295, 297, 299, 315, 318, 320,
+	322, 328, 331, 335, 337, 339, 342, 345,
+	347, 351, 356, 360, 363, 367, 369, 372,
+	382, 393, 395, 397, 399, 405, 407, 409,
+	411, 413, 415, 417, 456, 459, 463, 466,
+	468, 471, 474, 477, 481, 484, 486, 488,
+	492, 500, 502, 505, 507, 510, 513, 515,
+	517, 520, 522, 526, 529, 533, 537, 540,
+	542, 545, 548, 553, 556, 560, 562, 567,
+	569, 571, 574, 580, 582, 584, 587, 590,
+	593, 595, 597, 600, 603, 605, 612, 616,
+	620, 622, 625, 628, 632, 641, 647, 649,
+	651, 653, 655, 657, 659, 661, 663, 669,
+	671, 673, 674, 680, 682, 684, 686, 689,
+	693, 695, 698, 703, 706, 708, 710, 713,
+	743, 748, 750, 752, 755, 758, 762, 766,
+	773, 780, 786, 790, 796, 799, 804, 808,
+	814, 821, 825, 831, 833, 839, 842, 844,
+	847, 849, 855, 857, 862, 864, 887, 890,
+	894, 899, 901, 904, 907, 909, 912, 915,
+	918, 921, 923, 929, 931, 933, 936, 939,
+	942, 944, 946, 952, 954, 960, 963, 966,
+	991, 995, 997, 999, 1002, 1005, 1008, 1010,
+	1014, 1016, 1019, 1022, 1026, 1029, 1030, 1039,
+	1047, 1052, 1054, 1057, 1060, 1062, 1064, 1066,
+	1070, 1073, 1075, 1078, 1094, 1097, 1099, 1101,
+	1107, 1110, 1114, 1116, 1118, 1121, 1124, 1126,
+	1130, 1135, 1139, 1142, 1146, 1148, 1151, 1161,
+	1172, 1174, 1176, 1178, 1184, 1186, 1188, 1190,
+	1192, 1194, 1196, 1235, 1238, 1242, 1245, 1248,
+	1251, 1254, 1258, 1261, 1263, 1265, 1269, 1277,
+	1279, 1282, 1284, 1287, 1290, 1292, 1294, 1297,
+	1299, 1303, 1306, 1310, 1314, 1317, 1319, 1322,
+	1325, 1330, 1333, 1337, 1339, 1344, 1346, 1348,
+	1351, 1357, 1359, 1361, 1364, 1367, 1370, 1372,
+	1374, 1377, 1380, 1382, 1389, 1393, 1397, 1399,
+	1402, 1405, 1409, 1418, 1424, 1426, 1428, 1430,
+	1432, 1434, 1471, 1473, 1476, 1479, 1483, 1485,
+	1491, 1493, 1495, 1497, 1499, 1501, 1503, 1505,
+	1508, 1511, 1514, 1517, 1519, 1525, 1527, 1530,
+	1532, 1542, 1544, 1546, 1550, 1552, 1554, 1556,
+	1559, 1563, 1565, 1568, 1573, 1576, 1578, 1580,
+	1583, 1613, 1618, 1620, 1622, 1625, 1628, 1632,
+	1636, 1643, 1650, 1656, 1660, 1666, 1669, 1674,
+	1678, 1684, 1691, 1695, 1701, 1703, 1709, 1712,
+	1714, 1717, 1719, 1725, 1727, 1732, 1734, 1757,
+	1760, 1764, 1769, 1771, 1774, 1777, 1779, 1782,
+	1785, 1788, 1791, 1793, 1799, 1801, 1803, 1806,
+	1809, 1812, 1814, 1816, 1822, 1824, 1830, 1832,
+	1834, 1836, 1838, 1840, 1843, 1845, 1847, 1863,
+	1866, 1868, 1870, 1876, 1879, 1883, 1885, 1887,
+	1890, 1893, 1895, 1899, 1904, 1908, 1911, 1915,
+	1917, 1920, 1930, 1941, 1943, 1945, 1947, 1953,
+	1955, 1957, 1959, 1961, 1963, 1965, 2004, 2007,
+	2011, 2014, 2016, 2019, 2022, 2025, 2029, 2032,
+	2034, 2036, 2040, 2048, 2050, 2053, 2055, 2058,
+	2061, 2063, 2065, 2068, 2070, 2074, 2077, 2081,
+	2085, 2088, 2090, 2093, 2096, 2101, 2104, 2108,
+	2110, 2115, 2117, 2119, 2122, 2128, 2130, 2132,
+	2135, 2138, 2141, 2143, 2145, 2148, 2151, 2153,
+	2160, 2164, 2168, 2170, 2173, 2176, 2180, 2189,
+	2195, 2197, 2199, 2201, 2203, 2205, 2207, 2209,
+	2211, 2217, 2219, 2221, 2222, 2227, 2229, 2231,
+	2233, 2236, 2240, 2242, 2245, 2250, 2253, 2255,
+	2257, 2260, 2290, 2295, 2297, 2299, 2302, 2305,
+	2309, 2313, 2320, 2327, 2333, 2337, 2343, 2346,
+	2351, 2355, 2361, 2368, 2372, 2378, 2380, 2386,
+	2389, 2391, 2394, 2396, 2402, 2404, 2409, 2411,
+	2434, 2437, 2441, 2446, 2448, 2451, 2454, 2456,
+	2459, 2462, 2465, 2468, 2470, 2476, 2478, 2480,
+	2483, 2486, 2489, 2491, 2493, 2499, 2501, 2507,
+	2510, 2513, 2538, 2542, 2544, 2546, 2549, 2552,
+	2555, 2557, 2561, 2563, 2566, 2569, 2573, 2576,
+	2577, 2586, 2594, 2599, 2601, 2604, 2607, 2609,
+	2611, 2613, 2617, 2620, 2622, 2625, 2641, 2644,
+	2646, 2648, 2654, 2657, 2661, 2663, 2665, 2668,
+	2671, 2673, 2677, 2682, 2686, 2689, 2693, 2695,
+	2698, 2708, 2719, 2721, 2723, 2725, 2731, 2733,
+	2735, 2737, 2739, 2741, 2743, 2782, 2785, 2789,
+	2792, 2795, 2798, 2801, 2805, 2808, 2810, 2812,
+	2816, 2824, 2826, 2829, 2831, 2834, 2837, 2839,
+	2841, 2844, 2846, 2850, 2853, 2857, 2861, 2864,
+	2866, 2869, 2872, 2877, 2880, 2884, 2886, 2891,
+	2893, 2895, 2898, 2904, 2906, 2908, 2911, 2914,
+	2917, 2919, 2921, 2924, 2927, 2929, 2936, 2940,
+	2944, 2946, 2949, 2952, 2956, 2965, 2971, 2973,
+	2975, 2977, 2979, 2981, 3018, 3020, 3023, 3026,
+	3030, 3032, 3038, 3040, 3042, 3044, 3046, 3048,
+	3050, 3052, 3055, 3058, 3061, 3064, 3066, 3072,
+	3074, 3077, 3079, 3089, 3091, 3093, 3097, 3100,
+	3103, 3108, 3112, 3115, 3118, 3125, 3127, 3153,
+	3155, 3181, 3183, 3185, 3210, 3212, 3214, 3216,
+	3218, 3221, 3223, 3227, 3229, 3260, 3263, 3268,
+	3293, 3296, 3298, 3301, 3304, 3308, 3311, 3314,
+	3318, 3319, 3375, 3431, 3461, 3465, 3468, 3490,
+	3496, 3500, 3504, 3510, 3515, 3518, 3525, 3528,
+	3533, 3538, 3542, 3546, 3556, 3567, 3574, 3578,
+	3584, 3588, 3592, 3596, 3600, 3602, 3620, 3624,
+	3629, 3632, 3635, 3639, 3642, 3645, 3649, 3705,
+	3761, 3792, 3796, 3801, 3805, 3807, 3811, 3818,
+	3829, 3832, 3835, 3839, 3842, 3845, 3848, 3852,
+	3855, 3858, 3861, 3863, 3866, 3869, 3872, 3875,
+	3914, 3919, 3924, 3930, 3933, 3943, 3946, 3948,
+	3956, 3959, 3962, 3965, 3968, 3971, 3974, 3977,
+	3981, 3987, 3992, 3996, 3999, 4001, 4004, 4009,
+	4012, 4015, 4020, 4024, 4027, 4030, 4037, 4039,
+	4041, 4043, 4045, 4048, 4052, 4054, 4057, 4062,
+	4065, 4067, 4069, 4072, 4102, 4107, 4109, 4111,
+	4114, 4117, 4121, 4125, 4132, 4139, 4145, 4149,
+	4155, 4158, 4163, 4167, 4173, 4180, 4184, 4190,
+	4192, 4198, 4201, 4203, 4206, 4208, 4214, 4216,
+	4221, 4223, 4246, 4249, 4253, 4258, 4260, 4263,
+	4266, 4268, 4271, 4274, 4277, 4280, 4282, 4288,
+	4290, 4292, 4295, 4298, 4301, 4303, 4305, 4311,
+	4313, 4319, 4322, 4325, 4350, 4354, 4356, 4358,
+	4361, 4364, 4367, 4369, 4373, 4375, 4378, 4381,
+	4385, 4388, 4389, 4398, 4406, 4411, 4413, 4416,
+	4419, 4421, 4423, 4425, 4429, 4432, 4434, 4437,
+	4453, 4456, 4458, 4460, 4466, 4469, 4473, 4475,
+	4477, 4480, 4483, 4485, 4489, 4494, 4498, 4501,
+	4505, 4507, 4510, 4520, 4531, 4533, 4535, 4537,
+	4543, 4545, 4547, 4549, 4551, 4553, 4555, 4594,
+	4597, 4601, 4604, 4607, 4610, 4613, 4617, 4620,
+	4622, 4624, 4628, 4636, 4638, 4641, 4643, 4646,
+	4649, 4651, 4653, 4656, 4658, 4662, 4665, 4669,
+	4673, 4676, 4678, 4681, 4684, 4689, 4692, 4696,
+	4698, 4703, 4705, 4707, 4710, 4716, 4718, 4720,
+	4723, 4726, 4729, 4731, 4733, 4736, 4739, 4741,
+	4748, 4752, 4756, 4758, 4761, 4764, 4768, 4777,
+	4783, 4785, 4787, 4789, 4791, 4793, 4830, 4832,
+	4835, 4838, 4842, 4844, 4850, 4852, 4854, 4856,
+	4858, 4860, 4862, 4864, 4867, 4870, 4873, 4876,
+	4878, 4884, 4886, 4889, 4891, 4901, 4903, 4905,
+	4931, 4933, 4959, 4961, 4963, 4988, 4990, 4992,
+	4994, 4996, 4999, 5001, 5005, 5007, 5038, 5041,
+	5046, 5071, 5074, 5076, 5079, 5082, 5086, 5089,
+	5092, 5096, 5097, 5153, 5209, 5239, 5243, 5246,
+	5268, 5277, 5279, 5281, 5283, 5286, 5290, 5292,
+	5295, 5300, 5303, 5305, 5307, 5310, 5340, 5345,
+	5347, 5349, 5352, 5355, 5359, 5363, 5370, 5377,
+	5383, 5387, 5393, 5396, 5401, 5405, 5411, 5418,
+	5422, 5428, 5430, 5436, 5439, 5441, 5444, 5446,
+	5452, 5454, 5459, 5461, 5484, 5487, 5491, 5496,
+	5498, 5501, 5504, 5506, 5509, 5512, 5515, 5518,
+	5520, 5526, 5528, 5530, 5533, 5536, 5539, 5541,
+	5543, 5549, 5551, 5557, 5559, 5561, 5563, 5565,
+	5567, 5570, 5572, 5574, 5590, 5593, 5595, 5597,
+	5603, 5606, 5610, 5612, 5614, 5617, 5620, 5622,
+	5626, 5631, 5635, 5638, 5642, 5644, 5647, 5657,
+	5668, 5670, 5672, 5674, 5680, 5682, 5684, 5686,
+	5688, 5690, 5692, 5731, 5734, 5738, 5741, 5743,
+	5746, 5749, 5752, 5756, 5759, 5761, 5763, 5767,
+	5775, 5777, 5780, 5782, 5785, 5788, 5790, 5792,
+	5795, 5797, 5801, 5804, 5808, 5812, 5815, 5817,
+	5820, 5823, 5828, 5831, 5835, 5837, 5842, 5844,
+	5846, 5849, 5855, 5857, 5859, 5862, 5865, 5868,
+	5870, 5872, 5875, 5878, 5880, 5887, 5891, 5895,
+	5897, 5900, 5903, 5907, 5916, 5922, 5924, 5926,
+	5928, 5930, 5932, 5934, 5936, 5938, 5944, 5946,
+	5948, 5949, 5954, 5958, 5964, 5969, 5972, 5979,
+	5982, 5987, 5992, 5996, 6000, 6010, 6021, 6028,
+	6032, 6038, 6042, 6046, 6050, 6054, 6056, 6074,
+	6078, 6083, 6086, 6089, 6093, 6096, 6099, 6103,
+	6159, 6215, 6246, 6250, 6255, 6259, 6262, 6267,
+	6274, 6285, 6288, 6291, 6295, 6298, 6301, 6304,
+	6308, 6311, 6314, 6317, 6320, 6323, 6327, 6330,
+	6334, 6373, 6378, 6383, 6389, 6392, 6394, 6396,
+	6398, 6401, 6405, 6407, 6410, 6415, 6418, 6420,
+	6422, 6425, 6455, 6460, 6462, 6464, 6467, 6470,
+	6474, 6478, 6485, 6492, 6498, 6502, 6508, 6511,
+	6516, 6520, 6526, 6533, 6537, 6543, 6545, 6551,
+	6554, 6556, 6559, 6561, 6567, 6569, 6574, 6576,
+	6599, 6602, 6606, 6611, 6613, 6616, 6619, 6621,
+	6624, 6627, 6630, 6633, 6635, 6641, 6643, 6645,
+	6648, 6651, 6654, 6656, 6658, 6664, 6666, 6672,
+	6675, 6678, 6703, 6707, 6709, 6711, 6714, 6717,
+	6720, 6722, 6726, 6728, 6731, 6734, 6738, 6741,
+	6742, 6751, 6759, 6764, 6766, 6769, 6772, 6774,
+	6776, 6778, 6782, 6785, 6787, 6790, 6806, 6809,
+	6811, 6813, 6819, 6822, 6826, 6828, 6830, 6833,
+	6836, 6838, 6842, 6847, 6851, 6854, 6858, 6860,
+	6863, 6873, 6884, 6886, 6888, 6890, 6896, 6898,
+	6900, 6902, 6904, 6906, 6908, 6947, 6950, 6954,
+	6957, 6960, 6963, 6966, 6970, 6973, 6975, 6977,
+	6981, 6989, 6991, 6994, 6996, 6999, 7002, 7004,
+	7006, 7009, 7011, 7015, 7018, 7022, 7026, 7029,
+	7031, 7034, 7037, 7042, 7045, 7049, 7051, 7056,
+	7058, 7060, 7063, 7069, 7071, 7073, 7076, 7079,
+	7082, 7084, 7086, 7089, 7092, 7094, 7101, 7105,
+	7109, 7111, 7114, 7117, 7121, 7130, 7136, 7138,
+	7140, 7142, 7144, 7146, 7183, 7185, 7188, 7191,
+	7195, 7197, 7203, 7205, 7207, 7209, 7211, 7213,
+	7215, 7217, 7220, 7223, 7226, 7229, 7231, 7237,
+	7239, 7242, 7244, 7254, 7256, 7258, 7268, 7271,
+	7273, 7281, 7284, 7287, 7290, 7293, 7296, 7299,
+	7302, 7306, 7312, 7317, 7321, 7324, 7326, 7329,
+	7337, 7340, 7342, 7344, 7347, 7348, 7373, 7394,
+	7415, 7437, 7457, 7478, 7499, 7521, 7545, 7567,
+	7589, 7611, 7632, 7656, 7677, 7699, 7721, 7743,
+	7765, 7786, 7807, 7828,
+}
+
+var _graphclust_indicies []int16 = []int16{
+	0, 1, 3, 2, 2, 3, 3, 2,
+	3, 3, 2, 3, 3, 3, 2, 3,
+	2, 3, 3, 2, 3, 3, 3, 3,
+	2, 3, 3, 2, 2, 3, 3, 2,
+	3, 3, 2, 4, 5, 6, 7, 8,
+	9, 11, 12, 13, 15, 16, 17, 18,
+	19, 20, 21, 22, 23, 24, 25, 26,
+	27, 28, 29, 30, 31, 32, 10, 14,
+	2, 3, 3, 3, 3, 2, 3, 2,
+	3, 2, 3, 3, 2, 2, 2, 3,
+	2, 2, 2, 3, 3, 3, 3, 2,
+	2, 2, 2, 2, 2, 2, 3, 2,
+	2, 2, 2, 2, 2, 3, 2, 2,
+	2, 2, 2, 3, 3, 3, 3, 2,
+	3, 3, 3, 3, 3, 2, 3, 3,
+	2, 3, 3, 3, 3, 2, 3, 3,
+	3, 2, 2, 2, 2, 2, 2, 3,
+	3, 3, 3, 3, 3, 3, 2, 3,
+	3, 3, 2, 2, 2, 2, 2, 2,
+	3, 3, 2, 3, 3, 3, 3, 3,
+	2, 3, 3, 2, 3, 2, 3, 3,
+	2, 3, 2, 3, 3, 3, 3, 3,
+	2, 3, 2, 3, 3, 3, 3, 2,
+	3, 2, 33, 34, 35, 36, 37, 38,
+	39, 40, 41, 42, 43, 44, 45, 46,
+	47, 48, 49, 50, 51, 52, 53, 54,
+	2, 3, 3, 2, 3, 3, 3, 2,
+	3, 3, 3, 3, 2, 3, 2, 3,
+	3, 2, 3, 3, 2, 3, 2, 2,
+	2, 3, 3, 3, 2, 3, 3, 2,
+	3, 3, 2, 3, 2, 3, 3, 3,
+	3, 3, 2, 3, 2, 2, 3, 3,
+	3, 2, 2, 2, 3, 3, 3, 2,
+	3, 2, 3, 2, 3, 3, 3, 3,
+	3, 2, 3, 55, 56, 57, 58, 59,
+	60, 2, 3, 2, 3, 2, 3, 2,
+	3, 2, 3, 2, 61, 62, 2, 3,
+	2, 3, 2, 63, 64, 65, 66, 67,
+	68, 69, 70, 71, 72, 73, 74, 75,
+	76, 77, 2, 3, 3, 2, 3, 2,
+	3, 2, 3, 3, 3, 3, 3, 2,
+	3, 3, 2, 2, 2, 2, 3, 3,
+	2, 3, 2, 3, 3, 2, 2, 2,
+	3, 3, 2, 3, 3, 3, 2, 3,
+	3, 3, 3, 2, 3, 3, 3, 2,
+	3, 3, 2, 78, 79, 64, 2, 3,
+	2, 3, 3, 2, 80, 81, 82, 83,
+	84, 85, 86, 87, 88, 2, 89, 90,
+	91, 92, 93, 94, 95, 96, 97, 98,
+	2, 3, 2, 3, 2, 3, 2, 3,
+	3, 3, 3, 3, 2, 3, 2, 3,
+	2, 3, 2, 3, 2, 3, 2, 3,
+	2, 99, 100, 101, 102, 103, 104, 105,
+	106, 107, 108, 109, 110, 111, 112, 113,
+	46, 114, 115, 116, 46, 117, 118, 119,
+	120, 121, 122, 123, 124, 125, 126, 127,
+	128, 129, 130, 131, 132, 133, 134, 2,
+	3, 3, 2, 2, 2, 2, 3, 2,
+	2, 3, 3, 2, 3, 3, 2, 3,
+	3, 2, 3, 3, 2, 2, 2, 2,
+	3, 3, 3, 2, 3, 2, 3, 2,
+	3, 3, 3, 2, 3, 3, 3, 3,
+	3, 3, 3, 2, 3, 2, 3, 3,
+	2, 2, 3, 3, 3, 2, 2, 2,
+	3, 2, 3, 3, 2, 3, 3, 2,
+	3, 2, 3, 3, 3, 2, 3, 3,
+	2, 3, 3, 3, 2, 3, 3, 3,
+	2, 3, 3, 2, 3, 2, 3, 3,
+	2, 3, 3, 2, 3, 3, 3, 3,
+	2, 2, 2, 3, 3, 3, 3, 2,
+	3, 2, 3, 3, 3, 3, 2, 3,
+	2, 135, 2, 3, 3, 2, 136, 137,
+	138, 139, 140, 2, 3, 2, 3, 2,
+	3, 3, 2, 2, 2, 3, 3, 3,
+	2, 141, 2, 3, 2, 142, 143, 2,
+	3, 3, 2, 2, 3, 144, 145, 146,
+	147, 148, 149, 2, 3, 3, 3, 2,
+	2, 2, 2, 3, 3, 2, 3, 3,
+	2, 2, 2, 3, 3, 3, 3, 2,
+	150, 151, 137, 152, 153, 153, 154, 155,
+	2, 3, 3, 3, 3, 3, 2, 3,
+	2, 3, 2, 3, 2, 3, 2, 3,
+	2, 156, 2, 3, 2, 157, 2, 158,
+	159, 160, 162, 161, 2, 3, 2, 2,
+	3, 3, 164, 163, 164, 163, 3, 1,
+	55, 164, 165, 164, 164, 165, 164, 164,
+	165, 164, 164, 164, 165, 164, 165, 164,
+	164, 165, 164, 164, 164, 164, 165, 164,
+	164, 165, 165, 164, 164, 165, 164, 164,
+	165, 166, 167, 168, 169, 170, 171, 173,
+	174, 175, 177, 178, 179, 180, 181, 182,
+	183, 184, 185, 186, 187, 188, 189, 190,
+	191, 192, 193, 194, 172, 176, 165, 164,
+	164, 164, 164, 165, 164, 165, 164, 165,
+	164, 164, 165, 165, 165, 164, 165, 165,
+	165, 164, 164, 164, 164, 165, 165, 165,
+	165, 165, 165, 165, 164, 165, 165, 165,
+	165, 165, 165, 164, 165, 165, 165, 165,
+	165, 164, 164, 164, 164, 165, 164, 164,
+	164, 164, 164, 165, 164, 164, 165, 164,
+	164, 164, 164, 165, 164, 164, 164, 165,
+	165, 165, 165, 165, 165, 164, 164, 164,
+	164, 164, 164, 164, 165, 164, 164, 164,
+	165, 165, 165, 165, 165, 165, 164, 164,
+	165, 164, 164, 164, 164, 164, 165, 164,
+	164, 165, 164, 165, 164, 164, 165, 164,
+	165, 164, 164, 164, 164, 164, 165, 164,
+	165, 164, 164, 164, 164, 165, 164, 165,
+	195, 196, 197, 198, 199, 200, 201, 202,
+	203, 204, 205, 206, 207, 208, 209, 210,
+	211, 212, 213, 214, 215, 216, 165, 164,
+	164, 165, 164, 164, 164, 165, 164, 164,
+	164, 164, 165, 164, 165, 164, 164, 165,
+	164, 164, 165, 164, 165, 165, 165, 164,
+	164, 164, 165, 164, 164, 165, 164, 164,
+	165, 164, 165, 164, 164, 164, 164, 164,
+	165, 164, 165, 165, 164, 164, 164, 165,
+	165, 165, 164, 164, 164, 165, 164, 165,
+	164, 165, 164, 164, 164, 164, 164, 165,
+	164, 55, 217, 218, 219, 220, 221, 165,
+	164, 222, 165, 164, 164, 165, 223, 224,
+	218, 225, 226, 227, 228, 229, 230, 231,
+	232, 233, 216, 234, 235, 236, 237, 238,
+	239, 240, 241, 219, 220, 221, 165, 164,
+	222, 164, 165, 164, 165, 164, 165, 164,
+	164, 165, 164, 164, 165, 164, 164, 165,
+	164, 165, 164, 164, 164, 165, 164, 165,
+	164, 164, 165, 164, 164, 165, 164, 164,
+	164, 165, 164, 164, 165, 164, 165, 165,
+	165, 165, 165, 165, 165, 165, 164, 164,
+	164, 164, 164, 164, 164, 164, 165, 164,
+	164, 164, 164, 165, 164, 165, 164, 164,
+	165, 164, 164, 165, 164, 165, 164, 165,
+	164, 165, 242, 243, 244, 165, 164, 164,
+	165, 164, 165, 164, 164, 165, 245, 246,
+	247, 248, 249, 250, 251, 252, 253, 254,
+	255, 256, 257, 258, 259, 165, 164, 164,
+	165, 164, 165, 164, 165, 164, 164, 164,
+	164, 164, 165, 164, 164, 165, 165, 165,
+	165, 164, 164, 165, 164, 165, 164, 164,
+	165, 165, 165, 164, 164, 165, 164, 164,
+	164, 165, 164, 164, 164, 164, 165, 164,
+	164, 164, 165, 164, 164, 165, 260, 261,
+	246, 165, 164, 165, 164, 164, 165, 262,
+	263, 264, 265, 266, 267, 268, 269, 270,
+	165, 271, 272, 273, 274, 275, 276, 277,
+	278, 279, 280, 165, 164, 165, 164, 165,
+	164, 165, 164, 164, 164, 164, 164, 165,
+	164, 165, 164, 165, 164, 165, 164, 165,
+	164, 165, 164, 165, 281, 282, 283, 230,
+	284, 285, 286, 287, 288, 289, 290, 291,
+	292, 293, 294, 208, 295, 296, 297, 208,
+	298, 299, 300, 301, 302, 303, 304, 305,
+	306, 307, 308, 309, 310, 311, 312, 313,
+	314, 315, 165, 164, 164, 165, 165, 165,
+	165, 164, 165, 165, 164, 164, 164, 165,
+	164, 164, 165, 164, 164, 165, 165, 165,
+	165, 164, 164, 164, 165, 164, 165, 164,
+	165, 164, 164, 164, 165, 164, 164, 164,
+	164, 164, 164, 164, 165, 164, 165, 164,
+	164, 165, 165, 164, 164, 164, 165, 165,
+	165, 164, 165, 164, 164, 165, 164, 164,
+	165, 164, 165, 164, 164, 164, 165, 164,
+	164, 165, 164, 164, 164, 165, 164, 164,
+	164, 165, 164, 164, 165, 164, 165, 164,
+	164, 165, 164, 164, 165, 164, 164, 164,
+	164, 165, 165, 165, 164, 164, 164, 164,
+	165, 164, 165, 164, 164, 164, 164, 165,
+	164, 165, 316, 165, 164, 164, 165, 317,
+	318, 319, 320, 321, 165, 164, 165, 164,
+	165, 164, 164, 165, 165, 165, 164, 164,
+	164, 165, 322, 165, 164, 165, 323, 324,
+	165, 164, 164, 165, 165, 164, 325, 326,
+	327, 328, 329, 330, 165, 164, 164, 164,
+	165, 165, 165, 165, 164, 164, 165, 164,
+	164, 165, 165, 165, 164, 164, 164, 164,
+	165, 331, 332, 318, 333, 334, 334, 335,
+	336, 165, 164, 164, 164, 164, 164, 165,
+	164, 165, 164, 165, 164, 165, 164, 165,
+	164, 165, 337, 338, 339, 340, 341, 342,
+	343, 344, 338, 337, 338, 337, 338, 216,
+	337, 345, 346, 338, 337, 347, 348, 349,
+	350, 351, 352, 338, 353, 354, 337, 338,
+	337, 345, 235, 216, 216, 235, 165, 55,
+	164, 164, 164, 165, 164, 164, 165, 164,
+	164, 164, 165, 165, 164, 164, 164, 164,
+	164, 164, 165, 164, 165, 165, 164, 164,
+	165, 165, 164, 164, 165, 164, 165, 164,
+	165, 164, 164, 165, 164, 164, 165, 164,
+	164, 165, 164, 164, 165, 355, 165, 356,
+	338, 337, 357, 235, 165, 164, 165, 358,
+	243, 165, 164, 165, 262, 263, 264, 265,
+	266, 267, 268, 269, 359, 165, 360, 165,
+	164, 165, 163, 361, 3, 1, 363, 362,
+	362, 363, 363, 362, 363, 363, 362, 363,
+	363, 363, 362, 363, 362, 363, 363, 362,
+	363, 363, 363, 363, 362, 363, 363, 362,
+	362, 363, 363, 362, 363, 363, 362, 364,
+	365, 366, 367, 368, 369, 371, 372, 373,
+	375, 376, 377, 378, 379, 380, 381, 382,
+	383, 384, 385, 386, 387, 388, 389, 390,
+	391, 392, 370, 374, 362, 363, 363, 363,
+	363, 362, 363, 362, 363, 362, 363, 363,
+	362, 362, 362, 363, 362, 362, 362, 363,
+	363, 363, 363, 362, 362, 362, 362, 362,
+	362, 362, 363, 362, 362, 362, 362, 362,
+	362, 363, 362, 362, 362, 362, 362, 363,
+	363, 363, 363, 362, 363, 363, 363, 363,
+	363, 362, 363, 363, 362, 363, 363, 363,
+	363, 362, 363, 363, 363, 362, 362, 362,
+	362, 362, 362, 363, 363, 363, 363, 363,
+	363, 363, 362, 363, 363, 363, 362, 362,
+	362, 362, 362, 362, 363, 363, 362, 363,
+	363, 363, 363, 363, 362, 363, 363, 362,
+	363, 362, 363, 363, 362, 363, 362, 363,
+	363, 363, 363, 363, 362, 363, 362, 363,
+	363, 363, 363, 362, 363, 362, 393, 394,
+	395, 396, 397, 398, 399, 400, 401, 402,
+	403, 404, 405, 406, 407, 408, 409, 410,
+	411, 412, 413, 414, 362, 363, 363, 362,
+	363, 363, 363, 362, 363, 363, 363, 363,
+	362, 363, 362, 363, 363, 362, 363, 363,
+	362, 363, 362, 362, 362, 363, 363, 363,
+	362, 363, 363, 362, 363, 363, 362, 363,
+	362, 363, 363, 363, 363, 363, 362, 363,
+	362, 362, 363, 363, 363, 362, 362, 362,
+	363, 363, 363, 362, 363, 362, 363, 362,
+	363, 363, 363, 363, 363, 362, 363, 362,
+	415, 416, 417, 418, 419, 362, 363, 362,
+	363, 362, 363, 362, 363, 362, 363, 362,
+	420, 421, 362, 363, 362, 363, 362, 422,
+	423, 424, 425, 426, 427, 428, 429, 430,
+	431, 432, 433, 434, 435, 436, 362, 363,
+	363, 362, 363, 362, 363, 362, 363, 363,
+	363, 363, 363, 362, 363, 363, 362, 362,
+	362, 362, 363, 363, 362, 363, 362, 363,
+	363, 362, 362, 362, 363, 363, 362, 363,
+	363, 363, 362, 363, 363, 363, 363, 362,
+	363, 363, 363, 362, 363, 363, 362, 437,
+	438, 423, 362, 363, 362, 363, 363, 362,
+	439, 440, 441, 442, 443, 444, 445, 446,
+	447, 362, 448, 449, 450, 451, 452, 453,
+	454, 455, 456, 457, 362, 363, 362, 363,
+	362, 363, 362, 363, 363, 363, 363, 363,
+	362, 363, 362, 363, 362, 363, 362, 363,
+	362, 363, 362, 363, 362, 458, 459, 460,
+	461, 462, 463, 464, 465, 466, 467, 468,
+	469, 470, 471, 472, 406, 473, 474, 475,
+	406, 476, 477, 478, 479, 480, 481, 482,
+	483, 484, 485, 486, 487, 488, 489, 490,
+	491, 492, 493, 362, 363, 363, 362, 362,
+	362, 362, 363, 362, 362, 363, 363, 362,
+	363, 363, 362, 363, 363, 362, 363, 363,
+	362, 362, 362, 362, 363, 363, 363, 362,
+	363, 362, 363, 362, 363, 363, 363, 362,
+	363, 363, 363, 363, 363, 363, 363, 362,
+	363, 362, 363, 363, 362, 362, 363, 363,
+	363, 362, 362, 362, 363, 362, 363, 363,
+	362, 363, 363, 362, 363, 362, 363, 363,
+	363, 362, 363, 363, 362, 363, 363, 363,
+	362, 363, 363, 363, 362, 363, 363, 362,
+	363, 362, 363, 363, 362, 363, 363, 362,
+	363, 363, 363, 363, 362, 362, 362, 363,
+	363, 363, 363, 362, 363, 362, 363, 363,
+	363, 363, 362, 363, 362, 494, 362, 363,
+	363, 362, 495, 496, 497, 498, 499, 362,
+	363, 362, 363, 362, 363, 363, 362, 362,
+	362, 363, 363, 363, 362, 500, 362, 363,
+	362, 501, 502, 362, 363, 363, 362, 362,
+	363, 503, 504, 505, 506, 507, 508, 362,
+	363, 363, 363, 362, 362, 362, 362, 363,
+	363, 362, 363, 363, 362, 362, 362, 363,
+	363, 363, 363, 362, 509, 510, 496, 511,
+	512, 512, 513, 514, 362, 363, 363, 363,
+	363, 363, 362, 363, 362, 363, 362, 363,
+	362, 363, 362, 363, 362, 515, 362, 363,
+	362, 516, 362, 517, 518, 519, 521, 520,
+	362, 363, 362, 362, 363, 363, 522, 522,
+	363, 363, 362, 522, 362, 362, 522, 522,
+	362, 522, 522, 362, 522, 522, 522, 362,
+	522, 362, 522, 522, 362, 522, 522, 522,
+	522, 362, 522, 522, 362, 362, 522, 522,
+	362, 522, 522, 362, 523, 524, 525, 526,
+	527, 528, 530, 531, 532, 534, 535, 536,
+	537, 538, 539, 540, 541, 542, 543, 544,
+	545, 546, 547, 548, 549, 550, 551, 529,
+	533, 362, 522, 522, 522, 522, 362, 522,
+	362, 522, 362, 522, 522, 362, 362, 362,
+	522, 362, 362, 362, 522, 522, 522, 522,
+	362, 362, 362, 362, 362, 362, 362, 522,
+	362, 362, 362, 362, 362, 362, 522, 362,
+	362, 362, 362, 362, 522, 522, 522, 522,
+	362, 522, 522, 522, 522, 522, 362, 522,
+	522, 362, 522, 522, 522, 522, 362, 522,
+	522, 522, 362, 362, 362, 362, 362, 362,
+	522, 522, 522, 522, 522, 522, 522, 362,
+	522, 522, 522, 362, 362, 362, 362, 362,
+	362, 522, 522, 362, 522, 522, 522, 522,
+	522, 362, 522, 522, 362, 522, 362, 522,
+	522, 362, 522, 362, 522, 522, 522, 522,
+	522, 362, 522, 362, 522, 522, 522, 522,
+	362, 522, 362, 552, 553, 554, 555, 556,
+	557, 558, 559, 560, 561, 562, 563, 564,
+	565, 566, 567, 568, 569, 570, 571, 572,
+	573, 362, 522, 522, 362, 522, 522, 522,
+	362, 522, 522, 522, 522, 362, 522, 362,
+	522, 522, 362, 522, 522, 362, 522, 362,
+	362, 362, 522, 522, 522, 362, 522, 522,
+	362, 522, 522, 362, 522, 362, 522, 522,
+	522, 522, 522, 362, 522, 362, 362, 522,
+	522, 522, 362, 362, 362, 522, 522, 522,
+	362, 522, 362, 522, 362, 522, 522, 522,
+	522, 522, 362, 522, 362, 574, 575, 576,
+	577, 578, 362, 522, 579, 362, 522, 522,
+	362, 580, 581, 575, 582, 583, 584, 585,
+	586, 587, 588, 589, 590, 573, 591, 592,
+	593, 594, 595, 596, 597, 598, 576, 577,
+	578, 362, 522, 579, 522, 362, 522, 362,
+	522, 362, 522, 522, 362, 522, 522, 362,
+	522, 522, 362, 522, 362, 522, 522, 522,
+	362, 522, 362, 522, 522, 362, 522, 522,
+	362, 522, 522, 522, 362, 522, 522, 362,
+	522, 362, 362, 362, 362, 362, 362, 362,
+	362, 522, 522, 522, 522, 522, 522, 522,
+	522, 362, 522, 522, 522, 522, 362, 522,
+	362, 522, 522, 362, 522, 522, 362, 522,
+	362, 522, 362, 522, 362, 599, 600, 601,
+	362, 522, 522, 362, 522, 362, 522, 522,
+	362, 602, 603, 604, 605, 606, 607, 608,
+	609, 610, 611, 612, 613, 614, 615, 616,
+	362, 522, 522, 362, 522, 362, 522, 362,
+	522, 522, 522, 522, 522, 362, 522, 522,
+	362, 362, 362, 362, 522, 522, 362, 522,
+	362, 522, 522, 362, 362, 362, 522, 522,
+	362, 522, 522, 522, 362, 522, 522, 522,
+	522, 362, 522, 522, 522, 362, 522, 522,
+	362, 617, 618, 603, 362, 522, 362, 522,
+	522, 362, 619, 620, 621, 622, 623, 624,
+	625, 626, 627, 362, 628, 629, 630, 631,
+	632, 633, 634, 635, 636, 637, 362, 522,
+	362, 522, 362, 522, 362, 522, 522, 522,
+	522, 522, 362, 522, 362, 522, 362, 522,
+	362, 522, 362, 522, 362, 522, 362, 638,
+	639, 640, 587, 641, 642, 643, 644, 645,
+	646, 647, 648, 649, 650, 651, 565, 652,
+	653, 654, 565, 655, 656, 657, 658, 659,
+	660, 661, 662, 663, 664, 665, 666, 667,
+	668, 669, 670, 671, 672, 362, 522, 522,
+	362, 362, 362, 362, 522, 362, 362, 522,
+	522, 522, 362, 522, 522, 362, 522, 522,
+	362, 362, 362, 362, 522, 522, 522, 362,
+	522, 362, 522, 362, 522, 522, 522, 362,
+	522, 522, 522, 522, 522, 522, 522, 362,
+	522, 362, 522, 522, 362, 362, 522, 522,
+	522, 362, 362, 362, 522, 362, 522, 522,
+	362, 522, 522, 362, 522, 362, 522, 522,
+	522, 362, 522, 522, 362, 522, 522, 522,
+	362, 522, 522, 522, 362, 522, 522, 362,
+	522, 362, 522, 522, 362, 522, 522, 362,
+	522, 522, 522, 522, 362, 362, 362, 522,
+	522, 522, 522, 362, 522, 362, 522, 522,
+	522, 522, 362, 522, 362, 673, 362, 522,
+	522, 362, 674, 675, 676, 677, 678, 362,
+	522, 362, 522, 362, 522, 522, 362, 362,
+	362, 522, 522, 522, 362, 679, 362, 522,
+	362, 680, 681, 362, 522, 522, 362, 362,
+	522, 682, 683, 684, 685, 686, 687, 362,
+	522, 522, 522, 362, 362, 362, 362, 522,
+	522, 362, 522, 522, 362, 362, 362, 522,
+	522, 522, 522, 362, 688, 689, 675, 690,
+	691, 691, 692, 693, 362, 522, 522, 522,
+	522, 522, 362, 522, 362, 522, 362, 522,
+	362, 522, 362, 522, 362, 694, 695, 696,
+	697, 698, 699, 700, 701, 695, 694, 695,
+	694, 695, 573, 694, 702, 703, 695, 694,
+	704, 705, 706, 707, 708, 709, 695, 710,
+	711, 694, 695, 694, 702, 592, 573, 573,
+	592, 362, 362, 522, 522, 522, 362, 522,
+	522, 362, 522, 522, 522, 362, 362, 522,
+	522, 522, 522, 522, 522, 362, 522, 362,
+	362, 522, 522, 362, 362, 522, 522, 362,
+	522, 362, 522, 362, 522, 522, 362, 522,
+	522, 362, 522, 522, 362, 522, 522, 362,
+	712, 362, 713, 695, 694, 714, 592, 362,
+	522, 362, 715, 600, 362, 522, 362, 619,
+	620, 621, 622, 623, 624, 625, 626, 716,
+	362, 717, 362, 522, 362, 361, 363, 363,
+	362, 361, 363, 362, 361, 363, 362, 718,
+	719, 720, 414, 362, 363, 361, 363, 362,
+	361, 363, 362, 361, 363, 362, 721, 722,
+	723, 724, 725, 414, 362, 726, 362, 552,
+	553, 554, 721, 722, 727, 555, 556, 557,
+	558, 559, 560, 561, 562, 563, 564, 565,
+	566, 567, 568, 569, 570, 571, 572, 573,
+	362, 728, 726, 552, 553, 554, 729, 723,
+	724, 555, 556, 557, 558, 559, 560, 561,
+	562, 563, 564, 565, 566, 567, 568, 569,
+	570, 571, 572, 573, 362, 728, 362, 730,
+	728, 552, 553, 554, 731, 724, 555, 556,
+	557, 558, 559, 560, 561, 562, 563, 564,
+	565, 566, 567, 568, 569, 570, 571, 572,
+	573, 362, 730, 362, 362, 730, 732, 362,
+	730, 362, 733, 734, 362, 728, 362, 362,
+	730, 362, 728, 362, 728, 602, 603, 604,
+	605, 606, 607, 608, 735, 610, 611, 612,
+	613, 614, 615, 616, 737, 738, 739, 740,
+	741, 742, 737, 738, 739, 740, 741, 742,
+	737, 736, 743, 362, 522, 726, 362, 744,
+	744, 744, 730, 362, 552, 553, 554, 729,
+	727, 555, 556, 557, 558, 559, 560, 561,
+	562, 563, 564, 565, 566, 567, 568, 569,
+	570, 571, 572, 573, 362, 733, 745, 362,
+	362, 728, 744, 744, 730, 744, 744, 730,
+	744, 744, 744, 730, 744, 744, 730, 744,
+	744, 730, 744, 744, 362, 730, 730, 739,
+	740, 741, 742, 736, 737, 739, 740, 741,
+	742, 736, 737, 739, 740, 741, 742, 736,
+	737, 739, 740, 741, 742, 736, 737, 739,
+	740, 741, 742, 736, 737, 739, 740, 741,
+	742, 736, 737, 739, 740, 741, 742, 736,
+	737, 739, 740, 741, 742, 736, 737, 739,
+	740, 741, 742, 736, 737, 738, 743, 740,
+	741, 742, 736, 737, 738, 740, 741, 742,
+	736, 737, 738, 740, 741, 742, 736, 737,
+	738, 740, 741, 742, 736, 737, 738, 740,
+	741, 742, 736, 737, 738, 740, 741, 742,
+	736, 737, 738, 740, 741, 742, 736, 737,
+	738, 740, 741, 742, 736, 737, 738, 740,
+	741, 742, 736, 737, 738, 739, 743, 741,
+	742, 736, 737, 738, 739, 741, 742, 736,
+	737, 738, 739, 741, 742, 736, 737, 738,
+	739, 741, 742, 736, 737, 738, 739, 741,
+	746, 745, 740, 362, 743, 744, 362, 728,
+	730, 363, 363, 362, 747, 748, 749, 750,
+	751, 752, 753, 754, 755, 756, 757, 573,
+	758, 592, 759, 760, 761, 762, 763, 764,
+	414, 362, 522, 363, 363, 363, 363, 362,
+	522, 363, 363, 362, 522, 522, 363, 362,
+	363, 522, 363, 522, 363, 362, 522, 363,
+	522, 363, 362, 522, 363, 362, 522, 363,
+	522, 363, 522, 363, 362, 522, 363, 362,
+	522, 363, 522, 363, 362, 522, 363, 363,
+	522, 362, 363, 363, 522, 362, 522, 363,
+	522, 362, 363, 363, 363, 363, 363, 363,
+	363, 363, 362, 522, 522, 522, 522, 522,
+	363, 363, 522, 363, 522, 363, 362, 522,
+	522, 522, 363, 522, 363, 362, 363, 522,
+	363, 362, 363, 522, 363, 522, 363, 362,
+	522, 522, 363, 362, 765, 766, 414, 362,
+	522, 522, 363, 362, 522, 522, 363, 362,
+	414, 362, 767, 769, 770, 771, 772, 773,
+	774, 769, 770, 771, 772, 773, 774, 769,
+	414, 768, 743, 362, 363, 726, 363, 362,
+	728, 728, 728, 730, 362, 728, 728, 730,
+	728, 728, 730, 728, 728, 728, 730, 728,
+	728, 730, 728, 728, 730, 728, 728, 362,
+	730, 771, 772, 773, 774, 768, 769, 771,
+	772, 773, 774, 768, 769, 771, 772, 773,
+	774, 768, 769, 771, 772, 773, 774, 768,
+	769, 771, 772, 773, 774, 768, 769, 771,
+	772, 773, 774, 768, 769, 771, 772, 773,
+	774, 768, 769, 771, 772, 773, 774, 768,
+	769, 771, 772, 773, 774, 768, 769, 770,
+	743, 772, 773, 774, 768, 769, 770, 772,
+	773, 774, 768, 769, 770, 772, 773, 774,
+	768, 769, 770, 772, 773, 774, 768, 769,
+	770, 772, 773, 774, 768, 769, 770, 772,
+	773, 774, 768, 769, 770, 772, 773, 774,
+	768, 769, 770, 772, 773, 774, 768, 769,
+	770, 772, 773, 774, 768, 769, 770, 771,
+	743, 773, 774, 768, 769, 770, 771, 773,
+	774, 768, 769, 770, 771, 773, 774, 768,
+	769, 770, 771, 773, 774, 768, 769, 770,
+	771, 773, 775, 776, 772, 414, 362, 743,
+	728, 363, 728, 730, 363, 730, 363, 362,
+	728, 777, 778, 414, 362, 363, 362, 363,
+	363, 363, 362, 780, 781, 782, 783, 784,
+	779, 362, 785, 786, 787, 788, 789, 790,
+	791, 792, 793, 414, 362, 361, 363, 362,
+	361, 363, 362, 363, 361, 363, 362, 361,
+	363, 362, 361, 363, 362, 361, 363, 362,
+	363, 361, 363, 362, 361, 363, 362, 361,
+	363, 362, 794, 414, 362, 363, 362, 795,
+	414, 362, 363, 363, 362, 796, 414, 362,
+	363, 363, 362, 694, 695, 797, 798, 799,
+	800, 801, 802, 695, 694, 695, 694, 803,
+	573, 694, 804, 805, 695, 694, 806, 414,
+	807, 414, 808, 809, 810, 811, 695, 812,
+	813, 694, 695, 694, 804, 592, 573, 414,
+	592, 362, 522, 363, 522, 363, 362, 363,
+	522, 363, 522, 362, 522, 363, 522, 363,
+	522, 362, 814, 362, 522, 619, 620, 621,
+	622, 623, 624, 625, 626, 815, 362, 816,
+	717, 362, 522, 362, 363, 522, 522, 363,
+	522, 363, 522, 362, 363, 522, 362, 363,
+	362, 522, 363, 362, 522, 363, 522, 362,
+	363, 362, 522, 363, 522, 362, 363, 522,
+	362, 363, 522, 363, 362, 363, 522, 363,
+	522, 363, 362, 363, 522, 363, 522, 362,
+	363, 363, 522, 362, 363, 522, 362, 779,
+	362, 817, 779, 362, 419, 414, 794, 414,
+	362, 361, 3, 1, 361, 3, 1, 818,
+	819, 820, 54, 1, 3, 361, 3, 1,
+	361, 3, 1, 361, 3, 1, 821, 822,
+	823, 824, 825, 54, 1, 55, 826, 828,
+	827, 827, 828, 828, 827, 828, 828, 827,
+	828, 828, 828, 827, 828, 827, 828, 828,
+	827, 828, 828, 828, 828, 827, 828, 828,
+	827, 827, 828, 828, 827, 828, 828, 827,
+	829, 830, 831, 832, 833, 834, 836, 837,
+	838, 840, 841, 842, 843, 844, 845, 846,
+	847, 848, 849, 850, 851, 852, 853, 854,
+	855, 856, 857, 835, 839, 827, 828, 828,
+	828, 828, 827, 828, 827, 828, 827, 828,
+	828, 827, 827, 827, 828, 827, 827, 827,
+	828, 828, 828, 828, 827, 827, 827, 827,
+	827, 827, 827, 828, 827, 827, 827, 827,
+	827, 827, 828, 827, 827, 827, 827, 827,
+	828, 828, 828, 828, 827, 828, 828, 828,
+	828, 828, 827, 828, 828, 827, 828, 828,
+	828, 828, 827, 828, 828, 828, 827, 827,
+	827, 827, 827, 827, 828, 828, 828, 828,
+	828, 828, 828, 827, 828, 828, 828, 827,
+	827, 827, 827, 827, 827, 828, 828, 827,
+	828, 828, 828, 828, 828, 827, 828, 828,
+	827, 828, 827, 828, 828, 827, 828, 827,
+	828, 828, 828, 828, 828, 827, 828, 827,
+	828, 828, 828, 828, 827, 828, 827, 858,
+	859, 860, 861, 862, 863, 864, 865, 866,
+	867, 868, 869, 870, 871, 872, 873, 874,
+	875, 876, 877, 878, 879, 827, 828, 828,
+	827, 828, 828, 828, 827, 828, 828, 828,
+	828, 827, 828, 827, 828, 828, 827, 828,
+	828, 827, 828, 827, 827, 827, 828, 828,
+	828, 827, 828, 828, 827, 828, 828, 827,
+	828, 827, 828, 828, 828, 828, 828, 827,
+	828, 827, 827, 828, 828, 828, 827, 827,
+	827, 828, 828, 828, 827, 828, 827, 828,
+	827, 828, 828, 828, 828, 828, 827, 828,
+	827, 880, 881, 882, 883, 884, 827, 828,
+	885, 827, 828, 828, 827, 886, 887, 881,
+	888, 889, 890, 891, 892, 893, 894, 895,
+	896, 879, 897, 898, 899, 900, 901, 902,
+	903, 904, 882, 883, 884, 827, 828, 885,
+	828, 827, 828, 827, 828, 827, 828, 828,
+	827, 828, 828, 827, 828, 828, 827, 828,
+	827, 828, 828, 828, 827, 828, 827, 828,
+	828, 827, 828, 828, 827, 828, 828, 828,
+	827, 828, 828, 827, 828, 827, 827, 827,
+	827, 827, 827, 827, 827, 828, 828, 828,
+	828, 828, 828, 828, 828, 827, 828, 828,
+	828, 828, 827, 828, 827, 828, 828, 827,
+	828, 828, 827, 828, 827, 828, 827, 828,
+	827, 905, 906, 907, 827, 828, 828, 827,
+	828, 827, 828, 828, 827, 908, 909, 910,
+	911, 912, 913, 914, 915, 916, 917, 918,
+	919, 920, 921, 922, 827, 828, 828, 827,
+	828, 827, 828, 827, 828, 828, 828, 828,
+	828, 827, 828, 828, 827, 827, 827, 827,
+	828, 828, 827, 828, 827, 828, 828, 827,
+	827, 827, 828, 828, 827, 828, 828, 828,
+	827, 828, 828, 828, 828, 827, 828, 828,
+	828, 827, 828, 828, 827, 923, 924, 909,
+	827, 828, 827, 828, 828, 827, 925, 926,
+	927, 928, 929, 930, 931, 932, 933, 827,
+	934, 935, 936, 937, 938, 939, 940, 941,
+	942, 943, 827, 828, 827, 828, 827, 828,
+	827, 828, 828, 828, 828, 828, 827, 828,
+	827, 828, 827, 828, 827, 828, 827, 828,
+	827, 828, 827, 944, 945, 946, 893, 947,
+	948, 949, 950, 951, 952, 953, 954, 955,
+	956, 957, 871, 958, 959, 960, 871, 961,
+	962, 963, 964, 965, 966, 967, 968, 969,
+	970, 971, 972, 973, 974, 975, 976, 977,
+	978, 827, 828, 828, 827, 827, 827, 827,
+	828, 827, 827, 828, 828, 828, 827, 828,
+	828, 827, 828, 828, 827, 827, 827, 827,
+	828, 828, 828, 827, 828, 827, 828, 827,
+	828, 828, 828, 827, 828, 828, 828, 828,
+	828, 828, 828, 827, 828, 827, 828, 828,
+	827, 827, 828, 828, 828, 827, 827, 827,
+	828, 827, 828, 828, 827, 828, 828, 827,
+	828, 827, 828, 828, 828, 827, 828, 828,
+	827, 828, 828, 828, 827, 828, 828, 828,
+	827, 828, 828, 827, 828, 827, 828, 828,
+	827, 828, 828, 827, 828, 828, 828, 828,
+	827, 827, 827, 828, 828, 828, 828, 827,
+	828, 827, 828, 828, 828, 828, 827, 828,
+	827, 979, 827, 828, 828, 827, 980, 981,
+	982, 983, 984, 827, 828, 827, 828, 827,
+	828, 828, 827, 827, 827, 828, 828, 828,
+	827, 985, 827, 828, 827, 986, 987, 827,
+	828, 828, 827, 827, 828, 988, 989, 990,
+	991, 992, 993, 827, 828, 828, 828, 827,
+	827, 827, 827, 828, 828, 827, 828, 828,
+	827, 827, 827, 828, 828, 828, 828, 827,
+	994, 995, 981, 996, 997, 997, 998, 999,
+	827, 828, 828, 828, 828, 828, 827, 828,
+	827, 828, 827, 828, 827, 828, 827, 828,
+	827, 1000, 1001, 1002, 1003, 1004, 1005, 1006,
+	1007, 1001, 1000, 1001, 1000, 1001, 879, 1000,
+	1008, 1009, 1001, 1000, 1010, 1011, 1012, 1013,
+	1014, 1015, 1001, 1016, 1017, 1000, 1001, 1000,
+	1008, 898, 879, 879, 898, 827, 827, 828,
+	828, 828, 827, 828, 828, 827, 828, 828,
+	828, 827, 827, 828, 828, 828, 828, 828,
+	828, 827, 828, 827, 827, 828, 828, 827,
+	827, 828, 828, 827, 828, 827, 828, 827,
+	828, 828, 827, 828, 828, 827, 828, 828,
+	827, 828, 828, 827, 1018, 827, 1019, 1001,
+	1000, 1020, 898, 827, 828, 827, 1021, 906,
+	827, 828, 827, 925, 926, 927, 928, 929,
+	930, 931, 932, 1022, 827, 1023, 827, 828,
+	827, 858, 859, 860, 821, 822, 1024, 861,
+	862, 863, 864, 865, 866, 867, 868, 869,
+	870, 871, 872, 873, 874, 875, 876, 877,
+	878, 879, 827, 1025, 826, 858, 859, 860,
+	1026, 823, 824, 861, 862, 863, 864, 865,
+	866, 867, 868, 869, 870, 871, 872, 873,
+	874, 875, 876, 877, 878, 879, 827, 1025,
+	827, 1027, 1025, 858, 859, 860, 1028, 824,
+	861, 862, 863, 864, 865, 866, 867, 868,
+	869, 870, 871, 872, 873, 874, 875, 876,
+	877, 878, 879, 827, 1027, 827, 55, 1027,
+	1029, 827, 1027, 827, 1030, 1031, 827, 1025,
+	827, 827, 1027, 827, 1025, 827, 1025, 908,
+	909, 910, 911, 912, 913, 914, 1032, 916,
+	917, 918, 919, 920, 921, 922, 1034, 1035,
+	1036, 1037, 1038, 1039, 1034, 1035, 1036, 1037,
+	1038, 1039, 1034, 1033, 1040, 827, 828, 826,
+	827, 1041, 1041, 1041, 1027, 827, 858, 859,
+	860, 1026, 1024, 861, 862, 863, 864, 865,
+	866, 867, 868, 869, 870, 871, 872, 873,
+	874, 875, 876, 877, 878, 879, 827, 1030,
+	1042, 827, 827, 1025, 1041, 1041, 1027, 1041,
+	1041, 1027, 1041, 1041, 1041, 1027, 1041, 1041,
+	1027, 1041, 1041, 1027, 1041, 1041, 827, 1027,
+	1027, 1036, 1037, 1038, 1039, 1033, 1034, 1036,
+	1037, 1038, 1039, 1033, 1034, 1036, 1037, 1038,
+	1039, 1033, 1034, 1036, 1037, 1038, 1039, 1033,
+	1034, 1036, 1037, 1038, 1039, 1033, 1034, 1036,
+	1037, 1038, 1039, 1033, 1034, 1036, 1037, 1038,
+	1039, 1033, 1034, 1036, 1037, 1038, 1039, 1033,
+	1034, 1036, 1037, 1038, 1039, 1033, 1034, 1035,
+	1040, 1037, 1038, 1039, 1033, 1034, 1035, 1037,
+	1038, 1039, 1033, 1034, 1035, 1037, 1038, 1039,
+	1033, 1034, 1035, 1037, 1038, 1039, 1033, 1034,
+	1035, 1037, 1038, 1039, 1033, 1034, 1035, 1037,
+	1038, 1039, 1033, 1034, 1035, 1037, 1038, 1039,
+	1033, 1034, 1035, 1037, 1038, 1039, 1033, 1034,
+	1035, 1037, 1038, 1039, 1033, 1034, 1035, 1036,
+	1040, 1038, 1039, 1033, 1034, 1035, 1036, 1038,
+	1039, 1033, 1034, 1035, 1036, 1038, 1039, 1033,
+	1034, 1035, 1036, 1038, 1039, 1033, 1034, 1035,
+	1036, 1038, 1043, 1042, 1037, 827, 1040, 1041,
+	827, 1025, 1027, 163, 3, 1, 1044, 1045,
+	1046, 1047, 1048, 1049, 1050, 1051, 1052, 1053,
+	1054, 216, 1055, 235, 1056, 1057, 1058, 1059,
+	1060, 1061, 54, 1, 163, 1062, 164, 3,
+	163, 3, 163, 3, 1, 1062, 1063, 1063,
+	1062, 1062, 1063, 1062, 1062, 1063, 1062, 1062,
+	1062, 1063, 1062, 1063, 1062, 1062, 1063, 1062,
+	1062, 1062, 1062, 1063, 1062, 1062, 1063, 1063,
+	1062, 1062, 1063, 1062, 1062, 1063, 1064, 1065,
+	1066, 1067, 1068, 1069, 1071, 1072, 1073, 1075,
+	1076, 1077, 1078, 1079, 1080, 1081, 1082, 1083,
+	1084, 1085, 1086, 1087, 1088, 1089, 1090, 1091,
+	1092, 1070, 1074, 1063, 1062, 1062, 1062, 1062,
+	1063, 1062, 1063, 1062, 1063, 1062, 1062, 1063,
+	1063, 1063, 1062, 1063, 1063, 1063, 1062, 1062,
+	1062, 1062, 1063, 1063, 1063, 1063, 1063, 1063,
+	1063, 1062, 1063, 1063, 1063, 1063, 1063, 1063,
+	1062, 1063, 1063, 1063, 1063, 1063, 1062, 1062,
+	1062, 1062, 1063, 1062, 1062, 1062, 1062, 1062,
+	1063, 1062, 1062, 1063, 1062, 1062, 1062, 1062,
+	1063, 1062, 1062, 1062, 1063, 1063, 1063, 1063,
+	1063, 1063, 1062, 1062, 1062, 1062, 1062, 1062,
+	1062, 1063, 1062, 1062, 1062, 1063, 1063, 1063,
+	1063, 1063, 1063, 1062, 1062, 1063, 1062, 1062,
+	1062, 1062, 1062, 1063, 1062, 1062, 1063, 1062,
+	1063, 1062, 1062, 1063, 1062, 1063, 1062, 1062,
+	1062, 1062, 1062, 1063, 1062, 1063, 1062, 1062,
+	1062, 1062, 1063, 1062, 1063, 1093, 1094, 1095,
+	1096, 1097, 1098, 1099, 1100, 1101, 1102, 1103,
+	1104, 1105, 1106, 1107, 1108, 1109, 1110, 1111,
+	1112, 1113, 1114, 1063, 1062, 1062, 1063, 1062,
+	1062, 1062, 1063, 1062, 1062, 1062, 1062, 1063,
+	1062, 1063, 1062, 1062, 1063, 1062, 1062, 1063,
+	1062, 1063, 1063, 1063, 1062, 1062, 1062, 1063,
+	1062, 1062, 1063, 1062, 1062, 1063, 1062, 1063,
+	1062, 1062, 1062, 1062, 1062, 1063, 1062, 1063,
+	1063, 1062, 1062, 1062, 1063, 1063, 1063, 1062,
+	1062, 1062, 1063, 1062, 1063, 1062, 1063, 1062,
+	1062, 1062, 1062, 1062, 1063, 1062, 1063, 1115,
+	1116, 1117, 1118, 1119, 1063, 1062, 1063, 1062,
+	1063, 1062, 1063, 1062, 1063, 1062, 1063, 1120,
+	1121, 1063, 1062, 1063, 1062, 1063, 1122, 1123,
+	1124, 1125, 1126, 1127, 1128, 1129, 1130, 1131,
+	1132, 1133, 1134, 1135, 1136, 1063, 1062, 1062,
+	1063, 1062, 1063, 1062, 1063, 1062, 1062, 1062,
+	1062, 1062, 1063, 1062, 1062, 1063, 1063, 1063,
+	1063, 1062, 1062, 1063, 1062, 1063, 1062, 1062,
+	1063, 1063, 1063, 1062, 1062, 1063, 1062, 1062,
+	1062, 1063, 1062, 1062, 1062, 1062, 1063, 1062,
+	1062, 1062, 1063, 1062, 1062, 1063, 1137, 1138,
+	1123, 1063, 1062, 1063, 1062, 1062, 1063, 1139,
+	1140, 1141, 1142, 1143, 1144, 1145, 1146, 1147,
+	1063, 1148, 1149, 1150, 1151, 1152, 1153, 1154,
+	1155, 1156, 1157, 1063, 1062, 1063, 1062, 1063,
+	1062, 1063, 1062, 1062, 1062, 1062, 1062, 1063,
+	1062, 1063, 1062, 1063, 1062, 1063, 1062, 1063,
+	1062, 1063, 1062, 1063, 1158, 1159, 1160, 1161,
+	1162, 1163, 1164, 1165, 1166, 1167, 1168, 1169,
+	1170, 1171, 1172, 1106, 1173, 1174, 1175, 1106,
+	1176, 1177, 1178, 1179, 1180, 1181, 1182, 1183,
+	1184, 1185, 1186, 1187, 1188, 1189, 1190, 1191,
+	1192, 1193, 1063, 1062, 1062, 1063, 1063, 1063,
+	1063, 1062, 1063, 1063, 1062, 1062, 1063, 1062,
+	1062, 1063, 1062, 1062, 1063, 1062, 1062, 1063,
+	1063, 1063, 1063, 1062, 1062, 1062, 1063, 1062,
+	1063, 1062, 1063, 1062, 1062, 1062, 1063, 1062,
+	1062, 1062, 1062, 1062, 1062, 1062, 1063, 1062,
+	1063, 1062, 1062, 1063, 1063, 1062, 1062, 1062,
+	1063, 1063, 1063, 1062, 1063, 1062, 1062, 1063,
+	1062, 1062, 1063, 1062, 1063, 1062, 1062, 1062,
+	1063, 1062, 1062, 1063, 1062, 1062, 1062, 1063,
+	1062, 1062, 1062, 1063, 1062, 1062, 1063, 1062,
+	1063, 1062, 1062, 1063, 1062, 1062, 1063, 1062,
+	1062, 1062, 1062, 1063, 1063, 1063, 1062, 1062,
+	1062, 1062, 1063, 1062, 1063, 1062, 1062, 1062,
+	1062, 1063, 1062, 1063, 1194, 1063, 1062, 1062,
+	1063, 1195, 1196, 1197, 1198, 1199, 1063, 1062,
+	1063, 1062, 1063, 1062, 1062, 1063, 1063, 1063,
+	1062, 1062, 1062, 1063, 1200, 1063, 1062, 1063,
+	1201, 1202, 1063, 1062, 1062, 1063, 1063, 1062,
+	1203, 1204, 1205, 1206, 1207, 1208, 1063, 1062,
+	1062, 1062, 1063, 1063, 1063, 1063, 1062, 1062,
+	1063, 1062, 1062, 1063, 1063, 1063, 1062, 1062,
+	1062, 1062, 1063, 1209, 1210, 1196, 1211, 1212,
+	1212, 1213, 1214, 1063, 1062, 1062, 1062, 1062,
+	1062, 1063, 1062, 1063, 1062, 1063, 1062, 1063,
+	1062, 1063, 1062, 1063, 1215, 1063, 1062, 1063,
+	1216, 1063, 1217, 1218, 1219, 1221, 1220, 1063,
+	1062, 1063, 1063, 1062, 1062, 164, 3, 163,
+	3, 1, 164, 164, 3, 1, 3, 164,
+	3, 164, 3, 1, 164, 3, 164, 3,
+	1, 164, 3, 1, 164, 3, 164, 3,
+	164, 3, 1, 164, 3, 1, 164, 3,
+	164, 3, 1, 164, 3, 3, 164, 1,
+	3, 3, 164, 1, 164, 3, 164, 1,
+	3, 3, 3, 3, 3, 3, 3, 3,
+	1, 164, 164, 164, 164, 164, 3, 3,
+	164, 3, 164, 3, 1, 164, 164, 164,
+	3, 164, 3, 1, 3, 164, 3, 1,
+	3, 164, 3, 164, 3, 1, 164, 164,
+	3, 1, 1222, 1223, 54, 1, 164, 164,
+	3, 1, 164, 164, 3, 1, 54, 1,
+	1224, 1226, 1227, 1228, 1229, 1230, 1231, 1226,
+	1227, 1228, 1229, 1230, 1231, 1226, 54, 1225,
+	1040, 1, 3, 826, 3, 1, 1025, 1025,
+	1025, 1027, 1, 1025, 1025, 1027, 1025, 1025,
+	1027, 1025, 1025, 1025, 1027, 1025, 1025, 1027,
+	1025, 1025, 1027, 1025, 1025, 1, 1027, 1228,
+	1229, 1230, 1231, 1225, 1226, 1228, 1229, 1230,
+	1231, 1225, 1226, 1228, 1229, 1230, 1231, 1225,
+	1226, 1228, 1229, 1230, 1231, 1225, 1226, 1228,
+	1229, 1230, 1231, 1225, 1226, 1228, 1229, 1230,
+	1231, 1225, 1226, 1228, 1229, 1230, 1231, 1225,
+	1226, 1228, 1229, 1230, 1231, 1225, 1226, 1228,
+	1229, 1230, 1231, 1225, 1226, 1227, 1040, 1229,
+	1230, 1231, 1225, 1226, 1227, 1229, 1230, 1231,
+	1225, 1226, 1227, 1229, 1230, 1231, 1225, 1226,
+	1227, 1229, 1230, 1231, 1225, 1226, 1227, 1229,
+	1230, 1231, 1225, 1226, 1227, 1229, 1230, 1231,
+	1225, 1226, 1227, 1229, 1230, 1231, 1225, 1226,
+	1227, 1229, 1230, 1231, 1225, 1226, 1227, 1229,
+	1230, 1231, 1225, 1226, 1227, 1228, 1040, 1230,
+	1231, 1225, 1226, 1227, 1228, 1230, 1231, 1225,
+	1226, 1227, 1228, 1230, 1231, 1225, 1226, 1227,
+	1228, 1230, 1231, 1225, 1226, 1227, 1228, 1230,
+	1232, 1233, 1229, 54, 1, 1040, 1025, 3,
+	1025, 1027, 3, 1027, 3, 1, 1025, 1234,
+	1235, 54, 1, 163, 3, 1, 3, 3,
+	163, 3, 1, 1237, 1238, 1239, 1240, 1241,
+	1236, 1, 1242, 1243, 1244, 1245, 1246, 1247,
+	1248, 1249, 1250, 54, 1, 361, 3, 1,
+	361, 3, 1, 3, 361, 3, 1, 361,
+	3, 1, 361, 3, 1, 361, 3, 1,
+	3, 361, 3, 1, 361, 3, 1, 361,
+	3, 1, 1251, 54, 1, 3, 163, 1,
+	1252, 54, 1, 3, 163, 3, 1, 1253,
+	54, 1, 3, 163, 3, 1, 337, 338,
+	1254, 1255, 1256, 1257, 1258, 1259, 338, 337,
+	338, 337, 1260, 216, 337, 1261, 1262, 338,
+	337, 1263, 54, 1264, 54, 1265, 1266, 1267,
+	1268, 338, 1269, 1270, 337, 338, 337, 1261,
+	235, 216, 54, 235, 1, 164, 3, 164,
+	3, 1, 3, 164, 3, 164, 1, 164,
+	3, 164, 3, 164, 1, 1271, 1, 164,
+	1273, 1272, 1272, 1273, 1273, 1272, 1273, 1273,
+	1272, 1273, 1273, 1273, 1272, 1273, 1272, 1273,
+	1273, 1272, 1273, 1273, 1273, 1273, 1272, 1273,
+	1273, 1272, 1272, 1273, 1273, 1272, 1273, 1273,
+	1272, 1274, 1275, 1276, 1277, 1278, 1279, 1281,
+	1282, 1283, 1285, 1286, 1287, 1288, 1289, 1290,
+	1291, 1292, 1293, 1294, 1295, 1296, 1297, 1298,
+	1299, 1300, 1301, 1302, 1280, 1284, 1272, 1273,
+	1273, 1273, 1273, 1272, 1273, 1272, 1273, 1272,
+	1273, 1273, 1272, 1272, 1272, 1273, 1272, 1272,
+	1272, 1273, 1273, 1273, 1273, 1272, 1272, 1272,
+	1272, 1272, 1272, 1272, 1273, 1272, 1272, 1272,
+	1272, 1272, 1272, 1273, 1272, 1272, 1272, 1272,
+	1272, 1273, 1273, 1273, 1273, 1272, 1273, 1273,
+	1273, 1273, 1273, 1272, 1273, 1273, 1272, 1273,
+	1273, 1273, 1273, 1272, 1273, 1273, 1273, 1272,
+	1272, 1272, 1272, 1272, 1272, 1273, 1273, 1273,
+	1273, 1273, 1273, 1273, 1272, 1273, 1273, 1273,
+	1272, 1272, 1272, 1272, 1272, 1272, 1273, 1273,
+	1272, 1273, 1273, 1273, 1273, 1273, 1272, 1273,
+	1273, 1272, 1273, 1272, 1273, 1273, 1272, 1273,
+	1272, 1273, 1273, 1273, 1273, 1273, 1272, 1273,
+	1272, 1273, 1273, 1273, 1273, 1272, 1273, 1272,
+	1303, 1304, 1305, 1306, 1307, 1308, 1309, 1310,
+	1311, 1312, 1313, 1314, 1315, 1316, 1317, 1318,
+	1319, 1320, 1321, 1322, 1323, 1324, 1272, 1273,
+	1273, 1272, 1273, 1273, 1273, 1272, 1273, 1273,
+	1273, 1273, 1272, 1273, 1272, 1273, 1273, 1272,
+	1273, 1273, 1272, 1273, 1272, 1272, 1272, 1273,
+	1273, 1273, 1272, 1273, 1273, 1272, 1273, 1273,
+	1272, 1273, 1272, 1273, 1273, 1273, 1273, 1273,
+	1272, 1273, 1272, 1272, 1273, 1273, 1273, 1272,
+	1272, 1272, 1273, 1273, 1273, 1272, 1273, 1272,
+	1273, 1272, 1273, 1273, 1273, 1273, 1273, 1272,
+	1273, 1272, 1325, 1326, 1327, 1328, 1329, 1272,
+	1273, 1330, 1272, 1273, 1273, 1272, 1331, 1332,
+	1326, 1333, 1334, 1335, 1336, 1337, 1338, 1339,
+	1340, 1341, 1324, 1342, 1343, 1344, 1345, 1346,
+	1347, 1348, 1349, 1327, 1328, 1329, 1272, 1273,
+	1330, 1273, 1272, 1273, 1272, 1273, 1272, 1273,
+	1273, 1272, 1273, 1273, 1272, 1273, 1273, 1272,
+	1273, 1272, 1273, 1273, 1273, 1272, 1273, 1272,
+	1273, 1273, 1272, 1273, 1273, 1272, 1273, 1273,
+	1273, 1272, 1273, 1273, 1272, 1273, 1272, 1272,
+	1272, 1272, 1272, 1272, 1272, 1272, 1273, 1273,
+	1273, 1273, 1273, 1273, 1273, 1273, 1272, 1273,
+	1273, 1273, 1273, 1272, 1273, 1272, 1273, 1273,
+	1272, 1273, 1273, 1272, 1273, 1272, 1273, 1272,
+	1273, 1272, 1350, 1351, 1352, 1272, 1273, 1273,
+	1272, 1273, 1272, 1273, 1273, 1272, 1353, 1354,
+	1355, 1356, 1357, 1358, 1359, 1360, 1361, 1362,
+	1363, 1364, 1365, 1366, 1367, 1272, 1273, 1273,
+	1272, 1273, 1272, 1273, 1272, 1273, 1273, 1273,
+	1273, 1273, 1272, 1273, 1273, 1272, 1272, 1272,
+	1272, 1273, 1273, 1272, 1273, 1272, 1273, 1273,
+	1272, 1272, 1272, 1273, 1273, 1272, 1273, 1273,
+	1273, 1272, 1273, 1273, 1273, 1273, 1272, 1273,
+	1273, 1273, 1272, 1273, 1273, 1272, 1368, 1369,
+	1354, 1272, 1273, 1272, 1273, 1273, 1272, 1370,
+	1371, 1372, 1373, 1374, 1375, 1376, 1377, 1378,
+	1272, 1379, 1380, 1381, 1382, 1383, 1384, 1385,
+	1386, 1387, 1388, 1272, 1273, 1272, 1273, 1272,
+	1273, 1272, 1273, 1273, 1273, 1273, 1273, 1272,
+	1273, 1272, 1273, 1272, 1273, 1272, 1273, 1272,
+	1273, 1272, 1273, 1272, 1389, 1390, 1391, 1338,
+	1392, 1393, 1394, 1395, 1396, 1397, 1398, 1399,
+	1400, 1401, 1402, 1316, 1403, 1404, 1405, 1316,
+	1406, 1407, 1408, 1409, 1410, 1411, 1412, 1413,
+	1414, 1415, 1416, 1417, 1418, 1419, 1420, 1421,
+	1422, 1423, 1272, 1273, 1273, 1272, 1272, 1272,
+	1272, 1273, 1272, 1272, 1273, 1273, 1273, 1272,
+	1273, 1273, 1272, 1273, 1273, 1272, 1272, 1272,
+	1272, 1273, 1273, 1273, 1272, 1273, 1272, 1273,
+	1272, 1273, 1273, 1273, 1272, 1273, 1273, 1273,
+	1273, 1273, 1273, 1273, 1272, 1273, 1272, 1273,
+	1273, 1272, 1272, 1273, 1273, 1273, 1272, 1272,
+	1272, 1273, 1272, 1273, 1273, 1272, 1273, 1273,
+	1272, 1273, 1272, 1273, 1273, 1273, 1272, 1273,
+	1273, 1272, 1273, 1273, 1273, 1272, 1273, 1273,
+	1273, 1272, 1273, 1273, 1272, 1273, 1272, 1273,
+	1273, 1272, 1273, 1273, 1272, 1273, 1273, 1273,
+	1273, 1272, 1272, 1272, 1273, 1273, 1273, 1273,
+	1272, 1273, 1272, 1273, 1273, 1273, 1273, 1272,
+	1273, 1272, 1424, 1272, 1273, 1273, 1272, 1425,
+	1426, 1427, 1428, 1429, 1272, 1273, 1272, 1273,
+	1272, 1273, 1273, 1272, 1272, 1272, 1273, 1273,
+	1273, 1272, 1430, 1272, 1273, 1272, 1431, 1432,
+	1272, 1273, 1273, 1272, 1272, 1273, 1433, 1434,
+	1435, 1436, 1437, 1438, 1272, 1273, 1273, 1273,
+	1272, 1272, 1272, 1272, 1273, 1273, 1272, 1273,
+	1273, 1272, 1272, 1272, 1273, 1273, 1273, 1273,
+	1272, 1439, 1440, 1426, 1441, 1442, 1442, 1443,
+	1444, 1272, 1273, 1273, 1273, 1273, 1273, 1272,
+	1273, 1272, 1273, 1272, 1273, 1272, 1273, 1272,
+	1273, 1272, 1445, 1446, 1447, 1448, 1449, 1450,
+	1451, 1452, 1446, 1445, 1446, 1445, 1446, 1324,
+	1445, 1453, 1454, 1446, 1445, 1455, 1456, 1457,
+	1458, 1459, 1460, 1446, 1461, 1462, 1445, 1446,
+	1445, 1453, 1343, 1324, 1324, 1343, 1272, 1272,
+	1273, 1273, 1273, 1272, 1273, 1273, 1272, 1273,
+	1273, 1273, 1272, 1272, 1273, 1273, 1273, 1273,
+	1273, 1273, 1272, 1273, 1272, 1272, 1273, 1273,
+	1272, 1272, 1273, 1273, 1272, 1273, 1272, 1273,
+	1272, 1273, 1273, 1272, 1273, 1273, 1272, 1273,
+	1273, 1272, 1273, 1273, 1272, 1463, 1272, 1464,
+	1446, 1445, 1465, 1343, 1272, 1273, 1272, 1466,
+	1351, 1272, 1273, 1272, 1370, 1371, 1372, 1373,
+	1374, 1375, 1376, 1377, 1467, 1272, 1468, 1272,
+	1273, 1272, 1370, 1371, 1372, 1373, 1374, 1375,
+	1376, 1377, 1469, 1272, 1470, 1468, 1272, 1273,
+	1272, 3, 164, 164, 3, 164, 3, 164,
+	1, 3, 164, 1, 3, 1, 164, 3,
+	1, 164, 3, 164, 1, 3, 1, 164,
+	3, 164, 1, 3, 164, 1, 3, 164,
+	3, 1, 3, 164, 3, 164, 3, 1,
+	3, 164, 3, 164, 1, 3, 3, 164,
+	1, 3, 164, 1, 1236, 1, 1471, 1236,
+	1, 1472, 1473, 1474, 1475, 1474, 54, 1476,
+	1, 163, 3, 1, 1, 163, 1, 163,
+	3, 163, 1, 163, 1, 1478, 1477, 1481,
+	1482, 1483, 1484, 1485, 1486, 1487, 1488, 1490,
+	1491, 1492, 1493, 1494, 1495, 1497, 1477, 1,
+	1480, 1489, 1496, 1, 1479, 160, 162, 1499,
+	1500, 1501, 1502, 1503, 1504, 1505, 1506, 1507,
+	1508, 1509, 1510, 1511, 1512, 1513, 1514, 1515,
+	1516, 1498, 337, 357, 1518, 1519, 1520, 1521,
+	1522, 1523, 1524, 1525, 1526, 1527, 1528, 1529,
+	1530, 1531, 1532, 1533, 1534, 1535, 1517, 1536,
+	337, 357, 1518, 1519, 1520, 1521, 1522, 1523,
+	1524, 1525, 1526, 1527, 1528, 1529, 1537, 1538,
+	1532, 1533, 1539, 1535, 1517, 1541, 1542, 1543,
+	1544, 1545, 1546, 1547, 1548, 1549, 1550, 1551,
+	1552, 1553, 1554, 1556, 363, 414, 779, 1555,
+	1540, 519, 521, 1557, 1558, 1559, 1560, 1561,
+	1562, 1563, 1564, 1565, 1566, 1567, 1568, 1569,
+	1570, 1571, 1572, 1573, 1574, 1540, 694, 714,
+	1575, 1576, 1577, 1578, 1579, 1580, 1581, 1582,
+	1583, 1584, 1585, 1586, 1587, 1588, 1589, 1590,
+	1591, 1592, 1540, 1593, 694, 714, 1575, 1576,
+	1577, 1578, 1579, 1580, 1581, 1582, 1583, 1584,
+	1585, 1586, 1594, 1595, 1589, 1590, 1596, 1592,
+	1540, 694, 714, 1575, 1576, 1577, 1578, 1579,
+	1580, 1581, 1582, 1583, 1584, 1585, 1597, 1587,
+	1588, 1598, 1599, 1600, 1601, 1590, 1591, 1592,
+	1540, 694, 714, 1575, 1576, 1577, 1578, 1579,
+	1580, 1581, 1582, 1583, 1584, 1585, 1602, 1587,
+	1588, 1589, 1603, 1590, 1591, 1592, 1540, 694,
+	714, 1575, 1576, 1577, 1578, 1579, 1580, 1581,
+	1582, 1583, 1584, 1585, 1604, 1587, 1588, 1589,
+	1605, 1590, 1591, 1592, 1540, 694, 714, 1575,
+	1576, 1577, 1578, 1579, 1580, 1581, 1582, 1583,
+	1584, 1585, 1606, 1587, 1588, 1589, 1607, 1590,
+	1591, 1592, 1540, 694, 714, 1575, 1576, 1577,
+	1578, 1579, 1580, 1581, 1582, 1583, 1584, 1585,
+	1586, 1587, 1588, 1589, 1590, 1608, 1592, 1540,
+	1000, 1020, 1610, 1611, 1612, 1613, 1614, 1615,
+	1616, 1617, 1618, 1619, 1620, 1621, 1622, 1623,
+	1624, 1625, 1626, 1627, 1628, 1629, 1630, 1609,
+	1000, 1020, 1610, 1611, 1612, 1613, 1614, 1615,
+	1616, 1617, 1618, 1619, 1620, 1631, 1622, 1623,
+	1632, 1628, 1629, 1630, 1609, 1633, 1000, 1020,
+	1610, 1611, 1612, 1613, 1614, 1615, 1616, 1617,
+	1618, 1619, 1620, 1631, 1634, 1635, 1632, 1628,
+	1636, 1630, 1609, 1000, 1020, 1610, 1611, 1612,
+	1613, 1614, 1615, 1616, 1617, 1618, 1619, 1620,
+	1637, 1622, 1623, 1632, 1638, 1628, 1629, 1630,
+	1609, 1000, 1020, 1610, 1611, 1612, 1613, 1614,
+	1615, 1616, 1617, 1618, 1619, 1620, 1639, 1622,
+	1623, 1632, 1640, 1628, 1629, 1630, 1609, 1000,
+	1020, 1610, 1611, 1612, 1613, 1614, 1615, 1616,
+	1617, 1618, 1619, 1620, 1641, 1622, 1623, 1632,
+	1642, 1628, 1629, 1630, 1609, 1219, 1221, 1644,
+	1645, 1646, 1647, 1648, 1649, 1650, 1651, 1652,
+	1653, 1654, 1655, 1656, 1657, 1658, 1659, 1660,
+	1661, 1643, 1445, 1465, 1663, 1664, 1665, 1666,
+	1667, 1668, 1669, 1670, 1671, 1672, 1673, 1674,
+	1675, 1676, 1677, 1678, 1679, 1680, 1662, 1445,
+	1465, 1663, 1664, 1665, 1666, 1667, 1668, 1669,
+	1670, 1671, 1672, 1673, 1674, 1675, 1676, 1677,
+	1678, 1681, 1680, 1662, 1682, 1445, 1465, 1663,
+	1664, 1665, 1666, 1667, 1668, 1669, 1670, 1671,
+	1672, 1673, 1674, 1683, 1684, 1677, 1678, 1685,
+	1680, 1662,
+}
+
+var _graphclust_trans_targs []int16 = []int16{
+	1645, 0, 1645, 1646, 15, 16, 17, 18,
+	19, 20, 21, 22, 23, 24, 25, 26,
+	27, 28, 29, 30, 31, 32, 33, 34,
+	35, 36, 37, 38, 39, 40, 41, 42,
+	43, 45, 46, 47, 48, 49, 50, 51,
+	52, 53, 54, 55, 56, 57, 58, 59,
+	60, 61, 62, 63, 64, 65, 66, 1645,
+	68, 69, 70, 71, 72, 74, 75, 77,
+	78, 79, 80, 81, 82, 83, 84, 85,
+	86, 87, 88, 89, 90, 91, 93, 94,
+	96, 107, 144, 146, 152, 154, 157, 164,
+	171, 97, 98, 99, 100, 101, 102, 103,
+	104, 105, 106, 108, 109, 110, 111, 112,
+	113, 114, 115, 116, 117, 118, 119, 120,
+	121, 122, 123, 124, 125, 126, 127, 128,
+	129, 130, 131, 132, 133, 134, 135, 136,
+	137, 138, 139, 140, 141, 142, 143, 145,
+	147, 148, 149, 150, 151, 153, 155, 156,
+	158, 159, 160, 161, 162, 163, 165, 166,
+	167, 168, 169, 170, 172, 174, 175, 176,
+	2, 177, 3, 1645, 1647, 1645, 192, 193,
+	194, 195, 196, 197, 198, 199, 200, 201,
+	202, 203, 204, 205, 206, 207, 208, 209,
+	210, 211, 212, 213, 214, 215, 216, 217,
+	218, 219, 220, 222, 223, 224, 225, 226,
+	227, 228, 229, 230, 231, 232, 233, 234,
+	235, 236, 237, 238, 239, 240, 241, 242,
+	243, 245, 250, 268, 269, 270, 1648, 248,
+	249, 251, 252, 253, 254, 255, 256, 257,
+	258, 259, 260, 261, 262, 263, 264, 265,
+	266, 267, 272, 273, 274, 276, 277, 278,
+	279, 280, 281, 282, 283, 284, 285, 286,
+	287, 288, 289, 290, 292, 293, 295, 306,
+	342, 344, 350, 352, 355, 362, 369, 296,
+	297, 298, 299, 300, 301, 302, 303, 304,
+	305, 307, 308, 309, 310, 311, 312, 313,
+	314, 315, 316, 317, 318, 319, 320, 321,
+	322, 323, 324, 325, 326, 327, 328, 329,
+	330, 331, 332, 333, 334, 335, 336, 337,
+	338, 339, 340, 341, 343, 345, 346, 347,
+	348, 349, 351, 353, 354, 356, 357, 358,
+	359, 360, 361, 363, 364, 365, 366, 367,
+	368, 179, 370, 371, 372, 373, 374, 375,
+	376, 377, 378, 379, 380, 381, 382, 383,
+	384, 385, 386, 388, 389, 180, 391, 393,
+	394, 1649, 1645, 1650, 409, 410, 411, 412,
+	413, 414, 415, 416, 417, 418, 419, 420,
+	421, 422, 423, 424, 425, 426, 427, 428,
+	429, 430, 431, 432, 433, 434, 435, 436,
+	437, 439, 440, 441, 442, 443, 444, 445,
+	446, 447, 448, 449, 450, 451, 452, 453,
+	454, 455, 456, 457, 458, 459, 460, 462,
+	463, 464, 465, 466, 468, 469, 471, 472,
+	473, 474, 475, 476, 477, 478, 479, 480,
+	481, 482, 483, 484, 485, 487, 488, 490,
+	501, 538, 540, 546, 548, 551, 558, 565,
+	491, 492, 493, 494, 495, 496, 497, 498,
+	499, 500, 502, 503, 504, 505, 506, 507,
+	508, 509, 510, 511, 512, 513, 514, 515,
+	516, 517, 518, 519, 520, 521, 522, 523,
+	524, 525, 526, 527, 528, 529, 530, 531,
+	532, 533, 534, 535, 536, 537, 539, 541,
+	542, 543, 544, 545, 547, 549, 550, 552,
+	553, 554, 555, 556, 557, 559, 560, 561,
+	562, 563, 564, 566, 568, 569, 570, 396,
+	571, 397, 1651, 586, 587, 588, 589, 590,
+	591, 592, 593, 594, 595, 596, 597, 598,
+	599, 600, 601, 602, 603, 604, 605, 606,
+	607, 608, 609, 610, 611, 612, 613, 614,
+	616, 617, 618, 619, 620, 621, 622, 623,
+	624, 625, 626, 627, 628, 629, 630, 631,
+	632, 633, 634, 635, 636, 637, 639, 644,
+	662, 663, 664, 1652, 642, 643, 645, 646,
+	647, 648, 649, 650, 651, 652, 653, 654,
+	655, 656, 657, 658, 659, 660, 661, 666,
+	667, 668, 670, 671, 672, 673, 674, 675,
+	676, 677, 678, 679, 680, 681, 682, 683,
+	684, 686, 687, 689, 700, 736, 738, 744,
+	746, 749, 756, 763, 690, 691, 692, 693,
+	694, 695, 696, 697, 698, 699, 701, 702,
+	703, 704, 705, 706, 707, 708, 709, 710,
+	711, 712, 713, 714, 715, 716, 717, 718,
+	719, 720, 721, 722, 723, 724, 725, 726,
+	727, 728, 729, 730, 731, 732, 733, 734,
+	735, 737, 739, 740, 741, 742, 743, 745,
+	747, 748, 750, 751, 752, 753, 754, 755,
+	757, 758, 759, 760, 761, 762, 573, 764,
+	765, 766, 767, 768, 769, 770, 771, 772,
+	773, 774, 775, 776, 777, 778, 779, 780,
+	782, 783, 574, 785, 787, 788, 793, 794,
+	795, 797, 799, 802, 805, 829, 1653, 811,
+	1654, 801, 1655, 804, 807, 809, 810, 813,
+	814, 818, 819, 820, 821, 822, 823, 824,
+	1656, 817, 828, 831, 832, 833, 834, 835,
+	836, 837, 838, 839, 840, 841, 842, 843,
+	844, 845, 846, 847, 848, 850, 851, 854,
+	855, 856, 857, 858, 859, 860, 861, 865,
+	866, 868, 869, 852, 871, 881, 883, 885,
+	887, 872, 873, 874, 875, 876, 877, 878,
+	879, 880, 882, 884, 886, 888, 889, 890,
+	891, 895, 896, 897, 898, 899, 900, 901,
+	902, 903, 904, 905, 906, 907, 1657, 893,
+	894, 910, 914, 915, 916, 918, 1136, 1139,
+	1142, 1166, 1658, 1645, 1659, 932, 933, 934,
+	935, 936, 937, 938, 939, 940, 941, 942,
+	943, 944, 945, 946, 947, 948, 949, 950,
+	951, 952, 953, 954, 955, 956, 957, 958,
+	959, 960, 962, 963, 964, 965, 966, 967,
+	968, 969, 970, 971, 972, 973, 974, 975,
+	976, 977, 978, 979, 980, 981, 982, 983,
+	985, 990, 1008, 1009, 1010, 1660, 988, 989,
+	991, 992, 993, 994, 995, 996, 997, 998,
+	999, 1000, 1001, 1002, 1003, 1004, 1005, 1006,
+	1007, 1012, 1013, 1014, 1016, 1017, 1018, 1019,
+	1020, 1021, 1022, 1023, 1024, 1025, 1026, 1027,
+	1028, 1029, 1030, 1032, 1033, 1035, 1046, 1082,
+	1084, 1090, 1092, 1095, 1102, 1109, 1036, 1037,
+	1038, 1039, 1040, 1041, 1042, 1043, 1044, 1045,
+	1047, 1048, 1049, 1050, 1051, 1052, 1053, 1054,
+	1055, 1056, 1057, 1058, 1059, 1060, 1061, 1062,
+	1063, 1064, 1065, 1066, 1067, 1068, 1069, 1070,
+	1071, 1072, 1073, 1074, 1075, 1076, 1077, 1078,
+	1079, 1080, 1081, 1083, 1085, 1086, 1087, 1088,
+	1089, 1091, 1093, 1094, 1096, 1097, 1098, 1099,
+	1100, 1101, 1103, 1104, 1105, 1106, 1107, 1108,
+	919, 1110, 1111, 1112, 1113, 1114, 1115, 1116,
+	1117, 1118, 1119, 1120, 1121, 1122, 1123, 1124,
+	1125, 1126, 1128, 1129, 920, 1131, 1133, 1134,
+	1148, 1661, 1138, 1662, 1141, 1144, 1146, 1147,
+	1150, 1151, 1155, 1156, 1157, 1158, 1159, 1160,
+	1161, 1663, 1154, 1165, 1168, 1345, 1346, 1347,
+	1348, 1349, 1350, 1351, 1352, 1353, 1354, 1355,
+	1356, 1357, 1358, 1359, 1360, 1361, 1664, 1645,
+	1182, 1183, 1184, 1185, 1186, 1187, 1188, 1189,
+	1190, 1191, 1192, 1193, 1194, 1195, 1196, 1197,
+	1198, 1199, 1200, 1201, 1202, 1203, 1204, 1205,
+	1206, 1207, 1208, 1209, 1210, 1212, 1213, 1214,
+	1215, 1216, 1217, 1218, 1219, 1220, 1221, 1222,
+	1223, 1224, 1225, 1226, 1227, 1228, 1229, 1230,
+	1231, 1232, 1233, 1235, 1236, 1237, 1238, 1239,
+	1241, 1242, 1244, 1245, 1246, 1247, 1248, 1249,
+	1250, 1251, 1252, 1253, 1254, 1255, 1256, 1257,
+	1258, 1260, 1261, 1263, 1274, 1311, 1313, 1319,
+	1321, 1324, 1331, 1338, 1264, 1265, 1266, 1267,
+	1268, 1269, 1270, 1271, 1272, 1273, 1275, 1276,
+	1277, 1278, 1279, 1280, 1281, 1282, 1283, 1284,
+	1285, 1286, 1287, 1288, 1289, 1290, 1291, 1292,
+	1293, 1294, 1295, 1296, 1297, 1298, 1299, 1300,
+	1301, 1302, 1303, 1304, 1305, 1306, 1307, 1308,
+	1309, 1310, 1312, 1314, 1315, 1316, 1317, 1318,
+	1320, 1322, 1323, 1325, 1326, 1327, 1328, 1329,
+	1330, 1332, 1333, 1334, 1335, 1336, 1337, 1339,
+	1341, 1342, 1343, 1169, 1344, 1170, 1363, 1364,
+	1367, 1368, 1369, 1370, 1371, 1372, 1373, 1374,
+	1378, 1379, 1381, 1382, 1365, 1384, 1394, 1396,
+	1398, 1400, 1385, 1386, 1387, 1388, 1389, 1390,
+	1391, 1392, 1393, 1395, 1397, 1399, 1401, 1402,
+	1403, 1404, 1624, 1625, 1626, 1627, 1628, 1629,
+	1630, 1631, 1632, 1633, 1634, 1635, 1636, 1665,
+	1645, 1666, 1418, 1419, 1420, 1421, 1422, 1423,
+	1424, 1425, 1426, 1427, 1428, 1429, 1430, 1431,
+	1432, 1433, 1434, 1435, 1436, 1437, 1438, 1439,
+	1440, 1441, 1442, 1443, 1444, 1445, 1446, 1448,
+	1449, 1450, 1451, 1452, 1453, 1454, 1455, 1456,
+	1457, 1458, 1459, 1460, 1461, 1462, 1463, 1464,
+	1465, 1466, 1467, 1468, 1469, 1471, 1476, 1494,
+	1495, 1496, 1667, 1474, 1475, 1477, 1478, 1479,
+	1480, 1481, 1482, 1483, 1484, 1485, 1486, 1487,
+	1488, 1489, 1490, 1491, 1492, 1493, 1498, 1499,
+	1500, 1502, 1503, 1504, 1505, 1506, 1507, 1508,
+	1509, 1510, 1511, 1512, 1513, 1514, 1515, 1516,
+	1518, 1519, 1521, 1532, 1568, 1570, 1576, 1578,
+	1581, 1588, 1595, 1522, 1523, 1524, 1525, 1526,
+	1527, 1528, 1529, 1530, 1531, 1533, 1534, 1535,
+	1536, 1537, 1538, 1539, 1540, 1541, 1542, 1543,
+	1544, 1545, 1546, 1547, 1548, 1549, 1550, 1551,
+	1552, 1553, 1554, 1555, 1556, 1557, 1558, 1559,
+	1560, 1561, 1562, 1563, 1564, 1565, 1566, 1567,
+	1569, 1571, 1572, 1573, 1574, 1575, 1577, 1579,
+	1580, 1582, 1583, 1584, 1585, 1586, 1587, 1589,
+	1590, 1591, 1592, 1593, 1594, 1405, 1596, 1597,
+	1598, 1599, 1600, 1601, 1602, 1603, 1604, 1605,
+	1606, 1607, 1608, 1609, 1610, 1611, 1612, 1614,
+	1615, 1406, 1617, 1619, 1620, 1622, 1623, 1639,
+	1640, 1641, 1642, 1643, 1644, 1645, 1, 1646,
+	66, 178, 395, 911, 912, 913, 917, 1167,
+	1362, 1365, 1366, 1375, 1376, 1377, 1380, 1383,
+	1637, 1638, 1645, 4, 5, 6, 7, 8,
+	9, 10, 11, 12, 13, 14, 44, 67,
+	73, 76, 92, 95, 173, 1645, 181, 182,
+	183, 184, 185, 186, 187, 188, 189, 190,
+	191, 221, 244, 390, 275, 291, 392, 387,
+	246, 247, 271, 294, 1645, 572, 789, 790,
+	791, 792, 796, 830, 849, 853, 862, 863,
+	864, 867, 870, 908, 909, 398, 399, 400,
+	401, 402, 403, 404, 405, 406, 407, 408,
+	438, 461, 467, 470, 486, 489, 567, 575,
+	576, 577, 578, 579, 580, 581, 582, 583,
+	584, 585, 615, 638, 784, 669, 685, 786,
+	781, 640, 641, 665, 688, 798, 812, 825,
+	826, 827, 800, 808, 803, 806, 815, 816,
+	892, 1645, 921, 922, 923, 924, 925, 926,
+	927, 928, 929, 930, 931, 1135, 984, 1130,
+	1149, 1162, 1163, 1164, 1031, 1132, 1127, 961,
+	1015, 986, 987, 1011, 1034, 1137, 1145, 1140,
+	1143, 1152, 1153, 1645, 1171, 1172, 1173, 1174,
+	1175, 1176, 1177, 1178, 1179, 1180, 1181, 1211,
+	1234, 1240, 1243, 1259, 1262, 1340, 1645, 1407,
+	1408, 1409, 1410, 1411, 1412, 1413, 1414, 1415,
+	1416, 1417, 1447, 1470, 1616, 1501, 1517, 1621,
+	1613, 1618, 1472, 1473, 1497, 1520,
+}
+
+var _graphclust_trans_actions []byte = []byte{
+	31, 0, 27, 55, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 29,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 34, 51, 19, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 51, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 40, 25, 40, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 40, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 40, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 40, 0,
+	40, 0, 40, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	40, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 40, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 47, 17, 40, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 40, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 47, 0, 47, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 40, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 40, 21,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 40,
+	23, 40, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 40, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 43, 1, 59,
+	1, 1, 1, 1, 1, 1, 1, 1,
+	1, 1, 1, 1, 1, 1, 1, 1,
+	1, 1, 15, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 7, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 13, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 5, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 9, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 11, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0,
+}
+
+var _graphclust_to_state_actions []byte = []byte{
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 37, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0,
+}
+
+var _graphclust_from_state_actions []byte = []byte{
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 3, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0,
+}
+
+var _graphclust_eof_trans []int16 = []int16{
+	0, 0, 3, 3, 3, 3, 3, 3,
+	3, 3, 3, 3, 3, 3, 3, 3,
+	3, 3, 3, 3, 3, 3, 3, 3,
+	3, 3, 3, 3, 3, 3, 3, 3,
+	3, 3, 3, 3, 3, 3, 3, 3,
+	3, 3, 3, 3, 3, 3, 3, 3,
+	3, 3, 3, 3, 3, 3, 3, 3,
+	3, 3, 3, 3, 3, 3, 3, 3,
+	3, 3, 56, 3, 3, 3, 3, 3,
+	3, 3, 3, 3, 3, 3, 3, 3,
+	3, 3, 3, 3, 3, 3, 3, 3,
+	3, 3, 3, 3, 3, 3, 3, 3,
+	3, 3, 3, 3, 3, 3, 3, 3,
+	3, 3, 3, 3, 3, 3, 3, 3,
+	3, 3, 3, 3, 3, 3, 3, 3,
+	3, 3, 3, 3, 3, 3, 3, 3,
+	3, 3, 3, 3, 3, 3, 3, 3,
+	3, 3, 3, 3, 3, 3, 3, 3,
+	3, 3, 3, 3, 3, 3, 3, 3,
+	3, 3, 3, 3, 3, 3, 3, 3,
+	3, 3, 3, 3, 3, 3, 3, 3,
+	3, 3, 3, 3, 3, 3, 3, 3,
+	3, 3, 0, 56, 166, 166, 166, 166,
+	166, 166, 166, 166, 166, 166, 166, 166,
+	166, 166, 166, 166, 166, 166, 166, 166,
+	166, 166, 166, 166, 166, 166, 166, 166,
+	166, 166, 166, 166, 166, 166, 166, 166,
+	166, 166, 166, 166, 166, 166, 166, 166,
+	166, 166, 166, 166, 166, 166, 166, 166,
+	166, 166, 166, 166, 166, 166, 166, 166,
+	166, 166, 166, 56, 166, 166, 166, 166,
+	166, 166, 166, 166, 166, 166, 166, 166,
+	166, 166, 166, 166, 166, 56, 166, 166,
+	166, 166, 166, 166, 166, 166, 166, 166,
+	166, 166, 166, 166, 166, 166, 166, 166,
+	166, 166, 166, 166, 166, 166, 166, 166,
+	166, 166, 166, 166, 166, 166, 166, 166,
+	166, 166, 166, 166, 166, 166, 166, 166,
+	166, 166, 166, 166, 166, 166, 166, 166,
+	166, 166, 166, 166, 166, 166, 166, 166,
+	166, 166, 166, 166, 166, 166, 166, 166,
+	166, 166, 166, 166, 166, 166, 166, 166,
+	166, 166, 166, 166, 166, 166, 166, 166,
+	166, 166, 166, 166, 166, 166, 166, 166,
+	166, 166, 166, 166, 166, 166, 166, 166,
+	166, 166, 166, 166, 166, 166, 166, 166,
+	166, 166, 56, 166, 166, 166, 166, 166,
+	166, 166, 166, 166, 166, 166, 166, 166,
+	166, 166, 166, 166, 166, 166, 166, 166,
+	166, 166, 166, 0, 363, 363, 363, 363,
+	363, 363, 363, 363, 363, 363, 363, 363,
+	363, 363, 363, 363, 363, 363, 363, 363,
+	363, 363, 363, 363, 363, 363, 363, 363,
+	363, 363, 363, 363, 363, 363, 363, 363,
+	363, 363, 363, 363, 363, 363, 363, 363,
+	363, 363, 363, 363, 363, 363, 363, 363,
+	363, 363, 363, 363, 363, 363, 363, 363,
+	363, 363, 363, 363, 363, 363, 363, 363,
+	363, 363, 363, 363, 363, 363, 363, 363,
+	363, 363, 363, 363, 363, 363, 363, 363,
+	363, 363, 363, 363, 363, 363, 363, 363,
+	363, 363, 363, 363, 363, 363, 363, 363,
+	363, 363, 363, 363, 363, 363, 363, 363,
+	363, 363, 363, 363, 363, 363, 363, 363,
+	363, 363, 363, 363, 363, 363, 363, 363,
+	363, 363, 363, 363, 363, 363, 363, 363,
+	363, 363, 363, 363, 363, 363, 363, 363,
+	363, 363, 363, 363, 363, 363, 363, 363,
+	363, 363, 363, 363, 363, 363, 363, 363,
+	363, 363, 363, 363, 363, 363, 363, 363,
+	363, 363, 363, 363, 363, 363, 363, 363,
+	363, 363, 363, 363, 363, 363, 363, 363,
+	363, 363, 363, 363, 363, 363, 363, 363,
+	363, 363, 363, 363, 363, 363, 363, 363,
+	363, 363, 363, 363, 363, 363, 363, 363,
+	363, 363, 363, 363, 363, 363, 363, 363,
+	363, 363, 363, 363, 363, 363, 363, 363,
+	363, 363, 363, 363, 363, 363, 363, 363,
+	363, 363, 363, 363, 363, 363, 363, 363,
+	363, 363, 363, 363, 363, 363, 363, 363,
+	363, 363, 363, 363, 363, 363, 363, 363,
+	363, 363, 363, 363, 363, 363, 363, 363,
+	363, 363, 363, 363, 363, 363, 363, 363,
+	363, 363, 363, 363, 363, 363, 363, 363,
+	363, 363, 363, 363, 363, 363, 363, 363,
+	363, 363, 363, 363, 363, 363, 363, 363,
+	363, 363, 363, 363, 363, 363, 363, 363,
+	363, 363, 363, 363, 363, 363, 363, 363,
+	363, 363, 363, 363, 363, 363, 363, 363,
+	363, 363, 363, 363, 363, 363, 363, 363,
+	363, 363, 363, 363, 363, 363, 363, 363,
+	363, 363, 363, 363, 363, 363, 363, 363,
+	363, 363, 363, 363, 363, 363, 363, 363,
+	363, 363, 363, 363, 363, 363, 363, 363,
+	363, 363, 363, 363, 363, 363, 363, 363,
+	363, 363, 363, 363, 363, 363, 363, 363,
+	363, 363, 363, 363, 363, 363, 363, 363,
+	363, 363, 363, 363, 363, 363, 363, 363,
+	363, 363, 363, 363, 363, 363, 363, 363,
+	363, 363, 363, 363, 363, 363, 363, 363,
+	363, 363, 363, 363, 363, 363, 363, 363,
+	363, 363, 363, 363, 363, 363, 363, 363,
+	363, 363, 363, 363, 363, 363, 363, 363,
+	363, 363, 363, 363, 363, 363, 363, 363,
+	363, 363, 363, 363, 363, 363, 363, 363,
+	363, 363, 363, 363, 363, 363, 363, 363,
+	363, 363, 363, 363, 363, 363, 363, 363,
+	363, 363, 363, 363, 363, 363, 363, 363,
+	363, 363, 363, 363, 363, 363, 363, 363,
+	363, 363, 363, 363, 363, 363, 363, 363,
+	363, 363, 363, 363, 363, 363, 363, 363,
+	363, 363, 363, 363, 363, 363, 363, 363,
+	363, 363, 363, 363, 363, 363, 363, 363,
+	363, 363, 363, 363, 363, 363, 363, 0,
+	0, 0, 0, 0, 0, 0, 56, 828,
+	828, 828, 828, 828, 828, 828, 828, 828,
+	828, 828, 828, 828, 828, 828, 828, 828,
+	828, 828, 828, 828, 828, 828, 828, 828,
+	828, 828, 828, 828, 828, 828, 828, 828,
+	828, 828, 828, 828, 828, 828, 828, 828,
+	828, 828, 828, 828, 828, 828, 828, 828,
+	828, 828, 828, 828, 828, 828, 828, 828,
+	828, 828, 828, 828, 828, 828, 828, 828,
+	828, 828, 828, 828, 828, 828, 828, 828,
+	828, 828, 828, 828, 828, 828, 828, 828,
+	828, 828, 828, 828, 828, 828, 828, 828,
+	828, 828, 828, 828, 828, 828, 828, 828,
+	828, 828, 828, 828, 828, 828, 828, 828,
+	828, 828, 828, 828, 828, 828, 828, 828,
+	828, 828, 828, 828, 828, 828, 828, 828,
+	828, 828, 828, 828, 828, 828, 828, 828,
+	828, 828, 828, 828, 828, 828, 828, 828,
+	828, 828, 828, 828, 828, 828, 828, 828,
+	828, 828, 828, 828, 828, 828, 828, 828,
+	828, 828, 828, 828, 828, 828, 828, 828,
+	828, 828, 828, 828, 828, 828, 828, 828,
+	828, 828, 828, 828, 828, 828, 828, 828,
+	828, 828, 828, 828, 828, 828, 828, 828,
+	828, 828, 828, 828, 828, 828, 828, 828,
+	828, 828, 828, 828, 828, 828, 828, 828,
+	828, 828, 828, 828, 828, 828, 828, 828,
+	828, 828, 828, 828, 828, 828, 828, 828,
+	56, 828, 828, 56, 828, 828, 56, 828,
+	828, 828, 828, 828, 828, 828, 828, 828,
+	828, 828, 828, 828, 828, 828, 828, 828,
+	828, 56, 828, 828, 828, 828, 0, 0,
+	0, 1064, 1064, 1064, 1064, 1064, 1064, 1064,
+	1064, 1064, 1064, 1064, 1064, 1064, 1064, 1064,
+	1064, 1064, 1064, 1064, 1064, 1064, 1064, 1064,
+	1064, 1064, 1064, 1064, 1064, 1064, 1064, 1064,
+	1064, 1064, 1064, 1064, 1064, 1064, 1064, 1064,
+	1064, 1064, 1064, 1064, 1064, 1064, 1064, 1064,
+	1064, 1064, 1064, 1064, 1064, 1064, 1064, 1064,
+	1064, 1064, 1064, 1064, 1064, 1064, 1064, 1064,
+	1064, 1064, 1064, 1064, 1064, 1064, 1064, 1064,
+	1064, 1064, 1064, 1064, 1064, 1064, 1064, 1064,
+	1064, 1064, 1064, 1064, 1064, 1064, 1064, 1064,
+	1064, 1064, 1064, 1064, 1064, 1064, 1064, 1064,
+	1064, 1064, 1064, 1064, 1064, 1064, 1064, 1064,
+	1064, 1064, 1064, 1064, 1064, 1064, 1064, 1064,
+	1064, 1064, 1064, 1064, 1064, 1064, 1064, 1064,
+	1064, 1064, 1064, 1064, 1064, 1064, 1064, 1064,
+	1064, 1064, 1064, 1064, 1064, 1064, 1064, 1064,
+	1064, 1064, 1064, 1064, 1064, 1064, 1064, 1064,
+	1064, 1064, 1064, 1064, 1064, 1064, 1064, 1064,
+	1064, 1064, 1064, 1064, 1064, 1064, 1064, 1064,
+	1064, 1064, 1064, 1064, 1064, 1064, 1064, 1064,
+	1064, 1064, 1064, 1064, 1064, 1064, 1064, 1064,
+	1064, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 1273, 1273, 1273,
+	1273, 1273, 1273, 1273, 1273, 1273, 1273, 1273,
+	1273, 1273, 1273, 1273, 1273, 1273, 1273, 1273,
+	1273, 1273, 1273, 1273, 1273, 1273, 1273, 1273,
+	1273, 1273, 1273, 1273, 1273, 1273, 1273, 1273,
+	1273, 1273, 1273, 1273, 1273, 1273, 1273, 1273,
+	1273, 1273, 1273, 1273, 1273, 1273, 1273, 1273,
+	1273, 1273, 1273, 1273, 1273, 1273, 1273, 1273,
+	1273, 1273, 1273, 1273, 1273, 1273, 1273, 1273,
+	1273, 1273, 1273, 1273, 1273, 1273, 1273, 1273,
+	1273, 1273, 1273, 1273, 1273, 1273, 1273, 1273,
+	1273, 1273, 1273, 1273, 1273, 1273, 1273, 1273,
+	1273, 1273, 1273, 1273, 1273, 1273, 1273, 1273,
+	1273, 1273, 1273, 1273, 1273, 1273, 1273, 1273,
+	1273, 1273, 1273, 1273, 1273, 1273, 1273, 1273,
+	1273, 1273, 1273, 1273, 1273, 1273, 1273, 1273,
+	1273, 1273, 1273, 1273, 1273, 1273, 1273, 1273,
+	1273, 1273, 1273, 1273, 1273, 1273, 1273, 1273,
+	1273, 1273, 1273, 1273, 1273, 1273, 1273, 1273,
+	1273, 1273, 1273, 1273, 1273, 1273, 1273, 1273,
+	1273, 1273, 1273, 1273, 1273, 1273, 1273, 1273,
+	1273, 1273, 1273, 1273, 1273, 1273, 1273, 1273,
+	1273, 1273, 1273, 1273, 1273, 1273, 1273, 1273,
+	1273, 1273, 1273, 1273, 1273, 1273, 1273, 1273,
+	1273, 1273, 1273, 1273, 1273, 1273, 1273, 1273,
+	1273, 1273, 1273, 1273, 1273, 1273, 1273, 1273,
+	1273, 1273, 1273, 1273, 1273, 1273, 1273, 1273,
+	1273, 1273, 1273, 1273, 1273, 1273, 1273, 1273,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 1499, 1518,
+	1518, 1541, 1541, 1541, 1541, 1541, 1541, 1541,
+	1541, 1541, 1610, 1610, 1610, 1610, 1610, 1610,
+	1644, 1663, 1663, 1663,
+}
+
+const graphclust_start int = 1645
+const graphclust_first_final int = 1645
+const graphclust_error int = 0
+
+const graphclust_en_main int = 1645
+
+//line grapheme_clusters.rl:14
+
+var Error = errors.New("invalid UTF8 text")
+
+// ScanGraphemeClusters is a split function for bufio.Scanner that splits
+// on grapheme cluster boundaries.
+func ScanGraphemeClusters(data []byte, atEOF bool) (int, []byte, error) {
+	if len(data) == 0 {
+		return 0, nil, nil
+	}
+
+	// Ragel state
+	cs := 0         // Current State
+	p := 0          // "Pointer" into data
+	pe := len(data) // End-of-data "pointer"
+	ts := 0
+	te := 0
+	act := 0
+	eof := pe
+
+	// Make Go compiler happy
+	_ = ts
+	_ = te
+	_ = act
+	_ = eof
+
+	startPos := 0
+	endPos := 0
+
+//line grapheme_clusters.go:4049
+	{
+		cs = graphclust_start
+		ts = 0
+		te = 0
+		act = 0
+	}
+
+//line grapheme_clusters.go:4057
+	{
+		var _klen int
+		var _trans int
+		var _acts int
+		var _nacts uint
+		var _keys int
+		if p == pe {
+			goto _test_eof
+		}
+		if cs == 0 {
+			goto _out
+		}
+	_resume:
+		_acts = int(_graphclust_from_state_actions[cs])
+		_nacts = uint(_graphclust_actions[_acts])
+		_acts++
+		for ; _nacts > 0; _nacts-- {
+			_acts++
+			switch _graphclust_actions[_acts-1] {
+			case 4:
+//line NONE:1
+				ts = p
+
+//line grapheme_clusters.go:4080
+			}
+		}
+
+		_keys = int(_graphclust_key_offsets[cs])
+		_trans = int(_graphclust_index_offsets[cs])
+
+		_klen = int(_graphclust_single_lengths[cs])
+		if _klen > 0 {
+			_lower := int(_keys)
+			var _mid int
+			_upper := int(_keys + _klen - 1)
+			for {
+				if _upper < _lower {
+					break
+				}
+
+				_mid = _lower + ((_upper - _lower) >> 1)
+				switch {
+				case data[p] < _graphclust_trans_keys[_mid]:
+					_upper = _mid - 1
+				case data[p] > _graphclust_trans_keys[_mid]:
+					_lower = _mid + 1
+				default:
+					_trans += int(_mid - int(_keys))
+					goto _match
+				}
+			}
+			_keys += _klen
+			_trans += _klen
+		}
+
+		_klen = int(_graphclust_range_lengths[cs])
+		if _klen > 0 {
+			_lower := int(_keys)
+			var _mid int
+			_upper := int(_keys + (_klen << 1) - 2)
+			for {
+				if _upper < _lower {
+					break
+				}
+
+				_mid = _lower + (((_upper - _lower) >> 1) & ^1)
+				switch {
+				case data[p] < _graphclust_trans_keys[_mid]:
+					_upper = _mid - 2
+				case data[p] > _graphclust_trans_keys[_mid+1]:
+					_lower = _mid + 2
+				default:
+					_trans += int((_mid - int(_keys)) >> 1)
+					goto _match
+				}
+			}
+			_trans += _klen
+		}
+
+	_match:
+		_trans = int(_graphclust_indicies[_trans])
+	_eof_trans:
+		cs = int(_graphclust_trans_targs[_trans])
+
+		if _graphclust_trans_actions[_trans] == 0 {
+			goto _again
+		}
+
+		_acts = int(_graphclust_trans_actions[_trans])
+		_nacts = uint(_graphclust_actions[_acts])
+		_acts++
+		for ; _nacts > 0; _nacts-- {
+			_acts++
+			switch _graphclust_actions[_acts-1] {
+			case 0:
+//line grapheme_clusters.rl:47
+
+				startPos = p
+
+			case 1:
+//line grapheme_clusters.rl:51
+
+				endPos = p
+
+			case 5:
+//line NONE:1
+				te = p + 1
+
+			case 6:
+//line grapheme_clusters.rl:55
+				act = 3
+			case 7:
+//line grapheme_clusters.rl:55
+				act = 4
+			case 8:
+//line grapheme_clusters.rl:55
+				act = 8
+			case 9:
+//line grapheme_clusters.rl:55
+				te = p + 1
+				{
+					return endPos + 1, data[startPos : endPos+1], nil
+				}
+			case 10:
+//line grapheme_clusters.rl:55
+				te = p + 1
+				{
+					return endPos + 1, data[startPos : endPos+1], nil
+				}
+			case 11:
+//line grapheme_clusters.rl:55
+				te = p
+				p--
+				{
+					return endPos + 1, data[startPos : endPos+1], nil
+				}
+			case 12:
+//line grapheme_clusters.rl:55
+				te = p
+				p--
+				{
+					return endPos + 1, data[startPos : endPos+1], nil
+				}
+			case 13:
+//line grapheme_clusters.rl:55
+				te = p
+				p--
+				{
+					return endPos + 1, data[startPos : endPos+1], nil
+				}
+			case 14:
+//line grapheme_clusters.rl:55
+				te = p
+				p--
+				{
+					return endPos + 1, data[startPos : endPos+1], nil
+				}
+			case 15:
+//line grapheme_clusters.rl:55
+				te = p
+				p--
+				{
+					return endPos + 1, data[startPos : endPos+1], nil
+				}
+			case 16:
+//line grapheme_clusters.rl:55
+				te = p
+				p--
+				{
+					return endPos + 1, data[startPos : endPos+1], nil
+				}
+			case 17:
+//line grapheme_clusters.rl:55
+				p = (te) - 1
+				{
+					return endPos + 1, data[startPos : endPos+1], nil
+				}
+			case 18:
+//line grapheme_clusters.rl:55
+				p = (te) - 1
+				{
+					return endPos + 1, data[startPos : endPos+1], nil
+				}
+			case 19:
+//line grapheme_clusters.rl:55
+				p = (te) - 1
+				{
+					return endPos + 1, data[startPos : endPos+1], nil
+				}
+			case 20:
+//line grapheme_clusters.rl:55
+				p = (te) - 1
+				{
+					return endPos + 1, data[startPos : endPos+1], nil
+				}
+			case 21:
+//line grapheme_clusters.rl:55
+				p = (te) - 1
+				{
+					return endPos + 1, data[startPos : endPos+1], nil
+				}
+			case 22:
+//line grapheme_clusters.rl:55
+				p = (te) - 1
+				{
+					return endPos + 1, data[startPos : endPos+1], nil
+				}
+			case 23:
+//line NONE:1
+				switch act {
+				case 0:
+					{
+						cs = 0
+						goto _again
+					}
+				case 3:
+					{
+						p = (te) - 1
+
+						return endPos + 1, data[startPos : endPos+1], nil
+					}
+				case 4:
+					{
+						p = (te) - 1
+
+						return endPos + 1, data[startPos : endPos+1], nil
+					}
+				case 8:
+					{
+						p = (te) - 1
+
+						return endPos + 1, data[startPos : endPos+1], nil
+					}
+				}
+
+//line grapheme_clusters.go:4287
+			}
+		}
+
+	_again:
+		_acts = int(_graphclust_to_state_actions[cs])
+		_nacts = uint(_graphclust_actions[_acts])
+		_acts++
+		for ; _nacts > 0; _nacts-- {
+			_acts++
+			switch _graphclust_actions[_acts-1] {
+			case 2:
+//line NONE:1
+				ts = 0
+
+			case 3:
+//line NONE:1
+				act = 0
+
+//line grapheme_clusters.go:4305
+			}
+		}
+
+		if cs == 0 {
+			goto _out
+		}
+		p++
+		if p != pe {
+			goto _resume
+		}
+	_test_eof:
+		{
+		}
+		if p == eof {
+			if _graphclust_eof_trans[cs] > 0 {
+				_trans = int(_graphclust_eof_trans[cs] - 1)
+				goto _eof_trans
+			}
+		}
+
+	_out:
+		{
+		}
+	}
+
+//line grapheme_clusters.rl:117
+
+	// If we fall out here then we were unable to complete a sequence.
+	// If we weren't able to complete a sequence then either we've
+	// reached the end of a partial buffer (so there's more data to come)
+	// or we have an isolated symbol that would normally be part of a
+	// grapheme cluster but has appeared in isolation here.
+
+	if !atEOF {
+		// Request more
+		return 0, nil, nil
+	}
+
+	// Just take the first UTF-8 sequence and return that.
+	_, seqLen := utf8.DecodeRune(data)
+	return seqLen, data[:seqLen], nil
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/apparentlymart/go-textseg/v15/textseg/grapheme_clusters.rl 0.21.3-0ubuntu1/vendor/github.com/apparentlymart/go-textseg/v15/textseg/grapheme_clusters.rl
--- 0.19.3+ds1-4/vendor/github.com/apparentlymart/go-textseg/v15/textseg/grapheme_clusters.rl	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/apparentlymart/go-textseg/v15/textseg/grapheme_clusters.rl	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,133 @@
+package textseg
+
+import (
+    "errors"
+    "unicode/utf8"
+)
+
+// Generated from grapheme_clusters.rl. DO NOT EDIT
+%%{
+  # (except you are actually in grapheme_clusters.rl here, so edit away!)
+
+  machine graphclust;
+  write data;
+}%%
+
+var Error = errors.New("invalid UTF8 text")
+
+// ScanGraphemeClusters is a split function for bufio.Scanner that splits
+// on grapheme cluster boundaries.
+func ScanGraphemeClusters(data []byte, atEOF bool) (int, []byte, error) {
+    if len(data) == 0 {
+        return 0, nil, nil
+    }
+
+    // Ragel state
+	cs := 0 // Current State
+	p := 0  // "Pointer" into data
+	pe := len(data) // End-of-data "pointer"
+    ts := 0
+    te := 0
+    act := 0
+    eof := pe
+
+    // Make Go compiler happy
+    _ = ts
+    _ = te
+    _ = act
+    _ = eof
+
+    startPos := 0
+    endPos := 0
+
+    %%{
+        include GraphemeCluster "grapheme_clusters_table.rl";
+        include Emoji "emoji_table.rl";
+
+        action start {
+            startPos = p
+        }
+
+        action end {
+            endPos = p
+        }
+
+        action emit {
+            return endPos+1, data[startPos:endPos+1], nil
+        }
+
+        ZWJGlue = ZWJ (Extended_Pictographic Extend*)?;
+        AnyExtender = Extend | ZWJGlue | SpacingMark;
+        Extension = AnyExtender*;
+        ReplacementChar = (0xEF 0xBF 0xBD);
+
+        CRLFSeq = CR LF;
+        ControlSeq = Control | ReplacementChar;
+        HangulSeq = (
+            L+ (((LV? V+ | LVT) T*)?|LV?) |
+            LV V* T* |
+            V+ T* |
+            LVT T* |
+            T+
+        ) Extension;
+        EmojiSeq = Extended_Pictographic Extend* Extension;
+        ZWJSeq = ZWJ (ZWJ | Extend | SpacingMark)*;
+        EmojiFlagSeq = Regional_Indicator Regional_Indicator? Extension;
+
+        UTF8Cont = 0x80 .. 0xBF;
+        AnyUTF8 = (
+            0x00..0x7F |
+            0xC0..0xDF . UTF8Cont |
+            0xE0..0xEF . UTF8Cont . UTF8Cont |
+            0xF0..0xF7 . UTF8Cont . UTF8Cont . UTF8Cont
+        );
+
+        # OtherSeq is any character that isn't at the start of one of the extended sequences above, followed by extension
+        OtherSeq = (AnyUTF8 - (CR|LF|Control|ReplacementChar|L|LV|V|LVT|T|Extended_Pictographic|ZWJ|Regional_Indicator|Prepend)) (Extend | ZWJ | SpacingMark)*;
+
+        # PrependSeq is prepend followed by any of the other patterns above, except control characters which explicitly break
+        PrependSeq = Prepend+ (HangulSeq|EmojiSeq|ZWJSeq|EmojiFlagSeq|OtherSeq)?;
+
+        CRLFTok = CRLFSeq >start @end;
+        ControlTok = ControlSeq >start @end;
+        HangulTok = HangulSeq >start @end;
+        EmojiTok = EmojiSeq >start @end;
+        ZWJTok = ZWJSeq >start @end;
+        EmojiFlagTok = EmojiFlagSeq >start @end;
+        OtherTok = OtherSeq >start @end;
+        PrependTok = PrependSeq >start @end;
+
+        main := |*
+            CRLFTok => emit;
+            ControlTok => emit;
+            HangulTok => emit;
+            EmojiTok => emit;
+            ZWJTok => emit;
+            EmojiFlagTok => emit;
+            PrependTok => emit;
+            OtherTok => emit;
+
+            # any single valid UTF-8 character would also be valid per spec,
+            # but we'll handle that separately after the loop so we can deal
+            # with requesting more bytes if we're not at EOF.
+        *|;
+
+        write init;
+        write exec;
+    }%%
+
+    // If we fall out here then we were unable to complete a sequence.
+    // If we weren't able to complete a sequence then either we've
+    // reached the end of a partial buffer (so there's more data to come)
+    // or we have an isolated symbol that would normally be part of a
+    // grapheme cluster but has appeared in isolation here.
+
+    if !atEOF {
+        // Request more
+        return 0, nil, nil
+    }
+
+    // Just take the first UTF-8 sequence and return that.
+    _, seqLen := utf8.DecodeRune(data)
+    return seqLen, data[:seqLen], nil
+}
\ No newline at end of file
diff -pruN 0.19.3+ds1-4/vendor/github.com/apparentlymart/go-textseg/v15/textseg/grapheme_clusters_table.rl 0.21.3-0ubuntu1/vendor/github.com/apparentlymart/go-textseg/v15/textseg/grapheme_clusters_table.rl
--- 0.19.3+ds1-4/vendor/github.com/apparentlymart/go-textseg/v15/textseg/grapheme_clusters_table.rl	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/apparentlymart/go-textseg/v15/textseg/grapheme_clusters_table.rl	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,1637 @@
+# The following Ragel file was autogenerated with unicode2ragel.rb 
+# from: https://www.unicode.org/Public/15.0.0/ucd/auxiliary/GraphemeBreakProperty.txt
+#
+# It defines ["Prepend", "CR", "LF", "Control", "Extend", "Regional_Indicator", "SpacingMark", "L", "V", "T", "LV", "LVT", "ZWJ"].
+#
+# To use this, make sure that your alphtype is set to byte,
+# and that your input is in utf8.
+
+%%{
+    machine GraphemeCluster;
+    
+    Prepend = 
+        0xD8 0x80..0x85         #Cf   [6] ARABIC NUMBER SIGN..ARABIC NUMBER ...
+      | 0xDB 0x9D               #Cf       ARABIC END OF AYAH
+      | 0xDC 0x8F               #Cf       SYRIAC ABBREVIATION MARK
+      | 0xE0 0xA2 0x90..0x91    #Cf   [2] ARABIC POUND MARK ABOVE..ARABIC PI...
+      | 0xE0 0xA3 0xA2          #Cf       ARABIC DISPUTED END OF AYAH
+      | 0xE0 0xB5 0x8E          #Lo       MALAYALAM LETTER DOT REPH
+      | 0xF0 0x91 0x82 0xBD     #Cf       KAITHI NUMBER SIGN
+      | 0xF0 0x91 0x83 0x8D     #Cf       KAITHI NUMBER SIGN ABOVE
+      | 0xF0 0x91 0x87 0x82..0x83  #Lo   [2] SHARADA SIGN JIHVAMULIYA..SHARA...
+      | 0xF0 0x91 0xA4 0xBF     #Lo       DIVES AKURU PREFIXED NASAL SIGN
+      | 0xF0 0x91 0xA5 0x81     #Lo       DIVES AKURU INITIAL RA
+      | 0xF0 0x91 0xA8 0xBA     #Lo       ZANABAZAR SQUARE CLUSTER-INITIAL L...
+      | 0xF0 0x91 0xAA 0x84..0x89  #Lo   [6] SOYOMBO SIGN JIHVAMULIYA..SOYOM...
+      | 0xF0 0x91 0xB5 0x86     #Lo       MASARAM GONDI REPHA
+      | 0xF0 0x91 0xBC 0x82     #Lo       KAWI SIGN REPHA
+      ;
+
+    CR = 
+        0x0D                    #Cc       <control-000D>
+      ;
+
+    LF = 
+        0x0A                    #Cc       <control-000A>
+      ;
+
+    Control = 
+        0x00..0x09              #Cc  [10] <control-0000>..<control-0009>
+      | 0x0B..0x0C              #Cc   [2] <control-000B>..<control-000C>
+      | 0x0E..0x1F              #Cc  [18] <control-000E>..<control-001F>
+      | 0x7F                    #Cc  [33] <control-007F>..<control-009F>
+      | 0xC2 0x80..0x9F         #
+      | 0xC2 0xAD               #Cf       SOFT HYPHEN
+      | 0xD8 0x9C               #Cf       ARABIC LETTER MARK
+      | 0xE1 0xA0 0x8E          #Cf       MONGOLIAN VOWEL SEPARATOR
+      | 0xE2 0x80 0x8B          #Cf       ZERO WIDTH SPACE
+      | 0xE2 0x80 0x8E..0x8F    #Cf   [2] LEFT-TO-RIGHT MARK..RIGHT-TO-LEFT ...
+      | 0xE2 0x80 0xA8          #Zl       LINE SEPARATOR
+      | 0xE2 0x80 0xA9          #Zp       PARAGRAPH SEPARATOR
+      | 0xE2 0x80 0xAA..0xAE    #Cf   [5] LEFT-TO-RIGHT EMBEDDING..RIGHT-TO-...
+      | 0xE2 0x81 0xA0..0xA4    #Cf   [5] WORD JOINER..INVISIBLE PLUS
+      | 0xE2 0x81 0xA5          #Cn       <reserved-2065>
+      | 0xE2 0x81 0xA6..0xAF    #Cf  [10] LEFT-TO-RIGHT ISOLATE..NOMINAL DIG...
+      | 0xEF 0xBB 0xBF          #Cf       ZERO WIDTH NO-BREAK SPACE
+      | 0xEF 0xBF 0xB0..0xB8    #Cn   [9] <reserved-FFF0>..<reserved-FFF8>
+      | 0xEF 0xBF 0xB9..0xBB    #Cf   [3] INTERLINEAR ANNOTATION ANCHOR..INT...
+      | 0xF0 0x93 0x90 0xB0..0xBF  #Cf  [16] EGYPTIAN HIEROGLYPH VERTICAL JO...
+      | 0xF0 0x9B 0xB2 0xA0..0xA3  #Cf   [4] SHORTHAND FORMAT LETTER OVERLAP...
+      | 0xF0 0x9D 0x85 0xB3..0xBA  #Cf   [8] MUSICAL SYMBOL BEGIN BEAM..MUSI...
+      | 0xF3 0xA0 0x80 0x80     #Cn       <reserved-E0000>
+      | 0xF3 0xA0 0x80 0x81     #Cf       LANGUAGE TAG
+      | 0xF3 0xA0 0x80 0x82..0x9F  #Cn  [30] <reserved-E0002>..<reserved-E001F>
+      | 0xF3 0xA0 0x82 0x80..0xFF  #Cn [128] <reserved-E0080>..<reserved-E00FF>
+      | 0xF3 0xA0 0x83 0x00..0xBF  #
+      | 0xF3 0xA0 0x87 0xB0..0xFF        #Cn [3600] <reserved-E01F0>..<reser...
+      | 0xF3 0xA0 0x88..0xBE 0x00..0xFF  #
+      | 0xF3 0xA0 0xBF 0x00..0xBF        #
+      ;
+
+    Extend = 
+        0xCC 0x80..0xFF         #Mn [112] COMBINING GRAVE ACCENT..COMBINING ...
+      | 0xCD 0x00..0xAF         #
+      | 0xD2 0x83..0x87         #Mn   [5] COMBINING CYRILLIC TITLO..COMBININ...
+      | 0xD2 0x88..0x89         #Me   [2] COMBINING CYRILLIC HUNDRED THOUSAN...
+      | 0xD6 0x91..0xBD         #Mn  [45] HEBREW ACCENT ETNAHTA..HEBREW POIN...
+      | 0xD6 0xBF               #Mn       HEBREW POINT RAFE
+      | 0xD7 0x81..0x82         #Mn   [2] HEBREW POINT SHIN DOT..HEBREW POIN...
+      | 0xD7 0x84..0x85         #Mn   [2] HEBREW MARK UPPER DOT..HEBREW MARK...
+      | 0xD7 0x87               #Mn       HEBREW POINT QAMATS QATAN
+      | 0xD8 0x90..0x9A         #Mn  [11] ARABIC SIGN SALLALLAHOU ALAYHE WAS...
+      | 0xD9 0x8B..0x9F         #Mn  [21] ARABIC FATHATAN..ARABIC WAVY HAMZA...
+      | 0xD9 0xB0               #Mn       ARABIC LETTER SUPERSCRIPT ALEF
+      | 0xDB 0x96..0x9C         #Mn   [7] ARABIC SMALL HIGH LIGATURE SAD WIT...
+      | 0xDB 0x9F..0xA4         #Mn   [6] ARABIC SMALL HIGH ROUNDED ZERO..AR...
+      | 0xDB 0xA7..0xA8         #Mn   [2] ARABIC SMALL HIGH YEH..ARABIC SMAL...
+      | 0xDB 0xAA..0xAD         #Mn   [4] ARABIC EMPTY CENTRE LOW STOP..ARAB...
+      | 0xDC 0x91               #Mn       SYRIAC LETTER SUPERSCRIPT ALAPH
+      | 0xDC 0xB0..0xFF         #Mn  [27] SYRIAC PTHAHA ABOVE..SYRIAC BARREKH
+      | 0xDD 0x00..0x8A         #
+      | 0xDE 0xA6..0xB0         #Mn  [11] THAANA ABAFILI..THAANA SUKUN
+      | 0xDF 0xAB..0xB3         #Mn   [9] NKO COMBINING SHORT HIGH TONE..NKO...
+      | 0xDF 0xBD               #Mn       NKO DANTAYALAN
+      | 0xE0 0xA0 0x96..0x99    #Mn   [4] SAMARITAN MARK IN..SAMARITAN MARK ...
+      | 0xE0 0xA0 0x9B..0xA3    #Mn   [9] SAMARITAN MARK EPENTHETIC YUT..SAM...
+      | 0xE0 0xA0 0xA5..0xA7    #Mn   [3] SAMARITAN VOWEL SIGN SHORT A..SAMA...
+      | 0xE0 0xA0 0xA9..0xAD    #Mn   [5] SAMARITAN VOWEL SIGN LONG I..SAMAR...
+      | 0xE0 0xA1 0x99..0x9B    #Mn   [3] MANDAIC AFFRICATION MARK..MANDAIC ...
+      | 0xE0 0xA2 0x98..0x9F    #Mn   [8] ARABIC SMALL HIGH WORD AL-JUZ..ARA...
+      | 0xE0 0xA3 0x8A..0xA1    #Mn  [24] ARABIC SMALL HIGH FARSI YEH..ARABI...
+      | 0xE0 0xA3 0xA3..0xFF    #Mn  [32] ARABIC TURNED DAMMA BELOW..DEVANAG...
+      | 0xE0 0xA4 0x00..0x82    #
+      | 0xE0 0xA4 0xBA          #Mn       DEVANAGARI VOWEL SIGN OE
+      | 0xE0 0xA4 0xBC          #Mn       DEVANAGARI SIGN NUKTA
+      | 0xE0 0xA5 0x81..0x88    #Mn   [8] DEVANAGARI VOWEL SIGN U..DEVANAGAR...
+      | 0xE0 0xA5 0x8D          #Mn       DEVANAGARI SIGN VIRAMA
+      | 0xE0 0xA5 0x91..0x97    #Mn   [7] DEVANAGARI STRESS SIGN UDATTA..DEV...
+      | 0xE0 0xA5 0xA2..0xA3    #Mn   [2] DEVANAGARI VOWEL SIGN VOCALIC L..D...
+      | 0xE0 0xA6 0x81          #Mn       BENGALI SIGN CANDRABINDU
+      | 0xE0 0xA6 0xBC          #Mn       BENGALI SIGN NUKTA
+      | 0xE0 0xA6 0xBE          #Mc       BENGALI VOWEL SIGN AA
+      | 0xE0 0xA7 0x81..0x84    #Mn   [4] BENGALI VOWEL SIGN U..BENGALI VOWE...
+      | 0xE0 0xA7 0x8D          #Mn       BENGALI SIGN VIRAMA
+      | 0xE0 0xA7 0x97          #Mc       BENGALI AU LENGTH MARK
+      | 0xE0 0xA7 0xA2..0xA3    #Mn   [2] BENGALI VOWEL SIGN VOCALIC L..BENG...
+      | 0xE0 0xA7 0xBE          #Mn       BENGALI SANDHI MARK
+      | 0xE0 0xA8 0x81..0x82    #Mn   [2] GURMUKHI SIGN ADAK BINDI..GURMUKHI...
+      | 0xE0 0xA8 0xBC          #Mn       GURMUKHI SIGN NUKTA
+      | 0xE0 0xA9 0x81..0x82    #Mn   [2] GURMUKHI VOWEL SIGN U..GURMUKHI VO...
+      | 0xE0 0xA9 0x87..0x88    #Mn   [2] GURMUKHI VOWEL SIGN EE..GURMUKHI V...
+      | 0xE0 0xA9 0x8B..0x8D    #Mn   [3] GURMUKHI VOWEL SIGN OO..GURMUKHI S...
+      | 0xE0 0xA9 0x91          #Mn       GURMUKHI SIGN UDAAT
+      | 0xE0 0xA9 0xB0..0xB1    #Mn   [2] GURMUKHI TIPPI..GURMUKHI ADDAK
+      | 0xE0 0xA9 0xB5          #Mn       GURMUKHI SIGN YAKASH
+      | 0xE0 0xAA 0x81..0x82    #Mn   [2] GUJARATI SIGN CANDRABINDU..GUJARAT...
+      | 0xE0 0xAA 0xBC          #Mn       GUJARATI SIGN NUKTA
+      | 0xE0 0xAB 0x81..0x85    #Mn   [5] GUJARATI VOWEL SIGN U..GUJARATI VO...
+      | 0xE0 0xAB 0x87..0x88    #Mn   [2] GUJARATI VOWEL SIGN E..GUJARATI VO...
+      | 0xE0 0xAB 0x8D          #Mn       GUJARATI SIGN VIRAMA
+      | 0xE0 0xAB 0xA2..0xA3    #Mn   [2] GUJARATI VOWEL SIGN VOCALIC L..GUJ...
+      | 0xE0 0xAB 0xBA..0xBF    #Mn   [6] GUJARATI SIGN SUKUN..GUJARATI SIGN...
+      | 0xE0 0xAC 0x81          #Mn       ORIYA SIGN CANDRABINDU
+      | 0xE0 0xAC 0xBC          #Mn       ORIYA SIGN NUKTA
+      | 0xE0 0xAC 0xBE          #Mc       ORIYA VOWEL SIGN AA
+      | 0xE0 0xAC 0xBF          #Mn       ORIYA VOWEL SIGN I
+      | 0xE0 0xAD 0x81..0x84    #Mn   [4] ORIYA VOWEL SIGN U..ORIYA VOWEL SI...
+      | 0xE0 0xAD 0x8D          #Mn       ORIYA SIGN VIRAMA
+      | 0xE0 0xAD 0x95..0x96    #Mn   [2] ORIYA SIGN OVERLINE..ORIYA AI LENG...
+      | 0xE0 0xAD 0x97          #Mc       ORIYA AU LENGTH MARK
+      | 0xE0 0xAD 0xA2..0xA3    #Mn   [2] ORIYA VOWEL SIGN VOCALIC L..ORIYA ...
+      | 0xE0 0xAE 0x82          #Mn       TAMIL SIGN ANUSVARA
+      | 0xE0 0xAE 0xBE          #Mc       TAMIL VOWEL SIGN AA
+      | 0xE0 0xAF 0x80          #Mn       TAMIL VOWEL SIGN II
+      | 0xE0 0xAF 0x8D          #Mn       TAMIL SIGN VIRAMA
+      | 0xE0 0xAF 0x97          #Mc       TAMIL AU LENGTH MARK
+      | 0xE0 0xB0 0x80          #Mn       TELUGU SIGN COMBINING CANDRABINDU ...
+      | 0xE0 0xB0 0x84          #Mn       TELUGU SIGN COMBINING ANUSVARA ABOVE
+      | 0xE0 0xB0 0xBC          #Mn       TELUGU SIGN NUKTA
+      | 0xE0 0xB0 0xBE..0xFF    #Mn   [3] TELUGU VOWEL SIGN AA..TELUGU VOWEL...
+      | 0xE0 0xB1 0x00..0x80    #
+      | 0xE0 0xB1 0x86..0x88    #Mn   [3] TELUGU VOWEL SIGN E..TELUGU VOWEL ...
+      | 0xE0 0xB1 0x8A..0x8D    #Mn   [4] TELUGU VOWEL SIGN O..TELUGU SIGN V...
+      | 0xE0 0xB1 0x95..0x96    #Mn   [2] TELUGU LENGTH MARK..TELUGU AI LENG...
+      | 0xE0 0xB1 0xA2..0xA3    #Mn   [2] TELUGU VOWEL SIGN VOCALIC L..TELUG...
+      | 0xE0 0xB2 0x81          #Mn       KANNADA SIGN CANDRABINDU
+      | 0xE0 0xB2 0xBC          #Mn       KANNADA SIGN NUKTA
+      | 0xE0 0xB2 0xBF          #Mn       KANNADA VOWEL SIGN I
+      | 0xE0 0xB3 0x82          #Mc       KANNADA VOWEL SIGN UU
+      | 0xE0 0xB3 0x86          #Mn       KANNADA VOWEL SIGN E
+      | 0xE0 0xB3 0x8C..0x8D    #Mn   [2] KANNADA VOWEL SIGN AU..KANNADA SIG...
+      | 0xE0 0xB3 0x95..0x96    #Mc   [2] KANNADA LENGTH MARK..KANNADA AI LE...
+      | 0xE0 0xB3 0xA2..0xA3    #Mn   [2] KANNADA VOWEL SIGN VOCALIC L..KANN...
+      | 0xE0 0xB4 0x80..0x81    #Mn   [2] MALAYALAM SIGN COMBINING ANUSVARA ...
+      | 0xE0 0xB4 0xBB..0xBC    #Mn   [2] MALAYALAM SIGN VERTICAL BAR VIRAMA...
+      | 0xE0 0xB4 0xBE          #Mc       MALAYALAM VOWEL SIGN AA
+      | 0xE0 0xB5 0x81..0x84    #Mn   [4] MALAYALAM VOWEL SIGN U..MALAYALAM ...
+      | 0xE0 0xB5 0x8D          #Mn       MALAYALAM SIGN VIRAMA
+      | 0xE0 0xB5 0x97          #Mc       MALAYALAM AU LENGTH MARK
+      | 0xE0 0xB5 0xA2..0xA3    #Mn   [2] MALAYALAM VOWEL SIGN VOCALIC L..MA...
+      | 0xE0 0xB6 0x81          #Mn       SINHALA SIGN CANDRABINDU
+      | 0xE0 0xB7 0x8A          #Mn       SINHALA SIGN AL-LAKUNA
+      | 0xE0 0xB7 0x8F          #Mc       SINHALA VOWEL SIGN AELA-PILLA
+      | 0xE0 0xB7 0x92..0x94    #Mn   [3] SINHALA VOWEL SIGN KETTI IS-PILLA....
+      | 0xE0 0xB7 0x96          #Mn       SINHALA VOWEL SIGN DIGA PAA-PILLA
+      | 0xE0 0xB7 0x9F          #Mc       SINHALA VOWEL SIGN GAYANUKITTA
+      | 0xE0 0xB8 0xB1          #Mn       THAI CHARACTER MAI HAN-AKAT
+      | 0xE0 0xB8 0xB4..0xBA    #Mn   [7] THAI CHARACTER SARA I..THAI CHARAC...
+      | 0xE0 0xB9 0x87..0x8E    #Mn   [8] THAI CHARACTER MAITAIKHU..THAI CHA...
+      | 0xE0 0xBA 0xB1          #Mn       LAO VOWEL SIGN MAI KAN
+      | 0xE0 0xBA 0xB4..0xBC    #Mn   [9] LAO VOWEL SIGN I..LAO SEMIVOWEL SI...
+      | 0xE0 0xBB 0x88..0x8E    #Mn   [7] LAO TONE MAI EK..LAO YAMAKKAN
+      | 0xE0 0xBC 0x98..0x99    #Mn   [2] TIBETAN ASTROLOGICAL SIGN -KHYUD P...
+      | 0xE0 0xBC 0xB5          #Mn       TIBETAN MARK NGAS BZUNG NYI ZLA
+      | 0xE0 0xBC 0xB7          #Mn       TIBETAN MARK NGAS BZUNG SGOR RTAGS
+      | 0xE0 0xBC 0xB9          #Mn       TIBETAN MARK TSA -PHRU
+      | 0xE0 0xBD 0xB1..0xBE    #Mn  [14] TIBETAN VOWEL SIGN AA..TIBETAN SIG...
+      | 0xE0 0xBE 0x80..0x84    #Mn   [5] TIBETAN VOWEL SIGN REVERSED I..TIB...
+      | 0xE0 0xBE 0x86..0x87    #Mn   [2] TIBETAN SIGN LCI RTAGS..TIBETAN SI...
+      | 0xE0 0xBE 0x8D..0x97    #Mn  [11] TIBETAN SUBJOINED SIGN LCE TSA CAN...
+      | 0xE0 0xBE 0x99..0xBC    #Mn  [36] TIBETAN SUBJOINED LETTER NYA..TIBE...
+      | 0xE0 0xBF 0x86          #Mn       TIBETAN SYMBOL PADMA GDAN
+      | 0xE1 0x80 0xAD..0xB0    #Mn   [4] MYANMAR VOWEL SIGN I..MYANMAR VOWE...
+      | 0xE1 0x80 0xB2..0xB7    #Mn   [6] MYANMAR VOWEL SIGN AI..MYANMAR SIG...
+      | 0xE1 0x80 0xB9..0xBA    #Mn   [2] MYANMAR SIGN VIRAMA..MYANMAR SIGN ...
+      | 0xE1 0x80 0xBD..0xBE    #Mn   [2] MYANMAR CONSONANT SIGN MEDIAL WA.....
+      | 0xE1 0x81 0x98..0x99    #Mn   [2] MYANMAR VOWEL SIGN VOCALIC L..MYAN...
+      | 0xE1 0x81 0x9E..0xA0    #Mn   [3] MYANMAR CONSONANT SIGN MON MEDIAL ...
+      | 0xE1 0x81 0xB1..0xB4    #Mn   [4] MYANMAR VOWEL SIGN GEBA KAREN I..M...
+      | 0xE1 0x82 0x82          #Mn       MYANMAR CONSONANT SIGN SHAN MEDIAL WA
+      | 0xE1 0x82 0x85..0x86    #Mn   [2] MYANMAR VOWEL SIGN SHAN E ABOVE..M...
+      | 0xE1 0x82 0x8D          #Mn       MYANMAR SIGN SHAN COUNCIL EMPHATIC...
+      | 0xE1 0x82 0x9D          #Mn       MYANMAR VOWEL SIGN AITON AI
+      | 0xE1 0x8D 0x9D..0x9F    #Mn   [3] ETHIOPIC COMBINING GEMINATION AND ...
+      | 0xE1 0x9C 0x92..0x94    #Mn   [3] TAGALOG VOWEL SIGN I..TAGALOG SIGN...
+      | 0xE1 0x9C 0xB2..0xB3    #Mn   [2] HANUNOO VOWEL SIGN I..HANUNOO VOWE...
+      | 0xE1 0x9D 0x92..0x93    #Mn   [2] BUHID VOWEL SIGN I..BUHID VOWEL SI...
+      | 0xE1 0x9D 0xB2..0xB3    #Mn   [2] TAGBANWA VOWEL SIGN I..TAGBANWA VO...
+      | 0xE1 0x9E 0xB4..0xB5    #Mn   [2] KHMER VOWEL INHERENT AQ..KHMER VOW...
+      | 0xE1 0x9E 0xB7..0xBD    #Mn   [7] KHMER VOWEL SIGN I..KHMER VOWEL SI...
+      | 0xE1 0x9F 0x86          #Mn       KHMER SIGN NIKAHIT
+      | 0xE1 0x9F 0x89..0x93    #Mn  [11] KHMER SIGN MUUSIKATOAN..KHMER SIGN...
+      | 0xE1 0x9F 0x9D          #Mn       KHMER SIGN ATTHACAN
+      | 0xE1 0xA0 0x8B..0x8D    #Mn   [3] MONGOLIAN FREE VARIATION SELECTOR ...
+      | 0xE1 0xA0 0x8F          #Mn       MONGOLIAN FREE VARIATION SELECTOR ...
+      | 0xE1 0xA2 0x85..0x86    #Mn   [2] MONGOLIAN LETTER ALI GALI BALUDA.....
+      | 0xE1 0xA2 0xA9          #Mn       MONGOLIAN LETTER ALI GALI DAGALGA
+      | 0xE1 0xA4 0xA0..0xA2    #Mn   [3] LIMBU VOWEL SIGN A..LIMBU VOWEL SI...
+      | 0xE1 0xA4 0xA7..0xA8    #Mn   [2] LIMBU VOWEL SIGN E..LIMBU VOWEL SI...
+      | 0xE1 0xA4 0xB2          #Mn       LIMBU SMALL LETTER ANUSVARA
+      | 0xE1 0xA4 0xB9..0xBB    #Mn   [3] LIMBU SIGN MUKPHRENG..LIMBU SIGN SA-I
+      | 0xE1 0xA8 0x97..0x98    #Mn   [2] BUGINESE VOWEL SIGN I..BUGINESE VO...
+      | 0xE1 0xA8 0x9B          #Mn       BUGINESE VOWEL SIGN AE
+      | 0xE1 0xA9 0x96          #Mn       TAI THAM CONSONANT SIGN MEDIAL LA
+      | 0xE1 0xA9 0x98..0x9E    #Mn   [7] TAI THAM SIGN MAI KANG LAI..TAI TH...
+      | 0xE1 0xA9 0xA0          #Mn       TAI THAM SIGN SAKOT
+      | 0xE1 0xA9 0xA2          #Mn       TAI THAM VOWEL SIGN MAI SAT
+      | 0xE1 0xA9 0xA5..0xAC    #Mn   [8] TAI THAM VOWEL SIGN I..TAI THAM VO...
+      | 0xE1 0xA9 0xB3..0xBC    #Mn  [10] TAI THAM VOWEL SIGN OA ABOVE..TAI ...
+      | 0xE1 0xA9 0xBF          #Mn       TAI THAM COMBINING CRYPTOGRAMMIC DOT
+      | 0xE1 0xAA 0xB0..0xBD    #Mn  [14] COMBINING DOUBLED CIRCUMFLEX ACCEN...
+      | 0xE1 0xAA 0xBE          #Me       COMBINING PARENTHESES OVERLAY
+      | 0xE1 0xAA 0xBF..0xFF    #Mn  [16] COMBINING LATIN SMALL LETTER W BEL...
+      | 0xE1 0xAB 0x00..0x8E    #
+      | 0xE1 0xAC 0x80..0x83    #Mn   [4] BALINESE SIGN ULU RICEM..BALINESE ...
+      | 0xE1 0xAC 0xB4          #Mn       BALINESE SIGN REREKAN
+      | 0xE1 0xAC 0xB5          #Mc       BALINESE VOWEL SIGN TEDUNG
+      | 0xE1 0xAC 0xB6..0xBA    #Mn   [5] BALINESE VOWEL SIGN ULU..BALINESE ...
+      | 0xE1 0xAC 0xBC          #Mn       BALINESE VOWEL SIGN LA LENGA
+      | 0xE1 0xAD 0x82          #Mn       BALINESE VOWEL SIGN PEPET
+      | 0xE1 0xAD 0xAB..0xB3    #Mn   [9] BALINESE MUSICAL SYMBOL COMBINING ...
+      | 0xE1 0xAE 0x80..0x81    #Mn   [2] SUNDANESE SIGN PANYECEK..SUNDANESE...
+      | 0xE1 0xAE 0xA2..0xA5    #Mn   [4] SUNDANESE CONSONANT SIGN PANYAKRA....
+      | 0xE1 0xAE 0xA8..0xA9    #Mn   [2] SUNDANESE VOWEL SIGN PAMEPET..SUND...
+      | 0xE1 0xAE 0xAB..0xAD    #Mn   [3] SUNDANESE SIGN VIRAMA..SUNDANESE C...
+      | 0xE1 0xAF 0xA6          #Mn       BATAK SIGN TOMPI
+      | 0xE1 0xAF 0xA8..0xA9    #Mn   [2] BATAK VOWEL SIGN PAKPAK E..BATAK V...
+      | 0xE1 0xAF 0xAD          #Mn       BATAK VOWEL SIGN KARO O
+      | 0xE1 0xAF 0xAF..0xB1    #Mn   [3] BATAK VOWEL SIGN U FOR SIMALUNGUN ...
+      | 0xE1 0xB0 0xAC..0xB3    #Mn   [8] LEPCHA VOWEL SIGN E..LEPCHA CONSON...
+      | 0xE1 0xB0 0xB6..0xB7    #Mn   [2] LEPCHA SIGN RAN..LEPCHA SIGN NUKTA
+      | 0xE1 0xB3 0x90..0x92    #Mn   [3] VEDIC TONE KARSHANA..VEDIC TONE PR...
+      | 0xE1 0xB3 0x94..0xA0    #Mn  [13] VEDIC SIGN YAJURVEDIC MIDLINE SVAR...
+      | 0xE1 0xB3 0xA2..0xA8    #Mn   [7] VEDIC SIGN VISARGA SVARITA..VEDIC ...
+      | 0xE1 0xB3 0xAD          #Mn       VEDIC SIGN TIRYAK
+      | 0xE1 0xB3 0xB4          #Mn       VEDIC TONE CANDRA ABOVE
+      | 0xE1 0xB3 0xB8..0xB9    #Mn   [2] VEDIC TONE RING ABOVE..VEDIC TONE ...
+      | 0xE1 0xB7 0x80..0xBF    #Mn  [64] COMBINING DOTTED GRAVE ACCENT..COM...
+      | 0xE2 0x80 0x8C          #Cf       ZERO WIDTH NON-JOINER
+      | 0xE2 0x83 0x90..0x9C    #Mn  [13] COMBINING LEFT HARPOON ABOVE..COMB...
+      | 0xE2 0x83 0x9D..0xA0    #Me   [4] COMBINING ENCLOSING CIRCLE..COMBIN...
+      | 0xE2 0x83 0xA1          #Mn       COMBINING LEFT RIGHT ARROW ABOVE
+      | 0xE2 0x83 0xA2..0xA4    #Me   [3] COMBINING ENCLOSING SCREEN..COMBIN...
+      | 0xE2 0x83 0xA5..0xB0    #Mn  [12] COMBINING REVERSE SOLIDUS OVERLAY....
+      | 0xE2 0xB3 0xAF..0xB1    #Mn   [3] COPTIC COMBINING NI ABOVE..COPTIC ...
+      | 0xE2 0xB5 0xBF          #Mn       TIFINAGH CONSONANT JOINER
+      | 0xE2 0xB7 0xA0..0xBF    #Mn  [32] COMBINING CYRILLIC LETTER BE..COMB...
+      | 0xE3 0x80 0xAA..0xAD    #Mn   [4] IDEOGRAPHIC LEVEL TONE MARK..IDEOG...
+      | 0xE3 0x80 0xAE..0xAF    #Mc   [2] HANGUL SINGLE DOT TONE MARK..HANGU...
+      | 0xE3 0x82 0x99..0x9A    #Mn   [2] COMBINING KATAKANA-HIRAGANA VOICED...
+      | 0xEA 0x99 0xAF          #Mn       COMBINING CYRILLIC VZMET
+      | 0xEA 0x99 0xB0..0xB2    #Me   [3] COMBINING CYRILLIC TEN MILLIONS SI...
+      | 0xEA 0x99 0xB4..0xBD    #Mn  [10] COMBINING CYRILLIC LETTER UKRAINIA...
+      | 0xEA 0x9A 0x9E..0x9F    #Mn   [2] COMBINING CYRILLIC LETTER EF..COMB...
+      | 0xEA 0x9B 0xB0..0xB1    #Mn   [2] BAMUM COMBINING MARK KOQNDON..BAMU...
+      | 0xEA 0xA0 0x82          #Mn       SYLOTI NAGRI SIGN DVISVARA
+      | 0xEA 0xA0 0x86          #Mn       SYLOTI NAGRI SIGN HASANTA
+      | 0xEA 0xA0 0x8B          #Mn       SYLOTI NAGRI SIGN ANUSVARA
+      | 0xEA 0xA0 0xA5..0xA6    #Mn   [2] SYLOTI NAGRI VOWEL SIGN U..SYLOTI ...
+      | 0xEA 0xA0 0xAC          #Mn       SYLOTI NAGRI SIGN ALTERNATE HASANTA
+      | 0xEA 0xA3 0x84..0x85    #Mn   [2] SAURASHTRA SIGN VIRAMA..SAURASHTRA...
+      | 0xEA 0xA3 0xA0..0xB1    #Mn  [18] COMBINING DEVANAGARI DIGIT ZERO..C...
+      | 0xEA 0xA3 0xBF          #Mn       DEVANAGARI VOWEL SIGN AY
+      | 0xEA 0xA4 0xA6..0xAD    #Mn   [8] KAYAH LI VOWEL UE..KAYAH LI TONE C...
+      | 0xEA 0xA5 0x87..0x91    #Mn  [11] REJANG VOWEL SIGN I..REJANG CONSON...
+      | 0xEA 0xA6 0x80..0x82    #Mn   [3] JAVANESE SIGN PANYANGGA..JAVANESE ...
+      | 0xEA 0xA6 0xB3          #Mn       JAVANESE SIGN CECAK TELU
+      | 0xEA 0xA6 0xB6..0xB9    #Mn   [4] JAVANESE VOWEL SIGN WULU..JAVANESE...
+      | 0xEA 0xA6 0xBC..0xBD    #Mn   [2] JAVANESE VOWEL SIGN PEPET..JAVANES...
+      | 0xEA 0xA7 0xA5          #Mn       MYANMAR SIGN SHAN SAW
+      | 0xEA 0xA8 0xA9..0xAE    #Mn   [6] CHAM VOWEL SIGN AA..CHAM VOWEL SIG...
+      | 0xEA 0xA8 0xB1..0xB2    #Mn   [2] CHAM VOWEL SIGN AU..CHAM VOWEL SIG...
+      | 0xEA 0xA8 0xB5..0xB6    #Mn   [2] CHAM CONSONANT SIGN LA..CHAM CONSO...
+      | 0xEA 0xA9 0x83          #Mn       CHAM CONSONANT SIGN FINAL NG
+      | 0xEA 0xA9 0x8C          #Mn       CHAM CONSONANT SIGN FINAL M
+      | 0xEA 0xA9 0xBC          #Mn       MYANMAR SIGN TAI LAING TONE-2
+      | 0xEA 0xAA 0xB0          #Mn       TAI VIET MAI KANG
+      | 0xEA 0xAA 0xB2..0xB4    #Mn   [3] TAI VIET VOWEL I..TAI VIET VOWEL U
+      | 0xEA 0xAA 0xB7..0xB8    #Mn   [2] TAI VIET MAI KHIT..TAI VIET VOWEL IA
+      | 0xEA 0xAA 0xBE..0xBF    #Mn   [2] TAI VIET VOWEL AM..TAI VIET TONE M...
+      | 0xEA 0xAB 0x81          #Mn       TAI VIET TONE MAI THO
+      | 0xEA 0xAB 0xAC..0xAD    #Mn   [2] MEETEI MAYEK VOWEL SIGN UU..MEETEI...
+      | 0xEA 0xAB 0xB6          #Mn       MEETEI MAYEK VIRAMA
+      | 0xEA 0xAF 0xA5          #Mn       MEETEI MAYEK VOWEL SIGN ANAP
+      | 0xEA 0xAF 0xA8          #Mn       MEETEI MAYEK VOWEL SIGN UNAP
+      | 0xEA 0xAF 0xAD          #Mn       MEETEI MAYEK APUN IYEK
+      | 0xEF 0xAC 0x9E          #Mn       HEBREW POINT JUDEO-SPANISH VARIKA
+      | 0xEF 0xB8 0x80..0x8F    #Mn  [16] VARIATION SELECTOR-1..VARIATION SE...
+      | 0xEF 0xB8 0xA0..0xAF    #Mn  [16] COMBINING LIGATURE LEFT HALF..COMB...
+      | 0xEF 0xBE 0x9E..0x9F    #Lm   [2] HALFWIDTH KATAKANA VOICED SOUND MA...
+      | 0xF0 0x90 0x87 0xBD     #Mn       PHAISTOS DISC SIGN COMBINING OBLIQ...
+      | 0xF0 0x90 0x8B 0xA0     #Mn       COPTIC EPACT THOUSANDS MARK
+      | 0xF0 0x90 0x8D 0xB6..0xBA  #Mn   [5] COMBINING OLD PERMIC LETTER AN....
+      | 0xF0 0x90 0xA8 0x81..0x83  #Mn   [3] KHAROSHTHI VOWEL SIGN I..KHAROS...
+      | 0xF0 0x90 0xA8 0x85..0x86  #Mn   [2] KHAROSHTHI VOWEL SIGN E..KHAROS...
+      | 0xF0 0x90 0xA8 0x8C..0x8F  #Mn   [4] KHAROSHTHI VOWEL LENGTH MARK..K...
+      | 0xF0 0x90 0xA8 0xB8..0xBA  #Mn   [3] KHAROSHTHI SIGN BAR ABOVE..KHAR...
+      | 0xF0 0x90 0xA8 0xBF     #Mn       KHAROSHTHI VIRAMA
+      | 0xF0 0x90 0xAB 0xA5..0xA6  #Mn   [2] MANICHAEAN ABBREVIATION MARK AB...
+      | 0xF0 0x90 0xB4 0xA4..0xA7  #Mn   [4] HANIFI ROHINGYA SIGN HARBAHAY.....
+      | 0xF0 0x90 0xBA 0xAB..0xAC  #Mn   [2] YEZIDI COMBINING HAMZA MARK..YE...
+      | 0xF0 0x90 0xBB 0xBD..0xBF  #Mn   [3] ARABIC SMALL LOW WORD SAKTA..AR...
+      | 0xF0 0x90 0xBD 0x86..0x90  #Mn  [11] SOGDIAN COMBINING DOT BELOW..SO...
+      | 0xF0 0x90 0xBE 0x82..0x85  #Mn   [4] OLD UYGHUR COMBINING DOT ABOVE....
+      | 0xF0 0x91 0x80 0x81     #Mn       BRAHMI SIGN ANUSVARA
+      | 0xF0 0x91 0x80 0xB8..0xFF  #Mn  [15] BRAHMI VOWEL SIGN AA..BRAHMI VI...
+      | 0xF0 0x91 0x81 0x00..0x86  #
+      | 0xF0 0x91 0x81 0xB0     #Mn       BRAHMI SIGN OLD TAMIL VIRAMA
+      | 0xF0 0x91 0x81 0xB3..0xB4  #Mn   [2] BRAHMI VOWEL SIGN OLD TAMIL SHO...
+      | 0xF0 0x91 0x81 0xBF..0xFF  #Mn   [3] BRAHMI NUMBER JOINER..KAITHI SI...
+      | 0xF0 0x91 0x82 0x00..0x81  #
+      | 0xF0 0x91 0x82 0xB3..0xB6  #Mn   [4] KAITHI VOWEL SIGN U..KAITHI VOW...
+      | 0xF0 0x91 0x82 0xB9..0xBA  #Mn   [2] KAITHI SIGN VIRAMA..KAITHI SIGN...
+      | 0xF0 0x91 0x83 0x82     #Mn       KAITHI VOWEL SIGN VOCALIC R
+      | 0xF0 0x91 0x84 0x80..0x82  #Mn   [3] CHAKMA SIGN CANDRABINDU..CHAKMA...
+      | 0xF0 0x91 0x84 0xA7..0xAB  #Mn   [5] CHAKMA VOWEL SIGN A..CHAKMA VOW...
+      | 0xF0 0x91 0x84 0xAD..0xB4  #Mn   [8] CHAKMA VOWEL SIGN AI..CHAKMA MA...
+      | 0xF0 0x91 0x85 0xB3     #Mn       MAHAJANI SIGN NUKTA
+      | 0xF0 0x91 0x86 0x80..0x81  #Mn   [2] SHARADA SIGN CANDRABINDU..SHARA...
+      | 0xF0 0x91 0x86 0xB6..0xBE  #Mn   [9] SHARADA VOWEL SIGN U..SHARADA V...
+      | 0xF0 0x91 0x87 0x89..0x8C  #Mn   [4] SHARADA SANDHI MARK..SHARADA EX...
+      | 0xF0 0x91 0x87 0x8F     #Mn       SHARADA SIGN INVERTED CANDRABINDU
+      | 0xF0 0x91 0x88 0xAF..0xB1  #Mn   [3] KHOJKI VOWEL SIGN U..KHOJKI VOW...
+      | 0xF0 0x91 0x88 0xB4     #Mn       KHOJKI SIGN ANUSVARA
+      | 0xF0 0x91 0x88 0xB6..0xB7  #Mn   [2] KHOJKI SIGN NUKTA..KHOJKI SIGN ...
+      | 0xF0 0x91 0x88 0xBE     #Mn       KHOJKI SIGN SUKUN
+      | 0xF0 0x91 0x89 0x81     #Mn       KHOJKI VOWEL SIGN VOCALIC R
+      | 0xF0 0x91 0x8B 0x9F     #Mn       KHUDAWADI SIGN ANUSVARA
+      | 0xF0 0x91 0x8B 0xA3..0xAA  #Mn   [8] KHUDAWADI VOWEL SIGN U..KHUDAWA...
+      | 0xF0 0x91 0x8C 0x80..0x81  #Mn   [2] GRANTHA SIGN COMBINING ANUSVARA...
+      | 0xF0 0x91 0x8C 0xBB..0xBC  #Mn   [2] COMBINING BINDU BELOW..GRANTHA ...
+      | 0xF0 0x91 0x8C 0xBE     #Mc       GRANTHA VOWEL SIGN AA
+      | 0xF0 0x91 0x8D 0x80     #Mn       GRANTHA VOWEL SIGN II
+      | 0xF0 0x91 0x8D 0x97     #Mc       GRANTHA AU LENGTH MARK
+      | 0xF0 0x91 0x8D 0xA6..0xAC  #Mn   [7] COMBINING GRANTHA DIGIT ZERO..C...
+      | 0xF0 0x91 0x8D 0xB0..0xB4  #Mn   [5] COMBINING GRANTHA LETTER A..COM...
+      | 0xF0 0x91 0x90 0xB8..0xBF  #Mn   [8] NEWA VOWEL SIGN U..NEWA VOWEL S...
+      | 0xF0 0x91 0x91 0x82..0x84  #Mn   [3] NEWA SIGN VIRAMA..NEWA SIGN ANU...
+      | 0xF0 0x91 0x91 0x86     #Mn       NEWA SIGN NUKTA
+      | 0xF0 0x91 0x91 0x9E     #Mn       NEWA SANDHI MARK
+      | 0xF0 0x91 0x92 0xB0     #Mc       TIRHUTA VOWEL SIGN AA
+      | 0xF0 0x91 0x92 0xB3..0xB8  #Mn   [6] TIRHUTA VOWEL SIGN U..TIRHUTA V...
+      | 0xF0 0x91 0x92 0xBA     #Mn       TIRHUTA VOWEL SIGN SHORT E
+      | 0xF0 0x91 0x92 0xBD     #Mc       TIRHUTA VOWEL SIGN SHORT O
+      | 0xF0 0x91 0x92 0xBF..0xFF  #Mn   [2] TIRHUTA SIGN CANDRABINDU..TIRHU...
+      | 0xF0 0x91 0x93 0x00..0x80  #
+      | 0xF0 0x91 0x93 0x82..0x83  #Mn   [2] TIRHUTA SIGN VIRAMA..TIRHUTA SI...
+      | 0xF0 0x91 0x96 0xAF     #Mc       SIDDHAM VOWEL SIGN AA
+      | 0xF0 0x91 0x96 0xB2..0xB5  #Mn   [4] SIDDHAM VOWEL SIGN U..SIDDHAM V...
+      | 0xF0 0x91 0x96 0xBC..0xBD  #Mn   [2] SIDDHAM SIGN CANDRABINDU..SIDDH...
+      | 0xF0 0x91 0x96 0xBF..0xFF  #Mn   [2] SIDDHAM SIGN VIRAMA..SIDDHAM SI...
+      | 0xF0 0x91 0x97 0x00..0x80  #
+      | 0xF0 0x91 0x97 0x9C..0x9D  #Mn   [2] SIDDHAM VOWEL SIGN ALTERNATE U....
+      | 0xF0 0x91 0x98 0xB3..0xBA  #Mn   [8] MODI VOWEL SIGN U..MODI VOWEL S...
+      | 0xF0 0x91 0x98 0xBD     #Mn       MODI SIGN ANUSVARA
+      | 0xF0 0x91 0x98 0xBF..0xFF  #Mn   [2] MODI SIGN VIRAMA..MODI SIGN ARD...
+      | 0xF0 0x91 0x99 0x00..0x80  #
+      | 0xF0 0x91 0x9A 0xAB     #Mn       TAKRI SIGN ANUSVARA
+      | 0xF0 0x91 0x9A 0xAD     #Mn       TAKRI VOWEL SIGN AA
+      | 0xF0 0x91 0x9A 0xB0..0xB5  #Mn   [6] TAKRI VOWEL SIGN U..TAKRI VOWEL...
+      | 0xF0 0x91 0x9A 0xB7     #Mn       TAKRI SIGN NUKTA
+      | 0xF0 0x91 0x9C 0x9D..0x9F  #Mn   [3] AHOM CONSONANT SIGN MEDIAL LA.....
+      | 0xF0 0x91 0x9C 0xA2..0xA5  #Mn   [4] AHOM VOWEL SIGN I..AHOM VOWEL S...
+      | 0xF0 0x91 0x9C 0xA7..0xAB  #Mn   [5] AHOM VOWEL SIGN AW..AHOM SIGN K...
+      | 0xF0 0x91 0xA0 0xAF..0xB7  #Mn   [9] DOGRA VOWEL SIGN U..DOGRA SIGN ...
+      | 0xF0 0x91 0xA0 0xB9..0xBA  #Mn   [2] DOGRA SIGN VIRAMA..DOGRA SIGN N...
+      | 0xF0 0x91 0xA4 0xB0     #Mc       DIVES AKURU VOWEL SIGN AA
+      | 0xF0 0x91 0xA4 0xBB..0xBC  #Mn   [2] DIVES AKURU SIGN ANUSVARA..DIVE...
+      | 0xF0 0x91 0xA4 0xBE     #Mn       DIVES AKURU VIRAMA
+      | 0xF0 0x91 0xA5 0x83     #Mn       DIVES AKURU SIGN NUKTA
+      | 0xF0 0x91 0xA7 0x94..0x97  #Mn   [4] NANDINAGARI VOWEL SIGN U..NANDI...
+      | 0xF0 0x91 0xA7 0x9A..0x9B  #Mn   [2] NANDINAGARI VOWEL SIGN E..NANDI...
+      | 0xF0 0x91 0xA7 0xA0     #Mn       NANDINAGARI SIGN VIRAMA
+      | 0xF0 0x91 0xA8 0x81..0x8A  #Mn  [10] ZANABAZAR SQUARE VOWEL SIGN I.....
+      | 0xF0 0x91 0xA8 0xB3..0xB8  #Mn   [6] ZANABAZAR SQUARE FINAL CONSONAN...
+      | 0xF0 0x91 0xA8 0xBB..0xBE  #Mn   [4] ZANABAZAR SQUARE CLUSTER-FINAL ...
+      | 0xF0 0x91 0xA9 0x87     #Mn       ZANABAZAR SQUARE SUBJOINER
+      | 0xF0 0x91 0xA9 0x91..0x96  #Mn   [6] SOYOMBO VOWEL SIGN I..SOYOMBO V...
+      | 0xF0 0x91 0xA9 0x99..0x9B  #Mn   [3] SOYOMBO VOWEL SIGN VOCALIC R..S...
+      | 0xF0 0x91 0xAA 0x8A..0x96  #Mn  [13] SOYOMBO FINAL CONSONANT SIGN G....
+      | 0xF0 0x91 0xAA 0x98..0x99  #Mn   [2] SOYOMBO GEMINATION MARK..SOYOMB...
+      | 0xF0 0x91 0xB0 0xB0..0xB6  #Mn   [7] BHAIKSUKI VOWEL SIGN I..BHAIKSU...
+      | 0xF0 0x91 0xB0 0xB8..0xBD  #Mn   [6] BHAIKSUKI VOWEL SIGN E..BHAIKSU...
+      | 0xF0 0x91 0xB0 0xBF     #Mn       BHAIKSUKI SIGN VIRAMA
+      | 0xF0 0x91 0xB2 0x92..0xA7  #Mn  [22] MARCHEN SUBJOINED LETTER KA..MA...
+      | 0xF0 0x91 0xB2 0xAA..0xB0  #Mn   [7] MARCHEN SUBJOINED LETTER RA..MA...
+      | 0xF0 0x91 0xB2 0xB2..0xB3  #Mn   [2] MARCHEN VOWEL SIGN U..MARCHEN V...
+      | 0xF0 0x91 0xB2 0xB5..0xB6  #Mn   [2] MARCHEN SIGN ANUSVARA..MARCHEN ...
+      | 0xF0 0x91 0xB4 0xB1..0xB6  #Mn   [6] MASARAM GONDI VOWEL SIGN AA..MA...
+      | 0xF0 0x91 0xB4 0xBA     #Mn       MASARAM GONDI VOWEL SIGN E
+      | 0xF0 0x91 0xB4 0xBC..0xBD  #Mn   [2] MASARAM GONDI VOWEL SIGN AI..MA...
+      | 0xF0 0x91 0xB4 0xBF..0xFF  #Mn   [7] MASARAM GONDI VOWEL SIGN AU..MA...
+      | 0xF0 0x91 0xB5 0x00..0x85  #
+      | 0xF0 0x91 0xB5 0x87     #Mn       MASARAM GONDI RA-KARA
+      | 0xF0 0x91 0xB6 0x90..0x91  #Mn   [2] GUNJALA GONDI VOWEL SIGN EE..GU...
+      | 0xF0 0x91 0xB6 0x95     #Mn       GUNJALA GONDI SIGN ANUSVARA
+      | 0xF0 0x91 0xB6 0x97     #Mn       GUNJALA GONDI VIRAMA
+      | 0xF0 0x91 0xBB 0xB3..0xB4  #Mn   [2] MAKASAR VOWEL SIGN I..MAKASAR V...
+      | 0xF0 0x91 0xBC 0x80..0x81  #Mn   [2] KAWI SIGN CANDRABINDU..KAWI SIG...
+      | 0xF0 0x91 0xBC 0xB6..0xBA  #Mn   [5] KAWI VOWEL SIGN I..KAWI VOWEL S...
+      | 0xF0 0x91 0xBD 0x80     #Mn       KAWI VOWEL SIGN EU
+      | 0xF0 0x91 0xBD 0x82     #Mn       KAWI CONJOINER
+      | 0xF0 0x93 0x91 0x80     #Mn       EGYPTIAN HIEROGLYPH MIRROR HORIZON...
+      | 0xF0 0x93 0x91 0x87..0x95  #Mn  [15] EGYPTIAN HIEROGLYPH MODIFIER DA...
+      | 0xF0 0x96 0xAB 0xB0..0xB4  #Mn   [5] BASSA VAH COMBINING HIGH TONE.....
+      | 0xF0 0x96 0xAC 0xB0..0xB6  #Mn   [7] PAHAWH HMONG MARK CIM TUB..PAHA...
+      | 0xF0 0x96 0xBD 0x8F     #Mn       MIAO SIGN CONSONANT MODIFIER BAR
+      | 0xF0 0x96 0xBE 0x8F..0x92  #Mn   [4] MIAO TONE RIGHT..MIAO TONE BELOW
+      | 0xF0 0x96 0xBF 0xA4     #Mn       KHITAN SMALL SCRIPT FILLER
+      | 0xF0 0x9B 0xB2 0x9D..0x9E  #Mn   [2] DUPLOYAN THICK LETTER SELECTOR....
+      | 0xF0 0x9C 0xBC 0x80..0xAD  #Mn  [46] ZNAMENNY COMBINING MARK GORAZDO...
+      | 0xF0 0x9C 0xBC 0xB0..0xFF  #Mn  [23] ZNAMENNY COMBINING TONAL RANGE ...
+      | 0xF0 0x9C 0xBD 0x00..0x86  #
+      | 0xF0 0x9D 0x85 0xA5     #Mc       MUSICAL SYMBOL COMBINING STEM
+      | 0xF0 0x9D 0x85 0xA7..0xA9  #Mn   [3] MUSICAL SYMBOL COMBINING TREMOL...
+      | 0xF0 0x9D 0x85 0xAE..0xB2  #Mc   [5] MUSICAL SYMBOL COMBINING FLAG-1...
+      | 0xF0 0x9D 0x85 0xBB..0xFF  #Mn   [8] MUSICAL SYMBOL COMBINING ACCENT...
+      | 0xF0 0x9D 0x86 0x00..0x82  #
+      | 0xF0 0x9D 0x86 0x85..0x8B  #Mn   [7] MUSICAL SYMBOL COMBINING DOIT.....
+      | 0xF0 0x9D 0x86 0xAA..0xAD  #Mn   [4] MUSICAL SYMBOL COMBINING DOWN B...
+      | 0xF0 0x9D 0x89 0x82..0x84  #Mn   [3] COMBINING GREEK MUSICAL TRISEME...
+      | 0xF0 0x9D 0xA8 0x80..0xB6  #Mn  [55] SIGNWRITING HEAD RIM..SIGNWRITI...
+      | 0xF0 0x9D 0xA8 0xBB..0xFF  #Mn  [50] SIGNWRITING MOUTH CLOSED NEUTRA...
+      | 0xF0 0x9D 0xA9 0x00..0xAC  #
+      | 0xF0 0x9D 0xA9 0xB5     #Mn       SIGNWRITING UPPER BODY TILTING FRO...
+      | 0xF0 0x9D 0xAA 0x84     #Mn       SIGNWRITING LOCATION HEAD NECK
+      | 0xF0 0x9D 0xAA 0x9B..0x9F  #Mn   [5] SIGNWRITING FILL MODIFIER-2..SI...
+      | 0xF0 0x9D 0xAA 0xA1..0xAF  #Mn  [15] SIGNWRITING ROTATION MODIFIER-2...
+      | 0xF0 0x9E 0x80 0x80..0x86  #Mn   [7] COMBINING GLAGOLITIC LETTER AZU...
+      | 0xF0 0x9E 0x80 0x88..0x98  #Mn  [17] COMBINING GLAGOLITIC LETTER ZEM...
+      | 0xF0 0x9E 0x80 0x9B..0xA1  #Mn   [7] COMBINING GLAGOLITIC LETTER SHT...
+      | 0xF0 0x9E 0x80 0xA3..0xA4  #Mn   [2] COMBINING GLAGOLITIC LETTER YU....
+      | 0xF0 0x9E 0x80 0xA6..0xAA  #Mn   [5] COMBINING GLAGOLITIC LETTER YO....
+      | 0xF0 0x9E 0x82 0x8F     #Mn       COMBINING CYRILLIC SMALL LETTER BY...
+      | 0xF0 0x9E 0x84 0xB0..0xB6  #Mn   [7] NYIAKENG PUACHUE HMONG TONE-B.....
+      | 0xF0 0x9E 0x8A 0xAE     #Mn       TOTO SIGN RISING TONE
+      | 0xF0 0x9E 0x8B 0xAC..0xAF  #Mn   [4] WANCHO TONE TUP..WANCHO TONE KOINI
+      | 0xF0 0x9E 0x93 0xAC..0xAF  #Mn   [4] NAG MUNDARI SIGN MUHOR..NAG MUN...
+      | 0xF0 0x9E 0xA3 0x90..0x96  #Mn   [7] MENDE KIKAKUI COMBINING NUMBER ...
+      | 0xF0 0x9E 0xA5 0x84..0x8A  #Mn   [7] ADLAM ALIF LENGTHENER..ADLAM NUKTA
+      | 0xF0 0x9F 0x8F 0xBB..0xBF  #Sk   [5] EMOJI MODIFIER FITZPATRICK TYPE...
+      | 0xF3 0xA0 0x80 0xA0..0xFF  #Cf  [96] TAG SPACE..CANCEL TAG
+      | 0xF3 0xA0 0x81 0x00..0xBF  #
+      | 0xF3 0xA0 0x84 0x80..0xFF        #Mn [240] VARIATION SELECTOR-17..VA...
+      | 0xF3 0xA0 0x85..0x86 0x00..0xFF  #
+      | 0xF3 0xA0 0x87 0x00..0xAF        #
+      ;
+
+    Regional_Indicator = 
+        0xF0 0x9F 0x87 0xA6..0xBF  #So  [26] REGIONAL INDICATOR SYMBOL LETTE...
+      ;
+
+    SpacingMark = 
+        0xE0 0xA4 0x83          #Mc       DEVANAGARI SIGN VISARGA
+      | 0xE0 0xA4 0xBB          #Mc       DEVANAGARI VOWEL SIGN OOE
+      | 0xE0 0xA4 0xBE..0xFF    #Mc   [3] DEVANAGARI VOWEL SIGN AA..DEVANAGA...
+      | 0xE0 0xA5 0x00..0x80    #
+      | 0xE0 0xA5 0x89..0x8C    #Mc   [4] DEVANAGARI VOWEL SIGN CANDRA O..DE...
+      | 0xE0 0xA5 0x8E..0x8F    #Mc   [2] DEVANAGARI VOWEL SIGN PRISHTHAMATR...
+      | 0xE0 0xA6 0x82..0x83    #Mc   [2] BENGALI SIGN ANUSVARA..BENGALI SIG...
+      | 0xE0 0xA6 0xBF..0xFF    #Mc   [2] BENGALI VOWEL SIGN I..BENGALI VOWE...
+      | 0xE0 0xA7 0x00..0x80    #
+      | 0xE0 0xA7 0x87..0x88    #Mc   [2] BENGALI VOWEL SIGN E..BENGALI VOWE...
+      | 0xE0 0xA7 0x8B..0x8C    #Mc   [2] BENGALI VOWEL SIGN O..BENGALI VOWE...
+      | 0xE0 0xA8 0x83          #Mc       GURMUKHI SIGN VISARGA
+      | 0xE0 0xA8 0xBE..0xFF    #Mc   [3] GURMUKHI VOWEL SIGN AA..GURMUKHI V...
+      | 0xE0 0xA9 0x00..0x80    #
+      | 0xE0 0xAA 0x83          #Mc       GUJARATI SIGN VISARGA
+      | 0xE0 0xAA 0xBE..0xFF    #Mc   [3] GUJARATI VOWEL SIGN AA..GUJARATI V...
+      | 0xE0 0xAB 0x00..0x80    #
+      | 0xE0 0xAB 0x89          #Mc       GUJARATI VOWEL SIGN CANDRA O
+      | 0xE0 0xAB 0x8B..0x8C    #Mc   [2] GUJARATI VOWEL SIGN O..GUJARATI VO...
+      | 0xE0 0xAC 0x82..0x83    #Mc   [2] ORIYA SIGN ANUSVARA..ORIYA SIGN VI...
+      | 0xE0 0xAD 0x80          #Mc       ORIYA VOWEL SIGN II
+      | 0xE0 0xAD 0x87..0x88    #Mc   [2] ORIYA VOWEL SIGN E..ORIYA VOWEL SI...
+      | 0xE0 0xAD 0x8B..0x8C    #Mc   [2] ORIYA VOWEL SIGN O..ORIYA VOWEL SI...
+      | 0xE0 0xAE 0xBF          #Mc       TAMIL VOWEL SIGN I
+      | 0xE0 0xAF 0x81..0x82    #Mc   [2] TAMIL VOWEL SIGN U..TAMIL VOWEL SI...
+      | 0xE0 0xAF 0x86..0x88    #Mc   [3] TAMIL VOWEL SIGN E..TAMIL VOWEL SI...
+      | 0xE0 0xAF 0x8A..0x8C    #Mc   [3] TAMIL VOWEL SIGN O..TAMIL VOWEL SI...
+      | 0xE0 0xB0 0x81..0x83    #Mc   [3] TELUGU SIGN CANDRABINDU..TELUGU SI...
+      | 0xE0 0xB1 0x81..0x84    #Mc   [4] TELUGU VOWEL SIGN U..TELUGU VOWEL ...
+      | 0xE0 0xB2 0x82..0x83    #Mc   [2] KANNADA SIGN ANUSVARA..KANNADA SIG...
+      | 0xE0 0xB2 0xBE          #Mc       KANNADA VOWEL SIGN AA
+      | 0xE0 0xB3 0x80..0x81    #Mc   [2] KANNADA VOWEL SIGN II..KANNADA VOW...
+      | 0xE0 0xB3 0x83..0x84    #Mc   [2] KANNADA VOWEL SIGN VOCALIC R..KANN...
+      | 0xE0 0xB3 0x87..0x88    #Mc   [2] KANNADA VOWEL SIGN EE..KANNADA VOW...
+      | 0xE0 0xB3 0x8A..0x8B    #Mc   [2] KANNADA VOWEL SIGN O..KANNADA VOWE...
+      | 0xE0 0xB3 0xB3          #Mc       KANNADA SIGN COMBINING ANUSVARA AB...
+      | 0xE0 0xB4 0x82..0x83    #Mc   [2] MALAYALAM SIGN ANUSVARA..MALAYALAM...
+      | 0xE0 0xB4 0xBF..0xFF    #Mc   [2] MALAYALAM VOWEL SIGN I..MALAYALAM ...
+      | 0xE0 0xB5 0x00..0x80    #
+      | 0xE0 0xB5 0x86..0x88    #Mc   [3] MALAYALAM VOWEL SIGN E..MALAYALAM ...
+      | 0xE0 0xB5 0x8A..0x8C    #Mc   [3] MALAYALAM VOWEL SIGN O..MALAYALAM ...
+      | 0xE0 0xB6 0x82..0x83    #Mc   [2] SINHALA SIGN ANUSVARAYA..SINHALA S...
+      | 0xE0 0xB7 0x90..0x91    #Mc   [2] SINHALA VOWEL SIGN KETTI AEDA-PILL...
+      | 0xE0 0xB7 0x98..0x9E    #Mc   [7] SINHALA VOWEL SIGN GAETTA-PILLA..S...
+      | 0xE0 0xB7 0xB2..0xB3    #Mc   [2] SINHALA VOWEL SIGN DIGA GAETTA-PIL...
+      | 0xE0 0xB8 0xB3          #Lo       THAI CHARACTER SARA AM
+      | 0xE0 0xBA 0xB3          #Lo       LAO VOWEL SIGN AM
+      | 0xE0 0xBC 0xBE..0xBF    #Mc   [2] TIBETAN SIGN YAR TSHES..TIBETAN SI...
+      | 0xE0 0xBD 0xBF          #Mc       TIBETAN SIGN RNAM BCAD
+      | 0xE1 0x80 0xB1          #Mc       MYANMAR VOWEL SIGN E
+      | 0xE1 0x80 0xBB..0xBC    #Mc   [2] MYANMAR CONSONANT SIGN MEDIAL YA.....
+      | 0xE1 0x81 0x96..0x97    #Mc   [2] MYANMAR VOWEL SIGN VOCALIC R..MYAN...
+      | 0xE1 0x82 0x84          #Mc       MYANMAR VOWEL SIGN SHAN E
+      | 0xE1 0x9C 0x95          #Mc       TAGALOG SIGN PAMUDPOD
+      | 0xE1 0x9C 0xB4          #Mc       HANUNOO SIGN PAMUDPOD
+      | 0xE1 0x9E 0xB6          #Mc       KHMER VOWEL SIGN AA
+      | 0xE1 0x9E 0xBE..0xFF    #Mc   [8] KHMER VOWEL SIGN OE..KHMER VOWEL S...
+      | 0xE1 0x9F 0x00..0x85    #
+      | 0xE1 0x9F 0x87..0x88    #Mc   [2] KHMER SIGN REAHMUK..KHMER SIGN YUU...
+      | 0xE1 0xA4 0xA3..0xA6    #Mc   [4] LIMBU VOWEL SIGN EE..LIMBU VOWEL S...
+      | 0xE1 0xA4 0xA9..0xAB    #Mc   [3] LIMBU SUBJOINED LETTER YA..LIMBU S...
+      | 0xE1 0xA4 0xB0..0xB1    #Mc   [2] LIMBU SMALL LETTER KA..LIMBU SMALL...
+      | 0xE1 0xA4 0xB3..0xB8    #Mc   [6] LIMBU SMALL LETTER TA..LIMBU SMALL...
+      | 0xE1 0xA8 0x99..0x9A    #Mc   [2] BUGINESE VOWEL SIGN E..BUGINESE VO...
+      | 0xE1 0xA9 0x95          #Mc       TAI THAM CONSONANT SIGN MEDIAL RA
+      | 0xE1 0xA9 0x97          #Mc       TAI THAM CONSONANT SIGN LA TANG LAI
+      | 0xE1 0xA9 0xAD..0xB2    #Mc   [6] TAI THAM VOWEL SIGN OY..TAI THAM V...
+      | 0xE1 0xAC 0x84          #Mc       BALINESE SIGN BISAH
+      | 0xE1 0xAC 0xBB          #Mc       BALINESE VOWEL SIGN RA REPA TEDUNG
+      | 0xE1 0xAC 0xBD..0xFF    #Mc   [5] BALINESE VOWEL SIGN LA LENGA TEDUN...
+      | 0xE1 0xAD 0x00..0x81    #
+      | 0xE1 0xAD 0x83..0x84    #Mc   [2] BALINESE VOWEL SIGN PEPET TEDUNG.....
+      | 0xE1 0xAE 0x82          #Mc       SUNDANESE SIGN PANGWISAD
+      | 0xE1 0xAE 0xA1          #Mc       SUNDANESE CONSONANT SIGN PAMINGKAL
+      | 0xE1 0xAE 0xA6..0xA7    #Mc   [2] SUNDANESE VOWEL SIGN PANAELAENG..S...
+      | 0xE1 0xAE 0xAA          #Mc       SUNDANESE SIGN PAMAAEH
+      | 0xE1 0xAF 0xA7          #Mc       BATAK VOWEL SIGN E
+      | 0xE1 0xAF 0xAA..0xAC    #Mc   [3] BATAK VOWEL SIGN I..BATAK VOWEL SI...
+      | 0xE1 0xAF 0xAE          #Mc       BATAK VOWEL SIGN U
+      | 0xE1 0xAF 0xB2..0xB3    #Mc   [2] BATAK PANGOLAT..BATAK PANONGONAN
+      | 0xE1 0xB0 0xA4..0xAB    #Mc   [8] LEPCHA SUBJOINED LETTER YA..LEPCHA...
+      | 0xE1 0xB0 0xB4..0xB5    #Mc   [2] LEPCHA CONSONANT SIGN NYIN-DO..LEP...
+      | 0xE1 0xB3 0xA1          #Mc       VEDIC TONE ATHARVAVEDIC INDEPENDEN...
+      | 0xE1 0xB3 0xB7          #Mc       VEDIC SIGN ATIKRAMA
+      | 0xEA 0xA0 0xA3..0xA4    #Mc   [2] SYLOTI NAGRI VOWEL SIGN A..SYLOTI ...
+      | 0xEA 0xA0 0xA7          #Mc       SYLOTI NAGRI VOWEL SIGN OO
+      | 0xEA 0xA2 0x80..0x81    #Mc   [2] SAURASHTRA SIGN ANUSVARA..SAURASHT...
+      | 0xEA 0xA2 0xB4..0xFF    #Mc  [16] SAURASHTRA CONSONANT SIGN HAARU..S...
+      | 0xEA 0xA3 0x00..0x83    #
+      | 0xEA 0xA5 0x92..0x93    #Mc   [2] REJANG CONSONANT SIGN H..REJANG VI...
+      | 0xEA 0xA6 0x83          #Mc       JAVANESE SIGN WIGNYAN
+      | 0xEA 0xA6 0xB4..0xB5    #Mc   [2] JAVANESE VOWEL SIGN TARUNG..JAVANE...
+      | 0xEA 0xA6 0xBA..0xBB    #Mc   [2] JAVANESE VOWEL SIGN TALING..JAVANE...
+      | 0xEA 0xA6 0xBE..0xFF    #Mc   [3] JAVANESE CONSONANT SIGN PENGKAL..J...
+      | 0xEA 0xA7 0x00..0x80    #
+      | 0xEA 0xA8 0xAF..0xB0    #Mc   [2] CHAM VOWEL SIGN O..CHAM VOWEL SIGN AI
+      | 0xEA 0xA8 0xB3..0xB4    #Mc   [2] CHAM CONSONANT SIGN YA..CHAM CONSO...
+      | 0xEA 0xA9 0x8D          #Mc       CHAM CONSONANT SIGN FINAL H
+      | 0xEA 0xAB 0xAB          #Mc       MEETEI MAYEK VOWEL SIGN II
+      | 0xEA 0xAB 0xAE..0xAF    #Mc   [2] MEETEI MAYEK VOWEL SIGN AU..MEETEI...
+      | 0xEA 0xAB 0xB5          #Mc       MEETEI MAYEK VOWEL SIGN VISARGA
+      | 0xEA 0xAF 0xA3..0xA4    #Mc   [2] MEETEI MAYEK VOWEL SIGN ONAP..MEET...
+      | 0xEA 0xAF 0xA6..0xA7    #Mc   [2] MEETEI MAYEK VOWEL SIGN YENAP..MEE...
+      | 0xEA 0xAF 0xA9..0xAA    #Mc   [2] MEETEI MAYEK VOWEL SIGN CHEINAP..M...
+      | 0xEA 0xAF 0xAC          #Mc       MEETEI MAYEK LUM IYEK
+      | 0xF0 0x91 0x80 0x80     #Mc       BRAHMI SIGN CANDRABINDU
+      | 0xF0 0x91 0x80 0x82     #Mc       BRAHMI SIGN VISARGA
+      | 0xF0 0x91 0x82 0x82     #Mc       KAITHI SIGN VISARGA
+      | 0xF0 0x91 0x82 0xB0..0xB2  #Mc   [3] KAITHI VOWEL SIGN AA..KAITHI VO...
+      | 0xF0 0x91 0x82 0xB7..0xB8  #Mc   [2] KAITHI VOWEL SIGN O..KAITHI VOW...
+      | 0xF0 0x91 0x84 0xAC     #Mc       CHAKMA VOWEL SIGN E
+      | 0xF0 0x91 0x85 0x85..0x86  #Mc   [2] CHAKMA VOWEL SIGN AA..CHAKMA VO...
+      | 0xF0 0x91 0x86 0x82     #Mc       SHARADA SIGN VISARGA
+      | 0xF0 0x91 0x86 0xB3..0xB5  #Mc   [3] SHARADA VOWEL SIGN AA..SHARADA ...
+      | 0xF0 0x91 0x86 0xBF..0xFF  #Mc   [2] SHARADA VOWEL SIGN AU..SHARADA ...
+      | 0xF0 0x91 0x87 0x00..0x80  #
+      | 0xF0 0x91 0x87 0x8E     #Mc       SHARADA VOWEL SIGN PRISHTHAMATRA E
+      | 0xF0 0x91 0x88 0xAC..0xAE  #Mc   [3] KHOJKI VOWEL SIGN AA..KHOJKI VO...
+      | 0xF0 0x91 0x88 0xB2..0xB3  #Mc   [2] KHOJKI VOWEL SIGN O..KHOJKI VOW...
+      | 0xF0 0x91 0x88 0xB5     #Mc       KHOJKI SIGN VIRAMA
+      | 0xF0 0x91 0x8B 0xA0..0xA2  #Mc   [3] KHUDAWADI VOWEL SIGN AA..KHUDAW...
+      | 0xF0 0x91 0x8C 0x82..0x83  #Mc   [2] GRANTHA SIGN ANUSVARA..GRANTHA ...
+      | 0xF0 0x91 0x8C 0xBF     #Mc       GRANTHA VOWEL SIGN I
+      | 0xF0 0x91 0x8D 0x81..0x84  #Mc   [4] GRANTHA VOWEL SIGN U..GRANTHA V...
+      | 0xF0 0x91 0x8D 0x87..0x88  #Mc   [2] GRANTHA VOWEL SIGN EE..GRANTHA ...
+      | 0xF0 0x91 0x8D 0x8B..0x8D  #Mc   [3] GRANTHA VOWEL SIGN OO..GRANTHA ...
+      | 0xF0 0x91 0x8D 0xA2..0xA3  #Mc   [2] GRANTHA VOWEL SIGN VOCALIC L..G...
+      | 0xF0 0x91 0x90 0xB5..0xB7  #Mc   [3] NEWA VOWEL SIGN AA..NEWA VOWEL ...
+      | 0xF0 0x91 0x91 0x80..0x81  #Mc   [2] NEWA VOWEL SIGN O..NEWA VOWEL S...
+      | 0xF0 0x91 0x91 0x85     #Mc       NEWA SIGN VISARGA
+      | 0xF0 0x91 0x92 0xB1..0xB2  #Mc   [2] TIRHUTA VOWEL SIGN I..TIRHUTA V...
+      | 0xF0 0x91 0x92 0xB9     #Mc       TIRHUTA VOWEL SIGN E
+      | 0xF0 0x91 0x92 0xBB..0xBC  #Mc   [2] TIRHUTA VOWEL SIGN AI..TIRHUTA ...
+      | 0xF0 0x91 0x92 0xBE     #Mc       TIRHUTA VOWEL SIGN AU
+      | 0xF0 0x91 0x93 0x81     #Mc       TIRHUTA SIGN VISARGA
+      | 0xF0 0x91 0x96 0xB0..0xB1  #Mc   [2] SIDDHAM VOWEL SIGN I..SIDDHAM V...
+      | 0xF0 0x91 0x96 0xB8..0xBB  #Mc   [4] SIDDHAM VOWEL SIGN E..SIDDHAM V...
+      | 0xF0 0x91 0x96 0xBE     #Mc       SIDDHAM SIGN VISARGA
+      | 0xF0 0x91 0x98 0xB0..0xB2  #Mc   [3] MODI VOWEL SIGN AA..MODI VOWEL ...
+      | 0xF0 0x91 0x98 0xBB..0xBC  #Mc   [2] MODI VOWEL SIGN O..MODI VOWEL S...
+      | 0xF0 0x91 0x98 0xBE     #Mc       MODI SIGN VISARGA
+      | 0xF0 0x91 0x9A 0xAC     #Mc       TAKRI SIGN VISARGA
+      | 0xF0 0x91 0x9A 0xAE..0xAF  #Mc   [2] TAKRI VOWEL SIGN I..TAKRI VOWEL...
+      | 0xF0 0x91 0x9A 0xB6     #Mc       TAKRI SIGN VIRAMA
+      | 0xF0 0x91 0x9C 0xA6     #Mc       AHOM VOWEL SIGN E
+      | 0xF0 0x91 0xA0 0xAC..0xAE  #Mc   [3] DOGRA VOWEL SIGN AA..DOGRA VOWE...
+      | 0xF0 0x91 0xA0 0xB8     #Mc       DOGRA SIGN VISARGA
+      | 0xF0 0x91 0xA4 0xB1..0xB5  #Mc   [5] DIVES AKURU VOWEL SIGN I..DIVES...
+      | 0xF0 0x91 0xA4 0xB7..0xB8  #Mc   [2] DIVES AKURU VOWEL SIGN AI..DIVE...
+      | 0xF0 0x91 0xA4 0xBD     #Mc       DIVES AKURU SIGN HALANTA
+      | 0xF0 0x91 0xA5 0x80     #Mc       DIVES AKURU MEDIAL YA
+      | 0xF0 0x91 0xA5 0x82     #Mc       DIVES AKURU MEDIAL RA
+      | 0xF0 0x91 0xA7 0x91..0x93  #Mc   [3] NANDINAGARI VOWEL SIGN AA..NAND...
+      | 0xF0 0x91 0xA7 0x9C..0x9F  #Mc   [4] NANDINAGARI VOWEL SIGN O..NANDI...
+      | 0xF0 0x91 0xA7 0xA4     #Mc       NANDINAGARI VOWEL SIGN PRISHTHAMAT...
+      | 0xF0 0x91 0xA8 0xB9     #Mc       ZANABAZAR SQUARE SIGN VISARGA
+      | 0xF0 0x91 0xA9 0x97..0x98  #Mc   [2] SOYOMBO VOWEL SIGN AI..SOYOMBO ...
+      | 0xF0 0x91 0xAA 0x97     #Mc       SOYOMBO SIGN VISARGA
+      | 0xF0 0x91 0xB0 0xAF     #Mc       BHAIKSUKI VOWEL SIGN AA
+      | 0xF0 0x91 0xB0 0xBE     #Mc       BHAIKSUKI SIGN VISARGA
+      | 0xF0 0x91 0xB2 0xA9     #Mc       MARCHEN SUBJOINED LETTER YA
+      | 0xF0 0x91 0xB2 0xB1     #Mc       MARCHEN VOWEL SIGN I
+      | 0xF0 0x91 0xB2 0xB4     #Mc       MARCHEN VOWEL SIGN O
+      | 0xF0 0x91 0xB6 0x8A..0x8E  #Mc   [5] GUNJALA GONDI VOWEL SIGN AA..GU...
+      | 0xF0 0x91 0xB6 0x93..0x94  #Mc   [2] GUNJALA GONDI VOWEL SIGN OO..GU...
+      | 0xF0 0x91 0xB6 0x96     #Mc       GUNJALA GONDI SIGN VISARGA
+      | 0xF0 0x91 0xBB 0xB5..0xB6  #Mc   [2] MAKASAR VOWEL SIGN E..MAKASAR V...
+      | 0xF0 0x91 0xBC 0x83     #Mc       KAWI SIGN VISARGA
+      | 0xF0 0x91 0xBC 0xB4..0xB5  #Mc   [2] KAWI VOWEL SIGN AA..KAWI VOWEL ...
+      | 0xF0 0x91 0xBC 0xBE..0xBF  #Mc   [2] KAWI VOWEL SIGN E..KAWI VOWEL S...
+      | 0xF0 0x91 0xBD 0x81     #Mc       KAWI SIGN KILLER
+      | 0xF0 0x96 0xBD 0x91..0xFF  #Mc  [55] MIAO SIGN ASPIRATION..MIAO VOWE...
+      | 0xF0 0x96 0xBE 0x00..0x87  #
+      | 0xF0 0x96 0xBF 0xB0..0xB1  #Mc   [2] VIETNAMESE ALTERNATE READING MA...
+      | 0xF0 0x9D 0x85 0xA6     #Mc       MUSICAL SYMBOL COMBINING SPRECHGES...
+      | 0xF0 0x9D 0x85 0xAD     #Mc       MUSICAL SYMBOL COMBINING AUGMENTAT...
+      ;
+
+    L = 
+        0xE1 0x84 0x80..0xFF    #Lo  [96] HANGUL CHOSEONG KIYEOK..HANGUL CHO...
+      | 0xE1 0x85 0x00..0x9F    #
+      | 0xEA 0xA5 0xA0..0xBC    #Lo  [29] HANGUL CHOSEONG TIKEUT-MIEUM..HANG...
+      ;
+
+    V = 
+        0xE1 0x85 0xA0..0xFF    #Lo  [72] HANGUL JUNGSEONG FILLER..HANGUL JU...
+      | 0xE1 0x86 0x00..0xA7    #
+      | 0xED 0x9E 0xB0..0xFF    #Lo  [23] HANGUL JUNGSEONG O-YEO..HANGUL JUN...
+      | 0xED 0x9F 0x00..0x86    #
+      ;
+
+    T = 
+        0xE1 0x86 0xA8..0xFF    #Lo  [88] HANGUL JONGSEONG KIYEOK..HANGUL JO...
+      | 0xE1 0x87 0x00..0xBF    #
+      | 0xED 0x9F 0x8B..0xBB    #Lo  [49] HANGUL JONGSEONG NIEUN-RIEUL..HANG...
+      ;
+
+    LV = 
+        0xEA 0xB0 0x80          #Lo       HANGUL SYLLABLE GA
+      | 0xEA 0xB0 0x9C          #Lo       HANGUL SYLLABLE GAE
+      | 0xEA 0xB0 0xB8          #Lo       HANGUL SYLLABLE GYA
+      | 0xEA 0xB1 0x94          #Lo       HANGUL SYLLABLE GYAE
+      | 0xEA 0xB1 0xB0          #Lo       HANGUL SYLLABLE GEO
+      | 0xEA 0xB2 0x8C          #Lo       HANGUL SYLLABLE GE
+      | 0xEA 0xB2 0xA8          #Lo       HANGUL SYLLABLE GYEO
+      | 0xEA 0xB3 0x84          #Lo       HANGUL SYLLABLE GYE
+      | 0xEA 0xB3 0xA0          #Lo       HANGUL SYLLABLE GO
+      | 0xEA 0xB3 0xBC          #Lo       HANGUL SYLLABLE GWA
+      | 0xEA 0xB4 0x98          #Lo       HANGUL SYLLABLE GWAE
+      | 0xEA 0xB4 0xB4          #Lo       HANGUL SYLLABLE GOE
+      | 0xEA 0xB5 0x90          #Lo       HANGUL SYLLABLE GYO
+      | 0xEA 0xB5 0xAC          #Lo       HANGUL SYLLABLE GU
+      | 0xEA 0xB6 0x88          #Lo       HANGUL SYLLABLE GWEO
+      | 0xEA 0xB6 0xA4          #Lo       HANGUL SYLLABLE GWE
+      | 0xEA 0xB7 0x80          #Lo       HANGUL SYLLABLE GWI
+      | 0xEA 0xB7 0x9C          #Lo       HANGUL SYLLABLE GYU
+      | 0xEA 0xB7 0xB8          #Lo       HANGUL SYLLABLE GEU
+      | 0xEA 0xB8 0x94          #Lo       HANGUL SYLLABLE GYI
+      | 0xEA 0xB8 0xB0          #Lo       HANGUL SYLLABLE GI
+      | 0xEA 0xB9 0x8C          #Lo       HANGUL SYLLABLE GGA
+      | 0xEA 0xB9 0xA8          #Lo       HANGUL SYLLABLE GGAE
+      | 0xEA 0xBA 0x84          #Lo       HANGUL SYLLABLE GGYA
+      | 0xEA 0xBA 0xA0          #Lo       HANGUL SYLLABLE GGYAE
+      | 0xEA 0xBA 0xBC          #Lo       HANGUL SYLLABLE GGEO
+      | 0xEA 0xBB 0x98          #Lo       HANGUL SYLLABLE GGE
+      | 0xEA 0xBB 0xB4          #Lo       HANGUL SYLLABLE GGYEO
+      | 0xEA 0xBC 0x90          #Lo       HANGUL SYLLABLE GGYE
+      | 0xEA 0xBC 0xAC          #Lo       HANGUL SYLLABLE GGO
+      | 0xEA 0xBD 0x88          #Lo       HANGUL SYLLABLE GGWA
+      | 0xEA 0xBD 0xA4          #Lo       HANGUL SYLLABLE GGWAE
+      | 0xEA 0xBE 0x80          #Lo       HANGUL SYLLABLE GGOE
+      | 0xEA 0xBE 0x9C          #Lo       HANGUL SYLLABLE GGYO
+      | 0xEA 0xBE 0xB8          #Lo       HANGUL SYLLABLE GGU
+      | 0xEA 0xBF 0x94          #Lo       HANGUL SYLLABLE GGWEO
+      | 0xEA 0xBF 0xB0          #Lo       HANGUL SYLLABLE GGWE
+      | 0xEB 0x80 0x8C          #Lo       HANGUL SYLLABLE GGWI
+      | 0xEB 0x80 0xA8          #Lo       HANGUL SYLLABLE GGYU
+      | 0xEB 0x81 0x84          #Lo       HANGUL SYLLABLE GGEU
+      | 0xEB 0x81 0xA0          #Lo       HANGUL SYLLABLE GGYI
+      | 0xEB 0x81 0xBC          #Lo       HANGUL SYLLABLE GGI
+      | 0xEB 0x82 0x98          #Lo       HANGUL SYLLABLE NA
+      | 0xEB 0x82 0xB4          #Lo       HANGUL SYLLABLE NAE
+      | 0xEB 0x83 0x90          #Lo       HANGUL SYLLABLE NYA
+      | 0xEB 0x83 0xAC          #Lo       HANGUL SYLLABLE NYAE
+      | 0xEB 0x84 0x88          #Lo       HANGUL SYLLABLE NEO
+      | 0xEB 0x84 0xA4          #Lo       HANGUL SYLLABLE NE
+      | 0xEB 0x85 0x80          #Lo       HANGUL SYLLABLE NYEO
+      | 0xEB 0x85 0x9C          #Lo       HANGUL SYLLABLE NYE
+      | 0xEB 0x85 0xB8          #Lo       HANGUL SYLLABLE NO
+      | 0xEB 0x86 0x94          #Lo       HANGUL SYLLABLE NWA
+      | 0xEB 0x86 0xB0          #Lo       HANGUL SYLLABLE NWAE
+      | 0xEB 0x87 0x8C          #Lo       HANGUL SYLLABLE NOE
+      | 0xEB 0x87 0xA8          #Lo       HANGUL SYLLABLE NYO
+      | 0xEB 0x88 0x84          #Lo       HANGUL SYLLABLE NU
+      | 0xEB 0x88 0xA0          #Lo       HANGUL SYLLABLE NWEO
+      | 0xEB 0x88 0xBC          #Lo       HANGUL SYLLABLE NWE
+      | 0xEB 0x89 0x98          #Lo       HANGUL SYLLABLE NWI
+      | 0xEB 0x89 0xB4          #Lo       HANGUL SYLLABLE NYU
+      | 0xEB 0x8A 0x90          #Lo       HANGUL SYLLABLE NEU
+      | 0xEB 0x8A 0xAC          #Lo       HANGUL SYLLABLE NYI
+      | 0xEB 0x8B 0x88          #Lo       HANGUL SYLLABLE NI
+      | 0xEB 0x8B 0xA4          #Lo       HANGUL SYLLABLE DA
+      | 0xEB 0x8C 0x80          #Lo       HANGUL SYLLABLE DAE
+      | 0xEB 0x8C 0x9C          #Lo       HANGUL SYLLABLE DYA
+      | 0xEB 0x8C 0xB8          #Lo       HANGUL SYLLABLE DYAE
+      | 0xEB 0x8D 0x94          #Lo       HANGUL SYLLABLE DEO
+      | 0xEB 0x8D 0xB0          #Lo       HANGUL SYLLABLE DE
+      | 0xEB 0x8E 0x8C          #Lo       HANGUL SYLLABLE DYEO
+      | 0xEB 0x8E 0xA8          #Lo       HANGUL SYLLABLE DYE
+      | 0xEB 0x8F 0x84          #Lo       HANGUL SYLLABLE DO
+      | 0xEB 0x8F 0xA0          #Lo       HANGUL SYLLABLE DWA
+      | 0xEB 0x8F 0xBC          #Lo       HANGUL SYLLABLE DWAE
+      | 0xEB 0x90 0x98          #Lo       HANGUL SYLLABLE DOE
+      | 0xEB 0x90 0xB4          #Lo       HANGUL SYLLABLE DYO
+      | 0xEB 0x91 0x90          #Lo       HANGUL SYLLABLE DU
+      | 0xEB 0x91 0xAC          #Lo       HANGUL SYLLABLE DWEO
+      | 0xEB 0x92 0x88          #Lo       HANGUL SYLLABLE DWE
+      | 0xEB 0x92 0xA4          #Lo       HANGUL SYLLABLE DWI
+      | 0xEB 0x93 0x80          #Lo       HANGUL SYLLABLE DYU
+      | 0xEB 0x93 0x9C          #Lo       HANGUL SYLLABLE DEU
+      | 0xEB 0x93 0xB8          #Lo       HANGUL SYLLABLE DYI
+      | 0xEB 0x94 0x94          #Lo       HANGUL SYLLABLE DI
+      | 0xEB 0x94 0xB0          #Lo       HANGUL SYLLABLE DDA
+      | 0xEB 0x95 0x8C          #Lo       HANGUL SYLLABLE DDAE
+      | 0xEB 0x95 0xA8          #Lo       HANGUL SYLLABLE DDYA
+      | 0xEB 0x96 0x84          #Lo       HANGUL SYLLABLE DDYAE
+      | 0xEB 0x96 0xA0          #Lo       HANGUL SYLLABLE DDEO
+      | 0xEB 0x96 0xBC          #Lo       HANGUL SYLLABLE DDE
+      | 0xEB 0x97 0x98          #Lo       HANGUL SYLLABLE DDYEO
+      | 0xEB 0x97 0xB4          #Lo       HANGUL SYLLABLE DDYE
+      | 0xEB 0x98 0x90          #Lo       HANGUL SYLLABLE DDO
+      | 0xEB 0x98 0xAC          #Lo       HANGUL SYLLABLE DDWA
+      | 0xEB 0x99 0x88          #Lo       HANGUL SYLLABLE DDWAE
+      | 0xEB 0x99 0xA4          #Lo       HANGUL SYLLABLE DDOE
+      | 0xEB 0x9A 0x80          #Lo       HANGUL SYLLABLE DDYO
+      | 0xEB 0x9A 0x9C          #Lo       HANGUL SYLLABLE DDU
+      | 0xEB 0x9A 0xB8          #Lo       HANGUL SYLLABLE DDWEO
+      | 0xEB 0x9B 0x94          #Lo       HANGUL SYLLABLE DDWE
+      | 0xEB 0x9B 0xB0          #Lo       HANGUL SYLLABLE DDWI
+      | 0xEB 0x9C 0x8C          #Lo       HANGUL SYLLABLE DDYU
+      | 0xEB 0x9C 0xA8          #Lo       HANGUL SYLLABLE DDEU
+      | 0xEB 0x9D 0x84          #Lo       HANGUL SYLLABLE DDYI
+      | 0xEB 0x9D 0xA0          #Lo       HANGUL SYLLABLE DDI
+      | 0xEB 0x9D 0xBC          #Lo       HANGUL SYLLABLE RA
+      | 0xEB 0x9E 0x98          #Lo       HANGUL SYLLABLE RAE
+      | 0xEB 0x9E 0xB4          #Lo       HANGUL SYLLABLE RYA
+      | 0xEB 0x9F 0x90          #Lo       HANGUL SYLLABLE RYAE
+      | 0xEB 0x9F 0xAC          #Lo       HANGUL SYLLABLE REO
+      | 0xEB 0xA0 0x88          #Lo       HANGUL SYLLABLE RE
+      | 0xEB 0xA0 0xA4          #Lo       HANGUL SYLLABLE RYEO
+      | 0xEB 0xA1 0x80          #Lo       HANGUL SYLLABLE RYE
+      | 0xEB 0xA1 0x9C          #Lo       HANGUL SYLLABLE RO
+      | 0xEB 0xA1 0xB8          #Lo       HANGUL SYLLABLE RWA
+      | 0xEB 0xA2 0x94          #Lo       HANGUL SYLLABLE RWAE
+      | 0xEB 0xA2 0xB0          #Lo       HANGUL SYLLABLE ROE
+      | 0xEB 0xA3 0x8C          #Lo       HANGUL SYLLABLE RYO
+      | 0xEB 0xA3 0xA8          #Lo       HANGUL SYLLABLE RU
+      | 0xEB 0xA4 0x84          #Lo       HANGUL SYLLABLE RWEO
+      | 0xEB 0xA4 0xA0          #Lo       HANGUL SYLLABLE RWE
+      | 0xEB 0xA4 0xBC          #Lo       HANGUL SYLLABLE RWI
+      | 0xEB 0xA5 0x98          #Lo       HANGUL SYLLABLE RYU
+      | 0xEB 0xA5 0xB4          #Lo       HANGUL SYLLABLE REU
+      | 0xEB 0xA6 0x90          #Lo       HANGUL SYLLABLE RYI
+      | 0xEB 0xA6 0xAC          #Lo       HANGUL SYLLABLE RI
+      | 0xEB 0xA7 0x88          #Lo       HANGUL SYLLABLE MA
+      | 0xEB 0xA7 0xA4          #Lo       HANGUL SYLLABLE MAE
+      | 0xEB 0xA8 0x80          #Lo       HANGUL SYLLABLE MYA
+      | 0xEB 0xA8 0x9C          #Lo       HANGUL SYLLABLE MYAE
+      | 0xEB 0xA8 0xB8          #Lo       HANGUL SYLLABLE MEO
+      | 0xEB 0xA9 0x94          #Lo       HANGUL SYLLABLE ME
+      | 0xEB 0xA9 0xB0          #Lo       HANGUL SYLLABLE MYEO
+      | 0xEB 0xAA 0x8C          #Lo       HANGUL SYLLABLE MYE
+      | 0xEB 0xAA 0xA8          #Lo       HANGUL SYLLABLE MO
+      | 0xEB 0xAB 0x84          #Lo       HANGUL SYLLABLE MWA
+      | 0xEB 0xAB 0xA0          #Lo       HANGUL SYLLABLE MWAE
+      | 0xEB 0xAB 0xBC          #Lo       HANGUL SYLLABLE MOE
+      | 0xEB 0xAC 0x98          #Lo       HANGUL SYLLABLE MYO
+      | 0xEB 0xAC 0xB4          #Lo       HANGUL SYLLABLE MU
+      | 0xEB 0xAD 0x90          #Lo       HANGUL SYLLABLE MWEO
+      | 0xEB 0xAD 0xAC          #Lo       HANGUL SYLLABLE MWE
+      | 0xEB 0xAE 0x88          #Lo       HANGUL SYLLABLE MWI
+      | 0xEB 0xAE 0xA4          #Lo       HANGUL SYLLABLE MYU
+      | 0xEB 0xAF 0x80          #Lo       HANGUL SYLLABLE MEU
+      | 0xEB 0xAF 0x9C          #Lo       HANGUL SYLLABLE MYI
+      | 0xEB 0xAF 0xB8          #Lo       HANGUL SYLLABLE MI
+      | 0xEB 0xB0 0x94          #Lo       HANGUL SYLLABLE BA
+      | 0xEB 0xB0 0xB0          #Lo       HANGUL SYLLABLE BAE
+      | 0xEB 0xB1 0x8C          #Lo       HANGUL SYLLABLE BYA
+      | 0xEB 0xB1 0xA8          #Lo       HANGUL SYLLABLE BYAE
+      | 0xEB 0xB2 0x84          #Lo       HANGUL SYLLABLE BEO
+      | 0xEB 0xB2 0xA0          #Lo       HANGUL SYLLABLE BE
+      | 0xEB 0xB2 0xBC          #Lo       HANGUL SYLLABLE BYEO
+      | 0xEB 0xB3 0x98          #Lo       HANGUL SYLLABLE BYE
+      | 0xEB 0xB3 0xB4          #Lo       HANGUL SYLLABLE BO
+      | 0xEB 0xB4 0x90          #Lo       HANGUL SYLLABLE BWA
+      | 0xEB 0xB4 0xAC          #Lo       HANGUL SYLLABLE BWAE
+      | 0xEB 0xB5 0x88          #Lo       HANGUL SYLLABLE BOE
+      | 0xEB 0xB5 0xA4          #Lo       HANGUL SYLLABLE BYO
+      | 0xEB 0xB6 0x80          #Lo       HANGUL SYLLABLE BU
+      | 0xEB 0xB6 0x9C          #Lo       HANGUL SYLLABLE BWEO
+      | 0xEB 0xB6 0xB8          #Lo       HANGUL SYLLABLE BWE
+      | 0xEB 0xB7 0x94          #Lo       HANGUL SYLLABLE BWI
+      | 0xEB 0xB7 0xB0          #Lo       HANGUL SYLLABLE BYU
+      | 0xEB 0xB8 0x8C          #Lo       HANGUL SYLLABLE BEU
+      | 0xEB 0xB8 0xA8          #Lo       HANGUL SYLLABLE BYI
+      | 0xEB 0xB9 0x84          #Lo       HANGUL SYLLABLE BI
+      | 0xEB 0xB9 0xA0          #Lo       HANGUL SYLLABLE BBA
+      | 0xEB 0xB9 0xBC          #Lo       HANGUL SYLLABLE BBAE
+      | 0xEB 0xBA 0x98          #Lo       HANGUL SYLLABLE BBYA
+      | 0xEB 0xBA 0xB4          #Lo       HANGUL SYLLABLE BBYAE
+      | 0xEB 0xBB 0x90          #Lo       HANGUL SYLLABLE BBEO
+      | 0xEB 0xBB 0xAC          #Lo       HANGUL SYLLABLE BBE
+      | 0xEB 0xBC 0x88          #Lo       HANGUL SYLLABLE BBYEO
+      | 0xEB 0xBC 0xA4          #Lo       HANGUL SYLLABLE BBYE
+      | 0xEB 0xBD 0x80          #Lo       HANGUL SYLLABLE BBO
+      | 0xEB 0xBD 0x9C          #Lo       HANGUL SYLLABLE BBWA
+      | 0xEB 0xBD 0xB8          #Lo       HANGUL SYLLABLE BBWAE
+      | 0xEB 0xBE 0x94          #Lo       HANGUL SYLLABLE BBOE
+      | 0xEB 0xBE 0xB0          #Lo       HANGUL SYLLABLE BBYO
+      | 0xEB 0xBF 0x8C          #Lo       HANGUL SYLLABLE BBU
+      | 0xEB 0xBF 0xA8          #Lo       HANGUL SYLLABLE BBWEO
+      | 0xEC 0x80 0x84          #Lo       HANGUL SYLLABLE BBWE
+      | 0xEC 0x80 0xA0          #Lo       HANGUL SYLLABLE BBWI
+      | 0xEC 0x80 0xBC          #Lo       HANGUL SYLLABLE BBYU
+      | 0xEC 0x81 0x98          #Lo       HANGUL SYLLABLE BBEU
+      | 0xEC 0x81 0xB4          #Lo       HANGUL SYLLABLE BBYI
+      | 0xEC 0x82 0x90          #Lo       HANGUL SYLLABLE BBI
+      | 0xEC 0x82 0xAC          #Lo       HANGUL SYLLABLE SA
+      | 0xEC 0x83 0x88          #Lo       HANGUL SYLLABLE SAE
+      | 0xEC 0x83 0xA4          #Lo       HANGUL SYLLABLE SYA
+      | 0xEC 0x84 0x80          #Lo       HANGUL SYLLABLE SYAE
+      | 0xEC 0x84 0x9C          #Lo       HANGUL SYLLABLE SEO
+      | 0xEC 0x84 0xB8          #Lo       HANGUL SYLLABLE SE
+      | 0xEC 0x85 0x94          #Lo       HANGUL SYLLABLE SYEO
+      | 0xEC 0x85 0xB0          #Lo       HANGUL SYLLABLE SYE
+      | 0xEC 0x86 0x8C          #Lo       HANGUL SYLLABLE SO
+      | 0xEC 0x86 0xA8          #Lo       HANGUL SYLLABLE SWA
+      | 0xEC 0x87 0x84          #Lo       HANGUL SYLLABLE SWAE
+      | 0xEC 0x87 0xA0          #Lo       HANGUL SYLLABLE SOE
+      | 0xEC 0x87 0xBC          #Lo       HANGUL SYLLABLE SYO
+      | 0xEC 0x88 0x98          #Lo       HANGUL SYLLABLE SU
+      | 0xEC 0x88 0xB4          #Lo       HANGUL SYLLABLE SWEO
+      | 0xEC 0x89 0x90          #Lo       HANGUL SYLLABLE SWE
+      | 0xEC 0x89 0xAC          #Lo       HANGUL SYLLABLE SWI
+      | 0xEC 0x8A 0x88          #Lo       HANGUL SYLLABLE SYU
+      | 0xEC 0x8A 0xA4          #Lo       HANGUL SYLLABLE SEU
+      | 0xEC 0x8B 0x80          #Lo       HANGUL SYLLABLE SYI
+      | 0xEC 0x8B 0x9C          #Lo       HANGUL SYLLABLE SI
+      | 0xEC 0x8B 0xB8          #Lo       HANGUL SYLLABLE SSA
+      | 0xEC 0x8C 0x94          #Lo       HANGUL SYLLABLE SSAE
+      | 0xEC 0x8C 0xB0          #Lo       HANGUL SYLLABLE SSYA
+      | 0xEC 0x8D 0x8C          #Lo       HANGUL SYLLABLE SSYAE
+      | 0xEC 0x8D 0xA8          #Lo       HANGUL SYLLABLE SSEO
+      | 0xEC 0x8E 0x84          #Lo       HANGUL SYLLABLE SSE
+      | 0xEC 0x8E 0xA0          #Lo       HANGUL SYLLABLE SSYEO
+      | 0xEC 0x8E 0xBC          #Lo       HANGUL SYLLABLE SSYE
+      | 0xEC 0x8F 0x98          #Lo       HANGUL SYLLABLE SSO
+      | 0xEC 0x8F 0xB4          #Lo       HANGUL SYLLABLE SSWA
+      | 0xEC 0x90 0x90          #Lo       HANGUL SYLLABLE SSWAE
+      | 0xEC 0x90 0xAC          #Lo       HANGUL SYLLABLE SSOE
+      | 0xEC 0x91 0x88          #Lo       HANGUL SYLLABLE SSYO
+      | 0xEC 0x91 0xA4          #Lo       HANGUL SYLLABLE SSU
+      | 0xEC 0x92 0x80          #Lo       HANGUL SYLLABLE SSWEO
+      | 0xEC 0x92 0x9C          #Lo       HANGUL SYLLABLE SSWE
+      | 0xEC 0x92 0xB8          #Lo       HANGUL SYLLABLE SSWI
+      | 0xEC 0x93 0x94          #Lo       HANGUL SYLLABLE SSYU
+      | 0xEC 0x93 0xB0          #Lo       HANGUL SYLLABLE SSEU
+      | 0xEC 0x94 0x8C          #Lo       HANGUL SYLLABLE SSYI
+      | 0xEC 0x94 0xA8          #Lo       HANGUL SYLLABLE SSI
+      | 0xEC 0x95 0x84          #Lo       HANGUL SYLLABLE A
+      | 0xEC 0x95 0xA0          #Lo       HANGUL SYLLABLE AE
+      | 0xEC 0x95 0xBC          #Lo       HANGUL SYLLABLE YA
+      | 0xEC 0x96 0x98          #Lo       HANGUL SYLLABLE YAE
+      | 0xEC 0x96 0xB4          #Lo       HANGUL SYLLABLE EO
+      | 0xEC 0x97 0x90          #Lo       HANGUL SYLLABLE E
+      | 0xEC 0x97 0xAC          #Lo       HANGUL SYLLABLE YEO
+      | 0xEC 0x98 0x88          #Lo       HANGUL SYLLABLE YE
+      | 0xEC 0x98 0xA4          #Lo       HANGUL SYLLABLE O
+      | 0xEC 0x99 0x80          #Lo       HANGUL SYLLABLE WA
+      | 0xEC 0x99 0x9C          #Lo       HANGUL SYLLABLE WAE
+      | 0xEC 0x99 0xB8          #Lo       HANGUL SYLLABLE OE
+      | 0xEC 0x9A 0x94          #Lo       HANGUL SYLLABLE YO
+      | 0xEC 0x9A 0xB0          #Lo       HANGUL SYLLABLE U
+      | 0xEC 0x9B 0x8C          #Lo       HANGUL SYLLABLE WEO
+      | 0xEC 0x9B 0xA8          #Lo       HANGUL SYLLABLE WE
+      | 0xEC 0x9C 0x84          #Lo       HANGUL SYLLABLE WI
+      | 0xEC 0x9C 0xA0          #Lo       HANGUL SYLLABLE YU
+      | 0xEC 0x9C 0xBC          #Lo       HANGUL SYLLABLE EU
+      | 0xEC 0x9D 0x98          #Lo       HANGUL SYLLABLE YI
+      | 0xEC 0x9D 0xB4          #Lo       HANGUL SYLLABLE I
+      | 0xEC 0x9E 0x90          #Lo       HANGUL SYLLABLE JA
+      | 0xEC 0x9E 0xAC          #Lo       HANGUL SYLLABLE JAE
+      | 0xEC 0x9F 0x88          #Lo       HANGUL SYLLABLE JYA
+      | 0xEC 0x9F 0xA4          #Lo       HANGUL SYLLABLE JYAE
+      | 0xEC 0xA0 0x80          #Lo       HANGUL SYLLABLE JEO
+      | 0xEC 0xA0 0x9C          #Lo       HANGUL SYLLABLE JE
+      | 0xEC 0xA0 0xB8          #Lo       HANGUL SYLLABLE JYEO
+      | 0xEC 0xA1 0x94          #Lo       HANGUL SYLLABLE JYE
+      | 0xEC 0xA1 0xB0          #Lo       HANGUL SYLLABLE JO
+      | 0xEC 0xA2 0x8C          #Lo       HANGUL SYLLABLE JWA
+      | 0xEC 0xA2 0xA8          #Lo       HANGUL SYLLABLE JWAE
+      | 0xEC 0xA3 0x84          #Lo       HANGUL SYLLABLE JOE
+      | 0xEC 0xA3 0xA0          #Lo       HANGUL SYLLABLE JYO
+      | 0xEC 0xA3 0xBC          #Lo       HANGUL SYLLABLE JU
+      | 0xEC 0xA4 0x98          #Lo       HANGUL SYLLABLE JWEO
+      | 0xEC 0xA4 0xB4          #Lo       HANGUL SYLLABLE JWE
+      | 0xEC 0xA5 0x90          #Lo       HANGUL SYLLABLE JWI
+      | 0xEC 0xA5 0xAC          #Lo       HANGUL SYLLABLE JYU
+      | 0xEC 0xA6 0x88          #Lo       HANGUL SYLLABLE JEU
+      | 0xEC 0xA6 0xA4          #Lo       HANGUL SYLLABLE JYI
+      | 0xEC 0xA7 0x80          #Lo       HANGUL SYLLABLE JI
+      | 0xEC 0xA7 0x9C          #Lo       HANGUL SYLLABLE JJA
+      | 0xEC 0xA7 0xB8          #Lo       HANGUL SYLLABLE JJAE
+      | 0xEC 0xA8 0x94          #Lo       HANGUL SYLLABLE JJYA
+      | 0xEC 0xA8 0xB0          #Lo       HANGUL SYLLABLE JJYAE
+      | 0xEC 0xA9 0x8C          #Lo       HANGUL SYLLABLE JJEO
+      | 0xEC 0xA9 0xA8          #Lo       HANGUL SYLLABLE JJE
+      | 0xEC 0xAA 0x84          #Lo       HANGUL SYLLABLE JJYEO
+      | 0xEC 0xAA 0xA0          #Lo       HANGUL SYLLABLE JJYE
+      | 0xEC 0xAA 0xBC          #Lo       HANGUL SYLLABLE JJO
+      | 0xEC 0xAB 0x98          #Lo       HANGUL SYLLABLE JJWA
+      | 0xEC 0xAB 0xB4          #Lo       HANGUL SYLLABLE JJWAE
+      | 0xEC 0xAC 0x90          #Lo       HANGUL SYLLABLE JJOE
+      | 0xEC 0xAC 0xAC          #Lo       HANGUL SYLLABLE JJYO
+      | 0xEC 0xAD 0x88          #Lo       HANGUL SYLLABLE JJU
+      | 0xEC 0xAD 0xA4          #Lo       HANGUL SYLLABLE JJWEO
+      | 0xEC 0xAE 0x80          #Lo       HANGUL SYLLABLE JJWE
+      | 0xEC 0xAE 0x9C          #Lo       HANGUL SYLLABLE JJWI
+      | 0xEC 0xAE 0xB8          #Lo       HANGUL SYLLABLE JJYU
+      | 0xEC 0xAF 0x94          #Lo       HANGUL SYLLABLE JJEU
+      | 0xEC 0xAF 0xB0          #Lo       HANGUL SYLLABLE JJYI
+      | 0xEC 0xB0 0x8C          #Lo       HANGUL SYLLABLE JJI
+      | 0xEC 0xB0 0xA8          #Lo       HANGUL SYLLABLE CA
+      | 0xEC 0xB1 0x84          #Lo       HANGUL SYLLABLE CAE
+      | 0xEC 0xB1 0xA0          #Lo       HANGUL SYLLABLE CYA
+      | 0xEC 0xB1 0xBC          #Lo       HANGUL SYLLABLE CYAE
+      | 0xEC 0xB2 0x98          #Lo       HANGUL SYLLABLE CEO
+      | 0xEC 0xB2 0xB4          #Lo       HANGUL SYLLABLE CE
+      | 0xEC 0xB3 0x90          #Lo       HANGUL SYLLABLE CYEO
+      | 0xEC 0xB3 0xAC          #Lo       HANGUL SYLLABLE CYE
+      | 0xEC 0xB4 0x88          #Lo       HANGUL SYLLABLE CO
+      | 0xEC 0xB4 0xA4          #Lo       HANGUL SYLLABLE CWA
+      | 0xEC 0xB5 0x80          #Lo       HANGUL SYLLABLE CWAE
+      | 0xEC 0xB5 0x9C          #Lo       HANGUL SYLLABLE COE
+      | 0xEC 0xB5 0xB8          #Lo       HANGUL SYLLABLE CYO
+      | 0xEC 0xB6 0x94          #Lo       HANGUL SYLLABLE CU
+      | 0xEC 0xB6 0xB0          #Lo       HANGUL SYLLABLE CWEO
+      | 0xEC 0xB7 0x8C          #Lo       HANGUL SYLLABLE CWE
+      | 0xEC 0xB7 0xA8          #Lo       HANGUL SYLLABLE CWI
+      | 0xEC 0xB8 0x84          #Lo       HANGUL SYLLABLE CYU
+      | 0xEC 0xB8 0xA0          #Lo       HANGUL SYLLABLE CEU
+      | 0xEC 0xB8 0xBC          #Lo       HANGUL SYLLABLE CYI
+      | 0xEC 0xB9 0x98          #Lo       HANGUL SYLLABLE CI
+      | 0xEC 0xB9 0xB4          #Lo       HANGUL SYLLABLE KA
+      | 0xEC 0xBA 0x90          #Lo       HANGUL SYLLABLE KAE
+      | 0xEC 0xBA 0xAC          #Lo       HANGUL SYLLABLE KYA
+      | 0xEC 0xBB 0x88          #Lo       HANGUL SYLLABLE KYAE
+      | 0xEC 0xBB 0xA4          #Lo       HANGUL SYLLABLE KEO
+      | 0xEC 0xBC 0x80          #Lo       HANGUL SYLLABLE KE
+      | 0xEC 0xBC 0x9C          #Lo       HANGUL SYLLABLE KYEO
+      | 0xEC 0xBC 0xB8          #Lo       HANGUL SYLLABLE KYE
+      | 0xEC 0xBD 0x94          #Lo       HANGUL SYLLABLE KO
+      | 0xEC 0xBD 0xB0          #Lo       HANGUL SYLLABLE KWA
+      | 0xEC 0xBE 0x8C          #Lo       HANGUL SYLLABLE KWAE
+      | 0xEC 0xBE 0xA8          #Lo       HANGUL SYLLABLE KOE
+      | 0xEC 0xBF 0x84          #Lo       HANGUL SYLLABLE KYO
+      | 0xEC 0xBF 0xA0          #Lo       HANGUL SYLLABLE KU
+      | 0xEC 0xBF 0xBC          #Lo       HANGUL SYLLABLE KWEO
+      | 0xED 0x80 0x98          #Lo       HANGUL SYLLABLE KWE
+      | 0xED 0x80 0xB4          #Lo       HANGUL SYLLABLE KWI
+      | 0xED 0x81 0x90          #Lo       HANGUL SYLLABLE KYU
+      | 0xED 0x81 0xAC          #Lo       HANGUL SYLLABLE KEU
+      | 0xED 0x82 0x88          #Lo       HANGUL SYLLABLE KYI
+      | 0xED 0x82 0xA4          #Lo       HANGUL SYLLABLE KI
+      | 0xED 0x83 0x80          #Lo       HANGUL SYLLABLE TA
+      | 0xED 0x83 0x9C          #Lo       HANGUL SYLLABLE TAE
+      | 0xED 0x83 0xB8          #Lo       HANGUL SYLLABLE TYA
+      | 0xED 0x84 0x94          #Lo       HANGUL SYLLABLE TYAE
+      | 0xED 0x84 0xB0          #Lo       HANGUL SYLLABLE TEO
+      | 0xED 0x85 0x8C          #Lo       HANGUL SYLLABLE TE
+      | 0xED 0x85 0xA8          #Lo       HANGUL SYLLABLE TYEO
+      | 0xED 0x86 0x84          #Lo       HANGUL SYLLABLE TYE
+      | 0xED 0x86 0xA0          #Lo       HANGUL SYLLABLE TO
+      | 0xED 0x86 0xBC          #Lo       HANGUL SYLLABLE TWA
+      | 0xED 0x87 0x98          #Lo       HANGUL SYLLABLE TWAE
+      | 0xED 0x87 0xB4          #Lo       HANGUL SYLLABLE TOE
+      | 0xED 0x88 0x90          #Lo       HANGUL SYLLABLE TYO
+      | 0xED 0x88 0xAC          #Lo       HANGUL SYLLABLE TU
+      | 0xED 0x89 0x88          #Lo       HANGUL SYLLABLE TWEO
+      | 0xED 0x89 0xA4          #Lo       HANGUL SYLLABLE TWE
+      | 0xED 0x8A 0x80          #Lo       HANGUL SYLLABLE TWI
+      | 0xED 0x8A 0x9C          #Lo       HANGUL SYLLABLE TYU
+      | 0xED 0x8A 0xB8          #Lo       HANGUL SYLLABLE TEU
+      | 0xED 0x8B 0x94          #Lo       HANGUL SYLLABLE TYI
+      | 0xED 0x8B 0xB0          #Lo       HANGUL SYLLABLE TI
+      | 0xED 0x8C 0x8C          #Lo       HANGUL SYLLABLE PA
+      | 0xED 0x8C 0xA8          #Lo       HANGUL SYLLABLE PAE
+      | 0xED 0x8D 0x84          #Lo       HANGUL SYLLABLE PYA
+      | 0xED 0x8D 0xA0          #Lo       HANGUL SYLLABLE PYAE
+      | 0xED 0x8D 0xBC          #Lo       HANGUL SYLLABLE PEO
+      | 0xED 0x8E 0x98          #Lo       HANGUL SYLLABLE PE
+      | 0xED 0x8E 0xB4          #Lo       HANGUL SYLLABLE PYEO
+      | 0xED 0x8F 0x90          #Lo       HANGUL SYLLABLE PYE
+      | 0xED 0x8F 0xAC          #Lo       HANGUL SYLLABLE PO
+      | 0xED 0x90 0x88          #Lo       HANGUL SYLLABLE PWA
+      | 0xED 0x90 0xA4          #Lo       HANGUL SYLLABLE PWAE
+      | 0xED 0x91 0x80          #Lo       HANGUL SYLLABLE POE
+      | 0xED 0x91 0x9C          #Lo       HANGUL SYLLABLE PYO
+      | 0xED 0x91 0xB8          #Lo       HANGUL SYLLABLE PU
+      | 0xED 0x92 0x94          #Lo       HANGUL SYLLABLE PWEO
+      | 0xED 0x92 0xB0          #Lo       HANGUL SYLLABLE PWE
+      | 0xED 0x93 0x8C          #Lo       HANGUL SYLLABLE PWI
+      | 0xED 0x93 0xA8          #Lo       HANGUL SYLLABLE PYU
+      | 0xED 0x94 0x84          #Lo       HANGUL SYLLABLE PEU
+      | 0xED 0x94 0xA0          #Lo       HANGUL SYLLABLE PYI
+      | 0xED 0x94 0xBC          #Lo       HANGUL SYLLABLE PI
+      | 0xED 0x95 0x98          #Lo       HANGUL SYLLABLE HA
+      | 0xED 0x95 0xB4          #Lo       HANGUL SYLLABLE HAE
+      | 0xED 0x96 0x90          #Lo       HANGUL SYLLABLE HYA
+      | 0xED 0x96 0xAC          #Lo       HANGUL SYLLABLE HYAE
+      | 0xED 0x97 0x88          #Lo       HANGUL SYLLABLE HEO
+      | 0xED 0x97 0xA4          #Lo       HANGUL SYLLABLE HE
+      | 0xED 0x98 0x80          #Lo       HANGUL SYLLABLE HYEO
+      | 0xED 0x98 0x9C          #Lo       HANGUL SYLLABLE HYE
+      | 0xED 0x98 0xB8          #Lo       HANGUL SYLLABLE HO
+      | 0xED 0x99 0x94          #Lo       HANGUL SYLLABLE HWA
+      | 0xED 0x99 0xB0          #Lo       HANGUL SYLLABLE HWAE
+      | 0xED 0x9A 0x8C          #Lo       HANGUL SYLLABLE HOE
+      | 0xED 0x9A 0xA8          #Lo       HANGUL SYLLABLE HYO
+      | 0xED 0x9B 0x84          #Lo       HANGUL SYLLABLE HU
+      | 0xED 0x9B 0xA0          #Lo       HANGUL SYLLABLE HWEO
+      | 0xED 0x9B 0xBC          #Lo       HANGUL SYLLABLE HWE
+      | 0xED 0x9C 0x98          #Lo       HANGUL SYLLABLE HWI
+      | 0xED 0x9C 0xB4          #Lo       HANGUL SYLLABLE HYU
+      | 0xED 0x9D 0x90          #Lo       HANGUL SYLLABLE HEU
+      | 0xED 0x9D 0xAC          #Lo       HANGUL SYLLABLE HYI
+      | 0xED 0x9E 0x88          #Lo       HANGUL SYLLABLE HI
+      ;
+
+    LVT = 
+        0xEA 0xB0 0x81..0x9B    #Lo  [27] HANGUL SYLLABLE GAG..HANGUL SYLLAB...
+      | 0xEA 0xB0 0x9D..0xB7    #Lo  [27] HANGUL SYLLABLE GAEG..HANGUL SYLLA...
+      | 0xEA 0xB0 0xB9..0xFF    #Lo  [27] HANGUL SYLLABLE GYAG..HANGUL SYLLA...
+      | 0xEA 0xB1 0x00..0x93    #
+      | 0xEA 0xB1 0x95..0xAF    #Lo  [27] HANGUL SYLLABLE GYAEG..HANGUL SYLL...
+      | 0xEA 0xB1 0xB1..0xFF    #Lo  [27] HANGUL SYLLABLE GEOG..HANGUL SYLLA...
+      | 0xEA 0xB2 0x00..0x8B    #
+      | 0xEA 0xB2 0x8D..0xA7    #Lo  [27] HANGUL SYLLABLE GEG..HANGUL SYLLAB...
+      | 0xEA 0xB2 0xA9..0xFF    #Lo  [27] HANGUL SYLLABLE GYEOG..HANGUL SYLL...
+      | 0xEA 0xB3 0x00..0x83    #
+      | 0xEA 0xB3 0x85..0x9F    #Lo  [27] HANGUL SYLLABLE GYEG..HANGUL SYLLA...
+      | 0xEA 0xB3 0xA1..0xBB    #Lo  [27] HANGUL SYLLABLE GOG..HANGUL SYLLAB...
+      | 0xEA 0xB3 0xBD..0xFF    #Lo  [27] HANGUL SYLLABLE GWAG..HANGUL SYLLA...
+      | 0xEA 0xB4 0x00..0x97    #
+      | 0xEA 0xB4 0x99..0xB3    #Lo  [27] HANGUL SYLLABLE GWAEG..HANGUL SYLL...
+      | 0xEA 0xB4 0xB5..0xFF    #Lo  [27] HANGUL SYLLABLE GOEG..HANGUL SYLLA...
+      | 0xEA 0xB5 0x00..0x8F    #
+      | 0xEA 0xB5 0x91..0xAB    #Lo  [27] HANGUL SYLLABLE GYOG..HANGUL SYLLA...
+      | 0xEA 0xB5 0xAD..0xFF    #Lo  [27] HANGUL SYLLABLE GUG..HANGUL SYLLAB...
+      | 0xEA 0xB6 0x00..0x87    #
+      | 0xEA 0xB6 0x89..0xA3    #Lo  [27] HANGUL SYLLABLE GWEOG..HANGUL SYLL...
+      | 0xEA 0xB6 0xA5..0xBF    #Lo  [27] HANGUL SYLLABLE GWEG..HANGUL SYLLA...
+      | 0xEA 0xB7 0x81..0x9B    #Lo  [27] HANGUL SYLLABLE GWIG..HANGUL SYLLA...
+      | 0xEA 0xB7 0x9D..0xB7    #Lo  [27] HANGUL SYLLABLE GYUG..HANGUL SYLLA...
+      | 0xEA 0xB7 0xB9..0xFF    #Lo  [27] HANGUL SYLLABLE GEUG..HANGUL SYLLA...
+      | 0xEA 0xB8 0x00..0x93    #
+      | 0xEA 0xB8 0x95..0xAF    #Lo  [27] HANGUL SYLLABLE GYIG..HANGUL SYLLA...
+      | 0xEA 0xB8 0xB1..0xFF    #Lo  [27] HANGUL SYLLABLE GIG..HANGUL SYLLAB...
+      | 0xEA 0xB9 0x00..0x8B    #
+      | 0xEA 0xB9 0x8D..0xA7    #Lo  [27] HANGUL SYLLABLE GGAG..HANGUL SYLLA...
+      | 0xEA 0xB9 0xA9..0xFF    #Lo  [27] HANGUL SYLLABLE GGAEG..HANGUL SYLL...
+      | 0xEA 0xBA 0x00..0x83    #
+      | 0xEA 0xBA 0x85..0x9F    #Lo  [27] HANGUL SYLLABLE GGYAG..HANGUL SYLL...
+      | 0xEA 0xBA 0xA1..0xBB    #Lo  [27] HANGUL SYLLABLE GGYAEG..HANGUL SYL...
+      | 0xEA 0xBA 0xBD..0xFF    #Lo  [27] HANGUL SYLLABLE GGEOG..HANGUL SYLL...
+      | 0xEA 0xBB 0x00..0x97    #
+      | 0xEA 0xBB 0x99..0xB3    #Lo  [27] HANGUL SYLLABLE GGEG..HANGUL SYLLA...
+      | 0xEA 0xBB 0xB5..0xFF    #Lo  [27] HANGUL SYLLABLE GGYEOG..HANGUL SYL...
+      | 0xEA 0xBC 0x00..0x8F    #
+      | 0xEA 0xBC 0x91..0xAB    #Lo  [27] HANGUL SYLLABLE GGYEG..HANGUL SYLL...
+      | 0xEA 0xBC 0xAD..0xFF    #Lo  [27] HANGUL SYLLABLE GGOG..HANGUL SYLLA...
+      | 0xEA 0xBD 0x00..0x87    #
+      | 0xEA 0xBD 0x89..0xA3    #Lo  [27] HANGUL SYLLABLE GGWAG..HANGUL SYLL...
+      | 0xEA 0xBD 0xA5..0xBF    #Lo  [27] HANGUL SYLLABLE GGWAEG..HANGUL SYL...
+      | 0xEA 0xBE 0x81..0x9B    #Lo  [27] HANGUL SYLLABLE GGOEG..HANGUL SYLL...
+      | 0xEA 0xBE 0x9D..0xB7    #Lo  [27] HANGUL SYLLABLE GGYOG..HANGUL SYLL...
+      | 0xEA 0xBE 0xB9..0xFF    #Lo  [27] HANGUL SYLLABLE GGUG..HANGUL SYLLA...
+      | 0xEA 0xBF 0x00..0x93    #
+      | 0xEA 0xBF 0x95..0xAF    #Lo  [27] HANGUL SYLLABLE GGWEOG..HANGUL SYL...
+      | 0xEA 0xBF 0xB1..0xFF        #Lo  [27] HANGUL SYLLABLE GGWEG..HANGUL ...
+      | 0xEA 0xC0..0xFF 0x00..0xFF  #
+      | 0xEB 0x00 0x00..0xFF        #
+      | 0xEB 0x01..0x7F 0x00..0xFF  #
+      | 0xEB 0x80 0x00..0x8B        #
+      | 0xEB 0x80 0x8D..0xA7    #Lo  [27] HANGUL SYLLABLE GGWIG..HANGUL SYLL...
+      | 0xEB 0x80 0xA9..0xFF    #Lo  [27] HANGUL SYLLABLE GGYUG..HANGUL SYLL...
+      | 0xEB 0x81 0x00..0x83    #
+      | 0xEB 0x81 0x85..0x9F    #Lo  [27] HANGUL SYLLABLE GGEUG..HANGUL SYLL...
+      | 0xEB 0x81 0xA1..0xBB    #Lo  [27] HANGUL SYLLABLE GGYIG..HANGUL SYLL...
+      | 0xEB 0x81 0xBD..0xFF    #Lo  [27] HANGUL SYLLABLE GGIG..HANGUL SYLLA...
+      | 0xEB 0x82 0x00..0x97    #
+      | 0xEB 0x82 0x99..0xB3    #Lo  [27] HANGUL SYLLABLE NAG..HANGUL SYLLAB...
+      | 0xEB 0x82 0xB5..0xFF    #Lo  [27] HANGUL SYLLABLE NAEG..HANGUL SYLLA...
+      | 0xEB 0x83 0x00..0x8F    #
+      | 0xEB 0x83 0x91..0xAB    #Lo  [27] HANGUL SYLLABLE NYAG..HANGUL SYLLA...
+      | 0xEB 0x83 0xAD..0xFF    #Lo  [27] HANGUL SYLLABLE NYAEG..HANGUL SYLL...
+      | 0xEB 0x84 0x00..0x87    #
+      | 0xEB 0x84 0x89..0xA3    #Lo  [27] HANGUL SYLLABLE NEOG..HANGUL SYLLA...
+      | 0xEB 0x84 0xA5..0xBF    #Lo  [27] HANGUL SYLLABLE NEG..HANGUL SYLLAB...
+      | 0xEB 0x85 0x81..0x9B    #Lo  [27] HANGUL SYLLABLE NYEOG..HANGUL SYLL...
+      | 0xEB 0x85 0x9D..0xB7    #Lo  [27] HANGUL SYLLABLE NYEG..HANGUL SYLLA...
+      | 0xEB 0x85 0xB9..0xFF    #Lo  [27] HANGUL SYLLABLE NOG..HANGUL SYLLAB...
+      | 0xEB 0x86 0x00..0x93    #
+      | 0xEB 0x86 0x95..0xAF    #Lo  [27] HANGUL SYLLABLE NWAG..HANGUL SYLLA...
+      | 0xEB 0x86 0xB1..0xFF    #Lo  [27] HANGUL SYLLABLE NWAEG..HANGUL SYLL...
+      | 0xEB 0x87 0x00..0x8B    #
+      | 0xEB 0x87 0x8D..0xA7    #Lo  [27] HANGUL SYLLABLE NOEG..HANGUL SYLLA...
+      | 0xEB 0x87 0xA9..0xFF    #Lo  [27] HANGUL SYLLABLE NYOG..HANGUL SYLLA...
+      | 0xEB 0x88 0x00..0x83    #
+      | 0xEB 0x88 0x85..0x9F    #Lo  [27] HANGUL SYLLABLE NUG..HANGUL SYLLAB...
+      | 0xEB 0x88 0xA1..0xBB    #Lo  [27] HANGUL SYLLABLE NWEOG..HANGUL SYLL...
+      | 0xEB 0x88 0xBD..0xFF    #Lo  [27] HANGUL SYLLABLE NWEG..HANGUL SYLLA...
+      | 0xEB 0x89 0x00..0x97    #
+      | 0xEB 0x89 0x99..0xB3    #Lo  [27] HANGUL SYLLABLE NWIG..HANGUL SYLLA...
+      | 0xEB 0x89 0xB5..0xFF    #Lo  [27] HANGUL SYLLABLE NYUG..HANGUL SYLLA...
+      | 0xEB 0x8A 0x00..0x8F    #
+      | 0xEB 0x8A 0x91..0xAB    #Lo  [27] HANGUL SYLLABLE NEUG..HANGUL SYLLA...
+      | 0xEB 0x8A 0xAD..0xFF    #Lo  [27] HANGUL SYLLABLE NYIG..HANGUL SYLLA...
+      | 0xEB 0x8B 0x00..0x87    #
+      | 0xEB 0x8B 0x89..0xA3    #Lo  [27] HANGUL SYLLABLE NIG..HANGUL SYLLAB...
+      | 0xEB 0x8B 0xA5..0xBF    #Lo  [27] HANGUL SYLLABLE DAG..HANGUL SYLLAB...
+      | 0xEB 0x8C 0x81..0x9B    #Lo  [27] HANGUL SYLLABLE DAEG..HANGUL SYLLA...
+      | 0xEB 0x8C 0x9D..0xB7    #Lo  [27] HANGUL SYLLABLE DYAG..HANGUL SYLLA...
+      | 0xEB 0x8C 0xB9..0xFF    #Lo  [27] HANGUL SYLLABLE DYAEG..HANGUL SYLL...
+      | 0xEB 0x8D 0x00..0x93    #
+      | 0xEB 0x8D 0x95..0xAF    #Lo  [27] HANGUL SYLLABLE DEOG..HANGUL SYLLA...
+      | 0xEB 0x8D 0xB1..0xFF    #Lo  [27] HANGUL SYLLABLE DEG..HANGUL SYLLAB...
+      | 0xEB 0x8E 0x00..0x8B    #
+      | 0xEB 0x8E 0x8D..0xA7    #Lo  [27] HANGUL SYLLABLE DYEOG..HANGUL SYLL...
+      | 0xEB 0x8E 0xA9..0xFF    #Lo  [27] HANGUL SYLLABLE DYEG..HANGUL SYLLA...
+      | 0xEB 0x8F 0x00..0x83    #
+      | 0xEB 0x8F 0x85..0x9F    #Lo  [27] HANGUL SYLLABLE DOG..HANGUL SYLLAB...
+      | 0xEB 0x8F 0xA1..0xBB    #Lo  [27] HANGUL SYLLABLE DWAG..HANGUL SYLLA...
+      | 0xEB 0x8F 0xBD..0xFF    #Lo  [27] HANGUL SYLLABLE DWAEG..HANGUL SYLL...
+      | 0xEB 0x90 0x00..0x97    #
+      | 0xEB 0x90 0x99..0xB3    #Lo  [27] HANGUL SYLLABLE DOEG..HANGUL SYLLA...
+      | 0xEB 0x90 0xB5..0xFF    #Lo  [27] HANGUL SYLLABLE DYOG..HANGUL SYLLA...
+      | 0xEB 0x91 0x00..0x8F    #
+      | 0xEB 0x91 0x91..0xAB    #Lo  [27] HANGUL SYLLABLE DUG..HANGUL SYLLAB...
+      | 0xEB 0x91 0xAD..0xFF    #Lo  [27] HANGUL SYLLABLE DWEOG..HANGUL SYLL...
+      | 0xEB 0x92 0x00..0x87    #
+      | 0xEB 0x92 0x89..0xA3    #Lo  [27] HANGUL SYLLABLE DWEG..HANGUL SYLLA...
+      | 0xEB 0x92 0xA5..0xBF    #Lo  [27] HANGUL SYLLABLE DWIG..HANGUL SYLLA...
+      | 0xEB 0x93 0x81..0x9B    #Lo  [27] HANGUL SYLLABLE DYUG..HANGUL SYLLA...
+      | 0xEB 0x93 0x9D..0xB7    #Lo  [27] HANGUL SYLLABLE DEUG..HANGUL SYLLA...
+      | 0xEB 0x93 0xB9..0xFF    #Lo  [27] HANGUL SYLLABLE DYIG..HANGUL SYLLA...
+      | 0xEB 0x94 0x00..0x93    #
+      | 0xEB 0x94 0x95..0xAF    #Lo  [27] HANGUL SYLLABLE DIG..HANGUL SYLLAB...
+      | 0xEB 0x94 0xB1..0xFF    #Lo  [27] HANGUL SYLLABLE DDAG..HANGUL SYLLA...
+      | 0xEB 0x95 0x00..0x8B    #
+      | 0xEB 0x95 0x8D..0xA7    #Lo  [27] HANGUL SYLLABLE DDAEG..HANGUL SYLL...
+      | 0xEB 0x95 0xA9..0xFF    #Lo  [27] HANGUL SYLLABLE DDYAG..HANGUL SYLL...
+      | 0xEB 0x96 0x00..0x83    #
+      | 0xEB 0x96 0x85..0x9F    #Lo  [27] HANGUL SYLLABLE DDYAEG..HANGUL SYL...
+      | 0xEB 0x96 0xA1..0xBB    #Lo  [27] HANGUL SYLLABLE DDEOG..HANGUL SYLL...
+      | 0xEB 0x96 0xBD..0xFF    #Lo  [27] HANGUL SYLLABLE DDEG..HANGUL SYLLA...
+      | 0xEB 0x97 0x00..0x97    #
+      | 0xEB 0x97 0x99..0xB3    #Lo  [27] HANGUL SYLLABLE DDYEOG..HANGUL SYL...
+      | 0xEB 0x97 0xB5..0xFF    #Lo  [27] HANGUL SYLLABLE DDYEG..HANGUL SYLL...
+      | 0xEB 0x98 0x00..0x8F    #
+      | 0xEB 0x98 0x91..0xAB    #Lo  [27] HANGUL SYLLABLE DDOG..HANGUL SYLLA...
+      | 0xEB 0x98 0xAD..0xFF    #Lo  [27] HANGUL SYLLABLE DDWAG..HANGUL SYLL...
+      | 0xEB 0x99 0x00..0x87    #
+      | 0xEB 0x99 0x89..0xA3    #Lo  [27] HANGUL SYLLABLE DDWAEG..HANGUL SYL...
+      | 0xEB 0x99 0xA5..0xBF    #Lo  [27] HANGUL SYLLABLE DDOEG..HANGUL SYLL...
+      | 0xEB 0x9A 0x81..0x9B    #Lo  [27] HANGUL SYLLABLE DDYOG..HANGUL SYLL...
+      | 0xEB 0x9A 0x9D..0xB7    #Lo  [27] HANGUL SYLLABLE DDUG..HANGUL SYLLA...
+      | 0xEB 0x9A 0xB9..0xFF    #Lo  [27] HANGUL SYLLABLE DDWEOG..HANGUL SYL...
+      | 0xEB 0x9B 0x00..0x93    #
+      | 0xEB 0x9B 0x95..0xAF    #Lo  [27] HANGUL SYLLABLE DDWEG..HANGUL SYLL...
+      | 0xEB 0x9B 0xB1..0xFF    #Lo  [27] HANGUL SYLLABLE DDWIG..HANGUL SYLL...
+      | 0xEB 0x9C 0x00..0x8B    #
+      | 0xEB 0x9C 0x8D..0xA7    #Lo  [27] HANGUL SYLLABLE DDYUG..HANGUL SYLL...
+      | 0xEB 0x9C 0xA9..0xFF    #Lo  [27] HANGUL SYLLABLE DDEUG..HANGUL SYLL...
+      | 0xEB 0x9D 0x00..0x83    #
+      | 0xEB 0x9D 0x85..0x9F    #Lo  [27] HANGUL SYLLABLE DDYIG..HANGUL SYLL...
+      | 0xEB 0x9D 0xA1..0xBB    #Lo  [27] HANGUL SYLLABLE DDIG..HANGUL SYLLA...
+      | 0xEB 0x9D 0xBD..0xFF    #Lo  [27] HANGUL SYLLABLE RAG..HANGUL SYLLAB...
+      | 0xEB 0x9E 0x00..0x97    #
+      | 0xEB 0x9E 0x99..0xB3    #Lo  [27] HANGUL SYLLABLE RAEG..HANGUL SYLLA...
+      | 0xEB 0x9E 0xB5..0xFF    #Lo  [27] HANGUL SYLLABLE RYAG..HANGUL SYLLA...
+      | 0xEB 0x9F 0x00..0x8F    #
+      | 0xEB 0x9F 0x91..0xAB    #Lo  [27] HANGUL SYLLABLE RYAEG..HANGUL SYLL...
+      | 0xEB 0x9F 0xAD..0xFF    #Lo  [27] HANGUL SYLLABLE REOG..HANGUL SYLLA...
+      | 0xEB 0xA0 0x00..0x87    #
+      | 0xEB 0xA0 0x89..0xA3    #Lo  [27] HANGUL SYLLABLE REG..HANGUL SYLLAB...
+      | 0xEB 0xA0 0xA5..0xBF    #Lo  [27] HANGUL SYLLABLE RYEOG..HANGUL SYLL...
+      | 0xEB 0xA1 0x81..0x9B    #Lo  [27] HANGUL SYLLABLE RYEG..HANGUL SYLLA...
+      | 0xEB 0xA1 0x9D..0xB7    #Lo  [27] HANGUL SYLLABLE ROG..HANGUL SYLLAB...
+      | 0xEB 0xA1 0xB9..0xFF    #Lo  [27] HANGUL SYLLABLE RWAG..HANGUL SYLLA...
+      | 0xEB 0xA2 0x00..0x93    #
+      | 0xEB 0xA2 0x95..0xAF    #Lo  [27] HANGUL SYLLABLE RWAEG..HANGUL SYLL...
+      | 0xEB 0xA2 0xB1..0xFF    #Lo  [27] HANGUL SYLLABLE ROEG..HANGUL SYLLA...
+      | 0xEB 0xA3 0x00..0x8B    #
+      | 0xEB 0xA3 0x8D..0xA7    #Lo  [27] HANGUL SYLLABLE RYOG..HANGUL SYLLA...
+      | 0xEB 0xA3 0xA9..0xFF    #Lo  [27] HANGUL SYLLABLE RUG..HANGUL SYLLAB...
+      | 0xEB 0xA4 0x00..0x83    #
+      | 0xEB 0xA4 0x85..0x9F    #Lo  [27] HANGUL SYLLABLE RWEOG..HANGUL SYLL...
+      | 0xEB 0xA4 0xA1..0xBB    #Lo  [27] HANGUL SYLLABLE RWEG..HANGUL SYLLA...
+      | 0xEB 0xA4 0xBD..0xFF    #Lo  [27] HANGUL SYLLABLE RWIG..HANGUL SYLLA...
+      | 0xEB 0xA5 0x00..0x97    #
+      | 0xEB 0xA5 0x99..0xB3    #Lo  [27] HANGUL SYLLABLE RYUG..HANGUL SYLLA...
+      | 0xEB 0xA5 0xB5..0xFF    #Lo  [27] HANGUL SYLLABLE REUG..HANGUL SYLLA...
+      | 0xEB 0xA6 0x00..0x8F    #
+      | 0xEB 0xA6 0x91..0xAB    #Lo  [27] HANGUL SYLLABLE RYIG..HANGUL SYLLA...
+      | 0xEB 0xA6 0xAD..0xFF    #Lo  [27] HANGUL SYLLABLE RIG..HANGUL SYLLAB...
+      | 0xEB 0xA7 0x00..0x87    #
+      | 0xEB 0xA7 0x89..0xA3    #Lo  [27] HANGUL SYLLABLE MAG..HANGUL SYLLAB...
+      | 0xEB 0xA7 0xA5..0xBF    #Lo  [27] HANGUL SYLLABLE MAEG..HANGUL SYLLA...
+      | 0xEB 0xA8 0x81..0x9B    #Lo  [27] HANGUL SYLLABLE MYAG..HANGUL SYLLA...
+      | 0xEB 0xA8 0x9D..0xB7    #Lo  [27] HANGUL SYLLABLE MYAEG..HANGUL SYLL...
+      | 0xEB 0xA8 0xB9..0xFF    #Lo  [27] HANGUL SYLLABLE MEOG..HANGUL SYLLA...
+      | 0xEB 0xA9 0x00..0x93    #
+      | 0xEB 0xA9 0x95..0xAF    #Lo  [27] HANGUL SYLLABLE MEG..HANGUL SYLLAB...
+      | 0xEB 0xA9 0xB1..0xFF    #Lo  [27] HANGUL SYLLABLE MYEOG..HANGUL SYLL...
+      | 0xEB 0xAA 0x00..0x8B    #
+      | 0xEB 0xAA 0x8D..0xA7    #Lo  [27] HANGUL SYLLABLE MYEG..HANGUL SYLLA...
+      | 0xEB 0xAA 0xA9..0xFF    #Lo  [27] HANGUL SYLLABLE MOG..HANGUL SYLLAB...
+      | 0xEB 0xAB 0x00..0x83    #
+      | 0xEB 0xAB 0x85..0x9F    #Lo  [27] HANGUL SYLLABLE MWAG..HANGUL SYLLA...
+      | 0xEB 0xAB 0xA1..0xBB    #Lo  [27] HANGUL SYLLABLE MWAEG..HANGUL SYLL...
+      | 0xEB 0xAB 0xBD..0xFF    #Lo  [27] HANGUL SYLLABLE MOEG..HANGUL SYLLA...
+      | 0xEB 0xAC 0x00..0x97    #
+      | 0xEB 0xAC 0x99..0xB3    #Lo  [27] HANGUL SYLLABLE MYOG..HANGUL SYLLA...
+      | 0xEB 0xAC 0xB5..0xFF    #Lo  [27] HANGUL SYLLABLE MUG..HANGUL SYLLAB...
+      | 0xEB 0xAD 0x00..0x8F    #
+      | 0xEB 0xAD 0x91..0xAB    #Lo  [27] HANGUL SYLLABLE MWEOG..HANGUL SYLL...
+      | 0xEB 0xAD 0xAD..0xFF    #Lo  [27] HANGUL SYLLABLE MWEG..HANGUL SYLLA...
+      | 0xEB 0xAE 0x00..0x87    #
+      | 0xEB 0xAE 0x89..0xA3    #Lo  [27] HANGUL SYLLABLE MWIG..HANGUL SYLLA...
+      | 0xEB 0xAE 0xA5..0xBF    #Lo  [27] HANGUL SYLLABLE MYUG..HANGUL SYLLA...
+      | 0xEB 0xAF 0x81..0x9B    #Lo  [27] HANGUL SYLLABLE MEUG..HANGUL SYLLA...
+      | 0xEB 0xAF 0x9D..0xB7    #Lo  [27] HANGUL SYLLABLE MYIG..HANGUL SYLLA...
+      | 0xEB 0xAF 0xB9..0xFF    #Lo  [27] HANGUL SYLLABLE MIG..HANGUL SYLLAB...
+      | 0xEB 0xB0 0x00..0x93    #
+      | 0xEB 0xB0 0x95..0xAF    #Lo  [27] HANGUL SYLLABLE BAG..HANGUL SYLLAB...
+      | 0xEB 0xB0 0xB1..0xFF    #Lo  [27] HANGUL SYLLABLE BAEG..HANGUL SYLLA...
+      | 0xEB 0xB1 0x00..0x8B    #
+      | 0xEB 0xB1 0x8D..0xA7    #Lo  [27] HANGUL SYLLABLE BYAG..HANGUL SYLLA...
+      | 0xEB 0xB1 0xA9..0xFF    #Lo  [27] HANGUL SYLLABLE BYAEG..HANGUL SYLL...
+      | 0xEB 0xB2 0x00..0x83    #
+      | 0xEB 0xB2 0x85..0x9F    #Lo  [27] HANGUL SYLLABLE BEOG..HANGUL SYLLA...
+      | 0xEB 0xB2 0xA1..0xBB    #Lo  [27] HANGUL SYLLABLE BEG..HANGUL SYLLAB...
+      | 0xEB 0xB2 0xBD..0xFF    #Lo  [27] HANGUL SYLLABLE BYEOG..HANGUL SYLL...
+      | 0xEB 0xB3 0x00..0x97    #
+      | 0xEB 0xB3 0x99..0xB3    #Lo  [27] HANGUL SYLLABLE BYEG..HANGUL SYLLA...
+      | 0xEB 0xB3 0xB5..0xFF    #Lo  [27] HANGUL SYLLABLE BOG..HANGUL SYLLAB...
+      | 0xEB 0xB4 0x00..0x8F    #
+      | 0xEB 0xB4 0x91..0xAB    #Lo  [27] HANGUL SYLLABLE BWAG..HANGUL SYLLA...
+      | 0xEB 0xB4 0xAD..0xFF    #Lo  [27] HANGUL SYLLABLE BWAEG..HANGUL SYLL...
+      | 0xEB 0xB5 0x00..0x87    #
+      | 0xEB 0xB5 0x89..0xA3    #Lo  [27] HANGUL SYLLABLE BOEG..HANGUL SYLLA...
+      | 0xEB 0xB5 0xA5..0xBF    #Lo  [27] HANGUL SYLLABLE BYOG..HANGUL SYLLA...
+      | 0xEB 0xB6 0x81..0x9B    #Lo  [27] HANGUL SYLLABLE BUG..HANGUL SYLLAB...
+      | 0xEB 0xB6 0x9D..0xB7    #Lo  [27] HANGUL SYLLABLE BWEOG..HANGUL SYLL...
+      | 0xEB 0xB6 0xB9..0xFF    #Lo  [27] HANGUL SYLLABLE BWEG..HANGUL SYLLA...
+      | 0xEB 0xB7 0x00..0x93    #
+      | 0xEB 0xB7 0x95..0xAF    #Lo  [27] HANGUL SYLLABLE BWIG..HANGUL SYLLA...
+      | 0xEB 0xB7 0xB1..0xFF    #Lo  [27] HANGUL SYLLABLE BYUG..HANGUL SYLLA...
+      | 0xEB 0xB8 0x00..0x8B    #
+      | 0xEB 0xB8 0x8D..0xA7    #Lo  [27] HANGUL SYLLABLE BEUG..HANGUL SYLLA...
+      | 0xEB 0xB8 0xA9..0xFF    #Lo  [27] HANGUL SYLLABLE BYIG..HANGUL SYLLA...
+      | 0xEB 0xB9 0x00..0x83    #
+      | 0xEB 0xB9 0x85..0x9F    #Lo  [27] HANGUL SYLLABLE BIG..HANGUL SYLLAB...
+      | 0xEB 0xB9 0xA1..0xBB    #Lo  [27] HANGUL SYLLABLE BBAG..HANGUL SYLLA...
+      | 0xEB 0xB9 0xBD..0xFF    #Lo  [27] HANGUL SYLLABLE BBAEG..HANGUL SYLL...
+      | 0xEB 0xBA 0x00..0x97    #
+      | 0xEB 0xBA 0x99..0xB3    #Lo  [27] HANGUL SYLLABLE BBYAG..HANGUL SYLL...
+      | 0xEB 0xBA 0xB5..0xFF    #Lo  [27] HANGUL SYLLABLE BBYAEG..HANGUL SYL...
+      | 0xEB 0xBB 0x00..0x8F    #
+      | 0xEB 0xBB 0x91..0xAB    #Lo  [27] HANGUL SYLLABLE BBEOG..HANGUL SYLL...
+      | 0xEB 0xBB 0xAD..0xFF    #Lo  [27] HANGUL SYLLABLE BBEG..HANGUL SYLLA...
+      | 0xEB 0xBC 0x00..0x87    #
+      | 0xEB 0xBC 0x89..0xA3    #Lo  [27] HANGUL SYLLABLE BBYEOG..HANGUL SYL...
+      | 0xEB 0xBC 0xA5..0xBF    #Lo  [27] HANGUL SYLLABLE BBYEG..HANGUL SYLL...
+      | 0xEB 0xBD 0x81..0x9B    #Lo  [27] HANGUL SYLLABLE BBOG..HANGUL SYLLA...
+      | 0xEB 0xBD 0x9D..0xB7    #Lo  [27] HANGUL SYLLABLE BBWAG..HANGUL SYLL...
+      | 0xEB 0xBD 0xB9..0xFF    #Lo  [27] HANGUL SYLLABLE BBWAEG..HANGUL SYL...
+      | 0xEB 0xBE 0x00..0x93    #
+      | 0xEB 0xBE 0x95..0xAF    #Lo  [27] HANGUL SYLLABLE BBOEG..HANGUL SYLL...
+      | 0xEB 0xBE 0xB1..0xFF    #Lo  [27] HANGUL SYLLABLE BBYOG..HANGUL SYLL...
+      | 0xEB 0xBF 0x00..0x8B    #
+      | 0xEB 0xBF 0x8D..0xA7    #Lo  [27] HANGUL SYLLABLE BBUG..HANGUL SYLLA...
+      | 0xEB 0xBF 0xA9..0xFF        #Lo  [27] HANGUL SYLLABLE BBWEOG..HANGUL...
+      | 0xEB 0xC0..0xFF 0x00..0xFF  #
+      | 0xEC 0x00 0x00..0xFF        #
+      | 0xEC 0x01..0x7F 0x00..0xFF  #
+      | 0xEC 0x80 0x00..0x83        #
+      | 0xEC 0x80 0x85..0x9F    #Lo  [27] HANGUL SYLLABLE BBWEG..HANGUL SYLL...
+      | 0xEC 0x80 0xA1..0xBB    #Lo  [27] HANGUL SYLLABLE BBWIG..HANGUL SYLL...
+      | 0xEC 0x80 0xBD..0xFF    #Lo  [27] HANGUL SYLLABLE BBYUG..HANGUL SYLL...
+      | 0xEC 0x81 0x00..0x97    #
+      | 0xEC 0x81 0x99..0xB3    #Lo  [27] HANGUL SYLLABLE BBEUG..HANGUL SYLL...
+      | 0xEC 0x81 0xB5..0xFF    #Lo  [27] HANGUL SYLLABLE BBYIG..HANGUL SYLL...
+      | 0xEC 0x82 0x00..0x8F    #
+      | 0xEC 0x82 0x91..0xAB    #Lo  [27] HANGUL SYLLABLE BBIG..HANGUL SYLLA...
+      | 0xEC 0x82 0xAD..0xFF    #Lo  [27] HANGUL SYLLABLE SAG..HANGUL SYLLAB...
+      | 0xEC 0x83 0x00..0x87    #
+      | 0xEC 0x83 0x89..0xA3    #Lo  [27] HANGUL SYLLABLE SAEG..HANGUL SYLLA...
+      | 0xEC 0x83 0xA5..0xBF    #Lo  [27] HANGUL SYLLABLE SYAG..HANGUL SYLLA...
+      | 0xEC 0x84 0x81..0x9B    #Lo  [27] HANGUL SYLLABLE SYAEG..HANGUL SYLL...
+      | 0xEC 0x84 0x9D..0xB7    #Lo  [27] HANGUL SYLLABLE SEOG..HANGUL SYLLA...
+      | 0xEC 0x84 0xB9..0xFF    #Lo  [27] HANGUL SYLLABLE SEG..HANGUL SYLLAB...
+      | 0xEC 0x85 0x00..0x93    #
+      | 0xEC 0x85 0x95..0xAF    #Lo  [27] HANGUL SYLLABLE SYEOG..HANGUL SYLL...
+      | 0xEC 0x85 0xB1..0xFF    #Lo  [27] HANGUL SYLLABLE SYEG..HANGUL SYLLA...
+      | 0xEC 0x86 0x00..0x8B    #
+      | 0xEC 0x86 0x8D..0xA7    #Lo  [27] HANGUL SYLLABLE SOG..HANGUL SYLLAB...
+      | 0xEC 0x86 0xA9..0xFF    #Lo  [27] HANGUL SYLLABLE SWAG..HANGUL SYLLA...
+      | 0xEC 0x87 0x00..0x83    #
+      | 0xEC 0x87 0x85..0x9F    #Lo  [27] HANGUL SYLLABLE SWAEG..HANGUL SYLL...
+      | 0xEC 0x87 0xA1..0xBB    #Lo  [27] HANGUL SYLLABLE SOEG..HANGUL SYLLA...
+      | 0xEC 0x87 0xBD..0xFF    #Lo  [27] HANGUL SYLLABLE SYOG..HANGUL SYLLA...
+      | 0xEC 0x88 0x00..0x97    #
+      | 0xEC 0x88 0x99..0xB3    #Lo  [27] HANGUL SYLLABLE SUG..HANGUL SYLLAB...
+      | 0xEC 0x88 0xB5..0xFF    #Lo  [27] HANGUL SYLLABLE SWEOG..HANGUL SYLL...
+      | 0xEC 0x89 0x00..0x8F    #
+      | 0xEC 0x89 0x91..0xAB    #Lo  [27] HANGUL SYLLABLE SWEG..HANGUL SYLLA...
+      | 0xEC 0x89 0xAD..0xFF    #Lo  [27] HANGUL SYLLABLE SWIG..HANGUL SYLLA...
+      | 0xEC 0x8A 0x00..0x87    #
+      | 0xEC 0x8A 0x89..0xA3    #Lo  [27] HANGUL SYLLABLE SYUG..HANGUL SYLLA...
+      | 0xEC 0x8A 0xA5..0xBF    #Lo  [27] HANGUL SYLLABLE SEUG..HANGUL SYLLA...
+      | 0xEC 0x8B 0x81..0x9B    #Lo  [27] HANGUL SYLLABLE SYIG..HANGUL SYLLA...
+      | 0xEC 0x8B 0x9D..0xB7    #Lo  [27] HANGUL SYLLABLE SIG..HANGUL SYLLAB...
+      | 0xEC 0x8B 0xB9..0xFF    #Lo  [27] HANGUL SYLLABLE SSAG..HANGUL SYLLA...
+      | 0xEC 0x8C 0x00..0x93    #
+      | 0xEC 0x8C 0x95..0xAF    #Lo  [27] HANGUL SYLLABLE SSAEG..HANGUL SYLL...
+      | 0xEC 0x8C 0xB1..0xFF    #Lo  [27] HANGUL SYLLABLE SSYAG..HANGUL SYLL...
+      | 0xEC 0x8D 0x00..0x8B    #
+      | 0xEC 0x8D 0x8D..0xA7    #Lo  [27] HANGUL SYLLABLE SSYAEG..HANGUL SYL...
+      | 0xEC 0x8D 0xA9..0xFF    #Lo  [27] HANGUL SYLLABLE SSEOG..HANGUL SYLL...
+      | 0xEC 0x8E 0x00..0x83    #
+      | 0xEC 0x8E 0x85..0x9F    #Lo  [27] HANGUL SYLLABLE SSEG..HANGUL SYLLA...
+      | 0xEC 0x8E 0xA1..0xBB    #Lo  [27] HANGUL SYLLABLE SSYEOG..HANGUL SYL...
+      | 0xEC 0x8E 0xBD..0xFF    #Lo  [27] HANGUL SYLLABLE SSYEG..HANGUL SYLL...
+      | 0xEC 0x8F 0x00..0x97    #
+      | 0xEC 0x8F 0x99..0xB3    #Lo  [27] HANGUL SYLLABLE SSOG..HANGUL SYLLA...
+      | 0xEC 0x8F 0xB5..0xFF    #Lo  [27] HANGUL SYLLABLE SSWAG..HANGUL SYLL...
+      | 0xEC 0x90 0x00..0x8F    #
+      | 0xEC 0x90 0x91..0xAB    #Lo  [27] HANGUL SYLLABLE SSWAEG..HANGUL SYL...
+      | 0xEC 0x90 0xAD..0xFF    #Lo  [27] HANGUL SYLLABLE SSOEG..HANGUL SYLL...
+      | 0xEC 0x91 0x00..0x87    #
+      | 0xEC 0x91 0x89..0xA3    #Lo  [27] HANGUL SYLLABLE SSYOG..HANGUL SYLL...
+      | 0xEC 0x91 0xA5..0xBF    #Lo  [27] HANGUL SYLLABLE SSUG..HANGUL SYLLA...
+      | 0xEC 0x92 0x81..0x9B    #Lo  [27] HANGUL SYLLABLE SSWEOG..HANGUL SYL...
+      | 0xEC 0x92 0x9D..0xB7    #Lo  [27] HANGUL SYLLABLE SSWEG..HANGUL SYLL...
+      | 0xEC 0x92 0xB9..0xFF    #Lo  [27] HANGUL SYLLABLE SSWIG..HANGUL SYLL...
+      | 0xEC 0x93 0x00..0x93    #
+      | 0xEC 0x93 0x95..0xAF    #Lo  [27] HANGUL SYLLABLE SSYUG..HANGUL SYLL...
+      | 0xEC 0x93 0xB1..0xFF    #Lo  [27] HANGUL SYLLABLE SSEUG..HANGUL SYLL...
+      | 0xEC 0x94 0x00..0x8B    #
+      | 0xEC 0x94 0x8D..0xA7    #Lo  [27] HANGUL SYLLABLE SSYIG..HANGUL SYLL...
+      | 0xEC 0x94 0xA9..0xFF    #Lo  [27] HANGUL SYLLABLE SSIG..HANGUL SYLLA...
+      | 0xEC 0x95 0x00..0x83    #
+      | 0xEC 0x95 0x85..0x9F    #Lo  [27] HANGUL SYLLABLE AG..HANGUL SYLLABL...
+      | 0xEC 0x95 0xA1..0xBB    #Lo  [27] HANGUL SYLLABLE AEG..HANGUL SYLLAB...
+      | 0xEC 0x95 0xBD..0xFF    #Lo  [27] HANGUL SYLLABLE YAG..HANGUL SYLLAB...
+      | 0xEC 0x96 0x00..0x97    #
+      | 0xEC 0x96 0x99..0xB3    #Lo  [27] HANGUL SYLLABLE YAEG..HANGUL SYLLA...
+      | 0xEC 0x96 0xB5..0xFF    #Lo  [27] HANGUL SYLLABLE EOG..HANGUL SYLLAB...
+      | 0xEC 0x97 0x00..0x8F    #
+      | 0xEC 0x97 0x91..0xAB    #Lo  [27] HANGUL SYLLABLE EG..HANGUL SYLLABL...
+      | 0xEC 0x97 0xAD..0xFF    #Lo  [27] HANGUL SYLLABLE YEOG..HANGUL SYLLA...
+      | 0xEC 0x98 0x00..0x87    #
+      | 0xEC 0x98 0x89..0xA3    #Lo  [27] HANGUL SYLLABLE YEG..HANGUL SYLLAB...
+      | 0xEC 0x98 0xA5..0xBF    #Lo  [27] HANGUL SYLLABLE OG..HANGUL SYLLABL...
+      | 0xEC 0x99 0x81..0x9B    #Lo  [27] HANGUL SYLLABLE WAG..HANGUL SYLLAB...
+      | 0xEC 0x99 0x9D..0xB7    #Lo  [27] HANGUL SYLLABLE WAEG..HANGUL SYLLA...
+      | 0xEC 0x99 0xB9..0xFF    #Lo  [27] HANGUL SYLLABLE OEG..HANGUL SYLLAB...
+      | 0xEC 0x9A 0x00..0x93    #
+      | 0xEC 0x9A 0x95..0xAF    #Lo  [27] HANGUL SYLLABLE YOG..HANGUL SYLLAB...
+      | 0xEC 0x9A 0xB1..0xFF    #Lo  [27] HANGUL SYLLABLE UG..HANGUL SYLLABL...
+      | 0xEC 0x9B 0x00..0x8B    #
+      | 0xEC 0x9B 0x8D..0xA7    #Lo  [27] HANGUL SYLLABLE WEOG..HANGUL SYLLA...
+      | 0xEC 0x9B 0xA9..0xFF    #Lo  [27] HANGUL SYLLABLE WEG..HANGUL SYLLAB...
+      | 0xEC 0x9C 0x00..0x83    #
+      | 0xEC 0x9C 0x85..0x9F    #Lo  [27] HANGUL SYLLABLE WIG..HANGUL SYLLAB...
+      | 0xEC 0x9C 0xA1..0xBB    #Lo  [27] HANGUL SYLLABLE YUG..HANGUL SYLLAB...
+      | 0xEC 0x9C 0xBD..0xFF    #Lo  [27] HANGUL SYLLABLE EUG..HANGUL SYLLAB...
+      | 0xEC 0x9D 0x00..0x97    #
+      | 0xEC 0x9D 0x99..0xB3    #Lo  [27] HANGUL SYLLABLE YIG..HANGUL SYLLAB...
+      | 0xEC 0x9D 0xB5..0xFF    #Lo  [27] HANGUL SYLLABLE IG..HANGUL SYLLABL...
+      | 0xEC 0x9E 0x00..0x8F    #
+      | 0xEC 0x9E 0x91..0xAB    #Lo  [27] HANGUL SYLLABLE JAG..HANGUL SYLLAB...
+      | 0xEC 0x9E 0xAD..0xFF    #Lo  [27] HANGUL SYLLABLE JAEG..HANGUL SYLLA...
+      | 0xEC 0x9F 0x00..0x87    #
+      | 0xEC 0x9F 0x89..0xA3    #Lo  [27] HANGUL SYLLABLE JYAG..HANGUL SYLLA...
+      | 0xEC 0x9F 0xA5..0xBF    #Lo  [27] HANGUL SYLLABLE JYAEG..HANGUL SYLL...
+      | 0xEC 0xA0 0x81..0x9B    #Lo  [27] HANGUL SYLLABLE JEOG..HANGUL SYLLA...
+      | 0xEC 0xA0 0x9D..0xB7    #Lo  [27] HANGUL SYLLABLE JEG..HANGUL SYLLAB...
+      | 0xEC 0xA0 0xB9..0xFF    #Lo  [27] HANGUL SYLLABLE JYEOG..HANGUL SYLL...
+      | 0xEC 0xA1 0x00..0x93    #
+      | 0xEC 0xA1 0x95..0xAF    #Lo  [27] HANGUL SYLLABLE JYEG..HANGUL SYLLA...
+      | 0xEC 0xA1 0xB1..0xFF    #Lo  [27] HANGUL SYLLABLE JOG..HANGUL SYLLAB...
+      | 0xEC 0xA2 0x00..0x8B    #
+      | 0xEC 0xA2 0x8D..0xA7    #Lo  [27] HANGUL SYLLABLE JWAG..HANGUL SYLLA...
+      | 0xEC 0xA2 0xA9..0xFF    #Lo  [27] HANGUL SYLLABLE JWAEG..HANGUL SYLL...
+      | 0xEC 0xA3 0x00..0x83    #
+      | 0xEC 0xA3 0x85..0x9F    #Lo  [27] HANGUL SYLLABLE JOEG..HANGUL SYLLA...
+      | 0xEC 0xA3 0xA1..0xBB    #Lo  [27] HANGUL SYLLABLE JYOG..HANGUL SYLLA...
+      | 0xEC 0xA3 0xBD..0xFF    #Lo  [27] HANGUL SYLLABLE JUG..HANGUL SYLLAB...
+      | 0xEC 0xA4 0x00..0x97    #
+      | 0xEC 0xA4 0x99..0xB3    #Lo  [27] HANGUL SYLLABLE JWEOG..HANGUL SYLL...
+      | 0xEC 0xA4 0xB5..0xFF    #Lo  [27] HANGUL SYLLABLE JWEG..HANGUL SYLLA...
+      | 0xEC 0xA5 0x00..0x8F    #
+      | 0xEC 0xA5 0x91..0xAB    #Lo  [27] HANGUL SYLLABLE JWIG..HANGUL SYLLA...
+      | 0xEC 0xA5 0xAD..0xFF    #Lo  [27] HANGUL SYLLABLE JYUG..HANGUL SYLLA...
+      | 0xEC 0xA6 0x00..0x87    #
+      | 0xEC 0xA6 0x89..0xA3    #Lo  [27] HANGUL SYLLABLE JEUG..HANGUL SYLLA...
+      | 0xEC 0xA6 0xA5..0xBF    #Lo  [27] HANGUL SYLLABLE JYIG..HANGUL SYLLA...
+      | 0xEC 0xA7 0x81..0x9B    #Lo  [27] HANGUL SYLLABLE JIG..HANGUL SYLLAB...
+      | 0xEC 0xA7 0x9D..0xB7    #Lo  [27] HANGUL SYLLABLE JJAG..HANGUL SYLLA...
+      | 0xEC 0xA7 0xB9..0xFF    #Lo  [27] HANGUL SYLLABLE JJAEG..HANGUL SYLL...
+      | 0xEC 0xA8 0x00..0x93    #
+      | 0xEC 0xA8 0x95..0xAF    #Lo  [27] HANGUL SYLLABLE JJYAG..HANGUL SYLL...
+      | 0xEC 0xA8 0xB1..0xFF    #Lo  [27] HANGUL SYLLABLE JJYAEG..HANGUL SYL...
+      | 0xEC 0xA9 0x00..0x8B    #
+      | 0xEC 0xA9 0x8D..0xA7    #Lo  [27] HANGUL SYLLABLE JJEOG..HANGUL SYLL...
+      | 0xEC 0xA9 0xA9..0xFF    #Lo  [27] HANGUL SYLLABLE JJEG..HANGUL SYLLA...
+      | 0xEC 0xAA 0x00..0x83    #
+      | 0xEC 0xAA 0x85..0x9F    #Lo  [27] HANGUL SYLLABLE JJYEOG..HANGUL SYL...
+      | 0xEC 0xAA 0xA1..0xBB    #Lo  [27] HANGUL SYLLABLE JJYEG..HANGUL SYLL...
+      | 0xEC 0xAA 0xBD..0xFF    #Lo  [27] HANGUL SYLLABLE JJOG..HANGUL SYLLA...
+      | 0xEC 0xAB 0x00..0x97    #
+      | 0xEC 0xAB 0x99..0xB3    #Lo  [27] HANGUL SYLLABLE JJWAG..HANGUL SYLL...
+      | 0xEC 0xAB 0xB5..0xFF    #Lo  [27] HANGUL SYLLABLE JJWAEG..HANGUL SYL...
+      | 0xEC 0xAC 0x00..0x8F    #
+      | 0xEC 0xAC 0x91..0xAB    #Lo  [27] HANGUL SYLLABLE JJOEG..HANGUL SYLL...
+      | 0xEC 0xAC 0xAD..0xFF    #Lo  [27] HANGUL SYLLABLE JJYOG..HANGUL SYLL...
+      | 0xEC 0xAD 0x00..0x87    #
+      | 0xEC 0xAD 0x89..0xA3    #Lo  [27] HANGUL SYLLABLE JJUG..HANGUL SYLLA...
+      | 0xEC 0xAD 0xA5..0xBF    #Lo  [27] HANGUL SYLLABLE JJWEOG..HANGUL SYL...
+      | 0xEC 0xAE 0x81..0x9B    #Lo  [27] HANGUL SYLLABLE JJWEG..HANGUL SYLL...
+      | 0xEC 0xAE 0x9D..0xB7    #Lo  [27] HANGUL SYLLABLE JJWIG..HANGUL SYLL...
+      | 0xEC 0xAE 0xB9..0xFF    #Lo  [27] HANGUL SYLLABLE JJYUG..HANGUL SYLL...
+      | 0xEC 0xAF 0x00..0x93    #
+      | 0xEC 0xAF 0x95..0xAF    #Lo  [27] HANGUL SYLLABLE JJEUG..HANGUL SYLL...
+      | 0xEC 0xAF 0xB1..0xFF    #Lo  [27] HANGUL SYLLABLE JJYIG..HANGUL SYLL...
+      | 0xEC 0xB0 0x00..0x8B    #
+      | 0xEC 0xB0 0x8D..0xA7    #Lo  [27] HANGUL SYLLABLE JJIG..HANGUL SYLLA...
+      | 0xEC 0xB0 0xA9..0xFF    #Lo  [27] HANGUL SYLLABLE CAG..HANGUL SYLLAB...
+      | 0xEC 0xB1 0x00..0x83    #
+      | 0xEC 0xB1 0x85..0x9F    #Lo  [27] HANGUL SYLLABLE CAEG..HANGUL SYLLA...
+      | 0xEC 0xB1 0xA1..0xBB    #Lo  [27] HANGUL SYLLABLE CYAG..HANGUL SYLLA...
+      | 0xEC 0xB1 0xBD..0xFF    #Lo  [27] HANGUL SYLLABLE CYAEG..HANGUL SYLL...
+      | 0xEC 0xB2 0x00..0x97    #
+      | 0xEC 0xB2 0x99..0xB3    #Lo  [27] HANGUL SYLLABLE CEOG..HANGUL SYLLA...
+      | 0xEC 0xB2 0xB5..0xFF    #Lo  [27] HANGUL SYLLABLE CEG..HANGUL SYLLAB...
+      | 0xEC 0xB3 0x00..0x8F    #
+      | 0xEC 0xB3 0x91..0xAB    #Lo  [27] HANGUL SYLLABLE CYEOG..HANGUL SYLL...
+      | 0xEC 0xB3 0xAD..0xFF    #Lo  [27] HANGUL SYLLABLE CYEG..HANGUL SYLLA...
+      | 0xEC 0xB4 0x00..0x87    #
+      | 0xEC 0xB4 0x89..0xA3    #Lo  [27] HANGUL SYLLABLE COG..HANGUL SYLLAB...
+      | 0xEC 0xB4 0xA5..0xBF    #Lo  [27] HANGUL SYLLABLE CWAG..HANGUL SYLLA...
+      | 0xEC 0xB5 0x81..0x9B    #Lo  [27] HANGUL SYLLABLE CWAEG..HANGUL SYLL...
+      | 0xEC 0xB5 0x9D..0xB7    #Lo  [27] HANGUL SYLLABLE COEG..HANGUL SYLLA...
+      | 0xEC 0xB5 0xB9..0xFF    #Lo  [27] HANGUL SYLLABLE CYOG..HANGUL SYLLA...
+      | 0xEC 0xB6 0x00..0x93    #
+      | 0xEC 0xB6 0x95..0xAF    #Lo  [27] HANGUL SYLLABLE CUG..HANGUL SYLLAB...
+      | 0xEC 0xB6 0xB1..0xFF    #Lo  [27] HANGUL SYLLABLE CWEOG..HANGUL SYLL...
+      | 0xEC 0xB7 0x00..0x8B    #
+      | 0xEC 0xB7 0x8D..0xA7    #Lo  [27] HANGUL SYLLABLE CWEG..HANGUL SYLLA...
+      | 0xEC 0xB7 0xA9..0xFF    #Lo  [27] HANGUL SYLLABLE CWIG..HANGUL SYLLA...
+      | 0xEC 0xB8 0x00..0x83    #
+      | 0xEC 0xB8 0x85..0x9F    #Lo  [27] HANGUL SYLLABLE CYUG..HANGUL SYLLA...
+      | 0xEC 0xB8 0xA1..0xBB    #Lo  [27] HANGUL SYLLABLE CEUG..HANGUL SYLLA...
+      | 0xEC 0xB8 0xBD..0xFF    #Lo  [27] HANGUL SYLLABLE CYIG..HANGUL SYLLA...
+      | 0xEC 0xB9 0x00..0x97    #
+      | 0xEC 0xB9 0x99..0xB3    #Lo  [27] HANGUL SYLLABLE CIG..HANGUL SYLLAB...
+      | 0xEC 0xB9 0xB5..0xFF    #Lo  [27] HANGUL SYLLABLE KAG..HANGUL SYLLAB...
+      | 0xEC 0xBA 0x00..0x8F    #
+      | 0xEC 0xBA 0x91..0xAB    #Lo  [27] HANGUL SYLLABLE KAEG..HANGUL SYLLA...
+      | 0xEC 0xBA 0xAD..0xFF    #Lo  [27] HANGUL SYLLABLE KYAG..HANGUL SYLLA...
+      | 0xEC 0xBB 0x00..0x87    #
+      | 0xEC 0xBB 0x89..0xA3    #Lo  [27] HANGUL SYLLABLE KYAEG..HANGUL SYLL...
+      | 0xEC 0xBB 0xA5..0xBF    #Lo  [27] HANGUL SYLLABLE KEOG..HANGUL SYLLA...
+      | 0xEC 0xBC 0x81..0x9B    #Lo  [27] HANGUL SYLLABLE KEG..HANGUL SYLLAB...
+      | 0xEC 0xBC 0x9D..0xB7    #Lo  [27] HANGUL SYLLABLE KYEOG..HANGUL SYLL...
+      | 0xEC 0xBC 0xB9..0xFF    #Lo  [27] HANGUL SYLLABLE KYEG..HANGUL SYLLA...
+      | 0xEC 0xBD 0x00..0x93    #
+      | 0xEC 0xBD 0x95..0xAF    #Lo  [27] HANGUL SYLLABLE KOG..HANGUL SYLLAB...
+      | 0xEC 0xBD 0xB1..0xFF    #Lo  [27] HANGUL SYLLABLE KWAG..HANGUL SYLLA...
+      | 0xEC 0xBE 0x00..0x8B    #
+      | 0xEC 0xBE 0x8D..0xA7    #Lo  [27] HANGUL SYLLABLE KWAEG..HANGUL SYLL...
+      | 0xEC 0xBE 0xA9..0xFF    #Lo  [27] HANGUL SYLLABLE KOEG..HANGUL SYLLA...
+      | 0xEC 0xBF 0x00..0x83    #
+      | 0xEC 0xBF 0x85..0x9F    #Lo  [27] HANGUL SYLLABLE KYOG..HANGUL SYLLA...
+      | 0xEC 0xBF 0xA1..0xBB    #Lo  [27] HANGUL SYLLABLE KUG..HANGUL SYLLAB...
+      | 0xEC 0xBF 0xBD..0xFF        #Lo  [27] HANGUL SYLLABLE KWEOG..HANGUL ...
+      | 0xEC 0xC0..0xFF 0x00..0xFF  #
+      | 0xED 0x00 0x00..0xFF        #
+      | 0xED 0x01..0x7F 0x00..0xFF  #
+      | 0xED 0x80 0x00..0x97        #
+      | 0xED 0x80 0x99..0xB3    #Lo  [27] HANGUL SYLLABLE KWEG..HANGUL SYLLA...
+      | 0xED 0x80 0xB5..0xFF    #Lo  [27] HANGUL SYLLABLE KWIG..HANGUL SYLLA...
+      | 0xED 0x81 0x00..0x8F    #
+      | 0xED 0x81 0x91..0xAB    #Lo  [27] HANGUL SYLLABLE KYUG..HANGUL SYLLA...
+      | 0xED 0x81 0xAD..0xFF    #Lo  [27] HANGUL SYLLABLE KEUG..HANGUL SYLLA...
+      | 0xED 0x82 0x00..0x87    #
+      | 0xED 0x82 0x89..0xA3    #Lo  [27] HANGUL SYLLABLE KYIG..HANGUL SYLLA...
+      | 0xED 0x82 0xA5..0xBF    #Lo  [27] HANGUL SYLLABLE KIG..HANGUL SYLLAB...
+      | 0xED 0x83 0x81..0x9B    #Lo  [27] HANGUL SYLLABLE TAG..HANGUL SYLLAB...
+      | 0xED 0x83 0x9D..0xB7    #Lo  [27] HANGUL SYLLABLE TAEG..HANGUL SYLLA...
+      | 0xED 0x83 0xB9..0xFF    #Lo  [27] HANGUL SYLLABLE TYAG..HANGUL SYLLA...
+      | 0xED 0x84 0x00..0x93    #
+      | 0xED 0x84 0x95..0xAF    #Lo  [27] HANGUL SYLLABLE TYAEG..HANGUL SYLL...
+      | 0xED 0x84 0xB1..0xFF    #Lo  [27] HANGUL SYLLABLE TEOG..HANGUL SYLLA...
+      | 0xED 0x85 0x00..0x8B    #
+      | 0xED 0x85 0x8D..0xA7    #Lo  [27] HANGUL SYLLABLE TEG..HANGUL SYLLAB...
+      | 0xED 0x85 0xA9..0xFF    #Lo  [27] HANGUL SYLLABLE TYEOG..HANGUL SYLL...
+      | 0xED 0x86 0x00..0x83    #
+      | 0xED 0x86 0x85..0x9F    #Lo  [27] HANGUL SYLLABLE TYEG..HANGUL SYLLA...
+      | 0xED 0x86 0xA1..0xBB    #Lo  [27] HANGUL SYLLABLE TOG..HANGUL SYLLAB...
+      | 0xED 0x86 0xBD..0xFF    #Lo  [27] HANGUL SYLLABLE TWAG..HANGUL SYLLA...
+      | 0xED 0x87 0x00..0x97    #
+      | 0xED 0x87 0x99..0xB3    #Lo  [27] HANGUL SYLLABLE TWAEG..HANGUL SYLL...
+      | 0xED 0x87 0xB5..0xFF    #Lo  [27] HANGUL SYLLABLE TOEG..HANGUL SYLLA...
+      | 0xED 0x88 0x00..0x8F    #
+      | 0xED 0x88 0x91..0xAB    #Lo  [27] HANGUL SYLLABLE TYOG..HANGUL SYLLA...
+      | 0xED 0x88 0xAD..0xFF    #Lo  [27] HANGUL SYLLABLE TUG..HANGUL SYLLAB...
+      | 0xED 0x89 0x00..0x87    #
+      | 0xED 0x89 0x89..0xA3    #Lo  [27] HANGUL SYLLABLE TWEOG..HANGUL SYLL...
+      | 0xED 0x89 0xA5..0xBF    #Lo  [27] HANGUL SYLLABLE TWEG..HANGUL SYLLA...
+      | 0xED 0x8A 0x81..0x9B    #Lo  [27] HANGUL SYLLABLE TWIG..HANGUL SYLLA...
+      | 0xED 0x8A 0x9D..0xB7    #Lo  [27] HANGUL SYLLABLE TYUG..HANGUL SYLLA...
+      | 0xED 0x8A 0xB9..0xFF    #Lo  [27] HANGUL SYLLABLE TEUG..HANGUL SYLLA...
+      | 0xED 0x8B 0x00..0x93    #
+      | 0xED 0x8B 0x95..0xAF    #Lo  [27] HANGUL SYLLABLE TYIG..HANGUL SYLLA...
+      | 0xED 0x8B 0xB1..0xFF    #Lo  [27] HANGUL SYLLABLE TIG..HANGUL SYLLAB...
+      | 0xED 0x8C 0x00..0x8B    #
+      | 0xED 0x8C 0x8D..0xA7    #Lo  [27] HANGUL SYLLABLE PAG..HANGUL SYLLAB...
+      | 0xED 0x8C 0xA9..0xFF    #Lo  [27] HANGUL SYLLABLE PAEG..HANGUL SYLLA...
+      | 0xED 0x8D 0x00..0x83    #
+      | 0xED 0x8D 0x85..0x9F    #Lo  [27] HANGUL SYLLABLE PYAG..HANGUL SYLLA...
+      | 0xED 0x8D 0xA1..0xBB    #Lo  [27] HANGUL SYLLABLE PYAEG..HANGUL SYLL...
+      | 0xED 0x8D 0xBD..0xFF    #Lo  [27] HANGUL SYLLABLE PEOG..HANGUL SYLLA...
+      | 0xED 0x8E 0x00..0x97    #
+      | 0xED 0x8E 0x99..0xB3    #Lo  [27] HANGUL SYLLABLE PEG..HANGUL SYLLAB...
+      | 0xED 0x8E 0xB5..0xFF    #Lo  [27] HANGUL SYLLABLE PYEOG..HANGUL SYLL...
+      | 0xED 0x8F 0x00..0x8F    #
+      | 0xED 0x8F 0x91..0xAB    #Lo  [27] HANGUL SYLLABLE PYEG..HANGUL SYLLA...
+      | 0xED 0x8F 0xAD..0xFF    #Lo  [27] HANGUL SYLLABLE POG..HANGUL SYLLAB...
+      | 0xED 0x90 0x00..0x87    #
+      | 0xED 0x90 0x89..0xA3    #Lo  [27] HANGUL SYLLABLE PWAG..HANGUL SYLLA...
+      | 0xED 0x90 0xA5..0xBF    #Lo  [27] HANGUL SYLLABLE PWAEG..HANGUL SYLL...
+      | 0xED 0x91 0x81..0x9B    #Lo  [27] HANGUL SYLLABLE POEG..HANGUL SYLLA...
+      | 0xED 0x91 0x9D..0xB7    #Lo  [27] HANGUL SYLLABLE PYOG..HANGUL SYLLA...
+      | 0xED 0x91 0xB9..0xFF    #Lo  [27] HANGUL SYLLABLE PUG..HANGUL SYLLAB...
+      | 0xED 0x92 0x00..0x93    #
+      | 0xED 0x92 0x95..0xAF    #Lo  [27] HANGUL SYLLABLE PWEOG..HANGUL SYLL...
+      | 0xED 0x92 0xB1..0xFF    #Lo  [27] HANGUL SYLLABLE PWEG..HANGUL SYLLA...
+      | 0xED 0x93 0x00..0x8B    #
+      | 0xED 0x93 0x8D..0xA7    #Lo  [27] HANGUL SYLLABLE PWIG..HANGUL SYLLA...
+      | 0xED 0x93 0xA9..0xFF    #Lo  [27] HANGUL SYLLABLE PYUG..HANGUL SYLLA...
+      | 0xED 0x94 0x00..0x83    #
+      | 0xED 0x94 0x85..0x9F    #Lo  [27] HANGUL SYLLABLE PEUG..HANGUL SYLLA...
+      | 0xED 0x94 0xA1..0xBB    #Lo  [27] HANGUL SYLLABLE PYIG..HANGUL SYLLA...
+      | 0xED 0x94 0xBD..0xFF    #Lo  [27] HANGUL SYLLABLE PIG..HANGUL SYLLAB...
+      | 0xED 0x95 0x00..0x97    #
+      | 0xED 0x95 0x99..0xB3    #Lo  [27] HANGUL SYLLABLE HAG..HANGUL SYLLAB...
+      | 0xED 0x95 0xB5..0xFF    #Lo  [27] HANGUL SYLLABLE HAEG..HANGUL SYLLA...
+      | 0xED 0x96 0x00..0x8F    #
+      | 0xED 0x96 0x91..0xAB    #Lo  [27] HANGUL SYLLABLE HYAG..HANGUL SYLLA...
+      | 0xED 0x96 0xAD..0xFF    #Lo  [27] HANGUL SYLLABLE HYAEG..HANGUL SYLL...
+      | 0xED 0x97 0x00..0x87    #
+      | 0xED 0x97 0x89..0xA3    #Lo  [27] HANGUL SYLLABLE HEOG..HANGUL SYLLA...
+      | 0xED 0x97 0xA5..0xBF    #Lo  [27] HANGUL SYLLABLE HEG..HANGUL SYLLAB...
+      | 0xED 0x98 0x81..0x9B    #Lo  [27] HANGUL SYLLABLE HYEOG..HANGUL SYLL...
+      | 0xED 0x98 0x9D..0xB7    #Lo  [27] HANGUL SYLLABLE HYEG..HANGUL SYLLA...
+      | 0xED 0x98 0xB9..0xFF    #Lo  [27] HANGUL SYLLABLE HOG..HANGUL SYLLAB...
+      | 0xED 0x99 0x00..0x93    #
+      | 0xED 0x99 0x95..0xAF    #Lo  [27] HANGUL SYLLABLE HWAG..HANGUL SYLLA...
+      | 0xED 0x99 0xB1..0xFF    #Lo  [27] HANGUL SYLLABLE HWAEG..HANGUL SYLL...
+      | 0xED 0x9A 0x00..0x8B    #
+      | 0xED 0x9A 0x8D..0xA7    #Lo  [27] HANGUL SYLLABLE HOEG..HANGUL SYLLA...
+      | 0xED 0x9A 0xA9..0xFF    #Lo  [27] HANGUL SYLLABLE HYOG..HANGUL SYLLA...
+      | 0xED 0x9B 0x00..0x83    #
+      | 0xED 0x9B 0x85..0x9F    #Lo  [27] HANGUL SYLLABLE HUG..HANGUL SYLLAB...
+      | 0xED 0x9B 0xA1..0xBB    #Lo  [27] HANGUL SYLLABLE HWEOG..HANGUL SYLL...
+      | 0xED 0x9B 0xBD..0xFF    #Lo  [27] HANGUL SYLLABLE HWEG..HANGUL SYLLA...
+      | 0xED 0x9C 0x00..0x97    #
+      | 0xED 0x9C 0x99..0xB3    #Lo  [27] HANGUL SYLLABLE HWIG..HANGUL SYLLA...
+      | 0xED 0x9C 0xB5..0xFF    #Lo  [27] HANGUL SYLLABLE HYUG..HANGUL SYLLA...
+      | 0xED 0x9D 0x00..0x8F    #
+      | 0xED 0x9D 0x91..0xAB    #Lo  [27] HANGUL SYLLABLE HEUG..HANGUL SYLLA...
+      | 0xED 0x9D 0xAD..0xFF    #Lo  [27] HANGUL SYLLABLE HYIG..HANGUL SYLLA...
+      | 0xED 0x9E 0x00..0x87    #
+      | 0xED 0x9E 0x89..0xA3    #Lo  [27] HANGUL SYLLABLE HIG..HANGUL SYLLAB...
+      ;
+
+    ZWJ = 
+        0xE2 0x80 0x8D          #Cf       ZERO WIDTH JOINER
+      ;
+
+}%%
diff -pruN 0.19.3+ds1-4/vendor/github.com/apparentlymart/go-textseg/v15/textseg/tables.go 0.21.3-0ubuntu1/vendor/github.com/apparentlymart/go-textseg/v15/textseg/tables.go
--- 0.19.3+ds1-4/vendor/github.com/apparentlymart/go-textseg/v15/textseg/tables.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/apparentlymart/go-textseg/v15/textseg/tables.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,6120 @@
+// Generated by running
+//      maketables --url=http://www.unicode.org/Public/15.0.0/ucd/auxiliary/
+// DO NOT EDIT
+
+package textseg
+
+import (
+	"unicode"
+)
+
+var _GraphemeCR = &unicode.RangeTable{
+	R16: []unicode.Range16{
+		unicode.Range16{Lo: 0xd, Hi: 0xd, Stride: 0x1},
+	},
+	LatinOffset: 1,
+}
+
+var _GraphemeControl = &unicode.RangeTable{
+	R16: []unicode.Range16{
+		unicode.Range16{Lo: 0x0, Hi: 0x9, Stride: 0x1},
+		unicode.Range16{Lo: 0xb, Hi: 0xc, Stride: 0x1},
+		unicode.Range16{Lo: 0xe, Hi: 0x1f, Stride: 0x1},
+		unicode.Range16{Lo: 0x7f, Hi: 0x9f, Stride: 0x1},
+		unicode.Range16{Lo: 0xad, Hi: 0xad, Stride: 0x1},
+		unicode.Range16{Lo: 0x61c, Hi: 0x61c, Stride: 0x1},
+		unicode.Range16{Lo: 0x180e, Hi: 0x180e, Stride: 0x1},
+		unicode.Range16{Lo: 0x200b, Hi: 0x200b, Stride: 0x1},
+		unicode.Range16{Lo: 0x200e, Hi: 0x200f, Stride: 0x1},
+		unicode.Range16{Lo: 0x2028, Hi: 0x2028, Stride: 0x1},
+		unicode.Range16{Lo: 0x2029, Hi: 0x2029, Stride: 0x1},
+		unicode.Range16{Lo: 0x202a, Hi: 0x202e, Stride: 0x1},
+		unicode.Range16{Lo: 0x2060, Hi: 0x2064, Stride: 0x1},
+		unicode.Range16{Lo: 0x2065, Hi: 0x2065, Stride: 0x1},
+		unicode.Range16{Lo: 0x2066, Hi: 0x206f, Stride: 0x1},
+		unicode.Range16{Lo: 0xfeff, Hi: 0xfeff, Stride: 0x1},
+		unicode.Range16{Lo: 0xfff0, Hi: 0xfff8, Stride: 0x1},
+		unicode.Range16{Lo: 0xfff9, Hi: 0xfffb, Stride: 0x1},
+	},
+	R32: []unicode.Range32{
+		unicode.Range32{Lo: 0x13430, Hi: 0x1343f, Stride: 0x1},
+		unicode.Range32{Lo: 0x1bca0, Hi: 0x1bca3, Stride: 0x1},
+		unicode.Range32{Lo: 0x1d173, Hi: 0x1d17a, Stride: 0x1},
+		unicode.Range32{Lo: 0xe0000, Hi: 0xe0000, Stride: 0x1},
+		unicode.Range32{Lo: 0xe0001, Hi: 0xe0001, Stride: 0x1},
+		unicode.Range32{Lo: 0xe0002, Hi: 0xe001f, Stride: 0x1},
+		unicode.Range32{Lo: 0xe0080, Hi: 0xe00ff, Stride: 0x1},
+		unicode.Range32{Lo: 0xe01f0, Hi: 0xe0fff, Stride: 0x1},
+	},
+	LatinOffset: 5,
+}
+
+var _GraphemeExtend = &unicode.RangeTable{
+	R16: []unicode.Range16{
+		unicode.Range16{Lo: 0x300, Hi: 0x36f, Stride: 0x1},
+		unicode.Range16{Lo: 0x483, Hi: 0x487, Stride: 0x1},
+		unicode.Range16{Lo: 0x488, Hi: 0x489, Stride: 0x1},
+		unicode.Range16{Lo: 0x591, Hi: 0x5bd, Stride: 0x1},
+		unicode.Range16{Lo: 0x5bf, Hi: 0x5bf, Stride: 0x1},
+		unicode.Range16{Lo: 0x5c1, Hi: 0x5c2, Stride: 0x1},
+		unicode.Range16{Lo: 0x5c4, Hi: 0x5c5, Stride: 0x1},
+		unicode.Range16{Lo: 0x5c7, Hi: 0x5c7, Stride: 0x1},
+		unicode.Range16{Lo: 0x610, Hi: 0x61a, Stride: 0x1},
+		unicode.Range16{Lo: 0x64b, Hi: 0x65f, Stride: 0x1},
+		unicode.Range16{Lo: 0x670, Hi: 0x670, Stride: 0x1},
+		unicode.Range16{Lo: 0x6d6, Hi: 0x6dc, Stride: 0x1},
+		unicode.Range16{Lo: 0x6df, Hi: 0x6e4, Stride: 0x1},
+		unicode.Range16{Lo: 0x6e7, Hi: 0x6e8, Stride: 0x1},
+		unicode.Range16{Lo: 0x6ea, Hi: 0x6ed, Stride: 0x1},
+		unicode.Range16{Lo: 0x711, Hi: 0x711, Stride: 0x1},
+		unicode.Range16{Lo: 0x730, Hi: 0x74a, Stride: 0x1},
+		unicode.Range16{Lo: 0x7a6, Hi: 0x7b0, Stride: 0x1},
+		unicode.Range16{Lo: 0x7eb, Hi: 0x7f3, Stride: 0x1},
+		unicode.Range16{Lo: 0x7fd, Hi: 0x7fd, Stride: 0x1},
+		unicode.Range16{Lo: 0x816, Hi: 0x819, Stride: 0x1},
+		unicode.Range16{Lo: 0x81b, Hi: 0x823, Stride: 0x1},
+		unicode.Range16{Lo: 0x825, Hi: 0x827, Stride: 0x1},
+		unicode.Range16{Lo: 0x829, Hi: 0x82d, Stride: 0x1},
+		unicode.Range16{Lo: 0x859, Hi: 0x85b, Stride: 0x1},
+		unicode.Range16{Lo: 0x898, Hi: 0x89f, Stride: 0x1},
+		unicode.Range16{Lo: 0x8ca, Hi: 0x8e1, Stride: 0x1},
+		unicode.Range16{Lo: 0x8e3, Hi: 0x902, Stride: 0x1},
+		unicode.Range16{Lo: 0x93a, Hi: 0x93a, Stride: 0x1},
+		unicode.Range16{Lo: 0x93c, Hi: 0x93c, Stride: 0x1},
+		unicode.Range16{Lo: 0x941, Hi: 0x948, Stride: 0x1},
+		unicode.Range16{Lo: 0x94d, Hi: 0x94d, Stride: 0x1},
+		unicode.Range16{Lo: 0x951, Hi: 0x957, Stride: 0x1},
+		unicode.Range16{Lo: 0x962, Hi: 0x963, Stride: 0x1},
+		unicode.Range16{Lo: 0x981, Hi: 0x981, Stride: 0x1},
+		unicode.Range16{Lo: 0x9bc, Hi: 0x9bc, Stride: 0x1},
+		unicode.Range16{Lo: 0x9be, Hi: 0x9be, Stride: 0x1},
+		unicode.Range16{Lo: 0x9c1, Hi: 0x9c4, Stride: 0x1},
+		unicode.Range16{Lo: 0x9cd, Hi: 0x9cd, Stride: 0x1},
+		unicode.Range16{Lo: 0x9d7, Hi: 0x9d7, Stride: 0x1},
+		unicode.Range16{Lo: 0x9e2, Hi: 0x9e3, Stride: 0x1},
+		unicode.Range16{Lo: 0x9fe, Hi: 0x9fe, Stride: 0x1},
+		unicode.Range16{Lo: 0xa01, Hi: 0xa02, Stride: 0x1},
+		unicode.Range16{Lo: 0xa3c, Hi: 0xa3c, Stride: 0x1},
+		unicode.Range16{Lo: 0xa41, Hi: 0xa42, Stride: 0x1},
+		unicode.Range16{Lo: 0xa47, Hi: 0xa48, Stride: 0x1},
+		unicode.Range16{Lo: 0xa4b, Hi: 0xa4d, Stride: 0x1},
+		unicode.Range16{Lo: 0xa51, Hi: 0xa51, Stride: 0x1},
+		unicode.Range16{Lo: 0xa70, Hi: 0xa71, Stride: 0x1},
+		unicode.Range16{Lo: 0xa75, Hi: 0xa75, Stride: 0x1},
+		unicode.Range16{Lo: 0xa81, Hi: 0xa82, Stride: 0x1},
+		unicode.Range16{Lo: 0xabc, Hi: 0xabc, Stride: 0x1},
+		unicode.Range16{Lo: 0xac1, Hi: 0xac5, Stride: 0x1},
+		unicode.Range16{Lo: 0xac7, Hi: 0xac8, Stride: 0x1},
+		unicode.Range16{Lo: 0xacd, Hi: 0xacd, Stride: 0x1},
+		unicode.Range16{Lo: 0xae2, Hi: 0xae3, Stride: 0x1},
+		unicode.Range16{Lo: 0xafa, Hi: 0xaff, Stride: 0x1},
+		unicode.Range16{Lo: 0xb01, Hi: 0xb01, Stride: 0x1},
+		unicode.Range16{Lo: 0xb3c, Hi: 0xb3c, Stride: 0x1},
+		unicode.Range16{Lo: 0xb3e, Hi: 0xb3e, Stride: 0x1},
+		unicode.Range16{Lo: 0xb3f, Hi: 0xb3f, Stride: 0x1},
+		unicode.Range16{Lo: 0xb41, Hi: 0xb44, Stride: 0x1},
+		unicode.Range16{Lo: 0xb4d, Hi: 0xb4d, Stride: 0x1},
+		unicode.Range16{Lo: 0xb55, Hi: 0xb56, Stride: 0x1},
+		unicode.Range16{Lo: 0xb57, Hi: 0xb57, Stride: 0x1},
+		unicode.Range16{Lo: 0xb62, Hi: 0xb63, Stride: 0x1},
+		unicode.Range16{Lo: 0xb82, Hi: 0xb82, Stride: 0x1},
+		unicode.Range16{Lo: 0xbbe, Hi: 0xbbe, Stride: 0x1},
+		unicode.Range16{Lo: 0xbc0, Hi: 0xbc0, Stride: 0x1},
+		unicode.Range16{Lo: 0xbcd, Hi: 0xbcd, Stride: 0x1},
+		unicode.Range16{Lo: 0xbd7, Hi: 0xbd7, Stride: 0x1},
+		unicode.Range16{Lo: 0xc00, Hi: 0xc00, Stride: 0x1},
+		unicode.Range16{Lo: 0xc04, Hi: 0xc04, Stride: 0x1},
+		unicode.Range16{Lo: 0xc3c, Hi: 0xc3c, Stride: 0x1},
+		unicode.Range16{Lo: 0xc3e, Hi: 0xc40, Stride: 0x1},
+		unicode.Range16{Lo: 0xc46, Hi: 0xc48, Stride: 0x1},
+		unicode.Range16{Lo: 0xc4a, Hi: 0xc4d, Stride: 0x1},
+		unicode.Range16{Lo: 0xc55, Hi: 0xc56, Stride: 0x1},
+		unicode.Range16{Lo: 0xc62, Hi: 0xc63, Stride: 0x1},
+		unicode.Range16{Lo: 0xc81, Hi: 0xc81, Stride: 0x1},
+		unicode.Range16{Lo: 0xcbc, Hi: 0xcbc, Stride: 0x1},
+		unicode.Range16{Lo: 0xcbf, Hi: 0xcbf, Stride: 0x1},
+		unicode.Range16{Lo: 0xcc2, Hi: 0xcc2, Stride: 0x1},
+		unicode.Range16{Lo: 0xcc6, Hi: 0xcc6, Stride: 0x1},
+		unicode.Range16{Lo: 0xccc, Hi: 0xccd, Stride: 0x1},
+		unicode.Range16{Lo: 0xcd5, Hi: 0xcd6, Stride: 0x1},
+		unicode.Range16{Lo: 0xce2, Hi: 0xce3, Stride: 0x1},
+		unicode.Range16{Lo: 0xd00, Hi: 0xd01, Stride: 0x1},
+		unicode.Range16{Lo: 0xd3b, Hi: 0xd3c, Stride: 0x1},
+		unicode.Range16{Lo: 0xd3e, Hi: 0xd3e, Stride: 0x1},
+		unicode.Range16{Lo: 0xd41, Hi: 0xd44, Stride: 0x1},
+		unicode.Range16{Lo: 0xd4d, Hi: 0xd4d, Stride: 0x1},
+		unicode.Range16{Lo: 0xd57, Hi: 0xd57, Stride: 0x1},
+		unicode.Range16{Lo: 0xd62, Hi: 0xd63, Stride: 0x1},
+		unicode.Range16{Lo: 0xd81, Hi: 0xd81, Stride: 0x1},
+		unicode.Range16{Lo: 0xdca, Hi: 0xdca, Stride: 0x1},
+		unicode.Range16{Lo: 0xdcf, Hi: 0xdcf, Stride: 0x1},
+		unicode.Range16{Lo: 0xdd2, Hi: 0xdd4, Stride: 0x1},
+		unicode.Range16{Lo: 0xdd6, Hi: 0xdd6, Stride: 0x1},
+		unicode.Range16{Lo: 0xddf, Hi: 0xddf, Stride: 0x1},
+		unicode.Range16{Lo: 0xe31, Hi: 0xe31, Stride: 0x1},
+		unicode.Range16{Lo: 0xe34, Hi: 0xe3a, Stride: 0x1},
+		unicode.Range16{Lo: 0xe47, Hi: 0xe4e, Stride: 0x1},
+		unicode.Range16{Lo: 0xeb1, Hi: 0xeb1, Stride: 0x1},
+		unicode.Range16{Lo: 0xeb4, Hi: 0xebc, Stride: 0x1},
+		unicode.Range16{Lo: 0xec8, Hi: 0xece, Stride: 0x1},
+		unicode.Range16{Lo: 0xf18, Hi: 0xf19, Stride: 0x1},
+		unicode.Range16{Lo: 0xf35, Hi: 0xf35, Stride: 0x1},
+		unicode.Range16{Lo: 0xf37, Hi: 0xf37, Stride: 0x1},
+		unicode.Range16{Lo: 0xf39, Hi: 0xf39, Stride: 0x1},
+		unicode.Range16{Lo: 0xf71, Hi: 0xf7e, Stride: 0x1},
+		unicode.Range16{Lo: 0xf80, Hi: 0xf84, Stride: 0x1},
+		unicode.Range16{Lo: 0xf86, Hi: 0xf87, Stride: 0x1},
+		unicode.Range16{Lo: 0xf8d, Hi: 0xf97, Stride: 0x1},
+		unicode.Range16{Lo: 0xf99, Hi: 0xfbc, Stride: 0x1},
+		unicode.Range16{Lo: 0xfc6, Hi: 0xfc6, Stride: 0x1},
+		unicode.Range16{Lo: 0x102d, Hi: 0x1030, Stride: 0x1},
+		unicode.Range16{Lo: 0x1032, Hi: 0x1037, Stride: 0x1},
+		unicode.Range16{Lo: 0x1039, Hi: 0x103a, Stride: 0x1},
+		unicode.Range16{Lo: 0x103d, Hi: 0x103e, Stride: 0x1},
+		unicode.Range16{Lo: 0x1058, Hi: 0x1059, Stride: 0x1},
+		unicode.Range16{Lo: 0x105e, Hi: 0x1060, Stride: 0x1},
+		unicode.Range16{Lo: 0x1071, Hi: 0x1074, Stride: 0x1},
+		unicode.Range16{Lo: 0x1082, Hi: 0x1082, Stride: 0x1},
+		unicode.Range16{Lo: 0x1085, Hi: 0x1086, Stride: 0x1},
+		unicode.Range16{Lo: 0x108d, Hi: 0x108d, Stride: 0x1},
+		unicode.Range16{Lo: 0x109d, Hi: 0x109d, Stride: 0x1},
+		unicode.Range16{Lo: 0x135d, Hi: 0x135f, Stride: 0x1},
+		unicode.Range16{Lo: 0x1712, Hi: 0x1714, Stride: 0x1},
+		unicode.Range16{Lo: 0x1732, Hi: 0x1733, Stride: 0x1},
+		unicode.Range16{Lo: 0x1752, Hi: 0x1753, Stride: 0x1},
+		unicode.Range16{Lo: 0x1772, Hi: 0x1773, Stride: 0x1},
+		unicode.Range16{Lo: 0x17b4, Hi: 0x17b5, Stride: 0x1},
+		unicode.Range16{Lo: 0x17b7, Hi: 0x17bd, Stride: 0x1},
+		unicode.Range16{Lo: 0x17c6, Hi: 0x17c6, Stride: 0x1},
+		unicode.Range16{Lo: 0x17c9, Hi: 0x17d3, Stride: 0x1},
+		unicode.Range16{Lo: 0x17dd, Hi: 0x17dd, Stride: 0x1},
+		unicode.Range16{Lo: 0x180b, Hi: 0x180d, Stride: 0x1},
+		unicode.Range16{Lo: 0x180f, Hi: 0x180f, Stride: 0x1},
+		unicode.Range16{Lo: 0x1885, Hi: 0x1886, Stride: 0x1},
+		unicode.Range16{Lo: 0x18a9, Hi: 0x18a9, Stride: 0x1},
+		unicode.Range16{Lo: 0x1920, Hi: 0x1922, Stride: 0x1},
+		unicode.Range16{Lo: 0x1927, Hi: 0x1928, Stride: 0x1},
+		unicode.Range16{Lo: 0x1932, Hi: 0x1932, Stride: 0x1},
+		unicode.Range16{Lo: 0x1939, Hi: 0x193b, Stride: 0x1},
+		unicode.Range16{Lo: 0x1a17, Hi: 0x1a18, Stride: 0x1},
+		unicode.Range16{Lo: 0x1a1b, Hi: 0x1a1b, Stride: 0x1},
+		unicode.Range16{Lo: 0x1a56, Hi: 0x1a56, Stride: 0x1},
+		unicode.Range16{Lo: 0x1a58, Hi: 0x1a5e, Stride: 0x1},
+		unicode.Range16{Lo: 0x1a60, Hi: 0x1a60, Stride: 0x1},
+		unicode.Range16{Lo: 0x1a62, Hi: 0x1a62, Stride: 0x1},
+		unicode.Range16{Lo: 0x1a65, Hi: 0x1a6c, Stride: 0x1},
+		unicode.Range16{Lo: 0x1a73, Hi: 0x1a7c, Stride: 0x1},
+		unicode.Range16{Lo: 0x1a7f, Hi: 0x1a7f, Stride: 0x1},
+		unicode.Range16{Lo: 0x1ab0, Hi: 0x1abd, Stride: 0x1},
+		unicode.Range16{Lo: 0x1abe, Hi: 0x1abe, Stride: 0x1},
+		unicode.Range16{Lo: 0x1abf, Hi: 0x1ace, Stride: 0x1},
+		unicode.Range16{Lo: 0x1b00, Hi: 0x1b03, Stride: 0x1},
+		unicode.Range16{Lo: 0x1b34, Hi: 0x1b34, Stride: 0x1},
+		unicode.Range16{Lo: 0x1b35, Hi: 0x1b35, Stride: 0x1},
+		unicode.Range16{Lo: 0x1b36, Hi: 0x1b3a, Stride: 0x1},
+		unicode.Range16{Lo: 0x1b3c, Hi: 0x1b3c, Stride: 0x1},
+		unicode.Range16{Lo: 0x1b42, Hi: 0x1b42, Stride: 0x1},
+		unicode.Range16{Lo: 0x1b6b, Hi: 0x1b73, Stride: 0x1},
+		unicode.Range16{Lo: 0x1b80, Hi: 0x1b81, Stride: 0x1},
+		unicode.Range16{Lo: 0x1ba2, Hi: 0x1ba5, Stride: 0x1},
+		unicode.Range16{Lo: 0x1ba8, Hi: 0x1ba9, Stride: 0x1},
+		unicode.Range16{Lo: 0x1bab, Hi: 0x1bad, Stride: 0x1},
+		unicode.Range16{Lo: 0x1be6, Hi: 0x1be6, Stride: 0x1},
+		unicode.Range16{Lo: 0x1be8, Hi: 0x1be9, Stride: 0x1},
+		unicode.Range16{Lo: 0x1bed, Hi: 0x1bed, Stride: 0x1},
+		unicode.Range16{Lo: 0x1bef, Hi: 0x1bf1, Stride: 0x1},
+		unicode.Range16{Lo: 0x1c2c, Hi: 0x1c33, Stride: 0x1},
+		unicode.Range16{Lo: 0x1c36, Hi: 0x1c37, Stride: 0x1},
+		unicode.Range16{Lo: 0x1cd0, Hi: 0x1cd2, Stride: 0x1},
+		unicode.Range16{Lo: 0x1cd4, Hi: 0x1ce0, Stride: 0x1},
+		unicode.Range16{Lo: 0x1ce2, Hi: 0x1ce8, Stride: 0x1},
+		unicode.Range16{Lo: 0x1ced, Hi: 0x1ced, Stride: 0x1},
+		unicode.Range16{Lo: 0x1cf4, Hi: 0x1cf4, Stride: 0x1},
+		unicode.Range16{Lo: 0x1cf8, Hi: 0x1cf9, Stride: 0x1},
+		unicode.Range16{Lo: 0x1dc0, Hi: 0x1dff, Stride: 0x1},
+		unicode.Range16{Lo: 0x200c, Hi: 0x200c, Stride: 0x1},
+		unicode.Range16{Lo: 0x20d0, Hi: 0x20dc, Stride: 0x1},
+		unicode.Range16{Lo: 0x20dd, Hi: 0x20e0, Stride: 0x1},
+		unicode.Range16{Lo: 0x20e1, Hi: 0x20e1, Stride: 0x1},
+		unicode.Range16{Lo: 0x20e2, Hi: 0x20e4, Stride: 0x1},
+		unicode.Range16{Lo: 0x20e5, Hi: 0x20f0, Stride: 0x1},
+		unicode.Range16{Lo: 0x2cef, Hi: 0x2cf1, Stride: 0x1},
+		unicode.Range16{Lo: 0x2d7f, Hi: 0x2d7f, Stride: 0x1},
+		unicode.Range16{Lo: 0x2de0, Hi: 0x2dff, Stride: 0x1},
+		unicode.Range16{Lo: 0x302a, Hi: 0x302d, Stride: 0x1},
+		unicode.Range16{Lo: 0x302e, Hi: 0x302f, Stride: 0x1},
+		unicode.Range16{Lo: 0x3099, Hi: 0x309a, Stride: 0x1},
+		unicode.Range16{Lo: 0xa66f, Hi: 0xa66f, Stride: 0x1},
+		unicode.Range16{Lo: 0xa670, Hi: 0xa672, Stride: 0x1},
+		unicode.Range16{Lo: 0xa674, Hi: 0xa67d, Stride: 0x1},
+		unicode.Range16{Lo: 0xa69e, Hi: 0xa69f, Stride: 0x1},
+		unicode.Range16{Lo: 0xa6f0, Hi: 0xa6f1, Stride: 0x1},
+		unicode.Range16{Lo: 0xa802, Hi: 0xa802, Stride: 0x1},
+		unicode.Range16{Lo: 0xa806, Hi: 0xa806, Stride: 0x1},
+		unicode.Range16{Lo: 0xa80b, Hi: 0xa80b, Stride: 0x1},
+		unicode.Range16{Lo: 0xa825, Hi: 0xa826, Stride: 0x1},
+		unicode.Range16{Lo: 0xa82c, Hi: 0xa82c, Stride: 0x1},
+		unicode.Range16{Lo: 0xa8c4, Hi: 0xa8c5, Stride: 0x1},
+		unicode.Range16{Lo: 0xa8e0, Hi: 0xa8f1, Stride: 0x1},
+		unicode.Range16{Lo: 0xa8ff, Hi: 0xa8ff, Stride: 0x1},
+		unicode.Range16{Lo: 0xa926, Hi: 0xa92d, Stride: 0x1},
+		unicode.Range16{Lo: 0xa947, Hi: 0xa951, Stride: 0x1},
+		unicode.Range16{Lo: 0xa980, Hi: 0xa982, Stride: 0x1},
+		unicode.Range16{Lo: 0xa9b3, Hi: 0xa9b3, Stride: 0x1},
+		unicode.Range16{Lo: 0xa9b6, Hi: 0xa9b9, Stride: 0x1},
+		unicode.Range16{Lo: 0xa9bc, Hi: 0xa9bd, Stride: 0x1},
+		unicode.Range16{Lo: 0xa9e5, Hi: 0xa9e5, Stride: 0x1},
+		unicode.Range16{Lo: 0xaa29, Hi: 0xaa2e, Stride: 0x1},
+		unicode.Range16{Lo: 0xaa31, Hi: 0xaa32, Stride: 0x1},
+		unicode.Range16{Lo: 0xaa35, Hi: 0xaa36, Stride: 0x1},
+		unicode.Range16{Lo: 0xaa43, Hi: 0xaa43, Stride: 0x1},
+		unicode.Range16{Lo: 0xaa4c, Hi: 0xaa4c, Stride: 0x1},
+		unicode.Range16{Lo: 0xaa7c, Hi: 0xaa7c, Stride: 0x1},
+		unicode.Range16{Lo: 0xaab0, Hi: 0xaab0, Stride: 0x1},
+		unicode.Range16{Lo: 0xaab2, Hi: 0xaab4, Stride: 0x1},
+		unicode.Range16{Lo: 0xaab7, Hi: 0xaab8, Stride: 0x1},
+		unicode.Range16{Lo: 0xaabe, Hi: 0xaabf, Stride: 0x1},
+		unicode.Range16{Lo: 0xaac1, Hi: 0xaac1, Stride: 0x1},
+		unicode.Range16{Lo: 0xaaec, Hi: 0xaaed, Stride: 0x1},
+		unicode.Range16{Lo: 0xaaf6, Hi: 0xaaf6, Stride: 0x1},
+		unicode.Range16{Lo: 0xabe5, Hi: 0xabe5, Stride: 0x1},
+		unicode.Range16{Lo: 0xabe8, Hi: 0xabe8, Stride: 0x1},
+		unicode.Range16{Lo: 0xabed, Hi: 0xabed, Stride: 0x1},
+		unicode.Range16{Lo: 0xfb1e, Hi: 0xfb1e, Stride: 0x1},
+		unicode.Range16{Lo: 0xfe00, Hi: 0xfe0f, Stride: 0x1},
+		unicode.Range16{Lo: 0xfe20, Hi: 0xfe2f, Stride: 0x1},
+		unicode.Range16{Lo: 0xff9e, Hi: 0xff9f, Stride: 0x1},
+	},
+	R32: []unicode.Range32{
+		unicode.Range32{Lo: 0x101fd, Hi: 0x101fd, Stride: 0x1},
+		unicode.Range32{Lo: 0x102e0, Hi: 0x102e0, Stride: 0x1},
+		unicode.Range32{Lo: 0x10376, Hi: 0x1037a, Stride: 0x1},
+		unicode.Range32{Lo: 0x10a01, Hi: 0x10a03, Stride: 0x1},
+		unicode.Range32{Lo: 0x10a05, Hi: 0x10a06, Stride: 0x1},
+		unicode.Range32{Lo: 0x10a0c, Hi: 0x10a0f, Stride: 0x1},
+		unicode.Range32{Lo: 0x10a38, Hi: 0x10a3a, Stride: 0x1},
+		unicode.Range32{Lo: 0x10a3f, Hi: 0x10a3f, Stride: 0x1},
+		unicode.Range32{Lo: 0x10ae5, Hi: 0x10ae6, Stride: 0x1},
+		unicode.Range32{Lo: 0x10d24, Hi: 0x10d27, Stride: 0x1},
+		unicode.Range32{Lo: 0x10eab, Hi: 0x10eac, Stride: 0x1},
+		unicode.Range32{Lo: 0x10efd, Hi: 0x10eff, Stride: 0x1},
+		unicode.Range32{Lo: 0x10f46, Hi: 0x10f50, Stride: 0x1},
+		unicode.Range32{Lo: 0x10f82, Hi: 0x10f85, Stride: 0x1},
+		unicode.Range32{Lo: 0x11001, Hi: 0x11001, Stride: 0x1},
+		unicode.Range32{Lo: 0x11038, Hi: 0x11046, Stride: 0x1},
+		unicode.Range32{Lo: 0x11070, Hi: 0x11070, Stride: 0x1},
+		unicode.Range32{Lo: 0x11073, Hi: 0x11074, Stride: 0x1},
+		unicode.Range32{Lo: 0x1107f, Hi: 0x11081, Stride: 0x1},
+		unicode.Range32{Lo: 0x110b3, Hi: 0x110b6, Stride: 0x1},
+		unicode.Range32{Lo: 0x110b9, Hi: 0x110ba, Stride: 0x1},
+		unicode.Range32{Lo: 0x110c2, Hi: 0x110c2, Stride: 0x1},
+		unicode.Range32{Lo: 0x11100, Hi: 0x11102, Stride: 0x1},
+		unicode.Range32{Lo: 0x11127, Hi: 0x1112b, Stride: 0x1},
+		unicode.Range32{Lo: 0x1112d, Hi: 0x11134, Stride: 0x1},
+		unicode.Range32{Lo: 0x11173, Hi: 0x11173, Stride: 0x1},
+		unicode.Range32{Lo: 0x11180, Hi: 0x11181, Stride: 0x1},
+		unicode.Range32{Lo: 0x111b6, Hi: 0x111be, Stride: 0x1},
+		unicode.Range32{Lo: 0x111c9, Hi: 0x111cc, Stride: 0x1},
+		unicode.Range32{Lo: 0x111cf, Hi: 0x111cf, Stride: 0x1},
+		unicode.Range32{Lo: 0x1122f, Hi: 0x11231, Stride: 0x1},
+		unicode.Range32{Lo: 0x11234, Hi: 0x11234, Stride: 0x1},
+		unicode.Range32{Lo: 0x11236, Hi: 0x11237, Stride: 0x1},
+		unicode.Range32{Lo: 0x1123e, Hi: 0x1123e, Stride: 0x1},
+		unicode.Range32{Lo: 0x11241, Hi: 0x11241, Stride: 0x1},
+		unicode.Range32{Lo: 0x112df, Hi: 0x112df, Stride: 0x1},
+		unicode.Range32{Lo: 0x112e3, Hi: 0x112ea, Stride: 0x1},
+		unicode.Range32{Lo: 0x11300, Hi: 0x11301, Stride: 0x1},
+		unicode.Range32{Lo: 0x1133b, Hi: 0x1133c, Stride: 0x1},
+		unicode.Range32{Lo: 0x1133e, Hi: 0x1133e, Stride: 0x1},
+		unicode.Range32{Lo: 0x11340, Hi: 0x11340, Stride: 0x1},
+		unicode.Range32{Lo: 0x11357, Hi: 0x11357, Stride: 0x1},
+		unicode.Range32{Lo: 0x11366, Hi: 0x1136c, Stride: 0x1},
+		unicode.Range32{Lo: 0x11370, Hi: 0x11374, Stride: 0x1},
+		unicode.Range32{Lo: 0x11438, Hi: 0x1143f, Stride: 0x1},
+		unicode.Range32{Lo: 0x11442, Hi: 0x11444, Stride: 0x1},
+		unicode.Range32{Lo: 0x11446, Hi: 0x11446, Stride: 0x1},
+		unicode.Range32{Lo: 0x1145e, Hi: 0x1145e, Stride: 0x1},
+		unicode.Range32{Lo: 0x114b0, Hi: 0x114b0, Stride: 0x1},
+		unicode.Range32{Lo: 0x114b3, Hi: 0x114b8, Stride: 0x1},
+		unicode.Range32{Lo: 0x114ba, Hi: 0x114ba, Stride: 0x1},
+		unicode.Range32{Lo: 0x114bd, Hi: 0x114bd, Stride: 0x1},
+		unicode.Range32{Lo: 0x114bf, Hi: 0x114c0, Stride: 0x1},
+		unicode.Range32{Lo: 0x114c2, Hi: 0x114c3, Stride: 0x1},
+		unicode.Range32{Lo: 0x115af, Hi: 0x115af, Stride: 0x1},
+		unicode.Range32{Lo: 0x115b2, Hi: 0x115b5, Stride: 0x1},
+		unicode.Range32{Lo: 0x115bc, Hi: 0x115bd, Stride: 0x1},
+		unicode.Range32{Lo: 0x115bf, Hi: 0x115c0, Stride: 0x1},
+		unicode.Range32{Lo: 0x115dc, Hi: 0x115dd, Stride: 0x1},
+		unicode.Range32{Lo: 0x11633, Hi: 0x1163a, Stride: 0x1},
+		unicode.Range32{Lo: 0x1163d, Hi: 0x1163d, Stride: 0x1},
+		unicode.Range32{Lo: 0x1163f, Hi: 0x11640, Stride: 0x1},
+		unicode.Range32{Lo: 0x116ab, Hi: 0x116ab, Stride: 0x1},
+		unicode.Range32{Lo: 0x116ad, Hi: 0x116ad, Stride: 0x1},
+		unicode.Range32{Lo: 0x116b0, Hi: 0x116b5, Stride: 0x1},
+		unicode.Range32{Lo: 0x116b7, Hi: 0x116b7, Stride: 0x1},
+		unicode.Range32{Lo: 0x1171d, Hi: 0x1171f, Stride: 0x1},
+		unicode.Range32{Lo: 0x11722, Hi: 0x11725, Stride: 0x1},
+		unicode.Range32{Lo: 0x11727, Hi: 0x1172b, Stride: 0x1},
+		unicode.Range32{Lo: 0x1182f, Hi: 0x11837, Stride: 0x1},
+		unicode.Range32{Lo: 0x11839, Hi: 0x1183a, Stride: 0x1},
+		unicode.Range32{Lo: 0x11930, Hi: 0x11930, Stride: 0x1},
+		unicode.Range32{Lo: 0x1193b, Hi: 0x1193c, Stride: 0x1},
+		unicode.Range32{Lo: 0x1193e, Hi: 0x1193e, Stride: 0x1},
+		unicode.Range32{Lo: 0x11943, Hi: 0x11943, Stride: 0x1},
+		unicode.Range32{Lo: 0x119d4, Hi: 0x119d7, Stride: 0x1},
+		unicode.Range32{Lo: 0x119da, Hi: 0x119db, Stride: 0x1},
+		unicode.Range32{Lo: 0x119e0, Hi: 0x119e0, Stride: 0x1},
+		unicode.Range32{Lo: 0x11a01, Hi: 0x11a0a, Stride: 0x1},
+		unicode.Range32{Lo: 0x11a33, Hi: 0x11a38, Stride: 0x1},
+		unicode.Range32{Lo: 0x11a3b, Hi: 0x11a3e, Stride: 0x1},
+		unicode.Range32{Lo: 0x11a47, Hi: 0x11a47, Stride: 0x1},
+		unicode.Range32{Lo: 0x11a51, Hi: 0x11a56, Stride: 0x1},
+		unicode.Range32{Lo: 0x11a59, Hi: 0x11a5b, Stride: 0x1},
+		unicode.Range32{Lo: 0x11a8a, Hi: 0x11a96, Stride: 0x1},
+		unicode.Range32{Lo: 0x11a98, Hi: 0x11a99, Stride: 0x1},
+		unicode.Range32{Lo: 0x11c30, Hi: 0x11c36, Stride: 0x1},
+		unicode.Range32{Lo: 0x11c38, Hi: 0x11c3d, Stride: 0x1},
+		unicode.Range32{Lo: 0x11c3f, Hi: 0x11c3f, Stride: 0x1},
+		unicode.Range32{Lo: 0x11c92, Hi: 0x11ca7, Stride: 0x1},
+		unicode.Range32{Lo: 0x11caa, Hi: 0x11cb0, Stride: 0x1},
+		unicode.Range32{Lo: 0x11cb2, Hi: 0x11cb3, Stride: 0x1},
+		unicode.Range32{Lo: 0x11cb5, Hi: 0x11cb6, Stride: 0x1},
+		unicode.Range32{Lo: 0x11d31, Hi: 0x11d36, Stride: 0x1},
+		unicode.Range32{Lo: 0x11d3a, Hi: 0x11d3a, Stride: 0x1},
+		unicode.Range32{Lo: 0x11d3c, Hi: 0x11d3d, Stride: 0x1},
+		unicode.Range32{Lo: 0x11d3f, Hi: 0x11d45, Stride: 0x1},
+		unicode.Range32{Lo: 0x11d47, Hi: 0x11d47, Stride: 0x1},
+		unicode.Range32{Lo: 0x11d90, Hi: 0x11d91, Stride: 0x1},
+		unicode.Range32{Lo: 0x11d95, Hi: 0x11d95, Stride: 0x1},
+		unicode.Range32{Lo: 0x11d97, Hi: 0x11d97, Stride: 0x1},
+		unicode.Range32{Lo: 0x11ef3, Hi: 0x11ef4, Stride: 0x1},
+		unicode.Range32{Lo: 0x11f00, Hi: 0x11f01, Stride: 0x1},
+		unicode.Range32{Lo: 0x11f36, Hi: 0x11f3a, Stride: 0x1},
+		unicode.Range32{Lo: 0x11f40, Hi: 0x11f40, Stride: 0x1},
+		unicode.Range32{Lo: 0x11f42, Hi: 0x11f42, Stride: 0x1},
+		unicode.Range32{Lo: 0x13440, Hi: 0x13440, Stride: 0x1},
+		unicode.Range32{Lo: 0x13447, Hi: 0x13455, Stride: 0x1},
+		unicode.Range32{Lo: 0x16af0, Hi: 0x16af4, Stride: 0x1},
+		unicode.Range32{Lo: 0x16b30, Hi: 0x16b36, Stride: 0x1},
+		unicode.Range32{Lo: 0x16f4f, Hi: 0x16f4f, Stride: 0x1},
+		unicode.Range32{Lo: 0x16f8f, Hi: 0x16f92, Stride: 0x1},
+		unicode.Range32{Lo: 0x16fe4, Hi: 0x16fe4, Stride: 0x1},
+		unicode.Range32{Lo: 0x1bc9d, Hi: 0x1bc9e, Stride: 0x1},
+		unicode.Range32{Lo: 0x1cf00, Hi: 0x1cf2d, Stride: 0x1},
+		unicode.Range32{Lo: 0x1cf30, Hi: 0x1cf46, Stride: 0x1},
+		unicode.Range32{Lo: 0x1d165, Hi: 0x1d165, Stride: 0x1},
+		unicode.Range32{Lo: 0x1d167, Hi: 0x1d169, Stride: 0x1},
+		unicode.Range32{Lo: 0x1d16e, Hi: 0x1d172, Stride: 0x1},
+		unicode.Range32{Lo: 0x1d17b, Hi: 0x1d182, Stride: 0x1},
+		unicode.Range32{Lo: 0x1d185, Hi: 0x1d18b, Stride: 0x1},
+		unicode.Range32{Lo: 0x1d1aa, Hi: 0x1d1ad, Stride: 0x1},
+		unicode.Range32{Lo: 0x1d242, Hi: 0x1d244, Stride: 0x1},
+		unicode.Range32{Lo: 0x1da00, Hi: 0x1da36, Stride: 0x1},
+		unicode.Range32{Lo: 0x1da3b, Hi: 0x1da6c, Stride: 0x1},
+		unicode.Range32{Lo: 0x1da75, Hi: 0x1da75, Stride: 0x1},
+		unicode.Range32{Lo: 0x1da84, Hi: 0x1da84, Stride: 0x1},
+		unicode.Range32{Lo: 0x1da9b, Hi: 0x1da9f, Stride: 0x1},
+		unicode.Range32{Lo: 0x1daa1, Hi: 0x1daaf, Stride: 0x1},
+		unicode.Range32{Lo: 0x1e000, Hi: 0x1e006, Stride: 0x1},
+		unicode.Range32{Lo: 0x1e008, Hi: 0x1e018, Stride: 0x1},
+		unicode.Range32{Lo: 0x1e01b, Hi: 0x1e021, Stride: 0x1},
+		unicode.Range32{Lo: 0x1e023, Hi: 0x1e024, Stride: 0x1},
+		unicode.Range32{Lo: 0x1e026, Hi: 0x1e02a, Stride: 0x1},
+		unicode.Range32{Lo: 0x1e08f, Hi: 0x1e08f, Stride: 0x1},
+		unicode.Range32{Lo: 0x1e130, Hi: 0x1e136, Stride: 0x1},
+		unicode.Range32{Lo: 0x1e2ae, Hi: 0x1e2ae, Stride: 0x1},
+		unicode.Range32{Lo: 0x1e2ec, Hi: 0x1e2ef, Stride: 0x1},
+		unicode.Range32{Lo: 0x1e4ec, Hi: 0x1e4ef, Stride: 0x1},
+		unicode.Range32{Lo: 0x1e8d0, Hi: 0x1e8d6, Stride: 0x1},
+		unicode.Range32{Lo: 0x1e944, Hi: 0x1e94a, Stride: 0x1},
+		unicode.Range32{Lo: 0x1f3fb, Hi: 0x1f3ff, Stride: 0x1},
+		unicode.Range32{Lo: 0xe0020, Hi: 0xe007f, Stride: 0x1},
+		unicode.Range32{Lo: 0xe0100, Hi: 0xe01ef, Stride: 0x1},
+	},
+	LatinOffset: 0,
+}
+
+var _GraphemeL = &unicode.RangeTable{
+	R16: []unicode.Range16{
+		unicode.Range16{Lo: 0x1100, Hi: 0x115f, Stride: 0x1},
+		unicode.Range16{Lo: 0xa960, Hi: 0xa97c, Stride: 0x1},
+	},
+	LatinOffset: 0,
+}
+
+var _GraphemeLF = &unicode.RangeTable{
+	R16: []unicode.Range16{
+		unicode.Range16{Lo: 0xa, Hi: 0xa, Stride: 0x1},
+	},
+	LatinOffset: 1,
+}
+
+var _GraphemeLV = &unicode.RangeTable{
+	R16: []unicode.Range16{
+		unicode.Range16{Lo: 0xac00, Hi: 0xac00, Stride: 0x1},
+		unicode.Range16{Lo: 0xac1c, Hi: 0xac1c, Stride: 0x1},
+		unicode.Range16{Lo: 0xac38, Hi: 0xac38, Stride: 0x1},
+		unicode.Range16{Lo: 0xac54, Hi: 0xac54, Stride: 0x1},
+		unicode.Range16{Lo: 0xac70, Hi: 0xac70, Stride: 0x1},
+		unicode.Range16{Lo: 0xac8c, Hi: 0xac8c, Stride: 0x1},
+		unicode.Range16{Lo: 0xaca8, Hi: 0xaca8, Stride: 0x1},
+		unicode.Range16{Lo: 0xacc4, Hi: 0xacc4, Stride: 0x1},
+		unicode.Range16{Lo: 0xace0, Hi: 0xace0, Stride: 0x1},
+		unicode.Range16{Lo: 0xacfc, Hi: 0xacfc, Stride: 0x1},
+		unicode.Range16{Lo: 0xad18, Hi: 0xad18, Stride: 0x1},
+		unicode.Range16{Lo: 0xad34, Hi: 0xad34, Stride: 0x1},
+		unicode.Range16{Lo: 0xad50, Hi: 0xad50, Stride: 0x1},
+		unicode.Range16{Lo: 0xad6c, Hi: 0xad6c, Stride: 0x1},
+		unicode.Range16{Lo: 0xad88, Hi: 0xad88, Stride: 0x1},
+		unicode.Range16{Lo: 0xada4, Hi: 0xada4, Stride: 0x1},
+		unicode.Range16{Lo: 0xadc0, Hi: 0xadc0, Stride: 0x1},
+		unicode.Range16{Lo: 0xaddc, Hi: 0xaddc, Stride: 0x1},
+		unicode.Range16{Lo: 0xadf8, Hi: 0xadf8, Stride: 0x1},
+		unicode.Range16{Lo: 0xae14, Hi: 0xae14, Stride: 0x1},
+		unicode.Range16{Lo: 0xae30, Hi: 0xae30, Stride: 0x1},
+		unicode.Range16{Lo: 0xae4c, Hi: 0xae4c, Stride: 0x1},
+		unicode.Range16{Lo: 0xae68, Hi: 0xae68, Stride: 0x1},
+		unicode.Range16{Lo: 0xae84, Hi: 0xae84, Stride: 0x1},
+		unicode.Range16{Lo: 0xaea0, Hi: 0xaea0, Stride: 0x1},
+		unicode.Range16{Lo: 0xaebc, Hi: 0xaebc, Stride: 0x1},
+		unicode.Range16{Lo: 0xaed8, Hi: 0xaed8, Stride: 0x1},
+		unicode.Range16{Lo: 0xaef4, Hi: 0xaef4, Stride: 0x1},
+		unicode.Range16{Lo: 0xaf10, Hi: 0xaf10, Stride: 0x1},
+		unicode.Range16{Lo: 0xaf2c, Hi: 0xaf2c, Stride: 0x1},
+		unicode.Range16{Lo: 0xaf48, Hi: 0xaf48, Stride: 0x1},
+		unicode.Range16{Lo: 0xaf64, Hi: 0xaf64, Stride: 0x1},
+		unicode.Range16{Lo: 0xaf80, Hi: 0xaf80, Stride: 0x1},
+		unicode.Range16{Lo: 0xaf9c, Hi: 0xaf9c, Stride: 0x1},
+		unicode.Range16{Lo: 0xafb8, Hi: 0xafb8, Stride: 0x1},
+		unicode.Range16{Lo: 0xafd4, Hi: 0xafd4, Stride: 0x1},
+		unicode.Range16{Lo: 0xaff0, Hi: 0xaff0, Stride: 0x1},
+		unicode.Range16{Lo: 0xb00c, Hi: 0xb00c, Stride: 0x1},
+		unicode.Range16{Lo: 0xb028, Hi: 0xb028, Stride: 0x1},
+		unicode.Range16{Lo: 0xb044, Hi: 0xb044, Stride: 0x1},
+		unicode.Range16{Lo: 0xb060, Hi: 0xb060, Stride: 0x1},
+		unicode.Range16{Lo: 0xb07c, Hi: 0xb07c, Stride: 0x1},
+		unicode.Range16{Lo: 0xb098, Hi: 0xb098, Stride: 0x1},
+		unicode.Range16{Lo: 0xb0b4, Hi: 0xb0b4, Stride: 0x1},
+		unicode.Range16{Lo: 0xb0d0, Hi: 0xb0d0, Stride: 0x1},
+		unicode.Range16{Lo: 0xb0ec, Hi: 0xb0ec, Stride: 0x1},
+		unicode.Range16{Lo: 0xb108, Hi: 0xb108, Stride: 0x1},
+		unicode.Range16{Lo: 0xb124, Hi: 0xb124, Stride: 0x1},
+		unicode.Range16{Lo: 0xb140, Hi: 0xb140, Stride: 0x1},
+		unicode.Range16{Lo: 0xb15c, Hi: 0xb15c, Stride: 0x1},
+		unicode.Range16{Lo: 0xb178, Hi: 0xb178, Stride: 0x1},
+		unicode.Range16{Lo: 0xb194, Hi: 0xb194, Stride: 0x1},
+		unicode.Range16{Lo: 0xb1b0, Hi: 0xb1b0, Stride: 0x1},
+		unicode.Range16{Lo: 0xb1cc, Hi: 0xb1cc, Stride: 0x1},
+		unicode.Range16{Lo: 0xb1e8, Hi: 0xb1e8, Stride: 0x1},
+		unicode.Range16{Lo: 0xb204, Hi: 0xb204, Stride: 0x1},
+		unicode.Range16{Lo: 0xb220, Hi: 0xb220, Stride: 0x1},
+		unicode.Range16{Lo: 0xb23c, Hi: 0xb23c, Stride: 0x1},
+		unicode.Range16{Lo: 0xb258, Hi: 0xb258, Stride: 0x1},
+		unicode.Range16{Lo: 0xb274, Hi: 0xb274, Stride: 0x1},
+		unicode.Range16{Lo: 0xb290, Hi: 0xb290, Stride: 0x1},
+		unicode.Range16{Lo: 0xb2ac, Hi: 0xb2ac, Stride: 0x1},
+		unicode.Range16{Lo: 0xb2c8, Hi: 0xb2c8, Stride: 0x1},
+		unicode.Range16{Lo: 0xb2e4, Hi: 0xb2e4, Stride: 0x1},
+		unicode.Range16{Lo: 0xb300, Hi: 0xb300, Stride: 0x1},
+		unicode.Range16{Lo: 0xb31c, Hi: 0xb31c, Stride: 0x1},
+		unicode.Range16{Lo: 0xb338, Hi: 0xb338, Stride: 0x1},
+		unicode.Range16{Lo: 0xb354, Hi: 0xb354, Stride: 0x1},
+		unicode.Range16{Lo: 0xb370, Hi: 0xb370, Stride: 0x1},
+		unicode.Range16{Lo: 0xb38c, Hi: 0xb38c, Stride: 0x1},
+		unicode.Range16{Lo: 0xb3a8, Hi: 0xb3a8, Stride: 0x1},
+		unicode.Range16{Lo: 0xb3c4, Hi: 0xb3c4, Stride: 0x1},
+		unicode.Range16{Lo: 0xb3e0, Hi: 0xb3e0, Stride: 0x1},
+		unicode.Range16{Lo: 0xb3fc, Hi: 0xb3fc, Stride: 0x1},
+		unicode.Range16{Lo: 0xb418, Hi: 0xb418, Stride: 0x1},
+		unicode.Range16{Lo: 0xb434, Hi: 0xb434, Stride: 0x1},
+		unicode.Range16{Lo: 0xb450, Hi: 0xb450, Stride: 0x1},
+		unicode.Range16{Lo: 0xb46c, Hi: 0xb46c, Stride: 0x1},
+		unicode.Range16{Lo: 0xb488, Hi: 0xb488, Stride: 0x1},
+		unicode.Range16{Lo: 0xb4a4, Hi: 0xb4a4, Stride: 0x1},
+		unicode.Range16{Lo: 0xb4c0, Hi: 0xb4c0, Stride: 0x1},
+		unicode.Range16{Lo: 0xb4dc, Hi: 0xb4dc, Stride: 0x1},
+		unicode.Range16{Lo: 0xb4f8, Hi: 0xb4f8, Stride: 0x1},
+		unicode.Range16{Lo: 0xb514, Hi: 0xb514, Stride: 0x1},
+		unicode.Range16{Lo: 0xb530, Hi: 0xb530, Stride: 0x1},
+		unicode.Range16{Lo: 0xb54c, Hi: 0xb54c, Stride: 0x1},
+		unicode.Range16{Lo: 0xb568, Hi: 0xb568, Stride: 0x1},
+		unicode.Range16{Lo: 0xb584, Hi: 0xb584, Stride: 0x1},
+		unicode.Range16{Lo: 0xb5a0, Hi: 0xb5a0, Stride: 0x1},
+		unicode.Range16{Lo: 0xb5bc, Hi: 0xb5bc, Stride: 0x1},
+		unicode.Range16{Lo: 0xb5d8, Hi: 0xb5d8, Stride: 0x1},
+		unicode.Range16{Lo: 0xb5f4, Hi: 0xb5f4, Stride: 0x1},
+		unicode.Range16{Lo: 0xb610, Hi: 0xb610, Stride: 0x1},
+		unicode.Range16{Lo: 0xb62c, Hi: 0xb62c, Stride: 0x1},
+		unicode.Range16{Lo: 0xb648, Hi: 0xb648, Stride: 0x1},
+		unicode.Range16{Lo: 0xb664, Hi: 0xb664, Stride: 0x1},
+		unicode.Range16{Lo: 0xb680, Hi: 0xb680, Stride: 0x1},
+		unicode.Range16{Lo: 0xb69c, Hi: 0xb69c, Stride: 0x1},
+		unicode.Range16{Lo: 0xb6b8, Hi: 0xb6b8, Stride: 0x1},
+		unicode.Range16{Lo: 0xb6d4, Hi: 0xb6d4, Stride: 0x1},
+		unicode.Range16{Lo: 0xb6f0, Hi: 0xb6f0, Stride: 0x1},
+		unicode.Range16{Lo: 0xb70c, Hi: 0xb70c, Stride: 0x1},
+		unicode.Range16{Lo: 0xb728, Hi: 0xb728, Stride: 0x1},
+		unicode.Range16{Lo: 0xb744, Hi: 0xb744, Stride: 0x1},
+		unicode.Range16{Lo: 0xb760, Hi: 0xb760, Stride: 0x1},
+		unicode.Range16{Lo: 0xb77c, Hi: 0xb77c, Stride: 0x1},
+		unicode.Range16{Lo: 0xb798, Hi: 0xb798, Stride: 0x1},
+		unicode.Range16{Lo: 0xb7b4, Hi: 0xb7b4, Stride: 0x1},
+		unicode.Range16{Lo: 0xb7d0, Hi: 0xb7d0, Stride: 0x1},
+		unicode.Range16{Lo: 0xb7ec, Hi: 0xb7ec, Stride: 0x1},
+		unicode.Range16{Lo: 0xb808, Hi: 0xb808, Stride: 0x1},
+		unicode.Range16{Lo: 0xb824, Hi: 0xb824, Stride: 0x1},
+		unicode.Range16{Lo: 0xb840, Hi: 0xb840, Stride: 0x1},
+		unicode.Range16{Lo: 0xb85c, Hi: 0xb85c, Stride: 0x1},
+		unicode.Range16{Lo: 0xb878, Hi: 0xb878, Stride: 0x1},
+		unicode.Range16{Lo: 0xb894, Hi: 0xb894, Stride: 0x1},
+		unicode.Range16{Lo: 0xb8b0, Hi: 0xb8b0, Stride: 0x1},
+		unicode.Range16{Lo: 0xb8cc, Hi: 0xb8cc, Stride: 0x1},
+		unicode.Range16{Lo: 0xb8e8, Hi: 0xb8e8, Stride: 0x1},
+		unicode.Range16{Lo: 0xb904, Hi: 0xb904, Stride: 0x1},
+		unicode.Range16{Lo: 0xb920, Hi: 0xb920, Stride: 0x1},
+		unicode.Range16{Lo: 0xb93c, Hi: 0xb93c, Stride: 0x1},
+		unicode.Range16{Lo: 0xb958, Hi: 0xb958, Stride: 0x1},
+		unicode.Range16{Lo: 0xb974, Hi: 0xb974, Stride: 0x1},
+		unicode.Range16{Lo: 0xb990, Hi: 0xb990, Stride: 0x1},
+		unicode.Range16{Lo: 0xb9ac, Hi: 0xb9ac, Stride: 0x1},
+		unicode.Range16{Lo: 0xb9c8, Hi: 0xb9c8, Stride: 0x1},
+		unicode.Range16{Lo: 0xb9e4, Hi: 0xb9e4, Stride: 0x1},
+		unicode.Range16{Lo: 0xba00, Hi: 0xba00, Stride: 0x1},
+		unicode.Range16{Lo: 0xba1c, Hi: 0xba1c, Stride: 0x1},
+		unicode.Range16{Lo: 0xba38, Hi: 0xba38, Stride: 0x1},
+		unicode.Range16{Lo: 0xba54, Hi: 0xba54, Stride: 0x1},
+		unicode.Range16{Lo: 0xba70, Hi: 0xba70, Stride: 0x1},
+		unicode.Range16{Lo: 0xba8c, Hi: 0xba8c, Stride: 0x1},
+		unicode.Range16{Lo: 0xbaa8, Hi: 0xbaa8, Stride: 0x1},
+		unicode.Range16{Lo: 0xbac4, Hi: 0xbac4, Stride: 0x1},
+		unicode.Range16{Lo: 0xbae0, Hi: 0xbae0, Stride: 0x1},
+		unicode.Range16{Lo: 0xbafc, Hi: 0xbafc, Stride: 0x1},
+		unicode.Range16{Lo: 0xbb18, Hi: 0xbb18, Stride: 0x1},
+		unicode.Range16{Lo: 0xbb34, Hi: 0xbb34, Stride: 0x1},
+		unicode.Range16{Lo: 0xbb50, Hi: 0xbb50, Stride: 0x1},
+		unicode.Range16{Lo: 0xbb6c, Hi: 0xbb6c, Stride: 0x1},
+		unicode.Range16{Lo: 0xbb88, Hi: 0xbb88, Stride: 0x1},
+		unicode.Range16{Lo: 0xbba4, Hi: 0xbba4, Stride: 0x1},
+		unicode.Range16{Lo: 0xbbc0, Hi: 0xbbc0, Stride: 0x1},
+		unicode.Range16{Lo: 0xbbdc, Hi: 0xbbdc, Stride: 0x1},
+		unicode.Range16{Lo: 0xbbf8, Hi: 0xbbf8, Stride: 0x1},
+		unicode.Range16{Lo: 0xbc14, Hi: 0xbc14, Stride: 0x1},
+		unicode.Range16{Lo: 0xbc30, Hi: 0xbc30, Stride: 0x1},
+		unicode.Range16{Lo: 0xbc4c, Hi: 0xbc4c, Stride: 0x1},
+		unicode.Range16{Lo: 0xbc68, Hi: 0xbc68, Stride: 0x1},
+		unicode.Range16{Lo: 0xbc84, Hi: 0xbc84, Stride: 0x1},
+		unicode.Range16{Lo: 0xbca0, Hi: 0xbca0, Stride: 0x1},
+		unicode.Range16{Lo: 0xbcbc, Hi: 0xbcbc, Stride: 0x1},
+		unicode.Range16{Lo: 0xbcd8, Hi: 0xbcd8, Stride: 0x1},
+		unicode.Range16{Lo: 0xbcf4, Hi: 0xbcf4, Stride: 0x1},
+		unicode.Range16{Lo: 0xbd10, Hi: 0xbd10, Stride: 0x1},
+		unicode.Range16{Lo: 0xbd2c, Hi: 0xbd2c, Stride: 0x1},
+		unicode.Range16{Lo: 0xbd48, Hi: 0xbd48, Stride: 0x1},
+		unicode.Range16{Lo: 0xbd64, Hi: 0xbd64, Stride: 0x1},
+		unicode.Range16{Lo: 0xbd80, Hi: 0xbd80, Stride: 0x1},
+		unicode.Range16{Lo: 0xbd9c, Hi: 0xbd9c, Stride: 0x1},
+		unicode.Range16{Lo: 0xbdb8, Hi: 0xbdb8, Stride: 0x1},
+		unicode.Range16{Lo: 0xbdd4, Hi: 0xbdd4, Stride: 0x1},
+		unicode.Range16{Lo: 0xbdf0, Hi: 0xbdf0, Stride: 0x1},
+		unicode.Range16{Lo: 0xbe0c, Hi: 0xbe0c, Stride: 0x1},
+		unicode.Range16{Lo: 0xbe28, Hi: 0xbe28, Stride: 0x1},
+		unicode.Range16{Lo: 0xbe44, Hi: 0xbe44, Stride: 0x1},
+		unicode.Range16{Lo: 0xbe60, Hi: 0xbe60, Stride: 0x1},
+		unicode.Range16{Lo: 0xbe7c, Hi: 0xbe7c, Stride: 0x1},
+		unicode.Range16{Lo: 0xbe98, Hi: 0xbe98, Stride: 0x1},
+		unicode.Range16{Lo: 0xbeb4, Hi: 0xbeb4, Stride: 0x1},
+		unicode.Range16{Lo: 0xbed0, Hi: 0xbed0, Stride: 0x1},
+		unicode.Range16{Lo: 0xbeec, Hi: 0xbeec, Stride: 0x1},
+		unicode.Range16{Lo: 0xbf08, Hi: 0xbf08, Stride: 0x1},
+		unicode.Range16{Lo: 0xbf24, Hi: 0xbf24, Stride: 0x1},
+		unicode.Range16{Lo: 0xbf40, Hi: 0xbf40, Stride: 0x1},
+		unicode.Range16{Lo: 0xbf5c, Hi: 0xbf5c, Stride: 0x1},
+		unicode.Range16{Lo: 0xbf78, Hi: 0xbf78, Stride: 0x1},
+		unicode.Range16{Lo: 0xbf94, Hi: 0xbf94, Stride: 0x1},
+		unicode.Range16{Lo: 0xbfb0, Hi: 0xbfb0, Stride: 0x1},
+		unicode.Range16{Lo: 0xbfcc, Hi: 0xbfcc, Stride: 0x1},
+		unicode.Range16{Lo: 0xbfe8, Hi: 0xbfe8, Stride: 0x1},
+		unicode.Range16{Lo: 0xc004, Hi: 0xc004, Stride: 0x1},
+		unicode.Range16{Lo: 0xc020, Hi: 0xc020, Stride: 0x1},
+		unicode.Range16{Lo: 0xc03c, Hi: 0xc03c, Stride: 0x1},
+		unicode.Range16{Lo: 0xc058, Hi: 0xc058, Stride: 0x1},
+		unicode.Range16{Lo: 0xc074, Hi: 0xc074, Stride: 0x1},
+		unicode.Range16{Lo: 0xc090, Hi: 0xc090, Stride: 0x1},
+		unicode.Range16{Lo: 0xc0ac, Hi: 0xc0ac, Stride: 0x1},
+		unicode.Range16{Lo: 0xc0c8, Hi: 0xc0c8, Stride: 0x1},
+		unicode.Range16{Lo: 0xc0e4, Hi: 0xc0e4, Stride: 0x1},
+		unicode.Range16{Lo: 0xc100, Hi: 0xc100, Stride: 0x1},
+		unicode.Range16{Lo: 0xc11c, Hi: 0xc11c, Stride: 0x1},
+		unicode.Range16{Lo: 0xc138, Hi: 0xc138, Stride: 0x1},
+		unicode.Range16{Lo: 0xc154, Hi: 0xc154, Stride: 0x1},
+		unicode.Range16{Lo: 0xc170, Hi: 0xc170, Stride: 0x1},
+		unicode.Range16{Lo: 0xc18c, Hi: 0xc18c, Stride: 0x1},
+		unicode.Range16{Lo: 0xc1a8, Hi: 0xc1a8, Stride: 0x1},
+		unicode.Range16{Lo: 0xc1c4, Hi: 0xc1c4, Stride: 0x1},
+		unicode.Range16{Lo: 0xc1e0, Hi: 0xc1e0, Stride: 0x1},
+		unicode.Range16{Lo: 0xc1fc, Hi: 0xc1fc, Stride: 0x1},
+		unicode.Range16{Lo: 0xc218, Hi: 0xc218, Stride: 0x1},
+		unicode.Range16{Lo: 0xc234, Hi: 0xc234, Stride: 0x1},
+		unicode.Range16{Lo: 0xc250, Hi: 0xc250, Stride: 0x1},
+		unicode.Range16{Lo: 0xc26c, Hi: 0xc26c, Stride: 0x1},
+		unicode.Range16{Lo: 0xc288, Hi: 0xc288, Stride: 0x1},
+		unicode.Range16{Lo: 0xc2a4, Hi: 0xc2a4, Stride: 0x1},
+		unicode.Range16{Lo: 0xc2c0, Hi: 0xc2c0, Stride: 0x1},
+		unicode.Range16{Lo: 0xc2dc, Hi: 0xc2dc, Stride: 0x1},
+		unicode.Range16{Lo: 0xc2f8, Hi: 0xc2f8, Stride: 0x1},
+		unicode.Range16{Lo: 0xc314, Hi: 0xc314, Stride: 0x1},
+		unicode.Range16{Lo: 0xc330, Hi: 0xc330, Stride: 0x1},
+		unicode.Range16{Lo: 0xc34c, Hi: 0xc34c, Stride: 0x1},
+		unicode.Range16{Lo: 0xc368, Hi: 0xc368, Stride: 0x1},
+		unicode.Range16{Lo: 0xc384, Hi: 0xc384, Stride: 0x1},
+		unicode.Range16{Lo: 0xc3a0, Hi: 0xc3a0, Stride: 0x1},
+		unicode.Range16{Lo: 0xc3bc, Hi: 0xc3bc, Stride: 0x1},
+		unicode.Range16{Lo: 0xc3d8, Hi: 0xc3d8, Stride: 0x1},
+		unicode.Range16{Lo: 0xc3f4, Hi: 0xc3f4, Stride: 0x1},
+		unicode.Range16{Lo: 0xc410, Hi: 0xc410, Stride: 0x1},
+		unicode.Range16{Lo: 0xc42c, Hi: 0xc42c, Stride: 0x1},
+		unicode.Range16{Lo: 0xc448, Hi: 0xc448, Stride: 0x1},
+		unicode.Range16{Lo: 0xc464, Hi: 0xc464, Stride: 0x1},
+		unicode.Range16{Lo: 0xc480, Hi: 0xc480, Stride: 0x1},
+		unicode.Range16{Lo: 0xc49c, Hi: 0xc49c, Stride: 0x1},
+		unicode.Range16{Lo: 0xc4b8, Hi: 0xc4b8, Stride: 0x1},
+		unicode.Range16{Lo: 0xc4d4, Hi: 0xc4d4, Stride: 0x1},
+		unicode.Range16{Lo: 0xc4f0, Hi: 0xc4f0, Stride: 0x1},
+		unicode.Range16{Lo: 0xc50c, Hi: 0xc50c, Stride: 0x1},
+		unicode.Range16{Lo: 0xc528, Hi: 0xc528, Stride: 0x1},
+		unicode.Range16{Lo: 0xc544, Hi: 0xc544, Stride: 0x1},
+		unicode.Range16{Lo: 0xc560, Hi: 0xc560, Stride: 0x1},
+		unicode.Range16{Lo: 0xc57c, Hi: 0xc57c, Stride: 0x1},
+		unicode.Range16{Lo: 0xc598, Hi: 0xc598, Stride: 0x1},
+		unicode.Range16{Lo: 0xc5b4, Hi: 0xc5b4, Stride: 0x1},
+		unicode.Range16{Lo: 0xc5d0, Hi: 0xc5d0, Stride: 0x1},
+		unicode.Range16{Lo: 0xc5ec, Hi: 0xc5ec, Stride: 0x1},
+		unicode.Range16{Lo: 0xc608, Hi: 0xc608, Stride: 0x1},
+		unicode.Range16{Lo: 0xc624, Hi: 0xc624, Stride: 0x1},
+		unicode.Range16{Lo: 0xc640, Hi: 0xc640, Stride: 0x1},
+		unicode.Range16{Lo: 0xc65c, Hi: 0xc65c, Stride: 0x1},
+		unicode.Range16{Lo: 0xc678, Hi: 0xc678, Stride: 0x1},
+		unicode.Range16{Lo: 0xc694, Hi: 0xc694, Stride: 0x1},
+		unicode.Range16{Lo: 0xc6b0, Hi: 0xc6b0, Stride: 0x1},
+		unicode.Range16{Lo: 0xc6cc, Hi: 0xc6cc, Stride: 0x1},
+		unicode.Range16{Lo: 0xc6e8, Hi: 0xc6e8, Stride: 0x1},
+		unicode.Range16{Lo: 0xc704, Hi: 0xc704, Stride: 0x1},
+		unicode.Range16{Lo: 0xc720, Hi: 0xc720, Stride: 0x1},
+		unicode.Range16{Lo: 0xc73c, Hi: 0xc73c, Stride: 0x1},
+		unicode.Range16{Lo: 0xc758, Hi: 0xc758, Stride: 0x1},
+		unicode.Range16{Lo: 0xc774, Hi: 0xc774, Stride: 0x1},
+		unicode.Range16{Lo: 0xc790, Hi: 0xc790, Stride: 0x1},
+		unicode.Range16{Lo: 0xc7ac, Hi: 0xc7ac, Stride: 0x1},
+		unicode.Range16{Lo: 0xc7c8, Hi: 0xc7c8, Stride: 0x1},
+		unicode.Range16{Lo: 0xc7e4, Hi: 0xc7e4, Stride: 0x1},
+		unicode.Range16{Lo: 0xc800, Hi: 0xc800, Stride: 0x1},
+		unicode.Range16{Lo: 0xc81c, Hi: 0xc81c, Stride: 0x1},
+		unicode.Range16{Lo: 0xc838, Hi: 0xc838, Stride: 0x1},
+		unicode.Range16{Lo: 0xc854, Hi: 0xc854, Stride: 0x1},
+		unicode.Range16{Lo: 0xc870, Hi: 0xc870, Stride: 0x1},
+		unicode.Range16{Lo: 0xc88c, Hi: 0xc88c, Stride: 0x1},
+		unicode.Range16{Lo: 0xc8a8, Hi: 0xc8a8, Stride: 0x1},
+		unicode.Range16{Lo: 0xc8c4, Hi: 0xc8c4, Stride: 0x1},
+		unicode.Range16{Lo: 0xc8e0, Hi: 0xc8e0, Stride: 0x1},
+		unicode.Range16{Lo: 0xc8fc, Hi: 0xc8fc, Stride: 0x1},
+		unicode.Range16{Lo: 0xc918, Hi: 0xc918, Stride: 0x1},
+		unicode.Range16{Lo: 0xc934, Hi: 0xc934, Stride: 0x1},
+		unicode.Range16{Lo: 0xc950, Hi: 0xc950, Stride: 0x1},
+		unicode.Range16{Lo: 0xc96c, Hi: 0xc96c, Stride: 0x1},
+		unicode.Range16{Lo: 0xc988, Hi: 0xc988, Stride: 0x1},
+		unicode.Range16{Lo: 0xc9a4, Hi: 0xc9a4, Stride: 0x1},
+		unicode.Range16{Lo: 0xc9c0, Hi: 0xc9c0, Stride: 0x1},
+		unicode.Range16{Lo: 0xc9dc, Hi: 0xc9dc, Stride: 0x1},
+		unicode.Range16{Lo: 0xc9f8, Hi: 0xc9f8, Stride: 0x1},
+		unicode.Range16{Lo: 0xca14, Hi: 0xca14, Stride: 0x1},
+		unicode.Range16{Lo: 0xca30, Hi: 0xca30, Stride: 0x1},
+		unicode.Range16{Lo: 0xca4c, Hi: 0xca4c, Stride: 0x1},
+		unicode.Range16{Lo: 0xca68, Hi: 0xca68, Stride: 0x1},
+		unicode.Range16{Lo: 0xca84, Hi: 0xca84, Stride: 0x1},
+		unicode.Range16{Lo: 0xcaa0, Hi: 0xcaa0, Stride: 0x1},
+		unicode.Range16{Lo: 0xcabc, Hi: 0xcabc, Stride: 0x1},
+		unicode.Range16{Lo: 0xcad8, Hi: 0xcad8, Stride: 0x1},
+		unicode.Range16{Lo: 0xcaf4, Hi: 0xcaf4, Stride: 0x1},
+		unicode.Range16{Lo: 0xcb10, Hi: 0xcb10, Stride: 0x1},
+		unicode.Range16{Lo: 0xcb2c, Hi: 0xcb2c, Stride: 0x1},
+		unicode.Range16{Lo: 0xcb48, Hi: 0xcb48, Stride: 0x1},
+		unicode.Range16{Lo: 0xcb64, Hi: 0xcb64, Stride: 0x1},
+		unicode.Range16{Lo: 0xcb80, Hi: 0xcb80, Stride: 0x1},
+		unicode.Range16{Lo: 0xcb9c, Hi: 0xcb9c, Stride: 0x1},
+		unicode.Range16{Lo: 0xcbb8, Hi: 0xcbb8, Stride: 0x1},
+		unicode.Range16{Lo: 0xcbd4, Hi: 0xcbd4, Stride: 0x1},
+		unicode.Range16{Lo: 0xcbf0, Hi: 0xcbf0, Stride: 0x1},
+		unicode.Range16{Lo: 0xcc0c, Hi: 0xcc0c, Stride: 0x1},
+		unicode.Range16{Lo: 0xcc28, Hi: 0xcc28, Stride: 0x1},
+		unicode.Range16{Lo: 0xcc44, Hi: 0xcc44, Stride: 0x1},
+		unicode.Range16{Lo: 0xcc60, Hi: 0xcc60, Stride: 0x1},
+		unicode.Range16{Lo: 0xcc7c, Hi: 0xcc7c, Stride: 0x1},
+		unicode.Range16{Lo: 0xcc98, Hi: 0xcc98, Stride: 0x1},
+		unicode.Range16{Lo: 0xccb4, Hi: 0xccb4, Stride: 0x1},
+		unicode.Range16{Lo: 0xccd0, Hi: 0xccd0, Stride: 0x1},
+		unicode.Range16{Lo: 0xccec, Hi: 0xccec, Stride: 0x1},
+		unicode.Range16{Lo: 0xcd08, Hi: 0xcd08, Stride: 0x1},
+		unicode.Range16{Lo: 0xcd24, Hi: 0xcd24, Stride: 0x1},
+		unicode.Range16{Lo: 0xcd40, Hi: 0xcd40, Stride: 0x1},
+		unicode.Range16{Lo: 0xcd5c, Hi: 0xcd5c, Stride: 0x1},
+		unicode.Range16{Lo: 0xcd78, Hi: 0xcd78, Stride: 0x1},
+		unicode.Range16{Lo: 0xcd94, Hi: 0xcd94, Stride: 0x1},
+		unicode.Range16{Lo: 0xcdb0, Hi: 0xcdb0, Stride: 0x1},
+		unicode.Range16{Lo: 0xcdcc, Hi: 0xcdcc, Stride: 0x1},
+		unicode.Range16{Lo: 0xcde8, Hi: 0xcde8, Stride: 0x1},
+		unicode.Range16{Lo: 0xce04, Hi: 0xce04, Stride: 0x1},
+		unicode.Range16{Lo: 0xce20, Hi: 0xce20, Stride: 0x1},
+		unicode.Range16{Lo: 0xce3c, Hi: 0xce3c, Stride: 0x1},
+		unicode.Range16{Lo: 0xce58, Hi: 0xce58, Stride: 0x1},
+		unicode.Range16{Lo: 0xce74, Hi: 0xce74, Stride: 0x1},
+		unicode.Range16{Lo: 0xce90, Hi: 0xce90, Stride: 0x1},
+		unicode.Range16{Lo: 0xceac, Hi: 0xceac, Stride: 0x1},
+		unicode.Range16{Lo: 0xcec8, Hi: 0xcec8, Stride: 0x1},
+		unicode.Range16{Lo: 0xcee4, Hi: 0xcee4, Stride: 0x1},
+		unicode.Range16{Lo: 0xcf00, Hi: 0xcf00, Stride: 0x1},
+		unicode.Range16{Lo: 0xcf1c, Hi: 0xcf1c, Stride: 0x1},
+		unicode.Range16{Lo: 0xcf38, Hi: 0xcf38, Stride: 0x1},
+		unicode.Range16{Lo: 0xcf54, Hi: 0xcf54, Stride: 0x1},
+		unicode.Range16{Lo: 0xcf70, Hi: 0xcf70, Stride: 0x1},
+		unicode.Range16{Lo: 0xcf8c, Hi: 0xcf8c, Stride: 0x1},
+		unicode.Range16{Lo: 0xcfa8, Hi: 0xcfa8, Stride: 0x1},
+		unicode.Range16{Lo: 0xcfc4, Hi: 0xcfc4, Stride: 0x1},
+		unicode.Range16{Lo: 0xcfe0, Hi: 0xcfe0, Stride: 0x1},
+		unicode.Range16{Lo: 0xcffc, Hi: 0xcffc, Stride: 0x1},
+		unicode.Range16{Lo: 0xd018, Hi: 0xd018, Stride: 0x1},
+		unicode.Range16{Lo: 0xd034, Hi: 0xd034, Stride: 0x1},
+		unicode.Range16{Lo: 0xd050, Hi: 0xd050, Stride: 0x1},
+		unicode.Range16{Lo: 0xd06c, Hi: 0xd06c, Stride: 0x1},
+		unicode.Range16{Lo: 0xd088, Hi: 0xd088, Stride: 0x1},
+		unicode.Range16{Lo: 0xd0a4, Hi: 0xd0a4, Stride: 0x1},
+		unicode.Range16{Lo: 0xd0c0, Hi: 0xd0c0, Stride: 0x1},
+		unicode.Range16{Lo: 0xd0dc, Hi: 0xd0dc, Stride: 0x1},
+		unicode.Range16{Lo: 0xd0f8, Hi: 0xd0f8, Stride: 0x1},
+		unicode.Range16{Lo: 0xd114, Hi: 0xd114, Stride: 0x1},
+		unicode.Range16{Lo: 0xd130, Hi: 0xd130, Stride: 0x1},
+		unicode.Range16{Lo: 0xd14c, Hi: 0xd14c, Stride: 0x1},
+		unicode.Range16{Lo: 0xd168, Hi: 0xd168, Stride: 0x1},
+		unicode.Range16{Lo: 0xd184, Hi: 0xd184, Stride: 0x1},
+		unicode.Range16{Lo: 0xd1a0, Hi: 0xd1a0, Stride: 0x1},
+		unicode.Range16{Lo: 0xd1bc, Hi: 0xd1bc, Stride: 0x1},
+		unicode.Range16{Lo: 0xd1d8, Hi: 0xd1d8, Stride: 0x1},
+		unicode.Range16{Lo: 0xd1f4, Hi: 0xd1f4, Stride: 0x1},
+		unicode.Range16{Lo: 0xd210, Hi: 0xd210, Stride: 0x1},
+		unicode.Range16{Lo: 0xd22c, Hi: 0xd22c, Stride: 0x1},
+		unicode.Range16{Lo: 0xd248, Hi: 0xd248, Stride: 0x1},
+		unicode.Range16{Lo: 0xd264, Hi: 0xd264, Stride: 0x1},
+		unicode.Range16{Lo: 0xd280, Hi: 0xd280, Stride: 0x1},
+		unicode.Range16{Lo: 0xd29c, Hi: 0xd29c, Stride: 0x1},
+		unicode.Range16{Lo: 0xd2b8, Hi: 0xd2b8, Stride: 0x1},
+		unicode.Range16{Lo: 0xd2d4, Hi: 0xd2d4, Stride: 0x1},
+		unicode.Range16{Lo: 0xd2f0, Hi: 0xd2f0, Stride: 0x1},
+		unicode.Range16{Lo: 0xd30c, Hi: 0xd30c, Stride: 0x1},
+		unicode.Range16{Lo: 0xd328, Hi: 0xd328, Stride: 0x1},
+		unicode.Range16{Lo: 0xd344, Hi: 0xd344, Stride: 0x1},
+		unicode.Range16{Lo: 0xd360, Hi: 0xd360, Stride: 0x1},
+		unicode.Range16{Lo: 0xd37c, Hi: 0xd37c, Stride: 0x1},
+		unicode.Range16{Lo: 0xd398, Hi: 0xd398, Stride: 0x1},
+		unicode.Range16{Lo: 0xd3b4, Hi: 0xd3b4, Stride: 0x1},
+		unicode.Range16{Lo: 0xd3d0, Hi: 0xd3d0, Stride: 0x1},
+		unicode.Range16{Lo: 0xd3ec, Hi: 0xd3ec, Stride: 0x1},
+		unicode.Range16{Lo: 0xd408, Hi: 0xd408, Stride: 0x1},
+		unicode.Range16{Lo: 0xd424, Hi: 0xd424, Stride: 0x1},
+		unicode.Range16{Lo: 0xd440, Hi: 0xd440, Stride: 0x1},
+		unicode.Range16{Lo: 0xd45c, Hi: 0xd45c, Stride: 0x1},
+		unicode.Range16{Lo: 0xd478, Hi: 0xd478, Stride: 0x1},
+		unicode.Range16{Lo: 0xd494, Hi: 0xd494, Stride: 0x1},
+		unicode.Range16{Lo: 0xd4b0, Hi: 0xd4b0, Stride: 0x1},
+		unicode.Range16{Lo: 0xd4cc, Hi: 0xd4cc, Stride: 0x1},
+		unicode.Range16{Lo: 0xd4e8, Hi: 0xd4e8, Stride: 0x1},
+		unicode.Range16{Lo: 0xd504, Hi: 0xd504, Stride: 0x1},
+		unicode.Range16{Lo: 0xd520, Hi: 0xd520, Stride: 0x1},
+		unicode.Range16{Lo: 0xd53c, Hi: 0xd53c, Stride: 0x1},
+		unicode.Range16{Lo: 0xd558, Hi: 0xd558, Stride: 0x1},
+		unicode.Range16{Lo: 0xd574, Hi: 0xd574, Stride: 0x1},
+		unicode.Range16{Lo: 0xd590, Hi: 0xd590, Stride: 0x1},
+		unicode.Range16{Lo: 0xd5ac, Hi: 0xd5ac, Stride: 0x1},
+		unicode.Range16{Lo: 0xd5c8, Hi: 0xd5c8, Stride: 0x1},
+		unicode.Range16{Lo: 0xd5e4, Hi: 0xd5e4, Stride: 0x1},
+		unicode.Range16{Lo: 0xd600, Hi: 0xd600, Stride: 0x1},
+		unicode.Range16{Lo: 0xd61c, Hi: 0xd61c, Stride: 0x1},
+		unicode.Range16{Lo: 0xd638, Hi: 0xd638, Stride: 0x1},
+		unicode.Range16{Lo: 0xd654, Hi: 0xd654, Stride: 0x1},
+		unicode.Range16{Lo: 0xd670, Hi: 0xd670, Stride: 0x1},
+		unicode.Range16{Lo: 0xd68c, Hi: 0xd68c, Stride: 0x1},
+		unicode.Range16{Lo: 0xd6a8, Hi: 0xd6a8, Stride: 0x1},
+		unicode.Range16{Lo: 0xd6c4, Hi: 0xd6c4, Stride: 0x1},
+		unicode.Range16{Lo: 0xd6e0, Hi: 0xd6e0, Stride: 0x1},
+		unicode.Range16{Lo: 0xd6fc, Hi: 0xd6fc, Stride: 0x1},
+		unicode.Range16{Lo: 0xd718, Hi: 0xd718, Stride: 0x1},
+		unicode.Range16{Lo: 0xd734, Hi: 0xd734, Stride: 0x1},
+		unicode.Range16{Lo: 0xd750, Hi: 0xd750, Stride: 0x1},
+		unicode.Range16{Lo: 0xd76c, Hi: 0xd76c, Stride: 0x1},
+		unicode.Range16{Lo: 0xd788, Hi: 0xd788, Stride: 0x1},
+	},
+	LatinOffset: 0,
+}
+
+var _GraphemeLVT = &unicode.RangeTable{
+	R16: []unicode.Range16{
+		unicode.Range16{Lo: 0xac01, Hi: 0xac1b, Stride: 0x1},
+		unicode.Range16{Lo: 0xac1d, Hi: 0xac37, Stride: 0x1},
+		unicode.Range16{Lo: 0xac39, Hi: 0xac53, Stride: 0x1},
+		unicode.Range16{Lo: 0xac55, Hi: 0xac6f, Stride: 0x1},
+		unicode.Range16{Lo: 0xac71, Hi: 0xac8b, Stride: 0x1},
+		unicode.Range16{Lo: 0xac8d, Hi: 0xaca7, Stride: 0x1},
+		unicode.Range16{Lo: 0xaca9, Hi: 0xacc3, Stride: 0x1},
+		unicode.Range16{Lo: 0xacc5, Hi: 0xacdf, Stride: 0x1},
+		unicode.Range16{Lo: 0xace1, Hi: 0xacfb, Stride: 0x1},
+		unicode.Range16{Lo: 0xacfd, Hi: 0xad17, Stride: 0x1},
+		unicode.Range16{Lo: 0xad19, Hi: 0xad33, Stride: 0x1},
+		unicode.Range16{Lo: 0xad35, Hi: 0xad4f, Stride: 0x1},
+		unicode.Range16{Lo: 0xad51, Hi: 0xad6b, Stride: 0x1},
+		unicode.Range16{Lo: 0xad6d, Hi: 0xad87, Stride: 0x1},
+		unicode.Range16{Lo: 0xad89, Hi: 0xada3, Stride: 0x1},
+		unicode.Range16{Lo: 0xada5, Hi: 0xadbf, Stride: 0x1},
+		unicode.Range16{Lo: 0xadc1, Hi: 0xaddb, Stride: 0x1},
+		unicode.Range16{Lo: 0xaddd, Hi: 0xadf7, Stride: 0x1},
+		unicode.Range16{Lo: 0xadf9, Hi: 0xae13, Stride: 0x1},
+		unicode.Range16{Lo: 0xae15, Hi: 0xae2f, Stride: 0x1},
+		unicode.Range16{Lo: 0xae31, Hi: 0xae4b, Stride: 0x1},
+		unicode.Range16{Lo: 0xae4d, Hi: 0xae67, Stride: 0x1},
+		unicode.Range16{Lo: 0xae69, Hi: 0xae83, Stride: 0x1},
+		unicode.Range16{Lo: 0xae85, Hi: 0xae9f, Stride: 0x1},
+		unicode.Range16{Lo: 0xaea1, Hi: 0xaebb, Stride: 0x1},
+		unicode.Range16{Lo: 0xaebd, Hi: 0xaed7, Stride: 0x1},
+		unicode.Range16{Lo: 0xaed9, Hi: 0xaef3, Stride: 0x1},
+		unicode.Range16{Lo: 0xaef5, Hi: 0xaf0f, Stride: 0x1},
+		unicode.Range16{Lo: 0xaf11, Hi: 0xaf2b, Stride: 0x1},
+		unicode.Range16{Lo: 0xaf2d, Hi: 0xaf47, Stride: 0x1},
+		unicode.Range16{Lo: 0xaf49, Hi: 0xaf63, Stride: 0x1},
+		unicode.Range16{Lo: 0xaf65, Hi: 0xaf7f, Stride: 0x1},
+		unicode.Range16{Lo: 0xaf81, Hi: 0xaf9b, Stride: 0x1},
+		unicode.Range16{Lo: 0xaf9d, Hi: 0xafb7, Stride: 0x1},
+		unicode.Range16{Lo: 0xafb9, Hi: 0xafd3, Stride: 0x1},
+		unicode.Range16{Lo: 0xafd5, Hi: 0xafef, Stride: 0x1},
+		unicode.Range16{Lo: 0xaff1, Hi: 0xb00b, Stride: 0x1},
+		unicode.Range16{Lo: 0xb00d, Hi: 0xb027, Stride: 0x1},
+		unicode.Range16{Lo: 0xb029, Hi: 0xb043, Stride: 0x1},
+		unicode.Range16{Lo: 0xb045, Hi: 0xb05f, Stride: 0x1},
+		unicode.Range16{Lo: 0xb061, Hi: 0xb07b, Stride: 0x1},
+		unicode.Range16{Lo: 0xb07d, Hi: 0xb097, Stride: 0x1},
+		unicode.Range16{Lo: 0xb099, Hi: 0xb0b3, Stride: 0x1},
+		unicode.Range16{Lo: 0xb0b5, Hi: 0xb0cf, Stride: 0x1},
+		unicode.Range16{Lo: 0xb0d1, Hi: 0xb0eb, Stride: 0x1},
+		unicode.Range16{Lo: 0xb0ed, Hi: 0xb107, Stride: 0x1},
+		unicode.Range16{Lo: 0xb109, Hi: 0xb123, Stride: 0x1},
+		unicode.Range16{Lo: 0xb125, Hi: 0xb13f, Stride: 0x1},
+		unicode.Range16{Lo: 0xb141, Hi: 0xb15b, Stride: 0x1},
+		unicode.Range16{Lo: 0xb15d, Hi: 0xb177, Stride: 0x1},
+		unicode.Range16{Lo: 0xb179, Hi: 0xb193, Stride: 0x1},
+		unicode.Range16{Lo: 0xb195, Hi: 0xb1af, Stride: 0x1},
+		unicode.Range16{Lo: 0xb1b1, Hi: 0xb1cb, Stride: 0x1},
+		unicode.Range16{Lo: 0xb1cd, Hi: 0xb1e7, Stride: 0x1},
+		unicode.Range16{Lo: 0xb1e9, Hi: 0xb203, Stride: 0x1},
+		unicode.Range16{Lo: 0xb205, Hi: 0xb21f, Stride: 0x1},
+		unicode.Range16{Lo: 0xb221, Hi: 0xb23b, Stride: 0x1},
+		unicode.Range16{Lo: 0xb23d, Hi: 0xb257, Stride: 0x1},
+		unicode.Range16{Lo: 0xb259, Hi: 0xb273, Stride: 0x1},
+		unicode.Range16{Lo: 0xb275, Hi: 0xb28f, Stride: 0x1},
+		unicode.Range16{Lo: 0xb291, Hi: 0xb2ab, Stride: 0x1},
+		unicode.Range16{Lo: 0xb2ad, Hi: 0xb2c7, Stride: 0x1},
+		unicode.Range16{Lo: 0xb2c9, Hi: 0xb2e3, Stride: 0x1},
+		unicode.Range16{Lo: 0xb2e5, Hi: 0xb2ff, Stride: 0x1},
+		unicode.Range16{Lo: 0xb301, Hi: 0xb31b, Stride: 0x1},
+		unicode.Range16{Lo: 0xb31d, Hi: 0xb337, Stride: 0x1},
+		unicode.Range16{Lo: 0xb339, Hi: 0xb353, Stride: 0x1},
+		unicode.Range16{Lo: 0xb355, Hi: 0xb36f, Stride: 0x1},
+		unicode.Range16{Lo: 0xb371, Hi: 0xb38b, Stride: 0x1},
+		unicode.Range16{Lo: 0xb38d, Hi: 0xb3a7, Stride: 0x1},
+		unicode.Range16{Lo: 0xb3a9, Hi: 0xb3c3, Stride: 0x1},
+		unicode.Range16{Lo: 0xb3c5, Hi: 0xb3df, Stride: 0x1},
+		unicode.Range16{Lo: 0xb3e1, Hi: 0xb3fb, Stride: 0x1},
+		unicode.Range16{Lo: 0xb3fd, Hi: 0xb417, Stride: 0x1},
+		unicode.Range16{Lo: 0xb419, Hi: 0xb433, Stride: 0x1},
+		unicode.Range16{Lo: 0xb435, Hi: 0xb44f, Stride: 0x1},
+		unicode.Range16{Lo: 0xb451, Hi: 0xb46b, Stride: 0x1},
+		unicode.Range16{Lo: 0xb46d, Hi: 0xb487, Stride: 0x1},
+		unicode.Range16{Lo: 0xb489, Hi: 0xb4a3, Stride: 0x1},
+		unicode.Range16{Lo: 0xb4a5, Hi: 0xb4bf, Stride: 0x1},
+		unicode.Range16{Lo: 0xb4c1, Hi: 0xb4db, Stride: 0x1},
+		unicode.Range16{Lo: 0xb4dd, Hi: 0xb4f7, Stride: 0x1},
+		unicode.Range16{Lo: 0xb4f9, Hi: 0xb513, Stride: 0x1},
+		unicode.Range16{Lo: 0xb515, Hi: 0xb52f, Stride: 0x1},
+		unicode.Range16{Lo: 0xb531, Hi: 0xb54b, Stride: 0x1},
+		unicode.Range16{Lo: 0xb54d, Hi: 0xb567, Stride: 0x1},
+		unicode.Range16{Lo: 0xb569, Hi: 0xb583, Stride: 0x1},
+		unicode.Range16{Lo: 0xb585, Hi: 0xb59f, Stride: 0x1},
+		unicode.Range16{Lo: 0xb5a1, Hi: 0xb5bb, Stride: 0x1},
+		unicode.Range16{Lo: 0xb5bd, Hi: 0xb5d7, Stride: 0x1},
+		unicode.Range16{Lo: 0xb5d9, Hi: 0xb5f3, Stride: 0x1},
+		unicode.Range16{Lo: 0xb5f5, Hi: 0xb60f, Stride: 0x1},
+		unicode.Range16{Lo: 0xb611, Hi: 0xb62b, Stride: 0x1},
+		unicode.Range16{Lo: 0xb62d, Hi: 0xb647, Stride: 0x1},
+		unicode.Range16{Lo: 0xb649, Hi: 0xb663, Stride: 0x1},
+		unicode.Range16{Lo: 0xb665, Hi: 0xb67f, Stride: 0x1},
+		unicode.Range16{Lo: 0xb681, Hi: 0xb69b, Stride: 0x1},
+		unicode.Range16{Lo: 0xb69d, Hi: 0xb6b7, Stride: 0x1},
+		unicode.Range16{Lo: 0xb6b9, Hi: 0xb6d3, Stride: 0x1},
+		unicode.Range16{Lo: 0xb6d5, Hi: 0xb6ef, Stride: 0x1},
+		unicode.Range16{Lo: 0xb6f1, Hi: 0xb70b, Stride: 0x1},
+		unicode.Range16{Lo: 0xb70d, Hi: 0xb727, Stride: 0x1},
+		unicode.Range16{Lo: 0xb729, Hi: 0xb743, Stride: 0x1},
+		unicode.Range16{Lo: 0xb745, Hi: 0xb75f, Stride: 0x1},
+		unicode.Range16{Lo: 0xb761, Hi: 0xb77b, Stride: 0x1},
+		unicode.Range16{Lo: 0xb77d, Hi: 0xb797, Stride: 0x1},
+		unicode.Range16{Lo: 0xb799, Hi: 0xb7b3, Stride: 0x1},
+		unicode.Range16{Lo: 0xb7b5, Hi: 0xb7cf, Stride: 0x1},
+		unicode.Range16{Lo: 0xb7d1, Hi: 0xb7eb, Stride: 0x1},
+		unicode.Range16{Lo: 0xb7ed, Hi: 0xb807, Stride: 0x1},
+		unicode.Range16{Lo: 0xb809, Hi: 0xb823, Stride: 0x1},
+		unicode.Range16{Lo: 0xb825, Hi: 0xb83f, Stride: 0x1},
+		unicode.Range16{Lo: 0xb841, Hi: 0xb85b, Stride: 0x1},
+		unicode.Range16{Lo: 0xb85d, Hi: 0xb877, Stride: 0x1},
+		unicode.Range16{Lo: 0xb879, Hi: 0xb893, Stride: 0x1},
+		unicode.Range16{Lo: 0xb895, Hi: 0xb8af, Stride: 0x1},
+		unicode.Range16{Lo: 0xb8b1, Hi: 0xb8cb, Stride: 0x1},
+		unicode.Range16{Lo: 0xb8cd, Hi: 0xb8e7, Stride: 0x1},
+		unicode.Range16{Lo: 0xb8e9, Hi: 0xb903, Stride: 0x1},
+		unicode.Range16{Lo: 0xb905, Hi: 0xb91f, Stride: 0x1},
+		unicode.Range16{Lo: 0xb921, Hi: 0xb93b, Stride: 0x1},
+		unicode.Range16{Lo: 0xb93d, Hi: 0xb957, Stride: 0x1},
+		unicode.Range16{Lo: 0xb959, Hi: 0xb973, Stride: 0x1},
+		unicode.Range16{Lo: 0xb975, Hi: 0xb98f, Stride: 0x1},
+		unicode.Range16{Lo: 0xb991, Hi: 0xb9ab, Stride: 0x1},
+		unicode.Range16{Lo: 0xb9ad, Hi: 0xb9c7, Stride: 0x1},
+		unicode.Range16{Lo: 0xb9c9, Hi: 0xb9e3, Stride: 0x1},
+		unicode.Range16{Lo: 0xb9e5, Hi: 0xb9ff, Stride: 0x1},
+		unicode.Range16{Lo: 0xba01, Hi: 0xba1b, Stride: 0x1},
+		unicode.Range16{Lo: 0xba1d, Hi: 0xba37, Stride: 0x1},
+		unicode.Range16{Lo: 0xba39, Hi: 0xba53, Stride: 0x1},
+		unicode.Range16{Lo: 0xba55, Hi: 0xba6f, Stride: 0x1},
+		unicode.Range16{Lo: 0xba71, Hi: 0xba8b, Stride: 0x1},
+		unicode.Range16{Lo: 0xba8d, Hi: 0xbaa7, Stride: 0x1},
+		unicode.Range16{Lo: 0xbaa9, Hi: 0xbac3, Stride: 0x1},
+		unicode.Range16{Lo: 0xbac5, Hi: 0xbadf, Stride: 0x1},
+		unicode.Range16{Lo: 0xbae1, Hi: 0xbafb, Stride: 0x1},
+		unicode.Range16{Lo: 0xbafd, Hi: 0xbb17, Stride: 0x1},
+		unicode.Range16{Lo: 0xbb19, Hi: 0xbb33, Stride: 0x1},
+		unicode.Range16{Lo: 0xbb35, Hi: 0xbb4f, Stride: 0x1},
+		unicode.Range16{Lo: 0xbb51, Hi: 0xbb6b, Stride: 0x1},
+		unicode.Range16{Lo: 0xbb6d, Hi: 0xbb87, Stride: 0x1},
+		unicode.Range16{Lo: 0xbb89, Hi: 0xbba3, Stride: 0x1},
+		unicode.Range16{Lo: 0xbba5, Hi: 0xbbbf, Stride: 0x1},
+		unicode.Range16{Lo: 0xbbc1, Hi: 0xbbdb, Stride: 0x1},
+		unicode.Range16{Lo: 0xbbdd, Hi: 0xbbf7, Stride: 0x1},
+		unicode.Range16{Lo: 0xbbf9, Hi: 0xbc13, Stride: 0x1},
+		unicode.Range16{Lo: 0xbc15, Hi: 0xbc2f, Stride: 0x1},
+		unicode.Range16{Lo: 0xbc31, Hi: 0xbc4b, Stride: 0x1},
+		unicode.Range16{Lo: 0xbc4d, Hi: 0xbc67, Stride: 0x1},
+		unicode.Range16{Lo: 0xbc69, Hi: 0xbc83, Stride: 0x1},
+		unicode.Range16{Lo: 0xbc85, Hi: 0xbc9f, Stride: 0x1},
+		unicode.Range16{Lo: 0xbca1, Hi: 0xbcbb, Stride: 0x1},
+		unicode.Range16{Lo: 0xbcbd, Hi: 0xbcd7, Stride: 0x1},
+		unicode.Range16{Lo: 0xbcd9, Hi: 0xbcf3, Stride: 0x1},
+		unicode.Range16{Lo: 0xbcf5, Hi: 0xbd0f, Stride: 0x1},
+		unicode.Range16{Lo: 0xbd11, Hi: 0xbd2b, Stride: 0x1},
+		unicode.Range16{Lo: 0xbd2d, Hi: 0xbd47, Stride: 0x1},
+		unicode.Range16{Lo: 0xbd49, Hi: 0xbd63, Stride: 0x1},
+		unicode.Range16{Lo: 0xbd65, Hi: 0xbd7f, Stride: 0x1},
+		unicode.Range16{Lo: 0xbd81, Hi: 0xbd9b, Stride: 0x1},
+		unicode.Range16{Lo: 0xbd9d, Hi: 0xbdb7, Stride: 0x1},
+		unicode.Range16{Lo: 0xbdb9, Hi: 0xbdd3, Stride: 0x1},
+		unicode.Range16{Lo: 0xbdd5, Hi: 0xbdef, Stride: 0x1},
+		unicode.Range16{Lo: 0xbdf1, Hi: 0xbe0b, Stride: 0x1},
+		unicode.Range16{Lo: 0xbe0d, Hi: 0xbe27, Stride: 0x1},
+		unicode.Range16{Lo: 0xbe29, Hi: 0xbe43, Stride: 0x1},
+		unicode.Range16{Lo: 0xbe45, Hi: 0xbe5f, Stride: 0x1},
+		unicode.Range16{Lo: 0xbe61, Hi: 0xbe7b, Stride: 0x1},
+		unicode.Range16{Lo: 0xbe7d, Hi: 0xbe97, Stride: 0x1},
+		unicode.Range16{Lo: 0xbe99, Hi: 0xbeb3, Stride: 0x1},
+		unicode.Range16{Lo: 0xbeb5, Hi: 0xbecf, Stride: 0x1},
+		unicode.Range16{Lo: 0xbed1, Hi: 0xbeeb, Stride: 0x1},
+		unicode.Range16{Lo: 0xbeed, Hi: 0xbf07, Stride: 0x1},
+		unicode.Range16{Lo: 0xbf09, Hi: 0xbf23, Stride: 0x1},
+		unicode.Range16{Lo: 0xbf25, Hi: 0xbf3f, Stride: 0x1},
+		unicode.Range16{Lo: 0xbf41, Hi: 0xbf5b, Stride: 0x1},
+		unicode.Range16{Lo: 0xbf5d, Hi: 0xbf77, Stride: 0x1},
+		unicode.Range16{Lo: 0xbf79, Hi: 0xbf93, Stride: 0x1},
+		unicode.Range16{Lo: 0xbf95, Hi: 0xbfaf, Stride: 0x1},
+		unicode.Range16{Lo: 0xbfb1, Hi: 0xbfcb, Stride: 0x1},
+		unicode.Range16{Lo: 0xbfcd, Hi: 0xbfe7, Stride: 0x1},
+		unicode.Range16{Lo: 0xbfe9, Hi: 0xc003, Stride: 0x1},
+		unicode.Range16{Lo: 0xc005, Hi: 0xc01f, Stride: 0x1},
+		unicode.Range16{Lo: 0xc021, Hi: 0xc03b, Stride: 0x1},
+		unicode.Range16{Lo: 0xc03d, Hi: 0xc057, Stride: 0x1},
+		unicode.Range16{Lo: 0xc059, Hi: 0xc073, Stride: 0x1},
+		unicode.Range16{Lo: 0xc075, Hi: 0xc08f, Stride: 0x1},
+		unicode.Range16{Lo: 0xc091, Hi: 0xc0ab, Stride: 0x1},
+		unicode.Range16{Lo: 0xc0ad, Hi: 0xc0c7, Stride: 0x1},
+		unicode.Range16{Lo: 0xc0c9, Hi: 0xc0e3, Stride: 0x1},
+		unicode.Range16{Lo: 0xc0e5, Hi: 0xc0ff, Stride: 0x1},
+		unicode.Range16{Lo: 0xc101, Hi: 0xc11b, Stride: 0x1},
+		unicode.Range16{Lo: 0xc11d, Hi: 0xc137, Stride: 0x1},
+		unicode.Range16{Lo: 0xc139, Hi: 0xc153, Stride: 0x1},
+		unicode.Range16{Lo: 0xc155, Hi: 0xc16f, Stride: 0x1},
+		unicode.Range16{Lo: 0xc171, Hi: 0xc18b, Stride: 0x1},
+		unicode.Range16{Lo: 0xc18d, Hi: 0xc1a7, Stride: 0x1},
+		unicode.Range16{Lo: 0xc1a9, Hi: 0xc1c3, Stride: 0x1},
+		unicode.Range16{Lo: 0xc1c5, Hi: 0xc1df, Stride: 0x1},
+		unicode.Range16{Lo: 0xc1e1, Hi: 0xc1fb, Stride: 0x1},
+		unicode.Range16{Lo: 0xc1fd, Hi: 0xc217, Stride: 0x1},
+		unicode.Range16{Lo: 0xc219, Hi: 0xc233, Stride: 0x1},
+		unicode.Range16{Lo: 0xc235, Hi: 0xc24f, Stride: 0x1},
+		unicode.Range16{Lo: 0xc251, Hi: 0xc26b, Stride: 0x1},
+		unicode.Range16{Lo: 0xc26d, Hi: 0xc287, Stride: 0x1},
+		unicode.Range16{Lo: 0xc289, Hi: 0xc2a3, Stride: 0x1},
+		unicode.Range16{Lo: 0xc2a5, Hi: 0xc2bf, Stride: 0x1},
+		unicode.Range16{Lo: 0xc2c1, Hi: 0xc2db, Stride: 0x1},
+		unicode.Range16{Lo: 0xc2dd, Hi: 0xc2f7, Stride: 0x1},
+		unicode.Range16{Lo: 0xc2f9, Hi: 0xc313, Stride: 0x1},
+		unicode.Range16{Lo: 0xc315, Hi: 0xc32f, Stride: 0x1},
+		unicode.Range16{Lo: 0xc331, Hi: 0xc34b, Stride: 0x1},
+		unicode.Range16{Lo: 0xc34d, Hi: 0xc367, Stride: 0x1},
+		unicode.Range16{Lo: 0xc369, Hi: 0xc383, Stride: 0x1},
+		unicode.Range16{Lo: 0xc385, Hi: 0xc39f, Stride: 0x1},
+		unicode.Range16{Lo: 0xc3a1, Hi: 0xc3bb, Stride: 0x1},
+		unicode.Range16{Lo: 0xc3bd, Hi: 0xc3d7, Stride: 0x1},
+		unicode.Range16{Lo: 0xc3d9, Hi: 0xc3f3, Stride: 0x1},
+		unicode.Range16{Lo: 0xc3f5, Hi: 0xc40f, Stride: 0x1},
+		unicode.Range16{Lo: 0xc411, Hi: 0xc42b, Stride: 0x1},
+		unicode.Range16{Lo: 0xc42d, Hi: 0xc447, Stride: 0x1},
+		unicode.Range16{Lo: 0xc449, Hi: 0xc463, Stride: 0x1},
+		unicode.Range16{Lo: 0xc465, Hi: 0xc47f, Stride: 0x1},
+		unicode.Range16{Lo: 0xc481, Hi: 0xc49b, Stride: 0x1},
+		unicode.Range16{Lo: 0xc49d, Hi: 0xc4b7, Stride: 0x1},
+		unicode.Range16{Lo: 0xc4b9, Hi: 0xc4d3, Stride: 0x1},
+		unicode.Range16{Lo: 0xc4d5, Hi: 0xc4ef, Stride: 0x1},
+		unicode.Range16{Lo: 0xc4f1, Hi: 0xc50b, Stride: 0x1},
+		unicode.Range16{Lo: 0xc50d, Hi: 0xc527, Stride: 0x1},
+		unicode.Range16{Lo: 0xc529, Hi: 0xc543, Stride: 0x1},
+		unicode.Range16{Lo: 0xc545, Hi: 0xc55f, Stride: 0x1},
+		unicode.Range16{Lo: 0xc561, Hi: 0xc57b, Stride: 0x1},
+		unicode.Range16{Lo: 0xc57d, Hi: 0xc597, Stride: 0x1},
+		unicode.Range16{Lo: 0xc599, Hi: 0xc5b3, Stride: 0x1},
+		unicode.Range16{Lo: 0xc5b5, Hi: 0xc5cf, Stride: 0x1},
+		unicode.Range16{Lo: 0xc5d1, Hi: 0xc5eb, Stride: 0x1},
+		unicode.Range16{Lo: 0xc5ed, Hi: 0xc607, Stride: 0x1},
+		unicode.Range16{Lo: 0xc609, Hi: 0xc623, Stride: 0x1},
+		unicode.Range16{Lo: 0xc625, Hi: 0xc63f, Stride: 0x1},
+		unicode.Range16{Lo: 0xc641, Hi: 0xc65b, Stride: 0x1},
+		unicode.Range16{Lo: 0xc65d, Hi: 0xc677, Stride: 0x1},
+		unicode.Range16{Lo: 0xc679, Hi: 0xc693, Stride: 0x1},
+		unicode.Range16{Lo: 0xc695, Hi: 0xc6af, Stride: 0x1},
+		unicode.Range16{Lo: 0xc6b1, Hi: 0xc6cb, Stride: 0x1},
+		unicode.Range16{Lo: 0xc6cd, Hi: 0xc6e7, Stride: 0x1},
+		unicode.Range16{Lo: 0xc6e9, Hi: 0xc703, Stride: 0x1},
+		unicode.Range16{Lo: 0xc705, Hi: 0xc71f, Stride: 0x1},
+		unicode.Range16{Lo: 0xc721, Hi: 0xc73b, Stride: 0x1},
+		unicode.Range16{Lo: 0xc73d, Hi: 0xc757, Stride: 0x1},
+		unicode.Range16{Lo: 0xc759, Hi: 0xc773, Stride: 0x1},
+		unicode.Range16{Lo: 0xc775, Hi: 0xc78f, Stride: 0x1},
+		unicode.Range16{Lo: 0xc791, Hi: 0xc7ab, Stride: 0x1},
+		unicode.Range16{Lo: 0xc7ad, Hi: 0xc7c7, Stride: 0x1},
+		unicode.Range16{Lo: 0xc7c9, Hi: 0xc7e3, Stride: 0x1},
+		unicode.Range16{Lo: 0xc7e5, Hi: 0xc7ff, Stride: 0x1},
+		unicode.Range16{Lo: 0xc801, Hi: 0xc81b, Stride: 0x1},
+		unicode.Range16{Lo: 0xc81d, Hi: 0xc837, Stride: 0x1},
+		unicode.Range16{Lo: 0xc839, Hi: 0xc853, Stride: 0x1},
+		unicode.Range16{Lo: 0xc855, Hi: 0xc86f, Stride: 0x1},
+		unicode.Range16{Lo: 0xc871, Hi: 0xc88b, Stride: 0x1},
+		unicode.Range16{Lo: 0xc88d, Hi: 0xc8a7, Stride: 0x1},
+		unicode.Range16{Lo: 0xc8a9, Hi: 0xc8c3, Stride: 0x1},
+		unicode.Range16{Lo: 0xc8c5, Hi: 0xc8df, Stride: 0x1},
+		unicode.Range16{Lo: 0xc8e1, Hi: 0xc8fb, Stride: 0x1},
+		unicode.Range16{Lo: 0xc8fd, Hi: 0xc917, Stride: 0x1},
+		unicode.Range16{Lo: 0xc919, Hi: 0xc933, Stride: 0x1},
+		unicode.Range16{Lo: 0xc935, Hi: 0xc94f, Stride: 0x1},
+		unicode.Range16{Lo: 0xc951, Hi: 0xc96b, Stride: 0x1},
+		unicode.Range16{Lo: 0xc96d, Hi: 0xc987, Stride: 0x1},
+		unicode.Range16{Lo: 0xc989, Hi: 0xc9a3, Stride: 0x1},
+		unicode.Range16{Lo: 0xc9a5, Hi: 0xc9bf, Stride: 0x1},
+		unicode.Range16{Lo: 0xc9c1, Hi: 0xc9db, Stride: 0x1},
+		unicode.Range16{Lo: 0xc9dd, Hi: 0xc9f7, Stride: 0x1},
+		unicode.Range16{Lo: 0xc9f9, Hi: 0xca13, Stride: 0x1},
+		unicode.Range16{Lo: 0xca15, Hi: 0xca2f, Stride: 0x1},
+		unicode.Range16{Lo: 0xca31, Hi: 0xca4b, Stride: 0x1},
+		unicode.Range16{Lo: 0xca4d, Hi: 0xca67, Stride: 0x1},
+		unicode.Range16{Lo: 0xca69, Hi: 0xca83, Stride: 0x1},
+		unicode.Range16{Lo: 0xca85, Hi: 0xca9f, Stride: 0x1},
+		unicode.Range16{Lo: 0xcaa1, Hi: 0xcabb, Stride: 0x1},
+		unicode.Range16{Lo: 0xcabd, Hi: 0xcad7, Stride: 0x1},
+		unicode.Range16{Lo: 0xcad9, Hi: 0xcaf3, Stride: 0x1},
+		unicode.Range16{Lo: 0xcaf5, Hi: 0xcb0f, Stride: 0x1},
+		unicode.Range16{Lo: 0xcb11, Hi: 0xcb2b, Stride: 0x1},
+		unicode.Range16{Lo: 0xcb2d, Hi: 0xcb47, Stride: 0x1},
+		unicode.Range16{Lo: 0xcb49, Hi: 0xcb63, Stride: 0x1},
+		unicode.Range16{Lo: 0xcb65, Hi: 0xcb7f, Stride: 0x1},
+		unicode.Range16{Lo: 0xcb81, Hi: 0xcb9b, Stride: 0x1},
+		unicode.Range16{Lo: 0xcb9d, Hi: 0xcbb7, Stride: 0x1},
+		unicode.Range16{Lo: 0xcbb9, Hi: 0xcbd3, Stride: 0x1},
+		unicode.Range16{Lo: 0xcbd5, Hi: 0xcbef, Stride: 0x1},
+		unicode.Range16{Lo: 0xcbf1, Hi: 0xcc0b, Stride: 0x1},
+		unicode.Range16{Lo: 0xcc0d, Hi: 0xcc27, Stride: 0x1},
+		unicode.Range16{Lo: 0xcc29, Hi: 0xcc43, Stride: 0x1},
+		unicode.Range16{Lo: 0xcc45, Hi: 0xcc5f, Stride: 0x1},
+		unicode.Range16{Lo: 0xcc61, Hi: 0xcc7b, Stride: 0x1},
+		unicode.Range16{Lo: 0xcc7d, Hi: 0xcc97, Stride: 0x1},
+		unicode.Range16{Lo: 0xcc99, Hi: 0xccb3, Stride: 0x1},
+		unicode.Range16{Lo: 0xccb5, Hi: 0xcccf, Stride: 0x1},
+		unicode.Range16{Lo: 0xccd1, Hi: 0xcceb, Stride: 0x1},
+		unicode.Range16{Lo: 0xcced, Hi: 0xcd07, Stride: 0x1},
+		unicode.Range16{Lo: 0xcd09, Hi: 0xcd23, Stride: 0x1},
+		unicode.Range16{Lo: 0xcd25, Hi: 0xcd3f, Stride: 0x1},
+		unicode.Range16{Lo: 0xcd41, Hi: 0xcd5b, Stride: 0x1},
+		unicode.Range16{Lo: 0xcd5d, Hi: 0xcd77, Stride: 0x1},
+		unicode.Range16{Lo: 0xcd79, Hi: 0xcd93, Stride: 0x1},
+		unicode.Range16{Lo: 0xcd95, Hi: 0xcdaf, Stride: 0x1},
+		unicode.Range16{Lo: 0xcdb1, Hi: 0xcdcb, Stride: 0x1},
+		unicode.Range16{Lo: 0xcdcd, Hi: 0xcde7, Stride: 0x1},
+		unicode.Range16{Lo: 0xcde9, Hi: 0xce03, Stride: 0x1},
+		unicode.Range16{Lo: 0xce05, Hi: 0xce1f, Stride: 0x1},
+		unicode.Range16{Lo: 0xce21, Hi: 0xce3b, Stride: 0x1},
+		unicode.Range16{Lo: 0xce3d, Hi: 0xce57, Stride: 0x1},
+		unicode.Range16{Lo: 0xce59, Hi: 0xce73, Stride: 0x1},
+		unicode.Range16{Lo: 0xce75, Hi: 0xce8f, Stride: 0x1},
+		unicode.Range16{Lo: 0xce91, Hi: 0xceab, Stride: 0x1},
+		unicode.Range16{Lo: 0xcead, Hi: 0xcec7, Stride: 0x1},
+		unicode.Range16{Lo: 0xcec9, Hi: 0xcee3, Stride: 0x1},
+		unicode.Range16{Lo: 0xcee5, Hi: 0xceff, Stride: 0x1},
+		unicode.Range16{Lo: 0xcf01, Hi: 0xcf1b, Stride: 0x1},
+		unicode.Range16{Lo: 0xcf1d, Hi: 0xcf37, Stride: 0x1},
+		unicode.Range16{Lo: 0xcf39, Hi: 0xcf53, Stride: 0x1},
+		unicode.Range16{Lo: 0xcf55, Hi: 0xcf6f, Stride: 0x1},
+		unicode.Range16{Lo: 0xcf71, Hi: 0xcf8b, Stride: 0x1},
+		unicode.Range16{Lo: 0xcf8d, Hi: 0xcfa7, Stride: 0x1},
+		unicode.Range16{Lo: 0xcfa9, Hi: 0xcfc3, Stride: 0x1},
+		unicode.Range16{Lo: 0xcfc5, Hi: 0xcfdf, Stride: 0x1},
+		unicode.Range16{Lo: 0xcfe1, Hi: 0xcffb, Stride: 0x1},
+		unicode.Range16{Lo: 0xcffd, Hi: 0xd017, Stride: 0x1},
+		unicode.Range16{Lo: 0xd019, Hi: 0xd033, Stride: 0x1},
+		unicode.Range16{Lo: 0xd035, Hi: 0xd04f, Stride: 0x1},
+		unicode.Range16{Lo: 0xd051, Hi: 0xd06b, Stride: 0x1},
+		unicode.Range16{Lo: 0xd06d, Hi: 0xd087, Stride: 0x1},
+		unicode.Range16{Lo: 0xd089, Hi: 0xd0a3, Stride: 0x1},
+		unicode.Range16{Lo: 0xd0a5, Hi: 0xd0bf, Stride: 0x1},
+		unicode.Range16{Lo: 0xd0c1, Hi: 0xd0db, Stride: 0x1},
+		unicode.Range16{Lo: 0xd0dd, Hi: 0xd0f7, Stride: 0x1},
+		unicode.Range16{Lo: 0xd0f9, Hi: 0xd113, Stride: 0x1},
+		unicode.Range16{Lo: 0xd115, Hi: 0xd12f, Stride: 0x1},
+		unicode.Range16{Lo: 0xd131, Hi: 0xd14b, Stride: 0x1},
+		unicode.Range16{Lo: 0xd14d, Hi: 0xd167, Stride: 0x1},
+		unicode.Range16{Lo: 0xd169, Hi: 0xd183, Stride: 0x1},
+		unicode.Range16{Lo: 0xd185, Hi: 0xd19f, Stride: 0x1},
+		unicode.Range16{Lo: 0xd1a1, Hi: 0xd1bb, Stride: 0x1},
+		unicode.Range16{Lo: 0xd1bd, Hi: 0xd1d7, Stride: 0x1},
+		unicode.Range16{Lo: 0xd1d9, Hi: 0xd1f3, Stride: 0x1},
+		unicode.Range16{Lo: 0xd1f5, Hi: 0xd20f, Stride: 0x1},
+		unicode.Range16{Lo: 0xd211, Hi: 0xd22b, Stride: 0x1},
+		unicode.Range16{Lo: 0xd22d, Hi: 0xd247, Stride: 0x1},
+		unicode.Range16{Lo: 0xd249, Hi: 0xd263, Stride: 0x1},
+		unicode.Range16{Lo: 0xd265, Hi: 0xd27f, Stride: 0x1},
+		unicode.Range16{Lo: 0xd281, Hi: 0xd29b, Stride: 0x1},
+		unicode.Range16{Lo: 0xd29d, Hi: 0xd2b7, Stride: 0x1},
+		unicode.Range16{Lo: 0xd2b9, Hi: 0xd2d3, Stride: 0x1},
+		unicode.Range16{Lo: 0xd2d5, Hi: 0xd2ef, Stride: 0x1},
+		unicode.Range16{Lo: 0xd2f1, Hi: 0xd30b, Stride: 0x1},
+		unicode.Range16{Lo: 0xd30d, Hi: 0xd327, Stride: 0x1},
+		unicode.Range16{Lo: 0xd329, Hi: 0xd343, Stride: 0x1},
+		unicode.Range16{Lo: 0xd345, Hi: 0xd35f, Stride: 0x1},
+		unicode.Range16{Lo: 0xd361, Hi: 0xd37b, Stride: 0x1},
+		unicode.Range16{Lo: 0xd37d, Hi: 0xd397, Stride: 0x1},
+		unicode.Range16{Lo: 0xd399, Hi: 0xd3b3, Stride: 0x1},
+		unicode.Range16{Lo: 0xd3b5, Hi: 0xd3cf, Stride: 0x1},
+		unicode.Range16{Lo: 0xd3d1, Hi: 0xd3eb, Stride: 0x1},
+		unicode.Range16{Lo: 0xd3ed, Hi: 0xd407, Stride: 0x1},
+		unicode.Range16{Lo: 0xd409, Hi: 0xd423, Stride: 0x1},
+		unicode.Range16{Lo: 0xd425, Hi: 0xd43f, Stride: 0x1},
+		unicode.Range16{Lo: 0xd441, Hi: 0xd45b, Stride: 0x1},
+		unicode.Range16{Lo: 0xd45d, Hi: 0xd477, Stride: 0x1},
+		unicode.Range16{Lo: 0xd479, Hi: 0xd493, Stride: 0x1},
+		unicode.Range16{Lo: 0xd495, Hi: 0xd4af, Stride: 0x1},
+		unicode.Range16{Lo: 0xd4b1, Hi: 0xd4cb, Stride: 0x1},
+		unicode.Range16{Lo: 0xd4cd, Hi: 0xd4e7, Stride: 0x1},
+		unicode.Range16{Lo: 0xd4e9, Hi: 0xd503, Stride: 0x1},
+		unicode.Range16{Lo: 0xd505, Hi: 0xd51f, Stride: 0x1},
+		unicode.Range16{Lo: 0xd521, Hi: 0xd53b, Stride: 0x1},
+		unicode.Range16{Lo: 0xd53d, Hi: 0xd557, Stride: 0x1},
+		unicode.Range16{Lo: 0xd559, Hi: 0xd573, Stride: 0x1},
+		unicode.Range16{Lo: 0xd575, Hi: 0xd58f, Stride: 0x1},
+		unicode.Range16{Lo: 0xd591, Hi: 0xd5ab, Stride: 0x1},
+		unicode.Range16{Lo: 0xd5ad, Hi: 0xd5c7, Stride: 0x1},
+		unicode.Range16{Lo: 0xd5c9, Hi: 0xd5e3, Stride: 0x1},
+		unicode.Range16{Lo: 0xd5e5, Hi: 0xd5ff, Stride: 0x1},
+		unicode.Range16{Lo: 0xd601, Hi: 0xd61b, Stride: 0x1},
+		unicode.Range16{Lo: 0xd61d, Hi: 0xd637, Stride: 0x1},
+		unicode.Range16{Lo: 0xd639, Hi: 0xd653, Stride: 0x1},
+		unicode.Range16{Lo: 0xd655, Hi: 0xd66f, Stride: 0x1},
+		unicode.Range16{Lo: 0xd671, Hi: 0xd68b, Stride: 0x1},
+		unicode.Range16{Lo: 0xd68d, Hi: 0xd6a7, Stride: 0x1},
+		unicode.Range16{Lo: 0xd6a9, Hi: 0xd6c3, Stride: 0x1},
+		unicode.Range16{Lo: 0xd6c5, Hi: 0xd6df, Stride: 0x1},
+		unicode.Range16{Lo: 0xd6e1, Hi: 0xd6fb, Stride: 0x1},
+		unicode.Range16{Lo: 0xd6fd, Hi: 0xd717, Stride: 0x1},
+		unicode.Range16{Lo: 0xd719, Hi: 0xd733, Stride: 0x1},
+		unicode.Range16{Lo: 0xd735, Hi: 0xd74f, Stride: 0x1},
+		unicode.Range16{Lo: 0xd751, Hi: 0xd76b, Stride: 0x1},
+		unicode.Range16{Lo: 0xd76d, Hi: 0xd787, Stride: 0x1},
+		unicode.Range16{Lo: 0xd789, Hi: 0xd7a3, Stride: 0x1},
+	},
+	LatinOffset: 0,
+}
+
+// _GraphemePrepend lists the code points classed as "Prepend" by
+// _GraphemeRuneType. NOTE(review): looks machine-generated from Unicode
+// grapheme-break data — confirm with the generator before hand-editing.
+var _GraphemePrepend = &unicode.RangeTable{
+	R16: []unicode.Range16{
+		{Lo: 0x600, Hi: 0x605, Stride: 0x1},
+		{Lo: 0x6dd, Hi: 0x6dd, Stride: 0x1},
+		{Lo: 0x70f, Hi: 0x70f, Stride: 0x1},
+		{Lo: 0x890, Hi: 0x891, Stride: 0x1},
+		{Lo: 0x8e2, Hi: 0x8e2, Stride: 0x1},
+		{Lo: 0xd4e, Hi: 0xd4e, Stride: 0x1},
+	},
+	R32: []unicode.Range32{
+		{Lo: 0x110bd, Hi: 0x110bd, Stride: 0x1},
+		{Lo: 0x110cd, Hi: 0x110cd, Stride: 0x1},
+		{Lo: 0x111c2, Hi: 0x111c3, Stride: 0x1},
+		{Lo: 0x1193f, Hi: 0x1193f, Stride: 0x1},
+		{Lo: 0x11941, Hi: 0x11941, Stride: 0x1},
+		{Lo: 0x11a3a, Hi: 0x11a3a, Stride: 0x1},
+		{Lo: 0x11a84, Hi: 0x11a89, Stride: 0x1},
+		{Lo: 0x11d46, Hi: 0x11d46, Stride: 0x1},
+		{Lo: 0x11f02, Hi: 0x11f02, Stride: 0x1},
+	},
+	LatinOffset: 0,
+}
+
+// _GraphemeRegional_Indicator covers U+1F1E6..U+1F1FF, the regional
+// indicator symbols; _GraphemeRuneType reports runes here as the
+// Regional_Indicator class.
+var _GraphemeRegional_Indicator = &unicode.RangeTable{
+	R32: []unicode.Range32{
+		{Lo: 0x1f1e6, Hi: 0x1f1ff, Stride: 0x1},
+	},
+	LatinOffset: 0,
+}
+
+// _GraphemeSpacingMark lists the code points classed as "SpacingMark" by
+// _GraphemeRuneType. NOTE(review): table looks machine-generated from
+// Unicode grapheme-break data — confirm with the generator; do not edit
+// individual entries by hand.
+var _GraphemeSpacingMark = &unicode.RangeTable{
+	R16: []unicode.Range16{
+		unicode.Range16{Lo: 0x903, Hi: 0x903, Stride: 0x1},
+		unicode.Range16{Lo: 0x93b, Hi: 0x93b, Stride: 0x1},
+		unicode.Range16{Lo: 0x93e, Hi: 0x940, Stride: 0x1},
+		unicode.Range16{Lo: 0x949, Hi: 0x94c, Stride: 0x1},
+		unicode.Range16{Lo: 0x94e, Hi: 0x94f, Stride: 0x1},
+		unicode.Range16{Lo: 0x982, Hi: 0x983, Stride: 0x1},
+		unicode.Range16{Lo: 0x9bf, Hi: 0x9c0, Stride: 0x1},
+		unicode.Range16{Lo: 0x9c7, Hi: 0x9c8, Stride: 0x1},
+		unicode.Range16{Lo: 0x9cb, Hi: 0x9cc, Stride: 0x1},
+		unicode.Range16{Lo: 0xa03, Hi: 0xa03, Stride: 0x1},
+		unicode.Range16{Lo: 0xa3e, Hi: 0xa40, Stride: 0x1},
+		unicode.Range16{Lo: 0xa83, Hi: 0xa83, Stride: 0x1},
+		unicode.Range16{Lo: 0xabe, Hi: 0xac0, Stride: 0x1},
+		unicode.Range16{Lo: 0xac9, Hi: 0xac9, Stride: 0x1},
+		unicode.Range16{Lo: 0xacb, Hi: 0xacc, Stride: 0x1},
+		unicode.Range16{Lo: 0xb02, Hi: 0xb03, Stride: 0x1},
+		unicode.Range16{Lo: 0xb40, Hi: 0xb40, Stride: 0x1},
+		unicode.Range16{Lo: 0xb47, Hi: 0xb48, Stride: 0x1},
+		unicode.Range16{Lo: 0xb4b, Hi: 0xb4c, Stride: 0x1},
+		unicode.Range16{Lo: 0xbbf, Hi: 0xbbf, Stride: 0x1},
+		unicode.Range16{Lo: 0xbc1, Hi: 0xbc2, Stride: 0x1},
+		unicode.Range16{Lo: 0xbc6, Hi: 0xbc8, Stride: 0x1},
+		unicode.Range16{Lo: 0xbca, Hi: 0xbcc, Stride: 0x1},
+		unicode.Range16{Lo: 0xc01, Hi: 0xc03, Stride: 0x1},
+		unicode.Range16{Lo: 0xc41, Hi: 0xc44, Stride: 0x1},
+		unicode.Range16{Lo: 0xc82, Hi: 0xc83, Stride: 0x1},
+		unicode.Range16{Lo: 0xcbe, Hi: 0xcbe, Stride: 0x1},
+		unicode.Range16{Lo: 0xcc0, Hi: 0xcc1, Stride: 0x1},
+		unicode.Range16{Lo: 0xcc3, Hi: 0xcc4, Stride: 0x1},
+		unicode.Range16{Lo: 0xcc7, Hi: 0xcc8, Stride: 0x1},
+		unicode.Range16{Lo: 0xcca, Hi: 0xccb, Stride: 0x1},
+		unicode.Range16{Lo: 0xcf3, Hi: 0xcf3, Stride: 0x1},
+		unicode.Range16{Lo: 0xd02, Hi: 0xd03, Stride: 0x1},
+		unicode.Range16{Lo: 0xd3f, Hi: 0xd40, Stride: 0x1},
+		unicode.Range16{Lo: 0xd46, Hi: 0xd48, Stride: 0x1},
+		unicode.Range16{Lo: 0xd4a, Hi: 0xd4c, Stride: 0x1},
+		unicode.Range16{Lo: 0xd82, Hi: 0xd83, Stride: 0x1},
+		unicode.Range16{Lo: 0xdd0, Hi: 0xdd1, Stride: 0x1},
+		unicode.Range16{Lo: 0xdd8, Hi: 0xdde, Stride: 0x1},
+		unicode.Range16{Lo: 0xdf2, Hi: 0xdf3, Stride: 0x1},
+		unicode.Range16{Lo: 0xe33, Hi: 0xe33, Stride: 0x1},
+		unicode.Range16{Lo: 0xeb3, Hi: 0xeb3, Stride: 0x1},
+		unicode.Range16{Lo: 0xf3e, Hi: 0xf3f, Stride: 0x1},
+		unicode.Range16{Lo: 0xf7f, Hi: 0xf7f, Stride: 0x1},
+		unicode.Range16{Lo: 0x1031, Hi: 0x1031, Stride: 0x1},
+		unicode.Range16{Lo: 0x103b, Hi: 0x103c, Stride: 0x1},
+		unicode.Range16{Lo: 0x1056, Hi: 0x1057, Stride: 0x1},
+		unicode.Range16{Lo: 0x1084, Hi: 0x1084, Stride: 0x1},
+		unicode.Range16{Lo: 0x1715, Hi: 0x1715, Stride: 0x1},
+		unicode.Range16{Lo: 0x1734, Hi: 0x1734, Stride: 0x1},
+		unicode.Range16{Lo: 0x17b6, Hi: 0x17b6, Stride: 0x1},
+		unicode.Range16{Lo: 0x17be, Hi: 0x17c5, Stride: 0x1},
+		unicode.Range16{Lo: 0x17c7, Hi: 0x17c8, Stride: 0x1},
+		unicode.Range16{Lo: 0x1923, Hi: 0x1926, Stride: 0x1},
+		unicode.Range16{Lo: 0x1929, Hi: 0x192b, Stride: 0x1},
+		unicode.Range16{Lo: 0x1930, Hi: 0x1931, Stride: 0x1},
+		unicode.Range16{Lo: 0x1933, Hi: 0x1938, Stride: 0x1},
+		unicode.Range16{Lo: 0x1a19, Hi: 0x1a1a, Stride: 0x1},
+		unicode.Range16{Lo: 0x1a55, Hi: 0x1a55, Stride: 0x1},
+		unicode.Range16{Lo: 0x1a57, Hi: 0x1a57, Stride: 0x1},
+		unicode.Range16{Lo: 0x1a6d, Hi: 0x1a72, Stride: 0x1},
+		unicode.Range16{Lo: 0x1b04, Hi: 0x1b04, Stride: 0x1},
+		unicode.Range16{Lo: 0x1b3b, Hi: 0x1b3b, Stride: 0x1},
+		unicode.Range16{Lo: 0x1b3d, Hi: 0x1b41, Stride: 0x1},
+		unicode.Range16{Lo: 0x1b43, Hi: 0x1b44, Stride: 0x1},
+		unicode.Range16{Lo: 0x1b82, Hi: 0x1b82, Stride: 0x1},
+		unicode.Range16{Lo: 0x1ba1, Hi: 0x1ba1, Stride: 0x1},
+		unicode.Range16{Lo: 0x1ba6, Hi: 0x1ba7, Stride: 0x1},
+		unicode.Range16{Lo: 0x1baa, Hi: 0x1baa, Stride: 0x1},
+		unicode.Range16{Lo: 0x1be7, Hi: 0x1be7, Stride: 0x1},
+		unicode.Range16{Lo: 0x1bea, Hi: 0x1bec, Stride: 0x1},
+		unicode.Range16{Lo: 0x1bee, Hi: 0x1bee, Stride: 0x1},
+		unicode.Range16{Lo: 0x1bf2, Hi: 0x1bf3, Stride: 0x1},
+		unicode.Range16{Lo: 0x1c24, Hi: 0x1c2b, Stride: 0x1},
+		unicode.Range16{Lo: 0x1c34, Hi: 0x1c35, Stride: 0x1},
+		unicode.Range16{Lo: 0x1ce1, Hi: 0x1ce1, Stride: 0x1},
+		unicode.Range16{Lo: 0x1cf7, Hi: 0x1cf7, Stride: 0x1},
+		unicode.Range16{Lo: 0xa823, Hi: 0xa824, Stride: 0x1},
+		unicode.Range16{Lo: 0xa827, Hi: 0xa827, Stride: 0x1},
+		unicode.Range16{Lo: 0xa880, Hi: 0xa881, Stride: 0x1},
+		unicode.Range16{Lo: 0xa8b4, Hi: 0xa8c3, Stride: 0x1},
+		unicode.Range16{Lo: 0xa952, Hi: 0xa953, Stride: 0x1},
+		unicode.Range16{Lo: 0xa983, Hi: 0xa983, Stride: 0x1},
+		unicode.Range16{Lo: 0xa9b4, Hi: 0xa9b5, Stride: 0x1},
+		unicode.Range16{Lo: 0xa9ba, Hi: 0xa9bb, Stride: 0x1},
+		unicode.Range16{Lo: 0xa9be, Hi: 0xa9c0, Stride: 0x1},
+		unicode.Range16{Lo: 0xaa2f, Hi: 0xaa30, Stride: 0x1},
+		unicode.Range16{Lo: 0xaa33, Hi: 0xaa34, Stride: 0x1},
+		unicode.Range16{Lo: 0xaa4d, Hi: 0xaa4d, Stride: 0x1},
+		unicode.Range16{Lo: 0xaaeb, Hi: 0xaaeb, Stride: 0x1},
+		unicode.Range16{Lo: 0xaaee, Hi: 0xaaef, Stride: 0x1},
+		unicode.Range16{Lo: 0xaaf5, Hi: 0xaaf5, Stride: 0x1},
+		unicode.Range16{Lo: 0xabe3, Hi: 0xabe4, Stride: 0x1},
+		unicode.Range16{Lo: 0xabe6, Hi: 0xabe7, Stride: 0x1},
+		unicode.Range16{Lo: 0xabe9, Hi: 0xabea, Stride: 0x1},
+		unicode.Range16{Lo: 0xabec, Hi: 0xabec, Stride: 0x1},
+	},
+	R32: []unicode.Range32{
+		unicode.Range32{Lo: 0x11000, Hi: 0x11000, Stride: 0x1},
+		unicode.Range32{Lo: 0x11002, Hi: 0x11002, Stride: 0x1},
+		unicode.Range32{Lo: 0x11082, Hi: 0x11082, Stride: 0x1},
+		unicode.Range32{Lo: 0x110b0, Hi: 0x110b2, Stride: 0x1},
+		unicode.Range32{Lo: 0x110b7, Hi: 0x110b8, Stride: 0x1},
+		unicode.Range32{Lo: 0x1112c, Hi: 0x1112c, Stride: 0x1},
+		unicode.Range32{Lo: 0x11145, Hi: 0x11146, Stride: 0x1},
+		unicode.Range32{Lo: 0x11182, Hi: 0x11182, Stride: 0x1},
+		unicode.Range32{Lo: 0x111b3, Hi: 0x111b5, Stride: 0x1},
+		unicode.Range32{Lo: 0x111bf, Hi: 0x111c0, Stride: 0x1},
+		unicode.Range32{Lo: 0x111ce, Hi: 0x111ce, Stride: 0x1},
+		unicode.Range32{Lo: 0x1122c, Hi: 0x1122e, Stride: 0x1},
+		unicode.Range32{Lo: 0x11232, Hi: 0x11233, Stride: 0x1},
+		unicode.Range32{Lo: 0x11235, Hi: 0x11235, Stride: 0x1},
+		unicode.Range32{Lo: 0x112e0, Hi: 0x112e2, Stride: 0x1},
+		unicode.Range32{Lo: 0x11302, Hi: 0x11303, Stride: 0x1},
+		unicode.Range32{Lo: 0x1133f, Hi: 0x1133f, Stride: 0x1},
+		unicode.Range32{Lo: 0x11341, Hi: 0x11344, Stride: 0x1},
+		unicode.Range32{Lo: 0x11347, Hi: 0x11348, Stride: 0x1},
+		unicode.Range32{Lo: 0x1134b, Hi: 0x1134d, Stride: 0x1},
+		unicode.Range32{Lo: 0x11362, Hi: 0x11363, Stride: 0x1},
+		unicode.Range32{Lo: 0x11435, Hi: 0x11437, Stride: 0x1},
+		unicode.Range32{Lo: 0x11440, Hi: 0x11441, Stride: 0x1},
+		unicode.Range32{Lo: 0x11445, Hi: 0x11445, Stride: 0x1},
+		unicode.Range32{Lo: 0x114b1, Hi: 0x114b2, Stride: 0x1},
+		unicode.Range32{Lo: 0x114b9, Hi: 0x114b9, Stride: 0x1},
+		unicode.Range32{Lo: 0x114bb, Hi: 0x114bc, Stride: 0x1},
+		unicode.Range32{Lo: 0x114be, Hi: 0x114be, Stride: 0x1},
+		unicode.Range32{Lo: 0x114c1, Hi: 0x114c1, Stride: 0x1},
+		unicode.Range32{Lo: 0x115b0, Hi: 0x115b1, Stride: 0x1},
+		unicode.Range32{Lo: 0x115b8, Hi: 0x115bb, Stride: 0x1},
+		unicode.Range32{Lo: 0x115be, Hi: 0x115be, Stride: 0x1},
+		unicode.Range32{Lo: 0x11630, Hi: 0x11632, Stride: 0x1},
+		unicode.Range32{Lo: 0x1163b, Hi: 0x1163c, Stride: 0x1},
+		unicode.Range32{Lo: 0x1163e, Hi: 0x1163e, Stride: 0x1},
+		unicode.Range32{Lo: 0x116ac, Hi: 0x116ac, Stride: 0x1},
+		unicode.Range32{Lo: 0x116ae, Hi: 0x116af, Stride: 0x1},
+		unicode.Range32{Lo: 0x116b6, Hi: 0x116b6, Stride: 0x1},
+		unicode.Range32{Lo: 0x11726, Hi: 0x11726, Stride: 0x1},
+		unicode.Range32{Lo: 0x1182c, Hi: 0x1182e, Stride: 0x1},
+		unicode.Range32{Lo: 0x11838, Hi: 0x11838, Stride: 0x1},
+		unicode.Range32{Lo: 0x11931, Hi: 0x11935, Stride: 0x1},
+		unicode.Range32{Lo: 0x11937, Hi: 0x11938, Stride: 0x1},
+		unicode.Range32{Lo: 0x1193d, Hi: 0x1193d, Stride: 0x1},
+		unicode.Range32{Lo: 0x11940, Hi: 0x11940, Stride: 0x1},
+		unicode.Range32{Lo: 0x11942, Hi: 0x11942, Stride: 0x1},
+		unicode.Range32{Lo: 0x119d1, Hi: 0x119d3, Stride: 0x1},
+		unicode.Range32{Lo: 0x119dc, Hi: 0x119df, Stride: 0x1},
+		unicode.Range32{Lo: 0x119e4, Hi: 0x119e4, Stride: 0x1},
+		unicode.Range32{Lo: 0x11a39, Hi: 0x11a39, Stride: 0x1},
+		unicode.Range32{Lo: 0x11a57, Hi: 0x11a58, Stride: 0x1},
+		unicode.Range32{Lo: 0x11a97, Hi: 0x11a97, Stride: 0x1},
+		unicode.Range32{Lo: 0x11c2f, Hi: 0x11c2f, Stride: 0x1},
+		unicode.Range32{Lo: 0x11c3e, Hi: 0x11c3e, Stride: 0x1},
+		unicode.Range32{Lo: 0x11ca9, Hi: 0x11ca9, Stride: 0x1},
+		unicode.Range32{Lo: 0x11cb1, Hi: 0x11cb1, Stride: 0x1},
+		unicode.Range32{Lo: 0x11cb4, Hi: 0x11cb4, Stride: 0x1},
+		unicode.Range32{Lo: 0x11d8a, Hi: 0x11d8e, Stride: 0x1},
+		unicode.Range32{Lo: 0x11d93, Hi: 0x11d94, Stride: 0x1},
+		unicode.Range32{Lo: 0x11d96, Hi: 0x11d96, Stride: 0x1},
+		unicode.Range32{Lo: 0x11ef5, Hi: 0x11ef6, Stride: 0x1},
+		unicode.Range32{Lo: 0x11f03, Hi: 0x11f03, Stride: 0x1},
+		unicode.Range32{Lo: 0x11f34, Hi: 0x11f35, Stride: 0x1},
+		unicode.Range32{Lo: 0x11f3e, Hi: 0x11f3f, Stride: 0x1},
+		unicode.Range32{Lo: 0x11f41, Hi: 0x11f41, Stride: 0x1},
+		unicode.Range32{Lo: 0x16f51, Hi: 0x16f87, Stride: 0x1},
+		unicode.Range32{Lo: 0x16ff0, Hi: 0x16ff1, Stride: 0x1},
+		unicode.Range32{Lo: 0x1d166, Hi: 0x1d166, Stride: 0x1},
+		unicode.Range32{Lo: 0x1d16d, Hi: 0x1d16d, Stride: 0x1},
+	},
+	LatinOffset: 0,
+}
+
+// _GraphemeT lists the code points classed as "T" by _GraphemeRuneType.
+// NOTE(review): ranges match the Hangul trailing-jamo blocks — confirm
+// against the Unicode break data this table was generated from.
+var _GraphemeT = &unicode.RangeTable{
+	R16: []unicode.Range16{
+		{Lo: 0x11a8, Hi: 0x11ff, Stride: 0x1},
+		{Lo: 0xd7cb, Hi: 0xd7fb, Stride: 0x1},
+	},
+	LatinOffset: 0,
+}
+
+// _GraphemeV lists the code points classed as "V" by _GraphemeRuneType.
+// NOTE(review): ranges match the Hangul vowel-jamo blocks — confirm
+// against the Unicode break data this table was generated from.
+var _GraphemeV = &unicode.RangeTable{
+	R16: []unicode.Range16{
+		{Lo: 0x1160, Hi: 0x11a7, Stride: 0x1},
+		{Lo: 0xd7b0, Hi: 0xd7c6, Stride: 0x1},
+	},
+	LatinOffset: 0,
+}
+
+// _GraphemeZWJ holds exactly U+200D (ZERO WIDTH JOINER); _GraphemeRuneType
+// reports it as the ZWJ class.
+var _GraphemeZWJ = &unicode.RangeTable{
+	R16: []unicode.Range16{
+		{Lo: 0x200d, Hi: 0x200d, Stride: 0x1},
+	},
+	LatinOffset: 0,
+}
+
+// _GraphemeRuneRange is a local alias of unicode.RangeTable, used as the
+// return type of _GraphemeRuneType so matched tables can carry a String
+// method without modifying the stdlib type.
+type _GraphemeRuneRange unicode.RangeTable
+
+// _GraphemeRuneType reports which grapheme-break table the rune r belongs
+// to, or nil when r is in none of them. Tables are probed in the same
+// order as before, so a rune present in more than one table resolves to
+// the earliest entry.
+func _GraphemeRuneType(r rune) *_GraphemeRuneRange {
+	for _, table := range []*unicode.RangeTable{
+		_GraphemeCR,
+		_GraphemeControl,
+		_GraphemeExtend,
+		_GraphemeL,
+		_GraphemeLF,
+		_GraphemeLV,
+		_GraphemeLVT,
+		_GraphemePrepend,
+		_GraphemeRegional_Indicator,
+		_GraphemeSpacingMark,
+		_GraphemeT,
+		_GraphemeV,
+		_GraphemeZWJ,
+	} {
+		if unicode.Is(table, r) {
+			return (*_GraphemeRuneRange)(table)
+		}
+	}
+	return nil
+}
+// String returns the Grapheme_Cluster_Break property-value name of the
+// table rng points to (e.g. "CR", "LVT", "ZWJ"). The comparison is by
+// table pointer identity, so only the package-level _Grapheme* tables are
+// recognized; any other pointer — including nil — yields "Other".
+func (rng *_GraphemeRuneRange) String() string {
+	switch (*unicode.RangeTable)(rng) {
+	case _GraphemeCR:
+		return "CR"
+	case _GraphemeControl:
+		return "Control"
+	case _GraphemeExtend:
+		return "Extend"
+	case _GraphemeL:
+		return "L"
+	case _GraphemeLF:
+		return "LF"
+	case _GraphemeLV:
+		return "LV"
+	case _GraphemeLVT:
+		return "LVT"
+	case _GraphemePrepend:
+		return "Prepend"
+	case _GraphemeRegional_Indicator:
+		return "Regional_Indicator"
+	case _GraphemeSpacingMark:
+		return "SpacingMark"
+	case _GraphemeT:
+		return "T"
+	case _GraphemeV:
+		return "V"
+	case _GraphemeZWJ:
+		return "ZWJ"
+	default:
+		return "Other"
+	}
+}
+
+var _WordALetter = &unicode.RangeTable{
+	R16: []unicode.Range16{
+		unicode.Range16{Lo: 0x41, Hi: 0x5a, Stride: 0x1},
+		unicode.Range16{Lo: 0x61, Hi: 0x7a, Stride: 0x1},
+		unicode.Range16{Lo: 0xaa, Hi: 0xaa, Stride: 0x1},
+		unicode.Range16{Lo: 0xb5, Hi: 0xb5, Stride: 0x1},
+		unicode.Range16{Lo: 0xba, Hi: 0xba, Stride: 0x1},
+		unicode.Range16{Lo: 0xc0, Hi: 0xd6, Stride: 0x1},
+		unicode.Range16{Lo: 0xd8, Hi: 0xf6, Stride: 0x1},
+		unicode.Range16{Lo: 0xf8, Hi: 0x1ba, Stride: 0x1},
+		unicode.Range16{Lo: 0x1bb, Hi: 0x1bb, Stride: 0x1},
+		unicode.Range16{Lo: 0x1bc, Hi: 0x1bf, Stride: 0x1},
+		unicode.Range16{Lo: 0x1c0, Hi: 0x1c3, Stride: 0x1},
+		unicode.Range16{Lo: 0x1c4, Hi: 0x293, Stride: 0x1},
+		unicode.Range16{Lo: 0x294, Hi: 0x294, Stride: 0x1},
+		unicode.Range16{Lo: 0x295, Hi: 0x2af, Stride: 0x1},
+		unicode.Range16{Lo: 0x2b0, Hi: 0x2c1, Stride: 0x1},
+		unicode.Range16{Lo: 0x2c2, Hi: 0x2c5, Stride: 0x1},
+		unicode.Range16{Lo: 0x2c6, Hi: 0x2d1, Stride: 0x1},
+		unicode.Range16{Lo: 0x2d2, Hi: 0x2d7, Stride: 0x1},
+		unicode.Range16{Lo: 0x2de, Hi: 0x2df, Stride: 0x1},
+		unicode.Range16{Lo: 0x2e0, Hi: 0x2e4, Stride: 0x1},
+		unicode.Range16{Lo: 0x2e5, Hi: 0x2eb, Stride: 0x1},
+		unicode.Range16{Lo: 0x2ec, Hi: 0x2ec, Stride: 0x1},
+		unicode.Range16{Lo: 0x2ed, Hi: 0x2ed, Stride: 0x1},
+		unicode.Range16{Lo: 0x2ee, Hi: 0x2ee, Stride: 0x1},
+		unicode.Range16{Lo: 0x2ef, Hi: 0x2ff, Stride: 0x1},
+		unicode.Range16{Lo: 0x370, Hi: 0x373, Stride: 0x1},
+		unicode.Range16{Lo: 0x374, Hi: 0x374, Stride: 0x1},
+		unicode.Range16{Lo: 0x376, Hi: 0x377, Stride: 0x1},
+		unicode.Range16{Lo: 0x37a, Hi: 0x37a, Stride: 0x1},
+		unicode.Range16{Lo: 0x37b, Hi: 0x37d, Stride: 0x1},
+		unicode.Range16{Lo: 0x37f, Hi: 0x37f, Stride: 0x1},
+		unicode.Range16{Lo: 0x386, Hi: 0x386, Stride: 0x1},
+		unicode.Range16{Lo: 0x388, Hi: 0x38a, Stride: 0x1},
+		unicode.Range16{Lo: 0x38c, Hi: 0x38c, Stride: 0x1},
+		unicode.Range16{Lo: 0x38e, Hi: 0x3a1, Stride: 0x1},
+		unicode.Range16{Lo: 0x3a3, Hi: 0x3f5, Stride: 0x1},
+		unicode.Range16{Lo: 0x3f7, Hi: 0x481, Stride: 0x1},
+		unicode.Range16{Lo: 0x48a, Hi: 0x52f, Stride: 0x1},
+		unicode.Range16{Lo: 0x531, Hi: 0x556, Stride: 0x1},
+		unicode.Range16{Lo: 0x559, Hi: 0x559, Stride: 0x1},
+		unicode.Range16{Lo: 0x55a, Hi: 0x55c, Stride: 0x1},
+		unicode.Range16{Lo: 0x55e, Hi: 0x55e, Stride: 0x1},
+		unicode.Range16{Lo: 0x560, Hi: 0x588, Stride: 0x1},
+		unicode.Range16{Lo: 0x58a, Hi: 0x58a, Stride: 0x1},
+		unicode.Range16{Lo: 0x5f3, Hi: 0x5f3, Stride: 0x1},
+		unicode.Range16{Lo: 0x620, Hi: 0x63f, Stride: 0x1},
+		unicode.Range16{Lo: 0x640, Hi: 0x640, Stride: 0x1},
+		unicode.Range16{Lo: 0x641, Hi: 0x64a, Stride: 0x1},
+		unicode.Range16{Lo: 0x66e, Hi: 0x66f, Stride: 0x1},
+		unicode.Range16{Lo: 0x671, Hi: 0x6d3, Stride: 0x1},
+		unicode.Range16{Lo: 0x6d5, Hi: 0x6d5, Stride: 0x1},
+		unicode.Range16{Lo: 0x6e5, Hi: 0x6e6, Stride: 0x1},
+		unicode.Range16{Lo: 0x6ee, Hi: 0x6ef, Stride: 0x1},
+		unicode.Range16{Lo: 0x6fa, Hi: 0x6fc, Stride: 0x1},
+		unicode.Range16{Lo: 0x6ff, Hi: 0x6ff, Stride: 0x1},
+		unicode.Range16{Lo: 0x710, Hi: 0x710, Stride: 0x1},
+		unicode.Range16{Lo: 0x712, Hi: 0x72f, Stride: 0x1},
+		unicode.Range16{Lo: 0x74d, Hi: 0x7a5, Stride: 0x1},
+		unicode.Range16{Lo: 0x7b1, Hi: 0x7b1, Stride: 0x1},
+		unicode.Range16{Lo: 0x7ca, Hi: 0x7ea, Stride: 0x1},
+		unicode.Range16{Lo: 0x7f4, Hi: 0x7f5, Stride: 0x1},
+		unicode.Range16{Lo: 0x7fa, Hi: 0x7fa, Stride: 0x1},
+		unicode.Range16{Lo: 0x800, Hi: 0x815, Stride: 0x1},
+		unicode.Range16{Lo: 0x81a, Hi: 0x81a, Stride: 0x1},
+		unicode.Range16{Lo: 0x824, Hi: 0x824, Stride: 0x1},
+		unicode.Range16{Lo: 0x828, Hi: 0x828, Stride: 0x1},
+		unicode.Range16{Lo: 0x840, Hi: 0x858, Stride: 0x1},
+		unicode.Range16{Lo: 0x860, Hi: 0x86a, Stride: 0x1},
+		unicode.Range16{Lo: 0x870, Hi: 0x887, Stride: 0x1},
+		unicode.Range16{Lo: 0x889, Hi: 0x88e, Stride: 0x1},
+		unicode.Range16{Lo: 0x8a0, Hi: 0x8c8, Stride: 0x1},
+		unicode.Range16{Lo: 0x8c9, Hi: 0x8c9, Stride: 0x1},
+		unicode.Range16{Lo: 0x904, Hi: 0x939, Stride: 0x1},
+		unicode.Range16{Lo: 0x93d, Hi: 0x93d, Stride: 0x1},
+		unicode.Range16{Lo: 0x950, Hi: 0x950, Stride: 0x1},
+		unicode.Range16{Lo: 0x958, Hi: 0x961, Stride: 0x1},
+		unicode.Range16{Lo: 0x971, Hi: 0x971, Stride: 0x1},
+		unicode.Range16{Lo: 0x972, Hi: 0x980, Stride: 0x1},
+		unicode.Range16{Lo: 0x985, Hi: 0x98c, Stride: 0x1},
+		unicode.Range16{Lo: 0x98f, Hi: 0x990, Stride: 0x1},
+		unicode.Range16{Lo: 0x993, Hi: 0x9a8, Stride: 0x1},
+		unicode.Range16{Lo: 0x9aa, Hi: 0x9b0, Stride: 0x1},
+		unicode.Range16{Lo: 0x9b2, Hi: 0x9b2, Stride: 0x1},
+		unicode.Range16{Lo: 0x9b6, Hi: 0x9b9, Stride: 0x1},
+		unicode.Range16{Lo: 0x9bd, Hi: 0x9bd, Stride: 0x1},
+		unicode.Range16{Lo: 0x9ce, Hi: 0x9ce, Stride: 0x1},
+		unicode.Range16{Lo: 0x9dc, Hi: 0x9dd, Stride: 0x1},
+		unicode.Range16{Lo: 0x9df, Hi: 0x9e1, Stride: 0x1},
+		unicode.Range16{Lo: 0x9f0, Hi: 0x9f1, Stride: 0x1},
+		unicode.Range16{Lo: 0x9fc, Hi: 0x9fc, Stride: 0x1},
+		unicode.Range16{Lo: 0xa05, Hi: 0xa0a, Stride: 0x1},
+		unicode.Range16{Lo: 0xa0f, Hi: 0xa10, Stride: 0x1},
+		unicode.Range16{Lo: 0xa13, Hi: 0xa28, Stride: 0x1},
+		unicode.Range16{Lo: 0xa2a, Hi: 0xa30, Stride: 0x1},
+		unicode.Range16{Lo: 0xa32, Hi: 0xa33, Stride: 0x1},
+		unicode.Range16{Lo: 0xa35, Hi: 0xa36, Stride: 0x1},
+		unicode.Range16{Lo: 0xa38, Hi: 0xa39, Stride: 0x1},
+		unicode.Range16{Lo: 0xa59, Hi: 0xa5c, Stride: 0x1},
+		unicode.Range16{Lo: 0xa5e, Hi: 0xa5e, Stride: 0x1},
+		unicode.Range16{Lo: 0xa72, Hi: 0xa74, Stride: 0x1},
+		unicode.Range16{Lo: 0xa85, Hi: 0xa8d, Stride: 0x1},
+		unicode.Range16{Lo: 0xa8f, Hi: 0xa91, Stride: 0x1},
+		unicode.Range16{Lo: 0xa93, Hi: 0xaa8, Stride: 0x1},
+		unicode.Range16{Lo: 0xaaa, Hi: 0xab0, Stride: 0x1},
+		unicode.Range16{Lo: 0xab2, Hi: 0xab3, Stride: 0x1},
+		unicode.Range16{Lo: 0xab5, Hi: 0xab9, Stride: 0x1},
+		unicode.Range16{Lo: 0xabd, Hi: 0xabd, Stride: 0x1},
+		unicode.Range16{Lo: 0xad0, Hi: 0xad0, Stride: 0x1},
+		unicode.Range16{Lo: 0xae0, Hi: 0xae1, Stride: 0x1},
+		unicode.Range16{Lo: 0xaf9, Hi: 0xaf9, Stride: 0x1},
+		unicode.Range16{Lo: 0xb05, Hi: 0xb0c, Stride: 0x1},
+		unicode.Range16{Lo: 0xb0f, Hi: 0xb10, Stride: 0x1},
+		unicode.Range16{Lo: 0xb13, Hi: 0xb28, Stride: 0x1},
+		unicode.Range16{Lo: 0xb2a, Hi: 0xb30, Stride: 0x1},
+		unicode.Range16{Lo: 0xb32, Hi: 0xb33, Stride: 0x1},
+		unicode.Range16{Lo: 0xb35, Hi: 0xb39, Stride: 0x1},
+		unicode.Range16{Lo: 0xb3d, Hi: 0xb3d, Stride: 0x1},
+		unicode.Range16{Lo: 0xb5c, Hi: 0xb5d, Stride: 0x1},
+		unicode.Range16{Lo: 0xb5f, Hi: 0xb61, Stride: 0x1},
+		unicode.Range16{Lo: 0xb71, Hi: 0xb71, Stride: 0x1},
+		unicode.Range16{Lo: 0xb83, Hi: 0xb83, Stride: 0x1},
+		unicode.Range16{Lo: 0xb85, Hi: 0xb8a, Stride: 0x1},
+		unicode.Range16{Lo: 0xb8e, Hi: 0xb90, Stride: 0x1},
+		unicode.Range16{Lo: 0xb92, Hi: 0xb95, Stride: 0x1},
+		unicode.Range16{Lo: 0xb99, Hi: 0xb9a, Stride: 0x1},
+		unicode.Range16{Lo: 0xb9c, Hi: 0xb9c, Stride: 0x1},
+		unicode.Range16{Lo: 0xb9e, Hi: 0xb9f, Stride: 0x1},
+		unicode.Range16{Lo: 0xba3, Hi: 0xba4, Stride: 0x1},
+		unicode.Range16{Lo: 0xba8, Hi: 0xbaa, Stride: 0x1},
+		unicode.Range16{Lo: 0xbae, Hi: 0xbb9, Stride: 0x1},
+		unicode.Range16{Lo: 0xbd0, Hi: 0xbd0, Stride: 0x1},
+		unicode.Range16{Lo: 0xc05, Hi: 0xc0c, Stride: 0x1},
+		unicode.Range16{Lo: 0xc0e, Hi: 0xc10, Stride: 0x1},
+		unicode.Range16{Lo: 0xc12, Hi: 0xc28, Stride: 0x1},
+		unicode.Range16{Lo: 0xc2a, Hi: 0xc39, Stride: 0x1},
+		unicode.Range16{Lo: 0xc3d, Hi: 0xc3d, Stride: 0x1},
+		unicode.Range16{Lo: 0xc58, Hi: 0xc5a, Stride: 0x1},
+		unicode.Range16{Lo: 0xc5d, Hi: 0xc5d, Stride: 0x1},
+		unicode.Range16{Lo: 0xc60, Hi: 0xc61, Stride: 0x1},
+		unicode.Range16{Lo: 0xc80, Hi: 0xc80, Stride: 0x1},
+		unicode.Range16{Lo: 0xc85, Hi: 0xc8c, Stride: 0x1},
+		unicode.Range16{Lo: 0xc8e, Hi: 0xc90, Stride: 0x1},
+		unicode.Range16{Lo: 0xc92, Hi: 0xca8, Stride: 0x1},
+		unicode.Range16{Lo: 0xcaa, Hi: 0xcb3, Stride: 0x1},
+		unicode.Range16{Lo: 0xcb5, Hi: 0xcb9, Stride: 0x1},
+		unicode.Range16{Lo: 0xcbd, Hi: 0xcbd, Stride: 0x1},
+		unicode.Range16{Lo: 0xcdd, Hi: 0xcde, Stride: 0x1},
+		unicode.Range16{Lo: 0xce0, Hi: 0xce1, Stride: 0x1},
+		unicode.Range16{Lo: 0xcf1, Hi: 0xcf2, Stride: 0x1},
+		unicode.Range16{Lo: 0xd04, Hi: 0xd0c, Stride: 0x1},
+		unicode.Range16{Lo: 0xd0e, Hi: 0xd10, Stride: 0x1},
+		unicode.Range16{Lo: 0xd12, Hi: 0xd3a, Stride: 0x1},
+		unicode.Range16{Lo: 0xd3d, Hi: 0xd3d, Stride: 0x1},
+		unicode.Range16{Lo: 0xd4e, Hi: 0xd4e, Stride: 0x1},
+		unicode.Range16{Lo: 0xd54, Hi: 0xd56, Stride: 0x1},
+		unicode.Range16{Lo: 0xd5f, Hi: 0xd61, Stride: 0x1},
+		unicode.Range16{Lo: 0xd7a, Hi: 0xd7f, Stride: 0x1},
+		unicode.Range16{Lo: 0xd85, Hi: 0xd96, Stride: 0x1},
+		unicode.Range16{Lo: 0xd9a, Hi: 0xdb1, Stride: 0x1},
+		unicode.Range16{Lo: 0xdb3, Hi: 0xdbb, Stride: 0x1},
+		unicode.Range16{Lo: 0xdbd, Hi: 0xdbd, Stride: 0x1},
+		unicode.Range16{Lo: 0xdc0, Hi: 0xdc6, Stride: 0x1},
+		unicode.Range16{Lo: 0xf00, Hi: 0xf00, Stride: 0x1},
+		unicode.Range16{Lo: 0xf40, Hi: 0xf47, Stride: 0x1},
+		unicode.Range16{Lo: 0xf49, Hi: 0xf6c, Stride: 0x1},
+		unicode.Range16{Lo: 0xf88, Hi: 0xf8c, Stride: 0x1},
+		unicode.Range16{Lo: 0x10a0, Hi: 0x10c5, Stride: 0x1},
+		unicode.Range16{Lo: 0x10c7, Hi: 0x10c7, Stride: 0x1},
+		unicode.Range16{Lo: 0x10cd, Hi: 0x10cd, Stride: 0x1},
+		unicode.Range16{Lo: 0x10d0, Hi: 0x10fa, Stride: 0x1},
+		unicode.Range16{Lo: 0x10fc, Hi: 0x10fc, Stride: 0x1},
+		unicode.Range16{Lo: 0x10fd, Hi: 0x10ff, Stride: 0x1},
+		unicode.Range16{Lo: 0x1100, Hi: 0x1248, Stride: 0x1},
+		unicode.Range16{Lo: 0x124a, Hi: 0x124d, Stride: 0x1},
+		unicode.Range16{Lo: 0x1250, Hi: 0x1256, Stride: 0x1},
+		unicode.Range16{Lo: 0x1258, Hi: 0x1258, Stride: 0x1},
+		unicode.Range16{Lo: 0x125a, Hi: 0x125d, Stride: 0x1},
+		unicode.Range16{Lo: 0x1260, Hi: 0x1288, Stride: 0x1},
+		unicode.Range16{Lo: 0x128a, Hi: 0x128d, Stride: 0x1},
+		unicode.Range16{Lo: 0x1290, Hi: 0x12b0, Stride: 0x1},
+		unicode.Range16{Lo: 0x12b2, Hi: 0x12b5, Stride: 0x1},
+		unicode.Range16{Lo: 0x12b8, Hi: 0x12be, Stride: 0x1},
+		unicode.Range16{Lo: 0x12c0, Hi: 0x12c0, Stride: 0x1},
+		unicode.Range16{Lo: 0x12c2, Hi: 0x12c5, Stride: 0x1},
+		unicode.Range16{Lo: 0x12c8, Hi: 0x12d6, Stride: 0x1},
+		unicode.Range16{Lo: 0x12d8, Hi: 0x1310, Stride: 0x1},
+		unicode.Range16{Lo: 0x1312, Hi: 0x1315, Stride: 0x1},
+		unicode.Range16{Lo: 0x1318, Hi: 0x135a, Stride: 0x1},
+		unicode.Range16{Lo: 0x1380, Hi: 0x138f, Stride: 0x1},
+		unicode.Range16{Lo: 0x13a0, Hi: 0x13f5, Stride: 0x1},
+		unicode.Range16{Lo: 0x13f8, Hi: 0x13fd, Stride: 0x1},
+		unicode.Range16{Lo: 0x1401, Hi: 0x166c, Stride: 0x1},
+		unicode.Range16{Lo: 0x166f, Hi: 0x167f, Stride: 0x1},
+		unicode.Range16{Lo: 0x1681, Hi: 0x169a, Stride: 0x1},
+		unicode.Range16{Lo: 0x16a0, Hi: 0x16ea, Stride: 0x1},
+		unicode.Range16{Lo: 0x16ee, Hi: 0x16f0, Stride: 0x1},
+		unicode.Range16{Lo: 0x16f1, Hi: 0x16f8, Stride: 0x1},
+		unicode.Range16{Lo: 0x1700, Hi: 0x1711, Stride: 0x1},
+		unicode.Range16{Lo: 0x171f, Hi: 0x1731, Stride: 0x1},
+		unicode.Range16{Lo: 0x1740, Hi: 0x1751, Stride: 0x1},
+		unicode.Range16{Lo: 0x1760, Hi: 0x176c, Stride: 0x1},
+		unicode.Range16{Lo: 0x176e, Hi: 0x1770, Stride: 0x1},
+		unicode.Range16{Lo: 0x1820, Hi: 0x1842, Stride: 0x1},
+		unicode.Range16{Lo: 0x1843, Hi: 0x1843, Stride: 0x1},
+		unicode.Range16{Lo: 0x1844, Hi: 0x1878, Stride: 0x1},
+		unicode.Range16{Lo: 0x1880, Hi: 0x1884, Stride: 0x1},
+		unicode.Range16{Lo: 0x1887, Hi: 0x18a8, Stride: 0x1},
+		unicode.Range16{Lo: 0x18aa, Hi: 0x18aa, Stride: 0x1},
+		unicode.Range16{Lo: 0x18b0, Hi: 0x18f5, Stride: 0x1},
+		unicode.Range16{Lo: 0x1900, Hi: 0x191e, Stride: 0x1},
+		unicode.Range16{Lo: 0x1a00, Hi: 0x1a16, Stride: 0x1},
+		unicode.Range16{Lo: 0x1b05, Hi: 0x1b33, Stride: 0x1},
+		unicode.Range16{Lo: 0x1b45, Hi: 0x1b4c, Stride: 0x1},
+		unicode.Range16{Lo: 0x1b83, Hi: 0x1ba0, Stride: 0x1},
+		unicode.Range16{Lo: 0x1bae, Hi: 0x1baf, Stride: 0x1},
+		unicode.Range16{Lo: 0x1bba, Hi: 0x1be5, Stride: 0x1},
+		unicode.Range16{Lo: 0x1c00, Hi: 0x1c23, Stride: 0x1},
+		unicode.Range16{Lo: 0x1c4d, Hi: 0x1c4f, Stride: 0x1},
+		unicode.Range16{Lo: 0x1c5a, Hi: 0x1c77, Stride: 0x1},
+		unicode.Range16{Lo: 0x1c78, Hi: 0x1c7d, Stride: 0x1},
+		unicode.Range16{Lo: 0x1c80, Hi: 0x1c88, Stride: 0x1},
+		unicode.Range16{Lo: 0x1c90, Hi: 0x1cba, Stride: 0x1},
+		unicode.Range16{Lo: 0x1cbd, Hi: 0x1cbf, Stride: 0x1},
+		unicode.Range16{Lo: 0x1ce9, Hi: 0x1cec, Stride: 0x1},
+		unicode.Range16{Lo: 0x1cee, Hi: 0x1cf3, Stride: 0x1},
+		unicode.Range16{Lo: 0x1cf5, Hi: 0x1cf6, Stride: 0x1},
+		unicode.Range16{Lo: 0x1cfa, Hi: 0x1cfa, Stride: 0x1},
+		unicode.Range16{Lo: 0x1d00, Hi: 0x1d2b, Stride: 0x1},
+		unicode.Range16{Lo: 0x1d2c, Hi: 0x1d6a, Stride: 0x1},
+		unicode.Range16{Lo: 0x1d6b, Hi: 0x1d77, Stride: 0x1},
+		unicode.Range16{Lo: 0x1d78, Hi: 0x1d78, Stride: 0x1},
+		unicode.Range16{Lo: 0x1d79, Hi: 0x1d9a, Stride: 0x1},
+		unicode.Range16{Lo: 0x1d9b, Hi: 0x1dbf, Stride: 0x1},
+		unicode.Range16{Lo: 0x1e00, Hi: 0x1f15, Stride: 0x1},
+		unicode.Range16{Lo: 0x1f18, Hi: 0x1f1d, Stride: 0x1},
+		unicode.Range16{Lo: 0x1f20, Hi: 0x1f45, Stride: 0x1},
+		unicode.Range16{Lo: 0x1f48, Hi: 0x1f4d, Stride: 0x1},
+		unicode.Range16{Lo: 0x1f50, Hi: 0x1f57, Stride: 0x1},
+		unicode.Range16{Lo: 0x1f59, Hi: 0x1f59, Stride: 0x1},
+		unicode.Range16{Lo: 0x1f5b, Hi: 0x1f5b, Stride: 0x1},
+		unicode.Range16{Lo: 0x1f5d, Hi: 0x1f5d, Stride: 0x1},
+		unicode.Range16{Lo: 0x1f5f, Hi: 0x1f7d, Stride: 0x1},
+		unicode.Range16{Lo: 0x1f80, Hi: 0x1fb4, Stride: 0x1},
+		unicode.Range16{Lo: 0x1fb6, Hi: 0x1fbc, Stride: 0x1},
+		unicode.Range16{Lo: 0x1fbe, Hi: 0x1fbe, Stride: 0x1},
+		unicode.Range16{Lo: 0x1fc2, Hi: 0x1fc4, Stride: 0x1},
+		unicode.Range16{Lo: 0x1fc6, Hi: 0x1fcc, Stride: 0x1},
+		unicode.Range16{Lo: 0x1fd0, Hi: 0x1fd3, Stride: 0x1},
+		unicode.Range16{Lo: 0x1fd6, Hi: 0x1fdb, Stride: 0x1},
+		unicode.Range16{Lo: 0x1fe0, Hi: 0x1fec, Stride: 0x1},
+		unicode.Range16{Lo: 0x1ff2, Hi: 0x1ff4, Stride: 0x1},
+		unicode.Range16{Lo: 0x1ff6, Hi: 0x1ffc, Stride: 0x1},
+		unicode.Range16{Lo: 0x2071, Hi: 0x2071, Stride: 0x1},
+		unicode.Range16{Lo: 0x207f, Hi: 0x207f, Stride: 0x1},
+		unicode.Range16{Lo: 0x2090, Hi: 0x209c, Stride: 0x1},
+		unicode.Range16{Lo: 0x2102, Hi: 0x2102, Stride: 0x1},
+		unicode.Range16{Lo: 0x2107, Hi: 0x2107, Stride: 0x1},
+		unicode.Range16{Lo: 0x210a, Hi: 0x2113, Stride: 0x1},
+		unicode.Range16{Lo: 0x2115, Hi: 0x2115, Stride: 0x1},
+		unicode.Range16{Lo: 0x2119, Hi: 0x211d, Stride: 0x1},
+		unicode.Range16{Lo: 0x2124, Hi: 0x2124, Stride: 0x1},
+		unicode.Range16{Lo: 0x2126, Hi: 0x2126, Stride: 0x1},
+		unicode.Range16{Lo: 0x2128, Hi: 0x2128, Stride: 0x1},
+		unicode.Range16{Lo: 0x212a, Hi: 0x212d, Stride: 0x1},
+		unicode.Range16{Lo: 0x212f, Hi: 0x2134, Stride: 0x1},
+		unicode.Range16{Lo: 0x2135, Hi: 0x2138, Stride: 0x1},
+		unicode.Range16{Lo: 0x2139, Hi: 0x2139, Stride: 0x1},
+		unicode.Range16{Lo: 0x213c, Hi: 0x213f, Stride: 0x1},
+		unicode.Range16{Lo: 0x2145, Hi: 0x2149, Stride: 0x1},
+		unicode.Range16{Lo: 0x214e, Hi: 0x214e, Stride: 0x1},
+		unicode.Range16{Lo: 0x2160, Hi: 0x2182, Stride: 0x1},
+		unicode.Range16{Lo: 0x2183, Hi: 0x2184, Stride: 0x1},
+		unicode.Range16{Lo: 0x2185, Hi: 0x2188, Stride: 0x1},
+		unicode.Range16{Lo: 0x24b6, Hi: 0x24e9, Stride: 0x1},
+		unicode.Range16{Lo: 0x2c00, Hi: 0x2c7b, Stride: 0x1},
+		unicode.Range16{Lo: 0x2c7c, Hi: 0x2c7d, Stride: 0x1},
+		unicode.Range16{Lo: 0x2c7e, Hi: 0x2ce4, Stride: 0x1},
+		unicode.Range16{Lo: 0x2ceb, Hi: 0x2cee, Stride: 0x1},
+		unicode.Range16{Lo: 0x2cf2, Hi: 0x2cf3, Stride: 0x1},
+		unicode.Range16{Lo: 0x2d00, Hi: 0x2d25, Stride: 0x1},
+		unicode.Range16{Lo: 0x2d27, Hi: 0x2d27, Stride: 0x1},
+		unicode.Range16{Lo: 0x2d2d, Hi: 0x2d2d, Stride: 0x1},
+		unicode.Range16{Lo: 0x2d30, Hi: 0x2d67, Stride: 0x1},
+		unicode.Range16{Lo: 0x2d6f, Hi: 0x2d6f, Stride: 0x1},
+		unicode.Range16{Lo: 0x2d80, Hi: 0x2d96, Stride: 0x1},
+		unicode.Range16{Lo: 0x2da0, Hi: 0x2da6, Stride: 0x1},
+		unicode.Range16{Lo: 0x2da8, Hi: 0x2dae, Stride: 0x1},
+		unicode.Range16{Lo: 0x2db0, Hi: 0x2db6, Stride: 0x1},
+		unicode.Range16{Lo: 0x2db8, Hi: 0x2dbe, Stride: 0x1},
+		unicode.Range16{Lo: 0x2dc0, Hi: 0x2dc6, Stride: 0x1},
+		unicode.Range16{Lo: 0x2dc8, Hi: 0x2dce, Stride: 0x1},
+		unicode.Range16{Lo: 0x2dd0, Hi: 0x2dd6, Stride: 0x1},
+		unicode.Range16{Lo: 0x2dd8, Hi: 0x2dde, Stride: 0x1},
+		unicode.Range16{Lo: 0x2e2f, Hi: 0x2e2f, Stride: 0x1},
+		unicode.Range16{Lo: 0x3005, Hi: 0x3005, Stride: 0x1},
+		unicode.Range16{Lo: 0x303b, Hi: 0x303b, Stride: 0x1},
+		unicode.Range16{Lo: 0x303c, Hi: 0x303c, Stride: 0x1},
+		unicode.Range16{Lo: 0x3105, Hi: 0x312f, Stride: 0x1},
+		unicode.Range16{Lo: 0x3131, Hi: 0x318e, Stride: 0x1},
+		unicode.Range16{Lo: 0x31a0, Hi: 0x31bf, Stride: 0x1},
+		unicode.Range16{Lo: 0xa000, Hi: 0xa014, Stride: 0x1},
+		unicode.Range16{Lo: 0xa015, Hi: 0xa015, Stride: 0x1},
+		unicode.Range16{Lo: 0xa016, Hi: 0xa48c, Stride: 0x1},
+		unicode.Range16{Lo: 0xa4d0, Hi: 0xa4f7, Stride: 0x1},
+		unicode.Range16{Lo: 0xa4f8, Hi: 0xa4fd, Stride: 0x1},
+		unicode.Range16{Lo: 0xa500, Hi: 0xa60b, Stride: 0x1},
+		unicode.Range16{Lo: 0xa60c, Hi: 0xa60c, Stride: 0x1},
+		unicode.Range16{Lo: 0xa610, Hi: 0xa61f, Stride: 0x1},
+		unicode.Range16{Lo: 0xa62a, Hi: 0xa62b, Stride: 0x1},
+		unicode.Range16{Lo: 0xa640, Hi: 0xa66d, Stride: 0x1},
+		unicode.Range16{Lo: 0xa66e, Hi: 0xa66e, Stride: 0x1},
+		unicode.Range16{Lo: 0xa67f, Hi: 0xa67f, Stride: 0x1},
+		unicode.Range16{Lo: 0xa680, Hi: 0xa69b, Stride: 0x1},
+		unicode.Range16{Lo: 0xa69c, Hi: 0xa69d, Stride: 0x1},
+		unicode.Range16{Lo: 0xa6a0, Hi: 0xa6e5, Stride: 0x1},
+		unicode.Range16{Lo: 0xa6e6, Hi: 0xa6ef, Stride: 0x1},
+		unicode.Range16{Lo: 0xa708, Hi: 0xa716, Stride: 0x1},
+		unicode.Range16{Lo: 0xa717, Hi: 0xa71f, Stride: 0x1},
+		unicode.Range16{Lo: 0xa720, Hi: 0xa721, Stride: 0x1},
+		unicode.Range16{Lo: 0xa722, Hi: 0xa76f, Stride: 0x1},
+		unicode.Range16{Lo: 0xa770, Hi: 0xa770, Stride: 0x1},
+		unicode.Range16{Lo: 0xa771, Hi: 0xa787, Stride: 0x1},
+		unicode.Range16{Lo: 0xa788, Hi: 0xa788, Stride: 0x1},
+		unicode.Range16{Lo: 0xa789, Hi: 0xa78a, Stride: 0x1},
+		unicode.Range16{Lo: 0xa78b, Hi: 0xa78e, Stride: 0x1},
+		unicode.Range16{Lo: 0xa78f, Hi: 0xa78f, Stride: 0x1},
+		unicode.Range16{Lo: 0xa790, Hi: 0xa7ca, Stride: 0x1},
+		unicode.Range16{Lo: 0xa7d0, Hi: 0xa7d1, Stride: 0x1},
+		unicode.Range16{Lo: 0xa7d3, Hi: 0xa7d3, Stride: 0x1},
+		unicode.Range16{Lo: 0xa7d5, Hi: 0xa7d9, Stride: 0x1},
+		unicode.Range16{Lo: 0xa7f2, Hi: 0xa7f4, Stride: 0x1},
+		unicode.Range16{Lo: 0xa7f5, Hi: 0xa7f6, Stride: 0x1},
+		unicode.Range16{Lo: 0xa7f7, Hi: 0xa7f7, Stride: 0x1},
+		unicode.Range16{Lo: 0xa7f8, Hi: 0xa7f9, Stride: 0x1},
+		unicode.Range16{Lo: 0xa7fa, Hi: 0xa7fa, Stride: 0x1},
+		unicode.Range16{Lo: 0xa7fb, Hi: 0xa801, Stride: 0x1},
+		unicode.Range16{Lo: 0xa803, Hi: 0xa805, Stride: 0x1},
+		unicode.Range16{Lo: 0xa807, Hi: 0xa80a, Stride: 0x1},
+		unicode.Range16{Lo: 0xa80c, Hi: 0xa822, Stride: 0x1},
+		unicode.Range16{Lo: 0xa840, Hi: 0xa873, Stride: 0x1},
+		unicode.Range16{Lo: 0xa882, Hi: 0xa8b3, Stride: 0x1},
+		unicode.Range16{Lo: 0xa8f2, Hi: 0xa8f7, Stride: 0x1},
+		unicode.Range16{Lo: 0xa8fb, Hi: 0xa8fb, Stride: 0x1},
+		unicode.Range16{Lo: 0xa8fd, Hi: 0xa8fe, Stride: 0x1},
+		unicode.Range16{Lo: 0xa90a, Hi: 0xa925, Stride: 0x1},
+		unicode.Range16{Lo: 0xa930, Hi: 0xa946, Stride: 0x1},
+		unicode.Range16{Lo: 0xa960, Hi: 0xa97c, Stride: 0x1},
+		unicode.Range16{Lo: 0xa984, Hi: 0xa9b2, Stride: 0x1},
+		unicode.Range16{Lo: 0xa9cf, Hi: 0xa9cf, Stride: 0x1},
+		unicode.Range16{Lo: 0xaa00, Hi: 0xaa28, Stride: 0x1},
+		unicode.Range16{Lo: 0xaa40, Hi: 0xaa42, Stride: 0x1},
+		unicode.Range16{Lo: 0xaa44, Hi: 0xaa4b, Stride: 0x1},
+		unicode.Range16{Lo: 0xaae0, Hi: 0xaaea, Stride: 0x1},
+		unicode.Range16{Lo: 0xaaf2, Hi: 0xaaf2, Stride: 0x1},
+		unicode.Range16{Lo: 0xaaf3, Hi: 0xaaf4, Stride: 0x1},
+		unicode.Range16{Lo: 0xab01, Hi: 0xab06, Stride: 0x1},
+		unicode.Range16{Lo: 0xab09, Hi: 0xab0e, Stride: 0x1},
+		unicode.Range16{Lo: 0xab11, Hi: 0xab16, Stride: 0x1},
+		unicode.Range16{Lo: 0xab20, Hi: 0xab26, Stride: 0x1},
+		unicode.Range16{Lo: 0xab28, Hi: 0xab2e, Stride: 0x1},
+		unicode.Range16{Lo: 0xab30, Hi: 0xab5a, Stride: 0x1},
+		unicode.Range16{Lo: 0xab5b, Hi: 0xab5b, Stride: 0x1},
+		unicode.Range16{Lo: 0xab5c, Hi: 0xab5f, Stride: 0x1},
+		unicode.Range16{Lo: 0xab60, Hi: 0xab68, Stride: 0x1},
+		unicode.Range16{Lo: 0xab69, Hi: 0xab69, Stride: 0x1},
+		unicode.Range16{Lo: 0xab70, Hi: 0xabbf, Stride: 0x1},
+		unicode.Range16{Lo: 0xabc0, Hi: 0xabe2, Stride: 0x1},
+		unicode.Range16{Lo: 0xac00, Hi: 0xd7a3, Stride: 0x1},
+		unicode.Range16{Lo: 0xd7b0, Hi: 0xd7c6, Stride: 0x1},
+		unicode.Range16{Lo: 0xd7cb, Hi: 0xd7fb, Stride: 0x1},
+		unicode.Range16{Lo: 0xfb00, Hi: 0xfb06, Stride: 0x1},
+		unicode.Range16{Lo: 0xfb13, Hi: 0xfb17, Stride: 0x1},
+		unicode.Range16{Lo: 0xfb50, Hi: 0xfbb1, Stride: 0x1},
+		unicode.Range16{Lo: 0xfbd3, Hi: 0xfd3d, Stride: 0x1},
+		unicode.Range16{Lo: 0xfd50, Hi: 0xfd8f, Stride: 0x1},
+		unicode.Range16{Lo: 0xfd92, Hi: 0xfdc7, Stride: 0x1},
+		unicode.Range16{Lo: 0xfdf0, Hi: 0xfdfb, Stride: 0x1},
+		unicode.Range16{Lo: 0xfe70, Hi: 0xfe74, Stride: 0x1},
+		unicode.Range16{Lo: 0xfe76, Hi: 0xfefc, Stride: 0x1},
+		unicode.Range16{Lo: 0xff21, Hi: 0xff3a, Stride: 0x1},
+		unicode.Range16{Lo: 0xff41, Hi: 0xff5a, Stride: 0x1},
+		unicode.Range16{Lo: 0xffa0, Hi: 0xffbe, Stride: 0x1},
+		unicode.Range16{Lo: 0xffc2, Hi: 0xffc7, Stride: 0x1},
+		unicode.Range16{Lo: 0xffca, Hi: 0xffcf, Stride: 0x1},
+		unicode.Range16{Lo: 0xffd2, Hi: 0xffd7, Stride: 0x1},
+		unicode.Range16{Lo: 0xffda, Hi: 0xffdc, Stride: 0x1},
+	},
+	R32: []unicode.Range32{
+		unicode.Range32{Lo: 0x10000, Hi: 0x1000b, Stride: 0x1},
+		unicode.Range32{Lo: 0x1000d, Hi: 0x10026, Stride: 0x1},
+		unicode.Range32{Lo: 0x10028, Hi: 0x1003a, Stride: 0x1},
+		unicode.Range32{Lo: 0x1003c, Hi: 0x1003d, Stride: 0x1},
+		unicode.Range32{Lo: 0x1003f, Hi: 0x1004d, Stride: 0x1},
+		unicode.Range32{Lo: 0x10050, Hi: 0x1005d, Stride: 0x1},
+		unicode.Range32{Lo: 0x10080, Hi: 0x100fa, Stride: 0x1},
+		unicode.Range32{Lo: 0x10140, Hi: 0x10174, Stride: 0x1},
+		unicode.Range32{Lo: 0x10280, Hi: 0x1029c, Stride: 0x1},
+		unicode.Range32{Lo: 0x102a0, Hi: 0x102d0, Stride: 0x1},
+		unicode.Range32{Lo: 0x10300, Hi: 0x1031f, Stride: 0x1},
+		unicode.Range32{Lo: 0x1032d, Hi: 0x10340, Stride: 0x1},
+		unicode.Range32{Lo: 0x10341, Hi: 0x10341, Stride: 0x1},
+		unicode.Range32{Lo: 0x10342, Hi: 0x10349, Stride: 0x1},
+		unicode.Range32{Lo: 0x1034a, Hi: 0x1034a, Stride: 0x1},
+		unicode.Range32{Lo: 0x10350, Hi: 0x10375, Stride: 0x1},
+		unicode.Range32{Lo: 0x10380, Hi: 0x1039d, Stride: 0x1},
+		unicode.Range32{Lo: 0x103a0, Hi: 0x103c3, Stride: 0x1},
+		unicode.Range32{Lo: 0x103c8, Hi: 0x103cf, Stride: 0x1},
+		unicode.Range32{Lo: 0x103d1, Hi: 0x103d5, Stride: 0x1},
+		unicode.Range32{Lo: 0x10400, Hi: 0x1044f, Stride: 0x1},
+		unicode.Range32{Lo: 0x10450, Hi: 0x1049d, Stride: 0x1},
+		unicode.Range32{Lo: 0x104b0, Hi: 0x104d3, Stride: 0x1},
+		unicode.Range32{Lo: 0x104d8, Hi: 0x104fb, Stride: 0x1},
+		unicode.Range32{Lo: 0x10500, Hi: 0x10527, Stride: 0x1},
+		unicode.Range32{Lo: 0x10530, Hi: 0x10563, Stride: 0x1},
+		unicode.Range32{Lo: 0x10570, Hi: 0x1057a, Stride: 0x1},
+		unicode.Range32{Lo: 0x1057c, Hi: 0x1058a, Stride: 0x1},
+		unicode.Range32{Lo: 0x1058c, Hi: 0x10592, Stride: 0x1},
+		unicode.Range32{Lo: 0x10594, Hi: 0x10595, Stride: 0x1},
+		unicode.Range32{Lo: 0x10597, Hi: 0x105a1, Stride: 0x1},
+		unicode.Range32{Lo: 0x105a3, Hi: 0x105b1, Stride: 0x1},
+		unicode.Range32{Lo: 0x105b3, Hi: 0x105b9, Stride: 0x1},
+		unicode.Range32{Lo: 0x105bb, Hi: 0x105bc, Stride: 0x1},
+		unicode.Range32{Lo: 0x10600, Hi: 0x10736, Stride: 0x1},
+		unicode.Range32{Lo: 0x10740, Hi: 0x10755, Stride: 0x1},
+		unicode.Range32{Lo: 0x10760, Hi: 0x10767, Stride: 0x1},
+		unicode.Range32{Lo: 0x10780, Hi: 0x10785, Stride: 0x1},
+		unicode.Range32{Lo: 0x10787, Hi: 0x107b0, Stride: 0x1},
+		unicode.Range32{Lo: 0x107b2, Hi: 0x107ba, Stride: 0x1},
+		unicode.Range32{Lo: 0x10800, Hi: 0x10805, Stride: 0x1},
+		unicode.Range32{Lo: 0x10808, Hi: 0x10808, Stride: 0x1},
+		unicode.Range32{Lo: 0x1080a, Hi: 0x10835, Stride: 0x1},
+		unicode.Range32{Lo: 0x10837, Hi: 0x10838, Stride: 0x1},
+		unicode.Range32{Lo: 0x1083c, Hi: 0x1083c, Stride: 0x1},
+		unicode.Range32{Lo: 0x1083f, Hi: 0x10855, Stride: 0x1},
+		unicode.Range32{Lo: 0x10860, Hi: 0x10876, Stride: 0x1},
+		unicode.Range32{Lo: 0x10880, Hi: 0x1089e, Stride: 0x1},
+		unicode.Range32{Lo: 0x108e0, Hi: 0x108f2, Stride: 0x1},
+		unicode.Range32{Lo: 0x108f4, Hi: 0x108f5, Stride: 0x1},
+		unicode.Range32{Lo: 0x10900, Hi: 0x10915, Stride: 0x1},
+		unicode.Range32{Lo: 0x10920, Hi: 0x10939, Stride: 0x1},
+		unicode.Range32{Lo: 0x10980, Hi: 0x109b7, Stride: 0x1},
+		unicode.Range32{Lo: 0x109be, Hi: 0x109bf, Stride: 0x1},
+		unicode.Range32{Lo: 0x10a00, Hi: 0x10a00, Stride: 0x1},
+		unicode.Range32{Lo: 0x10a10, Hi: 0x10a13, Stride: 0x1},
+		unicode.Range32{Lo: 0x10a15, Hi: 0x10a17, Stride: 0x1},
+		unicode.Range32{Lo: 0x10a19, Hi: 0x10a35, Stride: 0x1},
+		unicode.Range32{Lo: 0x10a60, Hi: 0x10a7c, Stride: 0x1},
+		unicode.Range32{Lo: 0x10a80, Hi: 0x10a9c, Stride: 0x1},
+		unicode.Range32{Lo: 0x10ac0, Hi: 0x10ac7, Stride: 0x1},
+		unicode.Range32{Lo: 0x10ac9, Hi: 0x10ae4, Stride: 0x1},
+		unicode.Range32{Lo: 0x10b00, Hi: 0x10b35, Stride: 0x1},
+		unicode.Range32{Lo: 0x10b40, Hi: 0x10b55, Stride: 0x1},
+		unicode.Range32{Lo: 0x10b60, Hi: 0x10b72, Stride: 0x1},
+		unicode.Range32{Lo: 0x10b80, Hi: 0x10b91, Stride: 0x1},
+		unicode.Range32{Lo: 0x10c00, Hi: 0x10c48, Stride: 0x1},
+		unicode.Range32{Lo: 0x10c80, Hi: 0x10cb2, Stride: 0x1},
+		unicode.Range32{Lo: 0x10cc0, Hi: 0x10cf2, Stride: 0x1},
+		unicode.Range32{Lo: 0x10d00, Hi: 0x10d23, Stride: 0x1},
+		unicode.Range32{Lo: 0x10e80, Hi: 0x10ea9, Stride: 0x1},
+		unicode.Range32{Lo: 0x10eb0, Hi: 0x10eb1, Stride: 0x1},
+		unicode.Range32{Lo: 0x10f00, Hi: 0x10f1c, Stride: 0x1},
+		unicode.Range32{Lo: 0x10f27, Hi: 0x10f27, Stride: 0x1},
+		unicode.Range32{Lo: 0x10f30, Hi: 0x10f45, Stride: 0x1},
+		unicode.Range32{Lo: 0x10f70, Hi: 0x10f81, Stride: 0x1},
+		unicode.Range32{Lo: 0x10fb0, Hi: 0x10fc4, Stride: 0x1},
+		unicode.Range32{Lo: 0x10fe0, Hi: 0x10ff6, Stride: 0x1},
+		unicode.Range32{Lo: 0x11003, Hi: 0x11037, Stride: 0x1},
+		unicode.Range32{Lo: 0x11071, Hi: 0x11072, Stride: 0x1},
+		unicode.Range32{Lo: 0x11075, Hi: 0x11075, Stride: 0x1},
+		unicode.Range32{Lo: 0x11083, Hi: 0x110af, Stride: 0x1},
+		unicode.Range32{Lo: 0x110d0, Hi: 0x110e8, Stride: 0x1},
+		unicode.Range32{Lo: 0x11103, Hi: 0x11126, Stride: 0x1},
+		unicode.Range32{Lo: 0x11144, Hi: 0x11144, Stride: 0x1},
+		unicode.Range32{Lo: 0x11147, Hi: 0x11147, Stride: 0x1},
+		unicode.Range32{Lo: 0x11150, Hi: 0x11172, Stride: 0x1},
+		unicode.Range32{Lo: 0x11176, Hi: 0x11176, Stride: 0x1},
+		unicode.Range32{Lo: 0x11183, Hi: 0x111b2, Stride: 0x1},
+		unicode.Range32{Lo: 0x111c1, Hi: 0x111c4, Stride: 0x1},
+		unicode.Range32{Lo: 0x111da, Hi: 0x111da, Stride: 0x1},
+		unicode.Range32{Lo: 0x111dc, Hi: 0x111dc, Stride: 0x1},
+		unicode.Range32{Lo: 0x11200, Hi: 0x11211, Stride: 0x1},
+		unicode.Range32{Lo: 0x11213, Hi: 0x1122b, Stride: 0x1},
+		unicode.Range32{Lo: 0x1123f, Hi: 0x11240, Stride: 0x1},
+		unicode.Range32{Lo: 0x11280, Hi: 0x11286, Stride: 0x1},
+		unicode.Range32{Lo: 0x11288, Hi: 0x11288, Stride: 0x1},
+		unicode.Range32{Lo: 0x1128a, Hi: 0x1128d, Stride: 0x1},
+		unicode.Range32{Lo: 0x1128f, Hi: 0x1129d, Stride: 0x1},
+		unicode.Range32{Lo: 0x1129f, Hi: 0x112a8, Stride: 0x1},
+		unicode.Range32{Lo: 0x112b0, Hi: 0x112de, Stride: 0x1},
+		unicode.Range32{Lo: 0x11305, Hi: 0x1130c, Stride: 0x1},
+		unicode.Range32{Lo: 0x1130f, Hi: 0x11310, Stride: 0x1},
+		unicode.Range32{Lo: 0x11313, Hi: 0x11328, Stride: 0x1},
+		unicode.Range32{Lo: 0x1132a, Hi: 0x11330, Stride: 0x1},
+		unicode.Range32{Lo: 0x11332, Hi: 0x11333, Stride: 0x1},
+		unicode.Range32{Lo: 0x11335, Hi: 0x11339, Stride: 0x1},
+		unicode.Range32{Lo: 0x1133d, Hi: 0x1133d, Stride: 0x1},
+		unicode.Range32{Lo: 0x11350, Hi: 0x11350, Stride: 0x1},
+		unicode.Range32{Lo: 0x1135d, Hi: 0x11361, Stride: 0x1},
+		unicode.Range32{Lo: 0x11400, Hi: 0x11434, Stride: 0x1},
+		unicode.Range32{Lo: 0x11447, Hi: 0x1144a, Stride: 0x1},
+		unicode.Range32{Lo: 0x1145f, Hi: 0x11461, Stride: 0x1},
+		unicode.Range32{Lo: 0x11480, Hi: 0x114af, Stride: 0x1},
+		unicode.Range32{Lo: 0x114c4, Hi: 0x114c5, Stride: 0x1},
+		unicode.Range32{Lo: 0x114c7, Hi: 0x114c7, Stride: 0x1},
+		unicode.Range32{Lo: 0x11580, Hi: 0x115ae, Stride: 0x1},
+		unicode.Range32{Lo: 0x115d8, Hi: 0x115db, Stride: 0x1},
+		unicode.Range32{Lo: 0x11600, Hi: 0x1162f, Stride: 0x1},
+		unicode.Range32{Lo: 0x11644, Hi: 0x11644, Stride: 0x1},
+		unicode.Range32{Lo: 0x11680, Hi: 0x116aa, Stride: 0x1},
+		unicode.Range32{Lo: 0x116b8, Hi: 0x116b8, Stride: 0x1},
+		unicode.Range32{Lo: 0x11800, Hi: 0x1182b, Stride: 0x1},
+		unicode.Range32{Lo: 0x118a0, Hi: 0x118df, Stride: 0x1},
+		unicode.Range32{Lo: 0x118ff, Hi: 0x11906, Stride: 0x1},
+		unicode.Range32{Lo: 0x11909, Hi: 0x11909, Stride: 0x1},
+		unicode.Range32{Lo: 0x1190c, Hi: 0x11913, Stride: 0x1},
+		unicode.Range32{Lo: 0x11915, Hi: 0x11916, Stride: 0x1},
+		unicode.Range32{Lo: 0x11918, Hi: 0x1192f, Stride: 0x1},
+		unicode.Range32{Lo: 0x1193f, Hi: 0x1193f, Stride: 0x1},
+		unicode.Range32{Lo: 0x11941, Hi: 0x11941, Stride: 0x1},
+		unicode.Range32{Lo: 0x119a0, Hi: 0x119a7, Stride: 0x1},
+		unicode.Range32{Lo: 0x119aa, Hi: 0x119d0, Stride: 0x1},
+		unicode.Range32{Lo: 0x119e1, Hi: 0x119e1, Stride: 0x1},
+		unicode.Range32{Lo: 0x119e3, Hi: 0x119e3, Stride: 0x1},
+		unicode.Range32{Lo: 0x11a00, Hi: 0x11a00, Stride: 0x1},
+		unicode.Range32{Lo: 0x11a0b, Hi: 0x11a32, Stride: 0x1},
+		unicode.Range32{Lo: 0x11a3a, Hi: 0x11a3a, Stride: 0x1},
+		unicode.Range32{Lo: 0x11a50, Hi: 0x11a50, Stride: 0x1},
+		unicode.Range32{Lo: 0x11a5c, Hi: 0x11a89, Stride: 0x1},
+		unicode.Range32{Lo: 0x11a9d, Hi: 0x11a9d, Stride: 0x1},
+		unicode.Range32{Lo: 0x11ab0, Hi: 0x11af8, Stride: 0x1},
+		unicode.Range32{Lo: 0x11c00, Hi: 0x11c08, Stride: 0x1},
+		unicode.Range32{Lo: 0x11c0a, Hi: 0x11c2e, Stride: 0x1},
+		unicode.Range32{Lo: 0x11c40, Hi: 0x11c40, Stride: 0x1},
+		unicode.Range32{Lo: 0x11c72, Hi: 0x11c8f, Stride: 0x1},
+		unicode.Range32{Lo: 0x11d00, Hi: 0x11d06, Stride: 0x1},
+		unicode.Range32{Lo: 0x11d08, Hi: 0x11d09, Stride: 0x1},
+		unicode.Range32{Lo: 0x11d0b, Hi: 0x11d30, Stride: 0x1},
+		unicode.Range32{Lo: 0x11d46, Hi: 0x11d46, Stride: 0x1},
+		unicode.Range32{Lo: 0x11d60, Hi: 0x11d65, Stride: 0x1},
+		unicode.Range32{Lo: 0x11d67, Hi: 0x11d68, Stride: 0x1},
+		unicode.Range32{Lo: 0x11d6a, Hi: 0x11d89, Stride: 0x1},
+		unicode.Range32{Lo: 0x11d98, Hi: 0x11d98, Stride: 0x1},
+		unicode.Range32{Lo: 0x11ee0, Hi: 0x11ef2, Stride: 0x1},
+		unicode.Range32{Lo: 0x11f02, Hi: 0x11f02, Stride: 0x1},
+		unicode.Range32{Lo: 0x11f04, Hi: 0x11f10, Stride: 0x1},
+		unicode.Range32{Lo: 0x11f12, Hi: 0x11f33, Stride: 0x1},
+		unicode.Range32{Lo: 0x11fb0, Hi: 0x11fb0, Stride: 0x1},
+		unicode.Range32{Lo: 0x12000, Hi: 0x12399, Stride: 0x1},
+		unicode.Range32{Lo: 0x12400, Hi: 0x1246e, Stride: 0x1},
+		unicode.Range32{Lo: 0x12480, Hi: 0x12543, Stride: 0x1},
+		unicode.Range32{Lo: 0x12f90, Hi: 0x12ff0, Stride: 0x1},
+		unicode.Range32{Lo: 0x13000, Hi: 0x1342f, Stride: 0x1},
+		unicode.Range32{Lo: 0x13441, Hi: 0x13446, Stride: 0x1},
+		unicode.Range32{Lo: 0x14400, Hi: 0x14646, Stride: 0x1},
+		unicode.Range32{Lo: 0x16800, Hi: 0x16a38, Stride: 0x1},
+		unicode.Range32{Lo: 0x16a40, Hi: 0x16a5e, Stride: 0x1},
+		unicode.Range32{Lo: 0x16a70, Hi: 0x16abe, Stride: 0x1},
+		unicode.Range32{Lo: 0x16ad0, Hi: 0x16aed, Stride: 0x1},
+		unicode.Range32{Lo: 0x16b00, Hi: 0x16b2f, Stride: 0x1},
+		unicode.Range32{Lo: 0x16b40, Hi: 0x16b43, Stride: 0x1},
+		unicode.Range32{Lo: 0x16b63, Hi: 0x16b77, Stride: 0x1},
+		unicode.Range32{Lo: 0x16b7d, Hi: 0x16b8f, Stride: 0x1},
+		unicode.Range32{Lo: 0x16e40, Hi: 0x16e7f, Stride: 0x1},
+		unicode.Range32{Lo: 0x16f00, Hi: 0x16f4a, Stride: 0x1},
+		unicode.Range32{Lo: 0x16f50, Hi: 0x16f50, Stride: 0x1},
+		unicode.Range32{Lo: 0x16f93, Hi: 0x16f9f, Stride: 0x1},
+		unicode.Range32{Lo: 0x16fe0, Hi: 0x16fe1, Stride: 0x1},
+		unicode.Range32{Lo: 0x16fe3, Hi: 0x16fe3, Stride: 0x1},
+		unicode.Range32{Lo: 0x1bc00, Hi: 0x1bc6a, Stride: 0x1},
+		unicode.Range32{Lo: 0x1bc70, Hi: 0x1bc7c, Stride: 0x1},
+		unicode.Range32{Lo: 0x1bc80, Hi: 0x1bc88, Stride: 0x1},
+		unicode.Range32{Lo: 0x1bc90, Hi: 0x1bc99, Stride: 0x1},
+		unicode.Range32{Lo: 0x1d400, Hi: 0x1d454, Stride: 0x1},
+		unicode.Range32{Lo: 0x1d456, Hi: 0x1d49c, Stride: 0x1},
+		unicode.Range32{Lo: 0x1d49e, Hi: 0x1d49f, Stride: 0x1},
+		unicode.Range32{Lo: 0x1d4a2, Hi: 0x1d4a2, Stride: 0x1},
+		unicode.Range32{Lo: 0x1d4a5, Hi: 0x1d4a6, Stride: 0x1},
+		unicode.Range32{Lo: 0x1d4a9, Hi: 0x1d4ac, Stride: 0x1},
+		unicode.Range32{Lo: 0x1d4ae, Hi: 0x1d4b9, Stride: 0x1},
+		unicode.Range32{Lo: 0x1d4bb, Hi: 0x1d4bb, Stride: 0x1},
+		unicode.Range32{Lo: 0x1d4bd, Hi: 0x1d4c3, Stride: 0x1},
+		unicode.Range32{Lo: 0x1d4c5, Hi: 0x1d505, Stride: 0x1},
+		unicode.Range32{Lo: 0x1d507, Hi: 0x1d50a, Stride: 0x1},
+		unicode.Range32{Lo: 0x1d50d, Hi: 0x1d514, Stride: 0x1},
+		unicode.Range32{Lo: 0x1d516, Hi: 0x1d51c, Stride: 0x1},
+		unicode.Range32{Lo: 0x1d51e, Hi: 0x1d539, Stride: 0x1},
+		unicode.Range32{Lo: 0x1d53b, Hi: 0x1d53e, Stride: 0x1},
+		unicode.Range32{Lo: 0x1d540, Hi: 0x1d544, Stride: 0x1},
+		unicode.Range32{Lo: 0x1d546, Hi: 0x1d546, Stride: 0x1},
+		unicode.Range32{Lo: 0x1d54a, Hi: 0x1d550, Stride: 0x1},
+		unicode.Range32{Lo: 0x1d552, Hi: 0x1d6a5, Stride: 0x1},
+		unicode.Range32{Lo: 0x1d6a8, Hi: 0x1d6c0, Stride: 0x1},
+		unicode.Range32{Lo: 0x1d6c2, Hi: 0x1d6da, Stride: 0x1},
+		unicode.Range32{Lo: 0x1d6dc, Hi: 0x1d6fa, Stride: 0x1},
+		unicode.Range32{Lo: 0x1d6fc, Hi: 0x1d714, Stride: 0x1},
+		unicode.Range32{Lo: 0x1d716, Hi: 0x1d734, Stride: 0x1},
+		unicode.Range32{Lo: 0x1d736, Hi: 0x1d74e, Stride: 0x1},
+		unicode.Range32{Lo: 0x1d750, Hi: 0x1d76e, Stride: 0x1},
+		unicode.Range32{Lo: 0x1d770, Hi: 0x1d788, Stride: 0x1},
+		unicode.Range32{Lo: 0x1d78a, Hi: 0x1d7a8, Stride: 0x1},
+		unicode.Range32{Lo: 0x1d7aa, Hi: 0x1d7c2, Stride: 0x1},
+		unicode.Range32{Lo: 0x1d7c4, Hi: 0x1d7cb, Stride: 0x1},
+		unicode.Range32{Lo: 0x1df00, Hi: 0x1df09, Stride: 0x1},
+		unicode.Range32{Lo: 0x1df0a, Hi: 0x1df0a, Stride: 0x1},
+		unicode.Range32{Lo: 0x1df0b, Hi: 0x1df1e, Stride: 0x1},
+		unicode.Range32{Lo: 0x1df25, Hi: 0x1df2a, Stride: 0x1},
+		unicode.Range32{Lo: 0x1e030, Hi: 0x1e06d, Stride: 0x1},
+		unicode.Range32{Lo: 0x1e100, Hi: 0x1e12c, Stride: 0x1},
+		unicode.Range32{Lo: 0x1e137, Hi: 0x1e13d, Stride: 0x1},
+		unicode.Range32{Lo: 0x1e14e, Hi: 0x1e14e, Stride: 0x1},
+		unicode.Range32{Lo: 0x1e290, Hi: 0x1e2ad, Stride: 0x1},
+		unicode.Range32{Lo: 0x1e2c0, Hi: 0x1e2eb, Stride: 0x1},
+		unicode.Range32{Lo: 0x1e4d0, Hi: 0x1e4ea, Stride: 0x1},
+		unicode.Range32{Lo: 0x1e4eb, Hi: 0x1e4eb, Stride: 0x1},
+		unicode.Range32{Lo: 0x1e7e0, Hi: 0x1e7e6, Stride: 0x1},
+		unicode.Range32{Lo: 0x1e7e8, Hi: 0x1e7eb, Stride: 0x1},
+		unicode.Range32{Lo: 0x1e7ed, Hi: 0x1e7ee, Stride: 0x1},
+		unicode.Range32{Lo: 0x1e7f0, Hi: 0x1e7fe, Stride: 0x1},
+		unicode.Range32{Lo: 0x1e800, Hi: 0x1e8c4, Stride: 0x1},
+		unicode.Range32{Lo: 0x1e900, Hi: 0x1e943, Stride: 0x1},
+		unicode.Range32{Lo: 0x1e94b, Hi: 0x1e94b, Stride: 0x1},
+		unicode.Range32{Lo: 0x1ee00, Hi: 0x1ee03, Stride: 0x1},
+		unicode.Range32{Lo: 0x1ee05, Hi: 0x1ee1f, Stride: 0x1},
+		unicode.Range32{Lo: 0x1ee21, Hi: 0x1ee22, Stride: 0x1},
+		unicode.Range32{Lo: 0x1ee24, Hi: 0x1ee24, Stride: 0x1},
+		unicode.Range32{Lo: 0x1ee27, Hi: 0x1ee27, Stride: 0x1},
+		unicode.Range32{Lo: 0x1ee29, Hi: 0x1ee32, Stride: 0x1},
+		unicode.Range32{Lo: 0x1ee34, Hi: 0x1ee37, Stride: 0x1},
+		unicode.Range32{Lo: 0x1ee39, Hi: 0x1ee39, Stride: 0x1},
+		unicode.Range32{Lo: 0x1ee3b, Hi: 0x1ee3b, Stride: 0x1},
+		unicode.Range32{Lo: 0x1ee42, Hi: 0x1ee42, Stride: 0x1},
+		unicode.Range32{Lo: 0x1ee47, Hi: 0x1ee47, Stride: 0x1},
+		unicode.Range32{Lo: 0x1ee49, Hi: 0x1ee49, Stride: 0x1},
+		unicode.Range32{Lo: 0x1ee4b, Hi: 0x1ee4b, Stride: 0x1},
+		unicode.Range32{Lo: 0x1ee4d, Hi: 0x1ee4f, Stride: 0x1},
+		unicode.Range32{Lo: 0x1ee51, Hi: 0x1ee52, Stride: 0x1},
+		unicode.Range32{Lo: 0x1ee54, Hi: 0x1ee54, Stride: 0x1},
+		unicode.Range32{Lo: 0x1ee57, Hi: 0x1ee57, Stride: 0x1},
+		unicode.Range32{Lo: 0x1ee59, Hi: 0x1ee59, Stride: 0x1},
+		unicode.Range32{Lo: 0x1ee5b, Hi: 0x1ee5b, Stride: 0x1},
+		unicode.Range32{Lo: 0x1ee5d, Hi: 0x1ee5d, Stride: 0x1},
+		unicode.Range32{Lo: 0x1ee5f, Hi: 0x1ee5f, Stride: 0x1},
+		unicode.Range32{Lo: 0x1ee61, Hi: 0x1ee62, Stride: 0x1},
+		unicode.Range32{Lo: 0x1ee64, Hi: 0x1ee64, Stride: 0x1},
+		unicode.Range32{Lo: 0x1ee67, Hi: 0x1ee6a, Stride: 0x1},
+		unicode.Range32{Lo: 0x1ee6c, Hi: 0x1ee72, Stride: 0x1},
+		unicode.Range32{Lo: 0x1ee74, Hi: 0x1ee77, Stride: 0x1},
+		unicode.Range32{Lo: 0x1ee79, Hi: 0x1ee7c, Stride: 0x1},
+		unicode.Range32{Lo: 0x1ee7e, Hi: 0x1ee7e, Stride: 0x1},
+		unicode.Range32{Lo: 0x1ee80, Hi: 0x1ee89, Stride: 0x1},
+		unicode.Range32{Lo: 0x1ee8b, Hi: 0x1ee9b, Stride: 0x1},
+		unicode.Range32{Lo: 0x1eea1, Hi: 0x1eea3, Stride: 0x1},
+		unicode.Range32{Lo: 0x1eea5, Hi: 0x1eea9, Stride: 0x1},
+		unicode.Range32{Lo: 0x1eeab, Hi: 0x1eebb, Stride: 0x1},
+		unicode.Range32{Lo: 0x1f130, Hi: 0x1f149, Stride: 0x1},
+		unicode.Range32{Lo: 0x1f150, Hi: 0x1f169, Stride: 0x1},
+		unicode.Range32{Lo: 0x1f170, Hi: 0x1f189, Stride: 0x1},
+	},
+	LatinOffset: 7,
+}
+
+// _WordCR is a single-codepoint range table covering U+000D (carriage
+// return). The name suggests it backs the UAX #29 Word_Break "CR"
+// property class — NOTE(review): confirm against the generator.
+// Machine-generated table; do not edit by hand.
+var _WordCR = &unicode.RangeTable{
+	R16: []unicode.Range16{
+		unicode.Range16{Lo: 0xd, Hi: 0xd, Stride: 0x1},
+	},
+	LatinOffset: 1,
+}
+
+// _WordDouble_Quote is a single-codepoint range table covering U+0022
+// (the ASCII double-quote character). The name suggests it backs the
+// UAX #29 Word_Break "Double_Quote" property class — NOTE(review):
+// confirm against the generator. Machine-generated; do not edit by hand.
+var _WordDouble_Quote = &unicode.RangeTable{
+	R16: []unicode.Range16{
+		unicode.Range16{Lo: 0x22, Hi: 0x22, Stride: 0x1},
+	},
+	LatinOffset: 1,
+}
+
+var _WordExtend = &unicode.RangeTable{
+	R16: []unicode.Range16{
+		unicode.Range16{Lo: 0x300, Hi: 0x36f, Stride: 0x1},
+		unicode.Range16{Lo: 0x483, Hi: 0x487, Stride: 0x1},
+		unicode.Range16{Lo: 0x488, Hi: 0x489, Stride: 0x1},
+		unicode.Range16{Lo: 0x591, Hi: 0x5bd, Stride: 0x1},
+		unicode.Range16{Lo: 0x5bf, Hi: 0x5bf, Stride: 0x1},
+		unicode.Range16{Lo: 0x5c1, Hi: 0x5c2, Stride: 0x1},
+		unicode.Range16{Lo: 0x5c4, Hi: 0x5c5, Stride: 0x1},
+		unicode.Range16{Lo: 0x5c7, Hi: 0x5c7, Stride: 0x1},
+		unicode.Range16{Lo: 0x610, Hi: 0x61a, Stride: 0x1},
+		unicode.Range16{Lo: 0x64b, Hi: 0x65f, Stride: 0x1},
+		unicode.Range16{Lo: 0x670, Hi: 0x670, Stride: 0x1},
+		unicode.Range16{Lo: 0x6d6, Hi: 0x6dc, Stride: 0x1},
+		unicode.Range16{Lo: 0x6df, Hi: 0x6e4, Stride: 0x1},
+		unicode.Range16{Lo: 0x6e7, Hi: 0x6e8, Stride: 0x1},
+		unicode.Range16{Lo: 0x6ea, Hi: 0x6ed, Stride: 0x1},
+		unicode.Range16{Lo: 0x711, Hi: 0x711, Stride: 0x1},
+		unicode.Range16{Lo: 0x730, Hi: 0x74a, Stride: 0x1},
+		unicode.Range16{Lo: 0x7a6, Hi: 0x7b0, Stride: 0x1},
+		unicode.Range16{Lo: 0x7eb, Hi: 0x7f3, Stride: 0x1},
+		unicode.Range16{Lo: 0x7fd, Hi: 0x7fd, Stride: 0x1},
+		unicode.Range16{Lo: 0x816, Hi: 0x819, Stride: 0x1},
+		unicode.Range16{Lo: 0x81b, Hi: 0x823, Stride: 0x1},
+		unicode.Range16{Lo: 0x825, Hi: 0x827, Stride: 0x1},
+		unicode.Range16{Lo: 0x829, Hi: 0x82d, Stride: 0x1},
+		unicode.Range16{Lo: 0x859, Hi: 0x85b, Stride: 0x1},
+		unicode.Range16{Lo: 0x898, Hi: 0x89f, Stride: 0x1},
+		unicode.Range16{Lo: 0x8ca, Hi: 0x8e1, Stride: 0x1},
+		unicode.Range16{Lo: 0x8e3, Hi: 0x902, Stride: 0x1},
+		unicode.Range16{Lo: 0x903, Hi: 0x903, Stride: 0x1},
+		unicode.Range16{Lo: 0x93a, Hi: 0x93a, Stride: 0x1},
+		unicode.Range16{Lo: 0x93b, Hi: 0x93b, Stride: 0x1},
+		unicode.Range16{Lo: 0x93c, Hi: 0x93c, Stride: 0x1},
+		unicode.Range16{Lo: 0x93e, Hi: 0x940, Stride: 0x1},
+		unicode.Range16{Lo: 0x941, Hi: 0x948, Stride: 0x1},
+		unicode.Range16{Lo: 0x949, Hi: 0x94c, Stride: 0x1},
+		unicode.Range16{Lo: 0x94d, Hi: 0x94d, Stride: 0x1},
+		unicode.Range16{Lo: 0x94e, Hi: 0x94f, Stride: 0x1},
+		unicode.Range16{Lo: 0x951, Hi: 0x957, Stride: 0x1},
+		unicode.Range16{Lo: 0x962, Hi: 0x963, Stride: 0x1},
+		unicode.Range16{Lo: 0x981, Hi: 0x981, Stride: 0x1},
+		unicode.Range16{Lo: 0x982, Hi: 0x983, Stride: 0x1},
+		unicode.Range16{Lo: 0x9bc, Hi: 0x9bc, Stride: 0x1},
+		unicode.Range16{Lo: 0x9be, Hi: 0x9c0, Stride: 0x1},
+		unicode.Range16{Lo: 0x9c1, Hi: 0x9c4, Stride: 0x1},
+		unicode.Range16{Lo: 0x9c7, Hi: 0x9c8, Stride: 0x1},
+		unicode.Range16{Lo: 0x9cb, Hi: 0x9cc, Stride: 0x1},
+		unicode.Range16{Lo: 0x9cd, Hi: 0x9cd, Stride: 0x1},
+		unicode.Range16{Lo: 0x9d7, Hi: 0x9d7, Stride: 0x1},
+		unicode.Range16{Lo: 0x9e2, Hi: 0x9e3, Stride: 0x1},
+		unicode.Range16{Lo: 0x9fe, Hi: 0x9fe, Stride: 0x1},
+		unicode.Range16{Lo: 0xa01, Hi: 0xa02, Stride: 0x1},
+		unicode.Range16{Lo: 0xa03, Hi: 0xa03, Stride: 0x1},
+		unicode.Range16{Lo: 0xa3c, Hi: 0xa3c, Stride: 0x1},
+		unicode.Range16{Lo: 0xa3e, Hi: 0xa40, Stride: 0x1},
+		unicode.Range16{Lo: 0xa41, Hi: 0xa42, Stride: 0x1},
+		unicode.Range16{Lo: 0xa47, Hi: 0xa48, Stride: 0x1},
+		unicode.Range16{Lo: 0xa4b, Hi: 0xa4d, Stride: 0x1},
+		unicode.Range16{Lo: 0xa51, Hi: 0xa51, Stride: 0x1},
+		unicode.Range16{Lo: 0xa70, Hi: 0xa71, Stride: 0x1},
+		unicode.Range16{Lo: 0xa75, Hi: 0xa75, Stride: 0x1},
+		unicode.Range16{Lo: 0xa81, Hi: 0xa82, Stride: 0x1},
+		unicode.Range16{Lo: 0xa83, Hi: 0xa83, Stride: 0x1},
+		unicode.Range16{Lo: 0xabc, Hi: 0xabc, Stride: 0x1},
+		unicode.Range16{Lo: 0xabe, Hi: 0xac0, Stride: 0x1},
+		unicode.Range16{Lo: 0xac1, Hi: 0xac5, Stride: 0x1},
+		unicode.Range16{Lo: 0xac7, Hi: 0xac8, Stride: 0x1},
+		unicode.Range16{Lo: 0xac9, Hi: 0xac9, Stride: 0x1},
+		unicode.Range16{Lo: 0xacb, Hi: 0xacc, Stride: 0x1},
+		unicode.Range16{Lo: 0xacd, Hi: 0xacd, Stride: 0x1},
+		unicode.Range16{Lo: 0xae2, Hi: 0xae3, Stride: 0x1},
+		unicode.Range16{Lo: 0xafa, Hi: 0xaff, Stride: 0x1},
+		unicode.Range16{Lo: 0xb01, Hi: 0xb01, Stride: 0x1},
+		unicode.Range16{Lo: 0xb02, Hi: 0xb03, Stride: 0x1},
+		unicode.Range16{Lo: 0xb3c, Hi: 0xb3c, Stride: 0x1},
+		unicode.Range16{Lo: 0xb3e, Hi: 0xb3e, Stride: 0x1},
+		unicode.Range16{Lo: 0xb3f, Hi: 0xb3f, Stride: 0x1},
+		unicode.Range16{Lo: 0xb40, Hi: 0xb40, Stride: 0x1},
+		unicode.Range16{Lo: 0xb41, Hi: 0xb44, Stride: 0x1},
+		unicode.Range16{Lo: 0xb47, Hi: 0xb48, Stride: 0x1},
+		unicode.Range16{Lo: 0xb4b, Hi: 0xb4c, Stride: 0x1},
+		unicode.Range16{Lo: 0xb4d, Hi: 0xb4d, Stride: 0x1},
+		unicode.Range16{Lo: 0xb55, Hi: 0xb56, Stride: 0x1},
+		unicode.Range16{Lo: 0xb57, Hi: 0xb57, Stride: 0x1},
+		unicode.Range16{Lo: 0xb62, Hi: 0xb63, Stride: 0x1},
+		unicode.Range16{Lo: 0xb82, Hi: 0xb82, Stride: 0x1},
+		unicode.Range16{Lo: 0xbbe, Hi: 0xbbf, Stride: 0x1},
+		unicode.Range16{Lo: 0xbc0, Hi: 0xbc0, Stride: 0x1},
+		unicode.Range16{Lo: 0xbc1, Hi: 0xbc2, Stride: 0x1},
+		unicode.Range16{Lo: 0xbc6, Hi: 0xbc8, Stride: 0x1},
+		unicode.Range16{Lo: 0xbca, Hi: 0xbcc, Stride: 0x1},
+		unicode.Range16{Lo: 0xbcd, Hi: 0xbcd, Stride: 0x1},
+		unicode.Range16{Lo: 0xbd7, Hi: 0xbd7, Stride: 0x1},
+		unicode.Range16{Lo: 0xc00, Hi: 0xc00, Stride: 0x1},
+		unicode.Range16{Lo: 0xc01, Hi: 0xc03, Stride: 0x1},
+		unicode.Range16{Lo: 0xc04, Hi: 0xc04, Stride: 0x1},
+		unicode.Range16{Lo: 0xc3c, Hi: 0xc3c, Stride: 0x1},
+		unicode.Range16{Lo: 0xc3e, Hi: 0xc40, Stride: 0x1},
+		unicode.Range16{Lo: 0xc41, Hi: 0xc44, Stride: 0x1},
+		unicode.Range16{Lo: 0xc46, Hi: 0xc48, Stride: 0x1},
+		unicode.Range16{Lo: 0xc4a, Hi: 0xc4d, Stride: 0x1},
+		unicode.Range16{Lo: 0xc55, Hi: 0xc56, Stride: 0x1},
+		unicode.Range16{Lo: 0xc62, Hi: 0xc63, Stride: 0x1},
+		unicode.Range16{Lo: 0xc81, Hi: 0xc81, Stride: 0x1},
+		unicode.Range16{Lo: 0xc82, Hi: 0xc83, Stride: 0x1},
+		unicode.Range16{Lo: 0xcbc, Hi: 0xcbc, Stride: 0x1},
+		unicode.Range16{Lo: 0xcbe, Hi: 0xcbe, Stride: 0x1},
+		unicode.Range16{Lo: 0xcbf, Hi: 0xcbf, Stride: 0x1},
+		unicode.Range16{Lo: 0xcc0, Hi: 0xcc4, Stride: 0x1},
+		unicode.Range16{Lo: 0xcc6, Hi: 0xcc6, Stride: 0x1},
+		unicode.Range16{Lo: 0xcc7, Hi: 0xcc8, Stride: 0x1},
+		unicode.Range16{Lo: 0xcca, Hi: 0xccb, Stride: 0x1},
+		unicode.Range16{Lo: 0xccc, Hi: 0xccd, Stride: 0x1},
+		unicode.Range16{Lo: 0xcd5, Hi: 0xcd6, Stride: 0x1},
+		unicode.Range16{Lo: 0xce2, Hi: 0xce3, Stride: 0x1},
+		unicode.Range16{Lo: 0xcf3, Hi: 0xcf3, Stride: 0x1},
+		unicode.Range16{Lo: 0xd00, Hi: 0xd01, Stride: 0x1},
+		unicode.Range16{Lo: 0xd02, Hi: 0xd03, Stride: 0x1},
+		unicode.Range16{Lo: 0xd3b, Hi: 0xd3c, Stride: 0x1},
+		unicode.Range16{Lo: 0xd3e, Hi: 0xd40, Stride: 0x1},
+		unicode.Range16{Lo: 0xd41, Hi: 0xd44, Stride: 0x1},
+		unicode.Range16{Lo: 0xd46, Hi: 0xd48, Stride: 0x1},
+		unicode.Range16{Lo: 0xd4a, Hi: 0xd4c, Stride: 0x1},
+		unicode.Range16{Lo: 0xd4d, Hi: 0xd4d, Stride: 0x1},
+		unicode.Range16{Lo: 0xd57, Hi: 0xd57, Stride: 0x1},
+		unicode.Range16{Lo: 0xd62, Hi: 0xd63, Stride: 0x1},
+		unicode.Range16{Lo: 0xd81, Hi: 0xd81, Stride: 0x1},
+		unicode.Range16{Lo: 0xd82, Hi: 0xd83, Stride: 0x1},
+		unicode.Range16{Lo: 0xdca, Hi: 0xdca, Stride: 0x1},
+		unicode.Range16{Lo: 0xdcf, Hi: 0xdd1, Stride: 0x1},
+		unicode.Range16{Lo: 0xdd2, Hi: 0xdd4, Stride: 0x1},
+		unicode.Range16{Lo: 0xdd6, Hi: 0xdd6, Stride: 0x1},
+		unicode.Range16{Lo: 0xdd8, Hi: 0xddf, Stride: 0x1},
+		unicode.Range16{Lo: 0xdf2, Hi: 0xdf3, Stride: 0x1},
+		unicode.Range16{Lo: 0xe31, Hi: 0xe31, Stride: 0x1},
+		unicode.Range16{Lo: 0xe34, Hi: 0xe3a, Stride: 0x1},
+		unicode.Range16{Lo: 0xe47, Hi: 0xe4e, Stride: 0x1},
+		unicode.Range16{Lo: 0xeb1, Hi: 0xeb1, Stride: 0x1},
+		unicode.Range16{Lo: 0xeb4, Hi: 0xebc, Stride: 0x1},
+		unicode.Range16{Lo: 0xec8, Hi: 0xece, Stride: 0x1},
+		unicode.Range16{Lo: 0xf18, Hi: 0xf19, Stride: 0x1},
+		unicode.Range16{Lo: 0xf35, Hi: 0xf35, Stride: 0x1},
+		unicode.Range16{Lo: 0xf37, Hi: 0xf37, Stride: 0x1},
+		unicode.Range16{Lo: 0xf39, Hi: 0xf39, Stride: 0x1},
+		unicode.Range16{Lo: 0xf3e, Hi: 0xf3f, Stride: 0x1},
+		unicode.Range16{Lo: 0xf71, Hi: 0xf7e, Stride: 0x1},
+		unicode.Range16{Lo: 0xf7f, Hi: 0xf7f, Stride: 0x1},
+		unicode.Range16{Lo: 0xf80, Hi: 0xf84, Stride: 0x1},
+		unicode.Range16{Lo: 0xf86, Hi: 0xf87, Stride: 0x1},
+		unicode.Range16{Lo: 0xf8d, Hi: 0xf97, Stride: 0x1},
+		unicode.Range16{Lo: 0xf99, Hi: 0xfbc, Stride: 0x1},
+		unicode.Range16{Lo: 0xfc6, Hi: 0xfc6, Stride: 0x1},
+		unicode.Range16{Lo: 0x102b, Hi: 0x102c, Stride: 0x1},
+		unicode.Range16{Lo: 0x102d, Hi: 0x1030, Stride: 0x1},
+		unicode.Range16{Lo: 0x1031, Hi: 0x1031, Stride: 0x1},
+		unicode.Range16{Lo: 0x1032, Hi: 0x1037, Stride: 0x1},
+		unicode.Range16{Lo: 0x1038, Hi: 0x1038, Stride: 0x1},
+		unicode.Range16{Lo: 0x1039, Hi: 0x103a, Stride: 0x1},
+		unicode.Range16{Lo: 0x103b, Hi: 0x103c, Stride: 0x1},
+		unicode.Range16{Lo: 0x103d, Hi: 0x103e, Stride: 0x1},
+		unicode.Range16{Lo: 0x1056, Hi: 0x1057, Stride: 0x1},
+		unicode.Range16{Lo: 0x1058, Hi: 0x1059, Stride: 0x1},
+		unicode.Range16{Lo: 0x105e, Hi: 0x1060, Stride: 0x1},
+		unicode.Range16{Lo: 0x1062, Hi: 0x1064, Stride: 0x1},
+		unicode.Range16{Lo: 0x1067, Hi: 0x106d, Stride: 0x1},
+		unicode.Range16{Lo: 0x1071, Hi: 0x1074, Stride: 0x1},
+		unicode.Range16{Lo: 0x1082, Hi: 0x1082, Stride: 0x1},
+		unicode.Range16{Lo: 0x1083, Hi: 0x1084, Stride: 0x1},
+		unicode.Range16{Lo: 0x1085, Hi: 0x1086, Stride: 0x1},
+		unicode.Range16{Lo: 0x1087, Hi: 0x108c, Stride: 0x1},
+		unicode.Range16{Lo: 0x108d, Hi: 0x108d, Stride: 0x1},
+		unicode.Range16{Lo: 0x108f, Hi: 0x108f, Stride: 0x1},
+		unicode.Range16{Lo: 0x109a, Hi: 0x109c, Stride: 0x1},
+		unicode.Range16{Lo: 0x109d, Hi: 0x109d, Stride: 0x1},
+		unicode.Range16{Lo: 0x135d, Hi: 0x135f, Stride: 0x1},
+		unicode.Range16{Lo: 0x1712, Hi: 0x1714, Stride: 0x1},
+		unicode.Range16{Lo: 0x1715, Hi: 0x1715, Stride: 0x1},
+		unicode.Range16{Lo: 0x1732, Hi: 0x1733, Stride: 0x1},
+		unicode.Range16{Lo: 0x1734, Hi: 0x1734, Stride: 0x1},
+		unicode.Range16{Lo: 0x1752, Hi: 0x1753, Stride: 0x1},
+		unicode.Range16{Lo: 0x1772, Hi: 0x1773, Stride: 0x1},
+		unicode.Range16{Lo: 0x17b4, Hi: 0x17b5, Stride: 0x1},
+		unicode.Range16{Lo: 0x17b6, Hi: 0x17b6, Stride: 0x1},
+		unicode.Range16{Lo: 0x17b7, Hi: 0x17bd, Stride: 0x1},
+		unicode.Range16{Lo: 0x17be, Hi: 0x17c5, Stride: 0x1},
+		unicode.Range16{Lo: 0x17c6, Hi: 0x17c6, Stride: 0x1},
+		unicode.Range16{Lo: 0x17c7, Hi: 0x17c8, Stride: 0x1},
+		unicode.Range16{Lo: 0x17c9, Hi: 0x17d3, Stride: 0x1},
+		unicode.Range16{Lo: 0x17dd, Hi: 0x17dd, Stride: 0x1},
+		unicode.Range16{Lo: 0x180b, Hi: 0x180d, Stride: 0x1},
+		unicode.Range16{Lo: 0x180f, Hi: 0x180f, Stride: 0x1},
+		unicode.Range16{Lo: 0x1885, Hi: 0x1886, Stride: 0x1},
+		unicode.Range16{Lo: 0x18a9, Hi: 0x18a9, Stride: 0x1},
+		unicode.Range16{Lo: 0x1920, Hi: 0x1922, Stride: 0x1},
+		unicode.Range16{Lo: 0x1923, Hi: 0x1926, Stride: 0x1},
+		unicode.Range16{Lo: 0x1927, Hi: 0x1928, Stride: 0x1},
+		unicode.Range16{Lo: 0x1929, Hi: 0x192b, Stride: 0x1},
+		unicode.Range16{Lo: 0x1930, Hi: 0x1931, Stride: 0x1},
+		unicode.Range16{Lo: 0x1932, Hi: 0x1932, Stride: 0x1},
+		unicode.Range16{Lo: 0x1933, Hi: 0x1938, Stride: 0x1},
+		unicode.Range16{Lo: 0x1939, Hi: 0x193b, Stride: 0x1},
+		unicode.Range16{Lo: 0x1a17, Hi: 0x1a18, Stride: 0x1},
+		unicode.Range16{Lo: 0x1a19, Hi: 0x1a1a, Stride: 0x1},
+		unicode.Range16{Lo: 0x1a1b, Hi: 0x1a1b, Stride: 0x1},
+		unicode.Range16{Lo: 0x1a55, Hi: 0x1a55, Stride: 0x1},
+		unicode.Range16{Lo: 0x1a56, Hi: 0x1a56, Stride: 0x1},
+		unicode.Range16{Lo: 0x1a57, Hi: 0x1a57, Stride: 0x1},
+		unicode.Range16{Lo: 0x1a58, Hi: 0x1a5e, Stride: 0x1},
+		unicode.Range16{Lo: 0x1a60, Hi: 0x1a60, Stride: 0x1},
+		unicode.Range16{Lo: 0x1a61, Hi: 0x1a61, Stride: 0x1},
+		unicode.Range16{Lo: 0x1a62, Hi: 0x1a62, Stride: 0x1},
+		unicode.Range16{Lo: 0x1a63, Hi: 0x1a64, Stride: 0x1},
+		unicode.Range16{Lo: 0x1a65, Hi: 0x1a6c, Stride: 0x1},
+		unicode.Range16{Lo: 0x1a6d, Hi: 0x1a72, Stride: 0x1},
+		unicode.Range16{Lo: 0x1a73, Hi: 0x1a7c, Stride: 0x1},
+		unicode.Range16{Lo: 0x1a7f, Hi: 0x1a7f, Stride: 0x1},
+		unicode.Range16{Lo: 0x1ab0, Hi: 0x1abd, Stride: 0x1},
+		unicode.Range16{Lo: 0x1abe, Hi: 0x1abe, Stride: 0x1},
+		unicode.Range16{Lo: 0x1abf, Hi: 0x1ace, Stride: 0x1},
+		unicode.Range16{Lo: 0x1b00, Hi: 0x1b03, Stride: 0x1},
+		unicode.Range16{Lo: 0x1b04, Hi: 0x1b04, Stride: 0x1},
+		unicode.Range16{Lo: 0x1b34, Hi: 0x1b34, Stride: 0x1},
+		unicode.Range16{Lo: 0x1b35, Hi: 0x1b35, Stride: 0x1},
+		unicode.Range16{Lo: 0x1b36, Hi: 0x1b3a, Stride: 0x1},
+		unicode.Range16{Lo: 0x1b3b, Hi: 0x1b3b, Stride: 0x1},
+		unicode.Range16{Lo: 0x1b3c, Hi: 0x1b3c, Stride: 0x1},
+		unicode.Range16{Lo: 0x1b3d, Hi: 0x1b41, Stride: 0x1},
+		unicode.Range16{Lo: 0x1b42, Hi: 0x1b42, Stride: 0x1},
+		unicode.Range16{Lo: 0x1b43, Hi: 0x1b44, Stride: 0x1},
+		unicode.Range16{Lo: 0x1b6b, Hi: 0x1b73, Stride: 0x1},
+		unicode.Range16{Lo: 0x1b80, Hi: 0x1b81, Stride: 0x1},
+		unicode.Range16{Lo: 0x1b82, Hi: 0x1b82, Stride: 0x1},
+		unicode.Range16{Lo: 0x1ba1, Hi: 0x1ba1, Stride: 0x1},
+		unicode.Range16{Lo: 0x1ba2, Hi: 0x1ba5, Stride: 0x1},
+		unicode.Range16{Lo: 0x1ba6, Hi: 0x1ba7, Stride: 0x1},
+		unicode.Range16{Lo: 0x1ba8, Hi: 0x1ba9, Stride: 0x1},
+		unicode.Range16{Lo: 0x1baa, Hi: 0x1baa, Stride: 0x1},
+		unicode.Range16{Lo: 0x1bab, Hi: 0x1bad, Stride: 0x1},
+		unicode.Range16{Lo: 0x1be6, Hi: 0x1be6, Stride: 0x1},
+		unicode.Range16{Lo: 0x1be7, Hi: 0x1be7, Stride: 0x1},
+		unicode.Range16{Lo: 0x1be8, Hi: 0x1be9, Stride: 0x1},
+		unicode.Range16{Lo: 0x1bea, Hi: 0x1bec, Stride: 0x1},
+		unicode.Range16{Lo: 0x1bed, Hi: 0x1bed, Stride: 0x1},
+		unicode.Range16{Lo: 0x1bee, Hi: 0x1bee, Stride: 0x1},
+		unicode.Range16{Lo: 0x1bef, Hi: 0x1bf1, Stride: 0x1},
+		unicode.Range16{Lo: 0x1bf2, Hi: 0x1bf3, Stride: 0x1},
+		unicode.Range16{Lo: 0x1c24, Hi: 0x1c2b, Stride: 0x1},
+		unicode.Range16{Lo: 0x1c2c, Hi: 0x1c33, Stride: 0x1},
+		unicode.Range16{Lo: 0x1c34, Hi: 0x1c35, Stride: 0x1},
+		unicode.Range16{Lo: 0x1c36, Hi: 0x1c37, Stride: 0x1},
+		unicode.Range16{Lo: 0x1cd0, Hi: 0x1cd2, Stride: 0x1},
+		unicode.Range16{Lo: 0x1cd4, Hi: 0x1ce0, Stride: 0x1},
+		unicode.Range16{Lo: 0x1ce1, Hi: 0x1ce1, Stride: 0x1},
+		unicode.Range16{Lo: 0x1ce2, Hi: 0x1ce8, Stride: 0x1},
+		unicode.Range16{Lo: 0x1ced, Hi: 0x1ced, Stride: 0x1},
+		unicode.Range16{Lo: 0x1cf4, Hi: 0x1cf4, Stride: 0x1},
+		unicode.Range16{Lo: 0x1cf7, Hi: 0x1cf7, Stride: 0x1},
+		unicode.Range16{Lo: 0x1cf8, Hi: 0x1cf9, Stride: 0x1},
+		unicode.Range16{Lo: 0x1dc0, Hi: 0x1dff, Stride: 0x1},
+		unicode.Range16{Lo: 0x200c, Hi: 0x200c, Stride: 0x1},
+		unicode.Range16{Lo: 0x20d0, Hi: 0x20dc, Stride: 0x1},
+		unicode.Range16{Lo: 0x20dd, Hi: 0x20e0, Stride: 0x1},
+		unicode.Range16{Lo: 0x20e1, Hi: 0x20e1, Stride: 0x1},
+		unicode.Range16{Lo: 0x20e2, Hi: 0x20e4, Stride: 0x1},
+		unicode.Range16{Lo: 0x20e5, Hi: 0x20f0, Stride: 0x1},
+		unicode.Range16{Lo: 0x2cef, Hi: 0x2cf1, Stride: 0x1},
+		unicode.Range16{Lo: 0x2d7f, Hi: 0x2d7f, Stride: 0x1},
+		unicode.Range16{Lo: 0x2de0, Hi: 0x2dff, Stride: 0x1},
+		unicode.Range16{Lo: 0x302a, Hi: 0x302d, Stride: 0x1},
+		unicode.Range16{Lo: 0x302e, Hi: 0x302f, Stride: 0x1},
+		unicode.Range16{Lo: 0x3099, Hi: 0x309a, Stride: 0x1},
+		unicode.Range16{Lo: 0xa66f, Hi: 0xa66f, Stride: 0x1},
+		unicode.Range16{Lo: 0xa670, Hi: 0xa672, Stride: 0x1},
+		unicode.Range16{Lo: 0xa674, Hi: 0xa67d, Stride: 0x1},
+		unicode.Range16{Lo: 0xa69e, Hi: 0xa69f, Stride: 0x1},
+		unicode.Range16{Lo: 0xa6f0, Hi: 0xa6f1, Stride: 0x1},
+		unicode.Range16{Lo: 0xa802, Hi: 0xa802, Stride: 0x1},
+		unicode.Range16{Lo: 0xa806, Hi: 0xa806, Stride: 0x1},
+		unicode.Range16{Lo: 0xa80b, Hi: 0xa80b, Stride: 0x1},
+		unicode.Range16{Lo: 0xa823, Hi: 0xa824, Stride: 0x1},
+		unicode.Range16{Lo: 0xa825, Hi: 0xa826, Stride: 0x1},
+		unicode.Range16{Lo: 0xa827, Hi: 0xa827, Stride: 0x1},
+		unicode.Range16{Lo: 0xa82c, Hi: 0xa82c, Stride: 0x1},
+		unicode.Range16{Lo: 0xa880, Hi: 0xa881, Stride: 0x1},
+		unicode.Range16{Lo: 0xa8b4, Hi: 0xa8c3, Stride: 0x1},
+		unicode.Range16{Lo: 0xa8c4, Hi: 0xa8c5, Stride: 0x1},
+		unicode.Range16{Lo: 0xa8e0, Hi: 0xa8f1, Stride: 0x1},
+		unicode.Range16{Lo: 0xa8ff, Hi: 0xa8ff, Stride: 0x1},
+		unicode.Range16{Lo: 0xa926, Hi: 0xa92d, Stride: 0x1},
+		unicode.Range16{Lo: 0xa947, Hi: 0xa951, Stride: 0x1},
+		unicode.Range16{Lo: 0xa952, Hi: 0xa953, Stride: 0x1},
+		unicode.Range16{Lo: 0xa980, Hi: 0xa982, Stride: 0x1},
+		unicode.Range16{Lo: 0xa983, Hi: 0xa983, Stride: 0x1},
+		unicode.Range16{Lo: 0xa9b3, Hi: 0xa9b3, Stride: 0x1},
+		unicode.Range16{Lo: 0xa9b4, Hi: 0xa9b5, Stride: 0x1},
+		unicode.Range16{Lo: 0xa9b6, Hi: 0xa9b9, Stride: 0x1},
+		unicode.Range16{Lo: 0xa9ba, Hi: 0xa9bb, Stride: 0x1},
+		unicode.Range16{Lo: 0xa9bc, Hi: 0xa9bd, Stride: 0x1},
+		unicode.Range16{Lo: 0xa9be, Hi: 0xa9c0, Stride: 0x1},
+		unicode.Range16{Lo: 0xa9e5, Hi: 0xa9e5, Stride: 0x1},
+		unicode.Range16{Lo: 0xaa29, Hi: 0xaa2e, Stride: 0x1},
+		unicode.Range16{Lo: 0xaa2f, Hi: 0xaa30, Stride: 0x1},
+		unicode.Range16{Lo: 0xaa31, Hi: 0xaa32, Stride: 0x1},
+		unicode.Range16{Lo: 0xaa33, Hi: 0xaa34, Stride: 0x1},
+		unicode.Range16{Lo: 0xaa35, Hi: 0xaa36, Stride: 0x1},
+		unicode.Range16{Lo: 0xaa43, Hi: 0xaa43, Stride: 0x1},
+		unicode.Range16{Lo: 0xaa4c, Hi: 0xaa4c, Stride: 0x1},
+		unicode.Range16{Lo: 0xaa4d, Hi: 0xaa4d, Stride: 0x1},
+		unicode.Range16{Lo: 0xaa7b, Hi: 0xaa7b, Stride: 0x1},
+		unicode.Range16{Lo: 0xaa7c, Hi: 0xaa7c, Stride: 0x1},
+		unicode.Range16{Lo: 0xaa7d, Hi: 0xaa7d, Stride: 0x1},
+		unicode.Range16{Lo: 0xaab0, Hi: 0xaab0, Stride: 0x1},
+		unicode.Range16{Lo: 0xaab2, Hi: 0xaab4, Stride: 0x1},
+		unicode.Range16{Lo: 0xaab7, Hi: 0xaab8, Stride: 0x1},
+		unicode.Range16{Lo: 0xaabe, Hi: 0xaabf, Stride: 0x1},
+		unicode.Range16{Lo: 0xaac1, Hi: 0xaac1, Stride: 0x1},
+		unicode.Range16{Lo: 0xaaeb, Hi: 0xaaeb, Stride: 0x1},
+		unicode.Range16{Lo: 0xaaec, Hi: 0xaaed, Stride: 0x1},
+		unicode.Range16{Lo: 0xaaee, Hi: 0xaaef, Stride: 0x1},
+		unicode.Range16{Lo: 0xaaf5, Hi: 0xaaf5, Stride: 0x1},
+		unicode.Range16{Lo: 0xaaf6, Hi: 0xaaf6, Stride: 0x1},
+		unicode.Range16{Lo: 0xabe3, Hi: 0xabe4, Stride: 0x1},
+		unicode.Range16{Lo: 0xabe5, Hi: 0xabe5, Stride: 0x1},
+		unicode.Range16{Lo: 0xabe6, Hi: 0xabe7, Stride: 0x1},
+		unicode.Range16{Lo: 0xabe8, Hi: 0xabe8, Stride: 0x1},
+		unicode.Range16{Lo: 0xabe9, Hi: 0xabea, Stride: 0x1},
+		unicode.Range16{Lo: 0xabec, Hi: 0xabec, Stride: 0x1},
+		unicode.Range16{Lo: 0xabed, Hi: 0xabed, Stride: 0x1},
+		unicode.Range16{Lo: 0xfb1e, Hi: 0xfb1e, Stride: 0x1},
+		unicode.Range16{Lo: 0xfe00, Hi: 0xfe0f, Stride: 0x1},
+		unicode.Range16{Lo: 0xfe20, Hi: 0xfe2f, Stride: 0x1},
+		unicode.Range16{Lo: 0xff9e, Hi: 0xff9f, Stride: 0x1},
+	},
+	R32: []unicode.Range32{
+		unicode.Range32{Lo: 0x101fd, Hi: 0x101fd, Stride: 0x1},
+		unicode.Range32{Lo: 0x102e0, Hi: 0x102e0, Stride: 0x1},
+		unicode.Range32{Lo: 0x10376, Hi: 0x1037a, Stride: 0x1},
+		unicode.Range32{Lo: 0x10a01, Hi: 0x10a03, Stride: 0x1},
+		unicode.Range32{Lo: 0x10a05, Hi: 0x10a06, Stride: 0x1},
+		unicode.Range32{Lo: 0x10a0c, Hi: 0x10a0f, Stride: 0x1},
+		unicode.Range32{Lo: 0x10a38, Hi: 0x10a3a, Stride: 0x1},
+		unicode.Range32{Lo: 0x10a3f, Hi: 0x10a3f, Stride: 0x1},
+		unicode.Range32{Lo: 0x10ae5, Hi: 0x10ae6, Stride: 0x1},
+		unicode.Range32{Lo: 0x10d24, Hi: 0x10d27, Stride: 0x1},
+		unicode.Range32{Lo: 0x10eab, Hi: 0x10eac, Stride: 0x1},
+		unicode.Range32{Lo: 0x10efd, Hi: 0x10eff, Stride: 0x1},
+		unicode.Range32{Lo: 0x10f46, Hi: 0x10f50, Stride: 0x1},
+		unicode.Range32{Lo: 0x10f82, Hi: 0x10f85, Stride: 0x1},
+		unicode.Range32{Lo: 0x11000, Hi: 0x11000, Stride: 0x1},
+		unicode.Range32{Lo: 0x11001, Hi: 0x11001, Stride: 0x1},
+		unicode.Range32{Lo: 0x11002, Hi: 0x11002, Stride: 0x1},
+		unicode.Range32{Lo: 0x11038, Hi: 0x11046, Stride: 0x1},
+		unicode.Range32{Lo: 0x11070, Hi: 0x11070, Stride: 0x1},
+		unicode.Range32{Lo: 0x11073, Hi: 0x11074, Stride: 0x1},
+		unicode.Range32{Lo: 0x1107f, Hi: 0x11081, Stride: 0x1},
+		unicode.Range32{Lo: 0x11082, Hi: 0x11082, Stride: 0x1},
+		unicode.Range32{Lo: 0x110b0, Hi: 0x110b2, Stride: 0x1},
+		unicode.Range32{Lo: 0x110b3, Hi: 0x110b6, Stride: 0x1},
+		unicode.Range32{Lo: 0x110b7, Hi: 0x110b8, Stride: 0x1},
+		unicode.Range32{Lo: 0x110b9, Hi: 0x110ba, Stride: 0x1},
+		unicode.Range32{Lo: 0x110c2, Hi: 0x110c2, Stride: 0x1},
+		unicode.Range32{Lo: 0x11100, Hi: 0x11102, Stride: 0x1},
+		unicode.Range32{Lo: 0x11127, Hi: 0x1112b, Stride: 0x1},
+		unicode.Range32{Lo: 0x1112c, Hi: 0x1112c, Stride: 0x1},
+		unicode.Range32{Lo: 0x1112d, Hi: 0x11134, Stride: 0x1},
+		unicode.Range32{Lo: 0x11145, Hi: 0x11146, Stride: 0x1},
+		unicode.Range32{Lo: 0x11173, Hi: 0x11173, Stride: 0x1},
+		unicode.Range32{Lo: 0x11180, Hi: 0x11181, Stride: 0x1},
+		unicode.Range32{Lo: 0x11182, Hi: 0x11182, Stride: 0x1},
+		unicode.Range32{Lo: 0x111b3, Hi: 0x111b5, Stride: 0x1},
+		unicode.Range32{Lo: 0x111b6, Hi: 0x111be, Stride: 0x1},
+		unicode.Range32{Lo: 0x111bf, Hi: 0x111c0, Stride: 0x1},
+		unicode.Range32{Lo: 0x111c9, Hi: 0x111cc, Stride: 0x1},
+		unicode.Range32{Lo: 0x111ce, Hi: 0x111ce, Stride: 0x1},
+		unicode.Range32{Lo: 0x111cf, Hi: 0x111cf, Stride: 0x1},
+		unicode.Range32{Lo: 0x1122c, Hi: 0x1122e, Stride: 0x1},
+		unicode.Range32{Lo: 0x1122f, Hi: 0x11231, Stride: 0x1},
+		unicode.Range32{Lo: 0x11232, Hi: 0x11233, Stride: 0x1},
+		unicode.Range32{Lo: 0x11234, Hi: 0x11234, Stride: 0x1},
+		unicode.Range32{Lo: 0x11235, Hi: 0x11235, Stride: 0x1},
+		unicode.Range32{Lo: 0x11236, Hi: 0x11237, Stride: 0x1},
+		unicode.Range32{Lo: 0x1123e, Hi: 0x1123e, Stride: 0x1},
+		unicode.Range32{Lo: 0x11241, Hi: 0x11241, Stride: 0x1},
+		unicode.Range32{Lo: 0x112df, Hi: 0x112df, Stride: 0x1},
+		unicode.Range32{Lo: 0x112e0, Hi: 0x112e2, Stride: 0x1},
+		unicode.Range32{Lo: 0x112e3, Hi: 0x112ea, Stride: 0x1},
+		unicode.Range32{Lo: 0x11300, Hi: 0x11301, Stride: 0x1},
+		unicode.Range32{Lo: 0x11302, Hi: 0x11303, Stride: 0x1},
+		unicode.Range32{Lo: 0x1133b, Hi: 0x1133c, Stride: 0x1},
+		unicode.Range32{Lo: 0x1133e, Hi: 0x1133f, Stride: 0x1},
+		unicode.Range32{Lo: 0x11340, Hi: 0x11340, Stride: 0x1},
+		unicode.Range32{Lo: 0x11341, Hi: 0x11344, Stride: 0x1},
+		unicode.Range32{Lo: 0x11347, Hi: 0x11348, Stride: 0x1},
+		unicode.Range32{Lo: 0x1134b, Hi: 0x1134d, Stride: 0x1},
+		unicode.Range32{Lo: 0x11357, Hi: 0x11357, Stride: 0x1},
+		unicode.Range32{Lo: 0x11362, Hi: 0x11363, Stride: 0x1},
+		unicode.Range32{Lo: 0x11366, Hi: 0x1136c, Stride: 0x1},
+		unicode.Range32{Lo: 0x11370, Hi: 0x11374, Stride: 0x1},
+		unicode.Range32{Lo: 0x11435, Hi: 0x11437, Stride: 0x1},
+		unicode.Range32{Lo: 0x11438, Hi: 0x1143f, Stride: 0x1},
+		unicode.Range32{Lo: 0x11440, Hi: 0x11441, Stride: 0x1},
+		unicode.Range32{Lo: 0x11442, Hi: 0x11444, Stride: 0x1},
+		unicode.Range32{Lo: 0x11445, Hi: 0x11445, Stride: 0x1},
+		unicode.Range32{Lo: 0x11446, Hi: 0x11446, Stride: 0x1},
+		unicode.Range32{Lo: 0x1145e, Hi: 0x1145e, Stride: 0x1},
+		unicode.Range32{Lo: 0x114b0, Hi: 0x114b2, Stride: 0x1},
+		unicode.Range32{Lo: 0x114b3, Hi: 0x114b8, Stride: 0x1},
+		unicode.Range32{Lo: 0x114b9, Hi: 0x114b9, Stride: 0x1},
+		unicode.Range32{Lo: 0x114ba, Hi: 0x114ba, Stride: 0x1},
+		unicode.Range32{Lo: 0x114bb, Hi: 0x114be, Stride: 0x1},
+		unicode.Range32{Lo: 0x114bf, Hi: 0x114c0, Stride: 0x1},
+		unicode.Range32{Lo: 0x114c1, Hi: 0x114c1, Stride: 0x1},
+		unicode.Range32{Lo: 0x114c2, Hi: 0x114c3, Stride: 0x1},
+		unicode.Range32{Lo: 0x115af, Hi: 0x115b1, Stride: 0x1},
+		unicode.Range32{Lo: 0x115b2, Hi: 0x115b5, Stride: 0x1},
+		unicode.Range32{Lo: 0x115b8, Hi: 0x115bb, Stride: 0x1},
+		unicode.Range32{Lo: 0x115bc, Hi: 0x115bd, Stride: 0x1},
+		unicode.Range32{Lo: 0x115be, Hi: 0x115be, Stride: 0x1},
+		unicode.Range32{Lo: 0x115bf, Hi: 0x115c0, Stride: 0x1},
+		unicode.Range32{Lo: 0x115dc, Hi: 0x115dd, Stride: 0x1},
+		unicode.Range32{Lo: 0x11630, Hi: 0x11632, Stride: 0x1},
+		unicode.Range32{Lo: 0x11633, Hi: 0x1163a, Stride: 0x1},
+		unicode.Range32{Lo: 0x1163b, Hi: 0x1163c, Stride: 0x1},
+		unicode.Range32{Lo: 0x1163d, Hi: 0x1163d, Stride: 0x1},
+		unicode.Range32{Lo: 0x1163e, Hi: 0x1163e, Stride: 0x1},
+		unicode.Range32{Lo: 0x1163f, Hi: 0x11640, Stride: 0x1},
+		unicode.Range32{Lo: 0x116ab, Hi: 0x116ab, Stride: 0x1},
+		unicode.Range32{Lo: 0x116ac, Hi: 0x116ac, Stride: 0x1},
+		unicode.Range32{Lo: 0x116ad, Hi: 0x116ad, Stride: 0x1},
+		unicode.Range32{Lo: 0x116ae, Hi: 0x116af, Stride: 0x1},
+		unicode.Range32{Lo: 0x116b0, Hi: 0x116b5, Stride: 0x1},
+		unicode.Range32{Lo: 0x116b6, Hi: 0x116b6, Stride: 0x1},
+		unicode.Range32{Lo: 0x116b7, Hi: 0x116b7, Stride: 0x1},
+		unicode.Range32{Lo: 0x1171d, Hi: 0x1171f, Stride: 0x1},
+		unicode.Range32{Lo: 0x11720, Hi: 0x11721, Stride: 0x1},
+		unicode.Range32{Lo: 0x11722, Hi: 0x11725, Stride: 0x1},
+		unicode.Range32{Lo: 0x11726, Hi: 0x11726, Stride: 0x1},
+		unicode.Range32{Lo: 0x11727, Hi: 0x1172b, Stride: 0x1},
+		unicode.Range32{Lo: 0x1182c, Hi: 0x1182e, Stride: 0x1},
+		unicode.Range32{Lo: 0x1182f, Hi: 0x11837, Stride: 0x1},
+		unicode.Range32{Lo: 0x11838, Hi: 0x11838, Stride: 0x1},
+		unicode.Range32{Lo: 0x11839, Hi: 0x1183a, Stride: 0x1},
+		unicode.Range32{Lo: 0x11930, Hi: 0x11935, Stride: 0x1},
+		unicode.Range32{Lo: 0x11937, Hi: 0x11938, Stride: 0x1},
+		unicode.Range32{Lo: 0x1193b, Hi: 0x1193c, Stride: 0x1},
+		unicode.Range32{Lo: 0x1193d, Hi: 0x1193d, Stride: 0x1},
+		unicode.Range32{Lo: 0x1193e, Hi: 0x1193e, Stride: 0x1},
+		unicode.Range32{Lo: 0x11940, Hi: 0x11940, Stride: 0x1},
+		unicode.Range32{Lo: 0x11942, Hi: 0x11942, Stride: 0x1},
+		unicode.Range32{Lo: 0x11943, Hi: 0x11943, Stride: 0x1},
+		unicode.Range32{Lo: 0x119d1, Hi: 0x119d3, Stride: 0x1},
+		unicode.Range32{Lo: 0x119d4, Hi: 0x119d7, Stride: 0x1},
+		unicode.Range32{Lo: 0x119da, Hi: 0x119db, Stride: 0x1},
+		unicode.Range32{Lo: 0x119dc, Hi: 0x119df, Stride: 0x1},
+		unicode.Range32{Lo: 0x119e0, Hi: 0x119e0, Stride: 0x1},
+		unicode.Range32{Lo: 0x119e4, Hi: 0x119e4, Stride: 0x1},
+		unicode.Range32{Lo: 0x11a01, Hi: 0x11a0a, Stride: 0x1},
+		unicode.Range32{Lo: 0x11a33, Hi: 0x11a38, Stride: 0x1},
+		unicode.Range32{Lo: 0x11a39, Hi: 0x11a39, Stride: 0x1},
+		unicode.Range32{Lo: 0x11a3b, Hi: 0x11a3e, Stride: 0x1},
+		unicode.Range32{Lo: 0x11a47, Hi: 0x11a47, Stride: 0x1},
+		unicode.Range32{Lo: 0x11a51, Hi: 0x11a56, Stride: 0x1},
+		unicode.Range32{Lo: 0x11a57, Hi: 0x11a58, Stride: 0x1},
+		unicode.Range32{Lo: 0x11a59, Hi: 0x11a5b, Stride: 0x1},
+		unicode.Range32{Lo: 0x11a8a, Hi: 0x11a96, Stride: 0x1},
+		unicode.Range32{Lo: 0x11a97, Hi: 0x11a97, Stride: 0x1},
+		unicode.Range32{Lo: 0x11a98, Hi: 0x11a99, Stride: 0x1},
+		unicode.Range32{Lo: 0x11c2f, Hi: 0x11c2f, Stride: 0x1},
+		unicode.Range32{Lo: 0x11c30, Hi: 0x11c36, Stride: 0x1},
+		unicode.Range32{Lo: 0x11c38, Hi: 0x11c3d, Stride: 0x1},
+		unicode.Range32{Lo: 0x11c3e, Hi: 0x11c3e, Stride: 0x1},
+		unicode.Range32{Lo: 0x11c3f, Hi: 0x11c3f, Stride: 0x1},
+		unicode.Range32{Lo: 0x11c92, Hi: 0x11ca7, Stride: 0x1},
+		unicode.Range32{Lo: 0x11ca9, Hi: 0x11ca9, Stride: 0x1},
+		unicode.Range32{Lo: 0x11caa, Hi: 0x11cb0, Stride: 0x1},
+		unicode.Range32{Lo: 0x11cb1, Hi: 0x11cb1, Stride: 0x1},
+		unicode.Range32{Lo: 0x11cb2, Hi: 0x11cb3, Stride: 0x1},
+		unicode.Range32{Lo: 0x11cb4, Hi: 0x11cb4, Stride: 0x1},
+		unicode.Range32{Lo: 0x11cb5, Hi: 0x11cb6, Stride: 0x1},
+		unicode.Range32{Lo: 0x11d31, Hi: 0x11d36, Stride: 0x1},
+		unicode.Range32{Lo: 0x11d3a, Hi: 0x11d3a, Stride: 0x1},
+		unicode.Range32{Lo: 0x11d3c, Hi: 0x11d3d, Stride: 0x1},
+		unicode.Range32{Lo: 0x11d3f, Hi: 0x11d45, Stride: 0x1},
+		unicode.Range32{Lo: 0x11d47, Hi: 0x11d47, Stride: 0x1},
+		unicode.Range32{Lo: 0x11d8a, Hi: 0x11d8e, Stride: 0x1},
+		unicode.Range32{Lo: 0x11d90, Hi: 0x11d91, Stride: 0x1},
+		unicode.Range32{Lo: 0x11d93, Hi: 0x11d94, Stride: 0x1},
+		unicode.Range32{Lo: 0x11d95, Hi: 0x11d95, Stride: 0x1},
+		unicode.Range32{Lo: 0x11d96, Hi: 0x11d96, Stride: 0x1},
+		unicode.Range32{Lo: 0x11d97, Hi: 0x11d97, Stride: 0x1},
+		unicode.Range32{Lo: 0x11ef3, Hi: 0x11ef4, Stride: 0x1},
+		unicode.Range32{Lo: 0x11ef5, Hi: 0x11ef6, Stride: 0x1},
+		unicode.Range32{Lo: 0x11f00, Hi: 0x11f01, Stride: 0x1},
+		unicode.Range32{Lo: 0x11f03, Hi: 0x11f03, Stride: 0x1},
+		unicode.Range32{Lo: 0x11f34, Hi: 0x11f35, Stride: 0x1},
+		unicode.Range32{Lo: 0x11f36, Hi: 0x11f3a, Stride: 0x1},
+		unicode.Range32{Lo: 0x11f3e, Hi: 0x11f3f, Stride: 0x1},
+		unicode.Range32{Lo: 0x11f40, Hi: 0x11f40, Stride: 0x1},
+		unicode.Range32{Lo: 0x11f41, Hi: 0x11f41, Stride: 0x1},
+		unicode.Range32{Lo: 0x11f42, Hi: 0x11f42, Stride: 0x1},
+		unicode.Range32{Lo: 0x13440, Hi: 0x13440, Stride: 0x1},
+		unicode.Range32{Lo: 0x13447, Hi: 0x13455, Stride: 0x1},
+		unicode.Range32{Lo: 0x16af0, Hi: 0x16af4, Stride: 0x1},
+		unicode.Range32{Lo: 0x16b30, Hi: 0x16b36, Stride: 0x1},
+		unicode.Range32{Lo: 0x16f4f, Hi: 0x16f4f, Stride: 0x1},
+		unicode.Range32{Lo: 0x16f51, Hi: 0x16f87, Stride: 0x1},
+		unicode.Range32{Lo: 0x16f8f, Hi: 0x16f92, Stride: 0x1},
+		unicode.Range32{Lo: 0x16fe4, Hi: 0x16fe4, Stride: 0x1},
+		unicode.Range32{Lo: 0x16ff0, Hi: 0x16ff1, Stride: 0x1},
+		unicode.Range32{Lo: 0x1bc9d, Hi: 0x1bc9e, Stride: 0x1},
+		unicode.Range32{Lo: 0x1cf00, Hi: 0x1cf2d, Stride: 0x1},
+		unicode.Range32{Lo: 0x1cf30, Hi: 0x1cf46, Stride: 0x1},
+		unicode.Range32{Lo: 0x1d165, Hi: 0x1d166, Stride: 0x1},
+		unicode.Range32{Lo: 0x1d167, Hi: 0x1d169, Stride: 0x1},
+		unicode.Range32{Lo: 0x1d16d, Hi: 0x1d172, Stride: 0x1},
+		unicode.Range32{Lo: 0x1d17b, Hi: 0x1d182, Stride: 0x1},
+		unicode.Range32{Lo: 0x1d185, Hi: 0x1d18b, Stride: 0x1},
+		unicode.Range32{Lo: 0x1d1aa, Hi: 0x1d1ad, Stride: 0x1},
+		unicode.Range32{Lo: 0x1d242, Hi: 0x1d244, Stride: 0x1},
+		unicode.Range32{Lo: 0x1da00, Hi: 0x1da36, Stride: 0x1},
+		unicode.Range32{Lo: 0x1da3b, Hi: 0x1da6c, Stride: 0x1},
+		unicode.Range32{Lo: 0x1da75, Hi: 0x1da75, Stride: 0x1},
+		unicode.Range32{Lo: 0x1da84, Hi: 0x1da84, Stride: 0x1},
+		unicode.Range32{Lo: 0x1da9b, Hi: 0x1da9f, Stride: 0x1},
+		unicode.Range32{Lo: 0x1daa1, Hi: 0x1daaf, Stride: 0x1},
+		unicode.Range32{Lo: 0x1e000, Hi: 0x1e006, Stride: 0x1},
+		unicode.Range32{Lo: 0x1e008, Hi: 0x1e018, Stride: 0x1},
+		unicode.Range32{Lo: 0x1e01b, Hi: 0x1e021, Stride: 0x1},
+		unicode.Range32{Lo: 0x1e023, Hi: 0x1e024, Stride: 0x1},
+		unicode.Range32{Lo: 0x1e026, Hi: 0x1e02a, Stride: 0x1},
+		unicode.Range32{Lo: 0x1e08f, Hi: 0x1e08f, Stride: 0x1},
+		unicode.Range32{Lo: 0x1e130, Hi: 0x1e136, Stride: 0x1},
+		unicode.Range32{Lo: 0x1e2ae, Hi: 0x1e2ae, Stride: 0x1},
+		unicode.Range32{Lo: 0x1e2ec, Hi: 0x1e2ef, Stride: 0x1},
+		unicode.Range32{Lo: 0x1e4ec, Hi: 0x1e4ef, Stride: 0x1},
+		unicode.Range32{Lo: 0x1e8d0, Hi: 0x1e8d6, Stride: 0x1},
+		unicode.Range32{Lo: 0x1e944, Hi: 0x1e94a, Stride: 0x1},
+		unicode.Range32{Lo: 0x1f3fb, Hi: 0x1f3ff, Stride: 0x1},
+		unicode.Range32{Lo: 0xe0020, Hi: 0xe007f, Stride: 0x1},
+		unicode.Range32{Lo: 0xe0100, Hi: 0xe01ef, Stride: 0x1},
+	},
+	LatinOffset: 0,
+}
+
+// _WordExtendNumLet holds the code points whose Word_Break property value is
+// ExtendNumLet (connector punctuation such as U+005F LOW LINE); table appears
+// generated from Unicode data — confirm before hand-editing.
+var _WordExtendNumLet = &unicode.RangeTable{
+	R16: []unicode.Range16{
+		{Lo: 0x5f, Hi: 0x5f, Stride: 1},
+		{Lo: 0x202f, Hi: 0x202f, Stride: 1},
+		{Lo: 0x203f, Hi: 0x2040, Stride: 1},
+		{Lo: 0x2054, Hi: 0x2054, Stride: 1},
+		{Lo: 0xfe33, Hi: 0xfe34, Stride: 1},
+		{Lo: 0xfe4d, Hi: 0xfe4f, Stride: 1},
+		{Lo: 0xff3f, Hi: 0xff3f, Stride: 1},
+	},
+	LatinOffset: 1,
+}
+
+// _WordFormat holds the code points whose Word_Break property value is
+// Format (invisible formatting controls: soft hyphen, bidi controls, etc.);
+// table appears generated from Unicode data — confirm before hand-editing.
+var _WordFormat = &unicode.RangeTable{
+	R16: []unicode.Range16{
+		{Lo: 0xad, Hi: 0xad, Stride: 1},
+		{Lo: 0x600, Hi: 0x605, Stride: 1},
+		{Lo: 0x61c, Hi: 0x61c, Stride: 1},
+		{Lo: 0x6dd, Hi: 0x6dd, Stride: 1},
+		{Lo: 0x70f, Hi: 0x70f, Stride: 1},
+		{Lo: 0x890, Hi: 0x891, Stride: 1},
+		{Lo: 0x8e2, Hi: 0x8e2, Stride: 1},
+		{Lo: 0x180e, Hi: 0x180e, Stride: 1},
+		{Lo: 0x200e, Hi: 0x200f, Stride: 1},
+		{Lo: 0x202a, Hi: 0x202e, Stride: 1},
+		{Lo: 0x2060, Hi: 0x2064, Stride: 1},
+		{Lo: 0x2066, Hi: 0x206f, Stride: 1},
+		{Lo: 0xfeff, Hi: 0xfeff, Stride: 1},
+		{Lo: 0xfff9, Hi: 0xfffb, Stride: 1},
+	},
+	R32: []unicode.Range32{
+		{Lo: 0x110bd, Hi: 0x110bd, Stride: 1},
+		{Lo: 0x110cd, Hi: 0x110cd, Stride: 1},
+		{Lo: 0x13430, Hi: 0x1343f, Stride: 1},
+		{Lo: 0x1bca0, Hi: 0x1bca3, Stride: 1},
+		{Lo: 0x1d173, Hi: 0x1d17a, Stride: 1},
+		{Lo: 0xe0001, Hi: 0xe0001, Stride: 1},
+	},
+	LatinOffset: 1,
+}
+
+// _WordHebrew_Letter holds the code points whose Word_Break property value
+// is Hebrew_Letter (Hebrew letters and their presentation forms); table
+// appears generated from Unicode data — confirm before hand-editing.
+var _WordHebrew_Letter = &unicode.RangeTable{
+	R16: []unicode.Range16{
+		{Lo: 0x5d0, Hi: 0x5ea, Stride: 1},
+		{Lo: 0x5ef, Hi: 0x5f2, Stride: 1},
+		{Lo: 0xfb1d, Hi: 0xfb1d, Stride: 1},
+		{Lo: 0xfb1f, Hi: 0xfb28, Stride: 1},
+		{Lo: 0xfb2a, Hi: 0xfb36, Stride: 1},
+		{Lo: 0xfb38, Hi: 0xfb3c, Stride: 1},
+		{Lo: 0xfb3e, Hi: 0xfb3e, Stride: 1},
+		{Lo: 0xfb40, Hi: 0xfb41, Stride: 1},
+		{Lo: 0xfb43, Hi: 0xfb44, Stride: 1},
+		{Lo: 0xfb46, Hi: 0xfb4f, Stride: 1},
+	},
+	LatinOffset: 0,
+}
+
+// _WordKatakana holds the code points whose Word_Break property value is
+// Katakana (katakana syllabary, halfwidth forms and related marks); table
+// appears generated from Unicode data — confirm before hand-editing.
+var _WordKatakana = &unicode.RangeTable{
+	R16: []unicode.Range16{
+		{Lo: 0x3031, Hi: 0x3035, Stride: 1},
+		{Lo: 0x309b, Hi: 0x309c, Stride: 1},
+		{Lo: 0x30a0, Hi: 0x30a0, Stride: 1},
+		{Lo: 0x30a1, Hi: 0x30fa, Stride: 1},
+		{Lo: 0x30fc, Hi: 0x30fe, Stride: 1},
+		{Lo: 0x30ff, Hi: 0x30ff, Stride: 1},
+		{Lo: 0x31f0, Hi: 0x31ff, Stride: 1},
+		{Lo: 0x32d0, Hi: 0x32fe, Stride: 1},
+		{Lo: 0x3300, Hi: 0x3357, Stride: 1},
+		{Lo: 0xff66, Hi: 0xff6f, Stride: 1},
+		{Lo: 0xff70, Hi: 0xff70, Stride: 1},
+		{Lo: 0xff71, Hi: 0xff9d, Stride: 1},
+	},
+	R32: []unicode.Range32{
+		{Lo: 0x1aff0, Hi: 0x1aff3, Stride: 1},
+		{Lo: 0x1aff5, Hi: 0x1affb, Stride: 1},
+		{Lo: 0x1affd, Hi: 0x1affe, Stride: 1},
+		{Lo: 0x1b000, Hi: 0x1b000, Stride: 1},
+		{Lo: 0x1b120, Hi: 0x1b122, Stride: 1},
+		{Lo: 0x1b155, Hi: 0x1b155, Stride: 1},
+		{Lo: 0x1b164, Hi: 0x1b167, Stride: 1},
+	},
+	LatinOffset: 0,
+}
+
+// _WordLF holds the single code point with Word_Break property value LF
+// (U+000A LINE FEED). CR and other line separators live in their own tables.
+var _WordLF = &unicode.RangeTable{
+	R16: []unicode.Range16{
+		{Lo: 0xa, Hi: 0xa, Stride: 1},
+	},
+	LatinOffset: 1,
+}
+
+// _WordMidLetter holds the code points whose Word_Break property value is
+// MidLetter (punctuation that can appear inside a word, e.g. U+003A COLON);
+// table appears generated from Unicode data — confirm before hand-editing.
+var _WordMidLetter = &unicode.RangeTable{
+	R16: []unicode.Range16{
+		{Lo: 0x3a, Hi: 0x3a, Stride: 1},
+		{Lo: 0xb7, Hi: 0xb7, Stride: 1},
+		{Lo: 0x387, Hi: 0x387, Stride: 1},
+		{Lo: 0x55f, Hi: 0x55f, Stride: 1},
+		{Lo: 0x5f4, Hi: 0x5f4, Stride: 1},
+		{Lo: 0x2027, Hi: 0x2027, Stride: 1},
+		{Lo: 0xfe13, Hi: 0xfe13, Stride: 1},
+		{Lo: 0xfe55, Hi: 0xfe55, Stride: 1},
+		{Lo: 0xff1a, Hi: 0xff1a, Stride: 1},
+	},
+	LatinOffset: 2,
+}
+
+// _WordMidNum holds the code points whose Word_Break property value is
+// MidNum (punctuation allowed between digits, e.g. U+002C COMMA); table
+// appears generated from Unicode data — confirm before hand-editing.
+var _WordMidNum = &unicode.RangeTable{
+	R16: []unicode.Range16{
+		{Lo: 0x2c, Hi: 0x2c, Stride: 1},
+		{Lo: 0x3b, Hi: 0x3b, Stride: 1},
+		{Lo: 0x37e, Hi: 0x37e, Stride: 1},
+		{Lo: 0x589, Hi: 0x589, Stride: 1},
+		{Lo: 0x60c, Hi: 0x60d, Stride: 1},
+		{Lo: 0x66c, Hi: 0x66c, Stride: 1},
+		{Lo: 0x7f8, Hi: 0x7f8, Stride: 1},
+		{Lo: 0x2044, Hi: 0x2044, Stride: 1},
+		{Lo: 0xfe10, Hi: 0xfe10, Stride: 1},
+		{Lo: 0xfe14, Hi: 0xfe14, Stride: 1},
+		{Lo: 0xfe50, Hi: 0xfe50, Stride: 1},
+		{Lo: 0xfe54, Hi: 0xfe54, Stride: 1},
+		{Lo: 0xff0c, Hi: 0xff0c, Stride: 1},
+		{Lo: 0xff1b, Hi: 0xff1b, Stride: 1},
+	},
+	LatinOffset: 2,
+}
+
+// _WordMidNumLet holds the code points whose Word_Break property value is
+// MidNumLet (punctuation allowed inside both words and numbers, e.g.
+// U+002E FULL STOP); table appears generated from Unicode data.
+var _WordMidNumLet = &unicode.RangeTable{
+	R16: []unicode.Range16{
+		{Lo: 0x2e, Hi: 0x2e, Stride: 1},
+		{Lo: 0x2018, Hi: 0x2018, Stride: 1},
+		{Lo: 0x2019, Hi: 0x2019, Stride: 1},
+		{Lo: 0x2024, Hi: 0x2024, Stride: 1},
+		{Lo: 0xfe52, Hi: 0xfe52, Stride: 1},
+		{Lo: 0xff07, Hi: 0xff07, Stride: 1},
+		{Lo: 0xff0e, Hi: 0xff0e, Stride: 1},
+	},
+	LatinOffset: 1,
+}
+
+// _WordNewline holds the code points whose Word_Break property value is
+// Newline (VT, FF, NEL, LINE/PARAGRAPH SEPARATOR). CR and LF are classified
+// by their own tables, _WordCR and _WordLF.
+var _WordNewline = &unicode.RangeTable{
+	R16: []unicode.Range16{
+		{Lo: 0xb, Hi: 0xc, Stride: 1},
+		{Lo: 0x85, Hi: 0x85, Stride: 1},
+		{Lo: 0x2028, Hi: 0x2028, Stride: 1},
+		{Lo: 0x2029, Hi: 0x2029, Stride: 1},
+	},
+	LatinOffset: 2,
+}
+
+// _WordNumeric holds the code points whose Word_Break property value is
+// Numeric (decimal digit runs across many scripts: ASCII, Arabic-Indic,
+// Devanagari, ..., and supplementary-plane digit blocks in R32). Table
+// appears generated from Unicode data — confirm before hand-editing.
+var _WordNumeric = &unicode.RangeTable{
+	R16: []unicode.Range16{
+		unicode.Range16{Lo: 0x30, Hi: 0x39, Stride: 0x1},
+		unicode.Range16{Lo: 0x660, Hi: 0x669, Stride: 0x1},
+		unicode.Range16{Lo: 0x66b, Hi: 0x66b, Stride: 0x1},
+		unicode.Range16{Lo: 0x6f0, Hi: 0x6f9, Stride: 0x1},
+		unicode.Range16{Lo: 0x7c0, Hi: 0x7c9, Stride: 0x1},
+		unicode.Range16{Lo: 0x966, Hi: 0x96f, Stride: 0x1},
+		unicode.Range16{Lo: 0x9e6, Hi: 0x9ef, Stride: 0x1},
+		unicode.Range16{Lo: 0xa66, Hi: 0xa6f, Stride: 0x1},
+		unicode.Range16{Lo: 0xae6, Hi: 0xaef, Stride: 0x1},
+		unicode.Range16{Lo: 0xb66, Hi: 0xb6f, Stride: 0x1},
+		unicode.Range16{Lo: 0xbe6, Hi: 0xbef, Stride: 0x1},
+		unicode.Range16{Lo: 0xc66, Hi: 0xc6f, Stride: 0x1},
+		unicode.Range16{Lo: 0xce6, Hi: 0xcef, Stride: 0x1},
+		unicode.Range16{Lo: 0xd66, Hi: 0xd6f, Stride: 0x1},
+		unicode.Range16{Lo: 0xde6, Hi: 0xdef, Stride: 0x1},
+		unicode.Range16{Lo: 0xe50, Hi: 0xe59, Stride: 0x1},
+		unicode.Range16{Lo: 0xed0, Hi: 0xed9, Stride: 0x1},
+		unicode.Range16{Lo: 0xf20, Hi: 0xf29, Stride: 0x1},
+		unicode.Range16{Lo: 0x1040, Hi: 0x1049, Stride: 0x1},
+		unicode.Range16{Lo: 0x1090, Hi: 0x1099, Stride: 0x1},
+		unicode.Range16{Lo: 0x17e0, Hi: 0x17e9, Stride: 0x1},
+		unicode.Range16{Lo: 0x1810, Hi: 0x1819, Stride: 0x1},
+		unicode.Range16{Lo: 0x1946, Hi: 0x194f, Stride: 0x1},
+		unicode.Range16{Lo: 0x19d0, Hi: 0x19d9, Stride: 0x1},
+		unicode.Range16{Lo: 0x1a80, Hi: 0x1a89, Stride: 0x1},
+		unicode.Range16{Lo: 0x1a90, Hi: 0x1a99, Stride: 0x1},
+		unicode.Range16{Lo: 0x1b50, Hi: 0x1b59, Stride: 0x1},
+		unicode.Range16{Lo: 0x1bb0, Hi: 0x1bb9, Stride: 0x1},
+		unicode.Range16{Lo: 0x1c40, Hi: 0x1c49, Stride: 0x1},
+		unicode.Range16{Lo: 0x1c50, Hi: 0x1c59, Stride: 0x1},
+		unicode.Range16{Lo: 0xa620, Hi: 0xa629, Stride: 0x1},
+		unicode.Range16{Lo: 0xa8d0, Hi: 0xa8d9, Stride: 0x1},
+		unicode.Range16{Lo: 0xa900, Hi: 0xa909, Stride: 0x1},
+		unicode.Range16{Lo: 0xa9d0, Hi: 0xa9d9, Stride: 0x1},
+		unicode.Range16{Lo: 0xa9f0, Hi: 0xa9f9, Stride: 0x1},
+		unicode.Range16{Lo: 0xaa50, Hi: 0xaa59, Stride: 0x1},
+		unicode.Range16{Lo: 0xabf0, Hi: 0xabf9, Stride: 0x1},
+		unicode.Range16{Lo: 0xff10, Hi: 0xff19, Stride: 0x1},
+	},
+	R32: []unicode.Range32{
+		unicode.Range32{Lo: 0x104a0, Hi: 0x104a9, Stride: 0x1},
+		unicode.Range32{Lo: 0x10d30, Hi: 0x10d39, Stride: 0x1},
+		unicode.Range32{Lo: 0x11066, Hi: 0x1106f, Stride: 0x1},
+		unicode.Range32{Lo: 0x110f0, Hi: 0x110f9, Stride: 0x1},
+		unicode.Range32{Lo: 0x11136, Hi: 0x1113f, Stride: 0x1},
+		unicode.Range32{Lo: 0x111d0, Hi: 0x111d9, Stride: 0x1},
+		unicode.Range32{Lo: 0x112f0, Hi: 0x112f9, Stride: 0x1},
+		unicode.Range32{Lo: 0x11450, Hi: 0x11459, Stride: 0x1},
+		unicode.Range32{Lo: 0x114d0, Hi: 0x114d9, Stride: 0x1},
+		unicode.Range32{Lo: 0x11650, Hi: 0x11659, Stride: 0x1},
+		unicode.Range32{Lo: 0x116c0, Hi: 0x116c9, Stride: 0x1},
+		unicode.Range32{Lo: 0x11730, Hi: 0x11739, Stride: 0x1},
+		unicode.Range32{Lo: 0x118e0, Hi: 0x118e9, Stride: 0x1},
+		unicode.Range32{Lo: 0x11950, Hi: 0x11959, Stride: 0x1},
+		unicode.Range32{Lo: 0x11c50, Hi: 0x11c59, Stride: 0x1},
+		unicode.Range32{Lo: 0x11d50, Hi: 0x11d59, Stride: 0x1},
+		unicode.Range32{Lo: 0x11da0, Hi: 0x11da9, Stride: 0x1},
+		unicode.Range32{Lo: 0x11f50, Hi: 0x11f59, Stride: 0x1},
+		unicode.Range32{Lo: 0x16a60, Hi: 0x16a69, Stride: 0x1},
+		unicode.Range32{Lo: 0x16ac0, Hi: 0x16ac9, Stride: 0x1},
+		unicode.Range32{Lo: 0x16b50, Hi: 0x16b59, Stride: 0x1},
+		unicode.Range32{Lo: 0x1d7ce, Hi: 0x1d7ff, Stride: 0x1},
+		unicode.Range32{Lo: 0x1e140, Hi: 0x1e149, Stride: 0x1},
+		unicode.Range32{Lo: 0x1e2f0, Hi: 0x1e2f9, Stride: 0x1},
+		unicode.Range32{Lo: 0x1e4f0, Hi: 0x1e4f9, Stride: 0x1},
+		unicode.Range32{Lo: 0x1e950, Hi: 0x1e959, Stride: 0x1},
+		unicode.Range32{Lo: 0x1fbf0, Hi: 0x1fbf9, Stride: 0x1},
+	},
+	LatinOffset: 1,
+}
+
+// _WordRegional_Indicator holds the regional-indicator symbols
+// U+1F1E6..U+1F1FF (Word_Break value Regional_Indicator); pairs of these
+// form flag emoji.
+var _WordRegional_Indicator = &unicode.RangeTable{
+	R32: []unicode.Range32{
+		{Lo: 0x1f1e6, Hi: 0x1f1ff, Stride: 1},
+	},
+	LatinOffset: 0,
+}
+
+// _WordSingle_Quote holds the single code point with Word_Break property
+// value Single_Quote (U+0027 APOSTROPHE).
+var _WordSingle_Quote = &unicode.RangeTable{
+	R16: []unicode.Range16{
+		{Lo: 0x27, Hi: 0x27, Stride: 1},
+	},
+	LatinOffset: 1,
+}
+
+// _WordWSegSpace holds the code points whose Word_Break property value is
+// WSegSpace (whitespace that segments words, e.g. U+0020 SPACE and
+// U+3000 IDEOGRAPHIC SPACE); table appears generated from Unicode data.
+var _WordWSegSpace = &unicode.RangeTable{
+	R16: []unicode.Range16{
+		{Lo: 0x20, Hi: 0x20, Stride: 1},
+		{Lo: 0x1680, Hi: 0x1680, Stride: 1},
+		{Lo: 0x2000, Hi: 0x2006, Stride: 1},
+		{Lo: 0x2008, Hi: 0x200a, Stride: 1},
+		{Lo: 0x205f, Hi: 0x205f, Stride: 1},
+		{Lo: 0x3000, Hi: 0x3000, Stride: 1},
+	},
+	LatinOffset: 1,
+}
+
+// _WordZWJ holds the single code point with Word_Break property value ZWJ
+// (U+200D ZERO WIDTH JOINER).
+var _WordZWJ = &unicode.RangeTable{
+	R16: []unicode.Range16{
+		{Lo: 0x200d, Hi: 0x200d, Stride: 1},
+	},
+	LatinOffset: 0,
+}
+
+type _WordRuneRange unicode.RangeTable
+
+// _WordRuneType returns the word-break property table that contains r, or
+// nil when r is in none of them. Tables are probed in a fixed order that
+// matches the original switch, so any rune present in more than one table
+// resolves to the same result as before.
+func _WordRuneType(r rune) *_WordRuneRange {
+	ordered := []*unicode.RangeTable{
+		_WordALetter,
+		_WordCR,
+		_WordDouble_Quote,
+		_WordExtend,
+		_WordExtendNumLet,
+		_WordFormat,
+		_WordHebrew_Letter,
+		_WordKatakana,
+		_WordLF,
+		_WordMidLetter,
+		_WordMidNum,
+		_WordMidNumLet,
+		_WordNewline,
+		_WordNumeric,
+		_WordRegional_Indicator,
+		_WordSingle_Quote,
+		_WordWSegSpace,
+		_WordZWJ,
+	}
+	for _, table := range ordered {
+		if unicode.Is(table, r) {
+			return (*_WordRuneRange)(table)
+		}
+	}
+	return nil
+}
+// String returns the word-break property name of the table rng points to
+// ("ALetter", "CR", ...), or "Other" for nil and for any table outside the
+// word-break set. Identity is pointer equality, as in the original switch.
+func (rng *_WordRuneRange) String() string {
+	names := map[*unicode.RangeTable]string{
+		_WordALetter:            "ALetter",
+		_WordCR:                 "CR",
+		_WordDouble_Quote:       "Double_Quote",
+		_WordExtend:             "Extend",
+		_WordExtendNumLet:       "ExtendNumLet",
+		_WordFormat:             "Format",
+		_WordHebrew_Letter:      "Hebrew_Letter",
+		_WordKatakana:           "Katakana",
+		_WordLF:                 "LF",
+		_WordMidLetter:          "MidLetter",
+		_WordMidNum:             "MidNum",
+		_WordMidNumLet:          "MidNumLet",
+		_WordNewline:            "Newline",
+		_WordNumeric:            "Numeric",
+		_WordRegional_Indicator: "Regional_Indicator",
+		_WordSingle_Quote:       "Single_Quote",
+		_WordWSegSpace:          "WSegSpace",
+		_WordZWJ:                "ZWJ",
+	}
+	if name, ok := names[(*unicode.RangeTable)(rng)]; ok {
+		return name
+	}
+	return "Other"
+}
+
+// _SentenceATerm holds the code points whose Sentence_Break property value
+// is ATerm (ambiguous sentence terminators: FULL STOP and its variant
+// forms).
+var _SentenceATerm = &unicode.RangeTable{
+	R16: []unicode.Range16{
+		{Lo: 0x2e, Hi: 0x2e, Stride: 1},
+		{Lo: 0x2024, Hi: 0x2024, Stride: 1},
+		{Lo: 0xfe52, Hi: 0xfe52, Stride: 1},
+		{Lo: 0xff0e, Hi: 0xff0e, Stride: 1},
+	},
+	LatinOffset: 1,
+}
+
+// _SentenceCR holds the single code point with Sentence_Break property
+// value CR (U+000D CARRIAGE RETURN).
+var _SentenceCR = &unicode.RangeTable{
+	R16: []unicode.Range16{
+		{Lo: 0xd, Hi: 0xd, Stride: 1},
+	},
+	LatinOffset: 1,
+}
+
+// _SentenceClose holds the code points whose Sentence_Break property value
+// is Close (paired quotation marks and opening/closing brackets of many
+// kinds). Table appears generated from Unicode data — confirm before
+// hand-editing.
+var _SentenceClose = &unicode.RangeTable{
+	R16: []unicode.Range16{
+		unicode.Range16{Lo: 0x22, Hi: 0x22, Stride: 0x1},
+		unicode.Range16{Lo: 0x27, Hi: 0x27, Stride: 0x1},
+		unicode.Range16{Lo: 0x28, Hi: 0x28, Stride: 0x1},
+		unicode.Range16{Lo: 0x29, Hi: 0x29, Stride: 0x1},
+		unicode.Range16{Lo: 0x5b, Hi: 0x5b, Stride: 0x1},
+		unicode.Range16{Lo: 0x5d, Hi: 0x5d, Stride: 0x1},
+		unicode.Range16{Lo: 0x7b, Hi: 0x7b, Stride: 0x1},
+		unicode.Range16{Lo: 0x7d, Hi: 0x7d, Stride: 0x1},
+		unicode.Range16{Lo: 0xab, Hi: 0xab, Stride: 0x1},
+		unicode.Range16{Lo: 0xbb, Hi: 0xbb, Stride: 0x1},
+		unicode.Range16{Lo: 0xf3a, Hi: 0xf3a, Stride: 0x1},
+		unicode.Range16{Lo: 0xf3b, Hi: 0xf3b, Stride: 0x1},
+		unicode.Range16{Lo: 0xf3c, Hi: 0xf3c, Stride: 0x1},
+		unicode.Range16{Lo: 0xf3d, Hi: 0xf3d, Stride: 0x1},
+		unicode.Range16{Lo: 0x169b, Hi: 0x169b, Stride: 0x1},
+		unicode.Range16{Lo: 0x169c, Hi: 0x169c, Stride: 0x1},
+		unicode.Range16{Lo: 0x2018, Hi: 0x2018, Stride: 0x1},
+		unicode.Range16{Lo: 0x2019, Hi: 0x2019, Stride: 0x1},
+		unicode.Range16{Lo: 0x201a, Hi: 0x201a, Stride: 0x1},
+		unicode.Range16{Lo: 0x201b, Hi: 0x201c, Stride: 0x1},
+		unicode.Range16{Lo: 0x201d, Hi: 0x201d, Stride: 0x1},
+		unicode.Range16{Lo: 0x201e, Hi: 0x201e, Stride: 0x1},
+		unicode.Range16{Lo: 0x201f, Hi: 0x201f, Stride: 0x1},
+		unicode.Range16{Lo: 0x2039, Hi: 0x2039, Stride: 0x1},
+		unicode.Range16{Lo: 0x203a, Hi: 0x203a, Stride: 0x1},
+		unicode.Range16{Lo: 0x2045, Hi: 0x2045, Stride: 0x1},
+		unicode.Range16{Lo: 0x2046, Hi: 0x2046, Stride: 0x1},
+		unicode.Range16{Lo: 0x207d, Hi: 0x207d, Stride: 0x1},
+		unicode.Range16{Lo: 0x207e, Hi: 0x207e, Stride: 0x1},
+		unicode.Range16{Lo: 0x208d, Hi: 0x208d, Stride: 0x1},
+		unicode.Range16{Lo: 0x208e, Hi: 0x208e, Stride: 0x1},
+		unicode.Range16{Lo: 0x2308, Hi: 0x2308, Stride: 0x1},
+		unicode.Range16{Lo: 0x2309, Hi: 0x2309, Stride: 0x1},
+		unicode.Range16{Lo: 0x230a, Hi: 0x230a, Stride: 0x1},
+		unicode.Range16{Lo: 0x230b, Hi: 0x230b, Stride: 0x1},
+		unicode.Range16{Lo: 0x2329, Hi: 0x2329, Stride: 0x1},
+		unicode.Range16{Lo: 0x232a, Hi: 0x232a, Stride: 0x1},
+		unicode.Range16{Lo: 0x275b, Hi: 0x2760, Stride: 0x1},
+		unicode.Range16{Lo: 0x2768, Hi: 0x2768, Stride: 0x1},
+		unicode.Range16{Lo: 0x2769, Hi: 0x2769, Stride: 0x1},
+		unicode.Range16{Lo: 0x276a, Hi: 0x276a, Stride: 0x1},
+		unicode.Range16{Lo: 0x276b, Hi: 0x276b, Stride: 0x1},
+		unicode.Range16{Lo: 0x276c, Hi: 0x276c, Stride: 0x1},
+		unicode.Range16{Lo: 0x276d, Hi: 0x276d, Stride: 0x1},
+		unicode.Range16{Lo: 0x276e, Hi: 0x276e, Stride: 0x1},
+		unicode.Range16{Lo: 0x276f, Hi: 0x276f, Stride: 0x1},
+		unicode.Range16{Lo: 0x2770, Hi: 0x2770, Stride: 0x1},
+		unicode.Range16{Lo: 0x2771, Hi: 0x2771, Stride: 0x1},
+		unicode.Range16{Lo: 0x2772, Hi: 0x2772, Stride: 0x1},
+		unicode.Range16{Lo: 0x2773, Hi: 0x2773, Stride: 0x1},
+		unicode.Range16{Lo: 0x2774, Hi: 0x2774, Stride: 0x1},
+		unicode.Range16{Lo: 0x2775, Hi: 0x2775, Stride: 0x1},
+		unicode.Range16{Lo: 0x27c5, Hi: 0x27c5, Stride: 0x1},
+		unicode.Range16{Lo: 0x27c6, Hi: 0x27c6, Stride: 0x1},
+		unicode.Range16{Lo: 0x27e6, Hi: 0x27e6, Stride: 0x1},
+		unicode.Range16{Lo: 0x27e7, Hi: 0x27e7, Stride: 0x1},
+		unicode.Range16{Lo: 0x27e8, Hi: 0x27e8, Stride: 0x1},
+		unicode.Range16{Lo: 0x27e9, Hi: 0x27e9, Stride: 0x1},
+		unicode.Range16{Lo: 0x27ea, Hi: 0x27ea, Stride: 0x1},
+		unicode.Range16{Lo: 0x27eb, Hi: 0x27eb, Stride: 0x1},
+		unicode.Range16{Lo: 0x27ec, Hi: 0x27ec, Stride: 0x1},
+		unicode.Range16{Lo: 0x27ed, Hi: 0x27ed, Stride: 0x1},
+		unicode.Range16{Lo: 0x27ee, Hi: 0x27ee, Stride: 0x1},
+		unicode.Range16{Lo: 0x27ef, Hi: 0x27ef, Stride: 0x1},
+		unicode.Range16{Lo: 0x2983, Hi: 0x2983, Stride: 0x1},
+		unicode.Range16{Lo: 0x2984, Hi: 0x2984, Stride: 0x1},
+		unicode.Range16{Lo: 0x2985, Hi: 0x2985, Stride: 0x1},
+		unicode.Range16{Lo: 0x2986, Hi: 0x2986, Stride: 0x1},
+		unicode.Range16{Lo: 0x2987, Hi: 0x2987, Stride: 0x1},
+		unicode.Range16{Lo: 0x2988, Hi: 0x2988, Stride: 0x1},
+		unicode.Range16{Lo: 0x2989, Hi: 0x2989, Stride: 0x1},
+		unicode.Range16{Lo: 0x298a, Hi: 0x298a, Stride: 0x1},
+		unicode.Range16{Lo: 0x298b, Hi: 0x298b, Stride: 0x1},
+		unicode.Range16{Lo: 0x298c, Hi: 0x298c, Stride: 0x1},
+		unicode.Range16{Lo: 0x298d, Hi: 0x298d, Stride: 0x1},
+		unicode.Range16{Lo: 0x298e, Hi: 0x298e, Stride: 0x1},
+		unicode.Range16{Lo: 0x298f, Hi: 0x298f, Stride: 0x1},
+		unicode.Range16{Lo: 0x2990, Hi: 0x2990, Stride: 0x1},
+		unicode.Range16{Lo: 0x2991, Hi: 0x2991, Stride: 0x1},
+		unicode.Range16{Lo: 0x2992, Hi: 0x2992, Stride: 0x1},
+		unicode.Range16{Lo: 0x2993, Hi: 0x2993, Stride: 0x1},
+		unicode.Range16{Lo: 0x2994, Hi: 0x2994, Stride: 0x1},
+		unicode.Range16{Lo: 0x2995, Hi: 0x2995, Stride: 0x1},
+		unicode.Range16{Lo: 0x2996, Hi: 0x2996, Stride: 0x1},
+		unicode.Range16{Lo: 0x2997, Hi: 0x2997, Stride: 0x1},
+		unicode.Range16{Lo: 0x2998, Hi: 0x2998, Stride: 0x1},
+		unicode.Range16{Lo: 0x29d8, Hi: 0x29d8, Stride: 0x1},
+		unicode.Range16{Lo: 0x29d9, Hi: 0x29d9, Stride: 0x1},
+		unicode.Range16{Lo: 0x29da, Hi: 0x29da, Stride: 0x1},
+		unicode.Range16{Lo: 0x29db, Hi: 0x29db, Stride: 0x1},
+		unicode.Range16{Lo: 0x29fc, Hi: 0x29fc, Stride: 0x1},
+		unicode.Range16{Lo: 0x29fd, Hi: 0x29fd, Stride: 0x1},
+		unicode.Range16{Lo: 0x2e00, Hi: 0x2e01, Stride: 0x1},
+		unicode.Range16{Lo: 0x2e02, Hi: 0x2e02, Stride: 0x1},
+		unicode.Range16{Lo: 0x2e03, Hi: 0x2e03, Stride: 0x1},
+		unicode.Range16{Lo: 0x2e04, Hi: 0x2e04, Stride: 0x1},
+		unicode.Range16{Lo: 0x2e05, Hi: 0x2e05, Stride: 0x1},
+		unicode.Range16{Lo: 0x2e06, Hi: 0x2e08, Stride: 0x1},
+		unicode.Range16{Lo: 0x2e09, Hi: 0x2e09, Stride: 0x1},
+		unicode.Range16{Lo: 0x2e0a, Hi: 0x2e0a, Stride: 0x1},
+		unicode.Range16{Lo: 0x2e0b, Hi: 0x2e0b, Stride: 0x1},
+		unicode.Range16{Lo: 0x2e0c, Hi: 0x2e0c, Stride: 0x1},
+		unicode.Range16{Lo: 0x2e0d, Hi: 0x2e0d, Stride: 0x1},
+		unicode.Range16{Lo: 0x2e1c, Hi: 0x2e1c, Stride: 0x1},
+		unicode.Range16{Lo: 0x2e1d, Hi: 0x2e1d, Stride: 0x1},
+		unicode.Range16{Lo: 0x2e20, Hi: 0x2e20, Stride: 0x1},
+		unicode.Range16{Lo: 0x2e21, Hi: 0x2e21, Stride: 0x1},
+		unicode.Range16{Lo: 0x2e22, Hi: 0x2e22, Stride: 0x1},
+		unicode.Range16{Lo: 0x2e23, Hi: 0x2e23, Stride: 0x1},
+		unicode.Range16{Lo: 0x2e24, Hi: 0x2e24, Stride: 0x1},
+		unicode.Range16{Lo: 0x2e25, Hi: 0x2e25, Stride: 0x1},
+		unicode.Range16{Lo: 0x2e26, Hi: 0x2e26, Stride: 0x1},
+		unicode.Range16{Lo: 0x2e27, Hi: 0x2e27, Stride: 0x1},
+		unicode.Range16{Lo: 0x2e28, Hi: 0x2e28, Stride: 0x1},
+		unicode.Range16{Lo: 0x2e29, Hi: 0x2e29, Stride: 0x1},
+		unicode.Range16{Lo: 0x2e42, Hi: 0x2e42, Stride: 0x1},
+		unicode.Range16{Lo: 0x2e55, Hi: 0x2e55, Stride: 0x1},
+		unicode.Range16{Lo: 0x2e56, Hi: 0x2e56, Stride: 0x1},
+		unicode.Range16{Lo: 0x2e57, Hi: 0x2e57, Stride: 0x1},
+		unicode.Range16{Lo: 0x2e58, Hi: 0x2e58, Stride: 0x1},
+		unicode.Range16{Lo: 0x2e59, Hi: 0x2e59, Stride: 0x1},
+		unicode.Range16{Lo: 0x2e5a, Hi: 0x2e5a, Stride: 0x1},
+		unicode.Range16{Lo: 0x2e5b, Hi: 0x2e5b, Stride: 0x1},
+		unicode.Range16{Lo: 0x2e5c, Hi: 0x2e5c, Stride: 0x1},
+		unicode.Range16{Lo: 0x3008, Hi: 0x3008, Stride: 0x1},
+		unicode.Range16{Lo: 0x3009, Hi: 0x3009, Stride: 0x1},
+		unicode.Range16{Lo: 0x300a, Hi: 0x300a, Stride: 0x1},
+		unicode.Range16{Lo: 0x300b, Hi: 0x300b, Stride: 0x1},
+		unicode.Range16{Lo: 0x300c, Hi: 0x300c, Stride: 0x1},
+		unicode.Range16{Lo: 0x300d, Hi: 0x300d, Stride: 0x1},
+		unicode.Range16{Lo: 0x300e, Hi: 0x300e, Stride: 0x1},
+		unicode.Range16{Lo: 0x300f, Hi: 0x300f, Stride: 0x1},
+		unicode.Range16{Lo: 0x3010, Hi: 0x3010, Stride: 0x1},
+		unicode.Range16{Lo: 0x3011, Hi: 0x3011, Stride: 0x1},
+		unicode.Range16{Lo: 0x3014, Hi: 0x3014, Stride: 0x1},
+		unicode.Range16{Lo: 0x3015, Hi: 0x3015, Stride: 0x1},
+		unicode.Range16{Lo: 0x3016, Hi: 0x3016, Stride: 0x1},
+		unicode.Range16{Lo: 0x3017, Hi: 0x3017, Stride: 0x1},
+		unicode.Range16{Lo: 0x3018, Hi: 0x3018, Stride: 0x1},
+		unicode.Range16{Lo: 0x3019, Hi: 0x3019, Stride: 0x1},
+		unicode.Range16{Lo: 0x301a, Hi: 0x301a, Stride: 0x1},
+		unicode.Range16{Lo: 0x301b, Hi: 0x301b, Stride: 0x1},
+		unicode.Range16{Lo: 0x301d, Hi: 0x301d, Stride: 0x1},
+		unicode.Range16{Lo: 0x301e, Hi: 0x301f, Stride: 0x1},
+		unicode.Range16{Lo: 0xfd3e, Hi: 0xfd3e, Stride: 0x1},
+		unicode.Range16{Lo: 0xfd3f, Hi: 0xfd3f, Stride: 0x1},
+		unicode.Range16{Lo: 0xfe17, Hi: 0xfe17, Stride: 0x1},
+		unicode.Range16{Lo: 0xfe18, Hi: 0xfe18, Stride: 0x1},
+		unicode.Range16{Lo: 0xfe35, Hi: 0xfe35, Stride: 0x1},
+		unicode.Range16{Lo: 0xfe36, Hi: 0xfe36, Stride: 0x1},
+		unicode.Range16{Lo: 0xfe37, Hi: 0xfe37, Stride: 0x1},
+		unicode.Range16{Lo: 0xfe38, Hi: 0xfe38, Stride: 0x1},
+		unicode.Range16{Lo: 0xfe39, Hi: 0xfe39, Stride: 0x1},
+		unicode.Range16{Lo: 0xfe3a, Hi: 0xfe3a, Stride: 0x1},
+		unicode.Range16{Lo: 0xfe3b, Hi: 0xfe3b, Stride: 0x1},
+		unicode.Range16{Lo: 0xfe3c, Hi: 0xfe3c, Stride: 0x1},
+		unicode.Range16{Lo: 0xfe3d, Hi: 0xfe3d, Stride: 0x1},
+		unicode.Range16{Lo: 0xfe3e, Hi: 0xfe3e, Stride: 0x1},
+		unicode.Range16{Lo: 0xfe3f, Hi: 0xfe3f, Stride: 0x1},
+		unicode.Range16{Lo: 0xfe40, Hi: 0xfe40, Stride: 0x1},
+		unicode.Range16{Lo: 0xfe41, Hi: 0xfe41, Stride: 0x1},
+		unicode.Range16{Lo: 0xfe42, Hi: 0xfe42, Stride: 0x1},
+		unicode.Range16{Lo: 0xfe43, Hi: 0xfe43, Stride: 0x1},
+		unicode.Range16{Lo: 0xfe44, Hi: 0xfe44, Stride: 0x1},
+		unicode.Range16{Lo: 0xfe47, Hi: 0xfe47, Stride: 0x1},
+		unicode.Range16{Lo: 0xfe48, Hi: 0xfe48, Stride: 0x1},
+		unicode.Range16{Lo: 0xfe59, Hi: 0xfe59, Stride: 0x1},
+		unicode.Range16{Lo: 0xfe5a, Hi: 0xfe5a, Stride: 0x1},
+		unicode.Range16{Lo: 0xfe5b, Hi: 0xfe5b, Stride: 0x1},
+		unicode.Range16{Lo: 0xfe5c, Hi: 0xfe5c, Stride: 0x1},
+		unicode.Range16{Lo: 0xfe5d, Hi: 0xfe5d, Stride: 0x1},
+		unicode.Range16{Lo: 0xfe5e, Hi: 0xfe5e, Stride: 0x1},
+		unicode.Range16{Lo: 0xff08, Hi: 0xff08, Stride: 0x1},
+		unicode.Range16{Lo: 0xff09, Hi: 0xff09, Stride: 0x1},
+		unicode.Range16{Lo: 0xff3b, Hi: 0xff3b, Stride: 0x1},
+		unicode.Range16{Lo: 0xff3d, Hi: 0xff3d, Stride: 0x1},
+		unicode.Range16{Lo: 0xff5b, Hi: 0xff5b, Stride: 0x1},
+		unicode.Range16{Lo: 0xff5d, Hi: 0xff5d, Stride: 0x1},
+		unicode.Range16{Lo: 0xff5f, Hi: 0xff5f, Stride: 0x1},
+		unicode.Range16{Lo: 0xff60, Hi: 0xff60, Stride: 0x1},
+		unicode.Range16{Lo: 0xff62, Hi: 0xff62, Stride: 0x1},
+		unicode.Range16{Lo: 0xff63, Hi: 0xff63, Stride: 0x1},
+	},
+	R32: []unicode.Range32{
+		unicode.Range32{Lo: 0x1f676, Hi: 0x1f678, Stride: 0x1},
+	},
+	LatinOffset: 10,
+}
+
+var _SentenceExtend = &unicode.RangeTable{
+	R16: []unicode.Range16{
+		unicode.Range16{Lo: 0x300, Hi: 0x36f, Stride: 0x1},
+		unicode.Range16{Lo: 0x483, Hi: 0x487, Stride: 0x1},
+		unicode.Range16{Lo: 0x488, Hi: 0x489, Stride: 0x1},
+		unicode.Range16{Lo: 0x591, Hi: 0x5bd, Stride: 0x1},
+		unicode.Range16{Lo: 0x5bf, Hi: 0x5bf, Stride: 0x1},
+		unicode.Range16{Lo: 0x5c1, Hi: 0x5c2, Stride: 0x1},
+		unicode.Range16{Lo: 0x5c4, Hi: 0x5c5, Stride: 0x1},
+		unicode.Range16{Lo: 0x5c7, Hi: 0x5c7, Stride: 0x1},
+		unicode.Range16{Lo: 0x610, Hi: 0x61a, Stride: 0x1},
+		unicode.Range16{Lo: 0x64b, Hi: 0x65f, Stride: 0x1},
+		unicode.Range16{Lo: 0x670, Hi: 0x670, Stride: 0x1},
+		unicode.Range16{Lo: 0x6d6, Hi: 0x6dc, Stride: 0x1},
+		unicode.Range16{Lo: 0x6df, Hi: 0x6e4, Stride: 0x1},
+		unicode.Range16{Lo: 0x6e7, Hi: 0x6e8, Stride: 0x1},
+		unicode.Range16{Lo: 0x6ea, Hi: 0x6ed, Stride: 0x1},
+		unicode.Range16{Lo: 0x711, Hi: 0x711, Stride: 0x1},
+		unicode.Range16{Lo: 0x730, Hi: 0x74a, Stride: 0x1},
+		unicode.Range16{Lo: 0x7a6, Hi: 0x7b0, Stride: 0x1},
+		unicode.Range16{Lo: 0x7eb, Hi: 0x7f3, Stride: 0x1},
+		unicode.Range16{Lo: 0x7fd, Hi: 0x7fd, Stride: 0x1},
+		unicode.Range16{Lo: 0x816, Hi: 0x819, Stride: 0x1},
+		unicode.Range16{Lo: 0x81b, Hi: 0x823, Stride: 0x1},
+		unicode.Range16{Lo: 0x825, Hi: 0x827, Stride: 0x1},
+		unicode.Range16{Lo: 0x829, Hi: 0x82d, Stride: 0x1},
+		unicode.Range16{Lo: 0x859, Hi: 0x85b, Stride: 0x1},
+		unicode.Range16{Lo: 0x898, Hi: 0x89f, Stride: 0x1},
+		unicode.Range16{Lo: 0x8ca, Hi: 0x8e1, Stride: 0x1},
+		unicode.Range16{Lo: 0x8e3, Hi: 0x902, Stride: 0x1},
+		unicode.Range16{Lo: 0x903, Hi: 0x903, Stride: 0x1},
+		unicode.Range16{Lo: 0x93a, Hi: 0x93a, Stride: 0x1},
+		unicode.Range16{Lo: 0x93b, Hi: 0x93b, Stride: 0x1},
+		unicode.Range16{Lo: 0x93c, Hi: 0x93c, Stride: 0x1},
+		unicode.Range16{Lo: 0x93e, Hi: 0x940, Stride: 0x1},
+		unicode.Range16{Lo: 0x941, Hi: 0x948, Stride: 0x1},
+		unicode.Range16{Lo: 0x949, Hi: 0x94c, Stride: 0x1},
+		unicode.Range16{Lo: 0x94d, Hi: 0x94d, Stride: 0x1},
+		unicode.Range16{Lo: 0x94e, Hi: 0x94f, Stride: 0x1},
+		unicode.Range16{Lo: 0x951, Hi: 0x957, Stride: 0x1},
+		unicode.Range16{Lo: 0x962, Hi: 0x963, Stride: 0x1},
+		unicode.Range16{Lo: 0x981, Hi: 0x981, Stride: 0x1},
+		unicode.Range16{Lo: 0x982, Hi: 0x983, Stride: 0x1},
+		unicode.Range16{Lo: 0x9bc, Hi: 0x9bc, Stride: 0x1},
+		unicode.Range16{Lo: 0x9be, Hi: 0x9c0, Stride: 0x1},
+		unicode.Range16{Lo: 0x9c1, Hi: 0x9c4, Stride: 0x1},
+		unicode.Range16{Lo: 0x9c7, Hi: 0x9c8, Stride: 0x1},
+		unicode.Range16{Lo: 0x9cb, Hi: 0x9cc, Stride: 0x1},
+		unicode.Range16{Lo: 0x9cd, Hi: 0x9cd, Stride: 0x1},
+		unicode.Range16{Lo: 0x9d7, Hi: 0x9d7, Stride: 0x1},
+		unicode.Range16{Lo: 0x9e2, Hi: 0x9e3, Stride: 0x1},
+		unicode.Range16{Lo: 0x9fe, Hi: 0x9fe, Stride: 0x1},
+		unicode.Range16{Lo: 0xa01, Hi: 0xa02, Stride: 0x1},
+		unicode.Range16{Lo: 0xa03, Hi: 0xa03, Stride: 0x1},
+		unicode.Range16{Lo: 0xa3c, Hi: 0xa3c, Stride: 0x1},
+		unicode.Range16{Lo: 0xa3e, Hi: 0xa40, Stride: 0x1},
+		unicode.Range16{Lo: 0xa41, Hi: 0xa42, Stride: 0x1},
+		unicode.Range16{Lo: 0xa47, Hi: 0xa48, Stride: 0x1},
+		unicode.Range16{Lo: 0xa4b, Hi: 0xa4d, Stride: 0x1},
+		unicode.Range16{Lo: 0xa51, Hi: 0xa51, Stride: 0x1},
+		unicode.Range16{Lo: 0xa70, Hi: 0xa71, Stride: 0x1},
+		unicode.Range16{Lo: 0xa75, Hi: 0xa75, Stride: 0x1},
+		unicode.Range16{Lo: 0xa81, Hi: 0xa82, Stride: 0x1},
+		unicode.Range16{Lo: 0xa83, Hi: 0xa83, Stride: 0x1},
+		unicode.Range16{Lo: 0xabc, Hi: 0xabc, Stride: 0x1},
+		unicode.Range16{Lo: 0xabe, Hi: 0xac0, Stride: 0x1},
+		unicode.Range16{Lo: 0xac1, Hi: 0xac5, Stride: 0x1},
+		unicode.Range16{Lo: 0xac7, Hi: 0xac8, Stride: 0x1},
+		unicode.Range16{Lo: 0xac9, Hi: 0xac9, Stride: 0x1},
+		unicode.Range16{Lo: 0xacb, Hi: 0xacc, Stride: 0x1},
+		unicode.Range16{Lo: 0xacd, Hi: 0xacd, Stride: 0x1},
+		unicode.Range16{Lo: 0xae2, Hi: 0xae3, Stride: 0x1},
+		unicode.Range16{Lo: 0xafa, Hi: 0xaff, Stride: 0x1},
+		unicode.Range16{Lo: 0xb01, Hi: 0xb01, Stride: 0x1},
+		unicode.Range16{Lo: 0xb02, Hi: 0xb03, Stride: 0x1},
+		unicode.Range16{Lo: 0xb3c, Hi: 0xb3c, Stride: 0x1},
+		unicode.Range16{Lo: 0xb3e, Hi: 0xb3e, Stride: 0x1},
+		unicode.Range16{Lo: 0xb3f, Hi: 0xb3f, Stride: 0x1},
+		unicode.Range16{Lo: 0xb40, Hi: 0xb40, Stride: 0x1},
+		unicode.Range16{Lo: 0xb41, Hi: 0xb44, Stride: 0x1},
+		unicode.Range16{Lo: 0xb47, Hi: 0xb48, Stride: 0x1},
+		unicode.Range16{Lo: 0xb4b, Hi: 0xb4c, Stride: 0x1},
+		unicode.Range16{Lo: 0xb4d, Hi: 0xb4d, Stride: 0x1},
+		unicode.Range16{Lo: 0xb55, Hi: 0xb56, Stride: 0x1},
+		unicode.Range16{Lo: 0xb57, Hi: 0xb57, Stride: 0x1},
+		unicode.Range16{Lo: 0xb62, Hi: 0xb63, Stride: 0x1},
+		unicode.Range16{Lo: 0xb82, Hi: 0xb82, Stride: 0x1},
+		unicode.Range16{Lo: 0xbbe, Hi: 0xbbf, Stride: 0x1},
+		unicode.Range16{Lo: 0xbc0, Hi: 0xbc0, Stride: 0x1},
+		unicode.Range16{Lo: 0xbc1, Hi: 0xbc2, Stride: 0x1},
+		unicode.Range16{Lo: 0xbc6, Hi: 0xbc8, Stride: 0x1},
+		unicode.Range16{Lo: 0xbca, Hi: 0xbcc, Stride: 0x1},
+		unicode.Range16{Lo: 0xbcd, Hi: 0xbcd, Stride: 0x1},
+		unicode.Range16{Lo: 0xbd7, Hi: 0xbd7, Stride: 0x1},
+		unicode.Range16{Lo: 0xc00, Hi: 0xc00, Stride: 0x1},
+		unicode.Range16{Lo: 0xc01, Hi: 0xc03, Stride: 0x1},
+		unicode.Range16{Lo: 0xc04, Hi: 0xc04, Stride: 0x1},
+		unicode.Range16{Lo: 0xc3c, Hi: 0xc3c, Stride: 0x1},
+		unicode.Range16{Lo: 0xc3e, Hi: 0xc40, Stride: 0x1},
+		unicode.Range16{Lo: 0xc41, Hi: 0xc44, Stride: 0x1},
+		unicode.Range16{Lo: 0xc46, Hi: 0xc48, Stride: 0x1},
+		unicode.Range16{Lo: 0xc4a, Hi: 0xc4d, Stride: 0x1},
+		unicode.Range16{Lo: 0xc55, Hi: 0xc56, Stride: 0x1},
+		unicode.Range16{Lo: 0xc62, Hi: 0xc63, Stride: 0x1},
+		unicode.Range16{Lo: 0xc81, Hi: 0xc81, Stride: 0x1},
+		unicode.Range16{Lo: 0xc82, Hi: 0xc83, Stride: 0x1},
+		unicode.Range16{Lo: 0xcbc, Hi: 0xcbc, Stride: 0x1},
+		unicode.Range16{Lo: 0xcbe, Hi: 0xcbe, Stride: 0x1},
+		unicode.Range16{Lo: 0xcbf, Hi: 0xcbf, Stride: 0x1},
+		unicode.Range16{Lo: 0xcc0, Hi: 0xcc4, Stride: 0x1},
+		unicode.Range16{Lo: 0xcc6, Hi: 0xcc6, Stride: 0x1},
+		unicode.Range16{Lo: 0xcc7, Hi: 0xcc8, Stride: 0x1},
+		unicode.Range16{Lo: 0xcca, Hi: 0xccb, Stride: 0x1},
+		unicode.Range16{Lo: 0xccc, Hi: 0xccd, Stride: 0x1},
+		unicode.Range16{Lo: 0xcd5, Hi: 0xcd6, Stride: 0x1},
+		unicode.Range16{Lo: 0xce2, Hi: 0xce3, Stride: 0x1},
+		unicode.Range16{Lo: 0xcf3, Hi: 0xcf3, Stride: 0x1},
+		unicode.Range16{Lo: 0xd00, Hi: 0xd01, Stride: 0x1},
+		unicode.Range16{Lo: 0xd02, Hi: 0xd03, Stride: 0x1},
+		unicode.Range16{Lo: 0xd3b, Hi: 0xd3c, Stride: 0x1},
+		unicode.Range16{Lo: 0xd3e, Hi: 0xd40, Stride: 0x1},
+		unicode.Range16{Lo: 0xd41, Hi: 0xd44, Stride: 0x1},
+		unicode.Range16{Lo: 0xd46, Hi: 0xd48, Stride: 0x1},
+		unicode.Range16{Lo: 0xd4a, Hi: 0xd4c, Stride: 0x1},
+		unicode.Range16{Lo: 0xd4d, Hi: 0xd4d, Stride: 0x1},
+		unicode.Range16{Lo: 0xd57, Hi: 0xd57, Stride: 0x1},
+		unicode.Range16{Lo: 0xd62, Hi: 0xd63, Stride: 0x1},
+		unicode.Range16{Lo: 0xd81, Hi: 0xd81, Stride: 0x1},
+		unicode.Range16{Lo: 0xd82, Hi: 0xd83, Stride: 0x1},
+		unicode.Range16{Lo: 0xdca, Hi: 0xdca, Stride: 0x1},
+		unicode.Range16{Lo: 0xdcf, Hi: 0xdd1, Stride: 0x1},
+		unicode.Range16{Lo: 0xdd2, Hi: 0xdd4, Stride: 0x1},
+		unicode.Range16{Lo: 0xdd6, Hi: 0xdd6, Stride: 0x1},
+		unicode.Range16{Lo: 0xdd8, Hi: 0xddf, Stride: 0x1},
+		unicode.Range16{Lo: 0xdf2, Hi: 0xdf3, Stride: 0x1},
+		unicode.Range16{Lo: 0xe31, Hi: 0xe31, Stride: 0x1},
+		unicode.Range16{Lo: 0xe34, Hi: 0xe3a, Stride: 0x1},
+		unicode.Range16{Lo: 0xe47, Hi: 0xe4e, Stride: 0x1},
+		unicode.Range16{Lo: 0xeb1, Hi: 0xeb1, Stride: 0x1},
+		unicode.Range16{Lo: 0xeb4, Hi: 0xebc, Stride: 0x1},
+		unicode.Range16{Lo: 0xec8, Hi: 0xece, Stride: 0x1},
+		unicode.Range16{Lo: 0xf18, Hi: 0xf19, Stride: 0x1},
+		unicode.Range16{Lo: 0xf35, Hi: 0xf35, Stride: 0x1},
+		unicode.Range16{Lo: 0xf37, Hi: 0xf37, Stride: 0x1},
+		unicode.Range16{Lo: 0xf39, Hi: 0xf39, Stride: 0x1},
+		unicode.Range16{Lo: 0xf3e, Hi: 0xf3f, Stride: 0x1},
+		unicode.Range16{Lo: 0xf71, Hi: 0xf7e, Stride: 0x1},
+		unicode.Range16{Lo: 0xf7f, Hi: 0xf7f, Stride: 0x1},
+		unicode.Range16{Lo: 0xf80, Hi: 0xf84, Stride: 0x1},
+		unicode.Range16{Lo: 0xf86, Hi: 0xf87, Stride: 0x1},
+		unicode.Range16{Lo: 0xf8d, Hi: 0xf97, Stride: 0x1},
+		unicode.Range16{Lo: 0xf99, Hi: 0xfbc, Stride: 0x1},
+		unicode.Range16{Lo: 0xfc6, Hi: 0xfc6, Stride: 0x1},
+		unicode.Range16{Lo: 0x102b, Hi: 0x102c, Stride: 0x1},
+		unicode.Range16{Lo: 0x102d, Hi: 0x1030, Stride: 0x1},
+		unicode.Range16{Lo: 0x1031, Hi: 0x1031, Stride: 0x1},
+		unicode.Range16{Lo: 0x1032, Hi: 0x1037, Stride: 0x1},
+		unicode.Range16{Lo: 0x1038, Hi: 0x1038, Stride: 0x1},
+		unicode.Range16{Lo: 0x1039, Hi: 0x103a, Stride: 0x1},
+		unicode.Range16{Lo: 0x103b, Hi: 0x103c, Stride: 0x1},
+		unicode.Range16{Lo: 0x103d, Hi: 0x103e, Stride: 0x1},
+		unicode.Range16{Lo: 0x1056, Hi: 0x1057, Stride: 0x1},
+		unicode.Range16{Lo: 0x1058, Hi: 0x1059, Stride: 0x1},
+		unicode.Range16{Lo: 0x105e, Hi: 0x1060, Stride: 0x1},
+		unicode.Range16{Lo: 0x1062, Hi: 0x1064, Stride: 0x1},
+		unicode.Range16{Lo: 0x1067, Hi: 0x106d, Stride: 0x1},
+		unicode.Range16{Lo: 0x1071, Hi: 0x1074, Stride: 0x1},
+		unicode.Range16{Lo: 0x1082, Hi: 0x1082, Stride: 0x1},
+		unicode.Range16{Lo: 0x1083, Hi: 0x1084, Stride: 0x1},
+		unicode.Range16{Lo: 0x1085, Hi: 0x1086, Stride: 0x1},
+		unicode.Range16{Lo: 0x1087, Hi: 0x108c, Stride: 0x1},
+		unicode.Range16{Lo: 0x108d, Hi: 0x108d, Stride: 0x1},
+		unicode.Range16{Lo: 0x108f, Hi: 0x108f, Stride: 0x1},
+		unicode.Range16{Lo: 0x109a, Hi: 0x109c, Stride: 0x1},
+		unicode.Range16{Lo: 0x109d, Hi: 0x109d, Stride: 0x1},
+		unicode.Range16{Lo: 0x135d, Hi: 0x135f, Stride: 0x1},
+		unicode.Range16{Lo: 0x1712, Hi: 0x1714, Stride: 0x1},
+		unicode.Range16{Lo: 0x1715, Hi: 0x1715, Stride: 0x1},
+		unicode.Range16{Lo: 0x1732, Hi: 0x1733, Stride: 0x1},
+		unicode.Range16{Lo: 0x1734, Hi: 0x1734, Stride: 0x1},
+		unicode.Range16{Lo: 0x1752, Hi: 0x1753, Stride: 0x1},
+		unicode.Range16{Lo: 0x1772, Hi: 0x1773, Stride: 0x1},
+		unicode.Range16{Lo: 0x17b4, Hi: 0x17b5, Stride: 0x1},
+		unicode.Range16{Lo: 0x17b6, Hi: 0x17b6, Stride: 0x1},
+		unicode.Range16{Lo: 0x17b7, Hi: 0x17bd, Stride: 0x1},
+		unicode.Range16{Lo: 0x17be, Hi: 0x17c5, Stride: 0x1},
+		unicode.Range16{Lo: 0x17c6, Hi: 0x17c6, Stride: 0x1},
+		unicode.Range16{Lo: 0x17c7, Hi: 0x17c8, Stride: 0x1},
+		unicode.Range16{Lo: 0x17c9, Hi: 0x17d3, Stride: 0x1},
+		unicode.Range16{Lo: 0x17dd, Hi: 0x17dd, Stride: 0x1},
+		unicode.Range16{Lo: 0x180b, Hi: 0x180d, Stride: 0x1},
+		unicode.Range16{Lo: 0x180f, Hi: 0x180f, Stride: 0x1},
+		unicode.Range16{Lo: 0x1885, Hi: 0x1886, Stride: 0x1},
+		unicode.Range16{Lo: 0x18a9, Hi: 0x18a9, Stride: 0x1},
+		unicode.Range16{Lo: 0x1920, Hi: 0x1922, Stride: 0x1},
+		unicode.Range16{Lo: 0x1923, Hi: 0x1926, Stride: 0x1},
+		unicode.Range16{Lo: 0x1927, Hi: 0x1928, Stride: 0x1},
+		unicode.Range16{Lo: 0x1929, Hi: 0x192b, Stride: 0x1},
+		unicode.Range16{Lo: 0x1930, Hi: 0x1931, Stride: 0x1},
+		unicode.Range16{Lo: 0x1932, Hi: 0x1932, Stride: 0x1},
+		unicode.Range16{Lo: 0x1933, Hi: 0x1938, Stride: 0x1},
+		unicode.Range16{Lo: 0x1939, Hi: 0x193b, Stride: 0x1},
+		unicode.Range16{Lo: 0x1a17, Hi: 0x1a18, Stride: 0x1},
+		unicode.Range16{Lo: 0x1a19, Hi: 0x1a1a, Stride: 0x1},
+		unicode.Range16{Lo: 0x1a1b, Hi: 0x1a1b, Stride: 0x1},
+		unicode.Range16{Lo: 0x1a55, Hi: 0x1a55, Stride: 0x1},
+		unicode.Range16{Lo: 0x1a56, Hi: 0x1a56, Stride: 0x1},
+		unicode.Range16{Lo: 0x1a57, Hi: 0x1a57, Stride: 0x1},
+		unicode.Range16{Lo: 0x1a58, Hi: 0x1a5e, Stride: 0x1},
+		unicode.Range16{Lo: 0x1a60, Hi: 0x1a60, Stride: 0x1},
+		unicode.Range16{Lo: 0x1a61, Hi: 0x1a61, Stride: 0x1},
+		unicode.Range16{Lo: 0x1a62, Hi: 0x1a62, Stride: 0x1},
+		unicode.Range16{Lo: 0x1a63, Hi: 0x1a64, Stride: 0x1},
+		unicode.Range16{Lo: 0x1a65, Hi: 0x1a6c, Stride: 0x1},
+		unicode.Range16{Lo: 0x1a6d, Hi: 0x1a72, Stride: 0x1},
+		unicode.Range16{Lo: 0x1a73, Hi: 0x1a7c, Stride: 0x1},
+		unicode.Range16{Lo: 0x1a7f, Hi: 0x1a7f, Stride: 0x1},
+		unicode.Range16{Lo: 0x1ab0, Hi: 0x1abd, Stride: 0x1},
+		unicode.Range16{Lo: 0x1abe, Hi: 0x1abe, Stride: 0x1},
+		unicode.Range16{Lo: 0x1abf, Hi: 0x1ace, Stride: 0x1},
+		unicode.Range16{Lo: 0x1b00, Hi: 0x1b03, Stride: 0x1},
+		unicode.Range16{Lo: 0x1b04, Hi: 0x1b04, Stride: 0x1},
+		unicode.Range16{Lo: 0x1b34, Hi: 0x1b34, Stride: 0x1},
+		unicode.Range16{Lo: 0x1b35, Hi: 0x1b35, Stride: 0x1},
+		unicode.Range16{Lo: 0x1b36, Hi: 0x1b3a, Stride: 0x1},
+		unicode.Range16{Lo: 0x1b3b, Hi: 0x1b3b, Stride: 0x1},
+		unicode.Range16{Lo: 0x1b3c, Hi: 0x1b3c, Stride: 0x1},
+		unicode.Range16{Lo: 0x1b3d, Hi: 0x1b41, Stride: 0x1},
+		unicode.Range16{Lo: 0x1b42, Hi: 0x1b42, Stride: 0x1},
+		unicode.Range16{Lo: 0x1b43, Hi: 0x1b44, Stride: 0x1},
+		unicode.Range16{Lo: 0x1b6b, Hi: 0x1b73, Stride: 0x1},
+		unicode.Range16{Lo: 0x1b80, Hi: 0x1b81, Stride: 0x1},
+		unicode.Range16{Lo: 0x1b82, Hi: 0x1b82, Stride: 0x1},
+		unicode.Range16{Lo: 0x1ba1, Hi: 0x1ba1, Stride: 0x1},
+		unicode.Range16{Lo: 0x1ba2, Hi: 0x1ba5, Stride: 0x1},
+		unicode.Range16{Lo: 0x1ba6, Hi: 0x1ba7, Stride: 0x1},
+		unicode.Range16{Lo: 0x1ba8, Hi: 0x1ba9, Stride: 0x1},
+		unicode.Range16{Lo: 0x1baa, Hi: 0x1baa, Stride: 0x1},
+		unicode.Range16{Lo: 0x1bab, Hi: 0x1bad, Stride: 0x1},
+		unicode.Range16{Lo: 0x1be6, Hi: 0x1be6, Stride: 0x1},
+		unicode.Range16{Lo: 0x1be7, Hi: 0x1be7, Stride: 0x1},
+		unicode.Range16{Lo: 0x1be8, Hi: 0x1be9, Stride: 0x1},
+		unicode.Range16{Lo: 0x1bea, Hi: 0x1bec, Stride: 0x1},
+		unicode.Range16{Lo: 0x1bed, Hi: 0x1bed, Stride: 0x1},
+		unicode.Range16{Lo: 0x1bee, Hi: 0x1bee, Stride: 0x1},
+		unicode.Range16{Lo: 0x1bef, Hi: 0x1bf1, Stride: 0x1},
+		unicode.Range16{Lo: 0x1bf2, Hi: 0x1bf3, Stride: 0x1},
+		unicode.Range16{Lo: 0x1c24, Hi: 0x1c2b, Stride: 0x1},
+		unicode.Range16{Lo: 0x1c2c, Hi: 0x1c33, Stride: 0x1},
+		unicode.Range16{Lo: 0x1c34, Hi: 0x1c35, Stride: 0x1},
+		unicode.Range16{Lo: 0x1c36, Hi: 0x1c37, Stride: 0x1},
+		unicode.Range16{Lo: 0x1cd0, Hi: 0x1cd2, Stride: 0x1},
+		unicode.Range16{Lo: 0x1cd4, Hi: 0x1ce0, Stride: 0x1},
+		unicode.Range16{Lo: 0x1ce1, Hi: 0x1ce1, Stride: 0x1},
+		unicode.Range16{Lo: 0x1ce2, Hi: 0x1ce8, Stride: 0x1},
+		unicode.Range16{Lo: 0x1ced, Hi: 0x1ced, Stride: 0x1},
+		unicode.Range16{Lo: 0x1cf4, Hi: 0x1cf4, Stride: 0x1},
+		unicode.Range16{Lo: 0x1cf7, Hi: 0x1cf7, Stride: 0x1},
+		unicode.Range16{Lo: 0x1cf8, Hi: 0x1cf9, Stride: 0x1},
+		unicode.Range16{Lo: 0x1dc0, Hi: 0x1dff, Stride: 0x1},
+		unicode.Range16{Lo: 0x200c, Hi: 0x200d, Stride: 0x1},
+		unicode.Range16{Lo: 0x20d0, Hi: 0x20dc, Stride: 0x1},
+		unicode.Range16{Lo: 0x20dd, Hi: 0x20e0, Stride: 0x1},
+		unicode.Range16{Lo: 0x20e1, Hi: 0x20e1, Stride: 0x1},
+		unicode.Range16{Lo: 0x20e2, Hi: 0x20e4, Stride: 0x1},
+		unicode.Range16{Lo: 0x20e5, Hi: 0x20f0, Stride: 0x1},
+		unicode.Range16{Lo: 0x2cef, Hi: 0x2cf1, Stride: 0x1},
+		unicode.Range16{Lo: 0x2d7f, Hi: 0x2d7f, Stride: 0x1},
+		unicode.Range16{Lo: 0x2de0, Hi: 0x2dff, Stride: 0x1},
+		unicode.Range16{Lo: 0x302a, Hi: 0x302d, Stride: 0x1},
+		unicode.Range16{Lo: 0x302e, Hi: 0x302f, Stride: 0x1},
+		unicode.Range16{Lo: 0x3099, Hi: 0x309a, Stride: 0x1},
+		unicode.Range16{Lo: 0xa66f, Hi: 0xa66f, Stride: 0x1},
+		unicode.Range16{Lo: 0xa670, Hi: 0xa672, Stride: 0x1},
+		unicode.Range16{Lo: 0xa674, Hi: 0xa67d, Stride: 0x1},
+		unicode.Range16{Lo: 0xa69e, Hi: 0xa69f, Stride: 0x1},
+		unicode.Range16{Lo: 0xa6f0, Hi: 0xa6f1, Stride: 0x1},
+		unicode.Range16{Lo: 0xa802, Hi: 0xa802, Stride: 0x1},
+		unicode.Range16{Lo: 0xa806, Hi: 0xa806, Stride: 0x1},
+		unicode.Range16{Lo: 0xa80b, Hi: 0xa80b, Stride: 0x1},
+		unicode.Range16{Lo: 0xa823, Hi: 0xa824, Stride: 0x1},
+		unicode.Range16{Lo: 0xa825, Hi: 0xa826, Stride: 0x1},
+		unicode.Range16{Lo: 0xa827, Hi: 0xa827, Stride: 0x1},
+		unicode.Range16{Lo: 0xa82c, Hi: 0xa82c, Stride: 0x1},
+		unicode.Range16{Lo: 0xa880, Hi: 0xa881, Stride: 0x1},
+		unicode.Range16{Lo: 0xa8b4, Hi: 0xa8c3, Stride: 0x1},
+		unicode.Range16{Lo: 0xa8c4, Hi: 0xa8c5, Stride: 0x1},
+		unicode.Range16{Lo: 0xa8e0, Hi: 0xa8f1, Stride: 0x1},
+		unicode.Range16{Lo: 0xa8ff, Hi: 0xa8ff, Stride: 0x1},
+		unicode.Range16{Lo: 0xa926, Hi: 0xa92d, Stride: 0x1},
+		unicode.Range16{Lo: 0xa947, Hi: 0xa951, Stride: 0x1},
+		unicode.Range16{Lo: 0xa952, Hi: 0xa953, Stride: 0x1},
+		unicode.Range16{Lo: 0xa980, Hi: 0xa982, Stride: 0x1},
+		unicode.Range16{Lo: 0xa983, Hi: 0xa983, Stride: 0x1},
+		unicode.Range16{Lo: 0xa9b3, Hi: 0xa9b3, Stride: 0x1},
+		unicode.Range16{Lo: 0xa9b4, Hi: 0xa9b5, Stride: 0x1},
+		unicode.Range16{Lo: 0xa9b6, Hi: 0xa9b9, Stride: 0x1},
+		unicode.Range16{Lo: 0xa9ba, Hi: 0xa9bb, Stride: 0x1},
+		unicode.Range16{Lo: 0xa9bc, Hi: 0xa9bd, Stride: 0x1},
+		unicode.Range16{Lo: 0xa9be, Hi: 0xa9c0, Stride: 0x1},
+		unicode.Range16{Lo: 0xa9e5, Hi: 0xa9e5, Stride: 0x1},
+		unicode.Range16{Lo: 0xaa29, Hi: 0xaa2e, Stride: 0x1},
+		unicode.Range16{Lo: 0xaa2f, Hi: 0xaa30, Stride: 0x1},
+		unicode.Range16{Lo: 0xaa31, Hi: 0xaa32, Stride: 0x1},
+		unicode.Range16{Lo: 0xaa33, Hi: 0xaa34, Stride: 0x1},
+		unicode.Range16{Lo: 0xaa35, Hi: 0xaa36, Stride: 0x1},
+		unicode.Range16{Lo: 0xaa43, Hi: 0xaa43, Stride: 0x1},
+		unicode.Range16{Lo: 0xaa4c, Hi: 0xaa4c, Stride: 0x1},
+		unicode.Range16{Lo: 0xaa4d, Hi: 0xaa4d, Stride: 0x1},
+		unicode.Range16{Lo: 0xaa7b, Hi: 0xaa7b, Stride: 0x1},
+		unicode.Range16{Lo: 0xaa7c, Hi: 0xaa7c, Stride: 0x1},
+		unicode.Range16{Lo: 0xaa7d, Hi: 0xaa7d, Stride: 0x1},
+		unicode.Range16{Lo: 0xaab0, Hi: 0xaab0, Stride: 0x1},
+		unicode.Range16{Lo: 0xaab2, Hi: 0xaab4, Stride: 0x1},
+		unicode.Range16{Lo: 0xaab7, Hi: 0xaab8, Stride: 0x1},
+		unicode.Range16{Lo: 0xaabe, Hi: 0xaabf, Stride: 0x1},
+		unicode.Range16{Lo: 0xaac1, Hi: 0xaac1, Stride: 0x1},
+		unicode.Range16{Lo: 0xaaeb, Hi: 0xaaeb, Stride: 0x1},
+		unicode.Range16{Lo: 0xaaec, Hi: 0xaaed, Stride: 0x1},
+		unicode.Range16{Lo: 0xaaee, Hi: 0xaaef, Stride: 0x1},
+		unicode.Range16{Lo: 0xaaf5, Hi: 0xaaf5, Stride: 0x1},
+		unicode.Range16{Lo: 0xaaf6, Hi: 0xaaf6, Stride: 0x1},
+		unicode.Range16{Lo: 0xabe3, Hi: 0xabe4, Stride: 0x1},
+		unicode.Range16{Lo: 0xabe5, Hi: 0xabe5, Stride: 0x1},
+		unicode.Range16{Lo: 0xabe6, Hi: 0xabe7, Stride: 0x1},
+		unicode.Range16{Lo: 0xabe8, Hi: 0xabe8, Stride: 0x1},
+		unicode.Range16{Lo: 0xabe9, Hi: 0xabea, Stride: 0x1},
+		unicode.Range16{Lo: 0xabec, Hi: 0xabec, Stride: 0x1},
+		unicode.Range16{Lo: 0xabed, Hi: 0xabed, Stride: 0x1},
+		unicode.Range16{Lo: 0xfb1e, Hi: 0xfb1e, Stride: 0x1},
+		unicode.Range16{Lo: 0xfe00, Hi: 0xfe0f, Stride: 0x1},
+		unicode.Range16{Lo: 0xfe20, Hi: 0xfe2f, Stride: 0x1},
+		unicode.Range16{Lo: 0xff9e, Hi: 0xff9f, Stride: 0x1},
+	},
+	R32: []unicode.Range32{
+		unicode.Range32{Lo: 0x101fd, Hi: 0x101fd, Stride: 0x1},
+		unicode.Range32{Lo: 0x102e0, Hi: 0x102e0, Stride: 0x1},
+		unicode.Range32{Lo: 0x10376, Hi: 0x1037a, Stride: 0x1},
+		unicode.Range32{Lo: 0x10a01, Hi: 0x10a03, Stride: 0x1},
+		unicode.Range32{Lo: 0x10a05, Hi: 0x10a06, Stride: 0x1},
+		unicode.Range32{Lo: 0x10a0c, Hi: 0x10a0f, Stride: 0x1},
+		unicode.Range32{Lo: 0x10a38, Hi: 0x10a3a, Stride: 0x1},
+		unicode.Range32{Lo: 0x10a3f, Hi: 0x10a3f, Stride: 0x1},
+		unicode.Range32{Lo: 0x10ae5, Hi: 0x10ae6, Stride: 0x1},
+		unicode.Range32{Lo: 0x10d24, Hi: 0x10d27, Stride: 0x1},
+		unicode.Range32{Lo: 0x10eab, Hi: 0x10eac, Stride: 0x1},
+		unicode.Range32{Lo: 0x10efd, Hi: 0x10eff, Stride: 0x1},
+		unicode.Range32{Lo: 0x10f46, Hi: 0x10f50, Stride: 0x1},
+		unicode.Range32{Lo: 0x10f82, Hi: 0x10f85, Stride: 0x1},
+		unicode.Range32{Lo: 0x11000, Hi: 0x11000, Stride: 0x1},
+		unicode.Range32{Lo: 0x11001, Hi: 0x11001, Stride: 0x1},
+		unicode.Range32{Lo: 0x11002, Hi: 0x11002, Stride: 0x1},
+		unicode.Range32{Lo: 0x11038, Hi: 0x11046, Stride: 0x1},
+		unicode.Range32{Lo: 0x11070, Hi: 0x11070, Stride: 0x1},
+		unicode.Range32{Lo: 0x11073, Hi: 0x11074, Stride: 0x1},
+		unicode.Range32{Lo: 0x1107f, Hi: 0x11081, Stride: 0x1},
+		unicode.Range32{Lo: 0x11082, Hi: 0x11082, Stride: 0x1},
+		unicode.Range32{Lo: 0x110b0, Hi: 0x110b2, Stride: 0x1},
+		unicode.Range32{Lo: 0x110b3, Hi: 0x110b6, Stride: 0x1},
+		unicode.Range32{Lo: 0x110b7, Hi: 0x110b8, Stride: 0x1},
+		unicode.Range32{Lo: 0x110b9, Hi: 0x110ba, Stride: 0x1},
+		unicode.Range32{Lo: 0x110c2, Hi: 0x110c2, Stride: 0x1},
+		unicode.Range32{Lo: 0x11100, Hi: 0x11102, Stride: 0x1},
+		unicode.Range32{Lo: 0x11127, Hi: 0x1112b, Stride: 0x1},
+		unicode.Range32{Lo: 0x1112c, Hi: 0x1112c, Stride: 0x1},
+		unicode.Range32{Lo: 0x1112d, Hi: 0x11134, Stride: 0x1},
+		unicode.Range32{Lo: 0x11145, Hi: 0x11146, Stride: 0x1},
+		unicode.Range32{Lo: 0x11173, Hi: 0x11173, Stride: 0x1},
+		unicode.Range32{Lo: 0x11180, Hi: 0x11181, Stride: 0x1},
+		unicode.Range32{Lo: 0x11182, Hi: 0x11182, Stride: 0x1},
+		unicode.Range32{Lo: 0x111b3, Hi: 0x111b5, Stride: 0x1},
+		unicode.Range32{Lo: 0x111b6, Hi: 0x111be, Stride: 0x1},
+		unicode.Range32{Lo: 0x111bf, Hi: 0x111c0, Stride: 0x1},
+		unicode.Range32{Lo: 0x111c9, Hi: 0x111cc, Stride: 0x1},
+		unicode.Range32{Lo: 0x111ce, Hi: 0x111ce, Stride: 0x1},
+		unicode.Range32{Lo: 0x111cf, Hi: 0x111cf, Stride: 0x1},
+		unicode.Range32{Lo: 0x1122c, Hi: 0x1122e, Stride: 0x1},
+		unicode.Range32{Lo: 0x1122f, Hi: 0x11231, Stride: 0x1},
+		unicode.Range32{Lo: 0x11232, Hi: 0x11233, Stride: 0x1},
+		unicode.Range32{Lo: 0x11234, Hi: 0x11234, Stride: 0x1},
+		unicode.Range32{Lo: 0x11235, Hi: 0x11235, Stride: 0x1},
+		unicode.Range32{Lo: 0x11236, Hi: 0x11237, Stride: 0x1},
+		unicode.Range32{Lo: 0x1123e, Hi: 0x1123e, Stride: 0x1},
+		unicode.Range32{Lo: 0x11241, Hi: 0x11241, Stride: 0x1},
+		unicode.Range32{Lo: 0x112df, Hi: 0x112df, Stride: 0x1},
+		unicode.Range32{Lo: 0x112e0, Hi: 0x112e2, Stride: 0x1},
+		unicode.Range32{Lo: 0x112e3, Hi: 0x112ea, Stride: 0x1},
+		unicode.Range32{Lo: 0x11300, Hi: 0x11301, Stride: 0x1},
+		unicode.Range32{Lo: 0x11302, Hi: 0x11303, Stride: 0x1},
+		unicode.Range32{Lo: 0x1133b, Hi: 0x1133c, Stride: 0x1},
+		unicode.Range32{Lo: 0x1133e, Hi: 0x1133f, Stride: 0x1},
+		unicode.Range32{Lo: 0x11340, Hi: 0x11340, Stride: 0x1},
+		unicode.Range32{Lo: 0x11341, Hi: 0x11344, Stride: 0x1},
+		unicode.Range32{Lo: 0x11347, Hi: 0x11348, Stride: 0x1},
+		unicode.Range32{Lo: 0x1134b, Hi: 0x1134d, Stride: 0x1},
+		unicode.Range32{Lo: 0x11357, Hi: 0x11357, Stride: 0x1},
+		unicode.Range32{Lo: 0x11362, Hi: 0x11363, Stride: 0x1},
+		unicode.Range32{Lo: 0x11366, Hi: 0x1136c, Stride: 0x1},
+		unicode.Range32{Lo: 0x11370, Hi: 0x11374, Stride: 0x1},
+		unicode.Range32{Lo: 0x11435, Hi: 0x11437, Stride: 0x1},
+		unicode.Range32{Lo: 0x11438, Hi: 0x1143f, Stride: 0x1},
+		unicode.Range32{Lo: 0x11440, Hi: 0x11441, Stride: 0x1},
+		unicode.Range32{Lo: 0x11442, Hi: 0x11444, Stride: 0x1},
+		unicode.Range32{Lo: 0x11445, Hi: 0x11445, Stride: 0x1},
+		unicode.Range32{Lo: 0x11446, Hi: 0x11446, Stride: 0x1},
+		unicode.Range32{Lo: 0x1145e, Hi: 0x1145e, Stride: 0x1},
+		unicode.Range32{Lo: 0x114b0, Hi: 0x114b2, Stride: 0x1},
+		unicode.Range32{Lo: 0x114b3, Hi: 0x114b8, Stride: 0x1},
+		unicode.Range32{Lo: 0x114b9, Hi: 0x114b9, Stride: 0x1},
+		unicode.Range32{Lo: 0x114ba, Hi: 0x114ba, Stride: 0x1},
+		unicode.Range32{Lo: 0x114bb, Hi: 0x114be, Stride: 0x1},
+		unicode.Range32{Lo: 0x114bf, Hi: 0x114c0, Stride: 0x1},
+		unicode.Range32{Lo: 0x114c1, Hi: 0x114c1, Stride: 0x1},
+		unicode.Range32{Lo: 0x114c2, Hi: 0x114c3, Stride: 0x1},
+		unicode.Range32{Lo: 0x115af, Hi: 0x115b1, Stride: 0x1},
+		unicode.Range32{Lo: 0x115b2, Hi: 0x115b5, Stride: 0x1},
+		unicode.Range32{Lo: 0x115b8, Hi: 0x115bb, Stride: 0x1},
+		unicode.Range32{Lo: 0x115bc, Hi: 0x115bd, Stride: 0x1},
+		unicode.Range32{Lo: 0x115be, Hi: 0x115be, Stride: 0x1},
+		unicode.Range32{Lo: 0x115bf, Hi: 0x115c0, Stride: 0x1},
+		unicode.Range32{Lo: 0x115dc, Hi: 0x115dd, Stride: 0x1},
+		unicode.Range32{Lo: 0x11630, Hi: 0x11632, Stride: 0x1},
+		unicode.Range32{Lo: 0x11633, Hi: 0x1163a, Stride: 0x1},
+		unicode.Range32{Lo: 0x1163b, Hi: 0x1163c, Stride: 0x1},
+		unicode.Range32{Lo: 0x1163d, Hi: 0x1163d, Stride: 0x1},
+		unicode.Range32{Lo: 0x1163e, Hi: 0x1163e, Stride: 0x1},
+		unicode.Range32{Lo: 0x1163f, Hi: 0x11640, Stride: 0x1},
+		unicode.Range32{Lo: 0x116ab, Hi: 0x116ab, Stride: 0x1},
+		unicode.Range32{Lo: 0x116ac, Hi: 0x116ac, Stride: 0x1},
+		unicode.Range32{Lo: 0x116ad, Hi: 0x116ad, Stride: 0x1},
+		unicode.Range32{Lo: 0x116ae, Hi: 0x116af, Stride: 0x1},
+		unicode.Range32{Lo: 0x116b0, Hi: 0x116b5, Stride: 0x1},
+		unicode.Range32{Lo: 0x116b6, Hi: 0x116b6, Stride: 0x1},
+		unicode.Range32{Lo: 0x116b7, Hi: 0x116b7, Stride: 0x1},
+		unicode.Range32{Lo: 0x1171d, Hi: 0x1171f, Stride: 0x1},
+		unicode.Range32{Lo: 0x11720, Hi: 0x11721, Stride: 0x1},
+		unicode.Range32{Lo: 0x11722, Hi: 0x11725, Stride: 0x1},
+		unicode.Range32{Lo: 0x11726, Hi: 0x11726, Stride: 0x1},
+		unicode.Range32{Lo: 0x11727, Hi: 0x1172b, Stride: 0x1},
+		unicode.Range32{Lo: 0x1182c, Hi: 0x1182e, Stride: 0x1},
+		unicode.Range32{Lo: 0x1182f, Hi: 0x11837, Stride: 0x1},
+		unicode.Range32{Lo: 0x11838, Hi: 0x11838, Stride: 0x1},
+		unicode.Range32{Lo: 0x11839, Hi: 0x1183a, Stride: 0x1},
+		unicode.Range32{Lo: 0x11930, Hi: 0x11935, Stride: 0x1},
+		unicode.Range32{Lo: 0x11937, Hi: 0x11938, Stride: 0x1},
+		unicode.Range32{Lo: 0x1193b, Hi: 0x1193c, Stride: 0x1},
+		unicode.Range32{Lo: 0x1193d, Hi: 0x1193d, Stride: 0x1},
+		unicode.Range32{Lo: 0x1193e, Hi: 0x1193e, Stride: 0x1},
+		unicode.Range32{Lo: 0x11940, Hi: 0x11940, Stride: 0x1},
+		unicode.Range32{Lo: 0x11942, Hi: 0x11942, Stride: 0x1},
+		unicode.Range32{Lo: 0x11943, Hi: 0x11943, Stride: 0x1},
+		unicode.Range32{Lo: 0x119d1, Hi: 0x119d3, Stride: 0x1},
+		unicode.Range32{Lo: 0x119d4, Hi: 0x119d7, Stride: 0x1},
+		unicode.Range32{Lo: 0x119da, Hi: 0x119db, Stride: 0x1},
+		unicode.Range32{Lo: 0x119dc, Hi: 0x119df, Stride: 0x1},
+		unicode.Range32{Lo: 0x119e0, Hi: 0x119e0, Stride: 0x1},
+		unicode.Range32{Lo: 0x119e4, Hi: 0x119e4, Stride: 0x1},
+		unicode.Range32{Lo: 0x11a01, Hi: 0x11a0a, Stride: 0x1},
+		unicode.Range32{Lo: 0x11a33, Hi: 0x11a38, Stride: 0x1},
+		unicode.Range32{Lo: 0x11a39, Hi: 0x11a39, Stride: 0x1},
+		unicode.Range32{Lo: 0x11a3b, Hi: 0x11a3e, Stride: 0x1},
+		unicode.Range32{Lo: 0x11a47, Hi: 0x11a47, Stride: 0x1},
+		unicode.Range32{Lo: 0x11a51, Hi: 0x11a56, Stride: 0x1},
+		unicode.Range32{Lo: 0x11a57, Hi: 0x11a58, Stride: 0x1},
+		unicode.Range32{Lo: 0x11a59, Hi: 0x11a5b, Stride: 0x1},
+		unicode.Range32{Lo: 0x11a8a, Hi: 0x11a96, Stride: 0x1},
+		unicode.Range32{Lo: 0x11a97, Hi: 0x11a97, Stride: 0x1},
+		unicode.Range32{Lo: 0x11a98, Hi: 0x11a99, Stride: 0x1},
+		unicode.Range32{Lo: 0x11c2f, Hi: 0x11c2f, Stride: 0x1},
+		unicode.Range32{Lo: 0x11c30, Hi: 0x11c36, Stride: 0x1},
+		unicode.Range32{Lo: 0x11c38, Hi: 0x11c3d, Stride: 0x1},
+		unicode.Range32{Lo: 0x11c3e, Hi: 0x11c3e, Stride: 0x1},
+		unicode.Range32{Lo: 0x11c3f, Hi: 0x11c3f, Stride: 0x1},
+		unicode.Range32{Lo: 0x11c92, Hi: 0x11ca7, Stride: 0x1},
+		unicode.Range32{Lo: 0x11ca9, Hi: 0x11ca9, Stride: 0x1},
+		unicode.Range32{Lo: 0x11caa, Hi: 0x11cb0, Stride: 0x1},
+		unicode.Range32{Lo: 0x11cb1, Hi: 0x11cb1, Stride: 0x1},
+		unicode.Range32{Lo: 0x11cb2, Hi: 0x11cb3, Stride: 0x1},
+		unicode.Range32{Lo: 0x11cb4, Hi: 0x11cb4, Stride: 0x1},
+		unicode.Range32{Lo: 0x11cb5, Hi: 0x11cb6, Stride: 0x1},
+		unicode.Range32{Lo: 0x11d31, Hi: 0x11d36, Stride: 0x1},
+		unicode.Range32{Lo: 0x11d3a, Hi: 0x11d3a, Stride: 0x1},
+		unicode.Range32{Lo: 0x11d3c, Hi: 0x11d3d, Stride: 0x1},
+		unicode.Range32{Lo: 0x11d3f, Hi: 0x11d45, Stride: 0x1},
+		unicode.Range32{Lo: 0x11d47, Hi: 0x11d47, Stride: 0x1},
+		unicode.Range32{Lo: 0x11d8a, Hi: 0x11d8e, Stride: 0x1},
+		unicode.Range32{Lo: 0x11d90, Hi: 0x11d91, Stride: 0x1},
+		unicode.Range32{Lo: 0x11d93, Hi: 0x11d94, Stride: 0x1},
+		unicode.Range32{Lo: 0x11d95, Hi: 0x11d95, Stride: 0x1},
+		unicode.Range32{Lo: 0x11d96, Hi: 0x11d96, Stride: 0x1},
+		unicode.Range32{Lo: 0x11d97, Hi: 0x11d97, Stride: 0x1},
+		unicode.Range32{Lo: 0x11ef3, Hi: 0x11ef4, Stride: 0x1},
+		unicode.Range32{Lo: 0x11ef5, Hi: 0x11ef6, Stride: 0x1},
+		unicode.Range32{Lo: 0x11f00, Hi: 0x11f01, Stride: 0x1},
+		unicode.Range32{Lo: 0x11f03, Hi: 0x11f03, Stride: 0x1},
+		unicode.Range32{Lo: 0x11f34, Hi: 0x11f35, Stride: 0x1},
+		unicode.Range32{Lo: 0x11f36, Hi: 0x11f3a, Stride: 0x1},
+		unicode.Range32{Lo: 0x11f3e, Hi: 0x11f3f, Stride: 0x1},
+		unicode.Range32{Lo: 0x11f40, Hi: 0x11f40, Stride: 0x1},
+		unicode.Range32{Lo: 0x11f41, Hi: 0x11f41, Stride: 0x1},
+		unicode.Range32{Lo: 0x11f42, Hi: 0x11f42, Stride: 0x1},
+		unicode.Range32{Lo: 0x13440, Hi: 0x13440, Stride: 0x1},
+		unicode.Range32{Lo: 0x13447, Hi: 0x13455, Stride: 0x1},
+		unicode.Range32{Lo: 0x16af0, Hi: 0x16af4, Stride: 0x1},
+		unicode.Range32{Lo: 0x16b30, Hi: 0x16b36, Stride: 0x1},
+		unicode.Range32{Lo: 0x16f4f, Hi: 0x16f4f, Stride: 0x1},
+		unicode.Range32{Lo: 0x16f51, Hi: 0x16f87, Stride: 0x1},
+		unicode.Range32{Lo: 0x16f8f, Hi: 0x16f92, Stride: 0x1},
+		unicode.Range32{Lo: 0x16fe4, Hi: 0x16fe4, Stride: 0x1},
+		unicode.Range32{Lo: 0x16ff0, Hi: 0x16ff1, Stride: 0x1},
+		unicode.Range32{Lo: 0x1bc9d, Hi: 0x1bc9e, Stride: 0x1},
+		unicode.Range32{Lo: 0x1cf00, Hi: 0x1cf2d, Stride: 0x1},
+		unicode.Range32{Lo: 0x1cf30, Hi: 0x1cf46, Stride: 0x1},
+		unicode.Range32{Lo: 0x1d165, Hi: 0x1d166, Stride: 0x1},
+		unicode.Range32{Lo: 0x1d167, Hi: 0x1d169, Stride: 0x1},
+		unicode.Range32{Lo: 0x1d16d, Hi: 0x1d172, Stride: 0x1},
+		unicode.Range32{Lo: 0x1d17b, Hi: 0x1d182, Stride: 0x1},
+		unicode.Range32{Lo: 0x1d185, Hi: 0x1d18b, Stride: 0x1},
+		unicode.Range32{Lo: 0x1d1aa, Hi: 0x1d1ad, Stride: 0x1},
+		unicode.Range32{Lo: 0x1d242, Hi: 0x1d244, Stride: 0x1},
+		unicode.Range32{Lo: 0x1da00, Hi: 0x1da36, Stride: 0x1},
+		unicode.Range32{Lo: 0x1da3b, Hi: 0x1da6c, Stride: 0x1},
+		unicode.Range32{Lo: 0x1da75, Hi: 0x1da75, Stride: 0x1},
+		unicode.Range32{Lo: 0x1da84, Hi: 0x1da84, Stride: 0x1},
+		unicode.Range32{Lo: 0x1da9b, Hi: 0x1da9f, Stride: 0x1},
+		unicode.Range32{Lo: 0x1daa1, Hi: 0x1daaf, Stride: 0x1},
+		unicode.Range32{Lo: 0x1e000, Hi: 0x1e006, Stride: 0x1},
+		unicode.Range32{Lo: 0x1e008, Hi: 0x1e018, Stride: 0x1},
+		unicode.Range32{Lo: 0x1e01b, Hi: 0x1e021, Stride: 0x1},
+		unicode.Range32{Lo: 0x1e023, Hi: 0x1e024, Stride: 0x1},
+		unicode.Range32{Lo: 0x1e026, Hi: 0x1e02a, Stride: 0x1},
+		unicode.Range32{Lo: 0x1e08f, Hi: 0x1e08f, Stride: 0x1},
+		unicode.Range32{Lo: 0x1e130, Hi: 0x1e136, Stride: 0x1},
+		unicode.Range32{Lo: 0x1e2ae, Hi: 0x1e2ae, Stride: 0x1},
+		unicode.Range32{Lo: 0x1e2ec, Hi: 0x1e2ef, Stride: 0x1},
+		unicode.Range32{Lo: 0x1e4ec, Hi: 0x1e4ef, Stride: 0x1},
+		unicode.Range32{Lo: 0x1e8d0, Hi: 0x1e8d6, Stride: 0x1},
+		unicode.Range32{Lo: 0x1e944, Hi: 0x1e94a, Stride: 0x1},
+		unicode.Range32{Lo: 0xe0020, Hi: 0xe007f, Stride: 0x1},
+		unicode.Range32{Lo: 0xe0100, Hi: 0xe01ef, Stride: 0x1},
+	},
+	LatinOffset: 0,
+}
+
+var _SentenceFormat = &unicode.RangeTable{
+	R16: []unicode.Range16{
+		unicode.Range16{Lo: 0xad, Hi: 0xad, Stride: 0x1},
+		unicode.Range16{Lo: 0x600, Hi: 0x605, Stride: 0x1},
+		unicode.Range16{Lo: 0x61c, Hi: 0x61c, Stride: 0x1},
+		unicode.Range16{Lo: 0x6dd, Hi: 0x6dd, Stride: 0x1},
+		unicode.Range16{Lo: 0x70f, Hi: 0x70f, Stride: 0x1},
+		unicode.Range16{Lo: 0x890, Hi: 0x891, Stride: 0x1},
+		unicode.Range16{Lo: 0x8e2, Hi: 0x8e2, Stride: 0x1},
+		unicode.Range16{Lo: 0x180e, Hi: 0x180e, Stride: 0x1},
+		unicode.Range16{Lo: 0x200b, Hi: 0x200b, Stride: 0x1},
+		unicode.Range16{Lo: 0x200e, Hi: 0x200f, Stride: 0x1},
+		unicode.Range16{Lo: 0x202a, Hi: 0x202e, Stride: 0x1},
+		unicode.Range16{Lo: 0x2060, Hi: 0x2064, Stride: 0x1},
+		unicode.Range16{Lo: 0x2066, Hi: 0x206f, Stride: 0x1},
+		unicode.Range16{Lo: 0xfeff, Hi: 0xfeff, Stride: 0x1},
+		unicode.Range16{Lo: 0xfff9, Hi: 0xfffb, Stride: 0x1},
+	},
+	R32: []unicode.Range32{
+		unicode.Range32{Lo: 0x110bd, Hi: 0x110bd, Stride: 0x1},
+		unicode.Range32{Lo: 0x110cd, Hi: 0x110cd, Stride: 0x1},
+		unicode.Range32{Lo: 0x13430, Hi: 0x1343f, Stride: 0x1},
+		unicode.Range32{Lo: 0x1bca0, Hi: 0x1bca3, Stride: 0x1},
+		unicode.Range32{Lo: 0x1d173, Hi: 0x1d17a, Stride: 0x1},
+		unicode.Range32{Lo: 0xe0001, Hi: 0xe0001, Stride: 0x1},
+	},
+	LatinOffset: 1,
+}
+
+var _SentenceLF = &unicode.RangeTable{
+	R16: []unicode.Range16{
+		unicode.Range16{Lo: 0xa, Hi: 0xa, Stride: 0x1},
+	},
+	LatinOffset: 1,
+}
+
+var _SentenceLower = &unicode.RangeTable{
+	R16: []unicode.Range16{
+		unicode.Range16{Lo: 0x61, Hi: 0x7a, Stride: 0x1},
+		unicode.Range16{Lo: 0xaa, Hi: 0xaa, Stride: 0x1},
+		unicode.Range16{Lo: 0xb5, Hi: 0xb5, Stride: 0x1},
+		unicode.Range16{Lo: 0xba, Hi: 0xba, Stride: 0x1},
+		unicode.Range16{Lo: 0xdf, Hi: 0xf6, Stride: 0x1},
+		unicode.Range16{Lo: 0xf8, Hi: 0xff, Stride: 0x1},
+		unicode.Range16{Lo: 0x101, Hi: 0x101, Stride: 0x1},
+		unicode.Range16{Lo: 0x103, Hi: 0x103, Stride: 0x1},
+		unicode.Range16{Lo: 0x105, Hi: 0x105, Stride: 0x1},
+		unicode.Range16{Lo: 0x107, Hi: 0x107, Stride: 0x1},
+		unicode.Range16{Lo: 0x109, Hi: 0x109, Stride: 0x1},
+		unicode.Range16{Lo: 0x10b, Hi: 0x10b, Stride: 0x1},
+		unicode.Range16{Lo: 0x10d, Hi: 0x10d, Stride: 0x1},
+		unicode.Range16{Lo: 0x10f, Hi: 0x10f, Stride: 0x1},
+		unicode.Range16{Lo: 0x111, Hi: 0x111, Stride: 0x1},
+		unicode.Range16{Lo: 0x113, Hi: 0x113, Stride: 0x1},
+		unicode.Range16{Lo: 0x115, Hi: 0x115, Stride: 0x1},
+		unicode.Range16{Lo: 0x117, Hi: 0x117, Stride: 0x1},
+		unicode.Range16{Lo: 0x119, Hi: 0x119, Stride: 0x1},
+		unicode.Range16{Lo: 0x11b, Hi: 0x11b, Stride: 0x1},
+		unicode.Range16{Lo: 0x11d, Hi: 0x11d, Stride: 0x1},
+		unicode.Range16{Lo: 0x11f, Hi: 0x11f, Stride: 0x1},
+		unicode.Range16{Lo: 0x121, Hi: 0x121, Stride: 0x1},
+		unicode.Range16{Lo: 0x123, Hi: 0x123, Stride: 0x1},
+		unicode.Range16{Lo: 0x125, Hi: 0x125, Stride: 0x1},
+		unicode.Range16{Lo: 0x127, Hi: 0x127, Stride: 0x1},
+		unicode.Range16{Lo: 0x129, Hi: 0x129, Stride: 0x1},
+		unicode.Range16{Lo: 0x12b, Hi: 0x12b, Stride: 0x1},
+		unicode.Range16{Lo: 0x12d, Hi: 0x12d, Stride: 0x1},
+		unicode.Range16{Lo: 0x12f, Hi: 0x12f, Stride: 0x1},
+		unicode.Range16{Lo: 0x131, Hi: 0x131, Stride: 0x1},
+		unicode.Range16{Lo: 0x133, Hi: 0x133, Stride: 0x1},
+		unicode.Range16{Lo: 0x135, Hi: 0x135, Stride: 0x1},
+		unicode.Range16{Lo: 0x137, Hi: 0x138, Stride: 0x1},
+		unicode.Range16{Lo: 0x13a, Hi: 0x13a, Stride: 0x1},
+		unicode.Range16{Lo: 0x13c, Hi: 0x13c, Stride: 0x1},
+		unicode.Range16{Lo: 0x13e, Hi: 0x13e, Stride: 0x1},
+		unicode.Range16{Lo: 0x140, Hi: 0x140, Stride: 0x1},
+		unicode.Range16{Lo: 0x142, Hi: 0x142, Stride: 0x1},
+		unicode.Range16{Lo: 0x144, Hi: 0x144, Stride: 0x1},
+		unicode.Range16{Lo: 0x146, Hi: 0x146, Stride: 0x1},
+		unicode.Range16{Lo: 0x148, Hi: 0x149, Stride: 0x1},
+		unicode.Range16{Lo: 0x14b, Hi: 0x14b, Stride: 0x1},
+		unicode.Range16{Lo: 0x14d, Hi: 0x14d, Stride: 0x1},
+		unicode.Range16{Lo: 0x14f, Hi: 0x14f, Stride: 0x1},
+		unicode.Range16{Lo: 0x151, Hi: 0x151, Stride: 0x1},
+		unicode.Range16{Lo: 0x153, Hi: 0x153, Stride: 0x1},
+		unicode.Range16{Lo: 0x155, Hi: 0x155, Stride: 0x1},
+		unicode.Range16{Lo: 0x157, Hi: 0x157, Stride: 0x1},
+		unicode.Range16{Lo: 0x159, Hi: 0x159, Stride: 0x1},
+		unicode.Range16{Lo: 0x15b, Hi: 0x15b, Stride: 0x1},
+		unicode.Range16{Lo: 0x15d, Hi: 0x15d, Stride: 0x1},
+		unicode.Range16{Lo: 0x15f, Hi: 0x15f, Stride: 0x1},
+		unicode.Range16{Lo: 0x161, Hi: 0x161, Stride: 0x1},
+		unicode.Range16{Lo: 0x163, Hi: 0x163, Stride: 0x1},
+		unicode.Range16{Lo: 0x165, Hi: 0x165, Stride: 0x1},
+		unicode.Range16{Lo: 0x167, Hi: 0x167, Stride: 0x1},
+		unicode.Range16{Lo: 0x169, Hi: 0x169, Stride: 0x1},
+		unicode.Range16{Lo: 0x16b, Hi: 0x16b, Stride: 0x1},
+		unicode.Range16{Lo: 0x16d, Hi: 0x16d, Stride: 0x1},
+		unicode.Range16{Lo: 0x16f, Hi: 0x16f, Stride: 0x1},
+		unicode.Range16{Lo: 0x171, Hi: 0x171, Stride: 0x1},
+		unicode.Range16{Lo: 0x173, Hi: 0x173, Stride: 0x1},
+		unicode.Range16{Lo: 0x175, Hi: 0x175, Stride: 0x1},
+		unicode.Range16{Lo: 0x177, Hi: 0x177, Stride: 0x1},
+		unicode.Range16{Lo: 0x17a, Hi: 0x17a, Stride: 0x1},
+		unicode.Range16{Lo: 0x17c, Hi: 0x17c, Stride: 0x1},
+		unicode.Range16{Lo: 0x17e, Hi: 0x180, Stride: 0x1},
+		unicode.Range16{Lo: 0x183, Hi: 0x183, Stride: 0x1},
+		unicode.Range16{Lo: 0x185, Hi: 0x185, Stride: 0x1},
+		unicode.Range16{Lo: 0x188, Hi: 0x188, Stride: 0x1},
+		unicode.Range16{Lo: 0x18c, Hi: 0x18d, Stride: 0x1},
+		unicode.Range16{Lo: 0x192, Hi: 0x192, Stride: 0x1},
+		unicode.Range16{Lo: 0x195, Hi: 0x195, Stride: 0x1},
+		unicode.Range16{Lo: 0x199, Hi: 0x19b, Stride: 0x1},
+		unicode.Range16{Lo: 0x19e, Hi: 0x19e, Stride: 0x1},
+		unicode.Range16{Lo: 0x1a1, Hi: 0x1a1, Stride: 0x1},
+		unicode.Range16{Lo: 0x1a3, Hi: 0x1a3, Stride: 0x1},
+		unicode.Range16{Lo: 0x1a5, Hi: 0x1a5, Stride: 0x1},
+		unicode.Range16{Lo: 0x1a8, Hi: 0x1a8, Stride: 0x1},
+		unicode.Range16{Lo: 0x1aa, Hi: 0x1ab, Stride: 0x1},
+		unicode.Range16{Lo: 0x1ad, Hi: 0x1ad, Stride: 0x1},
+		unicode.Range16{Lo: 0x1b0, Hi: 0x1b0, Stride: 0x1},
+		unicode.Range16{Lo: 0x1b4, Hi: 0x1b4, Stride: 0x1},
+		unicode.Range16{Lo: 0x1b6, Hi: 0x1b6, Stride: 0x1},
+		unicode.Range16{Lo: 0x1b9, Hi: 0x1ba, Stride: 0x1},
+		unicode.Range16{Lo: 0x1bd, Hi: 0x1bf, Stride: 0x1},
+		unicode.Range16{Lo: 0x1c6, Hi: 0x1c6, Stride: 0x1},
+		unicode.Range16{Lo: 0x1c9, Hi: 0x1c9, Stride: 0x1},
+		unicode.Range16{Lo: 0x1cc, Hi: 0x1cc, Stride: 0x1},
+		unicode.Range16{Lo: 0x1ce, Hi: 0x1ce, Stride: 0x1},
+		unicode.Range16{Lo: 0x1d0, Hi: 0x1d0, Stride: 0x1},
+		unicode.Range16{Lo: 0x1d2, Hi: 0x1d2, Stride: 0x1},
+		unicode.Range16{Lo: 0x1d4, Hi: 0x1d4, Stride: 0x1},
+		unicode.Range16{Lo: 0x1d6, Hi: 0x1d6, Stride: 0x1},
+		unicode.Range16{Lo: 0x1d8, Hi: 0x1d8, Stride: 0x1},
+		unicode.Range16{Lo: 0x1da, Hi: 0x1da, Stride: 0x1},
+		unicode.Range16{Lo: 0x1dc, Hi: 0x1dd, Stride: 0x1},
+		unicode.Range16{Lo: 0x1df, Hi: 0x1df, Stride: 0x1},
+		unicode.Range16{Lo: 0x1e1, Hi: 0x1e1, Stride: 0x1},
+		unicode.Range16{Lo: 0x1e3, Hi: 0x1e3, Stride: 0x1},
+		unicode.Range16{Lo: 0x1e5, Hi: 0x1e5, Stride: 0x1},
+		unicode.Range16{Lo: 0x1e7, Hi: 0x1e7, Stride: 0x1},
+		unicode.Range16{Lo: 0x1e9, Hi: 0x1e9, Stride: 0x1},
+		unicode.Range16{Lo: 0x1eb, Hi: 0x1eb, Stride: 0x1},
+		unicode.Range16{Lo: 0x1ed, Hi: 0x1ed, Stride: 0x1},
+		unicode.Range16{Lo: 0x1ef, Hi: 0x1f0, Stride: 0x1},
+		unicode.Range16{Lo: 0x1f3, Hi: 0x1f3, Stride: 0x1},
+		unicode.Range16{Lo: 0x1f5, Hi: 0x1f5, Stride: 0x1},
+		unicode.Range16{Lo: 0x1f9, Hi: 0x1f9, Stride: 0x1},
+		unicode.Range16{Lo: 0x1fb, Hi: 0x1fb, Stride: 0x1},
+		unicode.Range16{Lo: 0x1fd, Hi: 0x1fd, Stride: 0x1},
+		unicode.Range16{Lo: 0x1ff, Hi: 0x1ff, Stride: 0x1},
+		unicode.Range16{Lo: 0x201, Hi: 0x201, Stride: 0x1},
+		unicode.Range16{Lo: 0x203, Hi: 0x203, Stride: 0x1},
+		unicode.Range16{Lo: 0x205, Hi: 0x205, Stride: 0x1},
+		unicode.Range16{Lo: 0x207, Hi: 0x207, Stride: 0x1},
+		unicode.Range16{Lo: 0x209, Hi: 0x209, Stride: 0x1},
+		unicode.Range16{Lo: 0x20b, Hi: 0x20b, Stride: 0x1},
+		unicode.Range16{Lo: 0x20d, Hi: 0x20d, Stride: 0x1},
+		unicode.Range16{Lo: 0x20f, Hi: 0x20f, Stride: 0x1},
+		unicode.Range16{Lo: 0x211, Hi: 0x211, Stride: 0x1},
+		unicode.Range16{Lo: 0x213, Hi: 0x213, Stride: 0x1},
+		unicode.Range16{Lo: 0x215, Hi: 0x215, Stride: 0x1},
+		unicode.Range16{Lo: 0x217, Hi: 0x217, Stride: 0x1},
+		unicode.Range16{Lo: 0x219, Hi: 0x219, Stride: 0x1},
+		unicode.Range16{Lo: 0x21b, Hi: 0x21b, Stride: 0x1},
+		unicode.Range16{Lo: 0x21d, Hi: 0x21d, Stride: 0x1},
+		unicode.Range16{Lo: 0x21f, Hi: 0x21f, Stride: 0x1},
+		unicode.Range16{Lo: 0x221, Hi: 0x221, Stride: 0x1},
+		unicode.Range16{Lo: 0x223, Hi: 0x223, Stride: 0x1},
+		unicode.Range16{Lo: 0x225, Hi: 0x225, Stride: 0x1},
+		unicode.Range16{Lo: 0x227, Hi: 0x227, Stride: 0x1},
+		unicode.Range16{Lo: 0x229, Hi: 0x229, Stride: 0x1},
+		unicode.Range16{Lo: 0x22b, Hi: 0x22b, Stride: 0x1},
+		unicode.Range16{Lo: 0x22d, Hi: 0x22d, Stride: 0x1},
+		unicode.Range16{Lo: 0x22f, Hi: 0x22f, Stride: 0x1},
+		unicode.Range16{Lo: 0x231, Hi: 0x231, Stride: 0x1},
+		unicode.Range16{Lo: 0x233, Hi: 0x239, Stride: 0x1},
+		unicode.Range16{Lo: 0x23c, Hi: 0x23c, Stride: 0x1},
+		unicode.Range16{Lo: 0x23f, Hi: 0x240, Stride: 0x1},
+		unicode.Range16{Lo: 0x242, Hi: 0x242, Stride: 0x1},
+		unicode.Range16{Lo: 0x247, Hi: 0x247, Stride: 0x1},
+		unicode.Range16{Lo: 0x249, Hi: 0x249, Stride: 0x1},
+		unicode.Range16{Lo: 0x24b, Hi: 0x24b, Stride: 0x1},
+		unicode.Range16{Lo: 0x24d, Hi: 0x24d, Stride: 0x1},
+		unicode.Range16{Lo: 0x24f, Hi: 0x293, Stride: 0x1},
+		unicode.Range16{Lo: 0x295, Hi: 0x2af, Stride: 0x1},
+		unicode.Range16{Lo: 0x2b0, Hi: 0x2b8, Stride: 0x1},
+		unicode.Range16{Lo: 0x2c0, Hi: 0x2c1, Stride: 0x1},
+		unicode.Range16{Lo: 0x2e0, Hi: 0x2e4, Stride: 0x1},
+		unicode.Range16{Lo: 0x371, Hi: 0x371, Stride: 0x1},
+		unicode.Range16{Lo: 0x373, Hi: 0x373, Stride: 0x1},
+		unicode.Range16{Lo: 0x377, Hi: 0x377, Stride: 0x1},
+		unicode.Range16{Lo: 0x37a, Hi: 0x37a, Stride: 0x1},
+		unicode.Range16{Lo: 0x37b, Hi: 0x37d, Stride: 0x1},
+		unicode.Range16{Lo: 0x390, Hi: 0x390, Stride: 0x1},
+		unicode.Range16{Lo: 0x3ac, Hi: 0x3ce, Stride: 0x1},
+		unicode.Range16{Lo: 0x3d0, Hi: 0x3d1, Stride: 0x1},
+		unicode.Range16{Lo: 0x3d5, Hi: 0x3d7, Stride: 0x1},
+		unicode.Range16{Lo: 0x3d9, Hi: 0x3d9, Stride: 0x1},
+		unicode.Range16{Lo: 0x3db, Hi: 0x3db, Stride: 0x1},
+		unicode.Range16{Lo: 0x3dd, Hi: 0x3dd, Stride: 0x1},
+		unicode.Range16{Lo: 0x3df, Hi: 0x3df, Stride: 0x1},
+		unicode.Range16{Lo: 0x3e1, Hi: 0x3e1, Stride: 0x1},
+		unicode.Range16{Lo: 0x3e3, Hi: 0x3e3, Stride: 0x1},
+		unicode.Range16{Lo: 0x3e5, Hi: 0x3e5, Stride: 0x1},
+		unicode.Range16{Lo: 0x3e7, Hi: 0x3e7, Stride: 0x1},
+		unicode.Range16{Lo: 0x3e9, Hi: 0x3e9, Stride: 0x1},
+		unicode.Range16{Lo: 0x3eb, Hi: 0x3eb, Stride: 0x1},
+		unicode.Range16{Lo: 0x3ed, Hi: 0x3ed, Stride: 0x1},
+		unicode.Range16{Lo: 0x3ef, Hi: 0x3f3, Stride: 0x1},
+		unicode.Range16{Lo: 0x3f5, Hi: 0x3f5, Stride: 0x1},
+		unicode.Range16{Lo: 0x3f8, Hi: 0x3f8, Stride: 0x1},
+		unicode.Range16{Lo: 0x3fb, Hi: 0x3fc, Stride: 0x1},
+		unicode.Range16{Lo: 0x430, Hi: 0x45f, Stride: 0x1},
+		unicode.Range16{Lo: 0x461, Hi: 0x461, Stride: 0x1},
+		unicode.Range16{Lo: 0x463, Hi: 0x463, Stride: 0x1},
+		unicode.Range16{Lo: 0x465, Hi: 0x465, Stride: 0x1},
+		unicode.Range16{Lo: 0x467, Hi: 0x467, Stride: 0x1},
+		unicode.Range16{Lo: 0x469, Hi: 0x469, Stride: 0x1},
+		unicode.Range16{Lo: 0x46b, Hi: 0x46b, Stride: 0x1},
+		unicode.Range16{Lo: 0x46d, Hi: 0x46d, Stride: 0x1},
+		unicode.Range16{Lo: 0x46f, Hi: 0x46f, Stride: 0x1},
+		unicode.Range16{Lo: 0x471, Hi: 0x471, Stride: 0x1},
+		unicode.Range16{Lo: 0x473, Hi: 0x473, Stride: 0x1},
+		unicode.Range16{Lo: 0x475, Hi: 0x475, Stride: 0x1},
+		unicode.Range16{Lo: 0x477, Hi: 0x477, Stride: 0x1},
+		unicode.Range16{Lo: 0x479, Hi: 0x479, Stride: 0x1},
+		unicode.Range16{Lo: 0x47b, Hi: 0x47b, Stride: 0x1},
+		unicode.Range16{Lo: 0x47d, Hi: 0x47d, Stride: 0x1},
+		unicode.Range16{Lo: 0x47f, Hi: 0x47f, Stride: 0x1},
+		unicode.Range16{Lo: 0x481, Hi: 0x481, Stride: 0x1},
+		unicode.Range16{Lo: 0x48b, Hi: 0x48b, Stride: 0x1},
+		unicode.Range16{Lo: 0x48d, Hi: 0x48d, Stride: 0x1},
+		unicode.Range16{Lo: 0x48f, Hi: 0x48f, Stride: 0x1},
+		unicode.Range16{Lo: 0x491, Hi: 0x491, Stride: 0x1},
+		unicode.Range16{Lo: 0x493, Hi: 0x493, Stride: 0x1},
+		unicode.Range16{Lo: 0x495, Hi: 0x495, Stride: 0x1},
+		unicode.Range16{Lo: 0x497, Hi: 0x497, Stride: 0x1},
+		unicode.Range16{Lo: 0x499, Hi: 0x499, Stride: 0x1},
+		unicode.Range16{Lo: 0x49b, Hi: 0x49b, Stride: 0x1},
+		unicode.Range16{Lo: 0x49d, Hi: 0x49d, Stride: 0x1},
+		unicode.Range16{Lo: 0x49f, Hi: 0x49f, Stride: 0x1},
+		unicode.Range16{Lo: 0x4a1, Hi: 0x4a1, Stride: 0x1},
+		unicode.Range16{Lo: 0x4a3, Hi: 0x4a3, Stride: 0x1},
+		unicode.Range16{Lo: 0x4a5, Hi: 0x4a5, Stride: 0x1},
+		unicode.Range16{Lo: 0x4a7, Hi: 0x4a7, Stride: 0x1},
+		unicode.Range16{Lo: 0x4a9, Hi: 0x4a9, Stride: 0x1},
+		unicode.Range16{Lo: 0x4ab, Hi: 0x4ab, Stride: 0x1},
+		unicode.Range16{Lo: 0x4ad, Hi: 0x4ad, Stride: 0x1},
+		unicode.Range16{Lo: 0x4af, Hi: 0x4af, Stride: 0x1},
+		unicode.Range16{Lo: 0x4b1, Hi: 0x4b1, Stride: 0x1},
+		unicode.Range16{Lo: 0x4b3, Hi: 0x4b3, Stride: 0x1},
+		unicode.Range16{Lo: 0x4b5, Hi: 0x4b5, Stride: 0x1},
+		unicode.Range16{Lo: 0x4b7, Hi: 0x4b7, Stride: 0x1},
+		unicode.Range16{Lo: 0x4b9, Hi: 0x4b9, Stride: 0x1},
+		unicode.Range16{Lo: 0x4bb, Hi: 0x4bb, Stride: 0x1},
+		unicode.Range16{Lo: 0x4bd, Hi: 0x4bd, Stride: 0x1},
+		unicode.Range16{Lo: 0x4bf, Hi: 0x4bf, Stride: 0x1},
+		unicode.Range16{Lo: 0x4c2, Hi: 0x4c2, Stride: 0x1},
+		unicode.Range16{Lo: 0x4c4, Hi: 0x4c4, Stride: 0x1},
+		unicode.Range16{Lo: 0x4c6, Hi: 0x4c6, Stride: 0x1},
+		unicode.Range16{Lo: 0x4c8, Hi: 0x4c8, Stride: 0x1},
+		unicode.Range16{Lo: 0x4ca, Hi: 0x4ca, Stride: 0x1},
+		unicode.Range16{Lo: 0x4cc, Hi: 0x4cc, Stride: 0x1},
+		unicode.Range16{Lo: 0x4ce, Hi: 0x4cf, Stride: 0x1},
+		unicode.Range16{Lo: 0x4d1, Hi: 0x4d1, Stride: 0x1},
+		unicode.Range16{Lo: 0x4d3, Hi: 0x4d3, Stride: 0x1},
+		unicode.Range16{Lo: 0x4d5, Hi: 0x4d5, Stride: 0x1},
+		unicode.Range16{Lo: 0x4d7, Hi: 0x4d7, Stride: 0x1},
+		unicode.Range16{Lo: 0x4d9, Hi: 0x4d9, Stride: 0x1},
+		unicode.Range16{Lo: 0x4db, Hi: 0x4db, Stride: 0x1},
+		unicode.Range16{Lo: 0x4dd, Hi: 0x4dd, Stride: 0x1},
+		unicode.Range16{Lo: 0x4df, Hi: 0x4df, Stride: 0x1},
+		unicode.Range16{Lo: 0x4e1, Hi: 0x4e1, Stride: 0x1},
+		unicode.Range16{Lo: 0x4e3, Hi: 0x4e3, Stride: 0x1},
+		unicode.Range16{Lo: 0x4e5, Hi: 0x4e5, Stride: 0x1},
+		unicode.Range16{Lo: 0x4e7, Hi: 0x4e7, Stride: 0x1},
+		unicode.Range16{Lo: 0x4e9, Hi: 0x4e9, Stride: 0x1},
+		unicode.Range16{Lo: 0x4eb, Hi: 0x4eb, Stride: 0x1},
+		unicode.Range16{Lo: 0x4ed, Hi: 0x4ed, Stride: 0x1},
+		unicode.Range16{Lo: 0x4ef, Hi: 0x4ef, Stride: 0x1},
+		unicode.Range16{Lo: 0x4f1, Hi: 0x4f1, Stride: 0x1},
+		unicode.Range16{Lo: 0x4f3, Hi: 0x4f3, Stride: 0x1},
+		unicode.Range16{Lo: 0x4f5, Hi: 0x4f5, Stride: 0x1},
+		unicode.Range16{Lo: 0x4f7, Hi: 0x4f7, Stride: 0x1},
+		unicode.Range16{Lo: 0x4f9, Hi: 0x4f9, Stride: 0x1},
+		unicode.Range16{Lo: 0x4fb, Hi: 0x4fb, Stride: 0x1},
+		unicode.Range16{Lo: 0x4fd, Hi: 0x4fd, Stride: 0x1},
+		unicode.Range16{Lo: 0x4ff, Hi: 0x4ff, Stride: 0x1},
+		unicode.Range16{Lo: 0x501, Hi: 0x501, Stride: 0x1},
+		unicode.Range16{Lo: 0x503, Hi: 0x503, Stride: 0x1},
+		unicode.Range16{Lo: 0x505, Hi: 0x505, Stride: 0x1},
+		unicode.Range16{Lo: 0x507, Hi: 0x507, Stride: 0x1},
+		unicode.Range16{Lo: 0x509, Hi: 0x509, Stride: 0x1},
+		unicode.Range16{Lo: 0x50b, Hi: 0x50b, Stride: 0x1},
+		unicode.Range16{Lo: 0x50d, Hi: 0x50d, Stride: 0x1},
+		unicode.Range16{Lo: 0x50f, Hi: 0x50f, Stride: 0x1},
+		unicode.Range16{Lo: 0x511, Hi: 0x511, Stride: 0x1},
+		unicode.Range16{Lo: 0x513, Hi: 0x513, Stride: 0x1},
+		unicode.Range16{Lo: 0x515, Hi: 0x515, Stride: 0x1},
+		unicode.Range16{Lo: 0x517, Hi: 0x517, Stride: 0x1},
+		unicode.Range16{Lo: 0x519, Hi: 0x519, Stride: 0x1},
+		unicode.Range16{Lo: 0x51b, Hi: 0x51b, Stride: 0x1},
+		unicode.Range16{Lo: 0x51d, Hi: 0x51d, Stride: 0x1},
+		unicode.Range16{Lo: 0x51f, Hi: 0x51f, Stride: 0x1},
+		unicode.Range16{Lo: 0x521, Hi: 0x521, Stride: 0x1},
+		unicode.Range16{Lo: 0x523, Hi: 0x523, Stride: 0x1},
+		unicode.Range16{Lo: 0x525, Hi: 0x525, Stride: 0x1},
+		unicode.Range16{Lo: 0x527, Hi: 0x527, Stride: 0x1},
+		unicode.Range16{Lo: 0x529, Hi: 0x529, Stride: 0x1},
+		unicode.Range16{Lo: 0x52b, Hi: 0x52b, Stride: 0x1},
+		unicode.Range16{Lo: 0x52d, Hi: 0x52d, Stride: 0x1},
+		unicode.Range16{Lo: 0x52f, Hi: 0x52f, Stride: 0x1},
+		unicode.Range16{Lo: 0x560, Hi: 0x588, Stride: 0x1},
+		unicode.Range16{Lo: 0x10fc, Hi: 0x10fc, Stride: 0x1},
+		unicode.Range16{Lo: 0x13f8, Hi: 0x13fd, Stride: 0x1},
+		unicode.Range16{Lo: 0x1c80, Hi: 0x1c88, Stride: 0x1},
+		unicode.Range16{Lo: 0x1d00, Hi: 0x1d2b, Stride: 0x1},
+		unicode.Range16{Lo: 0x1d2c, Hi: 0x1d6a, Stride: 0x1},
+		unicode.Range16{Lo: 0x1d6b, Hi: 0x1d77, Stride: 0x1},
+		unicode.Range16{Lo: 0x1d78, Hi: 0x1d78, Stride: 0x1},
+		unicode.Range16{Lo: 0x1d79, Hi: 0x1d9a, Stride: 0x1},
+		unicode.Range16{Lo: 0x1d9b, Hi: 0x1dbf, Stride: 0x1},
+		unicode.Range16{Lo: 0x1e01, Hi: 0x1e01, Stride: 0x1},
+		unicode.Range16{Lo: 0x1e03, Hi: 0x1e03, Stride: 0x1},
+		unicode.Range16{Lo: 0x1e05, Hi: 0x1e05, Stride: 0x1},
+		unicode.Range16{Lo: 0x1e07, Hi: 0x1e07, Stride: 0x1},
+		unicode.Range16{Lo: 0x1e09, Hi: 0x1e09, Stride: 0x1},
+		unicode.Range16{Lo: 0x1e0b, Hi: 0x1e0b, Stride: 0x1},
+		unicode.Range16{Lo: 0x1e0d, Hi: 0x1e0d, Stride: 0x1},
+		unicode.Range16{Lo: 0x1e0f, Hi: 0x1e0f, Stride: 0x1},
+		unicode.Range16{Lo: 0x1e11, Hi: 0x1e11, Stride: 0x1},
+		unicode.Range16{Lo: 0x1e13, Hi: 0x1e13, Stride: 0x1},
+		unicode.Range16{Lo: 0x1e15, Hi: 0x1e15, Stride: 0x1},
+		unicode.Range16{Lo: 0x1e17, Hi: 0x1e17, Stride: 0x1},
+		unicode.Range16{Lo: 0x1e19, Hi: 0x1e19, Stride: 0x1},
+		unicode.Range16{Lo: 0x1e1b, Hi: 0x1e1b, Stride: 0x1},
+		unicode.Range16{Lo: 0x1e1d, Hi: 0x1e1d, Stride: 0x1},
+		unicode.Range16{Lo: 0x1e1f, Hi: 0x1e1f, Stride: 0x1},
+		unicode.Range16{Lo: 0x1e21, Hi: 0x1e21, Stride: 0x1},
+		unicode.Range16{Lo: 0x1e23, Hi: 0x1e23, Stride: 0x1},
+		unicode.Range16{Lo: 0x1e25, Hi: 0x1e25, Stride: 0x1},
+		unicode.Range16{Lo: 0x1e27, Hi: 0x1e27, Stride: 0x1},
+		unicode.Range16{Lo: 0x1e29, Hi: 0x1e29, Stride: 0x1},
+		unicode.Range16{Lo: 0x1e2b, Hi: 0x1e2b, Stride: 0x1},
+		unicode.Range16{Lo: 0x1e2d, Hi: 0x1e2d, Stride: 0x1},
+		unicode.Range16{Lo: 0x1e2f, Hi: 0x1e2f, Stride: 0x1},
+		unicode.Range16{Lo: 0x1e31, Hi: 0x1e31, Stride: 0x1},
+		unicode.Range16{Lo: 0x1e33, Hi: 0x1e33, Stride: 0x1},
+		unicode.Range16{Lo: 0x1e35, Hi: 0x1e35, Stride: 0x1},
+		unicode.Range16{Lo: 0x1e37, Hi: 0x1e37, Stride: 0x1},
+		unicode.Range16{Lo: 0x1e39, Hi: 0x1e39, Stride: 0x1},
+		unicode.Range16{Lo: 0x1e3b, Hi: 0x1e3b, Stride: 0x1},
+		unicode.Range16{Lo: 0x1e3d, Hi: 0x1e3d, Stride: 0x1},
+		unicode.Range16{Lo: 0x1e3f, Hi: 0x1e3f, Stride: 0x1},
+		unicode.Range16{Lo: 0x1e41, Hi: 0x1e41, Stride: 0x1},
+		unicode.Range16{Lo: 0x1e43, Hi: 0x1e43, Stride: 0x1},
+		unicode.Range16{Lo: 0x1e45, Hi: 0x1e45, Stride: 0x1},
+		unicode.Range16{Lo: 0x1e47, Hi: 0x1e47, Stride: 0x1},
+		unicode.Range16{Lo: 0x1e49, Hi: 0x1e49, Stride: 0x1},
+		unicode.Range16{Lo: 0x1e4b, Hi: 0x1e4b, Stride: 0x1},
+		unicode.Range16{Lo: 0x1e4d, Hi: 0x1e4d, Stride: 0x1},
+		unicode.Range16{Lo: 0x1e4f, Hi: 0x1e4f, Stride: 0x1},
+		unicode.Range16{Lo: 0x1e51, Hi: 0x1e51, Stride: 0x1},
+		unicode.Range16{Lo: 0x1e53, Hi: 0x1e53, Stride: 0x1},
+		unicode.Range16{Lo: 0x1e55, Hi: 0x1e55, Stride: 0x1},
+		unicode.Range16{Lo: 0x1e57, Hi: 0x1e57, Stride: 0x1},
+		unicode.Range16{Lo: 0x1e59, Hi: 0x1e59, Stride: 0x1},
+		unicode.Range16{Lo: 0x1e5b, Hi: 0x1e5b, Stride: 0x1},
+		unicode.Range16{Lo: 0x1e5d, Hi: 0x1e5d, Stride: 0x1},
+		unicode.Range16{Lo: 0x1e5f, Hi: 0x1e5f, Stride: 0x1},
+		unicode.Range16{Lo: 0x1e61, Hi: 0x1e61, Stride: 0x1},
+		unicode.Range16{Lo: 0x1e63, Hi: 0x1e63, Stride: 0x1},
+		unicode.Range16{Lo: 0x1e65, Hi: 0x1e65, Stride: 0x1},
+		unicode.Range16{Lo: 0x1e67, Hi: 0x1e67, Stride: 0x1},
+		unicode.Range16{Lo: 0x1e69, Hi: 0x1e69, Stride: 0x1},
+		unicode.Range16{Lo: 0x1e6b, Hi: 0x1e6b, Stride: 0x1},
+		unicode.Range16{Lo: 0x1e6d, Hi: 0x1e6d, Stride: 0x1},
+		unicode.Range16{Lo: 0x1e6f, Hi: 0x1e6f, Stride: 0x1},
+		unicode.Range16{Lo: 0x1e71, Hi: 0x1e71, Stride: 0x1},
+		unicode.Range16{Lo: 0x1e73, Hi: 0x1e73, Stride: 0x1},
+		unicode.Range16{Lo: 0x1e75, Hi: 0x1e75, Stride: 0x1},
+		unicode.Range16{Lo: 0x1e77, Hi: 0x1e77, Stride: 0x1},
+		unicode.Range16{Lo: 0x1e79, Hi: 0x1e79, Stride: 0x1},
+		unicode.Range16{Lo: 0x1e7b, Hi: 0x1e7b, Stride: 0x1},
+		unicode.Range16{Lo: 0x1e7d, Hi: 0x1e7d, Stride: 0x1},
+		unicode.Range16{Lo: 0x1e7f, Hi: 0x1e7f, Stride: 0x1},
+		unicode.Range16{Lo: 0x1e81, Hi: 0x1e81, Stride: 0x1},
+		unicode.Range16{Lo: 0x1e83, Hi: 0x1e83, Stride: 0x1},
+		unicode.Range16{Lo: 0x1e85, Hi: 0x1e85, Stride: 0x1},
+		unicode.Range16{Lo: 0x1e87, Hi: 0x1e87, Stride: 0x1},
+		unicode.Range16{Lo: 0x1e89, Hi: 0x1e89, Stride: 0x1},
+		unicode.Range16{Lo: 0x1e8b, Hi: 0x1e8b, Stride: 0x1},
+		unicode.Range16{Lo: 0x1e8d, Hi: 0x1e8d, Stride: 0x1},
+		unicode.Range16{Lo: 0x1e8f, Hi: 0x1e8f, Stride: 0x1},
+		unicode.Range16{Lo: 0x1e91, Hi: 0x1e91, Stride: 0x1},
+		unicode.Range16{Lo: 0x1e93, Hi: 0x1e93, Stride: 0x1},
+		unicode.Range16{Lo: 0x1e95, Hi: 0x1e9d, Stride: 0x1},
+		unicode.Range16{Lo: 0x1e9f, Hi: 0x1e9f, Stride: 0x1},
+		unicode.Range16{Lo: 0x1ea1, Hi: 0x1ea1, Stride: 0x1},
+		unicode.Range16{Lo: 0x1ea3, Hi: 0x1ea3, Stride: 0x1},
+		unicode.Range16{Lo: 0x1ea5, Hi: 0x1ea5, Stride: 0x1},
+		unicode.Range16{Lo: 0x1ea7, Hi: 0x1ea7, Stride: 0x1},
+		unicode.Range16{Lo: 0x1ea9, Hi: 0x1ea9, Stride: 0x1},
+		unicode.Range16{Lo: 0x1eab, Hi: 0x1eab, Stride: 0x1},
+		unicode.Range16{Lo: 0x1ead, Hi: 0x1ead, Stride: 0x1},
+		unicode.Range16{Lo: 0x1eaf, Hi: 0x1eaf, Stride: 0x1},
+		unicode.Range16{Lo: 0x1eb1, Hi: 0x1eb1, Stride: 0x1},
+		unicode.Range16{Lo: 0x1eb3, Hi: 0x1eb3, Stride: 0x1},
+		unicode.Range16{Lo: 0x1eb5, Hi: 0x1eb5, Stride: 0x1},
+		unicode.Range16{Lo: 0x1eb7, Hi: 0x1eb7, Stride: 0x1},
+		unicode.Range16{Lo: 0x1eb9, Hi: 0x1eb9, Stride: 0x1},
+		unicode.Range16{Lo: 0x1ebb, Hi: 0x1ebb, Stride: 0x1},
+		unicode.Range16{Lo: 0x1ebd, Hi: 0x1ebd, Stride: 0x1},
+		unicode.Range16{Lo: 0x1ebf, Hi: 0x1ebf, Stride: 0x1},
+		unicode.Range16{Lo: 0x1ec1, Hi: 0x1ec1, Stride: 0x1},
+		unicode.Range16{Lo: 0x1ec3, Hi: 0x1ec3, Stride: 0x1},
+		unicode.Range16{Lo: 0x1ec5, Hi: 0x1ec5, Stride: 0x1},
+		unicode.Range16{Lo: 0x1ec7, Hi: 0x1ec7, Stride: 0x1},
+		unicode.Range16{Lo: 0x1ec9, Hi: 0x1ec9, Stride: 0x1},
+		unicode.Range16{Lo: 0x1ecb, Hi: 0x1ecb, Stride: 0x1},
+		unicode.Range16{Lo: 0x1ecd, Hi: 0x1ecd, Stride: 0x1},
+		unicode.Range16{Lo: 0x1ecf, Hi: 0x1ecf, Stride: 0x1},
+		unicode.Range16{Lo: 0x1ed1, Hi: 0x1ed1, Stride: 0x1},
+		unicode.Range16{Lo: 0x1ed3, Hi: 0x1ed3, Stride: 0x1},
+		unicode.Range16{Lo: 0x1ed5, Hi: 0x1ed5, Stride: 0x1},
+		unicode.Range16{Lo: 0x1ed7, Hi: 0x1ed7, Stride: 0x1},
+		unicode.Range16{Lo: 0x1ed9, Hi: 0x1ed9, Stride: 0x1},
+		unicode.Range16{Lo: 0x1edb, Hi: 0x1edb, Stride: 0x1},
+		unicode.Range16{Lo: 0x1edd, Hi: 0x1edd, Stride: 0x1},
+		unicode.Range16{Lo: 0x1edf, Hi: 0x1edf, Stride: 0x1},
+		unicode.Range16{Lo: 0x1ee1, Hi: 0x1ee1, Stride: 0x1},
+		unicode.Range16{Lo: 0x1ee3, Hi: 0x1ee3, Stride: 0x1},
+		unicode.Range16{Lo: 0x1ee5, Hi: 0x1ee5, Stride: 0x1},
+		unicode.Range16{Lo: 0x1ee7, Hi: 0x1ee7, Stride: 0x1},
+		unicode.Range16{Lo: 0x1ee9, Hi: 0x1ee9, Stride: 0x1},
+		unicode.Range16{Lo: 0x1eeb, Hi: 0x1eeb, Stride: 0x1},
+		unicode.Range16{Lo: 0x1eed, Hi: 0x1eed, Stride: 0x1},
+		unicode.Range16{Lo: 0x1eef, Hi: 0x1eef, Stride: 0x1},
+		unicode.Range16{Lo: 0x1ef1, Hi: 0x1ef1, Stride: 0x1},
+		unicode.Range16{Lo: 0x1ef3, Hi: 0x1ef3, Stride: 0x1},
+		unicode.Range16{Lo: 0x1ef5, Hi: 0x1ef5, Stride: 0x1},
+		unicode.Range16{Lo: 0x1ef7, Hi: 0x1ef7, Stride: 0x1},
+		unicode.Range16{Lo: 0x1ef9, Hi: 0x1ef9, Stride: 0x1},
+		unicode.Range16{Lo: 0x1efb, Hi: 0x1efb, Stride: 0x1},
+		unicode.Range16{Lo: 0x1efd, Hi: 0x1efd, Stride: 0x1},
+		unicode.Range16{Lo: 0x1eff, Hi: 0x1f07, Stride: 0x1},
+		unicode.Range16{Lo: 0x1f10, Hi: 0x1f15, Stride: 0x1},
+		unicode.Range16{Lo: 0x1f20, Hi: 0x1f27, Stride: 0x1},
+		unicode.Range16{Lo: 0x1f30, Hi: 0x1f37, Stride: 0x1},
+		unicode.Range16{Lo: 0x1f40, Hi: 0x1f45, Stride: 0x1},
+		unicode.Range16{Lo: 0x1f50, Hi: 0x1f57, Stride: 0x1},
+		unicode.Range16{Lo: 0x1f60, Hi: 0x1f67, Stride: 0x1},
+		unicode.Range16{Lo: 0x1f70, Hi: 0x1f7d, Stride: 0x1},
+		unicode.Range16{Lo: 0x1f80, Hi: 0x1f87, Stride: 0x1},
+		unicode.Range16{Lo: 0x1f90, Hi: 0x1f97, Stride: 0x1},
+		unicode.Range16{Lo: 0x1fa0, Hi: 0x1fa7, Stride: 0x1},
+		unicode.Range16{Lo: 0x1fb0, Hi: 0x1fb4, Stride: 0x1},
+		unicode.Range16{Lo: 0x1fb6, Hi: 0x1fb7, Stride: 0x1},
+		unicode.Range16{Lo: 0x1fbe, Hi: 0x1fbe, Stride: 0x1},
+		unicode.Range16{Lo: 0x1fc2, Hi: 0x1fc4, Stride: 0x1},
+		unicode.Range16{Lo: 0x1fc6, Hi: 0x1fc7, Stride: 0x1},
+		unicode.Range16{Lo: 0x1fd0, Hi: 0x1fd3, Stride: 0x1},
+		unicode.Range16{Lo: 0x1fd6, Hi: 0x1fd7, Stride: 0x1},
+		unicode.Range16{Lo: 0x1fe0, Hi: 0x1fe7, Stride: 0x1},
+		unicode.Range16{Lo: 0x1ff2, Hi: 0x1ff4, Stride: 0x1},
+		unicode.Range16{Lo: 0x1ff6, Hi: 0x1ff7, Stride: 0x1},
+		unicode.Range16{Lo: 0x2071, Hi: 0x2071, Stride: 0x1},
+		unicode.Range16{Lo: 0x207f, Hi: 0x207f, Stride: 0x1},
+		unicode.Range16{Lo: 0x2090, Hi: 0x209c, Stride: 0x1},
+		unicode.Range16{Lo: 0x210a, Hi: 0x210a, Stride: 0x1},
+		unicode.Range16{Lo: 0x210e, Hi: 0x210f, Stride: 0x1},
+		unicode.Range16{Lo: 0x2113, Hi: 0x2113, Stride: 0x1},
+		unicode.Range16{Lo: 0x212f, Hi: 0x212f, Stride: 0x1},
+		unicode.Range16{Lo: 0x2134, Hi: 0x2134, Stride: 0x1},
+		unicode.Range16{Lo: 0x2139, Hi: 0x2139, Stride: 0x1},
+		unicode.Range16{Lo: 0x213c, Hi: 0x213d, Stride: 0x1},
+		unicode.Range16{Lo: 0x2146, Hi: 0x2149, Stride: 0x1},
+		unicode.Range16{Lo: 0x214e, Hi: 0x214e, Stride: 0x1},
+		unicode.Range16{Lo: 0x2170, Hi: 0x217f, Stride: 0x1},
+		unicode.Range16{Lo: 0x2184, Hi: 0x2184, Stride: 0x1},
+		unicode.Range16{Lo: 0x24d0, Hi: 0x24e9, Stride: 0x1},
+		unicode.Range16{Lo: 0x2c30, Hi: 0x2c5f, Stride: 0x1},
+		unicode.Range16{Lo: 0x2c61, Hi: 0x2c61, Stride: 0x1},
+		unicode.Range16{Lo: 0x2c65, Hi: 0x2c66, Stride: 0x1},
+		unicode.Range16{Lo: 0x2c68, Hi: 0x2c68, Stride: 0x1},
+		unicode.Range16{Lo: 0x2c6a, Hi: 0x2c6a, Stride: 0x1},
+		unicode.Range16{Lo: 0x2c6c, Hi: 0x2c6c, Stride: 0x1},
+		unicode.Range16{Lo: 0x2c71, Hi: 0x2c71, Stride: 0x1},
+		unicode.Range16{Lo: 0x2c73, Hi: 0x2c74, Stride: 0x1},
+		unicode.Range16{Lo: 0x2c76, Hi: 0x2c7b, Stride: 0x1},
+		unicode.Range16{Lo: 0x2c7c, Hi: 0x2c7d, Stride: 0x1},
+		unicode.Range16{Lo: 0x2c81, Hi: 0x2c81, Stride: 0x1},
+		unicode.Range16{Lo: 0x2c83, Hi: 0x2c83, Stride: 0x1},
+		unicode.Range16{Lo: 0x2c85, Hi: 0x2c85, Stride: 0x1},
+		unicode.Range16{Lo: 0x2c87, Hi: 0x2c87, Stride: 0x1},
+		unicode.Range16{Lo: 0x2c89, Hi: 0x2c89, Stride: 0x1},
+		unicode.Range16{Lo: 0x2c8b, Hi: 0x2c8b, Stride: 0x1},
+		unicode.Range16{Lo: 0x2c8d, Hi: 0x2c8d, Stride: 0x1},
+		unicode.Range16{Lo: 0x2c8f, Hi: 0x2c8f, Stride: 0x1},
+		unicode.Range16{Lo: 0x2c91, Hi: 0x2c91, Stride: 0x1},
+		unicode.Range16{Lo: 0x2c93, Hi: 0x2c93, Stride: 0x1},
+		unicode.Range16{Lo: 0x2c95, Hi: 0x2c95, Stride: 0x1},
+		unicode.Range16{Lo: 0x2c97, Hi: 0x2c97, Stride: 0x1},
+		unicode.Range16{Lo: 0x2c99, Hi: 0x2c99, Stride: 0x1},
+		unicode.Range16{Lo: 0x2c9b, Hi: 0x2c9b, Stride: 0x1},
+		unicode.Range16{Lo: 0x2c9d, Hi: 0x2c9d, Stride: 0x1},
+		unicode.Range16{Lo: 0x2c9f, Hi: 0x2c9f, Stride: 0x1},
+		unicode.Range16{Lo: 0x2ca1, Hi: 0x2ca1, Stride: 0x1},
+		unicode.Range16{Lo: 0x2ca3, Hi: 0x2ca3, Stride: 0x1},
+		unicode.Range16{Lo: 0x2ca5, Hi: 0x2ca5, Stride: 0x1},
+		unicode.Range16{Lo: 0x2ca7, Hi: 0x2ca7, Stride: 0x1},
+		unicode.Range16{Lo: 0x2ca9, Hi: 0x2ca9, Stride: 0x1},
+		unicode.Range16{Lo: 0x2cab, Hi: 0x2cab, Stride: 0x1},
+		unicode.Range16{Lo: 0x2cad, Hi: 0x2cad, Stride: 0x1},
+		unicode.Range16{Lo: 0x2caf, Hi: 0x2caf, Stride: 0x1},
+		unicode.Range16{Lo: 0x2cb1, Hi: 0x2cb1, Stride: 0x1},
+		unicode.Range16{Lo: 0x2cb3, Hi: 0x2cb3, Stride: 0x1},
+		unicode.Range16{Lo: 0x2cb5, Hi: 0x2cb5, Stride: 0x1},
+		unicode.Range16{Lo: 0x2cb7, Hi: 0x2cb7, Stride: 0x1},
+		unicode.Range16{Lo: 0x2cb9, Hi: 0x2cb9, Stride: 0x1},
+		unicode.Range16{Lo: 0x2cbb, Hi: 0x2cbb, Stride: 0x1},
+		unicode.Range16{Lo: 0x2cbd, Hi: 0x2cbd, Stride: 0x1},
+		unicode.Range16{Lo: 0x2cbf, Hi: 0x2cbf, Stride: 0x1},
+		unicode.Range16{Lo: 0x2cc1, Hi: 0x2cc1, Stride: 0x1},
+		unicode.Range16{Lo: 0x2cc3, Hi: 0x2cc3, Stride: 0x1},
+		unicode.Range16{Lo: 0x2cc5, Hi: 0x2cc5, Stride: 0x1},
+		unicode.Range16{Lo: 0x2cc7, Hi: 0x2cc7, Stride: 0x1},
+		unicode.Range16{Lo: 0x2cc9, Hi: 0x2cc9, Stride: 0x1},
+		unicode.Range16{Lo: 0x2ccb, Hi: 0x2ccb, Stride: 0x1},
+		unicode.Range16{Lo: 0x2ccd, Hi: 0x2ccd, Stride: 0x1},
+		unicode.Range16{Lo: 0x2ccf, Hi: 0x2ccf, Stride: 0x1},
+		unicode.Range16{Lo: 0x2cd1, Hi: 0x2cd1, Stride: 0x1},
+		unicode.Range16{Lo: 0x2cd3, Hi: 0x2cd3, Stride: 0x1},
+		unicode.Range16{Lo: 0x2cd5, Hi: 0x2cd5, Stride: 0x1},
+		unicode.Range16{Lo: 0x2cd7, Hi: 0x2cd7, Stride: 0x1},
+		unicode.Range16{Lo: 0x2cd9, Hi: 0x2cd9, Stride: 0x1},
+		unicode.Range16{Lo: 0x2cdb, Hi: 0x2cdb, Stride: 0x1},
+		unicode.Range16{Lo: 0x2cdd, Hi: 0x2cdd, Stride: 0x1},
+		unicode.Range16{Lo: 0x2cdf, Hi: 0x2cdf, Stride: 0x1},
+		unicode.Range16{Lo: 0x2ce1, Hi: 0x2ce1, Stride: 0x1},
+		unicode.Range16{Lo: 0x2ce3, Hi: 0x2ce4, Stride: 0x1},
+		unicode.Range16{Lo: 0x2cec, Hi: 0x2cec, Stride: 0x1},
+		unicode.Range16{Lo: 0x2cee, Hi: 0x2cee, Stride: 0x1},
+		unicode.Range16{Lo: 0x2cf3, Hi: 0x2cf3, Stride: 0x1},
+		unicode.Range16{Lo: 0x2d00, Hi: 0x2d25, Stride: 0x1},
+		unicode.Range16{Lo: 0x2d27, Hi: 0x2d27, Stride: 0x1},
+		unicode.Range16{Lo: 0x2d2d, Hi: 0x2d2d, Stride: 0x1},
+		unicode.Range16{Lo: 0xa641, Hi: 0xa641, Stride: 0x1},
+		unicode.Range16{Lo: 0xa643, Hi: 0xa643, Stride: 0x1},
+		unicode.Range16{Lo: 0xa645, Hi: 0xa645, Stride: 0x1},
+		unicode.Range16{Lo: 0xa647, Hi: 0xa647, Stride: 0x1},
+		unicode.Range16{Lo: 0xa649, Hi: 0xa649, Stride: 0x1},
+		unicode.Range16{Lo: 0xa64b, Hi: 0xa64b, Stride: 0x1},
+		unicode.Range16{Lo: 0xa64d, Hi: 0xa64d, Stride: 0x1},
+		unicode.Range16{Lo: 0xa64f, Hi: 0xa64f, Stride: 0x1},
+		unicode.Range16{Lo: 0xa651, Hi: 0xa651, Stride: 0x1},
+		unicode.Range16{Lo: 0xa653, Hi: 0xa653, Stride: 0x1},
+		unicode.Range16{Lo: 0xa655, Hi: 0xa655, Stride: 0x1},
+		unicode.Range16{Lo: 0xa657, Hi: 0xa657, Stride: 0x1},
+		unicode.Range16{Lo: 0xa659, Hi: 0xa659, Stride: 0x1},
+		unicode.Range16{Lo: 0xa65b, Hi: 0xa65b, Stride: 0x1},
+		unicode.Range16{Lo: 0xa65d, Hi: 0xa65d, Stride: 0x1},
+		unicode.Range16{Lo: 0xa65f, Hi: 0xa65f, Stride: 0x1},
+		unicode.Range16{Lo: 0xa661, Hi: 0xa661, Stride: 0x1},
+		unicode.Range16{Lo: 0xa663, Hi: 0xa663, Stride: 0x1},
+		unicode.Range16{Lo: 0xa665, Hi: 0xa665, Stride: 0x1},
+		unicode.Range16{Lo: 0xa667, Hi: 0xa667, Stride: 0x1},
+		unicode.Range16{Lo: 0xa669, Hi: 0xa669, Stride: 0x1},
+		unicode.Range16{Lo: 0xa66b, Hi: 0xa66b, Stride: 0x1},
+		unicode.Range16{Lo: 0xa66d, Hi: 0xa66d, Stride: 0x1},
+		unicode.Range16{Lo: 0xa681, Hi: 0xa681, Stride: 0x1},
+		unicode.Range16{Lo: 0xa683, Hi: 0xa683, Stride: 0x1},
+		unicode.Range16{Lo: 0xa685, Hi: 0xa685, Stride: 0x1},
+		unicode.Range16{Lo: 0xa687, Hi: 0xa687, Stride: 0x1},
+		unicode.Range16{Lo: 0xa689, Hi: 0xa689, Stride: 0x1},
+		unicode.Range16{Lo: 0xa68b, Hi: 0xa68b, Stride: 0x1},
+		unicode.Range16{Lo: 0xa68d, Hi: 0xa68d, Stride: 0x1},
+		unicode.Range16{Lo: 0xa68f, Hi: 0xa68f, Stride: 0x1},
+		unicode.Range16{Lo: 0xa691, Hi: 0xa691, Stride: 0x1},
+		unicode.Range16{Lo: 0xa693, Hi: 0xa693, Stride: 0x1},
+		unicode.Range16{Lo: 0xa695, Hi: 0xa695, Stride: 0x1},
+		unicode.Range16{Lo: 0xa697, Hi: 0xa697, Stride: 0x1},
+		unicode.Range16{Lo: 0xa699, Hi: 0xa699, Stride: 0x1},
+		unicode.Range16{Lo: 0xa69b, Hi: 0xa69b, Stride: 0x1},
+		unicode.Range16{Lo: 0xa69c, Hi: 0xa69d, Stride: 0x1},
+		unicode.Range16{Lo: 0xa723, Hi: 0xa723, Stride: 0x1},
+		unicode.Range16{Lo: 0xa725, Hi: 0xa725, Stride: 0x1},
+		unicode.Range16{Lo: 0xa727, Hi: 0xa727, Stride: 0x1},
+		unicode.Range16{Lo: 0xa729, Hi: 0xa729, Stride: 0x1},
+		unicode.Range16{Lo: 0xa72b, Hi: 0xa72b, Stride: 0x1},
+		unicode.Range16{Lo: 0xa72d, Hi: 0xa72d, Stride: 0x1},
+		unicode.Range16{Lo: 0xa72f, Hi: 0xa731, Stride: 0x1},
+		unicode.Range16{Lo: 0xa733, Hi: 0xa733, Stride: 0x1},
+		unicode.Range16{Lo: 0xa735, Hi: 0xa735, Stride: 0x1},
+		unicode.Range16{Lo: 0xa737, Hi: 0xa737, Stride: 0x1},
+		unicode.Range16{Lo: 0xa739, Hi: 0xa739, Stride: 0x1},
+		unicode.Range16{Lo: 0xa73b, Hi: 0xa73b, Stride: 0x1},
+		unicode.Range16{Lo: 0xa73d, Hi: 0xa73d, Stride: 0x1},
+		unicode.Range16{Lo: 0xa73f, Hi: 0xa73f, Stride: 0x1},
+		unicode.Range16{Lo: 0xa741, Hi: 0xa741, Stride: 0x1},
+		unicode.Range16{Lo: 0xa743, Hi: 0xa743, Stride: 0x1},
+		unicode.Range16{Lo: 0xa745, Hi: 0xa745, Stride: 0x1},
+		unicode.Range16{Lo: 0xa747, Hi: 0xa747, Stride: 0x1},
+		unicode.Range16{Lo: 0xa749, Hi: 0xa749, Stride: 0x1},
+		unicode.Range16{Lo: 0xa74b, Hi: 0xa74b, Stride: 0x1},
+		unicode.Range16{Lo: 0xa74d, Hi: 0xa74d, Stride: 0x1},
+		unicode.Range16{Lo: 0xa74f, Hi: 0xa74f, Stride: 0x1},
+		unicode.Range16{Lo: 0xa751, Hi: 0xa751, Stride: 0x1},
+		unicode.Range16{Lo: 0xa753, Hi: 0xa753, Stride: 0x1},
+		unicode.Range16{Lo: 0xa755, Hi: 0xa755, Stride: 0x1},
+		unicode.Range16{Lo: 0xa757, Hi: 0xa757, Stride: 0x1},
+		unicode.Range16{Lo: 0xa759, Hi: 0xa759, Stride: 0x1},
+		unicode.Range16{Lo: 0xa75b, Hi: 0xa75b, Stride: 0x1},
+		unicode.Range16{Lo: 0xa75d, Hi: 0xa75d, Stride: 0x1},
+		unicode.Range16{Lo: 0xa75f, Hi: 0xa75f, Stride: 0x1},
+		unicode.Range16{Lo: 0xa761, Hi: 0xa761, Stride: 0x1},
+		unicode.Range16{Lo: 0xa763, Hi: 0xa763, Stride: 0x1},
+		unicode.Range16{Lo: 0xa765, Hi: 0xa765, Stride: 0x1},
+		unicode.Range16{Lo: 0xa767, Hi: 0xa767, Stride: 0x1},
+		unicode.Range16{Lo: 0xa769, Hi: 0xa769, Stride: 0x1},
+		unicode.Range16{Lo: 0xa76b, Hi: 0xa76b, Stride: 0x1},
+		unicode.Range16{Lo: 0xa76d, Hi: 0xa76d, Stride: 0x1},
+		unicode.Range16{Lo: 0xa76f, Hi: 0xa76f, Stride: 0x1},
+		unicode.Range16{Lo: 0xa770, Hi: 0xa770, Stride: 0x1},
+		unicode.Range16{Lo: 0xa771, Hi: 0xa778, Stride: 0x1},
+		unicode.Range16{Lo: 0xa77a, Hi: 0xa77a, Stride: 0x1},
+		unicode.Range16{Lo: 0xa77c, Hi: 0xa77c, Stride: 0x1},
+		unicode.Range16{Lo: 0xa77f, Hi: 0xa77f, Stride: 0x1},
+		unicode.Range16{Lo: 0xa781, Hi: 0xa781, Stride: 0x1},
+		unicode.Range16{Lo: 0xa783, Hi: 0xa783, Stride: 0x1},
+		unicode.Range16{Lo: 0xa785, Hi: 0xa785, Stride: 0x1},
+		unicode.Range16{Lo: 0xa787, Hi: 0xa787, Stride: 0x1},
+		unicode.Range16{Lo: 0xa78c, Hi: 0xa78c, Stride: 0x1},
+		unicode.Range16{Lo: 0xa78e, Hi: 0xa78e, Stride: 0x1},
+		unicode.Range16{Lo: 0xa791, Hi: 0xa791, Stride: 0x1},
+		unicode.Range16{Lo: 0xa793, Hi: 0xa795, Stride: 0x1},
+		unicode.Range16{Lo: 0xa797, Hi: 0xa797, Stride: 0x1},
+		unicode.Range16{Lo: 0xa799, Hi: 0xa799, Stride: 0x1},
+		unicode.Range16{Lo: 0xa79b, Hi: 0xa79b, Stride: 0x1},
+		unicode.Range16{Lo: 0xa79d, Hi: 0xa79d, Stride: 0x1},
+		unicode.Range16{Lo: 0xa79f, Hi: 0xa79f, Stride: 0x1},
+		unicode.Range16{Lo: 0xa7a1, Hi: 0xa7a1, Stride: 0x1},
+		unicode.Range16{Lo: 0xa7a3, Hi: 0xa7a3, Stride: 0x1},
+		unicode.Range16{Lo: 0xa7a5, Hi: 0xa7a5, Stride: 0x1},
+		unicode.Range16{Lo: 0xa7a7, Hi: 0xa7a7, Stride: 0x1},
+		unicode.Range16{Lo: 0xa7a9, Hi: 0xa7a9, Stride: 0x1},
+		unicode.Range16{Lo: 0xa7af, Hi: 0xa7af, Stride: 0x1},
+		unicode.Range16{Lo: 0xa7b5, Hi: 0xa7b5, Stride: 0x1},
+		unicode.Range16{Lo: 0xa7b7, Hi: 0xa7b7, Stride: 0x1},
+		unicode.Range16{Lo: 0xa7b9, Hi: 0xa7b9, Stride: 0x1},
+		unicode.Range16{Lo: 0xa7bb, Hi: 0xa7bb, Stride: 0x1},
+		unicode.Range16{Lo: 0xa7bd, Hi: 0xa7bd, Stride: 0x1},
+		unicode.Range16{Lo: 0xa7bf, Hi: 0xa7bf, Stride: 0x1},
+		unicode.Range16{Lo: 0xa7c1, Hi: 0xa7c1, Stride: 0x1},
+		unicode.Range16{Lo: 0xa7c3, Hi: 0xa7c3, Stride: 0x1},
+		unicode.Range16{Lo: 0xa7c8, Hi: 0xa7c8, Stride: 0x1},
+		unicode.Range16{Lo: 0xa7ca, Hi: 0xa7ca, Stride: 0x1},
+		unicode.Range16{Lo: 0xa7d1, Hi: 0xa7d1, Stride: 0x1},
+		unicode.Range16{Lo: 0xa7d3, Hi: 0xa7d3, Stride: 0x1},
+		unicode.Range16{Lo: 0xa7d5, Hi: 0xa7d5, Stride: 0x1},
+		unicode.Range16{Lo: 0xa7d7, Hi: 0xa7d7, Stride: 0x1},
+		unicode.Range16{Lo: 0xa7d9, Hi: 0xa7d9, Stride: 0x1},
+		unicode.Range16{Lo: 0xa7f2, Hi: 0xa7f4, Stride: 0x1},
+		unicode.Range16{Lo: 0xa7f6, Hi: 0xa7f6, Stride: 0x1},
+		unicode.Range16{Lo: 0xa7f8, Hi: 0xa7f9, Stride: 0x1},
+		unicode.Range16{Lo: 0xa7fa, Hi: 0xa7fa, Stride: 0x1},
+		unicode.Range16{Lo: 0xab30, Hi: 0xab5a, Stride: 0x1},
+		unicode.Range16{Lo: 0xab5c, Hi: 0xab5f, Stride: 0x1},
+		unicode.Range16{Lo: 0xab60, Hi: 0xab68, Stride: 0x1},
+		unicode.Range16{Lo: 0xab69, Hi: 0xab69, Stride: 0x1},
+		unicode.Range16{Lo: 0xab70, Hi: 0xabbf, Stride: 0x1},
+		unicode.Range16{Lo: 0xfb00, Hi: 0xfb06, Stride: 0x1},
+		unicode.Range16{Lo: 0xfb13, Hi: 0xfb17, Stride: 0x1},
+		unicode.Range16{Lo: 0xff41, Hi: 0xff5a, Stride: 0x1},
+	},
+	R32: []unicode.Range32{
+		unicode.Range32{Lo: 0x10428, Hi: 0x1044f, Stride: 0x1},
+		unicode.Range32{Lo: 0x104d8, Hi: 0x104fb, Stride: 0x1},
+		unicode.Range32{Lo: 0x10597, Hi: 0x105a1, Stride: 0x1},
+		unicode.Range32{Lo: 0x105a3, Hi: 0x105b1, Stride: 0x1},
+		unicode.Range32{Lo: 0x105b3, Hi: 0x105b9, Stride: 0x1},
+		unicode.Range32{Lo: 0x105bb, Hi: 0x105bc, Stride: 0x1},
+		unicode.Range32{Lo: 0x10780, Hi: 0x10780, Stride: 0x1},
+		unicode.Range32{Lo: 0x10783, Hi: 0x10785, Stride: 0x1},
+		unicode.Range32{Lo: 0x10787, Hi: 0x107b0, Stride: 0x1},
+		unicode.Range32{Lo: 0x107b2, Hi: 0x107ba, Stride: 0x1},
+		unicode.Range32{Lo: 0x10cc0, Hi: 0x10cf2, Stride: 0x1},
+		unicode.Range32{Lo: 0x118c0, Hi: 0x118df, Stride: 0x1},
+		unicode.Range32{Lo: 0x16e60, Hi: 0x16e7f, Stride: 0x1},
+		unicode.Range32{Lo: 0x1d41a, Hi: 0x1d433, Stride: 0x1},
+		unicode.Range32{Lo: 0x1d44e, Hi: 0x1d454, Stride: 0x1},
+		unicode.Range32{Lo: 0x1d456, Hi: 0x1d467, Stride: 0x1},
+		unicode.Range32{Lo: 0x1d482, Hi: 0x1d49b, Stride: 0x1},
+		unicode.Range32{Lo: 0x1d4b6, Hi: 0x1d4b9, Stride: 0x1},
+		unicode.Range32{Lo: 0x1d4bb, Hi: 0x1d4bb, Stride: 0x1},
+		unicode.Range32{Lo: 0x1d4bd, Hi: 0x1d4c3, Stride: 0x1},
+		unicode.Range32{Lo: 0x1d4c5, Hi: 0x1d4cf, Stride: 0x1},
+		unicode.Range32{Lo: 0x1d4ea, Hi: 0x1d503, Stride: 0x1},
+		unicode.Range32{Lo: 0x1d51e, Hi: 0x1d537, Stride: 0x1},
+		unicode.Range32{Lo: 0x1d552, Hi: 0x1d56b, Stride: 0x1},
+		unicode.Range32{Lo: 0x1d586, Hi: 0x1d59f, Stride: 0x1},
+		unicode.Range32{Lo: 0x1d5ba, Hi: 0x1d5d3, Stride: 0x1},
+		unicode.Range32{Lo: 0x1d5ee, Hi: 0x1d607, Stride: 0x1},
+		unicode.Range32{Lo: 0x1d622, Hi: 0x1d63b, Stride: 0x1},
+		unicode.Range32{Lo: 0x1d656, Hi: 0x1d66f, Stride: 0x1},
+		unicode.Range32{Lo: 0x1d68a, Hi: 0x1d6a5, Stride: 0x1},
+		unicode.Range32{Lo: 0x1d6c2, Hi: 0x1d6da, Stride: 0x1},
+		unicode.Range32{Lo: 0x1d6dc, Hi: 0x1d6e1, Stride: 0x1},
+		unicode.Range32{Lo: 0x1d6fc, Hi: 0x1d714, Stride: 0x1},
+		unicode.Range32{Lo: 0x1d716, Hi: 0x1d71b, Stride: 0x1},
+		unicode.Range32{Lo: 0x1d736, Hi: 0x1d74e, Stride: 0x1},
+		unicode.Range32{Lo: 0x1d750, Hi: 0x1d755, Stride: 0x1},
+		unicode.Range32{Lo: 0x1d770, Hi: 0x1d788, Stride: 0x1},
+		unicode.Range32{Lo: 0x1d78a, Hi: 0x1d78f, Stride: 0x1},
+		unicode.Range32{Lo: 0x1d7aa, Hi: 0x1d7c2, Stride: 0x1},
+		unicode.Range32{Lo: 0x1d7c4, Hi: 0x1d7c9, Stride: 0x1},
+		unicode.Range32{Lo: 0x1d7cb, Hi: 0x1d7cb, Stride: 0x1},
+		unicode.Range32{Lo: 0x1df00, Hi: 0x1df09, Stride: 0x1},
+		unicode.Range32{Lo: 0x1df0b, Hi: 0x1df1e, Stride: 0x1},
+		unicode.Range32{Lo: 0x1df25, Hi: 0x1df2a, Stride: 0x1},
+		unicode.Range32{Lo: 0x1e030, Hi: 0x1e06d, Stride: 0x1},
+		unicode.Range32{Lo: 0x1e922, Hi: 0x1e943, Stride: 0x1},
+	},
+	LatinOffset: 6,
+}
+
+var _SentenceNumeric = &unicode.RangeTable{
+	R16: []unicode.Range16{
+		unicode.Range16{Lo: 0x30, Hi: 0x39, Stride: 0x1},
+		unicode.Range16{Lo: 0x660, Hi: 0x669, Stride: 0x1},
+		unicode.Range16{Lo: 0x66b, Hi: 0x66c, Stride: 0x1},
+		unicode.Range16{Lo: 0x6f0, Hi: 0x6f9, Stride: 0x1},
+		unicode.Range16{Lo: 0x7c0, Hi: 0x7c9, Stride: 0x1},
+		unicode.Range16{Lo: 0x966, Hi: 0x96f, Stride: 0x1},
+		unicode.Range16{Lo: 0x9e6, Hi: 0x9ef, Stride: 0x1},
+		unicode.Range16{Lo: 0xa66, Hi: 0xa6f, Stride: 0x1},
+		unicode.Range16{Lo: 0xae6, Hi: 0xaef, Stride: 0x1},
+		unicode.Range16{Lo: 0xb66, Hi: 0xb6f, Stride: 0x1},
+		unicode.Range16{Lo: 0xbe6, Hi: 0xbef, Stride: 0x1},
+		unicode.Range16{Lo: 0xc66, Hi: 0xc6f, Stride: 0x1},
+		unicode.Range16{Lo: 0xce6, Hi: 0xcef, Stride: 0x1},
+		unicode.Range16{Lo: 0xd66, Hi: 0xd6f, Stride: 0x1},
+		unicode.Range16{Lo: 0xde6, Hi: 0xdef, Stride: 0x1},
+		unicode.Range16{Lo: 0xe50, Hi: 0xe59, Stride: 0x1},
+		unicode.Range16{Lo: 0xed0, Hi: 0xed9, Stride: 0x1},
+		unicode.Range16{Lo: 0xf20, Hi: 0xf29, Stride: 0x1},
+		unicode.Range16{Lo: 0x1040, Hi: 0x1049, Stride: 0x1},
+		unicode.Range16{Lo: 0x1090, Hi: 0x1099, Stride: 0x1},
+		unicode.Range16{Lo: 0x17e0, Hi: 0x17e9, Stride: 0x1},
+		unicode.Range16{Lo: 0x1810, Hi: 0x1819, Stride: 0x1},
+		unicode.Range16{Lo: 0x1946, Hi: 0x194f, Stride: 0x1},
+		unicode.Range16{Lo: 0x19d0, Hi: 0x19d9, Stride: 0x1},
+		unicode.Range16{Lo: 0x1a80, Hi: 0x1a89, Stride: 0x1},
+		unicode.Range16{Lo: 0x1a90, Hi: 0x1a99, Stride: 0x1},
+		unicode.Range16{Lo: 0x1b50, Hi: 0x1b59, Stride: 0x1},
+		unicode.Range16{Lo: 0x1bb0, Hi: 0x1bb9, Stride: 0x1},
+		unicode.Range16{Lo: 0x1c40, Hi: 0x1c49, Stride: 0x1},
+		unicode.Range16{Lo: 0x1c50, Hi: 0x1c59, Stride: 0x1},
+		unicode.Range16{Lo: 0xa620, Hi: 0xa629, Stride: 0x1},
+		unicode.Range16{Lo: 0xa8d0, Hi: 0xa8d9, Stride: 0x1},
+		unicode.Range16{Lo: 0xa900, Hi: 0xa909, Stride: 0x1},
+		unicode.Range16{Lo: 0xa9d0, Hi: 0xa9d9, Stride: 0x1},
+		unicode.Range16{Lo: 0xa9f0, Hi: 0xa9f9, Stride: 0x1},
+		unicode.Range16{Lo: 0xaa50, Hi: 0xaa59, Stride: 0x1},
+		unicode.Range16{Lo: 0xabf0, Hi: 0xabf9, Stride: 0x1},
+		unicode.Range16{Lo: 0xff10, Hi: 0xff19, Stride: 0x1},
+	},
+	R32: []unicode.Range32{
+		unicode.Range32{Lo: 0x104a0, Hi: 0x104a9, Stride: 0x1},
+		unicode.Range32{Lo: 0x10d30, Hi: 0x10d39, Stride: 0x1},
+		unicode.Range32{Lo: 0x11066, Hi: 0x1106f, Stride: 0x1},
+		unicode.Range32{Lo: 0x110f0, Hi: 0x110f9, Stride: 0x1},
+		unicode.Range32{Lo: 0x11136, Hi: 0x1113f, Stride: 0x1},
+		unicode.Range32{Lo: 0x111d0, Hi: 0x111d9, Stride: 0x1},
+		unicode.Range32{Lo: 0x112f0, Hi: 0x112f9, Stride: 0x1},
+		unicode.Range32{Lo: 0x11450, Hi: 0x11459, Stride: 0x1},
+		unicode.Range32{Lo: 0x114d0, Hi: 0x114d9, Stride: 0x1},
+		unicode.Range32{Lo: 0x11650, Hi: 0x11659, Stride: 0x1},
+		unicode.Range32{Lo: 0x116c0, Hi: 0x116c9, Stride: 0x1},
+		unicode.Range32{Lo: 0x11730, Hi: 0x11739, Stride: 0x1},
+		unicode.Range32{Lo: 0x118e0, Hi: 0x118e9, Stride: 0x1},
+		unicode.Range32{Lo: 0x11950, Hi: 0x11959, Stride: 0x1},
+		unicode.Range32{Lo: 0x11c50, Hi: 0x11c59, Stride: 0x1},
+		unicode.Range32{Lo: 0x11d50, Hi: 0x11d59, Stride: 0x1},
+		unicode.Range32{Lo: 0x11da0, Hi: 0x11da9, Stride: 0x1},
+		unicode.Range32{Lo: 0x11f50, Hi: 0x11f59, Stride: 0x1},
+		unicode.Range32{Lo: 0x16a60, Hi: 0x16a69, Stride: 0x1},
+		unicode.Range32{Lo: 0x16ac0, Hi: 0x16ac9, Stride: 0x1},
+		unicode.Range32{Lo: 0x16b50, Hi: 0x16b59, Stride: 0x1},
+		unicode.Range32{Lo: 0x1d7ce, Hi: 0x1d7ff, Stride: 0x1},
+		unicode.Range32{Lo: 0x1e140, Hi: 0x1e149, Stride: 0x1},
+		unicode.Range32{Lo: 0x1e2f0, Hi: 0x1e2f9, Stride: 0x1},
+		unicode.Range32{Lo: 0x1e4f0, Hi: 0x1e4f9, Stride: 0x1},
+		unicode.Range32{Lo: 0x1e950, Hi: 0x1e959, Stride: 0x1},
+		unicode.Range32{Lo: 0x1fbf0, Hi: 0x1fbf9, Stride: 0x1},
+	},
+	LatinOffset: 1,
+}
+
+var _SentenceOLetter = &unicode.RangeTable{
+	R16: []unicode.Range16{
+		unicode.Range16{Lo: 0x1bb, Hi: 0x1bb, Stride: 0x1},
+		unicode.Range16{Lo: 0x1c0, Hi: 0x1c3, Stride: 0x1},
+		unicode.Range16{Lo: 0x294, Hi: 0x294, Stride: 0x1},
+		unicode.Range16{Lo: 0x2b9, Hi: 0x2bf, Stride: 0x1},
+		unicode.Range16{Lo: 0x2c6, Hi: 0x2d1, Stride: 0x1},
+		unicode.Range16{Lo: 0x2ec, Hi: 0x2ec, Stride: 0x1},
+		unicode.Range16{Lo: 0x2ee, Hi: 0x2ee, Stride: 0x1},
+		unicode.Range16{Lo: 0x374, Hi: 0x374, Stride: 0x1},
+		unicode.Range16{Lo: 0x559, Hi: 0x559, Stride: 0x1},
+		unicode.Range16{Lo: 0x5d0, Hi: 0x5ea, Stride: 0x1},
+		unicode.Range16{Lo: 0x5ef, Hi: 0x5f2, Stride: 0x1},
+		unicode.Range16{Lo: 0x5f3, Hi: 0x5f3, Stride: 0x1},
+		unicode.Range16{Lo: 0x620, Hi: 0x63f, Stride: 0x1},
+		unicode.Range16{Lo: 0x640, Hi: 0x640, Stride: 0x1},
+		unicode.Range16{Lo: 0x641, Hi: 0x64a, Stride: 0x1},
+		unicode.Range16{Lo: 0x66e, Hi: 0x66f, Stride: 0x1},
+		unicode.Range16{Lo: 0x671, Hi: 0x6d3, Stride: 0x1},
+		unicode.Range16{Lo: 0x6d5, Hi: 0x6d5, Stride: 0x1},
+		unicode.Range16{Lo: 0x6e5, Hi: 0x6e6, Stride: 0x1},
+		unicode.Range16{Lo: 0x6ee, Hi: 0x6ef, Stride: 0x1},
+		unicode.Range16{Lo: 0x6fa, Hi: 0x6fc, Stride: 0x1},
+		unicode.Range16{Lo: 0x6ff, Hi: 0x6ff, Stride: 0x1},
+		unicode.Range16{Lo: 0x710, Hi: 0x710, Stride: 0x1},
+		unicode.Range16{Lo: 0x712, Hi: 0x72f, Stride: 0x1},
+		unicode.Range16{Lo: 0x74d, Hi: 0x7a5, Stride: 0x1},
+		unicode.Range16{Lo: 0x7b1, Hi: 0x7b1, Stride: 0x1},
+		unicode.Range16{Lo: 0x7ca, Hi: 0x7ea, Stride: 0x1},
+		unicode.Range16{Lo: 0x7f4, Hi: 0x7f5, Stride: 0x1},
+		unicode.Range16{Lo: 0x7fa, Hi: 0x7fa, Stride: 0x1},
+		unicode.Range16{Lo: 0x800, Hi: 0x815, Stride: 0x1},
+		unicode.Range16{Lo: 0x81a, Hi: 0x81a, Stride: 0x1},
+		unicode.Range16{Lo: 0x824, Hi: 0x824, Stride: 0x1},
+		unicode.Range16{Lo: 0x828, Hi: 0x828, Stride: 0x1},
+		unicode.Range16{Lo: 0x840, Hi: 0x858, Stride: 0x1},
+		unicode.Range16{Lo: 0x860, Hi: 0x86a, Stride: 0x1},
+		unicode.Range16{Lo: 0x870, Hi: 0x887, Stride: 0x1},
+		unicode.Range16{Lo: 0x889, Hi: 0x88e, Stride: 0x1},
+		unicode.Range16{Lo: 0x8a0, Hi: 0x8c8, Stride: 0x1},
+		unicode.Range16{Lo: 0x8c9, Hi: 0x8c9, Stride: 0x1},
+		unicode.Range16{Lo: 0x904, Hi: 0x939, Stride: 0x1},
+		unicode.Range16{Lo: 0x93d, Hi: 0x93d, Stride: 0x1},
+		unicode.Range16{Lo: 0x950, Hi: 0x950, Stride: 0x1},
+		unicode.Range16{Lo: 0x958, Hi: 0x961, Stride: 0x1},
+		unicode.Range16{Lo: 0x971, Hi: 0x971, Stride: 0x1},
+		unicode.Range16{Lo: 0x972, Hi: 0x980, Stride: 0x1},
+		unicode.Range16{Lo: 0x985, Hi: 0x98c, Stride: 0x1},
+		unicode.Range16{Lo: 0x98f, Hi: 0x990, Stride: 0x1},
+		unicode.Range16{Lo: 0x993, Hi: 0x9a8, Stride: 0x1},
+		unicode.Range16{Lo: 0x9aa, Hi: 0x9b0, Stride: 0x1},
+		unicode.Range16{Lo: 0x9b2, Hi: 0x9b2, Stride: 0x1},
+		unicode.Range16{Lo: 0x9b6, Hi: 0x9b9, Stride: 0x1},
+		unicode.Range16{Lo: 0x9bd, Hi: 0x9bd, Stride: 0x1},
+		unicode.Range16{Lo: 0x9ce, Hi: 0x9ce, Stride: 0x1},
+		unicode.Range16{Lo: 0x9dc, Hi: 0x9dd, Stride: 0x1},
+		unicode.Range16{Lo: 0x9df, Hi: 0x9e1, Stride: 0x1},
+		unicode.Range16{Lo: 0x9f0, Hi: 0x9f1, Stride: 0x1},
+		unicode.Range16{Lo: 0x9fc, Hi: 0x9fc, Stride: 0x1},
+		unicode.Range16{Lo: 0xa05, Hi: 0xa0a, Stride: 0x1},
+		unicode.Range16{Lo: 0xa0f, Hi: 0xa10, Stride: 0x1},
+		unicode.Range16{Lo: 0xa13, Hi: 0xa28, Stride: 0x1},
+		unicode.Range16{Lo: 0xa2a, Hi: 0xa30, Stride: 0x1},
+		unicode.Range16{Lo: 0xa32, Hi: 0xa33, Stride: 0x1},
+		unicode.Range16{Lo: 0xa35, Hi: 0xa36, Stride: 0x1},
+		unicode.Range16{Lo: 0xa38, Hi: 0xa39, Stride: 0x1},
+		unicode.Range16{Lo: 0xa59, Hi: 0xa5c, Stride: 0x1},
+		unicode.Range16{Lo: 0xa5e, Hi: 0xa5e, Stride: 0x1},
+		unicode.Range16{Lo: 0xa72, Hi: 0xa74, Stride: 0x1},
+		unicode.Range16{Lo: 0xa85, Hi: 0xa8d, Stride: 0x1},
+		unicode.Range16{Lo: 0xa8f, Hi: 0xa91, Stride: 0x1},
+		unicode.Range16{Lo: 0xa93, Hi: 0xaa8, Stride: 0x1},
+		unicode.Range16{Lo: 0xaaa, Hi: 0xab0, Stride: 0x1},
+		unicode.Range16{Lo: 0xab2, Hi: 0xab3, Stride: 0x1},
+		unicode.Range16{Lo: 0xab5, Hi: 0xab9, Stride: 0x1},
+		unicode.Range16{Lo: 0xabd, Hi: 0xabd, Stride: 0x1},
+		unicode.Range16{Lo: 0xad0, Hi: 0xad0, Stride: 0x1},
+		unicode.Range16{Lo: 0xae0, Hi: 0xae1, Stride: 0x1},
+		unicode.Range16{Lo: 0xaf9, Hi: 0xaf9, Stride: 0x1},
+		unicode.Range16{Lo: 0xb05, Hi: 0xb0c, Stride: 0x1},
+		unicode.Range16{Lo: 0xb0f, Hi: 0xb10, Stride: 0x1},
+		unicode.Range16{Lo: 0xb13, Hi: 0xb28, Stride: 0x1},
+		unicode.Range16{Lo: 0xb2a, Hi: 0xb30, Stride: 0x1},
+		unicode.Range16{Lo: 0xb32, Hi: 0xb33, Stride: 0x1},
+		unicode.Range16{Lo: 0xb35, Hi: 0xb39, Stride: 0x1},
+		unicode.Range16{Lo: 0xb3d, Hi: 0xb3d, Stride: 0x1},
+		unicode.Range16{Lo: 0xb5c, Hi: 0xb5d, Stride: 0x1},
+		unicode.Range16{Lo: 0xb5f, Hi: 0xb61, Stride: 0x1},
+		unicode.Range16{Lo: 0xb71, Hi: 0xb71, Stride: 0x1},
+		unicode.Range16{Lo: 0xb83, Hi: 0xb83, Stride: 0x1},
+		unicode.Range16{Lo: 0xb85, Hi: 0xb8a, Stride: 0x1},
+		unicode.Range16{Lo: 0xb8e, Hi: 0xb90, Stride: 0x1},
+		unicode.Range16{Lo: 0xb92, Hi: 0xb95, Stride: 0x1},
+		unicode.Range16{Lo: 0xb99, Hi: 0xb9a, Stride: 0x1},
+		unicode.Range16{Lo: 0xb9c, Hi: 0xb9c, Stride: 0x1},
+		unicode.Range16{Lo: 0xb9e, Hi: 0xb9f, Stride: 0x1},
+		unicode.Range16{Lo: 0xba3, Hi: 0xba4, Stride: 0x1},
+		unicode.Range16{Lo: 0xba8, Hi: 0xbaa, Stride: 0x1},
+		unicode.Range16{Lo: 0xbae, Hi: 0xbb9, Stride: 0x1},
+		unicode.Range16{Lo: 0xbd0, Hi: 0xbd0, Stride: 0x1},
+		unicode.Range16{Lo: 0xc05, Hi: 0xc0c, Stride: 0x1},
+		unicode.Range16{Lo: 0xc0e, Hi: 0xc10, Stride: 0x1},
+		unicode.Range16{Lo: 0xc12, Hi: 0xc28, Stride: 0x1},
+		unicode.Range16{Lo: 0xc2a, Hi: 0xc39, Stride: 0x1},
+		unicode.Range16{Lo: 0xc3d, Hi: 0xc3d, Stride: 0x1},
+		unicode.Range16{Lo: 0xc58, Hi: 0xc5a, Stride: 0x1},
+		unicode.Range16{Lo: 0xc5d, Hi: 0xc5d, Stride: 0x1},
+		unicode.Range16{Lo: 0xc60, Hi: 0xc61, Stride: 0x1},
+		unicode.Range16{Lo: 0xc80, Hi: 0xc80, Stride: 0x1},
+		unicode.Range16{Lo: 0xc85, Hi: 0xc8c, Stride: 0x1},
+		unicode.Range16{Lo: 0xc8e, Hi: 0xc90, Stride: 0x1},
+		unicode.Range16{Lo: 0xc92, Hi: 0xca8, Stride: 0x1},
+		unicode.Range16{Lo: 0xcaa, Hi: 0xcb3, Stride: 0x1},
+		unicode.Range16{Lo: 0xcb5, Hi: 0xcb9, Stride: 0x1},
+		unicode.Range16{Lo: 0xcbd, Hi: 0xcbd, Stride: 0x1},
+		unicode.Range16{Lo: 0xcdd, Hi: 0xcde, Stride: 0x1},
+		unicode.Range16{Lo: 0xce0, Hi: 0xce1, Stride: 0x1},
+		unicode.Range16{Lo: 0xcf1, Hi: 0xcf2, Stride: 0x1},
+		unicode.Range16{Lo: 0xd04, Hi: 0xd0c, Stride: 0x1},
+		unicode.Range16{Lo: 0xd0e, Hi: 0xd10, Stride: 0x1},
+		unicode.Range16{Lo: 0xd12, Hi: 0xd3a, Stride: 0x1},
+		unicode.Range16{Lo: 0xd3d, Hi: 0xd3d, Stride: 0x1},
+		unicode.Range16{Lo: 0xd4e, Hi: 0xd4e, Stride: 0x1},
+		unicode.Range16{Lo: 0xd54, Hi: 0xd56, Stride: 0x1},
+		unicode.Range16{Lo: 0xd5f, Hi: 0xd61, Stride: 0x1},
+		unicode.Range16{Lo: 0xd7a, Hi: 0xd7f, Stride: 0x1},
+		unicode.Range16{Lo: 0xd85, Hi: 0xd96, Stride: 0x1},
+		unicode.Range16{Lo: 0xd9a, Hi: 0xdb1, Stride: 0x1},
+		unicode.Range16{Lo: 0xdb3, Hi: 0xdbb, Stride: 0x1},
+		unicode.Range16{Lo: 0xdbd, Hi: 0xdbd, Stride: 0x1},
+		unicode.Range16{Lo: 0xdc0, Hi: 0xdc6, Stride: 0x1},
+		unicode.Range16{Lo: 0xe01, Hi: 0xe30, Stride: 0x1},
+		unicode.Range16{Lo: 0xe32, Hi: 0xe33, Stride: 0x1},
+		unicode.Range16{Lo: 0xe40, Hi: 0xe45, Stride: 0x1},
+		unicode.Range16{Lo: 0xe46, Hi: 0xe46, Stride: 0x1},
+		unicode.Range16{Lo: 0xe81, Hi: 0xe82, Stride: 0x1},
+		unicode.Range16{Lo: 0xe84, Hi: 0xe84, Stride: 0x1},
+		unicode.Range16{Lo: 0xe86, Hi: 0xe8a, Stride: 0x1},
+		unicode.Range16{Lo: 0xe8c, Hi: 0xea3, Stride: 0x1},
+		unicode.Range16{Lo: 0xea5, Hi: 0xea5, Stride: 0x1},
+		unicode.Range16{Lo: 0xea7, Hi: 0xeb0, Stride: 0x1},
+		unicode.Range16{Lo: 0xeb2, Hi: 0xeb3, Stride: 0x1},
+		unicode.Range16{Lo: 0xebd, Hi: 0xebd, Stride: 0x1},
+		unicode.Range16{Lo: 0xec0, Hi: 0xec4, Stride: 0x1},
+		unicode.Range16{Lo: 0xec6, Hi: 0xec6, Stride: 0x1},
+		unicode.Range16{Lo: 0xedc, Hi: 0xedf, Stride: 0x1},
+		unicode.Range16{Lo: 0xf00, Hi: 0xf00, Stride: 0x1},
+		unicode.Range16{Lo: 0xf40, Hi: 0xf47, Stride: 0x1},
+		unicode.Range16{Lo: 0xf49, Hi: 0xf6c, Stride: 0x1},
+		unicode.Range16{Lo: 0xf88, Hi: 0xf8c, Stride: 0x1},
+		unicode.Range16{Lo: 0x1000, Hi: 0x102a, Stride: 0x1},
+		unicode.Range16{Lo: 0x103f, Hi: 0x103f, Stride: 0x1},
+		unicode.Range16{Lo: 0x1050, Hi: 0x1055, Stride: 0x1},
+		unicode.Range16{Lo: 0x105a, Hi: 0x105d, Stride: 0x1},
+		unicode.Range16{Lo: 0x1061, Hi: 0x1061, Stride: 0x1},
+		unicode.Range16{Lo: 0x1065, Hi: 0x1066, Stride: 0x1},
+		unicode.Range16{Lo: 0x106e, Hi: 0x1070, Stride: 0x1},
+		unicode.Range16{Lo: 0x1075, Hi: 0x1081, Stride: 0x1},
+		unicode.Range16{Lo: 0x108e, Hi: 0x108e, Stride: 0x1},
+		unicode.Range16{Lo: 0x10d0, Hi: 0x10fa, Stride: 0x1},
+		unicode.Range16{Lo: 0x10fd, Hi: 0x10ff, Stride: 0x1},
+		unicode.Range16{Lo: 0x1100, Hi: 0x1248, Stride: 0x1},
+		unicode.Range16{Lo: 0x124a, Hi: 0x124d, Stride: 0x1},
+		unicode.Range16{Lo: 0x1250, Hi: 0x1256, Stride: 0x1},
+		unicode.Range16{Lo: 0x1258, Hi: 0x1258, Stride: 0x1},
+		unicode.Range16{Lo: 0x125a, Hi: 0x125d, Stride: 0x1},
+		unicode.Range16{Lo: 0x1260, Hi: 0x1288, Stride: 0x1},
+		unicode.Range16{Lo: 0x128a, Hi: 0x128d, Stride: 0x1},
+		unicode.Range16{Lo: 0x1290, Hi: 0x12b0, Stride: 0x1},
+		unicode.Range16{Lo: 0x12b2, Hi: 0x12b5, Stride: 0x1},
+		unicode.Range16{Lo: 0x12b8, Hi: 0x12be, Stride: 0x1},
+		unicode.Range16{Lo: 0x12c0, Hi: 0x12c0, Stride: 0x1},
+		unicode.Range16{Lo: 0x12c2, Hi: 0x12c5, Stride: 0x1},
+		unicode.Range16{Lo: 0x12c8, Hi: 0x12d6, Stride: 0x1},
+		unicode.Range16{Lo: 0x12d8, Hi: 0x1310, Stride: 0x1},
+		unicode.Range16{Lo: 0x1312, Hi: 0x1315, Stride: 0x1},
+		unicode.Range16{Lo: 0x1318, Hi: 0x135a, Stride: 0x1},
+		unicode.Range16{Lo: 0x1380, Hi: 0x138f, Stride: 0x1},
+		unicode.Range16{Lo: 0x1401, Hi: 0x166c, Stride: 0x1},
+		unicode.Range16{Lo: 0x166f, Hi: 0x167f, Stride: 0x1},
+		unicode.Range16{Lo: 0x1681, Hi: 0x169a, Stride: 0x1},
+		unicode.Range16{Lo: 0x16a0, Hi: 0x16ea, Stride: 0x1},
+		unicode.Range16{Lo: 0x16ee, Hi: 0x16f0, Stride: 0x1},
+		unicode.Range16{Lo: 0x16f1, Hi: 0x16f8, Stride: 0x1},
+		unicode.Range16{Lo: 0x1700, Hi: 0x1711, Stride: 0x1},
+		unicode.Range16{Lo: 0x171f, Hi: 0x1731, Stride: 0x1},
+		unicode.Range16{Lo: 0x1740, Hi: 0x1751, Stride: 0x1},
+		unicode.Range16{Lo: 0x1760, Hi: 0x176c, Stride: 0x1},
+		unicode.Range16{Lo: 0x176e, Hi: 0x1770, Stride: 0x1},
+		unicode.Range16{Lo: 0x1780, Hi: 0x17b3, Stride: 0x1},
+		unicode.Range16{Lo: 0x17d7, Hi: 0x17d7, Stride: 0x1},
+		unicode.Range16{Lo: 0x17dc, Hi: 0x17dc, Stride: 0x1},
+		unicode.Range16{Lo: 0x1820, Hi: 0x1842, Stride: 0x1},
+		unicode.Range16{Lo: 0x1843, Hi: 0x1843, Stride: 0x1},
+		unicode.Range16{Lo: 0x1844, Hi: 0x1878, Stride: 0x1},
+		unicode.Range16{Lo: 0x1880, Hi: 0x1884, Stride: 0x1},
+		unicode.Range16{Lo: 0x1887, Hi: 0x18a8, Stride: 0x1},
+		unicode.Range16{Lo: 0x18aa, Hi: 0x18aa, Stride: 0x1},
+		unicode.Range16{Lo: 0x18b0, Hi: 0x18f5, Stride: 0x1},
+		unicode.Range16{Lo: 0x1900, Hi: 0x191e, Stride: 0x1},
+		unicode.Range16{Lo: 0x1950, Hi: 0x196d, Stride: 0x1},
+		unicode.Range16{Lo: 0x1970, Hi: 0x1974, Stride: 0x1},
+		unicode.Range16{Lo: 0x1980, Hi: 0x19ab, Stride: 0x1},
+		unicode.Range16{Lo: 0x19b0, Hi: 0x19c9, Stride: 0x1},
+		unicode.Range16{Lo: 0x1a00, Hi: 0x1a16, Stride: 0x1},
+		unicode.Range16{Lo: 0x1a20, Hi: 0x1a54, Stride: 0x1},
+		unicode.Range16{Lo: 0x1aa7, Hi: 0x1aa7, Stride: 0x1},
+		unicode.Range16{Lo: 0x1b05, Hi: 0x1b33, Stride: 0x1},
+		unicode.Range16{Lo: 0x1b45, Hi: 0x1b4c, Stride: 0x1},
+		unicode.Range16{Lo: 0x1b83, Hi: 0x1ba0, Stride: 0x1},
+		unicode.Range16{Lo: 0x1bae, Hi: 0x1baf, Stride: 0x1},
+		unicode.Range16{Lo: 0x1bba, Hi: 0x1be5, Stride: 0x1},
+		unicode.Range16{Lo: 0x1c00, Hi: 0x1c23, Stride: 0x1},
+		unicode.Range16{Lo: 0x1c4d, Hi: 0x1c4f, Stride: 0x1},
+		unicode.Range16{Lo: 0x1c5a, Hi: 0x1c77, Stride: 0x1},
+		unicode.Range16{Lo: 0x1c78, Hi: 0x1c7d, Stride: 0x1},
+		unicode.Range16{Lo: 0x1c90, Hi: 0x1cba, Stride: 0x1},
+		unicode.Range16{Lo: 0x1cbd, Hi: 0x1cbf, Stride: 0x1},
+		unicode.Range16{Lo: 0x1ce9, Hi: 0x1cec, Stride: 0x1},
+		unicode.Range16{Lo: 0x1cee, Hi: 0x1cf3, Stride: 0x1},
+		unicode.Range16{Lo: 0x1cf5, Hi: 0x1cf6, Stride: 0x1},
+		unicode.Range16{Lo: 0x1cfa, Hi: 0x1cfa, Stride: 0x1},
+		unicode.Range16{Lo: 0x2135, Hi: 0x2138, Stride: 0x1},
+		unicode.Range16{Lo: 0x2180, Hi: 0x2182, Stride: 0x1},
+		unicode.Range16{Lo: 0x2185, Hi: 0x2188, Stride: 0x1},
+		unicode.Range16{Lo: 0x2d30, Hi: 0x2d67, Stride: 0x1},
+		unicode.Range16{Lo: 0x2d6f, Hi: 0x2d6f, Stride: 0x1},
+		unicode.Range16{Lo: 0x2d80, Hi: 0x2d96, Stride: 0x1},
+		unicode.Range16{Lo: 0x2da0, Hi: 0x2da6, Stride: 0x1},
+		unicode.Range16{Lo: 0x2da8, Hi: 0x2dae, Stride: 0x1},
+		unicode.Range16{Lo: 0x2db0, Hi: 0x2db6, Stride: 0x1},
+		unicode.Range16{Lo: 0x2db8, Hi: 0x2dbe, Stride: 0x1},
+		unicode.Range16{Lo: 0x2dc0, Hi: 0x2dc6, Stride: 0x1},
+		unicode.Range16{Lo: 0x2dc8, Hi: 0x2dce, Stride: 0x1},
+		unicode.Range16{Lo: 0x2dd0, Hi: 0x2dd6, Stride: 0x1},
+		unicode.Range16{Lo: 0x2dd8, Hi: 0x2dde, Stride: 0x1},
+		unicode.Range16{Lo: 0x2e2f, Hi: 0x2e2f, Stride: 0x1},
+		unicode.Range16{Lo: 0x3005, Hi: 0x3005, Stride: 0x1},
+		unicode.Range16{Lo: 0x3006, Hi: 0x3006, Stride: 0x1},
+		unicode.Range16{Lo: 0x3007, Hi: 0x3007, Stride: 0x1},
+		unicode.Range16{Lo: 0x3021, Hi: 0x3029, Stride: 0x1},
+		unicode.Range16{Lo: 0x3031, Hi: 0x3035, Stride: 0x1},
+		unicode.Range16{Lo: 0x3038, Hi: 0x303a, Stride: 0x1},
+		unicode.Range16{Lo: 0x303b, Hi: 0x303b, Stride: 0x1},
+		unicode.Range16{Lo: 0x303c, Hi: 0x303c, Stride: 0x1},
+		unicode.Range16{Lo: 0x3041, Hi: 0x3096, Stride: 0x1},
+		unicode.Range16{Lo: 0x309d, Hi: 0x309e, Stride: 0x1},
+		unicode.Range16{Lo: 0x309f, Hi: 0x309f, Stride: 0x1},
+		unicode.Range16{Lo: 0x30a1, Hi: 0x30fa, Stride: 0x1},
+		unicode.Range16{Lo: 0x30fc, Hi: 0x30fe, Stride: 0x1},
+		unicode.Range16{Lo: 0x30ff, Hi: 0x30ff, Stride: 0x1},
+		unicode.Range16{Lo: 0x3105, Hi: 0x312f, Stride: 0x1},
+		unicode.Range16{Lo: 0x3131, Hi: 0x318e, Stride: 0x1},
+		unicode.Range16{Lo: 0x31a0, Hi: 0x31bf, Stride: 0x1},
+		unicode.Range16{Lo: 0x31f0, Hi: 0x31ff, Stride: 0x1},
+		unicode.Range16{Lo: 0x3400, Hi: 0x4dbf, Stride: 0x1},
+		unicode.Range16{Lo: 0x4e00, Hi: 0xa014, Stride: 0x1},
+		unicode.Range16{Lo: 0xa015, Hi: 0xa015, Stride: 0x1},
+		unicode.Range16{Lo: 0xa016, Hi: 0xa48c, Stride: 0x1},
+		unicode.Range16{Lo: 0xa4d0, Hi: 0xa4f7, Stride: 0x1},
+		unicode.Range16{Lo: 0xa4f8, Hi: 0xa4fd, Stride: 0x1},
+		unicode.Range16{Lo: 0xa500, Hi: 0xa60b, Stride: 0x1},
+		unicode.Range16{Lo: 0xa60c, Hi: 0xa60c, Stride: 0x1},
+		unicode.Range16{Lo: 0xa610, Hi: 0xa61f, Stride: 0x1},
+		unicode.Range16{Lo: 0xa62a, Hi: 0xa62b, Stride: 0x1},
+		unicode.Range16{Lo: 0xa66e, Hi: 0xa66e, Stride: 0x1},
+		unicode.Range16{Lo: 0xa67f, Hi: 0xa67f, Stride: 0x1},
+		unicode.Range16{Lo: 0xa6a0, Hi: 0xa6e5, Stride: 0x1},
+		unicode.Range16{Lo: 0xa6e6, Hi: 0xa6ef, Stride: 0x1},
+		unicode.Range16{Lo: 0xa717, Hi: 0xa71f, Stride: 0x1},
+		unicode.Range16{Lo: 0xa788, Hi: 0xa788, Stride: 0x1},
+		unicode.Range16{Lo: 0xa78f, Hi: 0xa78f, Stride: 0x1},
+		unicode.Range16{Lo: 0xa7f7, Hi: 0xa7f7, Stride: 0x1},
+		unicode.Range16{Lo: 0xa7fb, Hi: 0xa801, Stride: 0x1},
+		unicode.Range16{Lo: 0xa803, Hi: 0xa805, Stride: 0x1},
+		unicode.Range16{Lo: 0xa807, Hi: 0xa80a, Stride: 0x1},
+		unicode.Range16{Lo: 0xa80c, Hi: 0xa822, Stride: 0x1},
+		unicode.Range16{Lo: 0xa840, Hi: 0xa873, Stride: 0x1},
+		unicode.Range16{Lo: 0xa882, Hi: 0xa8b3, Stride: 0x1},
+		unicode.Range16{Lo: 0xa8f2, Hi: 0xa8f7, Stride: 0x1},
+		unicode.Range16{Lo: 0xa8fb, Hi: 0xa8fb, Stride: 0x1},
+		unicode.Range16{Lo: 0xa8fd, Hi: 0xa8fe, Stride: 0x1},
+		unicode.Range16{Lo: 0xa90a, Hi: 0xa925, Stride: 0x1},
+		unicode.Range16{Lo: 0xa930, Hi: 0xa946, Stride: 0x1},
+		unicode.Range16{Lo: 0xa960, Hi: 0xa97c, Stride: 0x1},
+		unicode.Range16{Lo: 0xa984, Hi: 0xa9b2, Stride: 0x1},
+		unicode.Range16{Lo: 0xa9cf, Hi: 0xa9cf, Stride: 0x1},
+		unicode.Range16{Lo: 0xa9e0, Hi: 0xa9e4, Stride: 0x1},
+		unicode.Range16{Lo: 0xa9e6, Hi: 0xa9e6, Stride: 0x1},
+		unicode.Range16{Lo: 0xa9e7, Hi: 0xa9ef, Stride: 0x1},
+		unicode.Range16{Lo: 0xa9fa, Hi: 0xa9fe, Stride: 0x1},
+		unicode.Range16{Lo: 0xaa00, Hi: 0xaa28, Stride: 0x1},
+		unicode.Range16{Lo: 0xaa40, Hi: 0xaa42, Stride: 0x1},
+		unicode.Range16{Lo: 0xaa44, Hi: 0xaa4b, Stride: 0x1},
+		unicode.Range16{Lo: 0xaa60, Hi: 0xaa6f, Stride: 0x1},
+		unicode.Range16{Lo: 0xaa70, Hi: 0xaa70, Stride: 0x1},
+		unicode.Range16{Lo: 0xaa71, Hi: 0xaa76, Stride: 0x1},
+		unicode.Range16{Lo: 0xaa7a, Hi: 0xaa7a, Stride: 0x1},
+		unicode.Range16{Lo: 0xaa7e, Hi: 0xaaaf, Stride: 0x1},
+		unicode.Range16{Lo: 0xaab1, Hi: 0xaab1, Stride: 0x1},
+		unicode.Range16{Lo: 0xaab5, Hi: 0xaab6, Stride: 0x1},
+		unicode.Range16{Lo: 0xaab9, Hi: 0xaabd, Stride: 0x1},
+		unicode.Range16{Lo: 0xaac0, Hi: 0xaac0, Stride: 0x1},
+		unicode.Range16{Lo: 0xaac2, Hi: 0xaac2, Stride: 0x1},
+		unicode.Range16{Lo: 0xaadb, Hi: 0xaadc, Stride: 0x1},
+		unicode.Range16{Lo: 0xaadd, Hi: 0xaadd, Stride: 0x1},
+		unicode.Range16{Lo: 0xaae0, Hi: 0xaaea, Stride: 0x1},
+		unicode.Range16{Lo: 0xaaf2, Hi: 0xaaf2, Stride: 0x1},
+		unicode.Range16{Lo: 0xaaf3, Hi: 0xaaf4, Stride: 0x1},
+		unicode.Range16{Lo: 0xab01, Hi: 0xab06, Stride: 0x1},
+		unicode.Range16{Lo: 0xab09, Hi: 0xab0e, Stride: 0x1},
+		unicode.Range16{Lo: 0xab11, Hi: 0xab16, Stride: 0x1},
+		unicode.Range16{Lo: 0xab20, Hi: 0xab26, Stride: 0x1},
+		unicode.Range16{Lo: 0xab28, Hi: 0xab2e, Stride: 0x1},
+		unicode.Range16{Lo: 0xabc0, Hi: 0xabe2, Stride: 0x1},
+		unicode.Range16{Lo: 0xac00, Hi: 0xd7a3, Stride: 0x1},
+		unicode.Range16{Lo: 0xd7b0, Hi: 0xd7c6, Stride: 0x1},
+		unicode.Range16{Lo: 0xd7cb, Hi: 0xd7fb, Stride: 0x1},
+		unicode.Range16{Lo: 0xf900, Hi: 0xfa6d, Stride: 0x1},
+		unicode.Range16{Lo: 0xfa70, Hi: 0xfad9, Stride: 0x1},
+		unicode.Range16{Lo: 0xfb1d, Hi: 0xfb1d, Stride: 0x1},
+		unicode.Range16{Lo: 0xfb1f, Hi: 0xfb28, Stride: 0x1},
+		unicode.Range16{Lo: 0xfb2a, Hi: 0xfb36, Stride: 0x1},
+		unicode.Range16{Lo: 0xfb38, Hi: 0xfb3c, Stride: 0x1},
+		unicode.Range16{Lo: 0xfb3e, Hi: 0xfb3e, Stride: 0x1},
+		unicode.Range16{Lo: 0xfb40, Hi: 0xfb41, Stride: 0x1},
+		unicode.Range16{Lo: 0xfb43, Hi: 0xfb44, Stride: 0x1},
+		unicode.Range16{Lo: 0xfb46, Hi: 0xfbb1, Stride: 0x1},
+		unicode.Range16{Lo: 0xfbd3, Hi: 0xfd3d, Stride: 0x1},
+		unicode.Range16{Lo: 0xfd50, Hi: 0xfd8f, Stride: 0x1},
+		unicode.Range16{Lo: 0xfd92, Hi: 0xfdc7, Stride: 0x1},
+		unicode.Range16{Lo: 0xfdf0, Hi: 0xfdfb, Stride: 0x1},
+		unicode.Range16{Lo: 0xfe70, Hi: 0xfe74, Stride: 0x1},
+		unicode.Range16{Lo: 0xfe76, Hi: 0xfefc, Stride: 0x1},
+		unicode.Range16{Lo: 0xff66, Hi: 0xff6f, Stride: 0x1},
+		unicode.Range16{Lo: 0xff70, Hi: 0xff70, Stride: 0x1},
+		unicode.Range16{Lo: 0xff71, Hi: 0xff9d, Stride: 0x1},
+		unicode.Range16{Lo: 0xffa0, Hi: 0xffbe, Stride: 0x1},
+		unicode.Range16{Lo: 0xffc2, Hi: 0xffc7, Stride: 0x1},
+		unicode.Range16{Lo: 0xffca, Hi: 0xffcf, Stride: 0x1},
+		unicode.Range16{Lo: 0xffd2, Hi: 0xffd7, Stride: 0x1},
+		unicode.Range16{Lo: 0xffda, Hi: 0xffdc, Stride: 0x1},
+	},
+	R32: []unicode.Range32{
+		unicode.Range32{Lo: 0x10000, Hi: 0x1000b, Stride: 0x1},
+		unicode.Range32{Lo: 0x1000d, Hi: 0x10026, Stride: 0x1},
+		unicode.Range32{Lo: 0x10028, Hi: 0x1003a, Stride: 0x1},
+		unicode.Range32{Lo: 0x1003c, Hi: 0x1003d, Stride: 0x1},
+		unicode.Range32{Lo: 0x1003f, Hi: 0x1004d, Stride: 0x1},
+		unicode.Range32{Lo: 0x10050, Hi: 0x1005d, Stride: 0x1},
+		unicode.Range32{Lo: 0x10080, Hi: 0x100fa, Stride: 0x1},
+		unicode.Range32{Lo: 0x10140, Hi: 0x10174, Stride: 0x1},
+		unicode.Range32{Lo: 0x10280, Hi: 0x1029c, Stride: 0x1},
+		unicode.Range32{Lo: 0x102a0, Hi: 0x102d0, Stride: 0x1},
+		unicode.Range32{Lo: 0x10300, Hi: 0x1031f, Stride: 0x1},
+		unicode.Range32{Lo: 0x1032d, Hi: 0x10340, Stride: 0x1},
+		unicode.Range32{Lo: 0x10341, Hi: 0x10341, Stride: 0x1},
+		unicode.Range32{Lo: 0x10342, Hi: 0x10349, Stride: 0x1},
+		unicode.Range32{Lo: 0x1034a, Hi: 0x1034a, Stride: 0x1},
+		unicode.Range32{Lo: 0x10350, Hi: 0x10375, Stride: 0x1},
+		unicode.Range32{Lo: 0x10380, Hi: 0x1039d, Stride: 0x1},
+		unicode.Range32{Lo: 0x103a0, Hi: 0x103c3, Stride: 0x1},
+		unicode.Range32{Lo: 0x103c8, Hi: 0x103cf, Stride: 0x1},
+		unicode.Range32{Lo: 0x103d1, Hi: 0x103d5, Stride: 0x1},
+		unicode.Range32{Lo: 0x10450, Hi: 0x1049d, Stride: 0x1},
+		unicode.Range32{Lo: 0x10500, Hi: 0x10527, Stride: 0x1},
+		unicode.Range32{Lo: 0x10530, Hi: 0x10563, Stride: 0x1},
+		unicode.Range32{Lo: 0x10600, Hi: 0x10736, Stride: 0x1},
+		unicode.Range32{Lo: 0x10740, Hi: 0x10755, Stride: 0x1},
+		unicode.Range32{Lo: 0x10760, Hi: 0x10767, Stride: 0x1},
+		unicode.Range32{Lo: 0x10781, Hi: 0x10782, Stride: 0x1},
+		unicode.Range32{Lo: 0x10800, Hi: 0x10805, Stride: 0x1},
+		unicode.Range32{Lo: 0x10808, Hi: 0x10808, Stride: 0x1},
+		unicode.Range32{Lo: 0x1080a, Hi: 0x10835, Stride: 0x1},
+		unicode.Range32{Lo: 0x10837, Hi: 0x10838, Stride: 0x1},
+		unicode.Range32{Lo: 0x1083c, Hi: 0x1083c, Stride: 0x1},
+		unicode.Range32{Lo: 0x1083f, Hi: 0x10855, Stride: 0x1},
+		unicode.Range32{Lo: 0x10860, Hi: 0x10876, Stride: 0x1},
+		unicode.Range32{Lo: 0x10880, Hi: 0x1089e, Stride: 0x1},
+		unicode.Range32{Lo: 0x108e0, Hi: 0x108f2, Stride: 0x1},
+		unicode.Range32{Lo: 0x108f4, Hi: 0x108f5, Stride: 0x1},
+		unicode.Range32{Lo: 0x10900, Hi: 0x10915, Stride: 0x1},
+		unicode.Range32{Lo: 0x10920, Hi: 0x10939, Stride: 0x1},
+		unicode.Range32{Lo: 0x10980, Hi: 0x109b7, Stride: 0x1},
+		unicode.Range32{Lo: 0x109be, Hi: 0x109bf, Stride: 0x1},
+		unicode.Range32{Lo: 0x10a00, Hi: 0x10a00, Stride: 0x1},
+		unicode.Range32{Lo: 0x10a10, Hi: 0x10a13, Stride: 0x1},
+		unicode.Range32{Lo: 0x10a15, Hi: 0x10a17, Stride: 0x1},
+		unicode.Range32{Lo: 0x10a19, Hi: 0x10a35, Stride: 0x1},
+		unicode.Range32{Lo: 0x10a60, Hi: 0x10a7c, Stride: 0x1},
+		unicode.Range32{Lo: 0x10a80, Hi: 0x10a9c, Stride: 0x1},
+		unicode.Range32{Lo: 0x10ac0, Hi: 0x10ac7, Stride: 0x1},
+		unicode.Range32{Lo: 0x10ac9, Hi: 0x10ae4, Stride: 0x1},
+		unicode.Range32{Lo: 0x10b00, Hi: 0x10b35, Stride: 0x1},
+		unicode.Range32{Lo: 0x10b40, Hi: 0x10b55, Stride: 0x1},
+		unicode.Range32{Lo: 0x10b60, Hi: 0x10b72, Stride: 0x1},
+		unicode.Range32{Lo: 0x10b80, Hi: 0x10b91, Stride: 0x1},
+		unicode.Range32{Lo: 0x10c00, Hi: 0x10c48, Stride: 0x1},
+		unicode.Range32{Lo: 0x10d00, Hi: 0x10d23, Stride: 0x1},
+		unicode.Range32{Lo: 0x10e80, Hi: 0x10ea9, Stride: 0x1},
+		unicode.Range32{Lo: 0x10eb0, Hi: 0x10eb1, Stride: 0x1},
+		unicode.Range32{Lo: 0x10f00, Hi: 0x10f1c, Stride: 0x1},
+		unicode.Range32{Lo: 0x10f27, Hi: 0x10f27, Stride: 0x1},
+		unicode.Range32{Lo: 0x10f30, Hi: 0x10f45, Stride: 0x1},
+		unicode.Range32{Lo: 0x10f70, Hi: 0x10f81, Stride: 0x1},
+		unicode.Range32{Lo: 0x10fb0, Hi: 0x10fc4, Stride: 0x1},
+		unicode.Range32{Lo: 0x10fe0, Hi: 0x10ff6, Stride: 0x1},
+		unicode.Range32{Lo: 0x11003, Hi: 0x11037, Stride: 0x1},
+		unicode.Range32{Lo: 0x11071, Hi: 0x11072, Stride: 0x1},
+		unicode.Range32{Lo: 0x11075, Hi: 0x11075, Stride: 0x1},
+		unicode.Range32{Lo: 0x11083, Hi: 0x110af, Stride: 0x1},
+		unicode.Range32{Lo: 0x110d0, Hi: 0x110e8, Stride: 0x1},
+		unicode.Range32{Lo: 0x11103, Hi: 0x11126, Stride: 0x1},
+		unicode.Range32{Lo: 0x11144, Hi: 0x11144, Stride: 0x1},
+		unicode.Range32{Lo: 0x11147, Hi: 0x11147, Stride: 0x1},
+		unicode.Range32{Lo: 0x11150, Hi: 0x11172, Stride: 0x1},
+		unicode.Range32{Lo: 0x11176, Hi: 0x11176, Stride: 0x1},
+		unicode.Range32{Lo: 0x11183, Hi: 0x111b2, Stride: 0x1},
+		unicode.Range32{Lo: 0x111c1, Hi: 0x111c4, Stride: 0x1},
+		unicode.Range32{Lo: 0x111da, Hi: 0x111da, Stride: 0x1},
+		unicode.Range32{Lo: 0x111dc, Hi: 0x111dc, Stride: 0x1},
+		unicode.Range32{Lo: 0x11200, Hi: 0x11211, Stride: 0x1},
+		unicode.Range32{Lo: 0x11213, Hi: 0x1122b, Stride: 0x1},
+		unicode.Range32{Lo: 0x1123f, Hi: 0x11240, Stride: 0x1},
+		unicode.Range32{Lo: 0x11280, Hi: 0x11286, Stride: 0x1},
+		unicode.Range32{Lo: 0x11288, Hi: 0x11288, Stride: 0x1},
+		unicode.Range32{Lo: 0x1128a, Hi: 0x1128d, Stride: 0x1},
+		unicode.Range32{Lo: 0x1128f, Hi: 0x1129d, Stride: 0x1},
+		unicode.Range32{Lo: 0x1129f, Hi: 0x112a8, Stride: 0x1},
+		unicode.Range32{Lo: 0x112b0, Hi: 0x112de, Stride: 0x1},
+		unicode.Range32{Lo: 0x11305, Hi: 0x1130c, Stride: 0x1},
+		unicode.Range32{Lo: 0x1130f, Hi: 0x11310, Stride: 0x1},
+		unicode.Range32{Lo: 0x11313, Hi: 0x11328, Stride: 0x1},
+		unicode.Range32{Lo: 0x1132a, Hi: 0x11330, Stride: 0x1},
+		unicode.Range32{Lo: 0x11332, Hi: 0x11333, Stride: 0x1},
+		unicode.Range32{Lo: 0x11335, Hi: 0x11339, Stride: 0x1},
+		unicode.Range32{Lo: 0x1133d, Hi: 0x1133d, Stride: 0x1},
+		unicode.Range32{Lo: 0x11350, Hi: 0x11350, Stride: 0x1},
+		unicode.Range32{Lo: 0x1135d, Hi: 0x11361, Stride: 0x1},
+		unicode.Range32{Lo: 0x11400, Hi: 0x11434, Stride: 0x1},
+		unicode.Range32{Lo: 0x11447, Hi: 0x1144a, Stride: 0x1},
+		unicode.Range32{Lo: 0x1145f, Hi: 0x11461, Stride: 0x1},
+		unicode.Range32{Lo: 0x11480, Hi: 0x114af, Stride: 0x1},
+		unicode.Range32{Lo: 0x114c4, Hi: 0x114c5, Stride: 0x1},
+		unicode.Range32{Lo: 0x114c7, Hi: 0x114c7, Stride: 0x1},
+		unicode.Range32{Lo: 0x11580, Hi: 0x115ae, Stride: 0x1},
+		unicode.Range32{Lo: 0x115d8, Hi: 0x115db, Stride: 0x1},
+		unicode.Range32{Lo: 0x11600, Hi: 0x1162f, Stride: 0x1},
+		unicode.Range32{Lo: 0x11644, Hi: 0x11644, Stride: 0x1},
+		unicode.Range32{Lo: 0x11680, Hi: 0x116aa, Stride: 0x1},
+		unicode.Range32{Lo: 0x116b8, Hi: 0x116b8, Stride: 0x1},
+		unicode.Range32{Lo: 0x11700, Hi: 0x1171a, Stride: 0x1},
+		unicode.Range32{Lo: 0x11740, Hi: 0x11746, Stride: 0x1},
+		unicode.Range32{Lo: 0x11800, Hi: 0x1182b, Stride: 0x1},
+		unicode.Range32{Lo: 0x118ff, Hi: 0x11906, Stride: 0x1},
+		unicode.Range32{Lo: 0x11909, Hi: 0x11909, Stride: 0x1},
+		unicode.Range32{Lo: 0x1190c, Hi: 0x11913, Stride: 0x1},
+		unicode.Range32{Lo: 0x11915, Hi: 0x11916, Stride: 0x1},
+		unicode.Range32{Lo: 0x11918, Hi: 0x1192f, Stride: 0x1},
+		unicode.Range32{Lo: 0x1193f, Hi: 0x1193f, Stride: 0x1},
+		unicode.Range32{Lo: 0x11941, Hi: 0x11941, Stride: 0x1},
+		unicode.Range32{Lo: 0x119a0, Hi: 0x119a7, Stride: 0x1},
+		unicode.Range32{Lo: 0x119aa, Hi: 0x119d0, Stride: 0x1},
+		unicode.Range32{Lo: 0x119e1, Hi: 0x119e1, Stride: 0x1},
+		unicode.Range32{Lo: 0x119e3, Hi: 0x119e3, Stride: 0x1},
+		unicode.Range32{Lo: 0x11a00, Hi: 0x11a00, Stride: 0x1},
+		unicode.Range32{Lo: 0x11a0b, Hi: 0x11a32, Stride: 0x1},
+		unicode.Range32{Lo: 0x11a3a, Hi: 0x11a3a, Stride: 0x1},
+		unicode.Range32{Lo: 0x11a50, Hi: 0x11a50, Stride: 0x1},
+		unicode.Range32{Lo: 0x11a5c, Hi: 0x11a89, Stride: 0x1},
+		unicode.Range32{Lo: 0x11a9d, Hi: 0x11a9d, Stride: 0x1},
+		unicode.Range32{Lo: 0x11ab0, Hi: 0x11af8, Stride: 0x1},
+		unicode.Range32{Lo: 0x11c00, Hi: 0x11c08, Stride: 0x1},
+		unicode.Range32{Lo: 0x11c0a, Hi: 0x11c2e, Stride: 0x1},
+		unicode.Range32{Lo: 0x11c40, Hi: 0x11c40, Stride: 0x1},
+		unicode.Range32{Lo: 0x11c72, Hi: 0x11c8f, Stride: 0x1},
+		unicode.Range32{Lo: 0x11d00, Hi: 0x11d06, Stride: 0x1},
+		unicode.Range32{Lo: 0x11d08, Hi: 0x11d09, Stride: 0x1},
+		unicode.Range32{Lo: 0x11d0b, Hi: 0x11d30, Stride: 0x1},
+		unicode.Range32{Lo: 0x11d46, Hi: 0x11d46, Stride: 0x1},
+		unicode.Range32{Lo: 0x11d60, Hi: 0x11d65, Stride: 0x1},
+		unicode.Range32{Lo: 0x11d67, Hi: 0x11d68, Stride: 0x1},
+		unicode.Range32{Lo: 0x11d6a, Hi: 0x11d89, Stride: 0x1},
+		unicode.Range32{Lo: 0x11d98, Hi: 0x11d98, Stride: 0x1},
+		unicode.Range32{Lo: 0x11ee0, Hi: 0x11ef2, Stride: 0x1},
+		unicode.Range32{Lo: 0x11f02, Hi: 0x11f02, Stride: 0x1},
+		unicode.Range32{Lo: 0x11f04, Hi: 0x11f10, Stride: 0x1},
+		unicode.Range32{Lo: 0x11f12, Hi: 0x11f33, Stride: 0x1},
+		unicode.Range32{Lo: 0x11fb0, Hi: 0x11fb0, Stride: 0x1},
+		unicode.Range32{Lo: 0x12000, Hi: 0x12399, Stride: 0x1},
+		unicode.Range32{Lo: 0x12400, Hi: 0x1246e, Stride: 0x1},
+		unicode.Range32{Lo: 0x12480, Hi: 0x12543, Stride: 0x1},
+		unicode.Range32{Lo: 0x12f90, Hi: 0x12ff0, Stride: 0x1},
+		unicode.Range32{Lo: 0x13000, Hi: 0x1342f, Stride: 0x1},
+		unicode.Range32{Lo: 0x13441, Hi: 0x13446, Stride: 0x1},
+		unicode.Range32{Lo: 0x14400, Hi: 0x14646, Stride: 0x1},
+		unicode.Range32{Lo: 0x16800, Hi: 0x16a38, Stride: 0x1},
+		unicode.Range32{Lo: 0x16a40, Hi: 0x16a5e, Stride: 0x1},
+		unicode.Range32{Lo: 0x16a70, Hi: 0x16abe, Stride: 0x1},
+		unicode.Range32{Lo: 0x16ad0, Hi: 0x16aed, Stride: 0x1},
+		unicode.Range32{Lo: 0x16b00, Hi: 0x16b2f, Stride: 0x1},
+		unicode.Range32{Lo: 0x16b40, Hi: 0x16b43, Stride: 0x1},
+		unicode.Range32{Lo: 0x16b63, Hi: 0x16b77, Stride: 0x1},
+		unicode.Range32{Lo: 0x16b7d, Hi: 0x16b8f, Stride: 0x1},
+		unicode.Range32{Lo: 0x16f00, Hi: 0x16f4a, Stride: 0x1},
+		unicode.Range32{Lo: 0x16f50, Hi: 0x16f50, Stride: 0x1},
+		unicode.Range32{Lo: 0x16f93, Hi: 0x16f9f, Stride: 0x1},
+		unicode.Range32{Lo: 0x16fe0, Hi: 0x16fe1, Stride: 0x1},
+		unicode.Range32{Lo: 0x16fe3, Hi: 0x16fe3, Stride: 0x1},
+		unicode.Range32{Lo: 0x17000, Hi: 0x187f7, Stride: 0x1},
+		unicode.Range32{Lo: 0x18800, Hi: 0x18cd5, Stride: 0x1},
+		unicode.Range32{Lo: 0x18d00, Hi: 0x18d08, Stride: 0x1},
+		unicode.Range32{Lo: 0x1aff0, Hi: 0x1aff3, Stride: 0x1},
+		unicode.Range32{Lo: 0x1aff5, Hi: 0x1affb, Stride: 0x1},
+		unicode.Range32{Lo: 0x1affd, Hi: 0x1affe, Stride: 0x1},
+		unicode.Range32{Lo: 0x1b000, Hi: 0x1b122, Stride: 0x1},
+		unicode.Range32{Lo: 0x1b132, Hi: 0x1b132, Stride: 0x1},
+		unicode.Range32{Lo: 0x1b150, Hi: 0x1b152, Stride: 0x1},
+		unicode.Range32{Lo: 0x1b155, Hi: 0x1b155, Stride: 0x1},
+		unicode.Range32{Lo: 0x1b164, Hi: 0x1b167, Stride: 0x1},
+		unicode.Range32{Lo: 0x1b170, Hi: 0x1b2fb, Stride: 0x1},
+		unicode.Range32{Lo: 0x1bc00, Hi: 0x1bc6a, Stride: 0x1},
+		unicode.Range32{Lo: 0x1bc70, Hi: 0x1bc7c, Stride: 0x1},
+		unicode.Range32{Lo: 0x1bc80, Hi: 0x1bc88, Stride: 0x1},
+		unicode.Range32{Lo: 0x1bc90, Hi: 0x1bc99, Stride: 0x1},
+		unicode.Range32{Lo: 0x1df0a, Hi: 0x1df0a, Stride: 0x1},
+		unicode.Range32{Lo: 0x1e100, Hi: 0x1e12c, Stride: 0x1},
+		unicode.Range32{Lo: 0x1e137, Hi: 0x1e13d, Stride: 0x1},
+		unicode.Range32{Lo: 0x1e14e, Hi: 0x1e14e, Stride: 0x1},
+		unicode.Range32{Lo: 0x1e290, Hi: 0x1e2ad, Stride: 0x1},
+		unicode.Range32{Lo: 0x1e2c0, Hi: 0x1e2eb, Stride: 0x1},
+		unicode.Range32{Lo: 0x1e4d0, Hi: 0x1e4ea, Stride: 0x1},
+		unicode.Range32{Lo: 0x1e4eb, Hi: 0x1e4eb, Stride: 0x1},
+		unicode.Range32{Lo: 0x1e7e0, Hi: 0x1e7e6, Stride: 0x1},
+		unicode.Range32{Lo: 0x1e7e8, Hi: 0x1e7eb, Stride: 0x1},
+		unicode.Range32{Lo: 0x1e7ed, Hi: 0x1e7ee, Stride: 0x1},
+		unicode.Range32{Lo: 0x1e7f0, Hi: 0x1e7fe, Stride: 0x1},
+		unicode.Range32{Lo: 0x1e800, Hi: 0x1e8c4, Stride: 0x1},
+		unicode.Range32{Lo: 0x1e94b, Hi: 0x1e94b, Stride: 0x1},
+		unicode.Range32{Lo: 0x1ee00, Hi: 0x1ee03, Stride: 0x1},
+		unicode.Range32{Lo: 0x1ee05, Hi: 0x1ee1f, Stride: 0x1},
+		unicode.Range32{Lo: 0x1ee21, Hi: 0x1ee22, Stride: 0x1},
+		unicode.Range32{Lo: 0x1ee24, Hi: 0x1ee24, Stride: 0x1},
+		unicode.Range32{Lo: 0x1ee27, Hi: 0x1ee27, Stride: 0x1},
+		unicode.Range32{Lo: 0x1ee29, Hi: 0x1ee32, Stride: 0x1},
+		unicode.Range32{Lo: 0x1ee34, Hi: 0x1ee37, Stride: 0x1},
+		unicode.Range32{Lo: 0x1ee39, Hi: 0x1ee39, Stride: 0x1},
+		unicode.Range32{Lo: 0x1ee3b, Hi: 0x1ee3b, Stride: 0x1},
+		unicode.Range32{Lo: 0x1ee42, Hi: 0x1ee42, Stride: 0x1},
+		unicode.Range32{Lo: 0x1ee47, Hi: 0x1ee47, Stride: 0x1},
+		unicode.Range32{Lo: 0x1ee49, Hi: 0x1ee49, Stride: 0x1},
+		unicode.Range32{Lo: 0x1ee4b, Hi: 0x1ee4b, Stride: 0x1},
+		unicode.Range32{Lo: 0x1ee4d, Hi: 0x1ee4f, Stride: 0x1},
+		unicode.Range32{Lo: 0x1ee51, Hi: 0x1ee52, Stride: 0x1},
+		unicode.Range32{Lo: 0x1ee54, Hi: 0x1ee54, Stride: 0x1},
+		unicode.Range32{Lo: 0x1ee57, Hi: 0x1ee57, Stride: 0x1},
+		unicode.Range32{Lo: 0x1ee59, Hi: 0x1ee59, Stride: 0x1},
+		unicode.Range32{Lo: 0x1ee5b, Hi: 0x1ee5b, Stride: 0x1},
+		unicode.Range32{Lo: 0x1ee5d, Hi: 0x1ee5d, Stride: 0x1},
+		unicode.Range32{Lo: 0x1ee5f, Hi: 0x1ee5f, Stride: 0x1},
+		unicode.Range32{Lo: 0x1ee61, Hi: 0x1ee62, Stride: 0x1},
+		unicode.Range32{Lo: 0x1ee64, Hi: 0x1ee64, Stride: 0x1},
+		unicode.Range32{Lo: 0x1ee67, Hi: 0x1ee6a, Stride: 0x1},
+		unicode.Range32{Lo: 0x1ee6c, Hi: 0x1ee72, Stride: 0x1},
+		unicode.Range32{Lo: 0x1ee74, Hi: 0x1ee77, Stride: 0x1},
+		unicode.Range32{Lo: 0x1ee79, Hi: 0x1ee7c, Stride: 0x1},
+		unicode.Range32{Lo: 0x1ee7e, Hi: 0x1ee7e, Stride: 0x1},
+		unicode.Range32{Lo: 0x1ee80, Hi: 0x1ee89, Stride: 0x1},
+		unicode.Range32{Lo: 0x1ee8b, Hi: 0x1ee9b, Stride: 0x1},
+		unicode.Range32{Lo: 0x1eea1, Hi: 0x1eea3, Stride: 0x1},
+		unicode.Range32{Lo: 0x1eea5, Hi: 0x1eea9, Stride: 0x1},
+		unicode.Range32{Lo: 0x1eeab, Hi: 0x1eebb, Stride: 0x1},
+		unicode.Range32{Lo: 0x20000, Hi: 0x2a6df, Stride: 0x1},
+		unicode.Range32{Lo: 0x2a700, Hi: 0x2b739, Stride: 0x1},
+		unicode.Range32{Lo: 0x2b740, Hi: 0x2b81d, Stride: 0x1},
+		unicode.Range32{Lo: 0x2b820, Hi: 0x2cea1, Stride: 0x1},
+		unicode.Range32{Lo: 0x2ceb0, Hi: 0x2ebe0, Stride: 0x1},
+		unicode.Range32{Lo: 0x2f800, Hi: 0x2fa1d, Stride: 0x1},
+		unicode.Range32{Lo: 0x30000, Hi: 0x3134a, Stride: 0x1},
+		unicode.Range32{Lo: 0x31350, Hi: 0x323af, Stride: 0x1},
+	},
+	LatinOffset: 0,
+}
+
+var _SentenceSContinue = &unicode.RangeTable{
+	R16: []unicode.Range16{
+		unicode.Range16{Lo: 0x2c, Hi: 0x2c, Stride: 0x1},
+		unicode.Range16{Lo: 0x2d, Hi: 0x2d, Stride: 0x1},
+		unicode.Range16{Lo: 0x3a, Hi: 0x3a, Stride: 0x1},
+		unicode.Range16{Lo: 0x55d, Hi: 0x55d, Stride: 0x1},
+		unicode.Range16{Lo: 0x60c, Hi: 0x60d, Stride: 0x1},
+		unicode.Range16{Lo: 0x7f8, Hi: 0x7f8, Stride: 0x1},
+		unicode.Range16{Lo: 0x1802, Hi: 0x1802, Stride: 0x1},
+		unicode.Range16{Lo: 0x1808, Hi: 0x1808, Stride: 0x1},
+		unicode.Range16{Lo: 0x2013, Hi: 0x2014, Stride: 0x1},
+		unicode.Range16{Lo: 0x3001, Hi: 0x3001, Stride: 0x1},
+		unicode.Range16{Lo: 0xfe10, Hi: 0xfe11, Stride: 0x1},
+		unicode.Range16{Lo: 0xfe13, Hi: 0xfe13, Stride: 0x1},
+		unicode.Range16{Lo: 0xfe31, Hi: 0xfe32, Stride: 0x1},
+		unicode.Range16{Lo: 0xfe50, Hi: 0xfe51, Stride: 0x1},
+		unicode.Range16{Lo: 0xfe55, Hi: 0xfe55, Stride: 0x1},
+		unicode.Range16{Lo: 0xfe58, Hi: 0xfe58, Stride: 0x1},
+		unicode.Range16{Lo: 0xfe63, Hi: 0xfe63, Stride: 0x1},
+		unicode.Range16{Lo: 0xff0c, Hi: 0xff0c, Stride: 0x1},
+		unicode.Range16{Lo: 0xff0d, Hi: 0xff0d, Stride: 0x1},
+		unicode.Range16{Lo: 0xff1a, Hi: 0xff1a, Stride: 0x1},
+		unicode.Range16{Lo: 0xff64, Hi: 0xff64, Stride: 0x1},
+	},
+	LatinOffset: 3,
+}
+
+var _SentenceSTerm = &unicode.RangeTable{
+	R16: []unicode.Range16{
+		unicode.Range16{Lo: 0x21, Hi: 0x21, Stride: 0x1},
+		unicode.Range16{Lo: 0x3f, Hi: 0x3f, Stride: 0x1},
+		unicode.Range16{Lo: 0x589, Hi: 0x589, Stride: 0x1},
+		unicode.Range16{Lo: 0x61d, Hi: 0x61f, Stride: 0x1},
+		unicode.Range16{Lo: 0x6d4, Hi: 0x6d4, Stride: 0x1},
+		unicode.Range16{Lo: 0x700, Hi: 0x702, Stride: 0x1},
+		unicode.Range16{Lo: 0x7f9, Hi: 0x7f9, Stride: 0x1},
+		unicode.Range16{Lo: 0x837, Hi: 0x837, Stride: 0x1},
+		unicode.Range16{Lo: 0x839, Hi: 0x839, Stride: 0x1},
+		unicode.Range16{Lo: 0x83d, Hi: 0x83e, Stride: 0x1},
+		unicode.Range16{Lo: 0x964, Hi: 0x965, Stride: 0x1},
+		unicode.Range16{Lo: 0x104a, Hi: 0x104b, Stride: 0x1},
+		unicode.Range16{Lo: 0x1362, Hi: 0x1362, Stride: 0x1},
+		unicode.Range16{Lo: 0x1367, Hi: 0x1368, Stride: 0x1},
+		unicode.Range16{Lo: 0x166e, Hi: 0x166e, Stride: 0x1},
+		unicode.Range16{Lo: 0x1735, Hi: 0x1736, Stride: 0x1},
+		unicode.Range16{Lo: 0x1803, Hi: 0x1803, Stride: 0x1},
+		unicode.Range16{Lo: 0x1809, Hi: 0x1809, Stride: 0x1},
+		unicode.Range16{Lo: 0x1944, Hi: 0x1945, Stride: 0x1},
+		unicode.Range16{Lo: 0x1aa8, Hi: 0x1aab, Stride: 0x1},
+		unicode.Range16{Lo: 0x1b5a, Hi: 0x1b5b, Stride: 0x1},
+		unicode.Range16{Lo: 0x1b5e, Hi: 0x1b5f, Stride: 0x1},
+		unicode.Range16{Lo: 0x1b7d, Hi: 0x1b7e, Stride: 0x1},
+		unicode.Range16{Lo: 0x1c3b, Hi: 0x1c3c, Stride: 0x1},
+		unicode.Range16{Lo: 0x1c7e, Hi: 0x1c7f, Stride: 0x1},
+		unicode.Range16{Lo: 0x203c, Hi: 0x203d, Stride: 0x1},
+		unicode.Range16{Lo: 0x2047, Hi: 0x2049, Stride: 0x1},
+		unicode.Range16{Lo: 0x2e2e, Hi: 0x2e2e, Stride: 0x1},
+		unicode.Range16{Lo: 0x2e3c, Hi: 0x2e3c, Stride: 0x1},
+		unicode.Range16{Lo: 0x2e53, Hi: 0x2e54, Stride: 0x1},
+		unicode.Range16{Lo: 0x3002, Hi: 0x3002, Stride: 0x1},
+		unicode.Range16{Lo: 0xa4ff, Hi: 0xa4ff, Stride: 0x1},
+		unicode.Range16{Lo: 0xa60e, Hi: 0xa60f, Stride: 0x1},
+		unicode.Range16{Lo: 0xa6f3, Hi: 0xa6f3, Stride: 0x1},
+		unicode.Range16{Lo: 0xa6f7, Hi: 0xa6f7, Stride: 0x1},
+		unicode.Range16{Lo: 0xa876, Hi: 0xa877, Stride: 0x1},
+		unicode.Range16{Lo: 0xa8ce, Hi: 0xa8cf, Stride: 0x1},
+		unicode.Range16{Lo: 0xa92f, Hi: 0xa92f, Stride: 0x1},
+		unicode.Range16{Lo: 0xa9c8, Hi: 0xa9c9, Stride: 0x1},
+		unicode.Range16{Lo: 0xaa5d, Hi: 0xaa5f, Stride: 0x1},
+		unicode.Range16{Lo: 0xaaf0, Hi: 0xaaf1, Stride: 0x1},
+		unicode.Range16{Lo: 0xabeb, Hi: 0xabeb, Stride: 0x1},
+		unicode.Range16{Lo: 0xfe56, Hi: 0xfe57, Stride: 0x1},
+		unicode.Range16{Lo: 0xff01, Hi: 0xff01, Stride: 0x1},
+		unicode.Range16{Lo: 0xff1f, Hi: 0xff1f, Stride: 0x1},
+		unicode.Range16{Lo: 0xff61, Hi: 0xff61, Stride: 0x1},
+	},
+	R32: []unicode.Range32{
+		unicode.Range32{Lo: 0x10a56, Hi: 0x10a57, Stride: 0x1},
+		unicode.Range32{Lo: 0x10f55, Hi: 0x10f59, Stride: 0x1},
+		unicode.Range32{Lo: 0x10f86, Hi: 0x10f89, Stride: 0x1},
+		unicode.Range32{Lo: 0x11047, Hi: 0x11048, Stride: 0x1},
+		unicode.Range32{Lo: 0x110be, Hi: 0x110c1, Stride: 0x1},
+		unicode.Range32{Lo: 0x11141, Hi: 0x11143, Stride: 0x1},
+		unicode.Range32{Lo: 0x111c5, Hi: 0x111c6, Stride: 0x1},
+		unicode.Range32{Lo: 0x111cd, Hi: 0x111cd, Stride: 0x1},
+		unicode.Range32{Lo: 0x111de, Hi: 0x111df, Stride: 0x1},
+		unicode.Range32{Lo: 0x11238, Hi: 0x11239, Stride: 0x1},
+		unicode.Range32{Lo: 0x1123b, Hi: 0x1123c, Stride: 0x1},
+		unicode.Range32{Lo: 0x112a9, Hi: 0x112a9, Stride: 0x1},
+		unicode.Range32{Lo: 0x1144b, Hi: 0x1144c, Stride: 0x1},
+		unicode.Range32{Lo: 0x115c2, Hi: 0x115c3, Stride: 0x1},
+		unicode.Range32{Lo: 0x115c9, Hi: 0x115d7, Stride: 0x1},
+		unicode.Range32{Lo: 0x11641, Hi: 0x11642, Stride: 0x1},
+		unicode.Range32{Lo: 0x1173c, Hi: 0x1173e, Stride: 0x1},
+		unicode.Range32{Lo: 0x11944, Hi: 0x11944, Stride: 0x1},
+		unicode.Range32{Lo: 0x11946, Hi: 0x11946, Stride: 0x1},
+		unicode.Range32{Lo: 0x11a42, Hi: 0x11a43, Stride: 0x1},
+		unicode.Range32{Lo: 0x11a9b, Hi: 0x11a9c, Stride: 0x1},
+		unicode.Range32{Lo: 0x11c41, Hi: 0x11c42, Stride: 0x1},
+		unicode.Range32{Lo: 0x11ef7, Hi: 0x11ef8, Stride: 0x1},
+		unicode.Range32{Lo: 0x11f43, Hi: 0x11f44, Stride: 0x1},
+		unicode.Range32{Lo: 0x16a6e, Hi: 0x16a6f, Stride: 0x1},
+		unicode.Range32{Lo: 0x16af5, Hi: 0x16af5, Stride: 0x1},
+		unicode.Range32{Lo: 0x16b37, Hi: 0x16b38, Stride: 0x1},
+		unicode.Range32{Lo: 0x16b44, Hi: 0x16b44, Stride: 0x1},
+		unicode.Range32{Lo: 0x16e98, Hi: 0x16e98, Stride: 0x1},
+		unicode.Range32{Lo: 0x1bc9f, Hi: 0x1bc9f, Stride: 0x1},
+		unicode.Range32{Lo: 0x1da88, Hi: 0x1da88, Stride: 0x1},
+	},
+	LatinOffset: 2,
+}
+
+var _SentenceSep = &unicode.RangeTable{
+	R16: []unicode.Range16{
+		unicode.Range16{Lo: 0x85, Hi: 0x85, Stride: 0x1},
+		unicode.Range16{Lo: 0x2028, Hi: 0x2028, Stride: 0x1},
+		unicode.Range16{Lo: 0x2029, Hi: 0x2029, Stride: 0x1},
+	},
+	LatinOffset: 1,
+}
+
+var _SentenceSp = &unicode.RangeTable{
+	R16: []unicode.Range16{
+		unicode.Range16{Lo: 0x9, Hi: 0x9, Stride: 0x1},
+		unicode.Range16{Lo: 0xb, Hi: 0xc, Stride: 0x1},
+		unicode.Range16{Lo: 0x20, Hi: 0x20, Stride: 0x1},
+		unicode.Range16{Lo: 0xa0, Hi: 0xa0, Stride: 0x1},
+		unicode.Range16{Lo: 0x1680, Hi: 0x1680, Stride: 0x1},
+		unicode.Range16{Lo: 0x2000, Hi: 0x200a, Stride: 0x1},
+		unicode.Range16{Lo: 0x202f, Hi: 0x202f, Stride: 0x1},
+		unicode.Range16{Lo: 0x205f, Hi: 0x205f, Stride: 0x1},
+		unicode.Range16{Lo: 0x3000, Hi: 0x3000, Stride: 0x1},
+	},
+	LatinOffset: 4,
+}
+
+var _SentenceUpper = &unicode.RangeTable{
+	R16: []unicode.Range16{
+		unicode.Range16{Lo: 0x41, Hi: 0x5a, Stride: 0x1},
+		unicode.Range16{Lo: 0xc0, Hi: 0xd6, Stride: 0x1},
+		unicode.Range16{Lo: 0xd8, Hi: 0xde, Stride: 0x1},
+		unicode.Range16{Lo: 0x100, Hi: 0x100, Stride: 0x1},
+		unicode.Range16{Lo: 0x102, Hi: 0x102, Stride: 0x1},
+		unicode.Range16{Lo: 0x104, Hi: 0x104, Stride: 0x1},
+		unicode.Range16{Lo: 0x106, Hi: 0x106, Stride: 0x1},
+		unicode.Range16{Lo: 0x108, Hi: 0x108, Stride: 0x1},
+		unicode.Range16{Lo: 0x10a, Hi: 0x10a, Stride: 0x1},
+		unicode.Range16{Lo: 0x10c, Hi: 0x10c, Stride: 0x1},
+		unicode.Range16{Lo: 0x10e, Hi: 0x10e, Stride: 0x1},
+		unicode.Range16{Lo: 0x110, Hi: 0x110, Stride: 0x1},
+		unicode.Range16{Lo: 0x112, Hi: 0x112, Stride: 0x1},
+		unicode.Range16{Lo: 0x114, Hi: 0x114, Stride: 0x1},
+		unicode.Range16{Lo: 0x116, Hi: 0x116, Stride: 0x1},
+		unicode.Range16{Lo: 0x118, Hi: 0x118, Stride: 0x1},
+		unicode.Range16{Lo: 0x11a, Hi: 0x11a, Stride: 0x1},
+		unicode.Range16{Lo: 0x11c, Hi: 0x11c, Stride: 0x1},
+		unicode.Range16{Lo: 0x11e, Hi: 0x11e, Stride: 0x1},
+		unicode.Range16{Lo: 0x120, Hi: 0x120, Stride: 0x1},
+		unicode.Range16{Lo: 0x122, Hi: 0x122, Stride: 0x1},
+		unicode.Range16{Lo: 0x124, Hi: 0x124, Stride: 0x1},
+		unicode.Range16{Lo: 0x126, Hi: 0x126, Stride: 0x1},
+		unicode.Range16{Lo: 0x128, Hi: 0x128, Stride: 0x1},
+		unicode.Range16{Lo: 0x12a, Hi: 0x12a, Stride: 0x1},
+		unicode.Range16{Lo: 0x12c, Hi: 0x12c, Stride: 0x1},
+		unicode.Range16{Lo: 0x12e, Hi: 0x12e, Stride: 0x1},
+		unicode.Range16{Lo: 0x130, Hi: 0x130, Stride: 0x1},
+		unicode.Range16{Lo: 0x132, Hi: 0x132, Stride: 0x1},
+		unicode.Range16{Lo: 0x134, Hi: 0x134, Stride: 0x1},
+		unicode.Range16{Lo: 0x136, Hi: 0x136, Stride: 0x1},
+		unicode.Range16{Lo: 0x139, Hi: 0x139, Stride: 0x1},
+		unicode.Range16{Lo: 0x13b, Hi: 0x13b, Stride: 0x1},
+		unicode.Range16{Lo: 0x13d, Hi: 0x13d, Stride: 0x1},
+		unicode.Range16{Lo: 0x13f, Hi: 0x13f, Stride: 0x1},
+		unicode.Range16{Lo: 0x141, Hi: 0x141, Stride: 0x1},
+		unicode.Range16{Lo: 0x143, Hi: 0x143, Stride: 0x1},
+		unicode.Range16{Lo: 0x145, Hi: 0x145, Stride: 0x1},
+		unicode.Range16{Lo: 0x147, Hi: 0x147, Stride: 0x1},
+		unicode.Range16{Lo: 0x14a, Hi: 0x14a, Stride: 0x1},
+		unicode.Range16{Lo: 0x14c, Hi: 0x14c, Stride: 0x1},
+		unicode.Range16{Lo: 0x14e, Hi: 0x14e, Stride: 0x1},
+		unicode.Range16{Lo: 0x150, Hi: 0x150, Stride: 0x1},
+		unicode.Range16{Lo: 0x152, Hi: 0x152, Stride: 0x1},
+		unicode.Range16{Lo: 0x154, Hi: 0x154, Stride: 0x1},
+		unicode.Range16{Lo: 0x156, Hi: 0x156, Stride: 0x1},
+		unicode.Range16{Lo: 0x158, Hi: 0x158, Stride: 0x1},
+		unicode.Range16{Lo: 0x15a, Hi: 0x15a, Stride: 0x1},
+		unicode.Range16{Lo: 0x15c, Hi: 0x15c, Stride: 0x1},
+		unicode.Range16{Lo: 0x15e, Hi: 0x15e, Stride: 0x1},
+		unicode.Range16{Lo: 0x160, Hi: 0x160, Stride: 0x1},
+		unicode.Range16{Lo: 0x162, Hi: 0x162, Stride: 0x1},
+		unicode.Range16{Lo: 0x164, Hi: 0x164, Stride: 0x1},
+		unicode.Range16{Lo: 0x166, Hi: 0x166, Stride: 0x1},
+		unicode.Range16{Lo: 0x168, Hi: 0x168, Stride: 0x1},
+		unicode.Range16{Lo: 0x16a, Hi: 0x16a, Stride: 0x1},
+		unicode.Range16{Lo: 0x16c, Hi: 0x16c, Stride: 0x1},
+		unicode.Range16{Lo: 0x16e, Hi: 0x16e, Stride: 0x1},
+		unicode.Range16{Lo: 0x170, Hi: 0x170, Stride: 0x1},
+		unicode.Range16{Lo: 0x172, Hi: 0x172, Stride: 0x1},
+		unicode.Range16{Lo: 0x174, Hi: 0x174, Stride: 0x1},
+		unicode.Range16{Lo: 0x176, Hi: 0x176, Stride: 0x1},
+		unicode.Range16{Lo: 0x178, Hi: 0x179, Stride: 0x1},
+		unicode.Range16{Lo: 0x17b, Hi: 0x17b, Stride: 0x1},
+		unicode.Range16{Lo: 0x17d, Hi: 0x17d, Stride: 0x1},
+		unicode.Range16{Lo: 0x181, Hi: 0x182, Stride: 0x1},
+		unicode.Range16{Lo: 0x184, Hi: 0x184, Stride: 0x1},
+		unicode.Range16{Lo: 0x186, Hi: 0x187, Stride: 0x1},
+		unicode.Range16{Lo: 0x189, Hi: 0x18b, Stride: 0x1},
+		unicode.Range16{Lo: 0x18e, Hi: 0x191, Stride: 0x1},
+		unicode.Range16{Lo: 0x193, Hi: 0x194, Stride: 0x1},
+		unicode.Range16{Lo: 0x196, Hi: 0x198, Stride: 0x1},
+		unicode.Range16{Lo: 0x19c, Hi: 0x19d, Stride: 0x1},
+		unicode.Range16{Lo: 0x19f, Hi: 0x1a0, Stride: 0x1},
+		unicode.Range16{Lo: 0x1a2, Hi: 0x1a2, Stride: 0x1},
+		unicode.Range16{Lo: 0x1a4, Hi: 0x1a4, Stride: 0x1},
+		unicode.Range16{Lo: 0x1a6, Hi: 0x1a7, Stride: 0x1},
+		unicode.Range16{Lo: 0x1a9, Hi: 0x1a9, Stride: 0x1},
+		unicode.Range16{Lo: 0x1ac, Hi: 0x1ac, Stride: 0x1},
+		unicode.Range16{Lo: 0x1ae, Hi: 0x1af, Stride: 0x1},
+		unicode.Range16{Lo: 0x1b1, Hi: 0x1b3, Stride: 0x1},
+		unicode.Range16{Lo: 0x1b5, Hi: 0x1b5, Stride: 0x1},
+		unicode.Range16{Lo: 0x1b7, Hi: 0x1b8, Stride: 0x1},
+		unicode.Range16{Lo: 0x1bc, Hi: 0x1bc, Stride: 0x1},
+		unicode.Range16{Lo: 0x1c4, Hi: 0x1c5, Stride: 0x1},
+		unicode.Range16{Lo: 0x1c7, Hi: 0x1c8, Stride: 0x1},
+		unicode.Range16{Lo: 0x1ca, Hi: 0x1cb, Stride: 0x1},
+		unicode.Range16{Lo: 0x1cd, Hi: 0x1cd, Stride: 0x1},
+		unicode.Range16{Lo: 0x1cf, Hi: 0x1cf, Stride: 0x1},
+		unicode.Range16{Lo: 0x1d1, Hi: 0x1d1, Stride: 0x1},
+		unicode.Range16{Lo: 0x1d3, Hi: 0x1d3, Stride: 0x1},
+		unicode.Range16{Lo: 0x1d5, Hi: 0x1d5, Stride: 0x1},
+		unicode.Range16{Lo: 0x1d7, Hi: 0x1d7, Stride: 0x1},
+		unicode.Range16{Lo: 0x1d9, Hi: 0x1d9, Stride: 0x1},
+		unicode.Range16{Lo: 0x1db, Hi: 0x1db, Stride: 0x1},
+		unicode.Range16{Lo: 0x1de, Hi: 0x1de, Stride: 0x1},
+		unicode.Range16{Lo: 0x1e0, Hi: 0x1e0, Stride: 0x1},
+		unicode.Range16{Lo: 0x1e2, Hi: 0x1e2, Stride: 0x1},
+		unicode.Range16{Lo: 0x1e4, Hi: 0x1e4, Stride: 0x1},
+		unicode.Range16{Lo: 0x1e6, Hi: 0x1e6, Stride: 0x1},
+		unicode.Range16{Lo: 0x1e8, Hi: 0x1e8, Stride: 0x1},
+		unicode.Range16{Lo: 0x1ea, Hi: 0x1ea, Stride: 0x1},
+		unicode.Range16{Lo: 0x1ec, Hi: 0x1ec, Stride: 0x1},
+		unicode.Range16{Lo: 0x1ee, Hi: 0x1ee, Stride: 0x1},
+		unicode.Range16{Lo: 0x1f1, Hi: 0x1f2, Stride: 0x1},
+		unicode.Range16{Lo: 0x1f4, Hi: 0x1f4, Stride: 0x1},
+		unicode.Range16{Lo: 0x1f6, Hi: 0x1f8, Stride: 0x1},
+		unicode.Range16{Lo: 0x1fa, Hi: 0x1fa, Stride: 0x1},
+		unicode.Range16{Lo: 0x1fc, Hi: 0x1fc, Stride: 0x1},
+		unicode.Range16{Lo: 0x1fe, Hi: 0x1fe, Stride: 0x1},
+		unicode.Range16{Lo: 0x200, Hi: 0x200, Stride: 0x1},
+		unicode.Range16{Lo: 0x202, Hi: 0x202, Stride: 0x1},
+		unicode.Range16{Lo: 0x204, Hi: 0x204, Stride: 0x1},
+		unicode.Range16{Lo: 0x206, Hi: 0x206, Stride: 0x1},
+		unicode.Range16{Lo: 0x208, Hi: 0x208, Stride: 0x1},
+		unicode.Range16{Lo: 0x20a, Hi: 0x20a, Stride: 0x1},
+		unicode.Range16{Lo: 0x20c, Hi: 0x20c, Stride: 0x1},
+		unicode.Range16{Lo: 0x20e, Hi: 0x20e, Stride: 0x1},
+		unicode.Range16{Lo: 0x210, Hi: 0x210, Stride: 0x1},
+		unicode.Range16{Lo: 0x212, Hi: 0x212, Stride: 0x1},
+		unicode.Range16{Lo: 0x214, Hi: 0x214, Stride: 0x1},
+		unicode.Range16{Lo: 0x216, Hi: 0x216, Stride: 0x1},
+		unicode.Range16{Lo: 0x218, Hi: 0x218, Stride: 0x1},
+		unicode.Range16{Lo: 0x21a, Hi: 0x21a, Stride: 0x1},
+		unicode.Range16{Lo: 0x21c, Hi: 0x21c, Stride: 0x1},
+		unicode.Range16{Lo: 0x21e, Hi: 0x21e, Stride: 0x1},
+		unicode.Range16{Lo: 0x220, Hi: 0x220, Stride: 0x1},
+		unicode.Range16{Lo: 0x222, Hi: 0x222, Stride: 0x1},
+		unicode.Range16{Lo: 0x224, Hi: 0x224, Stride: 0x1},
+		unicode.Range16{Lo: 0x226, Hi: 0x226, Stride: 0x1},
+		unicode.Range16{Lo: 0x228, Hi: 0x228, Stride: 0x1},
+		unicode.Range16{Lo: 0x22a, Hi: 0x22a, Stride: 0x1},
+		unicode.Range16{Lo: 0x22c, Hi: 0x22c, Stride: 0x1},
+		unicode.Range16{Lo: 0x22e, Hi: 0x22e, Stride: 0x1},
+		unicode.Range16{Lo: 0x230, Hi: 0x230, Stride: 0x1},
+		unicode.Range16{Lo: 0x232, Hi: 0x232, Stride: 0x1},
+		unicode.Range16{Lo: 0x23a, Hi: 0x23b, Stride: 0x1},
+		unicode.Range16{Lo: 0x23d, Hi: 0x23e, Stride: 0x1},
+		unicode.Range16{Lo: 0x241, Hi: 0x241, Stride: 0x1},
+		unicode.Range16{Lo: 0x243, Hi: 0x246, Stride: 0x1},
+		unicode.Range16{Lo: 0x248, Hi: 0x248, Stride: 0x1},
+		unicode.Range16{Lo: 0x24a, Hi: 0x24a, Stride: 0x1},
+		unicode.Range16{Lo: 0x24c, Hi: 0x24c, Stride: 0x1},
+		unicode.Range16{Lo: 0x24e, Hi: 0x24e, Stride: 0x1},
+		unicode.Range16{Lo: 0x370, Hi: 0x370, Stride: 0x1},
+		unicode.Range16{Lo: 0x372, Hi: 0x372, Stride: 0x1},
+		unicode.Range16{Lo: 0x376, Hi: 0x376, Stride: 0x1},
+		unicode.Range16{Lo: 0x37f, Hi: 0x37f, Stride: 0x1},
+		unicode.Range16{Lo: 0x386, Hi: 0x386, Stride: 0x1},
+		unicode.Range16{Lo: 0x388, Hi: 0x38a, Stride: 0x1},
+		unicode.Range16{Lo: 0x38c, Hi: 0x38c, Stride: 0x1},
+		unicode.Range16{Lo: 0x38e, Hi: 0x38f, Stride: 0x1},
+		unicode.Range16{Lo: 0x391, Hi: 0x3a1, Stride: 0x1},
+		unicode.Range16{Lo: 0x3a3, Hi: 0x3ab, Stride: 0x1},
+		unicode.Range16{Lo: 0x3cf, Hi: 0x3cf, Stride: 0x1},
+		unicode.Range16{Lo: 0x3d2, Hi: 0x3d4, Stride: 0x1},
+		unicode.Range16{Lo: 0x3d8, Hi: 0x3d8, Stride: 0x1},
+		unicode.Range16{Lo: 0x3da, Hi: 0x3da, Stride: 0x1},
+		unicode.Range16{Lo: 0x3dc, Hi: 0x3dc, Stride: 0x1},
+		unicode.Range16{Lo: 0x3de, Hi: 0x3de, Stride: 0x1},
+		unicode.Range16{Lo: 0x3e0, Hi: 0x3e0, Stride: 0x1},
+		unicode.Range16{Lo: 0x3e2, Hi: 0x3e2, Stride: 0x1},
+		unicode.Range16{Lo: 0x3e4, Hi: 0x3e4, Stride: 0x1},
+		unicode.Range16{Lo: 0x3e6, Hi: 0x3e6, Stride: 0x1},
+		unicode.Range16{Lo: 0x3e8, Hi: 0x3e8, Stride: 0x1},
+		unicode.Range16{Lo: 0x3ea, Hi: 0x3ea, Stride: 0x1},
+		unicode.Range16{Lo: 0x3ec, Hi: 0x3ec, Stride: 0x1},
+		unicode.Range16{Lo: 0x3ee, Hi: 0x3ee, Stride: 0x1},
+		unicode.Range16{Lo: 0x3f4, Hi: 0x3f4, Stride: 0x1},
+		unicode.Range16{Lo: 0x3f7, Hi: 0x3f7, Stride: 0x1},
+		unicode.Range16{Lo: 0x3f9, Hi: 0x3fa, Stride: 0x1},
+		unicode.Range16{Lo: 0x3fd, Hi: 0x42f, Stride: 0x1},
+		unicode.Range16{Lo: 0x460, Hi: 0x460, Stride: 0x1},
+		unicode.Range16{Lo: 0x462, Hi: 0x462, Stride: 0x1},
+		unicode.Range16{Lo: 0x464, Hi: 0x464, Stride: 0x1},
+		unicode.Range16{Lo: 0x466, Hi: 0x466, Stride: 0x1},
+		unicode.Range16{Lo: 0x468, Hi: 0x468, Stride: 0x1},
+		unicode.Range16{Lo: 0x46a, Hi: 0x46a, Stride: 0x1},
+		unicode.Range16{Lo: 0x46c, Hi: 0x46c, Stride: 0x1},
+		unicode.Range16{Lo: 0x46e, Hi: 0x46e, Stride: 0x1},
+		unicode.Range16{Lo: 0x470, Hi: 0x470, Stride: 0x1},
+		unicode.Range16{Lo: 0x472, Hi: 0x472, Stride: 0x1},
+		unicode.Range16{Lo: 0x474, Hi: 0x474, Stride: 0x1},
+		unicode.Range16{Lo: 0x476, Hi: 0x476, Stride: 0x1},
+		unicode.Range16{Lo: 0x478, Hi: 0x478, Stride: 0x1},
+		unicode.Range16{Lo: 0x47a, Hi: 0x47a, Stride: 0x1},
+		unicode.Range16{Lo: 0x47c, Hi: 0x47c, Stride: 0x1},
+		unicode.Range16{Lo: 0x47e, Hi: 0x47e, Stride: 0x1},
+		unicode.Range16{Lo: 0x480, Hi: 0x480, Stride: 0x1},
+		unicode.Range16{Lo: 0x48a, Hi: 0x48a, Stride: 0x1},
+		unicode.Range16{Lo: 0x48c, Hi: 0x48c, Stride: 0x1},
+		unicode.Range16{Lo: 0x48e, Hi: 0x48e, Stride: 0x1},
+		unicode.Range16{Lo: 0x490, Hi: 0x490, Stride: 0x1},
+		unicode.Range16{Lo: 0x492, Hi: 0x492, Stride: 0x1},
+		unicode.Range16{Lo: 0x494, Hi: 0x494, Stride: 0x1},
+		unicode.Range16{Lo: 0x496, Hi: 0x496, Stride: 0x1},
+		unicode.Range16{Lo: 0x498, Hi: 0x498, Stride: 0x1},
+		unicode.Range16{Lo: 0x49a, Hi: 0x49a, Stride: 0x1},
+		unicode.Range16{Lo: 0x49c, Hi: 0x49c, Stride: 0x1},
+		unicode.Range16{Lo: 0x49e, Hi: 0x49e, Stride: 0x1},
+		unicode.Range16{Lo: 0x4a0, Hi: 0x4a0, Stride: 0x1},
+		unicode.Range16{Lo: 0x4a2, Hi: 0x4a2, Stride: 0x1},
+		unicode.Range16{Lo: 0x4a4, Hi: 0x4a4, Stride: 0x1},
+		unicode.Range16{Lo: 0x4a6, Hi: 0x4a6, Stride: 0x1},
+		unicode.Range16{Lo: 0x4a8, Hi: 0x4a8, Stride: 0x1},
+		unicode.Range16{Lo: 0x4aa, Hi: 0x4aa, Stride: 0x1},
+		unicode.Range16{Lo: 0x4ac, Hi: 0x4ac, Stride: 0x1},
+		unicode.Range16{Lo: 0x4ae, Hi: 0x4ae, Stride: 0x1},
+		unicode.Range16{Lo: 0x4b0, Hi: 0x4b0, Stride: 0x1},
+		unicode.Range16{Lo: 0x4b2, Hi: 0x4b2, Stride: 0x1},
+		unicode.Range16{Lo: 0x4b4, Hi: 0x4b4, Stride: 0x1},
+		unicode.Range16{Lo: 0x4b6, Hi: 0x4b6, Stride: 0x1},
+		unicode.Range16{Lo: 0x4b8, Hi: 0x4b8, Stride: 0x1},
+		unicode.Range16{Lo: 0x4ba, Hi: 0x4ba, Stride: 0x1},
+		unicode.Range16{Lo: 0x4bc, Hi: 0x4bc, Stride: 0x1},
+		unicode.Range16{Lo: 0x4be, Hi: 0x4be, Stride: 0x1},
+		unicode.Range16{Lo: 0x4c0, Hi: 0x4c1, Stride: 0x1},
+		unicode.Range16{Lo: 0x4c3, Hi: 0x4c3, Stride: 0x1},
+		unicode.Range16{Lo: 0x4c5, Hi: 0x4c5, Stride: 0x1},
+		unicode.Range16{Lo: 0x4c7, Hi: 0x4c7, Stride: 0x1},
+		unicode.Range16{Lo: 0x4c9, Hi: 0x4c9, Stride: 0x1},
+		unicode.Range16{Lo: 0x4cb, Hi: 0x4cb, Stride: 0x1},
+		unicode.Range16{Lo: 0x4cd, Hi: 0x4cd, Stride: 0x1},
+		unicode.Range16{Lo: 0x4d0, Hi: 0x4d0, Stride: 0x1},
+		unicode.Range16{Lo: 0x4d2, Hi: 0x4d2, Stride: 0x1},
+		unicode.Range16{Lo: 0x4d4, Hi: 0x4d4, Stride: 0x1},
+		unicode.Range16{Lo: 0x4d6, Hi: 0x4d6, Stride: 0x1},
+		unicode.Range16{Lo: 0x4d8, Hi: 0x4d8, Stride: 0x1},
+		unicode.Range16{Lo: 0x4da, Hi: 0x4da, Stride: 0x1},
+		unicode.Range16{Lo: 0x4dc, Hi: 0x4dc, Stride: 0x1},
+		unicode.Range16{Lo: 0x4de, Hi: 0x4de, Stride: 0x1},
+		unicode.Range16{Lo: 0x4e0, Hi: 0x4e0, Stride: 0x1},
+		unicode.Range16{Lo: 0x4e2, Hi: 0x4e2, Stride: 0x1},
+		unicode.Range16{Lo: 0x4e4, Hi: 0x4e4, Stride: 0x1},
+		unicode.Range16{Lo: 0x4e6, Hi: 0x4e6, Stride: 0x1},
+		unicode.Range16{Lo: 0x4e8, Hi: 0x4e8, Stride: 0x1},
+		unicode.Range16{Lo: 0x4ea, Hi: 0x4ea, Stride: 0x1},
+		unicode.Range16{Lo: 0x4ec, Hi: 0x4ec, Stride: 0x1},
+		unicode.Range16{Lo: 0x4ee, Hi: 0x4ee, Stride: 0x1},
+		unicode.Range16{Lo: 0x4f0, Hi: 0x4f0, Stride: 0x1},
+		unicode.Range16{Lo: 0x4f2, Hi: 0x4f2, Stride: 0x1},
+		unicode.Range16{Lo: 0x4f4, Hi: 0x4f4, Stride: 0x1},
+		unicode.Range16{Lo: 0x4f6, Hi: 0x4f6, Stride: 0x1},
+		unicode.Range16{Lo: 0x4f8, Hi: 0x4f8, Stride: 0x1},
+		unicode.Range16{Lo: 0x4fa, Hi: 0x4fa, Stride: 0x1},
+		unicode.Range16{Lo: 0x4fc, Hi: 0x4fc, Stride: 0x1},
+		unicode.Range16{Lo: 0x4fe, Hi: 0x4fe, Stride: 0x1},
+		unicode.Range16{Lo: 0x500, Hi: 0x500, Stride: 0x1},
+		unicode.Range16{Lo: 0x502, Hi: 0x502, Stride: 0x1},
+		unicode.Range16{Lo: 0x504, Hi: 0x504, Stride: 0x1},
+		unicode.Range16{Lo: 0x506, Hi: 0x506, Stride: 0x1},
+		unicode.Range16{Lo: 0x508, Hi: 0x508, Stride: 0x1},
+		unicode.Range16{Lo: 0x50a, Hi: 0x50a, Stride: 0x1},
+		unicode.Range16{Lo: 0x50c, Hi: 0x50c, Stride: 0x1},
+		unicode.Range16{Lo: 0x50e, Hi: 0x50e, Stride: 0x1},
+		unicode.Range16{Lo: 0x510, Hi: 0x510, Stride: 0x1},
+		unicode.Range16{Lo: 0x512, Hi: 0x512, Stride: 0x1},
+		unicode.Range16{Lo: 0x514, Hi: 0x514, Stride: 0x1},
+		unicode.Range16{Lo: 0x516, Hi: 0x516, Stride: 0x1},
+		unicode.Range16{Lo: 0x518, Hi: 0x518, Stride: 0x1},
+		unicode.Range16{Lo: 0x51a, Hi: 0x51a, Stride: 0x1},
+		unicode.Range16{Lo: 0x51c, Hi: 0x51c, Stride: 0x1},
+		unicode.Range16{Lo: 0x51e, Hi: 0x51e, Stride: 0x1},
+		unicode.Range16{Lo: 0x520, Hi: 0x520, Stride: 0x1},
+		unicode.Range16{Lo: 0x522, Hi: 0x522, Stride: 0x1},
+		unicode.Range16{Lo: 0x524, Hi: 0x524, Stride: 0x1},
+		unicode.Range16{Lo: 0x526, Hi: 0x526, Stride: 0x1},
+		unicode.Range16{Lo: 0x528, Hi: 0x528, Stride: 0x1},
+		unicode.Range16{Lo: 0x52a, Hi: 0x52a, Stride: 0x1},
+		unicode.Range16{Lo: 0x52c, Hi: 0x52c, Stride: 0x1},
+		unicode.Range16{Lo: 0x52e, Hi: 0x52e, Stride: 0x1},
+		unicode.Range16{Lo: 0x531, Hi: 0x556, Stride: 0x1},
+		unicode.Range16{Lo: 0x10a0, Hi: 0x10c5, Stride: 0x1},
+		unicode.Range16{Lo: 0x10c7, Hi: 0x10c7, Stride: 0x1},
+		unicode.Range16{Lo: 0x10cd, Hi: 0x10cd, Stride: 0x1},
+		unicode.Range16{Lo: 0x13a0, Hi: 0x13f5, Stride: 0x1},
+		unicode.Range16{Lo: 0x1e00, Hi: 0x1e00, Stride: 0x1},
+		unicode.Range16{Lo: 0x1e02, Hi: 0x1e02, Stride: 0x1},
+		unicode.Range16{Lo: 0x1e04, Hi: 0x1e04, Stride: 0x1},
+		unicode.Range16{Lo: 0x1e06, Hi: 0x1e06, Stride: 0x1},
+		unicode.Range16{Lo: 0x1e08, Hi: 0x1e08, Stride: 0x1},
+		unicode.Range16{Lo: 0x1e0a, Hi: 0x1e0a, Stride: 0x1},
+		unicode.Range16{Lo: 0x1e0c, Hi: 0x1e0c, Stride: 0x1},
+		unicode.Range16{Lo: 0x1e0e, Hi: 0x1e0e, Stride: 0x1},
+		unicode.Range16{Lo: 0x1e10, Hi: 0x1e10, Stride: 0x1},
+		unicode.Range16{Lo: 0x1e12, Hi: 0x1e12, Stride: 0x1},
+		unicode.Range16{Lo: 0x1e14, Hi: 0x1e14, Stride: 0x1},
+		unicode.Range16{Lo: 0x1e16, Hi: 0x1e16, Stride: 0x1},
+		unicode.Range16{Lo: 0x1e18, Hi: 0x1e18, Stride: 0x1},
+		unicode.Range16{Lo: 0x1e1a, Hi: 0x1e1a, Stride: 0x1},
+		unicode.Range16{Lo: 0x1e1c, Hi: 0x1e1c, Stride: 0x1},
+		unicode.Range16{Lo: 0x1e1e, Hi: 0x1e1e, Stride: 0x1},
+		unicode.Range16{Lo: 0x1e20, Hi: 0x1e20, Stride: 0x1},
+		unicode.Range16{Lo: 0x1e22, Hi: 0x1e22, Stride: 0x1},
+		unicode.Range16{Lo: 0x1e24, Hi: 0x1e24, Stride: 0x1},
+		unicode.Range16{Lo: 0x1e26, Hi: 0x1e26, Stride: 0x1},
+		unicode.Range16{Lo: 0x1e28, Hi: 0x1e28, Stride: 0x1},
+		unicode.Range16{Lo: 0x1e2a, Hi: 0x1e2a, Stride: 0x1},
+		unicode.Range16{Lo: 0x1e2c, Hi: 0x1e2c, Stride: 0x1},
+		unicode.Range16{Lo: 0x1e2e, Hi: 0x1e2e, Stride: 0x1},
+		unicode.Range16{Lo: 0x1e30, Hi: 0x1e30, Stride: 0x1},
+		unicode.Range16{Lo: 0x1e32, Hi: 0x1e32, Stride: 0x1},
+		unicode.Range16{Lo: 0x1e34, Hi: 0x1e34, Stride: 0x1},
+		unicode.Range16{Lo: 0x1e36, Hi: 0x1e36, Stride: 0x1},
+		unicode.Range16{Lo: 0x1e38, Hi: 0x1e38, Stride: 0x1},
+		unicode.Range16{Lo: 0x1e3a, Hi: 0x1e3a, Stride: 0x1},
+		unicode.Range16{Lo: 0x1e3c, Hi: 0x1e3c, Stride: 0x1},
+		unicode.Range16{Lo: 0x1e3e, Hi: 0x1e3e, Stride: 0x1},
+		unicode.Range16{Lo: 0x1e40, Hi: 0x1e40, Stride: 0x1},
+		unicode.Range16{Lo: 0x1e42, Hi: 0x1e42, Stride: 0x1},
+		unicode.Range16{Lo: 0x1e44, Hi: 0x1e44, Stride: 0x1},
+		unicode.Range16{Lo: 0x1e46, Hi: 0x1e46, Stride: 0x1},
+		unicode.Range16{Lo: 0x1e48, Hi: 0x1e48, Stride: 0x1},
+		unicode.Range16{Lo: 0x1e4a, Hi: 0x1e4a, Stride: 0x1},
+		unicode.Range16{Lo: 0x1e4c, Hi: 0x1e4c, Stride: 0x1},
+		unicode.Range16{Lo: 0x1e4e, Hi: 0x1e4e, Stride: 0x1},
+		unicode.Range16{Lo: 0x1e50, Hi: 0x1e50, Stride: 0x1},
+		unicode.Range16{Lo: 0x1e52, Hi: 0x1e52, Stride: 0x1},
+		unicode.Range16{Lo: 0x1e54, Hi: 0x1e54, Stride: 0x1},
+		unicode.Range16{Lo: 0x1e56, Hi: 0x1e56, Stride: 0x1},
+		unicode.Range16{Lo: 0x1e58, Hi: 0x1e58, Stride: 0x1},
+		unicode.Range16{Lo: 0x1e5a, Hi: 0x1e5a, Stride: 0x1},
+		unicode.Range16{Lo: 0x1e5c, Hi: 0x1e5c, Stride: 0x1},
+		unicode.Range16{Lo: 0x1e5e, Hi: 0x1e5e, Stride: 0x1},
+		unicode.Range16{Lo: 0x1e60, Hi: 0x1e60, Stride: 0x1},
+		unicode.Range16{Lo: 0x1e62, Hi: 0x1e62, Stride: 0x1},
+		unicode.Range16{Lo: 0x1e64, Hi: 0x1e64, Stride: 0x1},
+		unicode.Range16{Lo: 0x1e66, Hi: 0x1e66, Stride: 0x1},
+		unicode.Range16{Lo: 0x1e68, Hi: 0x1e68, Stride: 0x1},
+		unicode.Range16{Lo: 0x1e6a, Hi: 0x1e6a, Stride: 0x1},
+		unicode.Range16{Lo: 0x1e6c, Hi: 0x1e6c, Stride: 0x1},
+		unicode.Range16{Lo: 0x1e6e, Hi: 0x1e6e, Stride: 0x1},
+		unicode.Range16{Lo: 0x1e70, Hi: 0x1e70, Stride: 0x1},
+		unicode.Range16{Lo: 0x1e72, Hi: 0x1e72, Stride: 0x1},
+		unicode.Range16{Lo: 0x1e74, Hi: 0x1e74, Stride: 0x1},
+		unicode.Range16{Lo: 0x1e76, Hi: 0x1e76, Stride: 0x1},
+		unicode.Range16{Lo: 0x1e78, Hi: 0x1e78, Stride: 0x1},
+		unicode.Range16{Lo: 0x1e7a, Hi: 0x1e7a, Stride: 0x1},
+		unicode.Range16{Lo: 0x1e7c, Hi: 0x1e7c, Stride: 0x1},
+		unicode.Range16{Lo: 0x1e7e, Hi: 0x1e7e, Stride: 0x1},
+		unicode.Range16{Lo: 0x1e80, Hi: 0x1e80, Stride: 0x1},
+		unicode.Range16{Lo: 0x1e82, Hi: 0x1e82, Stride: 0x1},
+		unicode.Range16{Lo: 0x1e84, Hi: 0x1e84, Stride: 0x1},
+		unicode.Range16{Lo: 0x1e86, Hi: 0x1e86, Stride: 0x1},
+		unicode.Range16{Lo: 0x1e88, Hi: 0x1e88, Stride: 0x1},
+		unicode.Range16{Lo: 0x1e8a, Hi: 0x1e8a, Stride: 0x1},
+		unicode.Range16{Lo: 0x1e8c, Hi: 0x1e8c, Stride: 0x1},
+		unicode.Range16{Lo: 0x1e8e, Hi: 0x1e8e, Stride: 0x1},
+		unicode.Range16{Lo: 0x1e90, Hi: 0x1e90, Stride: 0x1},
+		unicode.Range16{Lo: 0x1e92, Hi: 0x1e92, Stride: 0x1},
+		unicode.Range16{Lo: 0x1e94, Hi: 0x1e94, Stride: 0x1},
+		unicode.Range16{Lo: 0x1e9e, Hi: 0x1e9e, Stride: 0x1},
+		unicode.Range16{Lo: 0x1ea0, Hi: 0x1ea0, Stride: 0x1},
+		unicode.Range16{Lo: 0x1ea2, Hi: 0x1ea2, Stride: 0x1},
+		unicode.Range16{Lo: 0x1ea4, Hi: 0x1ea4, Stride: 0x1},
+		unicode.Range16{Lo: 0x1ea6, Hi: 0x1ea6, Stride: 0x1},
+		unicode.Range16{Lo: 0x1ea8, Hi: 0x1ea8, Stride: 0x1},
+		unicode.Range16{Lo: 0x1eaa, Hi: 0x1eaa, Stride: 0x1},
+		unicode.Range16{Lo: 0x1eac, Hi: 0x1eac, Stride: 0x1},
+		unicode.Range16{Lo: 0x1eae, Hi: 0x1eae, Stride: 0x1},
+		unicode.Range16{Lo: 0x1eb0, Hi: 0x1eb0, Stride: 0x1},
+		unicode.Range16{Lo: 0x1eb2, Hi: 0x1eb2, Stride: 0x1},
+		unicode.Range16{Lo: 0x1eb4, Hi: 0x1eb4, Stride: 0x1},
+		unicode.Range16{Lo: 0x1eb6, Hi: 0x1eb6, Stride: 0x1},
+		unicode.Range16{Lo: 0x1eb8, Hi: 0x1eb8, Stride: 0x1},
+		unicode.Range16{Lo: 0x1eba, Hi: 0x1eba, Stride: 0x1},
+		unicode.Range16{Lo: 0x1ebc, Hi: 0x1ebc, Stride: 0x1},
+		unicode.Range16{Lo: 0x1ebe, Hi: 0x1ebe, Stride: 0x1},
+		unicode.Range16{Lo: 0x1ec0, Hi: 0x1ec0, Stride: 0x1},
+		unicode.Range16{Lo: 0x1ec2, Hi: 0x1ec2, Stride: 0x1},
+		unicode.Range16{Lo: 0x1ec4, Hi: 0x1ec4, Stride: 0x1},
+		unicode.Range16{Lo: 0x1ec6, Hi: 0x1ec6, Stride: 0x1},
+		unicode.Range16{Lo: 0x1ec8, Hi: 0x1ec8, Stride: 0x1},
+		unicode.Range16{Lo: 0x1eca, Hi: 0x1eca, Stride: 0x1},
+		unicode.Range16{Lo: 0x1ecc, Hi: 0x1ecc, Stride: 0x1},
+		unicode.Range16{Lo: 0x1ece, Hi: 0x1ece, Stride: 0x1},
+		unicode.Range16{Lo: 0x1ed0, Hi: 0x1ed0, Stride: 0x1},
+		unicode.Range16{Lo: 0x1ed2, Hi: 0x1ed2, Stride: 0x1},
+		unicode.Range16{Lo: 0x1ed4, Hi: 0x1ed4, Stride: 0x1},
+		unicode.Range16{Lo: 0x1ed6, Hi: 0x1ed6, Stride: 0x1},
+		unicode.Range16{Lo: 0x1ed8, Hi: 0x1ed8, Stride: 0x1},
+		unicode.Range16{Lo: 0x1eda, Hi: 0x1eda, Stride: 0x1},
+		unicode.Range16{Lo: 0x1edc, Hi: 0x1edc, Stride: 0x1},
+		unicode.Range16{Lo: 0x1ede, Hi: 0x1ede, Stride: 0x1},
+		unicode.Range16{Lo: 0x1ee0, Hi: 0x1ee0, Stride: 0x1},
+		unicode.Range16{Lo: 0x1ee2, Hi: 0x1ee2, Stride: 0x1},
+		unicode.Range16{Lo: 0x1ee4, Hi: 0x1ee4, Stride: 0x1},
+		unicode.Range16{Lo: 0x1ee6, Hi: 0x1ee6, Stride: 0x1},
+		unicode.Range16{Lo: 0x1ee8, Hi: 0x1ee8, Stride: 0x1},
+		unicode.Range16{Lo: 0x1eea, Hi: 0x1eea, Stride: 0x1},
+		unicode.Range16{Lo: 0x1eec, Hi: 0x1eec, Stride: 0x1},
+		unicode.Range16{Lo: 0x1eee, Hi: 0x1eee, Stride: 0x1},
+		unicode.Range16{Lo: 0x1ef0, Hi: 0x1ef0, Stride: 0x1},
+		unicode.Range16{Lo: 0x1ef2, Hi: 0x1ef2, Stride: 0x1},
+		unicode.Range16{Lo: 0x1ef4, Hi: 0x1ef4, Stride: 0x1},
+		unicode.Range16{Lo: 0x1ef6, Hi: 0x1ef6, Stride: 0x1},
+		unicode.Range16{Lo: 0x1ef8, Hi: 0x1ef8, Stride: 0x1},
+		unicode.Range16{Lo: 0x1efa, Hi: 0x1efa, Stride: 0x1},
+		unicode.Range16{Lo: 0x1efc, Hi: 0x1efc, Stride: 0x1},
+		unicode.Range16{Lo: 0x1efe, Hi: 0x1efe, Stride: 0x1},
+		unicode.Range16{Lo: 0x1f08, Hi: 0x1f0f, Stride: 0x1},
+		unicode.Range16{Lo: 0x1f18, Hi: 0x1f1d, Stride: 0x1},
+		unicode.Range16{Lo: 0x1f28, Hi: 0x1f2f, Stride: 0x1},
+		unicode.Range16{Lo: 0x1f38, Hi: 0x1f3f, Stride: 0x1},
+		unicode.Range16{Lo: 0x1f48, Hi: 0x1f4d, Stride: 0x1},
+		unicode.Range16{Lo: 0x1f59, Hi: 0x1f59, Stride: 0x1},
+		unicode.Range16{Lo: 0x1f5b, Hi: 0x1f5b, Stride: 0x1},
+		unicode.Range16{Lo: 0x1f5d, Hi: 0x1f5d, Stride: 0x1},
+		unicode.Range16{Lo: 0x1f5f, Hi: 0x1f5f, Stride: 0x1},
+		unicode.Range16{Lo: 0x1f68, Hi: 0x1f6f, Stride: 0x1},
+		unicode.Range16{Lo: 0x1f88, Hi: 0x1f8f, Stride: 0x1},
+		unicode.Range16{Lo: 0x1f98, Hi: 0x1f9f, Stride: 0x1},
+		unicode.Range16{Lo: 0x1fa8, Hi: 0x1faf, Stride: 0x1},
+		unicode.Range16{Lo: 0x1fb8, Hi: 0x1fbc, Stride: 0x1},
+		unicode.Range16{Lo: 0x1fc8, Hi: 0x1fcc, Stride: 0x1},
+		unicode.Range16{Lo: 0x1fd8, Hi: 0x1fdb, Stride: 0x1},
+		unicode.Range16{Lo: 0x1fe8, Hi: 0x1fec, Stride: 0x1},
+		unicode.Range16{Lo: 0x1ff8, Hi: 0x1ffc, Stride: 0x1},
+		unicode.Range16{Lo: 0x2102, Hi: 0x2102, Stride: 0x1},
+		unicode.Range16{Lo: 0x2107, Hi: 0x2107, Stride: 0x1},
+		unicode.Range16{Lo: 0x210b, Hi: 0x210d, Stride: 0x1},
+		unicode.Range16{Lo: 0x2110, Hi: 0x2112, Stride: 0x1},
+		unicode.Range16{Lo: 0x2115, Hi: 0x2115, Stride: 0x1},
+		unicode.Range16{Lo: 0x2119, Hi: 0x211d, Stride: 0x1},
+		unicode.Range16{Lo: 0x2124, Hi: 0x2124, Stride: 0x1},
+		unicode.Range16{Lo: 0x2126, Hi: 0x2126, Stride: 0x1},
+		unicode.Range16{Lo: 0x2128, Hi: 0x2128, Stride: 0x1},
+		unicode.Range16{Lo: 0x212a, Hi: 0x212d, Stride: 0x1},
+		unicode.Range16{Lo: 0x2130, Hi: 0x2133, Stride: 0x1},
+		unicode.Range16{Lo: 0x213e, Hi: 0x213f, Stride: 0x1},
+		unicode.Range16{Lo: 0x2145, Hi: 0x2145, Stride: 0x1},
+		unicode.Range16{Lo: 0x2160, Hi: 0x216f, Stride: 0x1},
+		unicode.Range16{Lo: 0x2183, Hi: 0x2183, Stride: 0x1},
+		unicode.Range16{Lo: 0x24b6, Hi: 0x24cf, Stride: 0x1},
+		unicode.Range16{Lo: 0x2c00, Hi: 0x2c2f, Stride: 0x1},
+		unicode.Range16{Lo: 0x2c60, Hi: 0x2c60, Stride: 0x1},
+		unicode.Range16{Lo: 0x2c62, Hi: 0x2c64, Stride: 0x1},
+		unicode.Range16{Lo: 0x2c67, Hi: 0x2c67, Stride: 0x1},
+		unicode.Range16{Lo: 0x2c69, Hi: 0x2c69, Stride: 0x1},
+		unicode.Range16{Lo: 0x2c6b, Hi: 0x2c6b, Stride: 0x1},
+		unicode.Range16{Lo: 0x2c6d, Hi: 0x2c70, Stride: 0x1},
+		unicode.Range16{Lo: 0x2c72, Hi: 0x2c72, Stride: 0x1},
+		unicode.Range16{Lo: 0x2c75, Hi: 0x2c75, Stride: 0x1},
+		unicode.Range16{Lo: 0x2c7e, Hi: 0x2c80, Stride: 0x1},
+		unicode.Range16{Lo: 0x2c82, Hi: 0x2c82, Stride: 0x1},
+		unicode.Range16{Lo: 0x2c84, Hi: 0x2c84, Stride: 0x1},
+		unicode.Range16{Lo: 0x2c86, Hi: 0x2c86, Stride: 0x1},
+		unicode.Range16{Lo: 0x2c88, Hi: 0x2c88, Stride: 0x1},
+		unicode.Range16{Lo: 0x2c8a, Hi: 0x2c8a, Stride: 0x1},
+		unicode.Range16{Lo: 0x2c8c, Hi: 0x2c8c, Stride: 0x1},
+		unicode.Range16{Lo: 0x2c8e, Hi: 0x2c8e, Stride: 0x1},
+		unicode.Range16{Lo: 0x2c90, Hi: 0x2c90, Stride: 0x1},
+		unicode.Range16{Lo: 0x2c92, Hi: 0x2c92, Stride: 0x1},
+		unicode.Range16{Lo: 0x2c94, Hi: 0x2c94, Stride: 0x1},
+		unicode.Range16{Lo: 0x2c96, Hi: 0x2c96, Stride: 0x1},
+		unicode.Range16{Lo: 0x2c98, Hi: 0x2c98, Stride: 0x1},
+		unicode.Range16{Lo: 0x2c9a, Hi: 0x2c9a, Stride: 0x1},
+		unicode.Range16{Lo: 0x2c9c, Hi: 0x2c9c, Stride: 0x1},
+		unicode.Range16{Lo: 0x2c9e, Hi: 0x2c9e, Stride: 0x1},
+		unicode.Range16{Lo: 0x2ca0, Hi: 0x2ca0, Stride: 0x1},
+		unicode.Range16{Lo: 0x2ca2, Hi: 0x2ca2, Stride: 0x1},
+		unicode.Range16{Lo: 0x2ca4, Hi: 0x2ca4, Stride: 0x1},
+		unicode.Range16{Lo: 0x2ca6, Hi: 0x2ca6, Stride: 0x1},
+		unicode.Range16{Lo: 0x2ca8, Hi: 0x2ca8, Stride: 0x1},
+		unicode.Range16{Lo: 0x2caa, Hi: 0x2caa, Stride: 0x1},
+		unicode.Range16{Lo: 0x2cac, Hi: 0x2cac, Stride: 0x1},
+		unicode.Range16{Lo: 0x2cae, Hi: 0x2cae, Stride: 0x1},
+		unicode.Range16{Lo: 0x2cb0, Hi: 0x2cb0, Stride: 0x1},
+		unicode.Range16{Lo: 0x2cb2, Hi: 0x2cb2, Stride: 0x1},
+		unicode.Range16{Lo: 0x2cb4, Hi: 0x2cb4, Stride: 0x1},
+		unicode.Range16{Lo: 0x2cb6, Hi: 0x2cb6, Stride: 0x1},
+		unicode.Range16{Lo: 0x2cb8, Hi: 0x2cb8, Stride: 0x1},
+		unicode.Range16{Lo: 0x2cba, Hi: 0x2cba, Stride: 0x1},
+		unicode.Range16{Lo: 0x2cbc, Hi: 0x2cbc, Stride: 0x1},
+		unicode.Range16{Lo: 0x2cbe, Hi: 0x2cbe, Stride: 0x1},
+		unicode.Range16{Lo: 0x2cc0, Hi: 0x2cc0, Stride: 0x1},
+		unicode.Range16{Lo: 0x2cc2, Hi: 0x2cc2, Stride: 0x1},
+		unicode.Range16{Lo: 0x2cc4, Hi: 0x2cc4, Stride: 0x1},
+		unicode.Range16{Lo: 0x2cc6, Hi: 0x2cc6, Stride: 0x1},
+		unicode.Range16{Lo: 0x2cc8, Hi: 0x2cc8, Stride: 0x1},
+		unicode.Range16{Lo: 0x2cca, Hi: 0x2cca, Stride: 0x1},
+		unicode.Range16{Lo: 0x2ccc, Hi: 0x2ccc, Stride: 0x1},
+		unicode.Range16{Lo: 0x2cce, Hi: 0x2cce, Stride: 0x1},
+		unicode.Range16{Lo: 0x2cd0, Hi: 0x2cd0, Stride: 0x1},
+		unicode.Range16{Lo: 0x2cd2, Hi: 0x2cd2, Stride: 0x1},
+		unicode.Range16{Lo: 0x2cd4, Hi: 0x2cd4, Stride: 0x1},
+		unicode.Range16{Lo: 0x2cd6, Hi: 0x2cd6, Stride: 0x1},
+		unicode.Range16{Lo: 0x2cd8, Hi: 0x2cd8, Stride: 0x1},
+		unicode.Range16{Lo: 0x2cda, Hi: 0x2cda, Stride: 0x1},
+		unicode.Range16{Lo: 0x2cdc, Hi: 0x2cdc, Stride: 0x1},
+		unicode.Range16{Lo: 0x2cde, Hi: 0x2cde, Stride: 0x1},
+		unicode.Range16{Lo: 0x2ce0, Hi: 0x2ce0, Stride: 0x1},
+		unicode.Range16{Lo: 0x2ce2, Hi: 0x2ce2, Stride: 0x1},
+		unicode.Range16{Lo: 0x2ceb, Hi: 0x2ceb, Stride: 0x1},
+		unicode.Range16{Lo: 0x2ced, Hi: 0x2ced, Stride: 0x1},
+		unicode.Range16{Lo: 0x2cf2, Hi: 0x2cf2, Stride: 0x1},
+		unicode.Range16{Lo: 0xa640, Hi: 0xa640, Stride: 0x1},
+		unicode.Range16{Lo: 0xa642, Hi: 0xa642, Stride: 0x1},
+		unicode.Range16{Lo: 0xa644, Hi: 0xa644, Stride: 0x1},
+		unicode.Range16{Lo: 0xa646, Hi: 0xa646, Stride: 0x1},
+		unicode.Range16{Lo: 0xa648, Hi: 0xa648, Stride: 0x1},
+		unicode.Range16{Lo: 0xa64a, Hi: 0xa64a, Stride: 0x1},
+		unicode.Range16{Lo: 0xa64c, Hi: 0xa64c, Stride: 0x1},
+		unicode.Range16{Lo: 0xa64e, Hi: 0xa64e, Stride: 0x1},
+		unicode.Range16{Lo: 0xa650, Hi: 0xa650, Stride: 0x1},
+		unicode.Range16{Lo: 0xa652, Hi: 0xa652, Stride: 0x1},
+		unicode.Range16{Lo: 0xa654, Hi: 0xa654, Stride: 0x1},
+		unicode.Range16{Lo: 0xa656, Hi: 0xa656, Stride: 0x1},
+		unicode.Range16{Lo: 0xa658, Hi: 0xa658, Stride: 0x1},
+		unicode.Range16{Lo: 0xa65a, Hi: 0xa65a, Stride: 0x1},
+		unicode.Range16{Lo: 0xa65c, Hi: 0xa65c, Stride: 0x1},
+		unicode.Range16{Lo: 0xa65e, Hi: 0xa65e, Stride: 0x1},
+		unicode.Range16{Lo: 0xa660, Hi: 0xa660, Stride: 0x1},
+		unicode.Range16{Lo: 0xa662, Hi: 0xa662, Stride: 0x1},
+		unicode.Range16{Lo: 0xa664, Hi: 0xa664, Stride: 0x1},
+		unicode.Range16{Lo: 0xa666, Hi: 0xa666, Stride: 0x1},
+		unicode.Range16{Lo: 0xa668, Hi: 0xa668, Stride: 0x1},
+		unicode.Range16{Lo: 0xa66a, Hi: 0xa66a, Stride: 0x1},
+		unicode.Range16{Lo: 0xa66c, Hi: 0xa66c, Stride: 0x1},
+		unicode.Range16{Lo: 0xa680, Hi: 0xa680, Stride: 0x1},
+		unicode.Range16{Lo: 0xa682, Hi: 0xa682, Stride: 0x1},
+		unicode.Range16{Lo: 0xa684, Hi: 0xa684, Stride: 0x1},
+		unicode.Range16{Lo: 0xa686, Hi: 0xa686, Stride: 0x1},
+		unicode.Range16{Lo: 0xa688, Hi: 0xa688, Stride: 0x1},
+		unicode.Range16{Lo: 0xa68a, Hi: 0xa68a, Stride: 0x1},
+		unicode.Range16{Lo: 0xa68c, Hi: 0xa68c, Stride: 0x1},
+		unicode.Range16{Lo: 0xa68e, Hi: 0xa68e, Stride: 0x1},
+		unicode.Range16{Lo: 0xa690, Hi: 0xa690, Stride: 0x1},
+		unicode.Range16{Lo: 0xa692, Hi: 0xa692, Stride: 0x1},
+		unicode.Range16{Lo: 0xa694, Hi: 0xa694, Stride: 0x1},
+		unicode.Range16{Lo: 0xa696, Hi: 0xa696, Stride: 0x1},
+		unicode.Range16{Lo: 0xa698, Hi: 0xa698, Stride: 0x1},
+		unicode.Range16{Lo: 0xa69a, Hi: 0xa69a, Stride: 0x1},
+		unicode.Range16{Lo: 0xa722, Hi: 0xa722, Stride: 0x1},
+		unicode.Range16{Lo: 0xa724, Hi: 0xa724, Stride: 0x1},
+		unicode.Range16{Lo: 0xa726, Hi: 0xa726, Stride: 0x1},
+		unicode.Range16{Lo: 0xa728, Hi: 0xa728, Stride: 0x1},
+		unicode.Range16{Lo: 0xa72a, Hi: 0xa72a, Stride: 0x1},
+		unicode.Range16{Lo: 0xa72c, Hi: 0xa72c, Stride: 0x1},
+		unicode.Range16{Lo: 0xa72e, Hi: 0xa72e, Stride: 0x1},
+		unicode.Range16{Lo: 0xa732, Hi: 0xa732, Stride: 0x1},
+		unicode.Range16{Lo: 0xa734, Hi: 0xa734, Stride: 0x1},
+		unicode.Range16{Lo: 0xa736, Hi: 0xa736, Stride: 0x1},
+		unicode.Range16{Lo: 0xa738, Hi: 0xa738, Stride: 0x1},
+		unicode.Range16{Lo: 0xa73a, Hi: 0xa73a, Stride: 0x1},
+		unicode.Range16{Lo: 0xa73c, Hi: 0xa73c, Stride: 0x1},
+		unicode.Range16{Lo: 0xa73e, Hi: 0xa73e, Stride: 0x1},
+		unicode.Range16{Lo: 0xa740, Hi: 0xa740, Stride: 0x1},
+		unicode.Range16{Lo: 0xa742, Hi: 0xa742, Stride: 0x1},
+		unicode.Range16{Lo: 0xa744, Hi: 0xa744, Stride: 0x1},
+		unicode.Range16{Lo: 0xa746, Hi: 0xa746, Stride: 0x1},
+		unicode.Range16{Lo: 0xa748, Hi: 0xa748, Stride: 0x1},
+		unicode.Range16{Lo: 0xa74a, Hi: 0xa74a, Stride: 0x1},
+		unicode.Range16{Lo: 0xa74c, Hi: 0xa74c, Stride: 0x1},
+		unicode.Range16{Lo: 0xa74e, Hi: 0xa74e, Stride: 0x1},
+		unicode.Range16{Lo: 0xa750, Hi: 0xa750, Stride: 0x1},
+		unicode.Range16{Lo: 0xa752, Hi: 0xa752, Stride: 0x1},
+		unicode.Range16{Lo: 0xa754, Hi: 0xa754, Stride: 0x1},
+		unicode.Range16{Lo: 0xa756, Hi: 0xa756, Stride: 0x1},
+		unicode.Range16{Lo: 0xa758, Hi: 0xa758, Stride: 0x1},
+		unicode.Range16{Lo: 0xa75a, Hi: 0xa75a, Stride: 0x1},
+		unicode.Range16{Lo: 0xa75c, Hi: 0xa75c, Stride: 0x1},
+		unicode.Range16{Lo: 0xa75e, Hi: 0xa75e, Stride: 0x1},
+		unicode.Range16{Lo: 0xa760, Hi: 0xa760, Stride: 0x1},
+		unicode.Range16{Lo: 0xa762, Hi: 0xa762, Stride: 0x1},
+		unicode.Range16{Lo: 0xa764, Hi: 0xa764, Stride: 0x1},
+		unicode.Range16{Lo: 0xa766, Hi: 0xa766, Stride: 0x1},
+		unicode.Range16{Lo: 0xa768, Hi: 0xa768, Stride: 0x1},
+		unicode.Range16{Lo: 0xa76a, Hi: 0xa76a, Stride: 0x1},
+		unicode.Range16{Lo: 0xa76c, Hi: 0xa76c, Stride: 0x1},
+		unicode.Range16{Lo: 0xa76e, Hi: 0xa76e, Stride: 0x1},
+		unicode.Range16{Lo: 0xa779, Hi: 0xa779, Stride: 0x1},
+		unicode.Range16{Lo: 0xa77b, Hi: 0xa77b, Stride: 0x1},
+		unicode.Range16{Lo: 0xa77d, Hi: 0xa77e, Stride: 0x1},
+		unicode.Range16{Lo: 0xa780, Hi: 0xa780, Stride: 0x1},
+		unicode.Range16{Lo: 0xa782, Hi: 0xa782, Stride: 0x1},
+		unicode.Range16{Lo: 0xa784, Hi: 0xa784, Stride: 0x1},
+		unicode.Range16{Lo: 0xa786, Hi: 0xa786, Stride: 0x1},
+		unicode.Range16{Lo: 0xa78b, Hi: 0xa78b, Stride: 0x1},
+		unicode.Range16{Lo: 0xa78d, Hi: 0xa78d, Stride: 0x1},
+		unicode.Range16{Lo: 0xa790, Hi: 0xa790, Stride: 0x1},
+		unicode.Range16{Lo: 0xa792, Hi: 0xa792, Stride: 0x1},
+		unicode.Range16{Lo: 0xa796, Hi: 0xa796, Stride: 0x1},
+		unicode.Range16{Lo: 0xa798, Hi: 0xa798, Stride: 0x1},
+		unicode.Range16{Lo: 0xa79a, Hi: 0xa79a, Stride: 0x1},
+		unicode.Range16{Lo: 0xa79c, Hi: 0xa79c, Stride: 0x1},
+		unicode.Range16{Lo: 0xa79e, Hi: 0xa79e, Stride: 0x1},
+		unicode.Range16{Lo: 0xa7a0, Hi: 0xa7a0, Stride: 0x1},
+		unicode.Range16{Lo: 0xa7a2, Hi: 0xa7a2, Stride: 0x1},
+		unicode.Range16{Lo: 0xa7a4, Hi: 0xa7a4, Stride: 0x1},
+		unicode.Range16{Lo: 0xa7a6, Hi: 0xa7a6, Stride: 0x1},
+		unicode.Range16{Lo: 0xa7a8, Hi: 0xa7a8, Stride: 0x1},
+		unicode.Range16{Lo: 0xa7aa, Hi: 0xa7ae, Stride: 0x1},
+		unicode.Range16{Lo: 0xa7b0, Hi: 0xa7b4, Stride: 0x1},
+		unicode.Range16{Lo: 0xa7b6, Hi: 0xa7b6, Stride: 0x1},
+		unicode.Range16{Lo: 0xa7b8, Hi: 0xa7b8, Stride: 0x1},
+		unicode.Range16{Lo: 0xa7ba, Hi: 0xa7ba, Stride: 0x1},
+		unicode.Range16{Lo: 0xa7bc, Hi: 0xa7bc, Stride: 0x1},
+		unicode.Range16{Lo: 0xa7be, Hi: 0xa7be, Stride: 0x1},
+		unicode.Range16{Lo: 0xa7c0, Hi: 0xa7c0, Stride: 0x1},
+		unicode.Range16{Lo: 0xa7c2, Hi: 0xa7c2, Stride: 0x1},
+		unicode.Range16{Lo: 0xa7c4, Hi: 0xa7c7, Stride: 0x1},
+		unicode.Range16{Lo: 0xa7c9, Hi: 0xa7c9, Stride: 0x1},
+		unicode.Range16{Lo: 0xa7d0, Hi: 0xa7d0, Stride: 0x1},
+		unicode.Range16{Lo: 0xa7d6, Hi: 0xa7d6, Stride: 0x1},
+		unicode.Range16{Lo: 0xa7d8, Hi: 0xa7d8, Stride: 0x1},
+		unicode.Range16{Lo: 0xa7f5, Hi: 0xa7f5, Stride: 0x1},
+		unicode.Range16{Lo: 0xff21, Hi: 0xff3a, Stride: 0x1},
+	},
+	R32: []unicode.Range32{
+		unicode.Range32{Lo: 0x10400, Hi: 0x10427, Stride: 0x1},
+		unicode.Range32{Lo: 0x104b0, Hi: 0x104d3, Stride: 0x1},
+		unicode.Range32{Lo: 0x10570, Hi: 0x1057a, Stride: 0x1},
+		unicode.Range32{Lo: 0x1057c, Hi: 0x1058a, Stride: 0x1},
+		unicode.Range32{Lo: 0x1058c, Hi: 0x10592, Stride: 0x1},
+		unicode.Range32{Lo: 0x10594, Hi: 0x10595, Stride: 0x1},
+		unicode.Range32{Lo: 0x10c80, Hi: 0x10cb2, Stride: 0x1},
+		unicode.Range32{Lo: 0x118a0, Hi: 0x118bf, Stride: 0x1},
+		unicode.Range32{Lo: 0x16e40, Hi: 0x16e5f, Stride: 0x1},
+		unicode.Range32{Lo: 0x1d400, Hi: 0x1d419, Stride: 0x1},
+		unicode.Range32{Lo: 0x1d434, Hi: 0x1d44d, Stride: 0x1},
+		unicode.Range32{Lo: 0x1d468, Hi: 0x1d481, Stride: 0x1},
+		unicode.Range32{Lo: 0x1d49c, Hi: 0x1d49c, Stride: 0x1},
+		unicode.Range32{Lo: 0x1d49e, Hi: 0x1d49f, Stride: 0x1},
+		unicode.Range32{Lo: 0x1d4a2, Hi: 0x1d4a2, Stride: 0x1},
+		unicode.Range32{Lo: 0x1d4a5, Hi: 0x1d4a6, Stride: 0x1},
+		unicode.Range32{Lo: 0x1d4a9, Hi: 0x1d4ac, Stride: 0x1},
+		unicode.Range32{Lo: 0x1d4ae, Hi: 0x1d4b5, Stride: 0x1},
+		unicode.Range32{Lo: 0x1d4d0, Hi: 0x1d4e9, Stride: 0x1},
+		unicode.Range32{Lo: 0x1d504, Hi: 0x1d505, Stride: 0x1},
+		unicode.Range32{Lo: 0x1d507, Hi: 0x1d50a, Stride: 0x1},
+		unicode.Range32{Lo: 0x1d50d, Hi: 0x1d514, Stride: 0x1},
+		unicode.Range32{Lo: 0x1d516, Hi: 0x1d51c, Stride: 0x1},
+		unicode.Range32{Lo: 0x1d538, Hi: 0x1d539, Stride: 0x1},
+		unicode.Range32{Lo: 0x1d53b, Hi: 0x1d53e, Stride: 0x1},
+		unicode.Range32{Lo: 0x1d540, Hi: 0x1d544, Stride: 0x1},
+		unicode.Range32{Lo: 0x1d546, Hi: 0x1d546, Stride: 0x1},
+		unicode.Range32{Lo: 0x1d54a, Hi: 0x1d550, Stride: 0x1},
+		unicode.Range32{Lo: 0x1d56c, Hi: 0x1d585, Stride: 0x1},
+		unicode.Range32{Lo: 0x1d5a0, Hi: 0x1d5b9, Stride: 0x1},
+		unicode.Range32{Lo: 0x1d5d4, Hi: 0x1d5ed, Stride: 0x1},
+		unicode.Range32{Lo: 0x1d608, Hi: 0x1d621, Stride: 0x1},
+		unicode.Range32{Lo: 0x1d63c, Hi: 0x1d655, Stride: 0x1},
+		unicode.Range32{Lo: 0x1d670, Hi: 0x1d689, Stride: 0x1},
+		unicode.Range32{Lo: 0x1d6a8, Hi: 0x1d6c0, Stride: 0x1},
+		unicode.Range32{Lo: 0x1d6e2, Hi: 0x1d6fa, Stride: 0x1},
+		unicode.Range32{Lo: 0x1d71c, Hi: 0x1d734, Stride: 0x1},
+		unicode.Range32{Lo: 0x1d756, Hi: 0x1d76e, Stride: 0x1},
+		unicode.Range32{Lo: 0x1d790, Hi: 0x1d7a8, Stride: 0x1},
+		unicode.Range32{Lo: 0x1d7ca, Hi: 0x1d7ca, Stride: 0x1},
+		unicode.Range32{Lo: 0x1e900, Hi: 0x1e921, Stride: 0x1},
+		unicode.Range32{Lo: 0x1f130, Hi: 0x1f149, Stride: 0x1},
+		unicode.Range32{Lo: 0x1f150, Hi: 0x1f169, Stride: 0x1},
+		unicode.Range32{Lo: 0x1f170, Hi: 0x1f189, Stride: 0x1},
+	},
+	LatinOffset: 3,
+}
+
+type _SentenceRuneRange unicode.RangeTable
+
+func _SentenceRuneType(r rune) *_SentenceRuneRange {
+	switch {
+	case unicode.Is(_SentenceATerm, r):
+		return (*_SentenceRuneRange)(_SentenceATerm)
+	case unicode.Is(_SentenceCR, r):
+		return (*_SentenceRuneRange)(_SentenceCR)
+	case unicode.Is(_SentenceClose, r):
+		return (*_SentenceRuneRange)(_SentenceClose)
+	case unicode.Is(_SentenceExtend, r):
+		return (*_SentenceRuneRange)(_SentenceExtend)
+	case unicode.Is(_SentenceFormat, r):
+		return (*_SentenceRuneRange)(_SentenceFormat)
+	case unicode.Is(_SentenceLF, r):
+		return (*_SentenceRuneRange)(_SentenceLF)
+	case unicode.Is(_SentenceLower, r):
+		return (*_SentenceRuneRange)(_SentenceLower)
+	case unicode.Is(_SentenceNumeric, r):
+		return (*_SentenceRuneRange)(_SentenceNumeric)
+	case unicode.Is(_SentenceOLetter, r):
+		return (*_SentenceRuneRange)(_SentenceOLetter)
+	case unicode.Is(_SentenceSContinue, r):
+		return (*_SentenceRuneRange)(_SentenceSContinue)
+	case unicode.Is(_SentenceSTerm, r):
+		return (*_SentenceRuneRange)(_SentenceSTerm)
+	case unicode.Is(_SentenceSep, r):
+		return (*_SentenceRuneRange)(_SentenceSep)
+	case unicode.Is(_SentenceSp, r):
+		return (*_SentenceRuneRange)(_SentenceSp)
+	case unicode.Is(_SentenceUpper, r):
+		return (*_SentenceRuneRange)(_SentenceUpper)
+	default:
+		return nil
+	}
+}
+func (rng *_SentenceRuneRange) String() string {
+	switch (*unicode.RangeTable)(rng) {
+	case _SentenceATerm:
+		return "ATerm"
+	case _SentenceCR:
+		return "CR"
+	case _SentenceClose:
+		return "Close"
+	case _SentenceExtend:
+		return "Extend"
+	case _SentenceFormat:
+		return "Format"
+	case _SentenceLF:
+		return "LF"
+	case _SentenceLower:
+		return "Lower"
+	case _SentenceNumeric:
+		return "Numeric"
+	case _SentenceOLetter:
+		return "OLetter"
+	case _SentenceSContinue:
+		return "SContinue"
+	case _SentenceSTerm:
+		return "STerm"
+	case _SentenceSep:
+		return "Sep"
+	case _SentenceSp:
+		return "Sp"
+	case _SentenceUpper:
+		return "Upper"
+	default:
+		return "Other"
+	}
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/apparentlymart/go-textseg/v15/textseg/unicode2ragel.rb 0.21.3-0ubuntu1/vendor/github.com/apparentlymart/go-textseg/v15/textseg/unicode2ragel.rb
--- 0.19.3+ds1-4/vendor/github.com/apparentlymart/go-textseg/v15/textseg/unicode2ragel.rb	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/apparentlymart/go-textseg/v15/textseg/unicode2ragel.rb	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,335 @@
+#!/usr/bin/env ruby
+#
+# This script has been updated to accept more command-line arguments:
+#
+#    -u, --url                        URL to process
+#    -m, --machine                    Machine name
+#    -p, --properties                 Properties to add to the machine
+#    -o, --output                     Write output to file
+#
+# Updated by: Marty Schoch <marty.schoch@gmail.com>
+# 
+# This script uses the unicode spec to generate a Ragel state machine
+# that recognizes unicode alphanumeric characters.  It generates 5
+# character classes: uupper, ulower, ualpha, udigit, and ualnum.
+# Currently supported encodings are UTF-8 [default] and UCS-4.
+#
+# Usage: unicode2ragel.rb [options]
+#    -e, --encoding [ucs4 | utf8]     Data encoding
+#    -h, --help                       Show this message
+#
+# This script was originally written as part of the Ferret search
+# engine library.
+#
+# Author: Rakan El-Khalil <rakan@well.com>
+
+require 'optparse'
+require 'open-uri'
+
+ENCODINGS = [ :utf8, :ucs4 ]
+ALPHTYPES = { :utf8 => "byte", :ucs4 => "rune" }
+DEFAULT_CHART_URL = "http://www.unicode.org/Public/5.1.0/ucd/DerivedCoreProperties.txt"
+DEFAULT_MACHINE_NAME= "WChar"
+
+###
+# Display vars & default option
+
+TOTAL_WIDTH = 80
+RANGE_WIDTH = 23
+@encoding = :utf8
+@chart_url = DEFAULT_CHART_URL
+machine_name = DEFAULT_MACHINE_NAME
+properties = []
+@output = $stdout
+
+###
+# Option parsing
+
+cli_opts = OptionParser.new do |opts|
+  opts.on("-e", "--encoding [ucs4 | utf8]", "Data encoding") do |o|
+    @encoding = o.downcase.to_sym
+  end
+  opts.on("-h", "--help", "Show this message") do
+    puts opts
+    exit
+  end
+  opts.on("-u", "--url URL", "URL to process") do |o|
+    @chart_url = o 
+  end
+  opts.on("-m", "--machine MACHINE_NAME", "Machine name") do |o|
+    machine_name = o
+  end
+  opts.on("-p", "--properties x,y,z", Array, "Properties to add to machine") do |o|
+    properties = o
+  end
+  opts.on("-o", "--output FILE", "output file") do |o|
+    @output = File.new(o, "w+")
+  end
+end
+
+cli_opts.parse(ARGV)
+unless ENCODINGS.member? @encoding
+  puts "Invalid encoding: #{@encoding}"
+  puts cli_opts
+  exit
+end
+
+##
+# Downloads the document at url and yields every alpha line's hex
+# range and description.
+
+def each_alpha( url, property ) 
+  URI.open( url ) do |file|
+    file.each_line do |line|
+      next if line =~ /^#/;
+      next if line !~ /; #{property} *#/;
+
+      range, description = line.split(/;/)
+      range.strip!
+      description.gsub!(/.*#/, '').strip!
+
+      if range =~ /\.\./
+           start, stop = range.split '..'
+      else start = stop = range
+      end
+
+      yield start.hex .. stop.hex, description
+    end
+  end
+end
+
+###
+# Formats to hex at minimum width
+
+def to_hex( n )
+  r = "%0X" % n
+  r = "0#{r}" unless (r.length % 2).zero?
+  r
+end
+
+###
+# UCS4 is just a straight hex conversion of the unicode codepoint.
+
+def to_ucs4( range )
+  rangestr  =   "0x" + to_hex(range.begin)
+  rangestr << "..0x" + to_hex(range.end) if range.begin != range.end
+  [ rangestr ]
+end
+
+##
+# 0x00     - 0x7f     -> 0zzzzzzz[7]
+# 0x80     - 0x7ff    -> 110yyyyy[5] 10zzzzzz[6]
+# 0x800    - 0xffff   -> 1110xxxx[4] 10yyyyyy[6] 10zzzzzz[6]
+# 0x010000 - 0x10ffff -> 11110www[3] 10xxxxxx[6] 10yyyyyy[6] 10zzzzzz[6] 
+
+UTF8_BOUNDARIES = [0x7f, 0x7ff, 0xffff, 0x10ffff]
+
+def to_utf8_enc( n )
+  r = 0
+  if n <= 0x7f
+    r = n
+  elsif n <= 0x7ff
+    y = 0xc0 | (n >> 6)
+    z = 0x80 | (n & 0x3f)
+    r = y << 8 | z
+  elsif n <= 0xffff
+    x = 0xe0 | (n >> 12)
+    y = 0x80 | (n >>  6) & 0x3f
+    z = 0x80 |  n        & 0x3f
+    r = x << 16 | y << 8 | z
+  elsif n <= 0x10ffff
+    w = 0xf0 | (n >> 18)
+    x = 0x80 | (n >> 12) & 0x3f
+    y = 0x80 | (n >>  6) & 0x3f
+    z = 0x80 |  n        & 0x3f
+    r = w << 24 | x << 16 | y << 8 | z
+  end
+
+  to_hex(r)
+end
+
+def from_utf8_enc( n )
+  n = n.hex
+  r = 0
+  if n <= 0x7f
+    r = n
+  elsif n <= 0xdfff
+    y = (n >> 8) & 0x1f
+    z =  n       & 0x3f
+    r = y << 6 | z
+  elsif n <= 0xefffff
+    x = (n >> 16) & 0x0f
+    y = (n >>  8) & 0x3f
+    z =  n        & 0x3f
+    r = x << 10 | y << 6 | z
+  elsif n <= 0xf7ffffff
+    w = (n >> 24) & 0x07
+    x = (n >> 16) & 0x3f
+    y = (n >>  8) & 0x3f
+    z =  n        & 0x3f
+    r = w << 18 | x << 12 | y << 6 | z
+  end
+  r
+end
+
+###
+# Given a range, splits it up into ranges that can be continuously
+# encoded into utf8.  Eg: 0x00 .. 0xff => [0x00..0x7f, 0x80..0xff]
+# This is not strictly needed since the current [5.1] unicode standard
+# doesn't have ranges that straddle utf8 boundaries.  This is included
+# for completeness as there is no telling if that will ever change.
+
+def utf8_ranges( range )
+  ranges = []
+  UTF8_BOUNDARIES.each do |max|
+    if range.begin <= max
+      if range.end <= max
+        ranges << range
+        return ranges
+      end
+
+      ranges << (range.begin .. max)
+      range = (max + 1) .. range.end
+    end
+  end
+  ranges
+end
+
+def build_range( start, stop )
+  size = start.size/2
+  left = size - 1
+  return [""] if size < 1
+
+  a = start[0..1]
+  b = stop[0..1]
+
+  ###
+  # Shared prefix
+
+  if a == b
+    return build_range(start[2..-1], stop[2..-1]).map do |elt|
+      "0x#{a} " + elt
+    end
+  end
+
+  ###
+  # Unshared prefix, end of run
+
+  return ["0x#{a}..0x#{b} "] if left.zero?
+  
+  ###
+  # Unshared prefix, not end of run
+  # Range can be 0x123456..0x56789A
+  # Which is equivalent to:
+  #     0x123456 .. 0x12FFFF
+  #     0x130000 .. 0x55FFFF
+  #     0x560000 .. 0x56789A
+
+  ret = []
+  ret << build_range(start, a + "FF" * left)
+
+  ###
+  # Only generate middle range if need be.
+
+  if a.hex+1 != b.hex
+    max = to_hex(b.hex - 1)
+    max = "FF" if b == "FF"
+    ret << "0x#{to_hex(a.hex+1)}..0x#{max} " + "0x00..0xFF " * left
+  end
+
+  ###
+  # Don't generate last range if it is covered by first range
+  
+  ret << build_range(b + "00" * left, stop) unless b == "FF"
+  ret.flatten!
+end
+
+def to_utf8( range )
+  utf8_ranges( range ).map do |r|   
+    begin_enc = to_utf8_enc(r.begin)
+    end_enc = to_utf8_enc(r.end)
+    build_range begin_enc, end_enc
+  end.flatten!
+end
+
+##
+# Perform a 3-way comparison of the number of codepoints advertised by
+# the unicode spec for the given range, the originally parsed range,
+# and the resulting utf8 encoded range.
+
+def count_codepoints( code )
+  code.split(' ').inject(1) do |acc, elt|
+    if elt =~ /0x(.+)\.\.0x(.+)/
+      if @encoding == :utf8
+        acc * (from_utf8_enc($2) - from_utf8_enc($1) + 1)
+      else
+        acc * ($2.hex - $1.hex + 1)
+      end
+    else
+      acc
+    end
+  end
+end
+
+def is_valid?( range, desc, codes )
+  spec_count  = 1
+  spec_count  = $1.to_i if desc =~ /\[(\d+)\]/
+  range_count = range.end - range.begin + 1
+
+  sum = codes.inject(0) { |acc, elt| acc + count_codepoints(elt) }
+  sum == spec_count and sum == range_count
+end
+
+##
+# Generate the state machine to @output (stdout by default)
+
+def generate_machine( name, property )
+  pipe = " "
+  @output.puts "    #{name} = "
+  each_alpha( @chart_url, property ) do |range, desc|
+
+    codes = (@encoding == :ucs4) ? to_ucs4(range) : to_utf8(range)
+
+    #raise "Invalid encoding of range #{range}: #{codes.inspect}" unless 
+    #  is_valid? range, desc, codes
+
+    range_width = codes.map { |a| a.size }.max
+    range_width = RANGE_WIDTH if range_width < RANGE_WIDTH
+
+    desc_width  = TOTAL_WIDTH - RANGE_WIDTH - 11
+    desc_width -= (range_width - RANGE_WIDTH) if range_width > RANGE_WIDTH
+
+    if desc.size > desc_width
+      desc = desc[0..desc_width - 4] + "..."
+    end
+
+    codes.each_with_index do |r, idx|
+      desc = "" unless idx.zero?
+      code = "%-#{range_width}s" % r
+      @output.puts "      #{pipe} #{code} ##{desc}"
+      pipe = "|"
+    end
+  end
+  @output.puts "      ;"
+  @output.puts ""
+end
+
+@output.puts <<EOF
+# The following Ragel file was autogenerated with #{$0} 
+# from: #{@chart_url}
+#
+# It defines #{properties}.
+#
+# To use this, make sure that your alphtype is set to #{ALPHTYPES[@encoding]},
+# and that your input is in #{@encoding}.
+
+%%{
+    machine #{machine_name};
+    
+EOF
+
+properties.each { |x| generate_machine( x, x ) }
+
+@output.puts <<EOF
+}%%
+EOF
diff -pruN 0.19.3+ds1-4/vendor/github.com/apparentlymart/go-textseg/v15/textseg/utf8_seqs.go 0.21.3-0ubuntu1/vendor/github.com/apparentlymart/go-textseg/v15/textseg/utf8_seqs.go
--- 0.19.3+ds1-4/vendor/github.com/apparentlymart/go-textseg/v15/textseg/utf8_seqs.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/apparentlymart/go-textseg/v15/textseg/utf8_seqs.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,19 @@
+package textseg
+
+import "unicode/utf8"
+
+// ScanUTF8Sequences is a split function for bufio.Scanner that splits
+// on UTF8 sequence boundaries.
+//
+// This is included largely for completeness, since this behavior is already
+// built in to Go when ranging over a string.
+func ScanUTF8Sequences(data []byte, atEOF bool) (int, []byte, error) {
+	if len(data) == 0 {
+		return 0, nil, nil
+	}
+	r, seqLen := utf8.DecodeRune(data)
+	if r == utf8.RuneError && !atEOF {
+		return 0, nil, nil
+	}
+	return seqLen, data[:seqLen], nil
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/LICENSE.txt 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/LICENSE.txt
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/LICENSE.txt	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/LICENSE.txt	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/NOTICE.txt 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/NOTICE.txt
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/NOTICE.txt	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/NOTICE.txt	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,3 @@
+AWS SDK for Go
+Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+Copyright 2014-2015 Stripe, Inc.
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/aws/accountid_endpoint_mode.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/aws/accountid_endpoint_mode.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/aws/accountid_endpoint_mode.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/aws/accountid_endpoint_mode.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,18 @@
+package aws
+
+// AccountIDEndpointMode controls how a resolved AWS account ID is handled for endpoint routing.
+type AccountIDEndpointMode string
+
+const (
+	// AccountIDEndpointModeUnset indicates the AWS account ID will not be used for endpoint routing
+	AccountIDEndpointModeUnset AccountIDEndpointMode = ""
+
+	// AccountIDEndpointModePreferred indicates the AWS account ID will be used for endpoint routing if present
+	AccountIDEndpointModePreferred = "preferred"
+
+	// AccountIDEndpointModeRequired indicates an error will be returned if the AWS account ID is not resolved from identity
+	AccountIDEndpointModeRequired = "required"
+
+	// AccountIDEndpointModeDisabled indicates the AWS account ID will be ignored during endpoint routing
+	AccountIDEndpointModeDisabled = "disabled"
+)
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/aws/config.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/aws/config.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/aws/config.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/aws/config.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,211 @@
+package aws
+
+import (
+	"net/http"
+
+	smithybearer "github.com/aws/smithy-go/auth/bearer"
+	"github.com/aws/smithy-go/logging"
+	"github.com/aws/smithy-go/middleware"
+)
+
+// HTTPClient provides the interface to provide custom HTTPClients. Generally
+// *http.Client is sufficient for most use cases. The HTTPClient should not
+// follow 301 or 302 redirects.
+type HTTPClient interface {
+	Do(*http.Request) (*http.Response, error)
+}
+
+// A Config provides service configuration for service clients.
+type Config struct {
+	// The region to send requests to. This parameter is required and must
+	// be configured globally or on a per-client basis unless otherwise
+	// noted. A full list of regions is found in the "Regions and Endpoints"
+	// document.
+	//
+	// See http://docs.aws.amazon.com/general/latest/gr/rande.html for
+	// information on AWS regions.
+	Region string
+
+	// The credentials object to use when signing requests.
+	// Use the LoadDefaultConfig to load configuration from all the SDK's supported
+	// sources, and resolve credentials using the SDK's default credential chain.
+	Credentials CredentialsProvider
+
+	// The Bearer Authentication token provider to use for authenticating API
+	// operation calls with a Bearer Authentication token. The API clients and
+	// operation must support Bearer Authentication scheme in order for the
+	// token provider to be used. API clients created with NewFromConfig will
+	// automatically be configured with this option, if the API client support
+	// Bearer Authentication.
+	//
+	// The SDK's config.LoadDefaultConfig can automatically populate this
+	// option for external configuration options such as SSO session.
+	// https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-sso.html
+	BearerAuthTokenProvider smithybearer.TokenProvider
+
+	// The HTTP Client the SDK's API clients will use to invoke HTTP requests.
+	// The SDK defaults to a BuildableClient allowing API clients to create
+	// copies of the HTTP Client for service specific customizations.
+	//
+	// Use a (*http.Client) for custom behavior. Using a custom http.Client
+	// will prevent the SDK from modifying the HTTP client.
+	HTTPClient HTTPClient
+
+	// An endpoint resolver that can be used to provide or override an endpoint
+	// for the given service and region.
+	//
+	// See the `aws.EndpointResolver` documentation for additional usage
+	// information.
+	//
+	// Deprecated: See Config.EndpointResolverWithOptions
+	EndpointResolver EndpointResolver
+
+	// An endpoint resolver that can be used to provide or override an endpoint
+	// for the given service and region.
+	//
+	// When EndpointResolverWithOptions is specified, it will be used by a
+	// service client rather than using EndpointResolver if also specified.
+	//
+	// See the `aws.EndpointResolverWithOptions` documentation for additional
+	// usage information.
+	//
+	// Deprecated: with the release of endpoint resolution v2 in API clients,
+	// EndpointResolver and EndpointResolverWithOptions are deprecated.
+	// Providing a value for this field will likely prevent you from using
+	// newer endpoint-related service features. See API client options
+	// EndpointResolverV2 and BaseEndpoint.
+	EndpointResolverWithOptions EndpointResolverWithOptions
+
+	// RetryMaxAttempts specifies the maximum number attempts an API client
+	// will call an operation that fails with a retryable error.
+	//
+	// API Clients will only use this value to construct a retryer if the
+	// Config.Retryer member is not nil. This value will be ignored if
+	// Retryer is not nil.
+	RetryMaxAttempts int
+
+	// RetryMode specifies the retry model the API client will be created with.
+	//
+	// API Clients will only use this value to construct a retryer if the
+	// Config.Retryer member is not nil. This value will be ignored if
+	// Retryer is not nil.
+	RetryMode RetryMode
+
+	// Retryer is a function that provides a Retryer implementation. A Retryer
+	// guides how HTTP requests should be retried in case of recoverable
+	// failures. When nil the API client will use a default retryer.
+	//
+	// In general, the provider function should return a new instance of a
+	// Retryer if you are attempting to provide a consistent Retryer
+	// configuration across all clients. This will ensure that each client will
+	// be provided a new instance of the Retryer implementation, and will avoid
+	// issues such as sharing the same retry token bucket across services.
+	//
+	// If not nil, RetryMaxAttempts, and RetryMode will be ignored by API
+	// clients.
+	Retryer func() Retryer
+
+	// ConfigSources are the sources that were used to construct the Config.
+	// Allows for additional configuration to be loaded by clients.
+	ConfigSources []interface{}
+
+	// APIOptions provides the set of middleware mutations modify how the API
+	// client requests will be handled. This is useful for adding additional
+	// tracing data to a request, or changing behavior of the SDK's client.
+	APIOptions []func(*middleware.Stack) error
+
+	// The logger writer interface to write logging messages to. Defaults to
+	// standard error.
+	Logger logging.Logger
+
+	// Configures the events that will be sent to the configured logger. This
+	// can be used to configure the logging of signing, retries, request, and
+	// responses of the SDK clients.
+	//
+	// See the ClientLogMode type documentation for the complete set of logging
+	// modes and available configuration.
+	ClientLogMode ClientLogMode
+
+	// The configured DefaultsMode. If not specified, service clients will
+	// default to legacy.
+	//
+	// Supported modes are: auto, cross-region, in-region, legacy, mobile,
+	// standard
+	DefaultsMode DefaultsMode
+
+	// The RuntimeEnvironment configuration, only populated if the DefaultsMode
+	// is set to DefaultsModeAuto and is initialized by
+	// `config.LoadDefaultConfig`. You should not populate this structure
+	// programmatically, or rely on the values here within your applications.
+	RuntimeEnvironment RuntimeEnvironment
+
+	// AppId is an optional application specific identifier that can be set.
+	// When set it will be appended to the User-Agent header of every request
+	// in the form of App/{AppId}. This variable is sourced from environment
+	// variable AWS_SDK_UA_APP_ID or the shared config profile attribute sdk_ua_app_id.
+	// See https://docs.aws.amazon.com/sdkref/latest/guide/settings-reference.html for
+	// more information on environment variables and shared config settings.
+	AppID string
+
+	// BaseEndpoint is an intermediary transfer location to a service specific
+	// BaseEndpoint on a service's Options.
+	BaseEndpoint *string
+
+	// DisableRequestCompression toggles if an operation request could be
+	// compressed or not. Will be set to false by default. This variable is sourced from
+	// environment variable AWS_DISABLE_REQUEST_COMPRESSION or the shared config profile attribute
+	// disable_request_compression
+	DisableRequestCompression bool
+
+	// RequestMinCompressSizeBytes sets the inclusive min bytes of a request body that could be
+	// compressed. Will be set to 10240 by default and must be within 0 and 10485760 bytes inclusively.
+	// This variable is sourced from environment variable AWS_REQUEST_MIN_COMPRESSION_SIZE_BYTES or
+	// the shared config profile attribute request_min_compression_size_bytes
+	RequestMinCompressSizeBytes int64
+
+	// Controls how a resolved AWS account ID is handled for endpoint routing.
+	AccountIDEndpointMode AccountIDEndpointMode
+}
+
+// NewConfig returns a new Config pointer that can be chained with builder
+// methods to set multiple configuration values inline without using pointers.
+func NewConfig() *Config {
+	return &Config{}
+}
+
+// Copy will return a shallow copy of the Config object.
+func (c Config) Copy() Config {
+	cp := c
+	return cp
+}
+
+// EndpointDiscoveryEnableState indicates if endpoint discovery is
+// enabled, disabled, auto or unset state.
+//
+// Default behavior (Auto or Unset) indicates operations that require endpoint
+// discovery will use Endpoint Discovery by default. Operations that
+// optionally use Endpoint Discovery will not use Endpoint Discovery
+// unless EndpointDiscovery is explicitly enabled.
+type EndpointDiscoveryEnableState uint
+
+// Enumeration values for EndpointDiscoveryEnableState
+const (
+	// EndpointDiscoveryUnset represents EndpointDiscoveryEnableState is unset.
+	// Users do not need to use this value explicitly. The behavior for unset
+	// is the same as for EndpointDiscoveryAuto.
+	EndpointDiscoveryUnset EndpointDiscoveryEnableState = iota
+
+	// EndpointDiscoveryAuto represents an AUTO state that allows endpoint
+	// discovery only when required by the api. This is the default
+	// configuration resolved by the client if endpoint discovery is neither
+	// enabled or disabled.
+	EndpointDiscoveryAuto // default state
+
+	// EndpointDiscoveryDisabled indicates client MUST not perform endpoint
+	// discovery even when required.
+	EndpointDiscoveryDisabled
+
+	// EndpointDiscoveryEnabled indicates client MUST always perform endpoint
+	// discovery if supported for the operation.
+	EndpointDiscoveryEnabled
+)
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/aws/context.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/aws/context.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/aws/context.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/aws/context.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,22 @@
+package aws
+
+import (
+	"context"
+	"time"
+)
+
+type suppressedContext struct {
+	context.Context
+}
+
+func (s *suppressedContext) Deadline() (deadline time.Time, ok bool) {
+	return time.Time{}, false
+}
+
+func (s *suppressedContext) Done() <-chan struct{} {
+	return nil
+}
+
+func (s *suppressedContext) Err() error {
+	return nil
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/aws/credential_cache.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/aws/credential_cache.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/aws/credential_cache.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/aws/credential_cache.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,224 @@
+package aws
+
+import (
+	"context"
+	"fmt"
+	"sync/atomic"
+	"time"
+
+	sdkrand "github.com/aws/aws-sdk-go-v2/internal/rand"
+	"github.com/aws/aws-sdk-go-v2/internal/sync/singleflight"
+)
+
+// CredentialsCacheOptions are the options
+type CredentialsCacheOptions struct {
+
+	// ExpiryWindow will allow the credentials to trigger refreshing prior to
+	// the credentials actually expiring. This is beneficial so race conditions
+	// with expiring credentials do not cause request to fail unexpectedly
+	// due to ExpiredTokenException exceptions.
+	//
+	// An ExpiryWindow of 10s would cause calls to IsExpired() to return true
+	// 10 seconds before the credentials are actually expired. This can cause an
+	// increased number of requests to refresh the credentials to occur.
+	//
+	// If ExpiryWindow is 0 or less it will be ignored.
+	ExpiryWindow time.Duration
+
+	// ExpiryWindowJitterFrac provides a mechanism for randomizing the
+	// expiration of credentials within the configured ExpiryWindow by a random
+	// percentage. Valid values are between 0.0 and 1.0.
+	//
+	// As an example if ExpiryWindow is 60 seconds and ExpiryWindowJitterFrac
+	// is 0.5 then credentials will be set to expire between 30 to 60 seconds
+	// prior to their actual expiration time.
+	//
+	// If ExpiryWindow is 0 or less then ExpiryWindowJitterFrac is ignored.
+	// If ExpiryWindowJitterFrac is 0 then no randomization will be applied to the window.
+	// If ExpiryWindowJitterFrac < 0 the value will be treated as 0.
+	// If ExpiryWindowJitterFrac > 1 the value will be treated as 1.
+	ExpiryWindowJitterFrac float64
+}
+
+// CredentialsCache provides caching and concurrency safe credentials retrieval
+// via the provider's retrieve method.
+//
+// CredentialsCache will look for optional interfaces on the Provider to adjust
+// how the credential cache handles credentials caching.
+//
+//   - HandleFailRefreshCredentialsCacheStrategy - Allows provider to handle
+//     credential refresh failures. This could return an updated Credentials
+//     value, or attempt another means of retrieving credentials.
+//
+//   - AdjustExpiresByCredentialsCacheStrategy - Allows provider to adjust how
+//     credentials Expires is modified. This could modify how the Credentials
+//     Expires is adjusted based on the CredentialsCache ExpiryWindow option.
+//     Such as providing a floor not to reduce the Expires below.
+type CredentialsCache struct {
+	provider CredentialsProvider
+
+	options CredentialsCacheOptions
+	creds   atomic.Value
+	sf      singleflight.Group
+}
+
+// NewCredentialsCache returns a CredentialsCache that wraps provider. Provider
+// is expected to not be nil. A variadic list of one or more functions can be
+// provided to modify the CredentialsCache configuration. This allows for
+// configuration of credential expiry window and jitter.
+func NewCredentialsCache(provider CredentialsProvider, optFns ...func(options *CredentialsCacheOptions)) *CredentialsCache {
+	options := CredentialsCacheOptions{}
+
+	for _, fn := range optFns {
+		fn(&options)
+	}
+
+	if options.ExpiryWindow < 0 {
+		options.ExpiryWindow = 0
+	}
+
+	if options.ExpiryWindowJitterFrac < 0 {
+		options.ExpiryWindowJitterFrac = 0
+	} else if options.ExpiryWindowJitterFrac > 1 {
+		options.ExpiryWindowJitterFrac = 1
+	}
+
+	return &CredentialsCache{
+		provider: provider,
+		options:  options,
+	}
+}
+
+// Retrieve returns the credentials. If the credentials have already been
+// retrieved, and not expired the cached credentials will be returned. If the
+// credentials have not been retrieved yet, or expired the provider's Retrieve
+// method will be called.
+//
+// Returns an error if the provider's retrieve method returns an error.
+func (p *CredentialsCache) Retrieve(ctx context.Context) (Credentials, error) {
+	if creds, ok := p.getCreds(); ok && !creds.Expired() {
+		return creds, nil
+	}
+
+	resCh := p.sf.DoChan("", func() (interface{}, error) {
+		return p.singleRetrieve(&suppressedContext{ctx})
+	})
+	select {
+	case res := <-resCh:
+		return res.Val.(Credentials), res.Err
+	case <-ctx.Done():
+		return Credentials{}, &RequestCanceledError{Err: ctx.Err()}
+	}
+}
+
+func (p *CredentialsCache) singleRetrieve(ctx context.Context) (interface{}, error) {
+	currCreds, ok := p.getCreds()
+	if ok && !currCreds.Expired() {
+		return currCreds, nil
+	}
+
+	newCreds, err := p.provider.Retrieve(ctx)
+	if err != nil {
+		handleFailToRefresh := defaultHandleFailToRefresh
+		if cs, ok := p.provider.(HandleFailRefreshCredentialsCacheStrategy); ok {
+			handleFailToRefresh = cs.HandleFailToRefresh
+		}
+		newCreds, err = handleFailToRefresh(ctx, currCreds, err)
+		if err != nil {
+			return Credentials{}, fmt.Errorf("failed to refresh cached credentials, %w", err)
+		}
+	}
+
+	if newCreds.CanExpire && p.options.ExpiryWindow > 0 {
+		adjustExpiresBy := defaultAdjustExpiresBy
+		if cs, ok := p.provider.(AdjustExpiresByCredentialsCacheStrategy); ok {
+			adjustExpiresBy = cs.AdjustExpiresBy
+		}
+
+		randFloat64, err := sdkrand.CryptoRandFloat64()
+		if err != nil {
+			return Credentials{}, fmt.Errorf("failed to get random provider, %w", err)
+		}
+
+		var jitter time.Duration
+		if p.options.ExpiryWindowJitterFrac > 0 {
+			jitter = time.Duration(randFloat64 *
+				p.options.ExpiryWindowJitterFrac * float64(p.options.ExpiryWindow))
+		}
+
+		newCreds, err = adjustExpiresBy(newCreds, -(p.options.ExpiryWindow - jitter))
+		if err != nil {
+			return Credentials{}, fmt.Errorf("failed to adjust credentials expires, %w", err)
+		}
+	}
+
+	p.creds.Store(&newCreds)
+	return newCreds, nil
+}
+
+// getCreds returns the currently stored credentials and true. Returning false
+// if no credentials were stored.
+func (p *CredentialsCache) getCreds() (Credentials, bool) {
+	v := p.creds.Load()
+	if v == nil {
+		return Credentials{}, false
+	}
+
+	c := v.(*Credentials)
+	if c == nil || !c.HasKeys() {
+		return Credentials{}, false
+	}
+
+	return *c, true
+}
+
+// Invalidate will invalidate the cached credentials. The next call to Retrieve
+// will cause the provider's Retrieve method to be called.
+func (p *CredentialsCache) Invalidate() {
+	p.creds.Store((*Credentials)(nil))
+}
+
+// IsCredentialsProvider returns whether credential provider wrapped by CredentialsCache
+// matches the target provider type.
+func (p *CredentialsCache) IsCredentialsProvider(target CredentialsProvider) bool {
+	return IsCredentialsProvider(p.provider, target)
+}
+
+// HandleFailRefreshCredentialsCacheStrategy is an interface for
+// CredentialsCache to allow a CredentialsProvider to control how a failed
+// credential refresh is handled.
+type HandleFailRefreshCredentialsCacheStrategy interface {
+	// Given the previously cached Credentials, if any, and refresh error, may
+	// returns new or modified set of Credentials, or error.
+	//
+	// Credential caches may use default implementation if nil.
+	HandleFailToRefresh(context.Context, Credentials, error) (Credentials, error)
+}
+
+// defaultHandleFailToRefresh returns the passed in error.
+func defaultHandleFailToRefresh(ctx context.Context, _ Credentials, err error) (Credentials, error) {
+	return Credentials{}, err
+}
+
+// AdjustExpiresByCredentialsCacheStrategy is an interface for CredentialCache
+// to allow CredentialsProvider to intercept adjustments to Credentials expiry
+// based on expectations and use cases of CredentialsProvider.
+//
+// Credential caches may use default implementation if nil.
+type AdjustExpiresByCredentialsCacheStrategy interface {
+	// Given a Credentials as input, applying any mutations and
+	// returning the potentially updated Credentials, or error.
+	AdjustExpiresBy(Credentials, time.Duration) (Credentials, error)
+}
+
+// defaultAdjustExpiresBy adds the duration to the passed in credentials Expires,
+// and returns the updated credentials value. If Credentials value's CanExpire
+// is false, the passed in credentials are returned unchanged.
+func defaultAdjustExpiresBy(creds Credentials, dur time.Duration) (Credentials, error) {
+	if !creds.CanExpire {
+		return creds, nil
+	}
+
+	creds.Expires = creds.Expires.Add(dur)
+	return creds, nil
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/aws/credentials.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/aws/credentials.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/aws/credentials.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/aws/credentials.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,173 @@
+package aws
+
+import (
+	"context"
+	"fmt"
+	"reflect"
+	"time"
+
+	"github.com/aws/aws-sdk-go-v2/internal/sdk"
+)
+
+// AnonymousCredentials provides a sentinel CredentialsProvider that should be
+// used to instruct the SDK's signing middleware to not sign the request.
+//
+// Using `nil` credentials when configuring an API client will achieve the same
+// result. The AnonymousCredentials type allows you to configure the SDK's
+// external config loading to not attempt to source credentials from the shared
+// config or environment.
+//
+// For example you can use this CredentialsProvider with an API client's
+// Options to instruct the client not to sign a request for accessing public
+// S3 bucket objects.
+//
+// The following example demonstrates using the AnonymousCredentials to prevent
+// SDK's external config loading attempt to resolve credentials.
+//
+//	cfg, err := config.LoadDefaultConfig(context.TODO(),
+//	     config.WithCredentialsProvider(aws.AnonymousCredentials{}),
+//	)
+//	if err != nil {
+//	     log.Fatalf("failed to load config, %v", err)
+//	}
+//
+//	client := s3.NewFromConfig(cfg)
+//
+// Alternatively you can leave the API client Option's `Credential` member to
+// nil. If using the `NewFromConfig` constructor you'll need to explicitly set
+// the `Credentials` member to nil, if the external config resolved a
+// credential provider.
+//
+//	client := s3.New(s3.Options{
+//	     // Credentials defaults to a nil value.
+//	})
+//
+// This can also be configured for specific operations calls too.
+//
+//	cfg, err := config.LoadDefaultConfig(context.TODO())
+//	if err != nil {
+//	     log.Fatalf("failed to load config, %v", err)
+//	}
+//
+//	client := s3.NewFromConfig(cfg)
+//
+//	result, err := client.GetObject(context.TODO(), &s3.GetObjectInput{
+//	     Bucket: aws.String("example-bucket"),
+//	     Key: aws.String("example-key"),
+//	}, func(o *s3.Options) {
+//	     o.Credentials = nil
+//	     // Or
+//	     o.Credentials = aws.AnonymousCredentials{}
+//	})
+type AnonymousCredentials struct{}
+
+// Retrieve implements the CredentialsProvider interface, but will always
+// return error, and cannot be used to sign a request. The AnonymousCredentials
+// type is used as a sentinel type instructing the AWS request signing
+// middleware to not sign a request.
+func (AnonymousCredentials) Retrieve(context.Context) (Credentials, error) {
+	return Credentials{Source: "AnonymousCredentials"},
+		fmt.Errorf("the AnonymousCredentials is not a valid credential provider, and cannot be used to sign AWS requests with")
+}
+
+// A Credentials is the AWS credentials value for individual credential fields.
+type Credentials struct {
+	// AWS Access key ID
+	AccessKeyID string
+
+	// AWS Secret Access Key
+	SecretAccessKey string
+
+	// AWS Session Token
+	SessionToken string
+
+	// Source of the credentials
+	Source string
+
+	// States if the credentials can expire or not.
+	CanExpire bool
+
+	// The time the credentials will expire at. Should be ignored if CanExpire
+	// is false.
+	Expires time.Time
+
+	// The ID of the account for the credentials.
+	AccountID string
+}
+
+// Expired returns if the credentials have expired.
+func (v Credentials) Expired() bool {
+	if v.CanExpire {
+		// Calling Round(0) on the current time will truncate the monotonic
+		// reading only. Ensures credential expiry time is always based on
+		// reported wall-clock time.
+		return !v.Expires.After(sdk.NowTime().Round(0))
+	}
+
+	return false
+}
+
+// HasKeys returns if the credentials keys are set.
+func (v Credentials) HasKeys() bool {
+	return len(v.AccessKeyID) > 0 && len(v.SecretAccessKey) > 0
+}
+
+// A CredentialsProvider is the interface for any component which will provide
+// a Credentials value. A CredentialsProvider is required to manage its own
+// Expired state, and what being expired means.
+//
+// A credentials provider implementation can be wrapped with a CredentialCache
+// to cache the credential value retrieved. Without the cache the SDK will
+// attempt to retrieve the credentials for every request.
+type CredentialsProvider interface {
+	// Retrieve returns nil if it successfully retrieved the value.
+	// Error is returned if the value were not obtainable, or empty.
+	Retrieve(ctx context.Context) (Credentials, error)
+}
+
+// CredentialsProviderFunc provides a helper wrapping a function value to
+// satisfy the CredentialsProvider interface.
+type CredentialsProviderFunc func(context.Context) (Credentials, error)
+
+// Retrieve delegates to the function value the CredentialsProviderFunc wraps.
+func (fn CredentialsProviderFunc) Retrieve(ctx context.Context) (Credentials, error) {
+	return fn(ctx)
+}
+
+type isCredentialsProvider interface {
+	IsCredentialsProvider(CredentialsProvider) bool
+}
+
+// IsCredentialsProvider returns whether the target CredentialProvider is the same type as provider when comparing the
+// implementation type.
+//
+// If provider has a method IsCredentialsProvider(CredentialsProvider) bool it will be responsible for validating
+// whether target matches the credential provider type.
+//
+// When comparing the CredentialProvider implementations provider and target for equality, the following rules are used:
+//
+//	If provider is of type T and target is of type V, true if type *T is the same as type *V, otherwise false
+//	If provider is of type *T and target is of type V, true if type *T is the same as type *V, otherwise false
+//	If provider is of type T and target is of type *V, true if type *T is the same as type *V, otherwise false
+//	If provider is of type *T and target is of type *V, true if type *T is the same as type *V, otherwise false
+func IsCredentialsProvider(provider, target CredentialsProvider) bool {
+	if target == nil || provider == nil {
+		return provider == target
+	}
+
+	if x, ok := provider.(isCredentialsProvider); ok {
+		return x.IsCredentialsProvider(target)
+	}
+
+	targetType := reflect.TypeOf(target)
+	if targetType.Kind() != reflect.Ptr {
+		targetType = reflect.PtrTo(targetType)
+	}
+
+	providerType := reflect.TypeOf(provider)
+	if providerType.Kind() != reflect.Ptr {
+		providerType = reflect.PtrTo(providerType)
+	}
+
+	return targetType.AssignableTo(providerType)
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/aws/defaults/auto.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/aws/defaults/auto.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/aws/defaults/auto.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/aws/defaults/auto.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,38 @@
+package defaults
+
+import (
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"runtime"
+	"strings"
+)
+
+var getGOOS = func() string {
+	return runtime.GOOS
+}
+
+// ResolveDefaultsModeAuto is used to determine the effective aws.DefaultsMode when the mode
+// is set to aws.DefaultsModeAuto.
+func ResolveDefaultsModeAuto(region string, environment aws.RuntimeEnvironment) aws.DefaultsMode {
+	goos := getGOOS()
+	if goos == "android" || goos == "ios" {
+		return aws.DefaultsModeMobile
+	}
+
+	var currentRegion string
+	if len(environment.EnvironmentIdentifier) > 0 {
+		currentRegion = environment.Region
+	}
+
+	if len(currentRegion) == 0 && len(environment.EC2InstanceMetadataRegion) > 0 {
+		currentRegion = environment.EC2InstanceMetadataRegion
+	}
+
+	if len(region) > 0 && len(currentRegion) > 0 {
+		if strings.EqualFold(region, currentRegion) {
+			return aws.DefaultsModeInRegion
+		}
+		return aws.DefaultsModeCrossRegion
+	}
+
+	return aws.DefaultsModeStandard
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/aws/defaults/configuration.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/aws/defaults/configuration.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/aws/defaults/configuration.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/aws/defaults/configuration.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,43 @@
+package defaults
+
+import (
+	"time"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+)
+
+// Configuration is the set of SDK configuration options that are determined based
+// on the configured DefaultsMode.
+type Configuration struct {
+	// RetryMode is the configuration's default retry mode API clients should
+	// use for constructing a Retryer.
+	RetryMode aws.RetryMode
+
+	// ConnectTimeout is the maximum amount of time a dial will wait for
+	// a connect to complete.
+	//
+	// See https://pkg.go.dev/net#Dialer.Timeout
+	ConnectTimeout *time.Duration
+
+	// TLSNegotiationTimeout specifies the maximum amount of time to wait
+	// for a TLS handshake to complete.
+	//
+	// See https://pkg.go.dev/net/http#Transport.TLSHandshakeTimeout
+	TLSNegotiationTimeout *time.Duration
+}
+
+// GetConnectTimeout returns the ConnectTimeout value, returns false if the value is not set.
+func (c *Configuration) GetConnectTimeout() (time.Duration, bool) {
+	if c.ConnectTimeout == nil {
+		return 0, false
+	}
+	return *c.ConnectTimeout, true
+}
+
+// GetTLSNegotiationTimeout returns the TLSNegotiationTimeout value, returns false if the value is not set.
+func (c *Configuration) GetTLSNegotiationTimeout() (time.Duration, bool) {
+	if c.TLSNegotiationTimeout == nil {
+		return 0, false
+	}
+	return *c.TLSNegotiationTimeout, true
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/aws/defaults/defaults.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/aws/defaults/defaults.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/aws/defaults/defaults.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/aws/defaults/defaults.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,50 @@
+// Code generated by github.com/aws/aws-sdk-go-v2/internal/codegen/cmd/defaultsconfig. DO NOT EDIT.
+
+package defaults
+
+import (
+	"fmt"
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"time"
+)
+
+// GetModeConfiguration returns the default Configuration descriptor for the given mode.
+//
+// Supports the following modes: cross-region, in-region, mobile, standard
+func GetModeConfiguration(mode aws.DefaultsMode) (Configuration, error) {
+	var mv aws.DefaultsMode
+	mv.SetFromString(string(mode))
+
+	switch mv {
+	case aws.DefaultsModeCrossRegion:
+		settings := Configuration{
+			ConnectTimeout:        aws.Duration(3100 * time.Millisecond),
+			RetryMode:             aws.RetryMode("standard"),
+			TLSNegotiationTimeout: aws.Duration(3100 * time.Millisecond),
+		}
+		return settings, nil
+	case aws.DefaultsModeInRegion:
+		settings := Configuration{
+			ConnectTimeout:        aws.Duration(1100 * time.Millisecond),
+			RetryMode:             aws.RetryMode("standard"),
+			TLSNegotiationTimeout: aws.Duration(1100 * time.Millisecond),
+		}
+		return settings, nil
+	case aws.DefaultsModeMobile:
+		settings := Configuration{
+			ConnectTimeout:        aws.Duration(30000 * time.Millisecond),
+			RetryMode:             aws.RetryMode("standard"),
+			TLSNegotiationTimeout: aws.Duration(30000 * time.Millisecond),
+		}
+		return settings, nil
+	case aws.DefaultsModeStandard:
+		settings := Configuration{
+			ConnectTimeout:        aws.Duration(3100 * time.Millisecond),
+			RetryMode:             aws.RetryMode("standard"),
+			TLSNegotiationTimeout: aws.Duration(3100 * time.Millisecond),
+		}
+		return settings, nil
+	default:
+		return Configuration{}, fmt.Errorf("unsupported defaults mode: %v", mode)
+	}
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/aws/defaults/doc.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/aws/defaults/doc.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/aws/defaults/doc.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/aws/defaults/doc.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,2 @@
+// Package defaults provides recommended configuration values for AWS SDKs and CLIs.
+package defaults
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/aws/defaultsmode.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/aws/defaultsmode.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/aws/defaultsmode.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/aws/defaultsmode.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,95 @@
+// Code generated by github.com/aws/aws-sdk-go-v2/internal/codegen/cmd/defaultsmode. DO NOT EDIT.
+
+package aws
+
+import (
+	"strings"
+)
+
+// DefaultsMode is the SDK defaults mode setting.
+type DefaultsMode string
+
+// The DefaultsMode constants.
+const (
+	// DefaultsModeAuto is an experimental mode that builds on the standard mode.
+	// The SDK will attempt to discover the execution environment to determine the
+	// appropriate settings automatically.
+	//
+	// Note that the auto detection is heuristics-based and does not guarantee 100%
+	// accuracy. STANDARD mode will be used if the execution environment cannot
+	// be determined. The auto detection might query EC2 Instance Metadata service
+	// (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html),
+	// which might introduce latency. Therefore we recommend choosing an explicit
+	// defaults_mode instead if startup latency is critical to your application
+	DefaultsModeAuto DefaultsMode = "auto"
+
+	// DefaultsModeCrossRegion builds on the standard mode and includes optimization
+	// tailored for applications which call AWS services in a different region
+	//
+	// Note that the default values vended from this mode might change as best practices
+	// may evolve. As a result, it is encouraged to perform tests when upgrading
+	// the SDK
+	DefaultsModeCrossRegion DefaultsMode = "cross-region"
+
+	// DefaultsModeInRegion builds on the standard mode and includes optimization
+	// tailored for applications which call AWS services from within the same AWS
+	// region
+	//
+	// Note that the default values vended from this mode might change as best practices
+	// may evolve. As a result, it is encouraged to perform tests when upgrading
+	// the SDK
+	DefaultsModeInRegion DefaultsMode = "in-region"
+
+	// DefaultsModeLegacy provides default settings that vary per SDK and were used
+	// prior to establishment of defaults_mode
+	DefaultsModeLegacy DefaultsMode = "legacy"
+
+	// DefaultsModeMobile builds on the standard mode and includes optimization
+	// tailored for mobile applications
+	//
+	// Note that the default values vended from this mode might change as best practices
+	// may evolve. As a result, it is encouraged to perform tests when upgrading
+	// the SDK
+	DefaultsModeMobile DefaultsMode = "mobile"
+
+	// DefaultsModeStandard provides the latest recommended default values that
+	// should be safe to run in most scenarios
+	//
+	// Note that the default values vended from this mode might change as best practices
+	// may evolve. As a result, it is encouraged to perform tests when upgrading
+	// the SDK
+	DefaultsModeStandard DefaultsMode = "standard"
+)
+
+// SetFromString sets the DefaultsMode value to one of the pre-defined constants that matches
+// the provided string when compared using EqualFold. If the value does not match a known
+// constant it will be set to as-is and the function will return false. As a special case, if the
+// provided value is a zero-length string, the mode will be set to LegacyDefaultsMode.
+func (d *DefaultsMode) SetFromString(v string) (ok bool) {
+	switch {
+	case strings.EqualFold(v, string(DefaultsModeAuto)):
+		*d = DefaultsModeAuto
+		ok = true
+	case strings.EqualFold(v, string(DefaultsModeCrossRegion)):
+		*d = DefaultsModeCrossRegion
+		ok = true
+	case strings.EqualFold(v, string(DefaultsModeInRegion)):
+		*d = DefaultsModeInRegion
+		ok = true
+	case strings.EqualFold(v, string(DefaultsModeLegacy)):
+		*d = DefaultsModeLegacy
+		ok = true
+	case strings.EqualFold(v, string(DefaultsModeMobile)):
+		*d = DefaultsModeMobile
+		ok = true
+	case strings.EqualFold(v, string(DefaultsModeStandard)):
+		*d = DefaultsModeStandard
+		ok = true
+	case len(v) == 0:
+		*d = DefaultsModeLegacy
+		ok = true
+	default:
+		*d = DefaultsMode(v)
+	}
+	return ok
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/aws/doc.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/aws/doc.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/aws/doc.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/aws/doc.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,62 @@
+// Package aws provides the core SDK's utilities and shared types. Use this package's
+// utilities to simplify setting and reading API operations parameters.
+//
+// # Value and Pointer Conversion Utilities
+//
+// This package includes a helper conversion utility for each scalar type the SDK's
+// API use. These utilities make getting a pointer of the scalar, and dereferencing
+// a pointer easier.
+//
+// Each conversion utility comes in two forms. Value to Pointer and Pointer to Value.
+// The Pointer to value will safely dereference the pointer and return its value.
+// If the pointer was nil, the scalar's zero value will be returned.
+//
+// The value to pointer functions will be named after the scalar type. So get a
+// *string from a string value, use the "String" function. This makes it easy
+// to get a pointer of a literal string value, because getting the address of a
+// literal requires assigning the value to a variable first.
+//
+//	var strPtr *string
+//
+//	// Without the SDK's conversion functions
+//	str := "my string"
+//	strPtr = &str
+//
+//	// With the SDK's conversion functions
+//	strPtr = aws.String("my string")
+//
+//	// Convert *string to string value
+//	str = aws.ToString(strPtr)
+//
+// In addition to scalars the aws package also includes conversion utilities for
+// map and slice for commonly types used in API parameters. The map and slice
+// conversion functions use similar naming pattern as the scalar conversion
+// functions.
+//
+//	var strPtrs []*string
+//	var strs []string = []string{"Go", "Gophers", "Go"}
+//
+//	// Convert []string to []*string
+//	strPtrs = aws.StringSlice(strs)
+//
+//	// Convert []*string to []string
+//	strs = aws.ToStringSlice(strPtrs)
+//
+// # SDK Default HTTP Client
+//
+// The SDK will use the http.DefaultClient if a HTTP client is not provided to
+// the SDK's Session, or service client constructor. This means that if the
+// http.DefaultClient is modified by other components of your application the
+// modifications will be picked up by the SDK as well.
+//
+// In some cases this might be intended, but it is a better practice to create
+// a custom HTTP Client to share explicitly through your application. You can
+// configure the SDK to use the custom HTTP Client by setting the HTTPClient
+// value of the SDK's Config type when creating a Session or service client.
+package aws
+
+// generate.go uses a build tag of "ignore", go run doesn't need to specify
+// this because go run ignores all build flags when running a go file directly.
+//go:generate go run -tags codegen generate.go
+//go:generate go run -tags codegen logging_generate.go
+//go:generate gofmt -w -s .
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/aws/endpoints.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/aws/endpoints.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/aws/endpoints.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/aws/endpoints.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,247 @@
+package aws
+
+import (
+	"fmt"
+)
+
+// DualStackEndpointState is a constant to describe the dual-stack endpoint resolution behavior.
+type DualStackEndpointState uint
+
+const (
+	// DualStackEndpointStateUnset is the default value behavior for dual-stack endpoint resolution.
+	DualStackEndpointStateUnset DualStackEndpointState = iota
+
+	// DualStackEndpointStateEnabled enables dual-stack endpoint resolution for service endpoints.
+	DualStackEndpointStateEnabled
+
+	// DualStackEndpointStateDisabled disables dual-stack endpoint resolution for endpoints.
+	DualStackEndpointStateDisabled
+)
+
+// GetUseDualStackEndpoint takes a service's EndpointResolverOptions and returns the UseDualStackEndpoint value.
+// Returns boolean false if the provided options does not have a method to retrieve the DualStackEndpointState.
+func GetUseDualStackEndpoint(options ...interface{}) (value DualStackEndpointState, found bool) {
+	type iface interface {
+		GetUseDualStackEndpoint() DualStackEndpointState
+	}
+	for _, option := range options {
+		if i, ok := option.(iface); ok {
+			value = i.GetUseDualStackEndpoint()
+			found = true
+			break
+		}
+	}
+	return value, found
+}
+
+// FIPSEndpointState is a constant to describe the FIPS endpoint resolution behavior.
+type FIPSEndpointState uint
+
+const (
+	// FIPSEndpointStateUnset is the default value behavior for FIPS endpoint resolution.
+	FIPSEndpointStateUnset FIPSEndpointState = iota
+
+	// FIPSEndpointStateEnabled enables FIPS endpoint resolution for service endpoints.
+	FIPSEndpointStateEnabled
+
+	// FIPSEndpointStateDisabled disables FIPS endpoint resolution for endpoints.
+	FIPSEndpointStateDisabled
+)
+
+// GetUseFIPSEndpoint takes a service's EndpointResolverOptions and returns the UseFIPSEndpoint value.
+// Returns boolean false if the provided options does not have a method to retrieve the FIPSEndpointState.
+func GetUseFIPSEndpoint(options ...interface{}) (value FIPSEndpointState, found bool) {
+	type iface interface {
+		GetUseFIPSEndpoint() FIPSEndpointState
+	}
+	for _, option := range options {
+		if i, ok := option.(iface); ok {
+			value = i.GetUseFIPSEndpoint()
+			found = true
+			break
+		}
+	}
+	return value, found
+}
+
+// Endpoint represents the endpoint a service client should make API operation
+// calls to.
+//
+// The SDK will automatically resolve these endpoints per API client using an
+// internal endpoint resolvers. If you'd like to provide custom endpoint
+// resolving behavior you can implement the EndpointResolver interface.
+//
+// Deprecated: This structure was used with the global [EndpointResolver]
+// interface, which has been deprecated in favor of service-specific endpoint
+// resolution. See the deprecation docs on that interface for more information.
+type Endpoint struct {
+	// The base URL endpoint the SDK API clients will use to make API calls to.
+	// The SDK will suffix URI path and query elements to this endpoint.
+	URL string
+
+	// Specifies if the endpoint's hostname can be modified by the SDK's API
+	// client.
+	//
+	// If the hostname is mutable the SDK API clients may modify any part of
+	// the hostname based on the requirements of the API, (e.g. adding, or
+	// removing content in the hostname). Such as, Amazon S3 API client
+	// prefixing "bucketname" to the hostname, or changing the
+	// hostname service name component from "s3." to "s3-accesspoint.dualstack."
+	// for the dualstack endpoint of an S3 Accesspoint resource.
+	//
+	// Care should be taken when providing a custom endpoint for an API. If the
+	// endpoint hostname is mutable, and the client cannot modify the endpoint
+	// correctly, the operation call will most likely fail, or have undefined
+	// behavior.
+	//
+	// If hostname is immutable, the SDK API clients will not modify the
+	// hostname of the URL. This may cause the API client not to function
+	// correctly if the API requires the operation specific hostname values
+	// to be used by the client.
+	//
+	// This flag does not modify the API client's behavior if this endpoint
+	// will be used instead of Endpoint Discovery, or if the endpoint will be
+	// used to perform Endpoint Discovery. That behavior is configured via the
+	// API Client's Options.
+	HostnameImmutable bool
+
+	// The AWS partition the endpoint belongs to.
+	PartitionID string
+
+	// The service name that should be used for signing the requests to the
+	// endpoint.
+	SigningName string
+
+	// The region that should be used for signing the request to the endpoint.
+	SigningRegion string
+
+	// The signing method that should be used for signing the requests to the
+	// endpoint.
+	SigningMethod string
+
+	// The source of the Endpoint. By default, this will be EndpointSourceServiceMetadata.
+	// When providing a custom endpoint, you should set the source as EndpointSourceCustom.
+	// If source is not provided when providing a custom endpoint, the SDK may not
+	// perform required host mutations correctly. Source should be used along with
+	// HostnameImmutable property as per the usage requirement.
+	Source EndpointSource
+}
+
+// EndpointSource is the endpoint source type.
+//
+// Deprecated: The global [Endpoint] structure is deprecated.
+type EndpointSource int
+
+const (
+	// EndpointSourceServiceMetadata denotes service modeled endpoint metadata is used as Endpoint Source.
+	EndpointSourceServiceMetadata EndpointSource = iota
+
+	// EndpointSourceCustom denotes endpoint is a custom endpoint. This source should be used when
+	// user provides a custom endpoint to be used by the SDK.
+	EndpointSourceCustom
+)
+
+// EndpointNotFoundError is a sentinel error to indicate that the
+// EndpointResolver implementation was unable to resolve an endpoint for the
+// given service and region. Resolvers should use this to indicate that an API
+// client should fallback and attempt to use it's internal default resolver to
+// resolve the endpoint.
+type EndpointNotFoundError struct {
+	Err error
+}
+
+// Error is the error message.
+func (e *EndpointNotFoundError) Error() string {
+	return fmt.Sprintf("endpoint not found, %v", e.Err)
+}
+
+// Unwrap returns the underlying error.
+func (e *EndpointNotFoundError) Unwrap() error {
+	return e.Err
+}
+
+// EndpointResolver is an endpoint resolver that can be used to provide or
+// override an endpoint for the given service and region. API clients will
+// attempt to use the EndpointResolver first to resolve an endpoint if
+// available. If the EndpointResolver returns an EndpointNotFoundError error,
+// API clients will fallback to attempting to resolve the endpoint using its
+// internal default endpoint resolver.
+//
+// Deprecated: The global endpoint resolution interface is deprecated. The API
+// for endpoint resolution is now unique to each service and is set via the
+// EndpointResolverV2 field on service client options. Setting a value for
+// EndpointResolver on aws.Config or service client options will prevent you
+// from using any endpoint-related service features released after the
+// introduction of EndpointResolverV2. You may also encounter broken or
+// unexpected behavior when using the old global interface with services that
+// use many endpoint-related customizations such as S3.
+type EndpointResolver interface {
+	ResolveEndpoint(service, region string) (Endpoint, error)
+}
+
+// EndpointResolverFunc wraps a function to satisfy the EndpointResolver interface.
+//
+// Deprecated: The global endpoint resolution interface is deprecated. See
+// deprecation docs on [EndpointResolver].
+type EndpointResolverFunc func(service, region string) (Endpoint, error)
+
+// ResolveEndpoint calls the wrapped function and returns the results.
+func (e EndpointResolverFunc) ResolveEndpoint(service, region string) (Endpoint, error) {
+	return e(service, region)
+}
+
+// EndpointResolverWithOptions is an endpoint resolver that can be used to provide or
+// override an endpoint for the given service, region, and the service client's EndpointOptions. API clients will
+// attempt to use the EndpointResolverWithOptions first to resolve an endpoint if
+// available. If the EndpointResolverWithOptions returns an EndpointNotFoundError error,
+// API clients will fallback to attempting to resolve the endpoint using its
+// internal default endpoint resolver.
+//
+// Deprecated: The global endpoint resolution interface is deprecated. See
+// deprecation docs on [EndpointResolver].
+type EndpointResolverWithOptions interface {
+	ResolveEndpoint(service, region string, options ...interface{}) (Endpoint, error)
+}
+
+// EndpointResolverWithOptionsFunc wraps a function to satisfy the EndpointResolverWithOptions interface.
+//
+// Deprecated: The global endpoint resolution interface is deprecated. See
+// deprecation docs on [EndpointResolver].
+type EndpointResolverWithOptionsFunc func(service, region string, options ...interface{}) (Endpoint, error)
+
+// ResolveEndpoint calls the wrapped function and returns the results.
+func (e EndpointResolverWithOptionsFunc) ResolveEndpoint(service, region string, options ...interface{}) (Endpoint, error) {
+	return e(service, region, options...)
+}
+
+// GetDisableHTTPS takes a service's EndpointResolverOptions and returns the DisableHTTPS value.
+// Returns boolean false if the provided options does not have a method to retrieve the DisableHTTPS.
+func GetDisableHTTPS(options ...interface{}) (value bool, found bool) {
+	type iface interface {
+		GetDisableHTTPS() bool
+	}
+	for _, option := range options {
+		if i, ok := option.(iface); ok {
+			value = i.GetDisableHTTPS()
+			found = true
+			break
+		}
+	}
+	return value, found
+}
+
+// GetResolvedRegion takes a service's EndpointResolverOptions and returns the ResolvedRegion value.
+// Returns boolean false if the provided options does not have a method to retrieve the ResolvedRegion.
+func GetResolvedRegion(options ...interface{}) (value string, found bool) {
+	type iface interface {
+		GetResolvedRegion() string
+	}
+	for _, option := range options {
+		if i, ok := option.(iface); ok {
+			value = i.GetResolvedRegion()
+			found = true
+			break
+		}
+	}
+	return value, found
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/aws/errors.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/aws/errors.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/aws/errors.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/aws/errors.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,9 @@
+package aws
+
+// MissingRegionError is an error that is returned if region configuration
+// value was not found.
+type MissingRegionError struct{}
+
+func (*MissingRegionError) Error() string {
+	return "an AWS region is required, but was not found"
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/aws/from_ptr.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/aws/from_ptr.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/aws/from_ptr.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/aws/from_ptr.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,365 @@
+// Code generated by aws/generate.go DO NOT EDIT.
+
+package aws
+
+import (
+	"github.com/aws/smithy-go/ptr"
+	"time"
+)
+
+// ToBool returns bool value dereferenced if the passed
+// in pointer was not nil. Returns a bool zero value if the
+// pointer was nil.
+func ToBool(p *bool) (v bool) {
+	return ptr.ToBool(p)
+}
+
+// ToBoolSlice returns a slice of bool values, that are
+// dereferenced if the passed in pointer was not nil. Returns a bool
+// zero value if the pointer was nil.
+func ToBoolSlice(vs []*bool) []bool {
+	return ptr.ToBoolSlice(vs)
+}
+
+// ToBoolMap returns a map of bool values, that are
+// dereferenced if the passed in pointer was not nil. The bool
+// zero value is used if the pointer was nil.
+func ToBoolMap(vs map[string]*bool) map[string]bool {
+	return ptr.ToBoolMap(vs)
+}
+
+// ToByte returns byte value dereferenced if the passed
+// in pointer was not nil. Returns a byte zero value if the
+// pointer was nil.
+func ToByte(p *byte) (v byte) {
+	return ptr.ToByte(p)
+}
+
+// ToByteSlice returns a slice of byte values, that are
+// dereferenced if the passed in pointer was not nil. Returns a byte
+// zero value if the pointer was nil.
+func ToByteSlice(vs []*byte) []byte {
+	return ptr.ToByteSlice(vs)
+}
+
+// ToByteMap returns a map of byte values, that are
+// dereferenced if the passed in pointer was not nil. The byte
+// zero value is used if the pointer was nil.
+func ToByteMap(vs map[string]*byte) map[string]byte {
+	return ptr.ToByteMap(vs)
+}
+
+// ToString returns string value dereferenced if the passed
+// in pointer was not nil. Returns a string zero value if the
+// pointer was nil.
+func ToString(p *string) (v string) {
+	return ptr.ToString(p)
+}
+
+// ToStringSlice returns a slice of string values, that are
+// dereferenced if the passed in pointer was not nil. Returns a string
+// zero value if the pointer was nil.
+func ToStringSlice(vs []*string) []string {
+	return ptr.ToStringSlice(vs)
+}
+
+// ToStringMap returns a map of string values, that are
+// dereferenced if the passed in pointer was not nil. The string
+// zero value is used if the pointer was nil.
+func ToStringMap(vs map[string]*string) map[string]string {
+	return ptr.ToStringMap(vs)
+}
+
+// ToInt returns int value dereferenced if the passed
+// in pointer was not nil. Returns a int zero value if the
+// pointer was nil.
+func ToInt(p *int) (v int) {
+	return ptr.ToInt(p)
+}
+
+// ToIntSlice returns a slice of int values, that are
+// dereferenced if the passed in pointer was not nil. Returns a int
+// zero value if the pointer was nil.
+func ToIntSlice(vs []*int) []int {
+	return ptr.ToIntSlice(vs)
+}
+
+// ToIntMap returns a map of int values, that are
+// dereferenced if the passed in pointer was not nil. The int
+// zero value is used if the pointer was nil.
+func ToIntMap(vs map[string]*int) map[string]int {
+	return ptr.ToIntMap(vs)
+}
+
+// ToInt8 returns int8 value dereferenced if the passed
+// in pointer was not nil. Returns a int8 zero value if the
+// pointer was nil.
+func ToInt8(p *int8) (v int8) {
+	return ptr.ToInt8(p)
+}
+
+// ToInt8Slice returns a slice of int8 values, that are
+// dereferenced if the passed in pointer was not nil. Returns a int8
+// zero value if the pointer was nil.
+func ToInt8Slice(vs []*int8) []int8 {
+	return ptr.ToInt8Slice(vs)
+}
+
+// ToInt8Map returns a map of int8 values, that are
+// dereferenced if the passed in pointer was not nil. The int8
+// zero value is used if the pointer was nil.
+func ToInt8Map(vs map[string]*int8) map[string]int8 {
+	return ptr.ToInt8Map(vs)
+}
+
+// ToInt16 returns int16 value dereferenced if the passed
+// in pointer was not nil. Returns a int16 zero value if the
+// pointer was nil.
+func ToInt16(p *int16) (v int16) {
+	return ptr.ToInt16(p)
+}
+
+// ToInt16Slice returns a slice of int16 values, that are
+// dereferenced if the passed in pointer was not nil. Returns a int16
+// zero value if the pointer was nil.
+func ToInt16Slice(vs []*int16) []int16 {
+	return ptr.ToInt16Slice(vs)
+}
+
+// ToInt16Map returns a map of int16 values, that are
+// dereferenced if the passed in pointer was not nil. The int16
+// zero value is used if the pointer was nil.
+func ToInt16Map(vs map[string]*int16) map[string]int16 {
+	return ptr.ToInt16Map(vs)
+}
+
+// ToInt32 returns int32 value dereferenced if the passed
+// in pointer was not nil. Returns a int32 zero value if the
+// pointer was nil.
+func ToInt32(p *int32) (v int32) {
+	return ptr.ToInt32(p)
+}
+
+// ToInt32Slice returns a slice of int32 values, that are
+// dereferenced if the passed in pointer was not nil. Returns a int32
+// zero value if the pointer was nil.
+func ToInt32Slice(vs []*int32) []int32 {
+	return ptr.ToInt32Slice(vs)
+}
+
+// ToInt32Map returns a map of int32 values, that are
+// dereferenced if the passed in pointer was not nil. The int32
+// zero value is used if the pointer was nil.
+func ToInt32Map(vs map[string]*int32) map[string]int32 {
+	return ptr.ToInt32Map(vs)
+}
+
+// ToInt64 returns int64 value dereferenced if the passed
+// in pointer was not nil. Returns a int64 zero value if the
+// pointer was nil.
+func ToInt64(p *int64) (v int64) {
+	return ptr.ToInt64(p)
+}
+
+// ToInt64Slice returns a slice of int64 values, that are
+// dereferenced if the passed in pointer was not nil. Returns a int64
+// zero value if the pointer was nil.
+func ToInt64Slice(vs []*int64) []int64 {
+	return ptr.ToInt64Slice(vs)
+}
+
+// ToInt64Map returns a map of int64 values, that are
+// dereferenced if the passed in pointer was not nil. The int64
+// zero value is used if the pointer was nil.
+func ToInt64Map(vs map[string]*int64) map[string]int64 {
+	return ptr.ToInt64Map(vs)
+}
+
+// ToUint returns uint value dereferenced if the passed
+// in pointer was not nil. Returns a uint zero value if the
+// pointer was nil.
+func ToUint(p *uint) (v uint) {
+	return ptr.ToUint(p)
+}
+
+// ToUintSlice returns a slice of uint values, that are
+// dereferenced if the passed in pointer was not nil. Returns a uint
+// zero value if the pointer was nil.
+func ToUintSlice(vs []*uint) []uint {
+	return ptr.ToUintSlice(vs)
+}
+
+// ToUintMap returns a map of uint values, that are
+// dereferenced if the passed in pointer was not nil. The uint
+// zero value is used if the pointer was nil.
+func ToUintMap(vs map[string]*uint) map[string]uint {
+	return ptr.ToUintMap(vs)
+}
+
+// ToUint8 returns uint8 value dereferenced if the passed
+// in pointer was not nil. Returns a uint8 zero value if the
+// pointer was nil.
+func ToUint8(p *uint8) (v uint8) {
+	return ptr.ToUint8(p)
+}
+
+// ToUint8Slice returns a slice of uint8 values, that are
+// dereferenced if the passed in pointer was not nil. Returns a uint8
+// zero value if the pointer was nil.
+func ToUint8Slice(vs []*uint8) []uint8 {
+	return ptr.ToUint8Slice(vs)
+}
+
+// ToUint8Map returns a map of uint8 values, that are
+// dereferenced if the passed in pointer was not nil. The uint8
+// zero value is used if the pointer was nil.
+func ToUint8Map(vs map[string]*uint8) map[string]uint8 {
+	return ptr.ToUint8Map(vs)
+}
+
+// ToUint16 returns uint16 value dereferenced if the passed
+// in pointer was not nil. Returns a uint16 zero value if the
+// pointer was nil.
+func ToUint16(p *uint16) (v uint16) {
+	return ptr.ToUint16(p)
+}
+
+// ToUint16Slice returns a slice of uint16 values, that are
+// dereferenced if the passed in pointer was not nil. Returns a uint16
+// zero value if the pointer was nil.
+func ToUint16Slice(vs []*uint16) []uint16 {
+	return ptr.ToUint16Slice(vs)
+}
+
+// ToUint16Map returns a map of uint16 values, that are
+// dereferenced if the passed in pointer was not nil. The uint16
+// zero value is used if the pointer was nil.
+func ToUint16Map(vs map[string]*uint16) map[string]uint16 {
+	return ptr.ToUint16Map(vs)
+}
+
+// ToUint32 returns uint32 value dereferenced if the passed
+// in pointer was not nil. Returns a uint32 zero value if the
+// pointer was nil.
+func ToUint32(p *uint32) (v uint32) {
+	return ptr.ToUint32(p)
+}
+
+// ToUint32Slice returns a slice of uint32 values, that are
+// dereferenced if the passed in pointer was not nil. Returns a uint32
+// zero value if the pointer was nil.
+func ToUint32Slice(vs []*uint32) []uint32 {
+	return ptr.ToUint32Slice(vs)
+}
+
+// ToUint32Map returns a map of uint32 values, that are
+// dereferenced if the passed in pointer was not nil. The uint32
+// zero value is used if the pointer was nil.
+func ToUint32Map(vs map[string]*uint32) map[string]uint32 {
+	return ptr.ToUint32Map(vs)
+}
+
+// ToUint64 returns uint64 value dereferenced if the passed
+// in pointer was not nil. Returns a uint64 zero value if the
+// pointer was nil.
+func ToUint64(p *uint64) (v uint64) {
+	return ptr.ToUint64(p)
+}
+
+// ToUint64Slice returns a slice of uint64 values, that are
+// dereferenced if the passed in pointer was not nil. Returns a uint64
+// zero value if the pointer was nil.
+func ToUint64Slice(vs []*uint64) []uint64 {
+	return ptr.ToUint64Slice(vs)
+}
+
+// ToUint64Map returns a map of uint64 values, that are
+// dereferenced if the passed in pointer was not nil. The uint64
+// zero value is used if the pointer was nil.
+func ToUint64Map(vs map[string]*uint64) map[string]uint64 {
+	return ptr.ToUint64Map(vs)
+}
+
+// ToFloat32 returns float32 value dereferenced if the passed
+// in pointer was not nil. Returns a float32 zero value if the
+// pointer was nil.
+func ToFloat32(p *float32) (v float32) {
+	return ptr.ToFloat32(p)
+}
+
+// ToFloat32Slice returns a slice of float32 values, that are
+// dereferenced if the passed in pointer was not nil. Returns a float32
+// zero value if the pointer was nil.
+func ToFloat32Slice(vs []*float32) []float32 {
+	return ptr.ToFloat32Slice(vs)
+}
+
+// ToFloat32Map returns a map of float32 values, that are
+// dereferenced if the passed in pointer was not nil. The float32
+// zero value is used if the pointer was nil.
+func ToFloat32Map(vs map[string]*float32) map[string]float32 {
+	return ptr.ToFloat32Map(vs)
+}
+
+// ToFloat64 returns float64 value dereferenced if the passed
+// in pointer was not nil. Returns a float64 zero value if the
+// pointer was nil.
+func ToFloat64(p *float64) (v float64) {
+	return ptr.ToFloat64(p)
+}
+
+// ToFloat64Slice returns a slice of float64 values, that are
+// dereferenced if the passed in pointer was not nil. Returns a float64
+// zero value if the pointer was nil.
+func ToFloat64Slice(vs []*float64) []float64 {
+	return ptr.ToFloat64Slice(vs)
+}
+
+// ToFloat64Map returns a map of float64 values, that are
+// dereferenced if the passed in pointer was not nil. The float64
+// zero value is used if the pointer was nil.
+func ToFloat64Map(vs map[string]*float64) map[string]float64 {
+	return ptr.ToFloat64Map(vs)
+}
+
+// ToTime returns time.Time value dereferenced if the passed
+// in pointer was not nil. Returns a time.Time zero value if the
+// pointer was nil.
+func ToTime(p *time.Time) (v time.Time) {
+	return ptr.ToTime(p)
+}
+
+// ToTimeSlice returns a slice of time.Time values, that are
+// dereferenced if the passed in pointer was not nil. Returns a time.Time
+// zero value if the pointer was nil.
+func ToTimeSlice(vs []*time.Time) []time.Time {
+	return ptr.ToTimeSlice(vs)
+}
+
+// ToTimeMap returns a map of time.Time values, that are
+// dereferenced if the passed in pointer was not nil. The time.Time
+// zero value is used if the pointer was nil.
+func ToTimeMap(vs map[string]*time.Time) map[string]time.Time {
+	return ptr.ToTimeMap(vs)
+}
+
+// ToDuration returns time.Duration value dereferenced if the passed
+// in pointer was not nil. Returns a time.Duration zero value if the
+// pointer was nil.
+func ToDuration(p *time.Duration) (v time.Duration) {
+	return ptr.ToDuration(p)
+}
+
+// ToDurationSlice returns a slice of time.Duration values, that are
+// dereferenced if the passed in pointer was not nil. Returns a time.Duration
+// zero value if the pointer was nil.
+func ToDurationSlice(vs []*time.Duration) []time.Duration {
+	return ptr.ToDurationSlice(vs)
+}
+
+// ToDurationMap returns a map of time.Duration values, that are
+// dereferenced if the passed in pointer was not nil. The time.Duration
+// zero value is used if the pointer was nil.
+func ToDurationMap(vs map[string]*time.Duration) map[string]time.Duration {
+	return ptr.ToDurationMap(vs)
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/aws/go_module_metadata.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/aws/go_module_metadata.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/aws/go_module_metadata.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/aws/go_module_metadata.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,6 @@
+// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT.
+
+package aws
+
+// goModuleVersion is the tagged release for this module
+const goModuleVersion = "1.30.3"
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/aws/logging.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/aws/logging.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/aws/logging.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/aws/logging.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,119 @@
+// Code generated by aws/logging_generate.go DO NOT EDIT.
+
+package aws
+
+// ClientLogMode represents the logging mode of SDK clients. The client logging mode is a bit-field where
+// each bit is a flag that describes the logging behavior for one or more client components.
+// The entire 64-bit group is reserved for later expansion by the SDK.
+//
+// Example: Setting ClientLogMode to enable logging of retries and requests
+//
+//	clientLogMode := aws.LogRetries | aws.LogRequest
+//
+// Example: Adding an additional log mode to an existing ClientLogMode value
+//
+//	clientLogMode |= aws.LogResponse
+type ClientLogMode uint64
+
+// Supported ClientLogMode bits that can be configured to toggle logging of specific SDK events.
+const (
+	LogSigning ClientLogMode = 1 << (64 - 1 - iota)
+	LogRetries
+	LogRequest
+	LogRequestWithBody
+	LogResponse
+	LogResponseWithBody
+	LogDeprecatedUsage
+	LogRequestEventMessage
+	LogResponseEventMessage
+)
+
+// IsSigning returns whether the Signing logging mode bit is set
+func (m ClientLogMode) IsSigning() bool {
+	return m&LogSigning != 0
+}
+
+// IsRetries returns whether the Retries logging mode bit is set
+func (m ClientLogMode) IsRetries() bool {
+	return m&LogRetries != 0
+}
+
+// IsRequest returns whether the Request logging mode bit is set
+func (m ClientLogMode) IsRequest() bool {
+	return m&LogRequest != 0
+}
+
+// IsRequestWithBody returns whether the RequestWithBody logging mode bit is set
+func (m ClientLogMode) IsRequestWithBody() bool {
+	return m&LogRequestWithBody != 0
+}
+
+// IsResponse returns whether the Response logging mode bit is set
+func (m ClientLogMode) IsResponse() bool {
+	return m&LogResponse != 0
+}
+
+// IsResponseWithBody returns whether the ResponseWithBody logging mode bit is set
+func (m ClientLogMode) IsResponseWithBody() bool {
+	return m&LogResponseWithBody != 0
+}
+
+// IsDeprecatedUsage returns whether the DeprecatedUsage logging mode bit is set
+func (m ClientLogMode) IsDeprecatedUsage() bool {
+	return m&LogDeprecatedUsage != 0
+}
+
+// IsRequestEventMessage returns whether the RequestEventMessage logging mode bit is set
+func (m ClientLogMode) IsRequestEventMessage() bool {
+	return m&LogRequestEventMessage != 0
+}
+
+// IsResponseEventMessage returns whether the ResponseEventMessage logging mode bit is set
+func (m ClientLogMode) IsResponseEventMessage() bool {
+	return m&LogResponseEventMessage != 0
+}
+
+// ClearSigning clears the Signing logging mode bit
+func (m *ClientLogMode) ClearSigning() {
+	*m &^= LogSigning
+}
+
+// ClearRetries clears the Retries logging mode bit
+func (m *ClientLogMode) ClearRetries() {
+	*m &^= LogRetries
+}
+
+// ClearRequest clears the Request logging mode bit
+func (m *ClientLogMode) ClearRequest() {
+	*m &^= LogRequest
+}
+
+// ClearRequestWithBody clears the RequestWithBody logging mode bit
+func (m *ClientLogMode) ClearRequestWithBody() {
+	*m &^= LogRequestWithBody
+}
+
+// ClearResponse clears the Response logging mode bit
+func (m *ClientLogMode) ClearResponse() {
+	*m &^= LogResponse
+}
+
+// ClearResponseWithBody clears the ResponseWithBody logging mode bit
+func (m *ClientLogMode) ClearResponseWithBody() {
+	*m &^= LogResponseWithBody
+}
+
+// ClearDeprecatedUsage clears the DeprecatedUsage logging mode bit
+func (m *ClientLogMode) ClearDeprecatedUsage() {
+	*m &^= LogDeprecatedUsage
+}
+
+// ClearRequestEventMessage clears the RequestEventMessage logging mode bit
+func (m *ClientLogMode) ClearRequestEventMessage() {
+	*m &^= LogRequestEventMessage
+}
+
+// ClearResponseEventMessage clears the ResponseEventMessage logging mode bit
+func (m *ClientLogMode) ClearResponseEventMessage() {
+	*m &^= LogResponseEventMessage
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/aws/logging_generate.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/aws/logging_generate.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/aws/logging_generate.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/aws/logging_generate.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,95 @@
+//go:build clientlogmode
+// +build clientlogmode
+
+package main
+
+import (
+	"fmt"
+	"log"
+	"os"
+	"strings"
+	"text/template"
+)
+
+var config = struct {
+	ModeBits []string
+}{
+	// Items should be appended only to keep bit-flag positions stable
+	ModeBits: []string{
+		"Signing",
+		"Retries",
+		"Request",
+		"RequestWithBody",
+		"Response",
+		"ResponseWithBody",
+		"DeprecatedUsage",
+		"RequestEventMessage",
+		"ResponseEventMessage",
+	},
+}
+
+func bitName(name string) string {
+	return strings.ToUpper(name[:1]) + name[1:]
+}
+
+var tmpl = template.Must(template.New("ClientLogMode").Funcs(map[string]interface{}{
+	"symbolName": func(name string) string {
+		return "Log" + bitName(name)
+	},
+	"bitName": bitName,
+}).Parse(`// Code generated by aws/logging_generate.go DO NOT EDIT.
+
+package aws
+
+// ClientLogMode represents the logging mode of SDK clients. The client logging mode is a bit-field where
+// each bit is a flag that describes the logging behavior for one or more client components.
+// The entire 64-bit group is reserved for later expansion by the SDK.
+//
+// Example: Setting ClientLogMode to enable logging of retries and requests
+//  clientLogMode := aws.LogRetries | aws.LogRequest
+//
+// Example: Adding an additional log mode to an existing ClientLogMode value
+//  clientLogMode |= aws.LogResponse
+type ClientLogMode uint64
+
+// Supported ClientLogMode bits that can be configured to toggle logging of specific SDK events.
+const (
+{{- range $index, $field := .ModeBits }}
+	{{ (symbolName $field) }}{{- if (eq 0 $index) }} ClientLogMode = 1 << (64 - 1 - iota){{- end }}
+{{- end }}
+)
+{{ range $_, $field := .ModeBits }}
+// Is{{- bitName $field }} returns whether the {{ bitName $field }} logging mode bit is set
+func (m ClientLogMode) Is{{- bitName $field }}() bool {
+	return m&{{- (symbolName $field) }} != 0
+}
+{{ end }}
+{{- range $_, $field := .ModeBits }}
+// Clear{{- bitName $field }} clears the {{ bitName $field }} logging mode bit
+func (m *ClientLogMode) Clear{{- bitName $field }}() {
+	*m &^= {{ (symbolName $field) }}
+}
+{{ end -}}
+`))
+
+func main() {
+	uniqueBitFields := make(map[string]struct{})
+
+	for _, bitName := range config.ModeBits {
+		if _, ok := uniqueBitFields[strings.ToLower(bitName)]; ok {
+			panic(fmt.Sprintf("duplicate bit field: %s", bitName))
+		}
+		uniqueBitFields[bitName] = struct{}{}
+	}
+
+	file, err := os.Create("logging.go")
+	if err != nil {
+		log.Fatal(err)
+	}
+	defer file.Close()
+
+	err = tmpl.Execute(file, config)
+	if err != nil {
+		log.Fatal(err)
+	}
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/metadata.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/metadata.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/metadata.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/metadata.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,213 @@
+package middleware
+
+import (
+	"context"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+
+	"github.com/aws/smithy-go/middleware"
+)
+
+// RegisterServiceMetadata registers metadata about the service and operation into the middleware context
+// so that it is available at runtime for other middleware to introspect.
+type RegisterServiceMetadata struct {
+	ServiceID     string
+	SigningName   string
+	Region        string
+	OperationName string
+}
+
+// ID returns the middleware identifier.
+func (s *RegisterServiceMetadata) ID() string {
+	return "RegisterServiceMetadata"
+}
+
+// HandleInitialize registers service metadata information into the middleware context, allowing for introspection.
+func (s RegisterServiceMetadata) HandleInitialize(
+	ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler,
+) (out middleware.InitializeOutput, metadata middleware.Metadata, err error) {
+	if len(s.ServiceID) > 0 {
+		ctx = SetServiceID(ctx, s.ServiceID)
+	}
+	if len(s.SigningName) > 0 {
+		ctx = SetSigningName(ctx, s.SigningName)
+	}
+	if len(s.Region) > 0 {
+		ctx = setRegion(ctx, s.Region)
+	}
+	if len(s.OperationName) > 0 {
+		ctx = setOperationName(ctx, s.OperationName)
+	}
+	return next.HandleInitialize(ctx, in)
+}
+
+// service metadata keys for storing and lookup of runtime stack information.
+type (
+	serviceIDKey               struct{}
+	signingNameKey             struct{}
+	signingRegionKey           struct{}
+	regionKey                  struct{}
+	operationNameKey           struct{}
+	partitionIDKey             struct{}
+	requiresLegacyEndpointsKey struct{}
+)
+
+// GetServiceID retrieves the service id from the context.
+//
+// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues
+// to clear all stack values.
+func GetServiceID(ctx context.Context) (v string) {
+	v, _ = middleware.GetStackValue(ctx, serviceIDKey{}).(string)
+	return v
+}
+
+// GetSigningName retrieves the service signing name from the context.
+//
+// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues
+// to clear all stack values.
+//
+// Deprecated: This value is unstable. The resolved signing name is available
+// in the signer properties object passed to the signer.
+func GetSigningName(ctx context.Context) (v string) {
+	v, _ = middleware.GetStackValue(ctx, signingNameKey{}).(string)
+	return v
+}
+
+// GetSigningRegion retrieves the region from the context.
+//
+// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues
+// to clear all stack values.
+//
+// Deprecated: This value is unstable. The resolved signing region is available
+// in the signer properties object passed to the signer.
+func GetSigningRegion(ctx context.Context) (v string) {
+	v, _ = middleware.GetStackValue(ctx, signingRegionKey{}).(string)
+	return v
+}
+
+// GetRegion retrieves the endpoint region from the context.
+//
+// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues
+// to clear all stack values.
+func GetRegion(ctx context.Context) (v string) {
+	v, _ = middleware.GetStackValue(ctx, regionKey{}).(string)
+	return v
+}
+
+// GetOperationName retrieves the service operation metadata from the context.
+//
+// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues
+// to clear all stack values.
+func GetOperationName(ctx context.Context) (v string) {
+	v, _ = middleware.GetStackValue(ctx, operationNameKey{}).(string)
+	return v
+}
+
+// GetPartitionID retrieves the endpoint partition id from the context.
+//
+// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues
+// to clear all stack values.
+func GetPartitionID(ctx context.Context) string {
+	v, _ := middleware.GetStackValue(ctx, partitionIDKey{}).(string)
+	return v
+}
+
+// GetRequiresLegacyEndpoints the flag used to indicate if legacy endpoint
+// customizations need to be executed.
+//
+// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues
+// to clear all stack values.
+func GetRequiresLegacyEndpoints(ctx context.Context) bool {
+	v, _ := middleware.GetStackValue(ctx, requiresLegacyEndpointsKey{}).(bool)
+	return v
+}
+
+// SetRequiresLegacyEndpoints set or modifies the flag indicated that
+// legacy endpoint customizations are needed.
+//
+// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues
+// to clear all stack values.
+func SetRequiresLegacyEndpoints(ctx context.Context, value bool) context.Context {
+	return middleware.WithStackValue(ctx, requiresLegacyEndpointsKey{}, value)
+}
+
+// SetSigningName set or modifies the sigv4 or sigv4a signing name on the context.
+//
+// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues
+// to clear all stack values.
+//
+// Deprecated: This value is unstable. Use WithSigV4SigningName client option
+// funcs instead.
+func SetSigningName(ctx context.Context, value string) context.Context {
+	return middleware.WithStackValue(ctx, signingNameKey{}, value)
+}
+
+// SetSigningRegion sets or modifies the region on the context.
+//
+// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues
+// to clear all stack values.
+//
+// Deprecated: This value is unstable. Use WithSigV4SigningRegion client option
+// funcs instead.
+func SetSigningRegion(ctx context.Context, value string) context.Context {
+	return middleware.WithStackValue(ctx, signingRegionKey{}, value)
+}
+
+// SetServiceID sets the service id on the context.
+//
+// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues
+// to clear all stack values.
+func SetServiceID(ctx context.Context, value string) context.Context {
+	return middleware.WithStackValue(ctx, serviceIDKey{}, value)
+}
+
+// setRegion sets the endpoint region on the context.
+//
+// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues
+// to clear all stack values.
+func setRegion(ctx context.Context, value string) context.Context {
+	return middleware.WithStackValue(ctx, regionKey{}, value)
+}
+
+// setOperationName sets the service operation on the context.
+//
+// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues
+// to clear all stack values.
+func setOperationName(ctx context.Context, value string) context.Context {
+	return middleware.WithStackValue(ctx, operationNameKey{}, value)
+}
+
+// SetPartitionID sets the partition id of a resolved region on the context
+//
+// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues
+// to clear all stack values.
+func SetPartitionID(ctx context.Context, value string) context.Context {
+	return middleware.WithStackValue(ctx, partitionIDKey{}, value)
+}
+
+// EndpointSource key
+type endpointSourceKey struct{}
+
+// GetEndpointSource returns an endpoint source if set on context
+func GetEndpointSource(ctx context.Context) (v aws.EndpointSource) {
+	v, _ = middleware.GetStackValue(ctx, endpointSourceKey{}).(aws.EndpointSource)
+	return v
+}
+
+// SetEndpointSource sets endpoint source on context
+func SetEndpointSource(ctx context.Context, value aws.EndpointSource) context.Context {
+	return middleware.WithStackValue(ctx, endpointSourceKey{}, value)
+}
+
+type signingCredentialsKey struct{}
+
+// GetSigningCredentials returns the credentials that were used for signing if set on context.
+func GetSigningCredentials(ctx context.Context) (v aws.Credentials) {
+	v, _ = middleware.GetStackValue(ctx, signingCredentialsKey{}).(aws.Credentials)
+	return v
+}
+
+// SetSigningCredentials sets the credentails used for signing on the context.
+func SetSigningCredentials(ctx context.Context, value aws.Credentials) context.Context {
+	return middleware.WithStackValue(ctx, signingCredentialsKey{}, value)
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/middleware.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/middleware.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/middleware.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/middleware.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,168 @@
+package middleware
+
+import (
+	"context"
+	"fmt"
+	"time"
+
+	"github.com/aws/aws-sdk-go-v2/internal/rand"
+	"github.com/aws/aws-sdk-go-v2/internal/sdk"
+	"github.com/aws/smithy-go/logging"
+	"github.com/aws/smithy-go/middleware"
+	smithyrand "github.com/aws/smithy-go/rand"
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// ClientRequestID is a Smithy BuildMiddleware that will generate a unique ID for logical API operation
+// invocation.
+type ClientRequestID struct{}
+
+// ID the identifier for the ClientRequestID
+func (r *ClientRequestID) ID() string {
+	return "ClientRequestID"
+}
+
+// HandleBuild attaches a unique operation invocation id for the operation to the request
+func (r ClientRequestID) HandleBuild(ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler) (
+	out middleware.BuildOutput, metadata middleware.Metadata, err error,
+) {
+	req, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, fmt.Errorf("unknown transport type %T", req)
+	}
+
+	invocationID, err := smithyrand.NewUUID(rand.Reader).GetUUID()
+	if err != nil {
+		return out, metadata, err
+	}
+
+	const invocationIDHeader = "Amz-Sdk-Invocation-Id"
+	req.Header[invocationIDHeader] = append(req.Header[invocationIDHeader][:0], invocationID)
+
+	return next.HandleBuild(ctx, in)
+}
+
+// RecordResponseTiming records the response timing for the SDK client requests.
+type RecordResponseTiming struct{}
+
+// ID is the middleware identifier
+func (a *RecordResponseTiming) ID() string {
+	return "RecordResponseTiming"
+}
+
+// HandleDeserialize calculates response metadata and clock skew
+func (a RecordResponseTiming) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+	out, metadata, err = next.HandleDeserialize(ctx, in)
+	responseAt := sdk.NowTime()
+	setResponseAt(&metadata, responseAt)
+
+	var serverTime time.Time
+
+	switch resp := out.RawResponse.(type) {
+	case *smithyhttp.Response:
+		respDateHeader := resp.Header.Get("Date")
+		if len(respDateHeader) == 0 {
+			break
+		}
+		var parseErr error
+		serverTime, parseErr = smithyhttp.ParseTime(respDateHeader)
+		if parseErr != nil {
+			logger := middleware.GetLogger(ctx)
+			logger.Logf(logging.Warn, "failed to parse response Date header value, got %v",
+				parseErr.Error())
+			break
+		}
+		setServerTime(&metadata, serverTime)
+	}
+
+	if !serverTime.IsZero() {
+		attemptSkew := serverTime.Sub(responseAt)
+		setAttemptSkew(&metadata, attemptSkew)
+	}
+
+	return out, metadata, err
+}
+
+type responseAtKey struct{}
+
+// GetResponseAt returns the time response was received at.
+func GetResponseAt(metadata middleware.Metadata) (v time.Time, ok bool) {
+	v, ok = metadata.Get(responseAtKey{}).(time.Time)
+	return v, ok
+}
+
+// setResponseAt sets the response time on the metadata.
+func setResponseAt(metadata *middleware.Metadata, v time.Time) {
+	metadata.Set(responseAtKey{}, v)
+}
+
+type serverTimeKey struct{}
+
+// GetServerTime returns the server time for response.
+func GetServerTime(metadata middleware.Metadata) (v time.Time, ok bool) {
+	v, ok = metadata.Get(serverTimeKey{}).(time.Time)
+	return v, ok
+}
+
+// setServerTime sets the server time on the metadata.
+func setServerTime(metadata *middleware.Metadata, v time.Time) {
+	metadata.Set(serverTimeKey{}, v)
+}
+
+type attemptSkewKey struct{}
+
+// GetAttemptSkew returns Attempt clock skew for response from metadata.
+func GetAttemptSkew(metadata middleware.Metadata) (v time.Duration, ok bool) {
+	v, ok = metadata.Get(attemptSkewKey{}).(time.Duration)
+	return v, ok
+}
+
+// setAttemptSkew sets the attempt clock skew on the metadata.
+func setAttemptSkew(metadata *middleware.Metadata, v time.Duration) {
+	metadata.Set(attemptSkewKey{}, v)
+}
+
+// AddClientRequestIDMiddleware adds ClientRequestID to the middleware stack
+func AddClientRequestIDMiddleware(stack *middleware.Stack) error {
+	return stack.Build.Add(&ClientRequestID{}, middleware.After)
+}
+
+// AddRecordResponseTiming adds RecordResponseTiming middleware to the
+// middleware stack.
+func AddRecordResponseTiming(stack *middleware.Stack) error {
+	return stack.Deserialize.Add(&RecordResponseTiming{}, middleware.After)
+}
+
+// rawResponseKey is the accessor key used to store and access the
+// raw response within the response metadata.
+type rawResponseKey struct{}
+
+// AddRawResponse middleware adds raw response on to the metadata
+type AddRawResponse struct{}
+
+// ID the identifier for the ClientRequestID
+func (m *AddRawResponse) ID() string {
+	return "AddRawResponseToMetadata"
+}
+
+// HandleDeserialize adds raw response on the middleware metadata
+func (m AddRawResponse) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+	out, metadata, err = next.HandleDeserialize(ctx, in)
+	metadata.Set(rawResponseKey{}, out.RawResponse)
+	return out, metadata, err
+}
+
+// AddRawResponseToMetadata adds middleware to the middleware stack that
+// store raw response on to the metadata.
+func AddRawResponseToMetadata(stack *middleware.Stack) error {
+	return stack.Deserialize.Add(&AddRawResponse{}, middleware.Before)
+}
+
+// GetRawResponse returns raw response set on metadata
+func GetRawResponse(metadata middleware.Metadata) interface{} {
+	return metadata.Get(rawResponseKey{})
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/osname.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/osname.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/osname.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/osname.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,24 @@
+//go:build go1.16
+// +build go1.16
+
+package middleware
+
+import "runtime"
+
+func getNormalizedOSName() (os string) {
+	switch runtime.GOOS {
+	case "android":
+		os = "android"
+	case "linux":
+		os = "linux"
+	case "windows":
+		os = "windows"
+	case "darwin":
+		os = "macos"
+	case "ios":
+		os = "ios"
+	default:
+		os = "other"
+	}
+	return os
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/osname_go115.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/osname_go115.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/osname_go115.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/osname_go115.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,24 @@
+//go:build !go1.16
+// +build !go1.16
+
+package middleware
+
+import "runtime"
+
+func getNormalizedOSName() (os string) {
+	switch runtime.GOOS {
+	case "android":
+		os = "android"
+	case "linux":
+		os = "linux"
+	case "windows":
+		os = "windows"
+	case "darwin":
+		// Due to Apple M1 we can't distinguish between macOS and iOS when GOOS/GOARCH is darwin/amd64
+		// For now declare this as "other" until we have a better detection mechanism.
+		fallthrough
+	default:
+		os = "other"
+	}
+	return os
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/private/metrics/metrics.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/private/metrics/metrics.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/private/metrics/metrics.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/private/metrics/metrics.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,320 @@
+// Package metrics implements metrics gathering for SDK development purposes.
+//
+// This package is designated as private and is intended for use only by the
+// AWS client runtime. The exported API therein is not considered stable and
+// is subject to breaking changes without notice.
+package metrics
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"sync"
+	"time"
+
+	"github.com/aws/smithy-go/middleware"
+)
+
+const (
+	// ServiceIDKey is the key for the service ID metric.
+	ServiceIDKey = "ServiceId"
+	// OperationNameKey is the key for the operation name metric.
+	OperationNameKey = "OperationName"
+	// ClientRequestIDKey is the key for the client request ID metric.
+	ClientRequestIDKey = "ClientRequestId"
+	// APICallDurationKey is the key for the API call duration metric.
+	APICallDurationKey = "ApiCallDuration"
+	// APICallSuccessfulKey is the key for the API call successful metric.
+	APICallSuccessfulKey = "ApiCallSuccessful"
+	// MarshallingDurationKey is the key for the marshalling duration metric.
+	MarshallingDurationKey = "MarshallingDuration"
+	// InThroughputKey is the key for the input throughput metric.
+	InThroughputKey = "InThroughput"
+	// OutThroughputKey is the key for the output throughput metric.
+	OutThroughputKey = "OutThroughput"
+	// RetryCountKey is the key for the retry count metric.
+	RetryCountKey = "RetryCount"
+	// HTTPStatusCodeKey is the key for the HTTP status code metric.
+	HTTPStatusCodeKey = "HttpStatusCode"
+	// AWSExtendedRequestIDKey is the key for the AWS extended request ID metric.
+	AWSExtendedRequestIDKey = "AwsExtendedRequestId"
+	// AWSRequestIDKey is the key for the AWS request ID metric.
+	AWSRequestIDKey = "AwsRequestId"
+	// BackoffDelayDurationKey is the key for the backoff delay duration metric.
+	BackoffDelayDurationKey = "BackoffDelayDuration"
+	// StreamThroughputKey is the key for the stream throughput metric.
+	StreamThroughputKey = "Throughput"
+	// ConcurrencyAcquireDurationKey is the key for the concurrency acquire duration metric.
+	ConcurrencyAcquireDurationKey = "ConcurrencyAcquireDuration"
+	// PendingConcurrencyAcquiresKey is the key for the pending concurrency acquires metric.
+	PendingConcurrencyAcquiresKey = "PendingConcurrencyAcquires"
+	// SigningDurationKey is the key for the signing duration metric.
+	SigningDurationKey = "SigningDuration"
+	// UnmarshallingDurationKey is the key for the unmarshalling duration metric.
+	UnmarshallingDurationKey = "UnmarshallingDuration"
+	// TimeToFirstByteKey is the key for the time to first byte metric.
+	TimeToFirstByteKey = "TimeToFirstByte"
+	// ServiceCallDurationKey is the key for the service call duration metric.
+	ServiceCallDurationKey = "ServiceCallDuration"
+	// EndpointResolutionDurationKey is the key for the endpoint resolution duration metric.
+	EndpointResolutionDurationKey = "EndpointResolutionDuration"
+	// AttemptNumberKey is the key for the attempt number metric.
+	AttemptNumberKey = "AttemptNumber"
+	// MaxConcurrencyKey is the key for the max concurrency metric.
+	MaxConcurrencyKey = "MaxConcurrency"
+	// AvailableConcurrencyKey is the key for the available concurrency metric.
+	AvailableConcurrencyKey = "AvailableConcurrency"
+)
+
+// MetricPublisher provides the interface to provide custom MetricPublishers.
+// PostRequestMetrics will be invoked by the MetricCollection middleware to post request.
+// PostStreamMetrics will be invoked by ReadCloserWithMetrics to post stream metrics.
+type MetricPublisher interface {
+	PostRequestMetrics(*MetricData) error
+	PostStreamMetrics(*MetricData) error
+}
+
+// Serializer provides the interface to provide custom Serializers.
+// Serialize will transform any input object in its corresponding string representation.
+type Serializer interface {
+	Serialize(obj interface{}) (string, error)
+}
+
+// DefaultSerializer is an implementation of the Serializer interface.
+type DefaultSerializer struct{}
+
+// Serialize uses the default JSON serializer to obtain the string representation of an object.
+func (DefaultSerializer) Serialize(obj interface{}) (string, error) {
+	bytes, err := json.Marshal(obj)
+	if err != nil {
+		return "", err
+	}
+	return string(bytes), nil
+}
+
+type metricContextKey struct{}
+
+// MetricContext contains fields to store metric-related information.
+type MetricContext struct {
+	connectionCounter *SharedConnectionCounter
+	publisher         MetricPublisher
+	data              *MetricData
+}
+
+// MetricData stores the collected metric data.
+type MetricData struct {
+	RequestStartTime           time.Time
+	RequestEndTime             time.Time
+	APICallDuration            time.Duration
+	SerializeStartTime         time.Time
+	SerializeEndTime           time.Time
+	MarshallingDuration        time.Duration
+	ResolveEndpointStartTime   time.Time
+	ResolveEndpointEndTime     time.Time
+	EndpointResolutionDuration time.Duration
+	GetIdentityStartTime       time.Time
+	GetIdentityEndTime         time.Time
+	InThroughput               float64
+	OutThroughput              float64
+	RetryCount                 int
+	Success                    uint8
+	StatusCode                 int
+	ClientRequestID            string
+	ServiceID                  string
+	OperationName              string
+	PartitionID                string
+	Region                     string
+	UserAgent                  string
+	RequestContentLength       int64
+	Stream                     StreamMetrics
+	Attempts                   []AttemptMetrics
+}
+
+// StreamMetrics stores metrics related to streaming data.
+type StreamMetrics struct {
+	ReadDuration time.Duration
+	ReadBytes    int64
+	Throughput   float64
+}
+
+// AttemptMetrics stores metrics related to individual attempts.
+type AttemptMetrics struct {
+	ServiceCallStart           time.Time
+	ServiceCallEnd             time.Time
+	ServiceCallDuration        time.Duration
+	FirstByteTime              time.Time
+	TimeToFirstByte            time.Duration
+	ConnRequestedTime          time.Time
+	ConnObtainedTime           time.Time
+	ConcurrencyAcquireDuration time.Duration
+	SignStartTime              time.Time
+	SignEndTime                time.Time
+	SigningDuration            time.Duration
+	DeserializeStartTime       time.Time
+	DeserializeEndTime         time.Time
+	UnMarshallingDuration      time.Duration
+	RetryDelay                 time.Duration
+	ResponseContentLength      int64
+	StatusCode                 int
+	RequestID                  string
+	ExtendedRequestID          string
+	HTTPClient                 string
+	MaxConcurrency             int
+	PendingConnectionAcquires  int
+	AvailableConcurrency       int
+	ActiveRequests             int
+	ReusedConnection           bool
+}
+
+// Data returns the MetricData associated with the MetricContext.
+func (mc *MetricContext) Data() *MetricData {
+	return mc.data
+}
+
+// ConnectionCounter returns the SharedConnectionCounter associated with the MetricContext.
+func (mc *MetricContext) ConnectionCounter() *SharedConnectionCounter {
+	return mc.connectionCounter
+}
+
+// Publisher returns the MetricPublisher associated with the MetricContext.
+func (mc *MetricContext) Publisher() MetricPublisher {
+	return mc.publisher
+}
+
+// ComputeRequestMetrics calculates and populates derived metrics based on the collected data.
+func (md *MetricData) ComputeRequestMetrics() {
+
+	for idx := range md.Attempts {
+		attempt := &md.Attempts[idx]
+		attempt.ConcurrencyAcquireDuration = attempt.ConnObtainedTime.Sub(attempt.ConnRequestedTime)
+		attempt.SigningDuration = attempt.SignEndTime.Sub(attempt.SignStartTime)
+		attempt.UnMarshallingDuration = attempt.DeserializeEndTime.Sub(attempt.DeserializeStartTime)
+		attempt.TimeToFirstByte = attempt.FirstByteTime.Sub(attempt.ServiceCallStart)
+		attempt.ServiceCallDuration = attempt.ServiceCallEnd.Sub(attempt.ServiceCallStart)
+	}
+
+	md.APICallDuration = md.RequestEndTime.Sub(md.RequestStartTime)
+	md.MarshallingDuration = md.SerializeEndTime.Sub(md.SerializeStartTime)
+	md.EndpointResolutionDuration = md.ResolveEndpointEndTime.Sub(md.ResolveEndpointStartTime)
+
+	md.RetryCount = len(md.Attempts) - 1
+
+	latestAttempt, err := md.LatestAttempt()
+
+	if err != nil {
+		fmt.Printf("error retrieving attempts data due to: %s. Skipping Throughput metrics", err.Error())
+	} else {
+
+		md.StatusCode = latestAttempt.StatusCode
+
+		if md.Success == 1 {
+			if latestAttempt.ResponseContentLength > 0 && latestAttempt.ServiceCallDuration > 0 {
+				md.InThroughput = float64(latestAttempt.ResponseContentLength) / latestAttempt.ServiceCallDuration.Seconds()
+			}
+			if md.RequestContentLength > 0 && latestAttempt.ServiceCallDuration > 0 {
+				md.OutThroughput = float64(md.RequestContentLength) / latestAttempt.ServiceCallDuration.Seconds()
+			}
+		}
+	}
+}
+
+// LatestAttempt returns the latest attempt metrics.
+// It returns an error if no attempts are initialized.
+func (md *MetricData) LatestAttempt() (*AttemptMetrics, error) {
+	if md.Attempts == nil || len(md.Attempts) == 0 {
+		return nil, fmt.Errorf("no attempts initialized. NewAttempt() should be called first")
+	}
+	return &md.Attempts[len(md.Attempts)-1], nil
+}
+
+// NewAttempt initializes new attempt metrics.
+func (md *MetricData) NewAttempt() {
+	if md.Attempts == nil {
+		md.Attempts = []AttemptMetrics{}
+	}
+	md.Attempts = append(md.Attempts, AttemptMetrics{})
+}
+
+// SharedConnectionCounter is a counter shared across API calls.
+type SharedConnectionCounter struct {
+	mu sync.Mutex
+
+	activeRequests           int
+	pendingConnectionAcquire int
+}
+
+// ActiveRequests returns the count of active requests.
+func (cc *SharedConnectionCounter) ActiveRequests() int {
+	cc.mu.Lock()
+	defer cc.mu.Unlock()
+
+	return cc.activeRequests
+}
+
+// PendingConnectionAcquire returns the count of pending connection acquires.
+func (cc *SharedConnectionCounter) PendingConnectionAcquire() int {
+	cc.mu.Lock()
+	defer cc.mu.Unlock()
+
+	return cc.pendingConnectionAcquire
+}
+
+// AddActiveRequest increments the count of active requests.
+func (cc *SharedConnectionCounter) AddActiveRequest() {
+	cc.mu.Lock()
+	defer cc.mu.Unlock()
+
+	cc.activeRequests++
+}
+
+// RemoveActiveRequest decrements the count of active requests.
+func (cc *SharedConnectionCounter) RemoveActiveRequest() {
+	cc.mu.Lock()
+	defer cc.mu.Unlock()
+
+	cc.activeRequests--
+}
+
+// AddPendingConnectionAcquire increments the count of pending connection acquires.
+func (cc *SharedConnectionCounter) AddPendingConnectionAcquire() {
+	cc.mu.Lock()
+	defer cc.mu.Unlock()
+
+	cc.pendingConnectionAcquire++
+}
+
+// RemovePendingConnectionAcquire decrements the count of pending connection acquires.
+func (cc *SharedConnectionCounter) RemovePendingConnectionAcquire() {
+	cc.mu.Lock()
+	defer cc.mu.Unlock()
+
+	cc.pendingConnectionAcquire--
+}
+
+// InitMetricContext initializes the metric context with the provided counter and publisher.
+// It returns the updated context.
+func InitMetricContext(
+	ctx context.Context, counter *SharedConnectionCounter, publisher MetricPublisher,
+) context.Context {
+	if middleware.GetStackValue(ctx, metricContextKey{}) == nil {
+		ctx = middleware.WithStackValue(ctx, metricContextKey{}, &MetricContext{
+			connectionCounter: counter,
+			publisher:         publisher,
+			data: &MetricData{
+				Attempts: []AttemptMetrics{},
+				Stream:   StreamMetrics{},
+			},
+		})
+	}
+	return ctx
+}
+
+// Context returns the metric context from the given context.
+// It returns nil if the metric context is not found.
+func Context(ctx context.Context) *MetricContext {
+	mctx := middleware.GetStackValue(ctx, metricContextKey{})
+	if mctx == nil {
+		return nil
+	}
+	return mctx.(*MetricContext)
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/recursion_detection.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/recursion_detection.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/recursion_detection.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/recursion_detection.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,94 @@
+package middleware
+
+import (
+	"context"
+	"fmt"
+	"github.com/aws/smithy-go/middleware"
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+	"os"
+)
+
+const envAwsLambdaFunctionName = "AWS_LAMBDA_FUNCTION_NAME"
+const envAmznTraceID = "_X_AMZN_TRACE_ID"
+const amznTraceIDHeader = "X-Amzn-Trace-Id"
+
+// AddRecursionDetection adds recursionDetection to the middleware stack
+func AddRecursionDetection(stack *middleware.Stack) error {
+	return stack.Build.Add(&RecursionDetection{}, middleware.After)
+}
+
+// RecursionDetection detects Lambda environment and sets its X-Ray trace ID to request header if absent
+// to avoid recursion invocation in Lambda
+type RecursionDetection struct{}
+
+// ID returns the middleware identifier
+func (m *RecursionDetection) ID() string {
+	return "RecursionDetection"
+}
+
+// HandleBuild detects Lambda environment and adds its trace ID to request header if absent
+func (m *RecursionDetection) HandleBuild(
+	ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler,
+) (
+	out middleware.BuildOutput, metadata middleware.Metadata, err error,
+) {
+	req, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, fmt.Errorf("unknown request type %T", req)
+	}
+
+	_, hasLambdaEnv := os.LookupEnv(envAwsLambdaFunctionName)
+	xAmznTraceID, hasTraceID := os.LookupEnv(envAmznTraceID)
+	value := req.Header.Get(amznTraceIDHeader)
+	// only set the X-Amzn-Trace-Id header when it is not set initially, the
+	// current environment is Lambda and the _X_AMZN_TRACE_ID env variable exists
+	if value != "" || !hasLambdaEnv || !hasTraceID {
+		return next.HandleBuild(ctx, in)
+	}
+
+	req.Header.Set(amznTraceIDHeader, percentEncode(xAmznTraceID))
+	return next.HandleBuild(ctx, in)
+}
+
+func percentEncode(s string) string {
+	upperhex := "0123456789ABCDEF"
+	hexCount := 0
+	for i := 0; i < len(s); i++ {
+		c := s[i]
+		if shouldEncode(c) {
+			hexCount++
+		}
+	}
+
+	if hexCount == 0 {
+		return s
+	}
+
+	required := len(s) + 2*hexCount
+	t := make([]byte, required)
+	j := 0
+	for i := 0; i < len(s); i++ {
+		if c := s[i]; shouldEncode(c) {
+			t[j] = '%'
+			t[j+1] = upperhex[c>>4]
+			t[j+2] = upperhex[c&15]
+			j += 3
+		} else {
+			t[j] = c
+			j++
+		}
+	}
+	return string(t)
+}
+
+func shouldEncode(c byte) bool {
+	if 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' || '0' <= c && c <= '9' {
+		return false
+	}
+	switch c {
+	case '-', '=', ';', ':', '+', '&', '[', ']', '{', '}', '"', '\'', ',':
+		return false
+	default:
+		return true
+	}
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/request_id.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/request_id.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/request_id.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/request_id.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,27 @@
+package middleware
+
+import (
+	"github.com/aws/smithy-go/middleware"
+)
+
+// requestIDKey is used to retrieve request id from response metadata
+type requestIDKey struct{}
+
+// SetRequestIDMetadata sets the provided request id over middleware metadata
+func SetRequestIDMetadata(metadata *middleware.Metadata, id string) {
+	metadata.Set(requestIDKey{}, id)
+}
+
+// GetRequestIDMetadata retrieves the request id from middleware metadata
+// returns string and bool indicating value of request id, whether request id was set.
+func GetRequestIDMetadata(metadata middleware.Metadata) (string, bool) {
+	if !metadata.Has(requestIDKey{}) {
+		return "", false
+	}
+
+	v, ok := metadata.Get(requestIDKey{}).(string)
+	if !ok {
+		return "", true
+	}
+	return v, true
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/request_id_retriever.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/request_id_retriever.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/request_id_retriever.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/request_id_retriever.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,53 @@
+package middleware
+
+import (
+	"context"
+
+	"github.com/aws/smithy-go/middleware"
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// AddRequestIDRetrieverMiddleware adds request id retriever middleware
+func AddRequestIDRetrieverMiddleware(stack *middleware.Stack) error {
+	// add error wrapper middleware before operation deserializers so that it can wrap the error response
+	// returned by operation deserializers
+	return stack.Deserialize.Insert(&RequestIDRetriever{}, "OperationDeserializer", middleware.Before)
+}
+
+// RequestIDRetriever middleware captures the AWS service request ID from the
+// raw response.
+type RequestIDRetriever struct {
+}
+
+// ID returns the middleware identifier
+func (m *RequestIDRetriever) ID() string {
+	return "RequestIDRetriever"
+}
+
+// HandleDeserialize pulls the AWS request ID from the response, storing it in
+// operation metadata.
+func (m *RequestIDRetriever) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+	out, metadata, err = next.HandleDeserialize(ctx, in)
+
+	resp, ok := out.RawResponse.(*smithyhttp.Response)
+	if !ok {
+		// No raw response to wrap with.
+		return out, metadata, err
+	}
+
+	// Different header which can map to request id
+	requestIDHeaderList := []string{"X-Amzn-Requestid", "X-Amz-RequestId"}
+
+	for _, h := range requestIDHeaderList {
+		// check for headers known to contain Request id
+		if v := resp.Header.Get(h); len(v) != 0 {
+			// set reqID on metadata for successful responses.
+			SetRequestIDMetadata(&metadata, v)
+			break
+		}
+	}
+
+	return out, metadata, err
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/user_agent.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/user_agent.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/user_agent.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/user_agent.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,305 @@
+package middleware
+
+import (
+	"context"
+	"fmt"
+	"os"
+	"runtime"
+	"sort"
+	"strings"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/aws/smithy-go/middleware"
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+var languageVersion = strings.TrimPrefix(runtime.Version(), "go")
+
+// SDKAgentKeyType is the metadata type to add to the SDK agent string
+type SDKAgentKeyType int
+
+// The set of valid SDKAgentKeyType constants. If an unknown value is assigned for SDKAgentKeyType it will
+// be mapped to AdditionalMetadata.
+const (
+	_ SDKAgentKeyType = iota
+	APIMetadata
+	OperatingSystemMetadata
+	LanguageMetadata
+	EnvironmentMetadata
+	FeatureMetadata
+	ConfigMetadata
+	FrameworkMetadata
+	AdditionalMetadata
+	ApplicationIdentifier
+	FeatureMetadata2
+)
+
+func (k SDKAgentKeyType) string() string {
+	switch k {
+	case APIMetadata:
+		return "api"
+	case OperatingSystemMetadata:
+		return "os"
+	case LanguageMetadata:
+		return "lang"
+	case EnvironmentMetadata:
+		return "exec-env"
+	case FeatureMetadata:
+		return "ft"
+	case ConfigMetadata:
+		return "cfg"
+	case FrameworkMetadata:
+		return "lib"
+	case ApplicationIdentifier:
+		return "app"
+	case FeatureMetadata2:
+		return "m"
+	case AdditionalMetadata:
+		fallthrough
+	default:
+		return "md"
+	}
+}
+
+const execEnvVar = `AWS_EXECUTION_ENV`
+
+var validChars = map[rune]bool{
+	'!': true, '#': true, '$': true, '%': true, '&': true, '\'': true, '*': true, '+': true,
+	'-': true, '.': true, '^': true, '_': true, '`': true, '|': true, '~': true,
+}
+
+// UserAgentFeature enumerates tracked SDK features.
+type UserAgentFeature string
+
+// Enumerates UserAgentFeature.
+const (
+	UserAgentFeatureResourceModel          UserAgentFeature = "A" // n/a (we don't generate separate resource types)
+	UserAgentFeatureWaiter                                  = "B"
+	UserAgentFeaturePaginator                               = "C"
+	UserAgentFeatureRetryModeLegacy                         = "D" // n/a (equivalent to standard)
+	UserAgentFeatureRetryModeStandard                       = "E"
+	UserAgentFeatureRetryModeAdaptive                       = "F"
+	UserAgentFeatureS3Transfer                              = "G"
+	UserAgentFeatureS3CryptoV1N                             = "H" // n/a (crypto client is external)
+	UserAgentFeatureS3CryptoV2                              = "I" // n/a
+	UserAgentFeatureS3ExpressBucket                         = "J"
+	UserAgentFeatureS3AccessGrants                          = "K" // not yet implemented
+	UserAgentFeatureGZIPRequestCompression                  = "L"
+)
+
+// RequestUserAgent is a build middleware that set the User-Agent for the request.
+type RequestUserAgent struct {
+	sdkAgent, userAgent *smithyhttp.UserAgentBuilder
+	features            map[UserAgentFeature]struct{}
+}
+
+// NewRequestUserAgent returns a new requestUserAgent which will set the User-Agent and X-Amz-User-Agent for the
+// request.
+//
+// User-Agent example:
+//
+//	aws-sdk-go-v2/1.2.3
+//
+// X-Amz-User-Agent example:
+//
+//	aws-sdk-go-v2/1.2.3 md/GOOS/linux md/GOARCH/amd64 lang/go/1.15
+func NewRequestUserAgent() *RequestUserAgent {
+	userAgent, sdkAgent := smithyhttp.NewUserAgentBuilder(), smithyhttp.NewUserAgentBuilder()
+	addProductName(userAgent)
+	addProductName(sdkAgent)
+
+	r := &RequestUserAgent{
+		sdkAgent:  sdkAgent,
+		userAgent: userAgent,
+		features:  map[UserAgentFeature]struct{}{},
+	}
+
+	addSDKMetadata(r)
+
+	return r
+}
+
+func addSDKMetadata(r *RequestUserAgent) {
+	r.AddSDKAgentKey(OperatingSystemMetadata, getNormalizedOSName())
+	r.AddSDKAgentKeyValue(LanguageMetadata, "go", languageVersion)
+	r.AddSDKAgentKeyValue(AdditionalMetadata, "GOOS", runtime.GOOS)
+	r.AddSDKAgentKeyValue(AdditionalMetadata, "GOARCH", runtime.GOARCH)
+	if ev := os.Getenv(execEnvVar); len(ev) > 0 {
+		r.AddSDKAgentKey(EnvironmentMetadata, ev)
+	}
+}
+
+func addProductName(builder *smithyhttp.UserAgentBuilder) {
+	builder.AddKeyValue(aws.SDKName, aws.SDKVersion)
+}
+
+// AddUserAgentKey retrieves a requestUserAgent from the provided stack, or initializes one.
+func AddUserAgentKey(key string) func(*middleware.Stack) error {
+	return func(stack *middleware.Stack) error {
+		requestUserAgent, err := getOrAddRequestUserAgent(stack)
+		if err != nil {
+			return err
+		}
+		requestUserAgent.AddUserAgentKey(key)
+		return nil
+	}
+}
+
+// AddUserAgentKeyValue retrieves a requestUserAgent from the provided stack, or initializes one.
+func AddUserAgentKeyValue(key, value string) func(*middleware.Stack) error {
+	return func(stack *middleware.Stack) error {
+		requestUserAgent, err := getOrAddRequestUserAgent(stack)
+		if err != nil {
+			return err
+		}
+		requestUserAgent.AddUserAgentKeyValue(key, value)
+		return nil
+	}
+}
+
+// AddSDKAgentKey retrieves a requestUserAgent from the provided stack, or initializes one.
+func AddSDKAgentKey(keyType SDKAgentKeyType, key string) func(*middleware.Stack) error {
+	return func(stack *middleware.Stack) error {
+		requestUserAgent, err := getOrAddRequestUserAgent(stack)
+		if err != nil {
+			return err
+		}
+		requestUserAgent.AddSDKAgentKey(keyType, key)
+		return nil
+	}
+}
+
+// AddSDKAgentKeyValue retrieves a requestUserAgent from the provided stack, or initializes one.
+func AddSDKAgentKeyValue(keyType SDKAgentKeyType, key, value string) func(*middleware.Stack) error {
+	return func(stack *middleware.Stack) error {
+		requestUserAgent, err := getOrAddRequestUserAgent(stack)
+		if err != nil {
+			return err
+		}
+		requestUserAgent.AddSDKAgentKeyValue(keyType, key, value)
+		return nil
+	}
+}
+
+// AddRequestUserAgentMiddleware registers a requestUserAgent middleware on the stack if not present.
+func AddRequestUserAgentMiddleware(stack *middleware.Stack) error {
+	_, err := getOrAddRequestUserAgent(stack)
+	return err
+}
+
+func getOrAddRequestUserAgent(stack *middleware.Stack) (*RequestUserAgent, error) {
+	id := (*RequestUserAgent)(nil).ID()
+	bm, ok := stack.Build.Get(id)
+	if !ok {
+		bm = NewRequestUserAgent()
+		err := stack.Build.Add(bm, middleware.After)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	requestUserAgent, ok := bm.(*RequestUserAgent)
+	if !ok {
+		return nil, fmt.Errorf("%T for %s middleware did not match expected type", bm, id)
+	}
+
+	return requestUserAgent, nil
+}
+
+// AddUserAgentKey adds the component identified by name to the User-Agent string.
+func (u *RequestUserAgent) AddUserAgentKey(key string) {
+	u.userAgent.AddKey(strings.Map(rules, key))
+}
+
+// AddUserAgentKeyValue adds the key identified by the given name and value to the User-Agent string.
+func (u *RequestUserAgent) AddUserAgentKeyValue(key, value string) {
+	u.userAgent.AddKeyValue(strings.Map(rules, key), strings.Map(rules, value))
+}
+
+// AddUserAgentFeature adds the feature ID to the tracking list to be emitted
+// in the final User-Agent string.
+func (u *RequestUserAgent) AddUserAgentFeature(feature UserAgentFeature) {
+	u.features[feature] = struct{}{}
+}
+
+// AddSDKAgentKey adds the component identified by name to the User-Agent string.
+func (u *RequestUserAgent) AddSDKAgentKey(keyType SDKAgentKeyType, key string) {
+	// TODO: should target sdkAgent
+	u.userAgent.AddKey(keyType.string() + "/" + strings.Map(rules, key))
+}
+
+// AddSDKAgentKeyValue adds the key identified by the given name and value to the User-Agent string.
+func (u *RequestUserAgent) AddSDKAgentKeyValue(keyType SDKAgentKeyType, key, value string) {
+	// TODO: should target sdkAgent
+	u.userAgent.AddKeyValue(keyType.string(), strings.Map(rules, key)+"#"+strings.Map(rules, value))
+}
+
+// ID the name of the middleware.
+func (u *RequestUserAgent) ID() string {
+	return "UserAgent"
+}
+
+// HandleBuild adds or appends the constructed user agent to the request.
+func (u *RequestUserAgent) HandleBuild(ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler) (
+	out middleware.BuildOutput, metadata middleware.Metadata, err error,
+) {
+	switch req := in.Request.(type) {
+	case *smithyhttp.Request:
+		u.addHTTPUserAgent(req)
+		// TODO: To be re-enabled
+		// u.addHTTPSDKAgent(req)
+	default:
+		return out, metadata, fmt.Errorf("unknown transport type %T", in)
+	}
+
+	return next.HandleBuild(ctx, in)
+}
+
+func (u *RequestUserAgent) addHTTPUserAgent(request *smithyhttp.Request) {
+	const userAgent = "User-Agent"
+	updateHTTPHeader(request, userAgent, u.userAgent.Build())
+	if len(u.features) > 0 {
+		updateHTTPHeader(request, userAgent, buildFeatureMetrics(u.features))
+	}
+}
+
+func (u *RequestUserAgent) addHTTPSDKAgent(request *smithyhttp.Request) {
+	const sdkAgent = "X-Amz-User-Agent"
+	updateHTTPHeader(request, sdkAgent, u.sdkAgent.Build())
+}
+
+func updateHTTPHeader(request *smithyhttp.Request, header string, value string) {
+	var current string
+	if v := request.Header[header]; len(v) > 0 {
+		current = v[0]
+	}
+	if len(current) > 0 {
+		current = value + " " + current
+	} else {
+		current = value
+	}
+	request.Header[header] = append(request.Header[header][:0], current)
+}
+
+func rules(r rune) rune {
+	switch {
+	case r >= '0' && r <= '9':
+		return r
+	case r >= 'A' && r <= 'Z' || r >= 'a' && r <= 'z':
+		return r
+	case validChars[r]:
+		return r
+	default:
+		return '-'
+	}
+}
+
+func buildFeatureMetrics(features map[UserAgentFeature]struct{}) string {
+	fs := make([]string, 0, len(features))
+	for f := range features {
+		fs = append(fs, string(f))
+	}
+
+	sort.Strings(fs)
+	return fmt.Sprintf("%s/%s", FeatureMetadata2.string(), strings.Join(fs, ","))
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/array.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/array.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/array.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/array.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,72 @@
+package query
+
+import (
+	"fmt"
+	"net/url"
+)
+
+// Array represents the encoding of Query lists and sets. A Query array is a
+// representation of a list of values of a fixed type. A serialized array might
+// look like the following:
+//
+//	ListName.member.1=foo
+//	&ListName.member.2=bar
+//	&ListName.member.3=baz
+type Array struct {
+	// The query values to add the array to.
+	values url.Values
+	// The array's prefix, which includes the names of all parent structures
+	// and ends with the name of the list. For example, the prefix might be
+	// "ParentStructure.ListName". This prefix will be used to form the full
+	// keys for each element in the list. For example, an entry might have the
+	// key "ParentStructure.ListName.member.MemberName.1".
+	//
+	// While this is currently represented as a string that gets added to, it
+	// could also be represented as a stack that only gets condensed into a
+	// string when a finalized key is created. This could potentially reduce
+	// allocations.
+	prefix string
+	// Whether the list is flat or not. A list that is not flat will produce the
+	// following entry to the url.Values for a given entry:
+	//     ListName.MemberName.1=value
+	// A list that is flat will produce the following:
+	//     ListName.1=value
+	flat bool
+	// The location name of the member. In most cases this should be "member".
+	memberName string
+	// Elements are stored in values, so we keep track of the list size here.
+	size int32
+	// Empty lists are encoded as "<prefix>=", if we add a value later we will
+	// remove this encoding
+	emptyValue Value
+}
+
+func newArray(values url.Values, prefix string, flat bool, memberName string) *Array {
+	emptyValue := newValue(values, prefix, flat)
+	emptyValue.String("")
+
+	return &Array{
+		values:     values,
+		prefix:     prefix,
+		flat:       flat,
+		memberName: memberName,
+		emptyValue: emptyValue,
+	}
+}
+
+// Value adds a new element to the Query Array. Returns a Value type used to
+// encode the array element.
+func (a *Array) Value() Value {
+	if a.size == 0 {
+		delete(a.values, a.emptyValue.key)
+	}
+
+	// Query lists start at 1, so adjust the size first
+	a.size++
+	prefix := a.prefix
+	if !a.flat {
+		prefix = fmt.Sprintf("%s.%s", prefix, a.memberName)
+	}
+	// Lists can't have flat members
+	return newValue(a.values, fmt.Sprintf("%s.%d", prefix, a.size), false)
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/encoder.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/encoder.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/encoder.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/encoder.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,80 @@
+package query
+
+import (
+	"io"
+	"net/url"
+	"sort"
+)
+
+// Encoder is a Query encoder that supports construction of Query body
+// values using methods.
+type Encoder struct {
+	// The query values that will be built up to manage encoding.
+	values url.Values
+	// The writer that the encoded body will be written to.
+	writer io.Writer
+	Value
+}
+
+// NewEncoder returns a new Query body encoder
+func NewEncoder(writer io.Writer) *Encoder {
+	values := url.Values{}
+	return &Encoder{
+		values: values,
+		writer: writer,
+		Value:  newBaseValue(values),
+	}
+}
+
+// Encode writes the encoded representation of the current state of the
+// Query encoder to the underlying writer.
+func (e Encoder) Encode() error {
+	ws, ok := e.writer.(interface{ WriteString(string) (int, error) })
+	if !ok {
+		// Fall back to less optimal byte slice casting if WriteString isn't available.
+		ws = &wrapWriteString{writer: e.writer}
+	}
+
+	// Get the keys and sort them to have a stable output
+	keys := make([]string, 0, len(e.values))
+	for k := range e.values {
+		keys = append(keys, k)
+	}
+	sort.Strings(keys)
+	isFirstEntry := true
+	for _, key := range keys {
+		queryValues := e.values[key]
+		escapedKey := url.QueryEscape(key)
+		for _, value := range queryValues {
+			if !isFirstEntry {
+				if _, err := ws.WriteString(`&`); err != nil {
+					return err
+				}
+			} else {
+				isFirstEntry = false
+			}
+			if _, err := ws.WriteString(escapedKey); err != nil {
+				return err
+			}
+			if _, err := ws.WriteString(`=`); err != nil {
+				return err
+			}
+			if _, err := ws.WriteString(url.QueryEscape(value)); err != nil {
+				return err
+			}
+		}
+	}
+	return nil
+}
+
+// wrapWriteString wraps an io.Writer to provide a WriteString method
+// where one is not available.
+type wrapWriteString struct {
+	writer io.Writer
+}
+
+// WriteString writes a string to the wrapped writer by casting it to
+// a byte array first.
+func (w wrapWriteString) WriteString(v string) (int, error) {
+	return w.writer.Write([]byte(v))
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/map.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/map.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/map.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/map.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,78 @@
+package query
+
+import (
+	"fmt"
+	"net/url"
+)
+
+// Map represents the encoding of Query maps. A Query map is a representation
+// of a mapping of arbitrary string keys to arbitrary values of a fixed type.
+// A Map differs from an Object in that the set of keys is not fixed, in that
+// the values must all be of the same type, and that map entries are ordered.
+// A serialized map might look like the following:
+//
+//	MapName.entry.1.key=Foo
+//	&MapName.entry.1.value=spam
+//	&MapName.entry.2.key=Bar
+//	&MapName.entry.2.value=eggs
+type Map struct {
+	// The query values to add the map to.
+	values url.Values
+	// The map's prefix, which includes the names of all parent structures
+	// and ends with the name of the object. For example, the prefix might be
+	// "ParentStructure.MapName". This prefix will be used to form the full
+	// keys for each key-value pair of the map. For example, a value might have
+	// the key "ParentStructure.MapName.1.value".
+	//
+	// While this is currently represented as a string that gets added to, it
+	// could also be represented as a stack that only gets condensed into a
+	// string when a finalized key is created. This could potentially reduce
+	// allocations.
+	prefix string
+	// Whether the map is flat or not. A map that is not flat will produce the
+	// following entries to the url.Values for a given key-value pair:
+	//     MapName.entry.1.KeyLocationName=mykey
+	//     MapName.entry.1.ValueLocationName=myvalue
+	// A map that is flat will produce the following:
+	//     MapName.1.KeyLocationName=mykey
+	//     MapName.1.ValueLocationName=myvalue
+	flat bool
+	// The location name of the key. In most cases this should be "key".
+	keyLocationName string
+	// The location name of the value. In most cases this should be "value".
+	valueLocationName string
+	// Elements are stored in values, so we keep track of the list size here.
+	size int32
+}
+
+func newMap(values url.Values, prefix string, flat bool, keyLocationName string, valueLocationName string) *Map {
+	return &Map{
+		values:            values,
+		prefix:            prefix,
+		flat:              flat,
+		keyLocationName:   keyLocationName,
+		valueLocationName: valueLocationName,
+	}
+}
+
+// Key adds the given named key to the Query map.
+// Returns a Value encoder that should be used to encode a Query value type.
+func (m *Map) Key(name string) Value {
+	// Query lists start at 1, so adjust the size first
+	m.size++
+	var key string
+	var value string
+	if m.flat {
+		key = fmt.Sprintf("%s.%d.%s", m.prefix, m.size, m.keyLocationName)
+		value = fmt.Sprintf("%s.%d.%s", m.prefix, m.size, m.valueLocationName)
+	} else {
+		key = fmt.Sprintf("%s.entry.%d.%s", m.prefix, m.size, m.keyLocationName)
+		value = fmt.Sprintf("%s.entry.%d.%s", m.prefix, m.size, m.valueLocationName)
+	}
+
+	// The key can only be a string, so we just go ahead and set it here
+	newValue(m.values, key, false).String(name)
+
+	// Maps can't have flat members
+	return newValue(m.values, value, false)
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/middleware.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/middleware.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/middleware.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/middleware.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,62 @@
+package query
+
+import (
+	"context"
+	"fmt"
+	"io/ioutil"
+
+	"github.com/aws/smithy-go/middleware"
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// AddAsGetRequestMiddleware adds a middleware to the Serialize stack after the
+// operation serializer that will convert the query request body to a GET
+// operation with the query message in the HTTP request querystring.
+func AddAsGetRequestMiddleware(stack *middleware.Stack) error {
+	return stack.Serialize.Insert(&asGetRequest{}, "OperationSerializer", middleware.After)
+}
+
+type asGetRequest struct{}
+
+func (*asGetRequest) ID() string { return "Query:AsGetRequest" }
+
+func (m *asGetRequest) HandleSerialize(
+	ctx context.Context, input middleware.SerializeInput, next middleware.SerializeHandler,
+) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	req, ok := input.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, fmt.Errorf("expect smithy HTTP Request, got %T", input.Request)
+	}
+
+	req.Method = "GET"
+
+	// If the stream is not set, nothing else to do.
+	stream := req.GetStream()
+	if stream == nil {
+		return next.HandleSerialize(ctx, input)
+	}
+
+	// Clear the stream since there will not be any body.
+	req.Header.Del("Content-Type")
+	req, err = req.SetStream(nil)
+	if err != nil {
+		return out, metadata, fmt.Errorf("unable update request body %w", err)
+	}
+	input.Request = req
+
+	// Update request query with the body's query string value.
+	delim := ""
+	if len(req.URL.RawQuery) != 0 {
+		delim = "&"
+	}
+
+	b, err := ioutil.ReadAll(stream)
+	if err != nil {
+		return out, metadata, fmt.Errorf("unable to get request body %w", err)
+	}
+	req.URL.RawQuery += delim + string(b)
+
+	return next.HandleSerialize(ctx, input)
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/object.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/object.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/object.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/object.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,69 @@
+package query
+
+import (
+	"fmt"
+	"net/url"
+)
+
+// Object represents the encoding of Query structures and unions. A Query
+// object is a representation of a mapping of string keys to arbitrary
+// values where there is a fixed set of keys whose values each have their
+// own known type. A serialized object might look like the following:
+//
+//	ObjectName.Foo=value
+//	&ObjectName.Bar=5
+type Object struct {
+	// The query values to add the object to.
+	values url.Values
+	// The object's prefix, which includes the names of all parent structures
+	// and ends with the name of the object. For example, the prefix might be
+	// "ParentStructure.ObjectName". This prefix will be used to form the full
+	// keys for each member of the object. For example, a member might have the
+	// key "ParentStructure.ObjectName.MemberName".
+	//
+	// While this is currently represented as a string that gets added to, it
+	// could also be represented as a stack that only gets condensed into a
+	// string when a finalized key is created. This could potentially reduce
+	// allocations.
+	prefix string
+}
+
+func newObject(values url.Values, prefix string) *Object {
+	return &Object{
+		values: values,
+		prefix: prefix,
+	}
+}
+
+// Key adds the given named key to the Query object.
+// Returns a Value encoder that should be used to encode a Query value type.
+func (o *Object) Key(name string) Value {
+	return o.key(name, false)
+}
+
+// KeyWithValues adds the given named key to the Query object.
+// Returns a Value encoder that should be used to encode a Query list of values.
+func (o *Object) KeyWithValues(name string) Value {
+	return o.keyWithValues(name, false)
+}
+
+// FlatKey adds the given named key to the Query object.
+// Returns a Value encoder that should be used to encode a Query value type. The
+// value will be flattened if it is a map or array.
+func (o *Object) FlatKey(name string) Value {
+	return o.key(name, true)
+}
+
+func (o *Object) key(name string, flatValue bool) Value {
+	if o.prefix != "" {
+		return newValue(o.values, fmt.Sprintf("%s.%s", o.prefix, name), flatValue)
+	}
+	return newValue(o.values, name, flatValue)
+}
+
+func (o *Object) keyWithValues(name string, flatValue bool) Value {
+	if o.prefix != "" {
+		return newAppendValue(o.values, fmt.Sprintf("%s.%s", o.prefix, name), flatValue)
+	}
+	return newAppendValue(o.values, name, flatValue)
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/value.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/value.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/value.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/value.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,115 @@
+package query
+
+import (
+	"math/big"
+	"net/url"
+
+	"github.com/aws/smithy-go/encoding/httpbinding"
+)
+
+// Value represents a Query Value type.
+type Value struct {
+	// The query values to add the value to.
+	values url.Values
+	// The value's key, which will form the prefix for complex types.
+	key string
+	// Whether the value should be flattened or not if it's a flattenable type.
+	flat       bool
+	queryValue httpbinding.QueryValue
+}
+
+func newValue(values url.Values, key string, flat bool) Value {
+	return Value{
+		values:     values,
+		key:        key,
+		flat:       flat,
+		queryValue: httpbinding.NewQueryValue(values, key, false),
+	}
+}
+
+func newAppendValue(values url.Values, key string, flat bool) Value {
+	return Value{
+		values:     values,
+		key:        key,
+		flat:       flat,
+		queryValue: httpbinding.NewQueryValue(values, key, true),
+	}
+}
+
+func newBaseValue(values url.Values) Value {
+	return Value{
+		values:     values,
+		queryValue: httpbinding.NewQueryValue(nil, "", false),
+	}
+}
+
+// Array returns a new Array encoder.
+func (qv Value) Array(locationName string) *Array {
+	return newArray(qv.values, qv.key, qv.flat, locationName)
+}
+
+// Object returns a new Object encoder.
+func (qv Value) Object() *Object {
+	return newObject(qv.values, qv.key)
+}
+
+// Map returns a new Map encoder.
+func (qv Value) Map(keyLocationName string, valueLocationName string) *Map {
+	return newMap(qv.values, qv.key, qv.flat, keyLocationName, valueLocationName)
+}
+
+// Base64EncodeBytes encodes v as a base64 query string value.
+// This is intended to enable compatibility with the JSON encoder.
+func (qv Value) Base64EncodeBytes(v []byte) {
+	qv.queryValue.Blob(v)
+}
+
+// Boolean encodes v as a query string value
+func (qv Value) Boolean(v bool) {
+	qv.queryValue.Boolean(v)
+}
+
+// String encodes v as a query string value
+func (qv Value) String(v string) {
+	qv.queryValue.String(v)
+}
+
+// Byte encodes v as a query string value
+func (qv Value) Byte(v int8) {
+	qv.queryValue.Byte(v)
+}
+
+// Short encodes v as a query string value
+func (qv Value) Short(v int16) {
+	qv.queryValue.Short(v)
+}
+
+// Integer encodes v as a query string value
+func (qv Value) Integer(v int32) {
+	qv.queryValue.Integer(v)
+}
+
+// Long encodes v as a query string value
+func (qv Value) Long(v int64) {
+	qv.queryValue.Long(v)
+}
+
+// Float encodes v as a query string value
+func (qv Value) Float(v float32) {
+	qv.queryValue.Float(v)
+}
+
+// Double encodes v as a query string value
+func (qv Value) Double(v float64) {
+	qv.queryValue.Double(v)
+}
+
+// BigInteger encodes v as a query string value
+func (qv Value) BigInteger(v *big.Int) {
+	qv.queryValue.BigInteger(v)
+}
+
+// BigDecimal encodes v as a query string value
+func (qv Value) BigDecimal(v *big.Float) {
+	qv.queryValue.BigDecimal(v)
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/restjson/decoder_util.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/restjson/decoder_util.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/restjson/decoder_util.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/restjson/decoder_util.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,85 @@
+package restjson
+
+import (
+	"encoding/json"
+	"io"
+	"strings"
+
+	"github.com/aws/smithy-go"
+)
+
+// GetErrorInfo util looks for code, __type, and message members in the
+// json body. These members are optionally available, and the function
+// returns the value of member if it is available. This function is useful to
+// identify the error code, msg in a REST JSON error response.
+func GetErrorInfo(decoder *json.Decoder) (errorType string, message string, err error) {
+	var errInfo struct {
+		Code    string
+		Type    string `json:"__type"`
+		Message string
+	}
+
+	err = decoder.Decode(&errInfo)
+	if err != nil {
+		if err == io.EOF {
+			return errorType, message, nil
+		}
+		return errorType, message, err
+	}
+
+	// assign error type
+	if len(errInfo.Code) != 0 {
+		errorType = errInfo.Code
+	} else if len(errInfo.Type) != 0 {
+		errorType = errInfo.Type
+	}
+
+	// assign error message
+	if len(errInfo.Message) != 0 {
+		message = errInfo.Message
+	}
+
+	// sanitize error
+	if len(errorType) != 0 {
+		errorType = SanitizeErrorCode(errorType)
+	}
+
+	return errorType, message, nil
+}
+
+// SanitizeErrorCode sanitizes the errorCode string.
+// The rule for sanitizing is if a `:` character is present, then take only the
+// contents before the first : character in the value.
+// If a # character is present, then take only the contents after the
+// first # character in the value.
+func SanitizeErrorCode(errorCode string) string {
+	if strings.ContainsAny(errorCode, ":") {
+		errorCode = strings.SplitN(errorCode, ":", 2)[0]
+	}
+
+	if strings.ContainsAny(errorCode, "#") {
+		errorCode = strings.SplitN(errorCode, "#", 2)[1]
+	}
+
+	return errorCode
+}
+
+// GetSmithyGenericAPIError returns smithy generic api error and an error interface.
+// Takes in json decoder, and error Code string as args. The function retrieves error message
+// and error code from the decoder body. If errorCode of length greater than 0 is passed in as
+// an argument, it is used instead.
+func GetSmithyGenericAPIError(decoder *json.Decoder, errorCode string) (*smithy.GenericAPIError, error) {
+	errorType, message, err := GetErrorInfo(decoder)
+	if err != nil {
+		return nil, err
+	}
+
+	if len(errorCode) == 0 {
+		errorCode = errorType
+	}
+
+	return &smithy.GenericAPIError{
+		Code:    errorCode,
+		Message: message,
+	}, nil
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/xml/error_utils.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/xml/error_utils.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/xml/error_utils.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/xml/error_utils.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,48 @@
+package xml
+
+import (
+	"encoding/xml"
+	"fmt"
+	"io"
+)
+
+// ErrorComponents represents the error response fields
+// that will be deserialized from an xml error response body
+type ErrorComponents struct {
+	Code      string
+	Message   string
+	RequestID string
+}
+
+// GetErrorResponseComponents returns the error fields from an xml error response body
+func GetErrorResponseComponents(r io.Reader, noErrorWrapping bool) (ErrorComponents, error) {
+	if noErrorWrapping {
+		var errResponse noWrappedErrorResponse
+		if err := xml.NewDecoder(r).Decode(&errResponse); err != nil && err != io.EOF {
+			return ErrorComponents{}, fmt.Errorf("error while deserializing xml error response: %w", err)
+		}
+		return ErrorComponents(errResponse), nil
+	}
+
+	var errResponse wrappedErrorResponse
+	if err := xml.NewDecoder(r).Decode(&errResponse); err != nil && err != io.EOF {
+		return ErrorComponents{}, fmt.Errorf("error while deserializing xml error response: %w", err)
+	}
+	return ErrorComponents(errResponse), nil
+}
+
+// noWrappedErrorResponse represents the error response body with
+// no internal Error wrapping
+type noWrappedErrorResponse struct {
+	Code      string `xml:"Code"`
+	Message   string `xml:"Message"`
+	RequestID string `xml:"RequestId"`
+}
+
+// wrappedErrorResponse represents the error response body
+// wrapped within Error
+type wrappedErrorResponse struct {
+	Code      string `xml:"Error>Code"`
+	Message   string `xml:"Error>Message"`
+	RequestID string `xml:"RequestId"`
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/aws/ratelimit/none.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/aws/ratelimit/none.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/aws/ratelimit/none.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/aws/ratelimit/none.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,20 @@
+package ratelimit
+
+import "context"
+
+// None implements a no-op rate limiter which effectively disables client-side
+// rate limiting (also known as "retry quotas").
+//
+// GetToken does nothing and always returns a nil error. The returned
+// token-release function does nothing, and always returns a nil error.
+//
+// AddTokens does nothing and always returns a nil error.
+var None = &none{}
+
+type none struct{}
+
+func (*none) GetToken(ctx context.Context, cost uint) (func() error, error) {
+	return func() error { return nil }, nil
+}
+
+func (*none) AddTokens(v uint) error { return nil }
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/aws/ratelimit/token_bucket.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/aws/ratelimit/token_bucket.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/aws/ratelimit/token_bucket.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/aws/ratelimit/token_bucket.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,96 @@
+package ratelimit
+
+import (
+	"sync"
+)
+
+// TokenBucket provides a concurrency safe utility for adding and removing
+// tokens from the available token bucket.
+type TokenBucket struct {
+	remainingTokens uint
+	maxCapacity     uint
+	minCapacity     uint
+	mu              sync.Mutex
+}
+
+// NewTokenBucket returns an initialized TokenBucket with the capacity
+// specified.
+func NewTokenBucket(i uint) *TokenBucket {
+	return &TokenBucket{
+		remainingTokens: i,
+		maxCapacity:     i,
+		minCapacity:     1,
+	}
+}
+
+// Retrieve attempts to reduce the available tokens by the amount requested. If
+// there are tokens available true will be returned along with the number of
+// available tokens remaining. If amount requested is larger than the available
+// capacity, false will be returned along with the available capacity. If the
+// amount is less than the available capacity, the capacity will be reduced by
+// that amount, and the remaining capacity and true will be returned.
+func (t *TokenBucket) Retrieve(amount uint) (available uint, retrieved bool) {
+	t.mu.Lock()
+	defer t.mu.Unlock()
+
+	if amount > t.remainingTokens {
+		return t.remainingTokens, false
+	}
+
+	t.remainingTokens -= amount
+	return t.remainingTokens, true
+}
+
+// Refund returns the amount of tokens back to the available token bucket, up
+// to the initial capacity.
+func (t *TokenBucket) Refund(amount uint) {
+	t.mu.Lock()
+	defer t.mu.Unlock()
+
+	// Capacity cannot exceed max capacity.
+	t.remainingTokens = uintMin(t.remainingTokens+amount, t.maxCapacity)
+}
+
+// Capacity returns the maximum capacity of tokens that the bucket could
+// contain.
+func (t *TokenBucket) Capacity() uint {
+	t.mu.Lock()
+	defer t.mu.Unlock()
+
+	return t.maxCapacity
+}
+
+// Remaining returns the number of tokens remaining in the bucket.
+func (t *TokenBucket) Remaining() uint {
+	t.mu.Lock()
+	defer t.mu.Unlock()
+
+	return t.remainingTokens
+}
+
+// Resize adjusts the size of the token bucket. Returns the capacity remaining.
+func (t *TokenBucket) Resize(size uint) uint {
+	t.mu.Lock()
+	defer t.mu.Unlock()
+
+	t.maxCapacity = uintMax(size, t.minCapacity)
+
+	// Capacity needs to be capped at max capacity, if max size reduced.
+	t.remainingTokens = uintMin(t.remainingTokens, t.maxCapacity)
+
+	return t.remainingTokens
+}
+
+func uintMin(a, b uint) uint {
+	if a < b {
+		return a
+	}
+	return b
+}
+
+func uintMax(a, b uint) uint {
+	if a > b {
+		return a
+	}
+	return b
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/aws/ratelimit/token_rate_limit.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/aws/ratelimit/token_rate_limit.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/aws/ratelimit/token_rate_limit.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/aws/ratelimit/token_rate_limit.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,83 @@
+package ratelimit
+
+import (
+	"context"
+	"fmt"
+)
+
+type rateToken struct {
+	tokenCost uint
+	bucket    *TokenBucket
+}
+
+func (t rateToken) release() error {
+	t.bucket.Refund(t.tokenCost)
+	return nil
+}
+
+// TokenRateLimit provides a Token Bucket RateLimiter implementation
+// that limits the overall number of retry attempts that can be made across
+// operation invocations.
+type TokenRateLimit struct {
+	bucket *TokenBucket
+}
+
+// NewTokenRateLimit returns a TokenRateLimit with default values.
+// Functional options can configure the retry rate limiter.
+func NewTokenRateLimit(tokens uint) *TokenRateLimit {
+	return &TokenRateLimit{
+		bucket: NewTokenBucket(tokens),
+	}
+}
+
+type canceledError struct {
+	Err error
+}
+
+func (c canceledError) CanceledError() bool { return true }
+func (c canceledError) Unwrap() error       { return c.Err }
+func (c canceledError) Error() string {
+	return fmt.Sprintf("canceled, %v", c.Err)
+}
+
+// GetToken may cause an available pool of retry quota to be
+// decremented. Will return an error if the decremented value can not be
+// reduced from the retry quota.
+func (l *TokenRateLimit) GetToken(ctx context.Context, cost uint) (func() error, error) {
+	select {
+	case <-ctx.Done():
+		return nil, canceledError{Err: ctx.Err()}
+	default:
+	}
+	if avail, ok := l.bucket.Retrieve(cost); !ok {
+		return nil, QuotaExceededError{Available: avail, Requested: cost}
+	}
+
+	return rateToken{
+		tokenCost: cost,
+		bucket:    l.bucket,
+	}.release, nil
+}
+
+// AddTokens increments the token bucket by a fixed amount.
+func (l *TokenRateLimit) AddTokens(v uint) error {
+	l.bucket.Refund(v)
+	return nil
+}
+
+// Remaining returns the number of remaining tokens in the bucket.
+func (l *TokenRateLimit) Remaining() uint {
+	return l.bucket.Remaining()
+}
+
+// QuotaExceededError provides the SDK error when the retries for a given
+// token bucket have been exhausted.
+type QuotaExceededError struct {
+	Available uint
+	Requested uint
+}
+
+func (e QuotaExceededError) Error() string {
+	return fmt.Sprintf("retry quota exceeded, %d available, %d requested",
+		e.Available, e.Requested)
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/aws/request.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/aws/request.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/aws/request.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/aws/request.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,25 @@
+package aws
+
+import (
+	"fmt"
+)
+
+// TODO remove replace with smithy.CanceledError
+
+// RequestCanceledError is the error that will be returned by an API request
+// that was canceled. Requests given a Context may return this error when
+// canceled.
+type RequestCanceledError struct {
+	Err error
+}
+
+// CanceledError returns true to satisfy interfaces checking for canceled errors.
+func (*RequestCanceledError) CanceledError() bool { return true }
+
+// Unwrap returns the underlying error, if there was one.
+func (e *RequestCanceledError) Unwrap() error {
+	return e.Err
+}
+func (e *RequestCanceledError) Error() string {
+	return fmt.Sprintf("request canceled, %v", e.Err)
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/adaptive.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/adaptive.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/adaptive.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/adaptive.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,156 @@
+package retry
+
+import (
+	"context"
+	"fmt"
+	"time"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/aws/aws-sdk-go-v2/internal/sdk"
+)
+
+const (
+	// DefaultRequestCost is the cost of a single request from the adaptive
+	// rate limited token bucket.
+	DefaultRequestCost uint = 1
+)
+
+// DefaultThrottles provides the set of errors considered throttle errors that
+// are checked by default.
+var DefaultThrottles = []IsErrorThrottle{
+	ThrottleErrorCode{
+		Codes: DefaultThrottleErrorCodes,
+	},
+}
+
+// AdaptiveModeOptions provides the functional options for configuring the
+// adaptive retry mode, and delay behavior.
+type AdaptiveModeOptions struct {
+	// If the adaptive token bucket is empty, when an attempt will be made
+	// AdaptiveMode will sleep until a token is available. This can occur when
+	// attempts fail with throttle errors. Use this option to disable the sleep
+	// until token is available, and return error immediately.
+	FailOnNoAttemptTokens bool
+
+	// The cost of an attempt from the AdaptiveMode's adaptive token bucket.
+	RequestCost uint
+
+	// Set of strategies to determine if the attempt failed due to a throttle
+	// error.
+	//
+	// It is safe to append to this list in NewAdaptiveMode's functional options.
+	Throttles []IsErrorThrottle
+
+	// Set of options for standard retry mode that AdaptiveMode is built on top
+	// of. AdaptiveMode may apply its own defaults to Standard retry mode that
+	// are different than the defaults of NewStandard. Use these options to
+	// override the default options.
+	StandardOptions []func(*StandardOptions)
+}
+
+// AdaptiveMode provides an experimental retry strategy that expands on the
+// Standard retry strategy, adding client attempt rate limits. The attempt rate
+// limit is initially unrestricted, but becomes restricted when the attempt
+// fails with a throttle error. When restricted AdaptiveMode may need to
+// sleep before an attempt is made, if too many throttles have been received.
+// AdaptiveMode's sleep can be canceled with context cancel. Set
+// AdaptiveModeOptions FailOnNoAttemptTokens to change the behavior from sleep,
+// to fail fast.
+//
+// Eventually unrestricted attempt rate limit will be restored once attempts no
+// longer are failing due to throttle errors.
+type AdaptiveMode struct {
+	options   AdaptiveModeOptions
+	throttles IsErrorThrottles
+
+	retryer   aws.RetryerV2
+	rateLimit *adaptiveRateLimit
+}
+
+// NewAdaptiveMode returns an initialized AdaptiveMode retry strategy.
+func NewAdaptiveMode(optFns ...func(*AdaptiveModeOptions)) *AdaptiveMode {
+	o := AdaptiveModeOptions{
+		RequestCost: DefaultRequestCost,
+		Throttles:   append([]IsErrorThrottle{}, DefaultThrottles...),
+	}
+	for _, fn := range optFns {
+		fn(&o)
+	}
+
+	return &AdaptiveMode{
+		options:   o,
+		throttles: IsErrorThrottles(o.Throttles),
+		retryer:   NewStandard(o.StandardOptions...),
+		rateLimit: newAdaptiveRateLimit(),
+	}
+}
+
+// IsErrorRetryable returns if the failed attempt is retryable. This check
+// should determine if the error can be retried, or if the error is
+// terminal.
+func (a *AdaptiveMode) IsErrorRetryable(err error) bool {
+	return a.retryer.IsErrorRetryable(err)
+}
+
+// MaxAttempts returns the maximum number of attempts that can be made for
+// an attempt before failing. A value of 0 implies that the attempt should
+// be retried until it succeeds if the errors are retryable.
+func (a *AdaptiveMode) MaxAttempts() int {
+	return a.retryer.MaxAttempts()
+}
+
+// RetryDelay returns the delay that should be used before retrying the
+// attempt. Will return error if the delay could not be determined.
+func (a *AdaptiveMode) RetryDelay(attempt int, opErr error) (
+	time.Duration, error,
+) {
+	return a.retryer.RetryDelay(attempt, opErr)
+}
+
+// GetRetryToken attempts to deduct the retry cost from the retry token pool.
+// Returning the token release function, or error.
+func (a *AdaptiveMode) GetRetryToken(ctx context.Context, opErr error) (
+	releaseToken func(error) error, err error,
+) {
+	return a.retryer.GetRetryToken(ctx, opErr)
+}
+
+// GetInitialToken returns the initial attempt token that can increment the
+// retry token pool if the attempt is successful.
+//
+// Deprecated: This method does not provide a way to block using Context,
+// nor can it return an error. Use RetryerV2, and GetAttemptToken instead. Only
+// present to implement Retryer interface.
+func (a *AdaptiveMode) GetInitialToken() (releaseToken func(error) error) {
+	return nopRelease
+}
+
+// GetAttemptToken returns the attempt token that can be used to rate limit
+// attempt calls. Will be used by the SDK's retry package's Attempt
+// middleware to get an attempt token prior to making the attempt and releasing
+// the attempt token after the attempt has been made.
+func (a *AdaptiveMode) GetAttemptToken(ctx context.Context) (func(error) error, error) {
+	for {
+		acquiredToken, waitTryAgain := a.rateLimit.AcquireToken(a.options.RequestCost)
+		if acquiredToken {
+			break
+		}
+		if a.options.FailOnNoAttemptTokens {
+			return nil, fmt.Errorf(
+				"unable to get attempt token, and FailOnNoAttemptTokens enables")
+		}
+
+		if err := sdk.SleepWithContext(ctx, waitTryAgain); err != nil {
+			return nil, fmt.Errorf("failed to wait for token to be available, %w", err)
+		}
+	}
+
+	return a.handleResponse, nil
+}
+
+func (a *AdaptiveMode) handleResponse(opErr error) error {
+	throttled := a.throttles.IsErrorThrottle(opErr).Bool()
+
+	a.rateLimit.Update(throttled)
+	return nil
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/adaptive_ratelimit.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/adaptive_ratelimit.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/adaptive_ratelimit.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/adaptive_ratelimit.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,158 @@
+package retry
+
+import (
+	"math"
+	"sync"
+	"time"
+
+	"github.com/aws/aws-sdk-go-v2/internal/sdk"
+)
+
+type adaptiveRateLimit struct {
+	tokenBucketEnabled bool
+
+	smooth        float64
+	beta          float64
+	scaleConstant float64
+	minFillRate   float64
+
+	fillRate         float64
+	calculatedRate   float64
+	lastRefilled     time.Time
+	measuredTxRate   float64
+	lastTxRateBucket float64
+	requestCount     int64
+	lastMaxRate      float64
+	lastThrottleTime time.Time
+	timeWindow       float64
+
+	tokenBucket *adaptiveTokenBucket
+
+	mu sync.Mutex
+}
+
+func newAdaptiveRateLimit() *adaptiveRateLimit {
+	now := sdk.NowTime()
+	return &adaptiveRateLimit{
+		smooth:        0.8,
+		beta:          0.7,
+		scaleConstant: 0.4,
+
+		minFillRate: 0.5,
+
+		lastTxRateBucket: math.Floor(timeFloat64Seconds(now)),
+		lastThrottleTime: now,
+
+		tokenBucket: newAdaptiveTokenBucket(0),
+	}
+}
+
+func (a *adaptiveRateLimit) Enable(v bool) {
+	a.mu.Lock()
+	defer a.mu.Unlock()
+
+	a.tokenBucketEnabled = v
+}
+
+func (a *adaptiveRateLimit) AcquireToken(amount uint) (
+	tokenAcquired bool, waitTryAgain time.Duration,
+) {
+	a.mu.Lock()
+	defer a.mu.Unlock()
+
+	if !a.tokenBucketEnabled {
+		return true, 0
+	}
+
+	a.tokenBucketRefill()
+
+	available, ok := a.tokenBucket.Retrieve(float64(amount))
+	if !ok {
+		waitDur := float64Seconds((float64(amount) - available) / a.fillRate)
+		return false, waitDur
+	}
+
+	return true, 0
+}
+
+func (a *adaptiveRateLimit) Update(throttled bool) {
+	a.mu.Lock()
+	defer a.mu.Unlock()
+
+	a.updateMeasuredRate()
+
+	if throttled {
+		rateToUse := a.measuredTxRate
+		if a.tokenBucketEnabled {
+			rateToUse = math.Min(a.measuredTxRate, a.fillRate)
+		}
+
+		a.lastMaxRate = rateToUse
+		a.calculateTimeWindow()
+		a.lastThrottleTime = sdk.NowTime()
+		a.calculatedRate = a.cubicThrottle(rateToUse)
+		a.tokenBucketEnabled = true
+	} else {
+		a.calculateTimeWindow()
+		a.calculatedRate = a.cubicSuccess(sdk.NowTime())
+	}
+
+	newRate := math.Min(a.calculatedRate, 2*a.measuredTxRate)
+	a.tokenBucketUpdateRate(newRate)
+}
+
+func (a *adaptiveRateLimit) cubicSuccess(t time.Time) float64 {
+	dt := secondsFloat64(t.Sub(a.lastThrottleTime))
+	return (a.scaleConstant * math.Pow(dt-a.timeWindow, 3)) + a.lastMaxRate
+}
+
+func (a *adaptiveRateLimit) cubicThrottle(rateToUse float64) float64 {
+	return rateToUse * a.beta
+}
+
+func (a *adaptiveRateLimit) calculateTimeWindow() {
+	a.timeWindow = math.Pow((a.lastMaxRate*(1.-a.beta))/a.scaleConstant, 1./3.)
+}
+
+func (a *adaptiveRateLimit) tokenBucketUpdateRate(newRPS float64) {
+	a.tokenBucketRefill()
+	a.fillRate = math.Max(newRPS, a.minFillRate)
+	a.tokenBucket.Resize(newRPS)
+}
+
+func (a *adaptiveRateLimit) updateMeasuredRate() {
+	now := sdk.NowTime()
+	timeBucket := math.Floor(timeFloat64Seconds(now)*2.) / 2.
+	a.requestCount++
+
+	if timeBucket > a.lastTxRateBucket {
+		currentRate := float64(a.requestCount) / (timeBucket - a.lastTxRateBucket)
+		a.measuredTxRate = (currentRate * a.smooth) + (a.measuredTxRate * (1. - a.smooth))
+		a.requestCount = 0
+		a.lastTxRateBucket = timeBucket
+	}
+}
+
+func (a *adaptiveRateLimit) tokenBucketRefill() {
+	now := sdk.NowTime()
+	if a.lastRefilled.IsZero() {
+		a.lastRefilled = now
+		return
+	}
+
+	fillAmount := secondsFloat64(now.Sub(a.lastRefilled)) * a.fillRate
+	a.tokenBucket.Refund(fillAmount)
+	a.lastRefilled = now
+}
+
+func float64Seconds(v float64) time.Duration {
+	return time.Duration(v * float64(time.Second))
+}
+
+func secondsFloat64(v time.Duration) float64 {
+	return float64(v) / float64(time.Second)
+}
+
+func timeFloat64Seconds(v time.Time) float64 {
+	return float64(v.UnixNano()) / float64(time.Second)
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/adaptive_token_bucket.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/adaptive_token_bucket.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/adaptive_token_bucket.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/adaptive_token_bucket.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,83 @@
+package retry
+
+import (
+	"math"
+	"sync"
+)
+
+// adaptiveTokenBucket provides a concurrency safe utility for adding and
+// removing tokens from the available token bucket.
+type adaptiveTokenBucket struct {
+	remainingTokens float64
+	maxCapacity     float64
+	minCapacity     float64
+	mu              sync.Mutex
+}
+
+// newAdaptiveTokenBucket returns an initialized adaptiveTokenBucket with the
+// capacity specified.
+func newAdaptiveTokenBucket(i float64) *adaptiveTokenBucket {
+	return &adaptiveTokenBucket{
+		remainingTokens: i,
+		maxCapacity:     i,
+		minCapacity:     1,
+	}
+}
+
+// Retrieve attempts to reduce the available tokens by the amount requested. If
+// there are tokens available true will be returned along with the number of
+// available tokens remaining. If amount requested is larger than the available
+// capacity, false will be returned along with the available capacity. If the
+// amount is less than the available capacity, the capacity will be reduced by
+// that amount, and the remaining capacity and true will be returned.
+func (t *adaptiveTokenBucket) Retrieve(amount float64) (available float64, retrieved bool) {
+	t.mu.Lock()
+	defer t.mu.Unlock()
+
+	if amount > t.remainingTokens {
+		return t.remainingTokens, false
+	}
+
+	t.remainingTokens -= amount
+	return t.remainingTokens, true
+}
+
+// Refund returns the amount of tokens back to the available token bucket, up
+// to the initial capacity.
+func (t *adaptiveTokenBucket) Refund(amount float64) {
+	t.mu.Lock()
+	defer t.mu.Unlock()
+
+	// Capacity cannot exceed max capacity.
+	t.remainingTokens = math.Min(t.remainingTokens+amount, t.maxCapacity)
+}
+
+// Capacity returns the maximum capacity of tokens that the bucket could
+// contain.
+func (t *adaptiveTokenBucket) Capacity() float64 {
+	t.mu.Lock()
+	defer t.mu.Unlock()
+
+	return t.maxCapacity
+}
+
+// Remaining returns the number of tokens remaining in the bucket.
+func (t *adaptiveTokenBucket) Remaining() float64 {
+	t.mu.Lock()
+	defer t.mu.Unlock()
+
+	return t.remainingTokens
+}
+
+// Resize adjusts the size of the token bucket. Returns the capacity remaining.
+func (t *adaptiveTokenBucket) Resize(size float64) float64 {
+	t.mu.Lock()
+	defer t.mu.Unlock()
+
+	t.maxCapacity = math.Max(size, t.minCapacity)
+
+	// Capacity needs to be capped at max capacity, if max size reduced.
+	t.remainingTokens = math.Min(t.remainingTokens, t.maxCapacity)
+
+	return t.remainingTokens
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/doc.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/doc.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/doc.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/doc.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,80 @@
+// Package retry provides interfaces and implementations for SDK request retry behavior.
+//
+// # Retryer Interface and Implementations
+//
+// This package defines Retryer interface that is used to either implement custom retry behavior
+// or to extend the existing retry implementations provided by the SDK. This package provides a single
+// retry implementation: Standard.
+//
+// # Standard
+//
+// Standard is the default retryer implementation used by service clients. The standard retryer is a rate limited
+// retryer that has a configurable max attempts to limit the number of retry attempts when a retryable error occurs.
+// In addition, the retryer uses a configurable token bucket to rate limit the retry attempts across the client,
+// and uses an additional delay policy to limit the time between a requests subsequent attempts.
+//
+// By default the standard retryer uses the DefaultRetryables slice of IsErrorRetryable types to determine whether
+// a given error is retryable. By default this list of retryables includes the following:
+//   - Retrying errors that implement the RetryableError method, and return true.
+//   - Connection Errors
+//   - Errors that implement a ConnectionError, Temporary, or Timeout method that return true.
+//   - Connection Reset Errors.
+//   - net.OpErr types that are dialing errors or are temporary.
+//   - HTTP Status Codes: 500, 502, 503, and 504.
+//   - API Error Codes
+//   - RequestTimeout, RequestTimeoutException
+//   - Throttling, ThrottlingException, ThrottledException, RequestThrottledException, TooManyRequestsException,
+//     RequestThrottled, SlowDown, EC2ThrottledException
+//   - ProvisionedThroughputExceededException, RequestLimitExceeded, BandwidthLimitExceeded, LimitExceededException
+//   - TransactionInProgressException, PriorRequestNotComplete
+//
+// The standard retryer will not retry a request in the event that the context associated with the request
+// has been cancelled. Applications must handle this case explicitly if they wish to retry with a different context
+// value.
+//
+// You can configure the standard retryer implementation to fit your applications by constructing a standard retryer
+// using the NewStandard function, and providing one or more functional arguments that mutate the StandardOptions
+// structure. StandardOptions provides the ability to modify the token bucket rate limiter, retryable error conditions,
+// and the retry delay policy.
+//
+// For example to modify the default retry attempts for the standard retryer:
+//
+//	// configure the custom retryer
+//	customRetry := retry.NewStandard(func(o *retry.StandardOptions) {
+//	    o.MaxAttempts = 5
+//	})
+//
+//	// create a service client with the retryer
+//	s3.NewFromConfig(cfg, func(o *s3.Options) {
+//	    o.Retryer = customRetry
+//	})
+//
+// # Utilities
+//
+// A number of package functions have been provided to easily wrap retryer implementations in an implementation agnostic
+// way. These are:
+//
+//	AddWithErrorCodes      - Provides the ability to add additional API error codes that should be considered retryable
+//	                        in addition to those considered retryable by the provided retryer.
+//
+//	AddWithMaxAttempts     - Provides the ability to set the max number of attempts for retrying a request by wrapping
+//	                         a retryer implementation.
+//
+//	AddWithMaxBackoffDelay - Provides the ability to set the max back off delay that can occur before retrying a
+//	                         request by wrapping a retryer implementation.
+//
+// The following package functions have been provided to easily satisfy different retry interfaces to further customize
+// a given retryer's behavior:
+//
+//	BackoffDelayerFunc   - Can be used to wrap a function to satisfy the BackoffDelayer interface. For example,
+//	                       you can use this method to easily create custom back off policies to be used with the
+//	                       standard retryer.
+//
+//	IsErrorRetryableFunc - Can be used to wrap a function to satisfy the IsErrorRetryable interface. For example,
+//	                       this can be used to extend the standard retryer to add additional logic to determine if an
+//	                       error should be retried.
+//
+//	IsErrorTimeoutFunc   - Can be used to wrap a function to satisfy IsErrorTimeout interface. For example,
+//	                       this can be used to extend the standard retryer to add additional logic to determine if an
+//	                        error should be considered a timeout.
+package retry
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/errors.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/errors.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/errors.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/errors.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,20 @@
+package retry
+
+import "fmt"
+
+// MaxAttemptsError provides the error when the maximum number of attempts have
+// been exceeded.
+type MaxAttemptsError struct {
+	Attempt int
+	Err     error
+}
+
+func (e *MaxAttemptsError) Error() string {
+	return fmt.Sprintf("exceeded maximum number of attempts, %d, %v", e.Attempt, e.Err)
+}
+
+// Unwrap returns the nested error causing the max attempts error. Provides the
+// implementation for errors.Is and errors.As to unwrap nested errors.
+func (e *MaxAttemptsError) Unwrap() error {
+	return e.Err
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/jitter_backoff.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/jitter_backoff.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/jitter_backoff.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/jitter_backoff.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,49 @@
+package retry
+
+import (
+	"math"
+	"time"
+
+	"github.com/aws/aws-sdk-go-v2/internal/rand"
+	"github.com/aws/aws-sdk-go-v2/internal/timeconv"
+)
+
+// ExponentialJitterBackoff provides backoff delays with jitter based on the
+// number of attempts.
+type ExponentialJitterBackoff struct {
+	maxBackoff time.Duration
+	// precomputed number of attempts needed to reach max backoff.
+	maxBackoffAttempts float64
+
+	randFloat64 func() (float64, error)
+}
+
+// NewExponentialJitterBackoff returns an ExponentialJitterBackoff configured
+// for the max backoff.
+func NewExponentialJitterBackoff(maxBackoff time.Duration) *ExponentialJitterBackoff {
+	return &ExponentialJitterBackoff{
+		maxBackoff: maxBackoff,
+		maxBackoffAttempts: math.Log2(
+			float64(maxBackoff) / float64(time.Second)),
+		randFloat64: rand.CryptoRandFloat64,
+	}
+}
+
+// BackoffDelay returns the duration to wait before the next attempt should be
+// made. Returns an error if unable to get a duration.
+func (j *ExponentialJitterBackoff) BackoffDelay(attempt int, err error) (time.Duration, error) {
+	if attempt > int(j.maxBackoffAttempts) {
+		return j.maxBackoff, nil
+	}
+
+	b, err := j.randFloat64()
+	if err != nil {
+		return 0, err
+	}
+
+	// [0.0, 1.0) * 2 ^ attempts
+	ri := int64(1 << uint64(attempt))
+	delaySeconds := b * float64(ri)
+
+	return timeconv.FloatSecondsDur(delaySeconds), nil
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/metadata.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/metadata.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/metadata.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/metadata.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,52 @@
+package retry
+
+import (
+	awsmiddle "github.com/aws/aws-sdk-go-v2/aws/middleware"
+	"github.com/aws/smithy-go/middleware"
+)
+
+// attemptResultsKey is a metadata accessor key to retrieve metadata
+// for all request attempts.
+type attemptResultsKey struct {
+}
+
+// GetAttemptResults retrieves attempts results from middleware metadata.
+func GetAttemptResults(metadata middleware.Metadata) (AttemptResults, bool) {
+	m, ok := metadata.Get(attemptResultsKey{}).(AttemptResults)
+	return m, ok
+}
+
+// AttemptResults represents struct containing metadata returned by all request attempts.
+type AttemptResults struct {
+
+	// Results is a slice consisting of the attempt results from all request attempts.
+	// Results are stored in order request attempt is made.
+	Results []AttemptResult
+}
+
+// AttemptResult represents attempt result returned by a single request attempt.
+type AttemptResult struct {
+
+	// Err is the error if received for the request attempt.
+	Err error
+
+	// Retryable denotes if request may be retried. This states if an
+	// error is considered retryable.
+	Retryable bool
+
+	// Retried indicates if this request was retried.
+	Retried bool
+
+	// ResponseMetadata is any existing metadata passed via the response middlewares.
+	ResponseMetadata middleware.Metadata
+}
+
+// addAttemptResults adds attempt results to middleware metadata
+func addAttemptResults(metadata *middleware.Metadata, v AttemptResults) {
+	metadata.Set(attemptResultsKey{}, v)
+}
+
+// GetRawResponse returns raw response recorded for the attempt result
+func (a AttemptResult) GetRawResponse() interface{} {
+	return awsmiddle.GetRawResponse(a.ResponseMetadata)
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/middleware.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/middleware.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/middleware.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/middleware.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,383 @@
+package retry
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/aws/aws-sdk-go-v2/aws/middleware/private/metrics"
+	internalcontext "github.com/aws/aws-sdk-go-v2/internal/context"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+	awsmiddle "github.com/aws/aws-sdk-go-v2/aws/middleware"
+	"github.com/aws/aws-sdk-go-v2/internal/sdk"
+	"github.com/aws/smithy-go/logging"
+	smithymiddle "github.com/aws/smithy-go/middleware"
+	"github.com/aws/smithy-go/transport/http"
+)
+
+// RequestCloner is a function that can take an input request type and clone
+// the request for use in a subsequent retry attempt.
+type RequestCloner func(interface{}) interface{}
+
+type retryMetadata struct {
+	AttemptNum       int
+	AttemptTime      time.Time
+	MaxAttempts      int
+	AttemptClockSkew time.Duration
+}
+
+// Attempt is a Smithy Finalize middleware that handles retry attempts using
+// the provided Retryer implementation.
+type Attempt struct {
+	// Enable the logging of retry attempts performed by the SDK. This will
+	// include logging retry attempts, unretryable errors, and when max
+	// attempts are reached.
+	LogAttempts bool
+
+	retryer       aws.RetryerV2
+	requestCloner RequestCloner
+}
+
+// define the threshold at which we will consider certain kind of errors to be probably
+// caused by clock skew
+const skewThreshold = 4 * time.Minute
+
+// NewAttemptMiddleware returns a new Attempt retry middleware.
+func NewAttemptMiddleware(retryer aws.Retryer, requestCloner RequestCloner, optFns ...func(*Attempt)) *Attempt {
+	m := &Attempt{
+		retryer:       wrapAsRetryerV2(retryer),
+		requestCloner: requestCloner,
+	}
+	for _, fn := range optFns {
+		fn(m)
+	}
+	return m
+}
+
+// ID returns the middleware identifier
+func (r *Attempt) ID() string { return "Retry" }
+
+func (r Attempt) logf(logger logging.Logger, classification logging.Classification, format string, v ...interface{}) {
+	if !r.LogAttempts {
+		return
+	}
+	logger.Logf(classification, format, v...)
+}
+
+// HandleFinalize utilizes the provided Retryer implementation to attempt
+// retries over the next handler
+func (r *Attempt) HandleFinalize(ctx context.Context, in smithymiddle.FinalizeInput, next smithymiddle.FinalizeHandler) (
+	out smithymiddle.FinalizeOutput, metadata smithymiddle.Metadata, err error,
+) {
+	var attemptNum int
+	var attemptClockSkew time.Duration
+	var attemptResults AttemptResults
+
+	maxAttempts := r.retryer.MaxAttempts()
+	releaseRetryToken := nopRelease
+
+	for {
+		attemptNum++
+		attemptInput := in
+		attemptInput.Request = r.requestCloner(attemptInput.Request)
+
+		// Record the metadata for the attempt being started.
+		attemptCtx := setRetryMetadata(ctx, retryMetadata{
+			AttemptNum:       attemptNum,
+			AttemptTime:      sdk.NowTime().UTC(),
+			MaxAttempts:      maxAttempts,
+			AttemptClockSkew: attemptClockSkew,
+		})
+
+		// Setting clock skew to be used on other context (like signing)
+		ctx = internalcontext.SetAttemptSkewContext(ctx, attemptClockSkew)
+
+		var attemptResult AttemptResult
+		out, attemptResult, releaseRetryToken, err = r.handleAttempt(attemptCtx, attemptInput, releaseRetryToken, next)
+		attemptClockSkew, _ = awsmiddle.GetAttemptSkew(attemptResult.ResponseMetadata)
+
+		// AttemptResult Retried states that the attempt was not successful, and
+		// should be retried.
+		shouldRetry := attemptResult.Retried
+
+		// Add attempt metadata to list of all attempt metadata
+		attemptResults.Results = append(attemptResults.Results, attemptResult)
+
+		if !shouldRetry {
+			// Ensure the last response's metadata is used as the basis for result
+			// metadata returned by the stack. The Slice of attempt results
+			// will be added to this cloned metadata.
+			metadata = attemptResult.ResponseMetadata.Clone()
+
+			break
+		}
+	}
+
+	addAttemptResults(&metadata, attemptResults)
+	return out, metadata, err
+}
+
+// handleAttempt handles an individual request attempt.
+func (r *Attempt) handleAttempt(
+	ctx context.Context, in smithymiddle.FinalizeInput, releaseRetryToken func(error) error, next smithymiddle.FinalizeHandler,
+) (
+	out smithymiddle.FinalizeOutput, attemptResult AttemptResult, _ func(error) error, err error,
+) {
+	defer func() {
+		attemptResult.Err = err
+	}()
+
+	// Short circuit if this attempt never can succeed because the context is
+	// canceled. This reduces the chance of token pools being modified for
+	// attempts that will not be made
+	select {
+	case <-ctx.Done():
+		return out, attemptResult, nopRelease, ctx.Err()
+	default:
+	}
+
+	//------------------------------
+	// Get Attempt Token
+	//------------------------------
+	releaseAttemptToken, err := r.retryer.GetAttemptToken(ctx)
+	if err != nil {
+		return out, attemptResult, nopRelease, fmt.Errorf(
+			"failed to get retry Send token, %w", err)
+	}
+
+	//------------------------------
+	// Send Attempt
+	//------------------------------
+	logger := smithymiddle.GetLogger(ctx)
+	service, operation := awsmiddle.GetServiceID(ctx), awsmiddle.GetOperationName(ctx)
+	retryMetadata, _ := getRetryMetadata(ctx)
+	attemptNum := retryMetadata.AttemptNum
+	maxAttempts := retryMetadata.MaxAttempts
+
+	// Following attempts must ensure the request payload stream starts in a
+	// rewound state.
+	if attemptNum > 1 {
+		if rewindable, ok := in.Request.(interface{ RewindStream() error }); ok {
+			if rewindErr := rewindable.RewindStream(); rewindErr != nil {
+				return out, attemptResult, nopRelease, fmt.Errorf(
+					"failed to rewind transport stream for retry, %w", rewindErr)
+			}
+		}
+
+		r.logf(logger, logging.Debug, "retrying request %s/%s, attempt %d",
+			service, operation, attemptNum)
+	}
+
+	var metadata smithymiddle.Metadata
+	out, metadata, err = next.HandleFinalize(ctx, in)
+	attemptResult.ResponseMetadata = metadata
+
+	//------------------------------
+	// Bookkeeping
+	//------------------------------
+	// Release the retry token based on the state of the attempt's error (if any).
+	if releaseError := releaseRetryToken(err); releaseError != nil && err != nil {
+		return out, attemptResult, nopRelease, fmt.Errorf(
+			"failed to release retry token after request error, %w", err)
+	}
+	// Release the attempt token based on the state of the attempt's error (if any).
+	if releaseError := releaseAttemptToken(err); releaseError != nil && err != nil {
+		return out, attemptResult, nopRelease, fmt.Errorf(
+			"failed to release initial token after request error, %w", err)
+	}
+	// If there was no error making the attempt, nothing further to do. There
+	// will be nothing to retry.
+	if err == nil {
+		return out, attemptResult, nopRelease, err
+	}
+
+	err = wrapAsClockSkew(ctx, err)
+
+	//------------------------------
+	// Is Retryable and Should Retry
+	//------------------------------
+	// If the attempt failed with an unretryable error, nothing further to do
+	// but return, and inform the caller about the terminal failure.
+	retryable := r.retryer.IsErrorRetryable(err)
+	if !retryable {
+		r.logf(logger, logging.Debug, "request failed with unretryable error %v", err)
+		return out, attemptResult, nopRelease, err
+	}
+
+	// set retryable to true
+	attemptResult.Retryable = true
+
+	// Once the maximum number of attempts have been exhausted there is nothing
+	// further to do other than inform the caller about the terminal failure.
+	if maxAttempts > 0 && attemptNum >= maxAttempts {
+		r.logf(logger, logging.Debug, "max retry attempts exhausted, max %d", maxAttempts)
+		err = &MaxAttemptsError{
+			Attempt: attemptNum,
+			Err:     err,
+		}
+		return out, attemptResult, nopRelease, err
+	}
+
+	//------------------------------
+	// Get Retry (aka Retry Quota) Token
+	//------------------------------
+	// Get a retry token that will be released after the attempt completes.
+	releaseRetryToken, retryTokenErr := r.retryer.GetRetryToken(ctx, err)
+	if retryTokenErr != nil {
+		return out, attemptResult, nopRelease, retryTokenErr
+	}
+
+	//------------------------------
+	// Retry Delay and Sleep
+	//------------------------------
+	// Get the retry delay before another attempt can be made, and sleep for
+	// that time. Potentially exit early if the sleep is canceled via the
+	// context.
+	retryDelay, reqErr := r.retryer.RetryDelay(attemptNum, err)
+	mctx := metrics.Context(ctx)
+	if mctx != nil {
+		attempt, err := mctx.Data().LatestAttempt()
+		if err != nil {
+			attempt.RetryDelay = retryDelay
+		}
+	}
+	if reqErr != nil {
+		return out, attemptResult, releaseRetryToken, reqErr
+	}
+	if reqErr = sdk.SleepWithContext(ctx, retryDelay); reqErr != nil {
+		err = &aws.RequestCanceledError{Err: reqErr}
+		return out, attemptResult, releaseRetryToken, err
+	}
+
+	// The request should be re-attempted.
+	attemptResult.Retried = true
+
+	return out, attemptResult, releaseRetryToken, err
+}
+
+// errors that, if detected when we know there's a clock skew,
+// can be retried and have a high chance of success
+var possibleSkewCodes = map[string]struct{}{
+	"InvalidSignatureException": {},
+	"SignatureDoesNotMatch":     {},
+	"AuthFailure":               {},
+}
+
+var definiteSkewCodes = map[string]struct{}{
+	"RequestExpired":       {},
+	"RequestInTheFuture":   {},
+	"RequestTimeTooSkewed": {},
+}
+
+// wrapAsClockSkew checks if this error could be related to a clock skew
+// error and if so, wrap the error.
+func wrapAsClockSkew(ctx context.Context, err error) error {
+	var v interface{ ErrorCode() string }
+	if !errors.As(err, &v) {
+		return err
+	}
+	if _, ok := definiteSkewCodes[v.ErrorCode()]; ok {
+		return &retryableClockSkewError{Err: err}
+	}
+	_, isPossibleSkewCode := possibleSkewCodes[v.ErrorCode()]
+	if skew := internalcontext.GetAttemptSkewContext(ctx); skew > skewThreshold && isPossibleSkewCode {
+		return &retryableClockSkewError{Err: err}
+	}
+	return err
+}
+
+// MetricsHeader attaches SDK request metric header for retries to the transport
+type MetricsHeader struct{}
+
+// ID returns the middleware identifier
+func (r *MetricsHeader) ID() string {
+	return "RetryMetricsHeader"
+}
+
+// HandleFinalize attaches the SDK request metric header to the transport layer
+func (r MetricsHeader) HandleFinalize(ctx context.Context, in smithymiddle.FinalizeInput, next smithymiddle.FinalizeHandler) (
+	out smithymiddle.FinalizeOutput, metadata smithymiddle.Metadata, err error,
+) {
+	retryMetadata, _ := getRetryMetadata(ctx)
+
+	const retryMetricHeader = "Amz-Sdk-Request"
+	var parts []string
+
+	parts = append(parts, "attempt="+strconv.Itoa(retryMetadata.AttemptNum))
+	if retryMetadata.MaxAttempts != 0 {
+		parts = append(parts, "max="+strconv.Itoa(retryMetadata.MaxAttempts))
+	}
+
+	var ttl time.Time
+	if deadline, ok := ctx.Deadline(); ok {
+		ttl = deadline
+	}
+
+	// Only append the TTL if it can be determined.
+	if !ttl.IsZero() && retryMetadata.AttemptClockSkew > 0 {
+		const unixTimeFormat = "20060102T150405Z"
+		ttl = ttl.Add(retryMetadata.AttemptClockSkew)
+		parts = append(parts, "ttl="+ttl.Format(unixTimeFormat))
+	}
+
+	switch req := in.Request.(type) {
+	case *http.Request:
+		req.Header[retryMetricHeader] = append(req.Header[retryMetricHeader][:0], strings.Join(parts, "; "))
+	default:
+		return out, metadata, fmt.Errorf("unknown transport type %T", req)
+	}
+
+	return next.HandleFinalize(ctx, in)
+}
+
+type retryMetadataKey struct{}
+
+// getRetryMetadata retrieves retryMetadata from the context and a bool
+// indicating if it was set.
+//
+// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues
+// to clear all stack values.
+func getRetryMetadata(ctx context.Context) (metadata retryMetadata, ok bool) {
+	metadata, ok = smithymiddle.GetStackValue(ctx, retryMetadataKey{}).(retryMetadata)
+	return metadata, ok
+}
+
+// setRetryMetadata sets the retryMetadata on the context.
+//
+// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues
+// to clear all stack values.
+func setRetryMetadata(ctx context.Context, metadata retryMetadata) context.Context {
+	return smithymiddle.WithStackValue(ctx, retryMetadataKey{}, metadata)
+}
+
+// AddRetryMiddlewaresOptions is the set of options that can be passed to
+// AddRetryMiddlewares for configuring retry associated middleware.
+type AddRetryMiddlewaresOptions struct {
+	Retryer aws.Retryer
+
+	// Enable the logging of retry attempts performed by the SDK. This will
+	// include logging retry attempts, unretryable errors, and when max
+	// attempts are reached.
+	LogRetryAttempts bool
+}
+
+// AddRetryMiddlewares adds retry middleware to operation middleware stack
+func AddRetryMiddlewares(stack *smithymiddle.Stack, options AddRetryMiddlewaresOptions) error {
+	attempt := NewAttemptMiddleware(options.Retryer, http.RequestCloner, func(middleware *Attempt) {
+		middleware.LogAttempts = options.LogRetryAttempts
+	})
+
+	// index retry to before signing, if signing exists
+	if err := stack.Finalize.Insert(attempt, "Signing", smithymiddle.Before); err != nil {
+		return err
+	}
+
+	if err := stack.Finalize.Insert(&MetricsHeader{}, attempt.ID(), smithymiddle.After); err != nil {
+		return err
+	}
+	return nil
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/retry.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/retry.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/retry.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/retry.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,90 @@
+package retry
+
+import (
+	"context"
+	"time"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+)
+
+// AddWithErrorCodes returns a Retryer with additional error codes considered
+// for determining if the error should be retried.
+func AddWithErrorCodes(r aws.Retryer, codes ...string) aws.Retryer {
+	retryable := &RetryableErrorCode{
+		Codes: map[string]struct{}{},
+	}
+	for _, c := range codes {
+		retryable.Codes[c] = struct{}{}
+	}
+
+	return &withIsErrorRetryable{
+		RetryerV2: wrapAsRetryerV2(r),
+		Retryable: retryable,
+	}
+}
+
+type withIsErrorRetryable struct {
+	aws.RetryerV2
+	Retryable IsErrorRetryable
+}
+
+func (r *withIsErrorRetryable) IsErrorRetryable(err error) bool {
+	if v := r.Retryable.IsErrorRetryable(err); v != aws.UnknownTernary {
+		return v.Bool()
+	}
+	return r.RetryerV2.IsErrorRetryable(err)
+}
+
+// AddWithMaxAttempts returns a Retryer with MaxAttempts set to the value
+// specified.
+func AddWithMaxAttempts(r aws.Retryer, max int) aws.Retryer {
+	return &withMaxAttempts{
+		RetryerV2: wrapAsRetryerV2(r),
+		Max:       max,
+	}
+}
+
+type withMaxAttempts struct {
+	aws.RetryerV2
+	Max int
+}
+
+func (w *withMaxAttempts) MaxAttempts() int {
+	return w.Max
+}
+
+// AddWithMaxBackoffDelay returns a retryer wrapping the passed in retryer
+// overriding the RetryDelay behavior for an alternate minimum initial backoff
+// delay.
+func AddWithMaxBackoffDelay(r aws.Retryer, delay time.Duration) aws.Retryer {
+	return &withMaxBackoffDelay{
+		RetryerV2: wrapAsRetryerV2(r),
+		backoff:   NewExponentialJitterBackoff(delay),
+	}
+}
+
+type withMaxBackoffDelay struct {
+	aws.RetryerV2
+	backoff *ExponentialJitterBackoff
+}
+
+func (r *withMaxBackoffDelay) RetryDelay(attempt int, err error) (time.Duration, error) {
+	return r.backoff.BackoffDelay(attempt, err)
+}
+
+type wrappedAsRetryerV2 struct {
+	aws.Retryer
+}
+
+func wrapAsRetryerV2(r aws.Retryer) aws.RetryerV2 {
+	v, ok := r.(aws.RetryerV2)
+	if !ok {
+		v = wrappedAsRetryerV2{Retryer: r}
+	}
+
+	return v
+}
+
+func (w wrappedAsRetryerV2) GetAttemptToken(context.Context) (func(error) error, error) {
+	return w.Retryer.GetInitialToken(), nil
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/retryable_error.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/retryable_error.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/retryable_error.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/retryable_error.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,222 @@
+package retry
+
+import (
+	"errors"
+	"fmt"
+	"net"
+	"net/url"
+	"strings"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+)
+
+// IsErrorRetryable provides the interface of an implementation to determine if
+// an error resulting from an operation is retryable.
+type IsErrorRetryable interface {
+	IsErrorRetryable(error) aws.Ternary
+}
+
+// IsErrorRetryables is a collection of checks to determine if the error is
+// retryable. Iterates through the checks and returns the state of retryable
+// if any check returns something other than unknown.
+type IsErrorRetryables []IsErrorRetryable
+
+// IsErrorRetryable returns if the error is retryable if any of the checks in
+// the list return a value other than unknown.
+func (r IsErrorRetryables) IsErrorRetryable(err error) aws.Ternary {
+	for _, re := range r {
+		if v := re.IsErrorRetryable(err); v != aws.UnknownTernary {
+			return v
+		}
+	}
+	return aws.UnknownTernary
+}
+
+// IsErrorRetryableFunc wraps a function with the IsErrorRetryable interface.
+type IsErrorRetryableFunc func(error) aws.Ternary
+
+// IsErrorRetryable returns if the error is retryable.
+func (fn IsErrorRetryableFunc) IsErrorRetryable(err error) aws.Ternary {
+	return fn(err)
+}
+
+// RetryableError is an IsErrorRetryable implementation which uses the
+// optional interface Retryable on the error value to determine if the error is
+// retryable.
+type RetryableError struct{}
+
+// IsErrorRetryable returns if the error is retryable if it satisfies the
+// Retryable interface, and returns if the attempt should be retried.
+func (RetryableError) IsErrorRetryable(err error) aws.Ternary {
+	var v interface{ RetryableError() bool }
+
+	if !errors.As(err, &v) {
+		return aws.UnknownTernary
+	}
+
+	return aws.BoolTernary(v.RetryableError())
+}
+
+// NoRetryCanceledError detects if the error was a request canceled error and
+// returns if so.
+type NoRetryCanceledError struct{}
+
+// IsErrorRetryable returns the error is not retryable if the request was
+// canceled.
+func (NoRetryCanceledError) IsErrorRetryable(err error) aws.Ternary {
+	var v interface{ CanceledError() bool }
+
+	if !errors.As(err, &v) {
+		return aws.UnknownTernary
+	}
+
+	if v.CanceledError() {
+		return aws.FalseTernary
+	}
+	return aws.UnknownTernary
+}
+
+// RetryableConnectionError determines if the underlying error is an HTTP
+// connection error and returns if it should be retried.
+//
+// Includes errors such as connection reset, connection refused, net dial,
+// temporary, and timeout errors.
+type RetryableConnectionError struct{}
+
+// IsErrorRetryable returns if the error is caused by an HTTP connection
+// error, and should be retried.
+func (r RetryableConnectionError) IsErrorRetryable(err error) aws.Ternary {
+	if err == nil {
+		return aws.UnknownTernary
+	}
+	var retryable bool
+
+	var conErr interface{ ConnectionError() bool }
+	var tempErr interface{ Temporary() bool }
+	var timeoutErr interface{ Timeout() bool }
+	var urlErr *url.Error
+	var netOpErr *net.OpError
+	var dnsError *net.DNSError
+
+	if errors.As(err, &dnsError) {
+		// NXDOMAIN errors should not be retried
+		if dnsError.IsNotFound {
+			return aws.BoolTernary(false)
+		}
+
+		// if !dnsError.Temporary(), error may or may not be temporary,
+		// (i.e. !Temporary() =/=> !retryable) so we should fall through to
+		// remaining checks
+		if dnsError.Temporary() {
+			return aws.BoolTernary(true)
+		}
+	}
+
+	switch {
+	case errors.As(err, &conErr) && conErr.ConnectionError():
+		retryable = true
+
+	case strings.Contains(err.Error(), "connection reset"):
+		retryable = true
+
+	case errors.As(err, &urlErr):
+		// Refused connections should be retried as the service may not yet be
+		// running on the port. Go TCP dial considers refused connections as
+		// not temporary.
+		if strings.Contains(urlErr.Error(), "connection refused") {
+			retryable = true
+		} else {
+			return r.IsErrorRetryable(errors.Unwrap(urlErr))
+		}
+
+	case errors.As(err, &netOpErr):
+		// Network dial, or temporary network errors are always retryable.
+		if strings.EqualFold(netOpErr.Op, "dial") || netOpErr.Temporary() {
+			retryable = true
+		} else {
+			return r.IsErrorRetryable(errors.Unwrap(netOpErr))
+		}
+
+	case errors.As(err, &tempErr) && tempErr.Temporary():
+		// Fallback to the generic temporary check, with temporary errors
+		// retryable.
+		retryable = true
+
+	case errors.As(err, &timeoutErr) && timeoutErr.Timeout():
+		// Fallback to the generic timeout check, with timeout errors
+		// retryable.
+		retryable = true
+
+	default:
+		return aws.UnknownTernary
+	}
+
+	return aws.BoolTernary(retryable)
+
+}
+
+// RetryableHTTPStatusCode provides a IsErrorRetryable based on HTTP status
+// codes.
+type RetryableHTTPStatusCode struct {
+	Codes map[int]struct{}
+}
+
+// IsErrorRetryable returns if the passed in error is retryable based on the
+// HTTP status code.
+func (r RetryableHTTPStatusCode) IsErrorRetryable(err error) aws.Ternary {
+	var v interface{ HTTPStatusCode() int }
+
+	if !errors.As(err, &v) {
+		return aws.UnknownTernary
+	}
+
+	_, ok := r.Codes[v.HTTPStatusCode()]
+	if !ok {
+		return aws.UnknownTernary
+	}
+
+	return aws.TrueTernary
+}
+
+// RetryableErrorCode determines if an attempt should be retried based on the
+// API error code.
+type RetryableErrorCode struct {
+	Codes map[string]struct{}
+}
+
+// IsErrorRetryable returns if the error is retryable based on the error codes.
+// Returns unknown if the error doesn't have a code or it is unknown.
+func (r RetryableErrorCode) IsErrorRetryable(err error) aws.Ternary {
+	var v interface{ ErrorCode() string }
+
+	if !errors.As(err, &v) {
+		return aws.UnknownTernary
+	}
+
+	_, ok := r.Codes[v.ErrorCode()]
+	if !ok {
+		return aws.UnknownTernary
+	}
+
+	return aws.TrueTernary
+}
+
+// retryableClockSkewError marks errors that can be caused by clock skew
+// (difference between server time and client time).
+// This is returned when there's certain confidence that adjusting the client time
+// could allow a retry to succeed
+type retryableClockSkewError struct{ Err error }
+
+func (e *retryableClockSkewError) Error() string {
+	return fmt.Sprintf("Probable clock skew error: %v", e.Err)
+}
+
+// Unwrap returns the wrapped error.
+func (e *retryableClockSkewError) Unwrap() error {
+	return e.Err
+}
+
+// RetryableError allows the retryer to retry this request
+func (e *retryableClockSkewError) RetryableError() bool {
+	return true
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/standard.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/standard.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/standard.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/standard.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,269 @@
+package retry
+
+import (
+	"context"
+	"fmt"
+	"time"
+
+	"github.com/aws/aws-sdk-go-v2/aws/ratelimit"
+)
+
+// BackoffDelayer provides the interface for determining the delay before
+// retrying a request attempt that previously failed.
+type BackoffDelayer interface {
+	BackoffDelay(attempt int, err error) (time.Duration, error)
+}
+
+// BackoffDelayerFunc provides a wrapper around a function to determine the
+// backoff delay of an attempt retry.
+type BackoffDelayerFunc func(int, error) (time.Duration, error)
+
+// BackoffDelay returns the delay before attempt to retry a request.
+func (fn BackoffDelayerFunc) BackoffDelay(attempt int, err error) (time.Duration, error) {
+	return fn(attempt, err)
+}
+
+const (
+	// DefaultMaxAttempts is the maximum of attempts for an API request
+	DefaultMaxAttempts int = 3
+
+	// DefaultMaxBackoff is the maximum back off delay between attempts
+	DefaultMaxBackoff time.Duration = 20 * time.Second
+)
+
+// Default retry token quota values.
+const (
+	DefaultRetryRateTokens  uint = 500
+	DefaultRetryCost        uint = 5
+	DefaultRetryTimeoutCost uint = 10
+	DefaultNoRetryIncrement uint = 1
+)
+
+// DefaultRetryableHTTPStatusCodes is the default set of HTTP status codes the SDK
+// should consider as retryable errors.
+var DefaultRetryableHTTPStatusCodes = map[int]struct{}{
+	500: {},
+	502: {},
+	503: {},
+	504: {},
+}
+
+// DefaultRetryableErrorCodes provides the set of API error codes that should
+// be retried.
+var DefaultRetryableErrorCodes = map[string]struct{}{
+	"RequestTimeout":          {},
+	"RequestTimeoutException": {},
+}
+
+// DefaultThrottleErrorCodes provides the set of API error codes that are
+// considered throttle errors.
+var DefaultThrottleErrorCodes = map[string]struct{}{
+	"Throttling":                             {},
+	"ThrottlingException":                    {},
+	"ThrottledException":                     {},
+	"RequestThrottledException":              {},
+	"TooManyRequestsException":               {},
+	"ProvisionedThroughputExceededException": {},
+	"TransactionInProgressException":         {},
+	"RequestLimitExceeded":                   {},
+	"BandwidthLimitExceeded":                 {},
+	"LimitExceededException":                 {},
+	"RequestThrottled":                       {},
+	"SlowDown":                               {},
+	"PriorRequestNotComplete":                {},
+	"EC2ThrottledException":                  {},
+}
+
+// DefaultRetryables provides the set of retryable checks that are used by
+// default.
+var DefaultRetryables = []IsErrorRetryable{
+	NoRetryCanceledError{},
+	RetryableError{},
+	RetryableConnectionError{},
+	RetryableHTTPStatusCode{
+		Codes: DefaultRetryableHTTPStatusCodes,
+	},
+	RetryableErrorCode{
+		Codes: DefaultRetryableErrorCodes,
+	},
+	RetryableErrorCode{
+		Codes: DefaultThrottleErrorCodes,
+	},
+}
+
+// DefaultTimeouts provides the set of timeout checks that are used by default.
+var DefaultTimeouts = []IsErrorTimeout{
+	TimeouterError{},
+}
+
+// StandardOptions provides the functional options for configuring the standard
+// retryable, and delay behavior.
+type StandardOptions struct {
+	// Maximum number of attempts that should be made.
+	MaxAttempts int
+
+	// MaxBackoff duration between retried attempts.
+	MaxBackoff time.Duration
+
+	// Provides the backoff strategy the retryer will use to determine the
+	// delay between retry attempts.
+	Backoff BackoffDelayer
+
+	// Set of strategies to determine if the attempt should be retried based on
+	// the error response received.
+	//
+	// It is safe to append to this list in NewStandard's functional options.
+	Retryables []IsErrorRetryable
+
+	// Set of strategies to determine if the attempt failed due to a timeout
+	// error.
+	//
+	// It is safe to append to this list in NewStandard's functional options.
+	Timeouts []IsErrorTimeout
+
+	// Provides the rate limiting strategy for rate limiting attempt retries
+	// across all attempts the retryer is being used with.
+	//
+	// A RateLimiter operates as a token bucket with a set capacity, where
+	// attempt failures events consume tokens. A retry attempt that attempts to
+	// consume more tokens than what's available results in operation failure.
+	// The default implementation is parameterized as follows:
+	//   - a capacity of 500 (DefaultRetryRateTokens)
+	//   - a retry caused by a timeout costs 10 tokens (DefaultRetryCost)
+	//   - a retry caused by other errors costs 5 tokens (DefaultRetryTimeoutCost)
+	//   - an operation that succeeds on the 1st attempt adds 1 token (DefaultNoRetryIncrement)
+	//
+	// You can disable rate limiting by setting this field to ratelimit.None.
+	RateLimiter RateLimiter
+
+	// The cost to deduct from the RateLimiter's token bucket per retry.
+	RetryCost uint
+
+	// The cost to deduct from the RateLimiter's token bucket per retry caused
+	// by timeout error.
+	RetryTimeoutCost uint
+
+	// The cost to payback to the RateLimiter's token bucket for successful
+	// attempts.
+	NoRetryIncrement uint
+}
+
+// RateLimiter provides the interface for limiting the rate of attempt retries
+// allowed by the retryer.
+type RateLimiter interface {
+	GetToken(ctx context.Context, cost uint) (releaseToken func() error, err error)
+	AddTokens(uint) error
+}
+
+// Standard is the standard retry pattern for the SDK. It uses a set of
+// retryable checks to determine of the failed attempt should be retried, and
+// what retry delay should be used.
+type Standard struct {
+	options StandardOptions
+
+	timeout   IsErrorTimeout
+	retryable IsErrorRetryable
+	backoff   BackoffDelayer
+}
+
+// NewStandard initializes a standard retry behavior with defaults that can be
+// overridden via functional options.
+func NewStandard(fnOpts ...func(*StandardOptions)) *Standard {
+	o := StandardOptions{
+		MaxAttempts: DefaultMaxAttempts,
+		MaxBackoff:  DefaultMaxBackoff,
+		Retryables:  append([]IsErrorRetryable{}, DefaultRetryables...),
+		Timeouts:    append([]IsErrorTimeout{}, DefaultTimeouts...),
+
+		RateLimiter:      ratelimit.NewTokenRateLimit(DefaultRetryRateTokens),
+		RetryCost:        DefaultRetryCost,
+		RetryTimeoutCost: DefaultRetryTimeoutCost,
+		NoRetryIncrement: DefaultNoRetryIncrement,
+	}
+	for _, fn := range fnOpts {
+		fn(&o)
+	}
+	if o.MaxAttempts <= 0 {
+		o.MaxAttempts = DefaultMaxAttempts
+	}
+
+	backoff := o.Backoff
+	if backoff == nil {
+		backoff = NewExponentialJitterBackoff(o.MaxBackoff)
+	}
+
+	return &Standard{
+		options:   o,
+		backoff:   backoff,
+		retryable: IsErrorRetryables(o.Retryables),
+		timeout:   IsErrorTimeouts(o.Timeouts),
+	}
+}
+
+// MaxAttempts returns the maximum number of attempts that can be made for a
+// request before failing.
+func (s *Standard) MaxAttempts() int {
+	return s.options.MaxAttempts
+}
+
+// IsErrorRetryable returns if the error can be retried or not. Should not
+// consider the number of attempts made.
+func (s *Standard) IsErrorRetryable(err error) bool {
+	return s.retryable.IsErrorRetryable(err).Bool()
+}
+
+// RetryDelay returns the delay to use before another request attempt is made.
+func (s *Standard) RetryDelay(attempt int, err error) (time.Duration, error) {
+	return s.backoff.BackoffDelay(attempt, err)
+}
+
+// GetAttemptToken returns the token to be released after the attempt completes.
+// The release token will add NoRetryIncrement to the RateLimiter token pool if
+// the attempt was successful. If the attempt failed, nothing will be done.
+func (s *Standard) GetAttemptToken(context.Context) (func(error) error, error) {
+	return s.GetInitialToken(), nil
+}
+
+// GetInitialToken returns a token for adding the NoRetryIncrement to the
+// RateLimiter token if the attempt completed successfully without error.
+//
+// InitialToken applies to result of the each attempt, including the first.
+// Whereas the RetryToken applies to the result of subsequent attempts.
+//
+// Deprecated: use GetAttemptToken instead.
+func (s *Standard) GetInitialToken() func(error) error {
+	return releaseToken(s.noRetryIncrement).release
+}
+
+func (s *Standard) noRetryIncrement() error {
+	return s.options.RateLimiter.AddTokens(s.options.NoRetryIncrement)
+}
+
+// GetRetryToken attempts to deduct the retry cost from the retry token pool.
+// Returning the token release function, or error.
+func (s *Standard) GetRetryToken(ctx context.Context, opErr error) (func(error) error, error) {
+	cost := s.options.RetryCost
+
+	if s.timeout.IsErrorTimeout(opErr).Bool() {
+		cost = s.options.RetryTimeoutCost
+	}
+
+	fn, err := s.options.RateLimiter.GetToken(ctx, cost)
+	if err != nil {
+		return nil, fmt.Errorf("failed to get rate limit token, %w", err)
+	}
+
+	return releaseToken(fn).release, nil
+}
+
+func nopRelease(error) error { return nil }
+
+type releaseToken func() error
+
+func (f releaseToken) release(err error) error {
+	if err != nil {
+		return nil
+	}
+
+	return f()
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/throttle_error.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/throttle_error.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/throttle_error.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/throttle_error.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,60 @@
+package retry
+
+import (
+	"errors"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+)
+
+// IsErrorThrottle provides the interface of an implementation to determine if
+// an error response from an operation is a throttling error.
+type IsErrorThrottle interface {
+	IsErrorThrottle(error) aws.Ternary
+}
+
+// IsErrorThrottles is a collection of checks to determine if the error is a
+// throttle error. Iterates through the checks and returns the state of
+// throttle if any check returns something other than unknown.
+type IsErrorThrottles []IsErrorThrottle
+
+// IsErrorThrottle returns if the error is a throttle error if any of the
+// checks in the list return a value other than unknown.
+func (r IsErrorThrottles) IsErrorThrottle(err error) aws.Ternary {
+	for _, re := range r {
+		if v := re.IsErrorThrottle(err); v != aws.UnknownTernary {
+			return v
+		}
+	}
+	return aws.UnknownTernary
+}
+
+// IsErrorThrottleFunc wraps a function with the IsErrorThrottle interface.
+type IsErrorThrottleFunc func(error) aws.Ternary
+
+// IsErrorThrottle returns if the error is a throttle error.
+func (fn IsErrorThrottleFunc) IsErrorThrottle(err error) aws.Ternary {
+	return fn(err)
+}
+
+// ThrottleErrorCode determines if an attempt should be retried based on the
+// API error code.
+type ThrottleErrorCode struct {
+	Codes map[string]struct{}
+}
+
+// IsErrorThrottle returns if the error is a throttle error based on the error
+// codes. Returns unknown if the error doesn't have a code or it is unknown.
+func (r ThrottleErrorCode) IsErrorThrottle(err error) aws.Ternary {
+	var v interface{ ErrorCode() string }
+
+	if !errors.As(err, &v) {
+		return aws.UnknownTernary
+	}
+
+	_, ok := r.Codes[v.ErrorCode()]
+	if !ok {
+		return aws.UnknownTernary
+	}
+
+	return aws.TrueTernary
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/timeout_error.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/timeout_error.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/timeout_error.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/timeout_error.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,52 @@
+package retry
+
+import (
+	"errors"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+)
+
+// IsErrorTimeout provides the interface of an implementation to determine if
+// an error matches.
+type IsErrorTimeout interface {
+	IsErrorTimeout(err error) aws.Ternary
+}
+
+// IsErrorTimeouts is a collection of checks to determine if the error is
+// retryable. Iterates through the checks and returns the state of retryable
+// if any check returns something other than unknown.
+type IsErrorTimeouts []IsErrorTimeout
+
+// IsErrorTimeout returns if the error is retryable if any of the checks in
+// the list return a value other than unknown.
+func (ts IsErrorTimeouts) IsErrorTimeout(err error) aws.Ternary {
+	for _, t := range ts {
+		if v := t.IsErrorTimeout(err); v != aws.UnknownTernary {
+			return v
+		}
+	}
+	return aws.UnknownTernary
+}
+
+// IsErrorTimeoutFunc wraps a function with the IsErrorTimeout interface.
+type IsErrorTimeoutFunc func(error) aws.Ternary
+
+// IsErrorTimeout returns if the error is retryable.
+func (fn IsErrorTimeoutFunc) IsErrorTimeout(err error) aws.Ternary {
+	return fn(err)
+}
+
+// TimeouterError provides the IsErrorTimeout implementation for determining if
+// an error is a timeout based on type with the Timeout method.
+type TimeouterError struct{}
+
+// IsErrorTimeout returns if the error is a timeout error.
+func (t TimeouterError) IsErrorTimeout(err error) aws.Ternary {
+	var v interface{ Timeout() bool }
+
+	if !errors.As(err, &v) {
+		return aws.UnknownTernary
+	}
+
+	return aws.BoolTernary(v.Timeout())
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/aws/retryer.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/aws/retryer.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/aws/retryer.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/aws/retryer.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,127 @@
+package aws
+
+import (
+	"context"
+	"fmt"
+	"time"
+)
+
+// RetryMode provides the mode the API client will use to create a retryer
+// based on.
+type RetryMode string
+
+const (
+	// RetryModeStandard model provides rate limited retry attempts with
+	// exponential backoff delay.
+	RetryModeStandard RetryMode = "standard"
+
+	// RetryModeAdaptive model provides attempt send rate limiting on throttle
+	// responses in addition to standard mode's retry rate limiting.
+	//
+	// Adaptive retry mode is experimental and is subject to change in the
+	// future.
+	RetryModeAdaptive RetryMode = "adaptive"
+)
+
+// ParseRetryMode attempts to parse a RetryMode from the given string.
+// Returning error if the value is not a known RetryMode.
+func ParseRetryMode(v string) (mode RetryMode, err error) {
+	switch v {
+	case "standard":
+		return RetryModeStandard, nil
+	case "adaptive":
+		return RetryModeAdaptive, nil
+	default:
+		return mode, fmt.Errorf("unknown RetryMode, %v", v)
+	}
+}
+
+func (m RetryMode) String() string { return string(m) }
+
+// Retryer is an interface to determine if a given error from an
+// attempt should be retried, and if so what backoff delay to apply. The
+// default implementation used by most services is the retry package's Standard
+// type. Which contains basic retry logic using exponential backoff.
+type Retryer interface {
+	// IsErrorRetryable returns if the failed attempt is retryable. This check
+	// should determine if the error can be retried, or if the error is
+	// terminal.
+	IsErrorRetryable(error) bool
+
+	// MaxAttempts returns the maximum number of attempts that can be made for
+	// an attempt before failing. A value of 0 implies that the attempt should
+	// be retried until it succeeds if the errors are retryable.
+	MaxAttempts() int
+
+	// RetryDelay returns the delay that should be used before retrying the
+	// attempt. Will return error if the delay could not be determined.
+	RetryDelay(attempt int, opErr error) (time.Duration, error)
+
+	// GetRetryToken attempts to deduct the retry cost from the retry token pool.
+	// Returning the token release function, or error.
+	GetRetryToken(ctx context.Context, opErr error) (releaseToken func(error) error, err error)
+
+	// GetInitialToken returns the initial attempt token that can increment the
+	// retry token pool if the attempt is successful.
+	GetInitialToken() (releaseToken func(error) error)
+}
+
+// RetryerV2 is an interface to determine if a given error from an attempt
+// should be retried, and if so what backoff delay to apply. The default
+// implementation used by most services is the retry package's Standard type.
+// Which contains basic retry logic using exponential backoff.
+//
+// RetryerV2 replaces the Retryer interface, deprecating the GetInitialToken
+// method in favor of GetAttemptToken which takes a context, and can return an error.
+//
+// The SDK's retry package's Attempt middleware, and utilities will always
+// wrap a Retryer as a RetryerV2. Delegating to GetInitialToken, only if
+// GetAttemptToken is not implemented.
+type RetryerV2 interface {
+	Retryer
+
+	// GetInitialToken returns the initial attempt token that can increment the
+	// retry token pool if the attempt is successful.
+	//
+	// Deprecated: This method does not provide a way to block using Context,
+	// nor can it return an error. Use RetryerV2, and GetAttemptToken instead.
+	GetInitialToken() (releaseToken func(error) error)
+
+	// GetAttemptToken returns the send token that can be used to rate limit
+	// attempt calls. Will be used by the SDK's retry package's Attempt
+	// middleware to get a send token prior to making the attempt and releasing
+	// the send token after the attempt has been made.
+	GetAttemptToken(context.Context) (func(error) error, error)
+}
+
+// NopRetryer provides a RequestRetryDecider implementation that will flag
+// all attempt errors as not retryable, with a max attempts of 1.
+type NopRetryer struct{}
+
+// IsErrorRetryable returns false for all error values.
+func (NopRetryer) IsErrorRetryable(error) bool { return false }
+
+// MaxAttempts always returns 1 for the original attempt.
+func (NopRetryer) MaxAttempts() int { return 1 }
+
+// RetryDelay is not valid for the NopRetryer. Will always return error.
+func (NopRetryer) RetryDelay(int, error) (time.Duration, error) {
+	return 0, fmt.Errorf("not retrying any attempt errors")
+}
+
+// GetRetryToken returns a stub function that does nothing.
+func (NopRetryer) GetRetryToken(context.Context, error) (func(error) error, error) {
+	return nopReleaseToken, nil
+}
+
+// GetInitialToken returns a stub function that does nothing.
+func (NopRetryer) GetInitialToken() func(error) error {
+	return nopReleaseToken
+}
+
+// GetAttemptToken returns a stub function that does nothing.
+func (NopRetryer) GetAttemptToken(context.Context) (func(error) error, error) {
+	return nopReleaseToken, nil
+}
+
+func nopReleaseToken(error) error { return nil }
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/aws/runtime.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/aws/runtime.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/aws/runtime.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/aws/runtime.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,14 @@
+package aws
+
+// ExecutionEnvironmentID is the AWS execution environment runtime identifier.
+type ExecutionEnvironmentID string
+
+// RuntimeEnvironment is a collection of values that are determined at runtime
+// based on the environment that the SDK is executing in. Some of these values
+// may or may not be present based on the executing environment and certain SDK
+// configuration properties that drive whether these values are populated.
+type RuntimeEnvironment struct {
+	EnvironmentIdentifier     ExecutionEnvironmentID
+	Region                    string
+	EC2InstanceMetadataRegion string
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/cache.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/cache.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/cache.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/cache.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,115 @@
+package v4
+
+import (
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+)
+
+func lookupKey(service, region string) string {
+	var s strings.Builder
+	s.Grow(len(region) + len(service) + 3)
+	s.WriteString(region)
+	s.WriteRune('/')
+	s.WriteString(service)
+	return s.String()
+}
+
+type derivedKey struct {
+	AccessKey  string
+	Date       time.Time
+	Credential []byte
+}
+
+type derivedKeyCache struct {
+	values map[string]derivedKey
+	mutex  sync.RWMutex
+}
+
+func newDerivedKeyCache() derivedKeyCache {
+	return derivedKeyCache{
+		values: make(map[string]derivedKey),
+	}
+}
+
+func (s *derivedKeyCache) Get(credentials aws.Credentials, service, region string, signingTime SigningTime) []byte {
+	key := lookupKey(service, region)
+	s.mutex.RLock()
+	if cred, ok := s.get(key, credentials, signingTime.Time); ok {
+		s.mutex.RUnlock()
+		return cred
+	}
+	s.mutex.RUnlock()
+
+	s.mutex.Lock()
+	if cred, ok := s.get(key, credentials, signingTime.Time); ok {
+		s.mutex.Unlock()
+		return cred
+	}
+	cred := deriveKey(credentials.SecretAccessKey, service, region, signingTime)
+	entry := derivedKey{
+		AccessKey:  credentials.AccessKeyID,
+		Date:       signingTime.Time,
+		Credential: cred,
+	}
+	s.values[key] = entry
+	s.mutex.Unlock()
+
+	return cred
+}
+
+func (s *derivedKeyCache) get(key string, credentials aws.Credentials, signingTime time.Time) ([]byte, bool) {
+	cacheEntry, ok := s.retrieveFromCache(key)
+	if ok && cacheEntry.AccessKey == credentials.AccessKeyID && isSameDay(signingTime, cacheEntry.Date) {
+		return cacheEntry.Credential, true
+	}
+	return nil, false
+}
+
+func (s *derivedKeyCache) retrieveFromCache(key string) (derivedKey, bool) {
+	if v, ok := s.values[key]; ok {
+		return v, true
+	}
+	return derivedKey{}, false
+}
+
+// SigningKeyDeriver derives a signing key from a set of credentials
+type SigningKeyDeriver struct {
+	cache derivedKeyCache
+}
+
+// NewSigningKeyDeriver returns a new SigningKeyDeriver
+func NewSigningKeyDeriver() *SigningKeyDeriver {
+	return &SigningKeyDeriver{
+		cache: newDerivedKeyCache(),
+	}
+}
+
+// DeriveKey returns a derived signing key from the given credentials to be used with SigV4 signing.
+func (k *SigningKeyDeriver) DeriveKey(credential aws.Credentials, service, region string, signingTime SigningTime) []byte {
+	return k.cache.Get(credential, service, region, signingTime)
+}
+
+func deriveKey(secret, service, region string, t SigningTime) []byte {
+	hmacDate := HMACSHA256([]byte("AWS4"+secret), []byte(t.ShortTimeFormat()))
+	hmacRegion := HMACSHA256(hmacDate, []byte(region))
+	hmacService := HMACSHA256(hmacRegion, []byte(service))
+	return HMACSHA256(hmacService, []byte("aws4_request"))
+}
+
+func isSameDay(x, y time.Time) bool {
+	xYear, xMonth, xDay := x.Date()
+	yYear, yMonth, yDay := y.Date()
+
+	if xYear != yYear {
+		return false
+	}
+
+	if xMonth != yMonth {
+		return false
+	}
+
+	return xDay == yDay
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/const.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/const.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/const.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/const.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,40 @@
+package v4
+
+// Signature Version 4 (SigV4) Constants
+const (
+	// EmptyStringSHA256 is the hex encoded sha256 value of an empty string
+	EmptyStringSHA256 = `e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855`
+
+	// UnsignedPayload indicates that the request payload body is unsigned
+	UnsignedPayload = "UNSIGNED-PAYLOAD"
+
+	// AmzAlgorithmKey indicates the signing algorithm
+	AmzAlgorithmKey = "X-Amz-Algorithm"
+
+	// AmzSecurityTokenKey indicates the security token to be used with temporary credentials
+	AmzSecurityTokenKey = "X-Amz-Security-Token"
+
+	// AmzDateKey is the UTC timestamp for the request in the format YYYYMMDD'T'HHMMSS'Z'
+	AmzDateKey = "X-Amz-Date"
+
+	// AmzCredentialKey is the access key ID and credential scope
+	AmzCredentialKey = "X-Amz-Credential"
+
+	// AmzSignedHeadersKey is the set of headers signed for the request
+	AmzSignedHeadersKey = "X-Amz-SignedHeaders"
+
+	// AmzSignatureKey is the query parameter to store the SigV4 signature
+	AmzSignatureKey = "X-Amz-Signature"
+
+	// TimeFormat is the time format to be used in the X-Amz-Date header or query parameter
+	TimeFormat = "20060102T150405Z"
+
+	// ShortTimeFormat is the shorten time format used in the credential scope
+	ShortTimeFormat = "20060102"
+
+	// ContentSHAKey is the SHA256 of request body
+	ContentSHAKey = "X-Amz-Content-Sha256"
+
+	// StreamingEventsPayload indicates that the request payload body is a signed event stream.
+	StreamingEventsPayload = "STREAMING-AWS4-HMAC-SHA256-EVENTS"
+)
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/header_rules.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/header_rules.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/header_rules.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/header_rules.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,82 @@
+package v4
+
+import (
+	sdkstrings "github.com/aws/aws-sdk-go-v2/internal/strings"
+)
+
+// Rules houses a set of Rule needed for validation of a
+// string value
+type Rules []Rule
+
+// Rule interface allows for more flexible rules and just simply
+// checks whether or not a value adheres to that Rule
+type Rule interface {
+	IsValid(value string) bool
+}
+
+// IsValid will iterate through all rules and see if any rules
+// apply to the value and supports nested rules
+func (r Rules) IsValid(value string) bool {
+	for _, rule := range r {
+		if rule.IsValid(value) {
+			return true
+		}
+	}
+	return false
+}
+
+// MapRule generic Rule for maps
+type MapRule map[string]struct{}
+
+// IsValid for the map Rule satisfies whether it exists in the map
+func (m MapRule) IsValid(value string) bool {
+	_, ok := m[value]
+	return ok
+}
+
+// AllowList is a generic Rule for include listing
+type AllowList struct {
+	Rule
+}
+
+// IsValid for AllowList checks if the value is within the AllowList
+func (w AllowList) IsValid(value string) bool {
+	return w.Rule.IsValid(value)
+}
+
+// ExcludeList is a generic Rule for exclude listing
+type ExcludeList struct {
+	Rule
+}
+
+// IsValid for AllowList checks if the value is within the AllowList
+func (b ExcludeList) IsValid(value string) bool {
+	return !b.Rule.IsValid(value)
+}
+
+// Patterns is a list of strings to match against
+type Patterns []string
+
+// IsValid for Patterns checks each pattern and returns if a match has
+// been found
+func (p Patterns) IsValid(value string) bool {
+	for _, pattern := range p {
+		if sdkstrings.HasPrefixFold(value, pattern) {
+			return true
+		}
+	}
+	return false
+}
+
+// InclusiveRules rules allow for rules to depend on one another
+type InclusiveRules []Rule
+
+// IsValid will return true if all rules are true
+func (r InclusiveRules) IsValid(value string) bool {
+	for _, rule := range r {
+		if !rule.IsValid(value) {
+			return false
+		}
+	}
+	return true
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/headers.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/headers.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/headers.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/headers.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,70 @@
+package v4
+
+// IgnoredHeaders is a list of headers that are ignored during signing
+var IgnoredHeaders = Rules{
+	ExcludeList{
+		MapRule{
+			"Authorization":   struct{}{},
+			"User-Agent":      struct{}{},
+			"X-Amzn-Trace-Id": struct{}{},
+			"Expect":          struct{}{},
+		},
+	},
+}
+
+// RequiredSignedHeaders is a allow list for Build canonical headers.
+var RequiredSignedHeaders = Rules{
+	AllowList{
+		MapRule{
+			"Cache-Control":                         struct{}{},
+			"Content-Disposition":                   struct{}{},
+			"Content-Encoding":                      struct{}{},
+			"Content-Language":                      struct{}{},
+			"Content-Md5":                           struct{}{},
+			"Content-Type":                          struct{}{},
+			"Expires":                               struct{}{},
+			"If-Match":                              struct{}{},
+			"If-Modified-Since":                     struct{}{},
+			"If-None-Match":                         struct{}{},
+			"If-Unmodified-Since":                   struct{}{},
+			"Range":                                 struct{}{},
+			"X-Amz-Acl":                             struct{}{},
+			"X-Amz-Copy-Source":                     struct{}{},
+			"X-Amz-Copy-Source-If-Match":            struct{}{},
+			"X-Amz-Copy-Source-If-Modified-Since":   struct{}{},
+			"X-Amz-Copy-Source-If-None-Match":       struct{}{},
+			"X-Amz-Copy-Source-If-Unmodified-Since": struct{}{},
+			"X-Amz-Copy-Source-Range":               struct{}{},
+			"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm": struct{}{},
+			"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key":       struct{}{},
+			"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5":   struct{}{},
+			"X-Amz-Grant-Full-control":                                    struct{}{},
+			"X-Amz-Grant-Read":                                            struct{}{},
+			"X-Amz-Grant-Read-Acp":                                        struct{}{},
+			"X-Amz-Grant-Write":                                           struct{}{},
+			"X-Amz-Grant-Write-Acp":                                       struct{}{},
+			"X-Amz-Metadata-Directive":                                    struct{}{},
+			"X-Amz-Mfa":                                                   struct{}{},
+			"X-Amz-Request-Payer":                                         struct{}{},
+			"X-Amz-Server-Side-Encryption":                                struct{}{},
+			"X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id":                 struct{}{},
+			"X-Amz-Server-Side-Encryption-Context":                        struct{}{},
+			"X-Amz-Server-Side-Encryption-Customer-Algorithm":             struct{}{},
+			"X-Amz-Server-Side-Encryption-Customer-Key":                   struct{}{},
+			"X-Amz-Server-Side-Encryption-Customer-Key-Md5":               struct{}{},
+			"X-Amz-Storage-Class":                                         struct{}{},
+			"X-Amz-Website-Redirect-Location":                             struct{}{},
+			"X-Amz-Content-Sha256":                                        struct{}{},
+			"X-Amz-Tagging":                                               struct{}{},
+		},
+	},
+	Patterns{"X-Amz-Object-Lock-"},
+	Patterns{"X-Amz-Meta-"},
+}
+
+// AllowedQueryHoisting is a allowed list for Build query headers. The boolean value
+// represents whether or not it is a pattern.
+var AllowedQueryHoisting = InclusiveRules{
+	ExcludeList{RequiredSignedHeaders},
+	Patterns{"X-Amz-"},
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/hmac.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/hmac.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/hmac.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/hmac.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,13 @@
+package v4
+
+import (
+	"crypto/hmac"
+	"crypto/sha256"
+)
+
+// HMACSHA256 computes a HMAC-SHA256 of data given the provided key.
+func HMACSHA256(key []byte, data []byte) []byte {
+	hash := hmac.New(sha256.New, key)
+	hash.Write(data)
+	return hash.Sum(nil)
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/host.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/host.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/host.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/host.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,75 @@
+package v4
+
+import (
+	"net/http"
+	"strings"
+)
+
+// SanitizeHostForHeader removes default port from host and updates request.Host
+func SanitizeHostForHeader(r *http.Request) {
+	host := getHost(r)
+	port := portOnly(host)
+	if port != "" && isDefaultPort(r.URL.Scheme, port) {
+		r.Host = stripPort(host)
+	}
+}
+
+// Returns host from request
+func getHost(r *http.Request) string {
+	if r.Host != "" {
+		return r.Host
+	}
+
+	return r.URL.Host
+}
+
+// Hostname returns u.Host, without any port number.
+//
+// If Host is an IPv6 literal with a port number, Hostname returns the
+// IPv6 literal without the square brackets. IPv6 literals may include
+// a zone identifier.
+//
+// Copied from the Go 1.8 standard library (net/url)
+func stripPort(hostport string) string {
+	colon := strings.IndexByte(hostport, ':')
+	if colon == -1 {
+		return hostport
+	}
+	if i := strings.IndexByte(hostport, ']'); i != -1 {
+		return strings.TrimPrefix(hostport[:i], "[")
+	}
+	return hostport[:colon]
+}
+
+// Port returns the port part of u.Host, without the leading colon.
+// If u.Host doesn't contain a port, Port returns an empty string.
+//
+// Copied from the Go 1.8 standard library (net/url)
+func portOnly(hostport string) string {
+	colon := strings.IndexByte(hostport, ':')
+	if colon == -1 {
+		return ""
+	}
+	if i := strings.Index(hostport, "]:"); i != -1 {
+		return hostport[i+len("]:"):]
+	}
+	if strings.Contains(hostport, "]") {
+		return ""
+	}
+	return hostport[colon+len(":"):]
+}
+
+// Returns true if the specified URI is using the standard port
+// (i.e. port 80 for HTTP URIs or 443 for HTTPS URIs)
+func isDefaultPort(scheme, port string) bool {
+	if port == "" {
+		return true
+	}
+
+	lowerCaseScheme := strings.ToLower(scheme)
+	if (lowerCaseScheme == "http" && port == "80") || (lowerCaseScheme == "https" && port == "443") {
+		return true
+	}
+
+	return false
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/scope.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/scope.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/scope.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/scope.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,13 @@
+package v4
+
+import "strings"
+
+// BuildCredentialScope builds the Signature Version 4 (SigV4) signing scope
+func BuildCredentialScope(signingTime SigningTime, region, service string) string {
+	return strings.Join([]string{
+		signingTime.ShortTimeFormat(),
+		region,
+		service,
+		"aws4_request",
+	}, "/")
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/time.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/time.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/time.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/time.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,36 @@
+package v4
+
+import "time"
+
+// SigningTime provides a wrapper around a time.Time which provides cached values for SigV4 signing.
+type SigningTime struct {
+	time.Time
+	timeFormat      string
+	shortTimeFormat string
+}
+
+// NewSigningTime creates a new SigningTime given a time.Time
+func NewSigningTime(t time.Time) SigningTime {
+	return SigningTime{
+		Time: t,
+	}
+}
+
+// TimeFormat provides a time formatted in the X-Amz-Date format.
+func (m *SigningTime) TimeFormat() string {
+	return m.format(&m.timeFormat, TimeFormat)
+}
+
+// ShortTimeFormat provides a time formatted of 20060102.
+func (m *SigningTime) ShortTimeFormat() string {
+	return m.format(&m.shortTimeFormat, ShortTimeFormat)
+}
+
+func (m *SigningTime) format(target *string, format string) string {
+	if len(*target) > 0 {
+		return *target
+	}
+	v := m.Time.Format(format)
+	*target = v
+	return v
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/util.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/util.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/util.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/util.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,80 @@
+package v4
+
+import (
+	"net/url"
+	"strings"
+)
+
+const doubleSpace = "  "
+
+// StripExcessSpaces will rewrite the passed in slice's string values to not
+// contain multiple side-by-side spaces.
+func StripExcessSpaces(str string) string {
+	var j, k, l, m, spaces int
+	// Trim trailing spaces
+	for j = len(str) - 1; j >= 0 && str[j] == ' '; j-- {
+	}
+
+	// Trim leading spaces
+	for k = 0; k < j && str[k] == ' '; k++ {
+	}
+	str = str[k : j+1]
+
+	// Strip multiple spaces.
+	j = strings.Index(str, doubleSpace)
+	if j < 0 {
+		return str
+	}
+
+	buf := []byte(str)
+	for k, m, l = j, j, len(buf); k < l; k++ {
+		if buf[k] == ' ' {
+			if spaces == 0 {
+				// First space.
+				buf[m] = buf[k]
+				m++
+			}
+			spaces++
+		} else {
+			// End of multiple spaces.
+			spaces = 0
+			buf[m] = buf[k]
+			m++
+		}
+	}
+
+	return string(buf[:m])
+}
+
+// GetURIPath returns the escaped URI component from the provided URL.
+func GetURIPath(u *url.URL) string {
+	var uriPath string
+
+	if len(u.Opaque) > 0 {
+		const schemeSep, pathSep, queryStart = "//", "/", "?"
+
+		opaque := u.Opaque
+		// Cut off the query string if present.
+		if idx := strings.Index(opaque, queryStart); idx >= 0 {
+			opaque = opaque[:idx]
+		}
+
+		// Cutout the scheme separator if present.
+		if strings.Index(opaque, schemeSep) == 0 {
+			opaque = opaque[len(schemeSep):]
+		}
+
+		// capture URI path starting with first path separator.
+		if idx := strings.Index(opaque, pathSep); idx >= 0 {
+			uriPath = opaque[idx:]
+		}
+	} else {
+		uriPath = u.EscapedPath()
+	}
+
+	if len(uriPath) == 0 {
+		uriPath = "/"
+	}
+
+	return uriPath
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/middleware.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/middleware.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/middleware.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/middleware.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,414 @@
+package v4
+
+import (
+	"context"
+	"crypto/sha256"
+	"encoding/hex"
+	"fmt"
+	"io"
+	"net/http"
+	"strings"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+	awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+	v4Internal "github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4"
+	internalauth "github.com/aws/aws-sdk-go-v2/internal/auth"
+	"github.com/aws/aws-sdk-go-v2/internal/sdk"
+	"github.com/aws/smithy-go/middleware"
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+const computePayloadHashMiddlewareID = "ComputePayloadHash"
+
+// HashComputationError indicates an error occurred while computing the signing hash
+type HashComputationError struct {
+	Err error
+}
+
+// Error is the error message
+func (e *HashComputationError) Error() string {
+	return fmt.Sprintf("failed to compute payload hash: %v", e.Err)
+}
+
+// Unwrap returns the underlying error if one is set
+func (e *HashComputationError) Unwrap() error {
+	return e.Err
+}
+
+// SigningError indicates an error condition occurred while performing SigV4 signing
+type SigningError struct {
+	Err error
+}
+
+func (e *SigningError) Error() string {
+	return fmt.Sprintf("failed to sign request: %v", e.Err)
+}
+
+// Unwrap returns the underlying error cause
+func (e *SigningError) Unwrap() error {
+	return e.Err
+}
+
+// UseDynamicPayloadSigningMiddleware swaps the compute payload sha256 middleware with a resolver middleware that
+// switches between unsigned and signed payload based on TLS state for request.
+// This middleware should not be used for AWS APIs that do not support unsigned payload signing auth.
+// By default, SDK uses this middleware for known AWS APIs that support such TLS based auth selection .
+//
+// Usage example -
+// S3 PutObject API allows unsigned payload signing auth usage when TLS is enabled, and uses this middleware to
+// dynamically switch between unsigned and signed payload based on TLS state for request.
+func UseDynamicPayloadSigningMiddleware(stack *middleware.Stack) error {
+	_, err := stack.Finalize.Swap(computePayloadHashMiddlewareID, &dynamicPayloadSigningMiddleware{})
+	return err
+}
+
+// dynamicPayloadSigningMiddleware dynamically resolves the middleware that computes and set payload sha256 middleware.
+type dynamicPayloadSigningMiddleware struct {
+}
+
+// ID returns the resolver identifier
+func (m *dynamicPayloadSigningMiddleware) ID() string {
+	return computePayloadHashMiddlewareID
+}
+
+// HandleFinalize delegates SHA256 computation according to whether the request
+// is TLS-enabled.
+func (m *dynamicPayloadSigningMiddleware) HandleFinalize(
+	ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler,
+) (
+	out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
+) {
+	req, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, fmt.Errorf("unknown transport type %T", in.Request)
+	}
+
+	if req.IsHTTPS() {
+		return (&UnsignedPayload{}).HandleFinalize(ctx, in, next)
+	}
+	return (&ComputePayloadSHA256{}).HandleFinalize(ctx, in, next)
+}
+
+// UnsignedPayload sets the SigV4 request payload hash to unsigned.
+//
+// Will not set the Unsigned Payload magic SHA value, if a SHA has already been
+// stored in the context. (e.g. application pre-computed SHA256 before making
+// API call).
+//
+// This middleware does not check the X-Amz-Content-Sha256 header, if that
+// header is serialized a middleware must translate it into the context.
+type UnsignedPayload struct{}
+
+// AddUnsignedPayloadMiddleware adds unsignedPayload to the operation
+// middleware stack
+func AddUnsignedPayloadMiddleware(stack *middleware.Stack) error {
+	return stack.Finalize.Insert(&UnsignedPayload{}, "ResolveEndpointV2", middleware.After)
+}
+
+// ID returns the unsignedPayload identifier
+func (m *UnsignedPayload) ID() string {
+	return computePayloadHashMiddlewareID
+}
+
+// HandleFinalize sets the payload hash magic value to the unsigned sentinel.
+func (m *UnsignedPayload) HandleFinalize(
+	ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler,
+) (
+	out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
+) {
+	if GetPayloadHash(ctx) == "" {
+		ctx = SetPayloadHash(ctx, v4Internal.UnsignedPayload)
+	}
+	return next.HandleFinalize(ctx, in)
+}
+
+// ComputePayloadSHA256 computes SHA256 payload hash to sign.
+//
+// Will not set the Unsigned Payload magic SHA value, if a SHA has already been
+// stored in the context. (e.g. application pre-computed SHA256 before making
+// API call).
+//
+// This middleware does not check the X-Amz-Content-Sha256 header, if that
+// header is serialized a middleware must translate it into the context.
+type ComputePayloadSHA256 struct{}
+
+// AddComputePayloadSHA256Middleware adds computePayloadSHA256 to the
+// operation middleware stack
+func AddComputePayloadSHA256Middleware(stack *middleware.Stack) error {
+	return stack.Finalize.Insert(&ComputePayloadSHA256{}, "ResolveEndpointV2", middleware.After)
+}
+
+// RemoveComputePayloadSHA256Middleware removes computePayloadSHA256 from the
+// operation middleware stack
+func RemoveComputePayloadSHA256Middleware(stack *middleware.Stack) error {
+	_, err := stack.Finalize.Remove(computePayloadHashMiddlewareID)
+	return err
+}
+
+// ID is the middleware name
+func (m *ComputePayloadSHA256) ID() string {
+	return computePayloadHashMiddlewareID
+}
+
+// HandleFinalize computes the payload hash for the request, storing it to the
+// context. This is a no-op if a caller has previously set that value.
+func (m *ComputePayloadSHA256) HandleFinalize(
+	ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler,
+) (
+	out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
+) {
+	if GetPayloadHash(ctx) != "" {
+		return next.HandleFinalize(ctx, in)
+	}
+
+	req, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, &HashComputationError{
+			Err: fmt.Errorf("unexpected request middleware type %T", in.Request),
+		}
+	}
+
+	hash := sha256.New()
+	if stream := req.GetStream(); stream != nil {
+		_, err = io.Copy(hash, stream)
+		if err != nil {
+			return out, metadata, &HashComputationError{
+				Err: fmt.Errorf("failed to compute payload hash, %w", err),
+			}
+		}
+
+		if err := req.RewindStream(); err != nil {
+			return out, metadata, &HashComputationError{
+				Err: fmt.Errorf("failed to seek body to start, %w", err),
+			}
+		}
+	}
+
+	ctx = SetPayloadHash(ctx, hex.EncodeToString(hash.Sum(nil)))
+
+	return next.HandleFinalize(ctx, in)
+}
+
+// SwapComputePayloadSHA256ForUnsignedPayloadMiddleware replaces the
+// ComputePayloadSHA256 middleware with the UnsignedPayload middleware.
+//
+// Use this to disable computing the Payload SHA256 checksum and instead use
+// UNSIGNED-PAYLOAD for the SHA256 value.
+func SwapComputePayloadSHA256ForUnsignedPayloadMiddleware(stack *middleware.Stack) error {
+	_, err := stack.Finalize.Swap(computePayloadHashMiddlewareID, &UnsignedPayload{})
+	return err
+}
+
+// ContentSHA256Header sets the X-Amz-Content-Sha256 header value to
+// the Payload hash stored in the context.
+type ContentSHA256Header struct{}
+
+// AddContentSHA256HeaderMiddleware adds ContentSHA256Header to the
+// operation middleware stack
+func AddContentSHA256HeaderMiddleware(stack *middleware.Stack) error {
+	return stack.Finalize.Insert(&ContentSHA256Header{}, computePayloadHashMiddlewareID, middleware.After)
+}
+
+// RemoveContentSHA256HeaderMiddleware removes contentSHA256Header middleware
+// from the operation middleware stack
+func RemoveContentSHA256HeaderMiddleware(stack *middleware.Stack) error {
+	_, err := stack.Finalize.Remove((*ContentSHA256Header)(nil).ID())
+	return err
+}
+
+// ID returns the ContentSHA256HeaderMiddleware identifier
+func (m *ContentSHA256Header) ID() string {
+	return "SigV4ContentSHA256Header"
+}
+
+// HandleFinalize sets the X-Amz-Content-Sha256 header value to the Payload hash
+// stored in the context.
+func (m *ContentSHA256Header) HandleFinalize(
+	ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler,
+) (
+	out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
+) {
+	req, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, &HashComputationError{Err: fmt.Errorf("unexpected request middleware type %T", in.Request)}
+	}
+
+	req.Header.Set(v4Internal.ContentSHAKey, GetPayloadHash(ctx))
+	return next.HandleFinalize(ctx, in)
+}
+
+// SignHTTPRequestMiddlewareOptions is the configuration options for
+// [SignHTTPRequestMiddleware].
+//
+// Deprecated: [SignHTTPRequestMiddleware] is deprecated.
+type SignHTTPRequestMiddlewareOptions struct {
+	CredentialsProvider aws.CredentialsProvider
+	Signer              HTTPSigner
+	LogSigning          bool
+}
+
+// SignHTTPRequestMiddleware is a `FinalizeMiddleware` implementation for SigV4
+// HTTP Signing.
+//
+// Deprecated: AWS service clients no longer use this middleware. Signing as an
+// SDK operation is now performed through an internal per-service middleware
+// which opaquely selects and uses the signer from the resolved auth scheme.
+type SignHTTPRequestMiddleware struct {
+	credentialsProvider aws.CredentialsProvider
+	signer              HTTPSigner
+	logSigning          bool
+}
+
+// NewSignHTTPRequestMiddleware constructs a [SignHTTPRequestMiddleware] using
+// the given [Signer] for signing requests.
+//
+// Deprecated: SignHTTPRequestMiddleware is deprecated.
+func NewSignHTTPRequestMiddleware(options SignHTTPRequestMiddlewareOptions) *SignHTTPRequestMiddleware {
+	return &SignHTTPRequestMiddleware{
+		credentialsProvider: options.CredentialsProvider,
+		signer:              options.Signer,
+		logSigning:          options.LogSigning,
+	}
+}
+
+// ID is the SignHTTPRequestMiddleware identifier.
+//
+// Deprecated: SignHTTPRequestMiddleware is deprecated.
+func (s *SignHTTPRequestMiddleware) ID() string {
+	return "Signing"
+}
+
+// HandleFinalize will take the provided input and sign the request using the
+// SigV4 authentication scheme.
+//
+// Deprecated: SignHTTPRequestMiddleware is deprecated.
+func (s *SignHTTPRequestMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (
+	out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
+) {
+	if !haveCredentialProvider(s.credentialsProvider) {
+		return next.HandleFinalize(ctx, in)
+	}
+
+	req, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, &SigningError{Err: fmt.Errorf("unexpected request middleware type %T", in.Request)}
+	}
+
+	signingName, signingRegion := awsmiddleware.GetSigningName(ctx), awsmiddleware.GetSigningRegion(ctx)
+	payloadHash := GetPayloadHash(ctx)
+	if len(payloadHash) == 0 {
+		return out, metadata, &SigningError{Err: fmt.Errorf("computed payload hash missing from context")}
+	}
+
+	credentials, err := s.credentialsProvider.Retrieve(ctx)
+	if err != nil {
+		return out, metadata, &SigningError{Err: fmt.Errorf("failed to retrieve credentials: %w", err)}
+	}
+
+	signerOptions := []func(o *SignerOptions){
+		func(o *SignerOptions) {
+			o.Logger = middleware.GetLogger(ctx)
+			o.LogSigning = s.logSigning
+		},
+	}
+
+	// existing DisableURIPathEscaping is equivalent in purpose
+	// to authentication scheme property DisableDoubleEncoding
+	disableDoubleEncoding, overridden := internalauth.GetDisableDoubleEncoding(ctx)
+	if overridden {
+		signerOptions = append(signerOptions, func(o *SignerOptions) {
+			o.DisableURIPathEscaping = disableDoubleEncoding
+		})
+	}
+
+	err = s.signer.SignHTTP(ctx, credentials, req.Request, payloadHash, signingName, signingRegion, sdk.NowTime(), signerOptions...)
+	if err != nil {
+		return out, metadata, &SigningError{Err: fmt.Errorf("failed to sign http request, %w", err)}
+	}
+
+	ctx = awsmiddleware.SetSigningCredentials(ctx, credentials)
+
+	return next.HandleFinalize(ctx, in)
+}
+
+// StreamingEventsPayload signs input event stream messages.
+type StreamingEventsPayload struct{}
+
+// AddStreamingEventsPayload adds the streamingEventsPayload middleware to the stack.
+func AddStreamingEventsPayload(stack *middleware.Stack) error {
+	return stack.Finalize.Add(&StreamingEventsPayload{}, middleware.Before)
+}
+
+// ID identifies the middleware.
+func (s *StreamingEventsPayload) ID() string {
+	return computePayloadHashMiddlewareID
+}
+
+// HandleFinalize marks the input stream to be signed with SigV4.
+func (s *StreamingEventsPayload) HandleFinalize(
+	ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler,
+) (
+	out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
+) {
+	contentSHA := GetPayloadHash(ctx)
+	if len(contentSHA) == 0 {
+		contentSHA = v4Internal.StreamingEventsPayload
+	}
+
+	ctx = SetPayloadHash(ctx, contentSHA)
+
+	return next.HandleFinalize(ctx, in)
+}
+
+// GetSignedRequestSignature attempts to extract the signature of the request.
+// Returning an error if the request is unsigned, or unable to extract the
+// signature.
+func GetSignedRequestSignature(r *http.Request) ([]byte, error) {
+	const authHeaderSignatureElem = "Signature="
+
+	if auth := r.Header.Get(authorizationHeader); len(auth) != 0 {
+		ps := strings.Split(auth, ", ")
+		for _, p := range ps {
+			if idx := strings.Index(p, authHeaderSignatureElem); idx >= 0 {
+				sig := p[len(authHeaderSignatureElem):]
+				if len(sig) == 0 {
+					return nil, fmt.Errorf("invalid request signature authorization header")
+				}
+				return hex.DecodeString(sig)
+			}
+		}
+	}
+
+	if sig := r.URL.Query().Get("X-Amz-Signature"); len(sig) != 0 {
+		return hex.DecodeString(sig)
+	}
+
+	return nil, fmt.Errorf("request not signed")
+}
+
+func haveCredentialProvider(p aws.CredentialsProvider) bool {
+	if p == nil {
+		return false
+	}
+
+	return !aws.IsCredentialsProvider(p, (*aws.AnonymousCredentials)(nil))
+}
+
+type payloadHashKey struct{}
+
+// GetPayloadHash retrieves the payload hash to use for signing
+//
+// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues
+// to clear all stack values.
+func GetPayloadHash(ctx context.Context) (v string) {
+	v, _ = middleware.GetStackValue(ctx, payloadHashKey{}).(string)
+	return v
+}
+
+// SetPayloadHash sets the payload hash to be used for signing the request
+//
+// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues
+// to clear all stack values.
+func SetPayloadHash(ctx context.Context, hash string) context.Context {
+	return middleware.WithStackValue(ctx, payloadHashKey{}, hash)
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/presign_middleware.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/presign_middleware.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/presign_middleware.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/presign_middleware.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,127 @@
+package v4
+
+import (
+	"context"
+	"fmt"
+	"net/http"
+	"time"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+	awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+	"github.com/aws/aws-sdk-go-v2/internal/sdk"
+	"github.com/aws/smithy-go/middleware"
+	smithyHTTP "github.com/aws/smithy-go/transport/http"
+)
+
+// HTTPPresigner is an interface to a SigV4 signer that can sign create a
+// presigned URL for a HTTP requests.
+type HTTPPresigner interface {
+	PresignHTTP(
+		ctx context.Context, credentials aws.Credentials, r *http.Request,
+		payloadHash string, service string, region string, signingTime time.Time,
+		optFns ...func(*SignerOptions),
+	) (url string, signedHeader http.Header, err error)
+}
+
+// PresignedHTTPRequest provides the URL and signed headers that are included
+// in the presigned URL.
+type PresignedHTTPRequest struct {
+	URL          string
+	Method       string
+	SignedHeader http.Header
+}
+
+// PresignHTTPRequestMiddlewareOptions is the options for the PresignHTTPRequestMiddleware middleware.
+type PresignHTTPRequestMiddlewareOptions struct {
+	CredentialsProvider aws.CredentialsProvider
+	Presigner           HTTPPresigner
+	LogSigning          bool
+}
+
+// PresignHTTPRequestMiddleware provides the Finalize middleware for creating a
+// presigned URL for an HTTP request.
+//
+// Will short circuit the middleware stack and not forward onto the next
+// Finalize handler.
+type PresignHTTPRequestMiddleware struct {
+	credentialsProvider aws.CredentialsProvider
+	presigner           HTTPPresigner
+	logSigning          bool
+}
+
+// NewPresignHTTPRequestMiddleware returns a new PresignHTTPRequestMiddleware
+// initialized with the presigner.
+func NewPresignHTTPRequestMiddleware(options PresignHTTPRequestMiddlewareOptions) *PresignHTTPRequestMiddleware {
+	return &PresignHTTPRequestMiddleware{
+		credentialsProvider: options.CredentialsProvider,
+		presigner:           options.Presigner,
+		logSigning:          options.LogSigning,
+	}
+}
+
+// ID provides the middleware ID.
+func (*PresignHTTPRequestMiddleware) ID() string { return "PresignHTTPRequest" }
+
+// HandleFinalize will take the provided input and create a presigned url for
+// the http request using the SigV4 presign authentication scheme.
+//
+// Since the signed request is not a valid HTTP request
+func (s *PresignHTTPRequestMiddleware) HandleFinalize(
+	ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler,
+) (
+	out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
+) {
+	req, ok := in.Request.(*smithyHTTP.Request)
+	if !ok {
+		return out, metadata, &SigningError{
+			Err: fmt.Errorf("unexpected request middleware type %T", in.Request),
+		}
+	}
+
+	httpReq := req.Build(ctx)
+	if !haveCredentialProvider(s.credentialsProvider) {
+		out.Result = &PresignedHTTPRequest{
+			URL:          httpReq.URL.String(),
+			Method:       httpReq.Method,
+			SignedHeader: http.Header{},
+		}
+
+		return out, metadata, nil
+	}
+
+	signingName := awsmiddleware.GetSigningName(ctx)
+	signingRegion := awsmiddleware.GetSigningRegion(ctx)
+	payloadHash := GetPayloadHash(ctx)
+	if len(payloadHash) == 0 {
+		return out, metadata, &SigningError{
+			Err: fmt.Errorf("computed payload hash missing from context"),
+		}
+	}
+
+	credentials, err := s.credentialsProvider.Retrieve(ctx)
+	if err != nil {
+		return out, metadata, &SigningError{
+			Err: fmt.Errorf("failed to retrieve credentials: %w", err),
+		}
+	}
+
+	u, h, err := s.presigner.PresignHTTP(ctx, credentials,
+		httpReq, payloadHash, signingName, signingRegion, sdk.NowTime(),
+		func(o *SignerOptions) {
+			o.Logger = middleware.GetLogger(ctx)
+			o.LogSigning = s.logSigning
+		})
+	if err != nil {
+		return out, metadata, &SigningError{
+			Err: fmt.Errorf("failed to sign http request, %w", err),
+		}
+	}
+
+	out.Result = &PresignedHTTPRequest{
+		URL:          u,
+		Method:       httpReq.Method,
+		SignedHeader: h,
+	}
+
+	return out, metadata, nil
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/stream.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/stream.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/stream.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/stream.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,86 @@
+package v4
+
+import (
+	"context"
+	"crypto/sha256"
+	"encoding/hex"
+	"github.com/aws/aws-sdk-go-v2/aws"
+	v4Internal "github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4"
+	"strings"
+	"time"
+)
+
+// EventStreamSigner is an AWS EventStream protocol signer.
+type EventStreamSigner interface {
+	GetSignature(ctx context.Context, headers, payload []byte, signingTime time.Time, optFns ...func(*StreamSignerOptions)) ([]byte, error)
+}
+
+// StreamSignerOptions is the configuration options for StreamSigner.
+type StreamSignerOptions struct{}
+
+// StreamSigner implements Signature Version 4 (SigV4) signing of event stream encoded payloads.
+type StreamSigner struct {
+	options StreamSignerOptions
+
+	credentials aws.Credentials
+	service     string
+	region      string
+
+	prevSignature []byte
+
+	signingKeyDeriver *v4Internal.SigningKeyDeriver
+}
+
+// NewStreamSigner returns a new AWS EventStream protocol signer.
+func NewStreamSigner(credentials aws.Credentials, service, region string, seedSignature []byte, optFns ...func(*StreamSignerOptions)) *StreamSigner {
+	o := StreamSignerOptions{}
+
+	for _, fn := range optFns {
+		fn(&o)
+	}
+
+	return &StreamSigner{
+		options:           o,
+		credentials:       credentials,
+		service:           service,
+		region:            region,
+		signingKeyDeriver: v4Internal.NewSigningKeyDeriver(),
+		prevSignature:     seedSignature,
+	}
+}
+
+// GetSignature signs the provided header and payload bytes.
+func (s *StreamSigner) GetSignature(ctx context.Context, headers, payload []byte, signingTime time.Time, optFns ...func(*StreamSignerOptions)) ([]byte, error) {
+	options := s.options
+
+	for _, fn := range optFns {
+		fn(&options)
+	}
+
+	prevSignature := s.prevSignature
+
+	st := v4Internal.NewSigningTime(signingTime)
+
+	sigKey := s.signingKeyDeriver.DeriveKey(s.credentials, s.service, s.region, st)
+
+	scope := v4Internal.BuildCredentialScope(st, s.region, s.service)
+
+	stringToSign := s.buildEventStreamStringToSign(headers, payload, prevSignature, scope, &st)
+
+	signature := v4Internal.HMACSHA256(sigKey, []byte(stringToSign))
+	s.prevSignature = signature
+
+	return signature, nil
+}
+
+func (s *StreamSigner) buildEventStreamStringToSign(headers, payload, previousSignature []byte, credentialScope string, signingTime *v4Internal.SigningTime) string {
+	hash := sha256.New()
+	return strings.Join([]string{
+		"AWS4-HMAC-SHA256-PAYLOAD",
+		signingTime.TimeFormat(),
+		credentialScope,
+		hex.EncodeToString(previousSignature),
+		hex.EncodeToString(makeHash(hash, headers)),
+		hex.EncodeToString(makeHash(hash, payload)),
+	}, "\n")
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/v4.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/v4.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/v4.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/v4.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,559 @@
+// Package v4 implements the AWS signature version 4 algorithm (commonly known
+// as SigV4).
+//
+// For more information about SigV4, see [Signing AWS API requests] in the IAM
+// user guide.
+//
+// While this implementation CAN work in an external context, it is developed
+// primarily for SDK use and you may encounter fringe behaviors around header
+// canonicalization.
+//
+// # Pre-escaping a request URI
+//
+// AWS v4 signature validation requires that the canonical string's URI path
+// component must be the escaped form of the HTTP request's path.
+//
+// The Go HTTP client will perform escaping automatically on the HTTP request.
+// This may cause signature validation errors because the request differs from
+// the URI path or query from which the signature was generated.
+//
+// Because of this, we recommend that you explicitly escape the request when
+// using this signer outside of the SDK to prevent possible signature mismatch.
+// This can be done by setting URL.Opaque on the request. The signer will
+// prefer that value, falling back to the return of URL.EscapedPath if unset.
+//
+// When setting URL.Opaque you must do so in the form of:
+//
+//	"//<hostname>/<path>"
+//
+//	// e.g.
+//	"//example.com/some/path"
+//
+// The leading "//" and hostname are required or the escaping will not work
+// correctly.
+//
+// The TestStandaloneSign unit test provides a complete example of using the
+// signer outside of the SDK and pre-escaping the URI path.
+//
+// [Signing AWS API requests]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_aws-signing.html
+package v4
+
+import (
+	"context"
+	"crypto/sha256"
+	"encoding/hex"
+	"fmt"
+	"hash"
+	"net/http"
+	"net/textproto"
+	"net/url"
+	"sort"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+	v4Internal "github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4"
+	"github.com/aws/smithy-go/encoding/httpbinding"
+	"github.com/aws/smithy-go/logging"
+)
+
+const (
+	signingAlgorithm    = "AWS4-HMAC-SHA256"
+	authorizationHeader = "Authorization"
+
+	// Version of signing v4
+	Version = "SigV4"
+)
+
+// HTTPSigner is an interface to a SigV4 signer that can sign HTTP requests
+type HTTPSigner interface {
+	SignHTTP(ctx context.Context, credentials aws.Credentials, r *http.Request, payloadHash string, service string, region string, signingTime time.Time, optFns ...func(*SignerOptions)) error
+}
+
+type keyDerivator interface {
+	DeriveKey(credential aws.Credentials, service, region string, signingTime v4Internal.SigningTime) []byte
+}
+
+// SignerOptions is the SigV4 Signer options.
+type SignerOptions struct {
+	// Disables the Signer's moving HTTP header key/value pairs from the HTTP
+	// request header to the request's query string. This is most commonly used
+	// with pre-signed requests preventing headers from being added to the
+	// request's query string.
+	DisableHeaderHoisting bool
+
+	// Disables the automatic escaping of the URI path of the request for the
+	// signature's canonical string's path. For services that do not need additional
+	// escaping then use this to disable the signer escaping the path.
+	//
+	// S3 is an example of a service that does not need additional escaping.
+	//
+	// http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html
+	DisableURIPathEscaping bool
+
+	// The logger to send log messages to.
+	Logger logging.Logger
+
+	// Enable logging of signed requests.
+	// This will enable logging of the canonical request, the string to sign, and for presigning the subsequent
+	// presigned URL.
+	LogSigning bool
+
+	// Disables setting the session token on the request as part of signing
+	// through X-Amz-Security-Token. This is needed for variations of v4 that
+	// present the token elsewhere.
+	DisableSessionToken bool
+}
+
+// Signer applies AWS v4 signing to given request. Use this to sign requests
+// that need to be signed with AWS V4 Signatures.
+type Signer struct {
+	options      SignerOptions
+	keyDerivator keyDerivator
+}
+
+// NewSigner returns a new SigV4 Signer
+func NewSigner(optFns ...func(signer *SignerOptions)) *Signer {
+	options := SignerOptions{}
+
+	for _, fn := range optFns {
+		fn(&options)
+	}
+
+	return &Signer{options: options, keyDerivator: v4Internal.NewSigningKeyDeriver()}
+}
+
+type httpSigner struct {
+	Request      *http.Request
+	ServiceName  string
+	Region       string
+	Time         v4Internal.SigningTime
+	Credentials  aws.Credentials
+	KeyDerivator keyDerivator
+	IsPreSign    bool
+
+	PayloadHash string
+
+	DisableHeaderHoisting  bool
+	DisableURIPathEscaping bool
+	DisableSessionToken    bool
+}
+
+func (s *httpSigner) Build() (signedRequest, error) {
+	req := s.Request
+
+	query := req.URL.Query()
+	headers := req.Header
+
+	s.setRequiredSigningFields(headers, query)
+
+	// Sort Each Query Key's Values
+	for key := range query {
+		sort.Strings(query[key])
+	}
+
+	v4Internal.SanitizeHostForHeader(req)
+
+	credentialScope := s.buildCredentialScope()
+	credentialStr := s.Credentials.AccessKeyID + "/" + credentialScope
+	if s.IsPreSign {
+		query.Set(v4Internal.AmzCredentialKey, credentialStr)
+	}
+
+	unsignedHeaders := headers
+	if s.IsPreSign && !s.DisableHeaderHoisting {
+		var urlValues url.Values
+		urlValues, unsignedHeaders = buildQuery(v4Internal.AllowedQueryHoisting, headers)
+		for k := range urlValues {
+			query[k] = urlValues[k]
+		}
+	}
+
+	host := req.URL.Host
+	if len(req.Host) > 0 {
+		host = req.Host
+	}
+
+	signedHeaders, signedHeadersStr, canonicalHeaderStr := s.buildCanonicalHeaders(host, v4Internal.IgnoredHeaders, unsignedHeaders, s.Request.ContentLength)
+
+	if s.IsPreSign {
+		query.Set(v4Internal.AmzSignedHeadersKey, signedHeadersStr)
+	}
+
+	var rawQuery strings.Builder
+	rawQuery.WriteString(strings.Replace(query.Encode(), "+", "%20", -1))
+
+	canonicalURI := v4Internal.GetURIPath(req.URL)
+	if !s.DisableURIPathEscaping {
+		canonicalURI = httpbinding.EscapePath(canonicalURI, false)
+	}
+
+	canonicalString := s.buildCanonicalString(
+		req.Method,
+		canonicalURI,
+		rawQuery.String(),
+		signedHeadersStr,
+		canonicalHeaderStr,
+	)
+
+	strToSign := s.buildStringToSign(credentialScope, canonicalString)
+	signingSignature, err := s.buildSignature(strToSign)
+	if err != nil {
+		return signedRequest{}, err
+	}
+
+	if s.IsPreSign {
+		rawQuery.WriteString("&X-Amz-Signature=")
+		rawQuery.WriteString(signingSignature)
+	} else {
+		headers[authorizationHeader] = append(headers[authorizationHeader][:0], buildAuthorizationHeader(credentialStr, signedHeadersStr, signingSignature))
+	}
+
+	req.URL.RawQuery = rawQuery.String()
+
+	return signedRequest{
+		Request:         req,
+		SignedHeaders:   signedHeaders,
+		CanonicalString: canonicalString,
+		StringToSign:    strToSign,
+		PreSigned:       s.IsPreSign,
+	}, nil
+}
+
+func buildAuthorizationHeader(credentialStr, signedHeadersStr, signingSignature string) string {
+	const credential = "Credential="
+	const signedHeaders = "SignedHeaders="
+	const signature = "Signature="
+	const commaSpace = ", "
+
+	var parts strings.Builder
+	parts.Grow(len(signingAlgorithm) + 1 +
+		len(credential) + len(credentialStr) + 2 +
+		len(signedHeaders) + len(signedHeadersStr) + 2 +
+		len(signature) + len(signingSignature),
+	)
+	parts.WriteString(signingAlgorithm)
+	parts.WriteRune(' ')
+	parts.WriteString(credential)
+	parts.WriteString(credentialStr)
+	parts.WriteString(commaSpace)
+	parts.WriteString(signedHeaders)
+	parts.WriteString(signedHeadersStr)
+	parts.WriteString(commaSpace)
+	parts.WriteString(signature)
+	parts.WriteString(signingSignature)
+	return parts.String()
+}
+
+// SignHTTP signs AWS v4 requests with the provided payload hash, service name, region the
+// request is made to, and time the request is signed at. The signTime allows
+// you to specify that a request is signed for the future, and cannot be
+// used until then.
+//
+// The payloadHash is the hex encoded SHA-256 hash of the request payload, and
+// must be provided. Even if the request has no payload (aka body). If the
+// request has no payload you should use the hex encoded SHA-256 of an empty
+// string as the payloadHash value.
+//
+//	"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
+//
+// Some services such as Amazon S3 accept alternative values for the payload
+// hash, such as "UNSIGNED-PAYLOAD" for requests where the body will not be
+// included in the request signature.
+//
+// https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-header-based-auth.html
+//
+// Sign differs from Presign in that it will sign the request using HTTP
+// header values. This type of signing is intended for http.Request values that
+// will not be shared, or are shared in a way the header values on the request
+// will not be lost.
+//
+// The passed in request will be modified in place.
+func (s Signer) SignHTTP(ctx context.Context, credentials aws.Credentials, r *http.Request, payloadHash string, service string, region string, signingTime time.Time, optFns ...func(options *SignerOptions)) error {
+	options := s.options
+
+	for _, fn := range optFns {
+		fn(&options)
+	}
+
+	signer := &httpSigner{
+		Request:                r,
+		PayloadHash:            payloadHash,
+		ServiceName:            service,
+		Region:                 region,
+		Credentials:            credentials,
+		Time:                   v4Internal.NewSigningTime(signingTime.UTC()),
+		DisableHeaderHoisting:  options.DisableHeaderHoisting,
+		DisableURIPathEscaping: options.DisableURIPathEscaping,
+		DisableSessionToken:    options.DisableSessionToken,
+		KeyDerivator:           s.keyDerivator,
+	}
+
+	signedRequest, err := signer.Build()
+	if err != nil {
+		return err
+	}
+
+	logSigningInfo(ctx, options, &signedRequest, false)
+
+	return nil
+}
+
+// PresignHTTP signs AWS v4 requests with the payload hash, service name, region
+// the request is made to, and time the request is signed at. The signTime
+// allows you to specify that a request is signed for the future, and cannot
+// be used until then.
+//
+// Returns the signed URL and the map of HTTP headers that were included in the
+// signature or an error if signing the request failed. For presigned requests
+// these headers and their values must be included on the HTTP request when it
+// is made. This is helpful to know what header values need to be shared with
+// the party the presigned request will be distributed to.
+//
+// The payloadHash is the hex encoded SHA-256 hash of the request payload, and
+// must be provided. Even if the request has no payload (aka body). If the
+// request has no payload you should use the hex encoded SHA-256 of an empty
+// string as the payloadHash value.
+//
+//	"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
+//
+// Some services such as Amazon S3 accept alternative values for the payload
+// hash, such as "UNSIGNED-PAYLOAD" for requests where the body will not be
+// included in the request signature.
+//
+// https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-header-based-auth.html
+//
+// PresignHTTP differs from SignHTTP in that it will sign the request using
+// query string instead of header values. This allows you to share the
+// Presigned Request's URL with third parties, or distribute it throughout your
+// system with minimal dependencies.
+//
+// PresignHTTP will not set the expires time of the presigned request
+// automatically. To specify the expire duration for a request add the
+// "X-Amz-Expires" query parameter on the request with the value as the
+// duration in seconds the presigned URL should be considered valid for. This
+// parameter is not used by all AWS services, and is most notable used by
+// Amazon S3 APIs.
+//
+//	expires := 20 * time.Minute
+//	query := req.URL.Query()
+//	query.Set("X-Amz-Expires", strconv.FormatInt(int64(expires/time.Second), 10))
+//	req.URL.RawQuery = query.Encode()
+//
+// This method does not modify the provided request.
+func (s *Signer) PresignHTTP(
+	ctx context.Context, credentials aws.Credentials, r *http.Request,
+	payloadHash string, service string, region string, signingTime time.Time,
+	optFns ...func(*SignerOptions),
+) (signedURI string, signedHeaders http.Header, err error) {
+	options := s.options
+
+	for _, fn := range optFns {
+		fn(&options)
+	}
+
+	signer := &httpSigner{
+		Request:                r.Clone(r.Context()),
+		PayloadHash:            payloadHash,
+		ServiceName:            service,
+		Region:                 region,
+		Credentials:            credentials,
+		Time:                   v4Internal.NewSigningTime(signingTime.UTC()),
+		IsPreSign:              true,
+		DisableHeaderHoisting:  options.DisableHeaderHoisting,
+		DisableURIPathEscaping: options.DisableURIPathEscaping,
+		DisableSessionToken:    options.DisableSessionToken,
+		KeyDerivator:           s.keyDerivator,
+	}
+
+	signedRequest, err := signer.Build()
+	if err != nil {
+		return "", nil, err
+	}
+
+	logSigningInfo(ctx, options, &signedRequest, true)
+
+	signedHeaders = make(http.Header)
+
+	// For the signed headers we canonicalize the header keys in the returned map.
+	// This avoids situations where the standard library can duplicate headers like the Host header. For example the standard
+	// library will set the Host header, even if it is present in lower-case form.
+	for k, v := range signedRequest.SignedHeaders {
+		key := textproto.CanonicalMIMEHeaderKey(k)
+		signedHeaders[key] = append(signedHeaders[key], v...)
+	}
+
+	return signedRequest.Request.URL.String(), signedHeaders, nil
+}
+
+func (s *httpSigner) buildCredentialScope() string {
+	return v4Internal.BuildCredentialScope(s.Time, s.Region, s.ServiceName)
+}
+
+func buildQuery(r v4Internal.Rule, header http.Header) (url.Values, http.Header) {
+	query := url.Values{}
+	unsignedHeaders := http.Header{}
+	for k, h := range header {
+		// literally just this header has this constraint for some stupid reason,
+		// see #2508
+		if k == "X-Amz-Expected-Bucket-Owner" {
+			k = "x-amz-expected-bucket-owner"
+		}
+
+		if r.IsValid(k) {
+			query[k] = h
+		} else {
+			unsignedHeaders[k] = h
+		}
+	}
+
+	return query, unsignedHeaders
+}
+
+func (s *httpSigner) buildCanonicalHeaders(host string, rule v4Internal.Rule, header http.Header, length int64) (signed http.Header, signedHeaders, canonicalHeadersStr string) {
+	signed = make(http.Header)
+
+	var headers []string
+	const hostHeader = "host"
+	headers = append(headers, hostHeader)
+	signed[hostHeader] = append(signed[hostHeader], host)
+
+	const contentLengthHeader = "content-length"
+	if length > 0 {
+		headers = append(headers, contentLengthHeader)
+		signed[contentLengthHeader] = append(signed[contentLengthHeader], strconv.FormatInt(length, 10))
+	}
+
+	for k, v := range header {
+		if !rule.IsValid(k) {
+			continue // ignored header
+		}
+		if strings.EqualFold(k, contentLengthHeader) {
+			// prevent signing already handled content-length header.
+			continue
+		}
+
+		lowerCaseKey := strings.ToLower(k)
+		if _, ok := signed[lowerCaseKey]; ok {
+			// include additional values
+			signed[lowerCaseKey] = append(signed[lowerCaseKey], v...)
+			continue
+		}
+
+		headers = append(headers, lowerCaseKey)
+		signed[lowerCaseKey] = v
+	}
+	sort.Strings(headers)
+
+	signedHeaders = strings.Join(headers, ";")
+
+	var canonicalHeaders strings.Builder
+	n := len(headers)
+	const colon = ':'
+	for i := 0; i < n; i++ {
+		if headers[i] == hostHeader {
+			canonicalHeaders.WriteString(hostHeader)
+			canonicalHeaders.WriteRune(colon)
+			canonicalHeaders.WriteString(v4Internal.StripExcessSpaces(host))
+		} else {
+			canonicalHeaders.WriteString(headers[i])
+			canonicalHeaders.WriteRune(colon)
+			// Trim out leading, trailing, and dedup inner spaces from signed header values.
+			values := signed[headers[i]]
+			for j, v := range values {
+				cleanedValue := strings.TrimSpace(v4Internal.StripExcessSpaces(v))
+				canonicalHeaders.WriteString(cleanedValue)
+				if j < len(values)-1 {
+					canonicalHeaders.WriteRune(',')
+				}
+			}
+		}
+		canonicalHeaders.WriteRune('\n')
+	}
+	canonicalHeadersStr = canonicalHeaders.String()
+
+	return signed, signedHeaders, canonicalHeadersStr
+}
+
+func (s *httpSigner) buildCanonicalString(method, uri, query, signedHeaders, canonicalHeaders string) string {
+	return strings.Join([]string{
+		method,
+		uri,
+		query,
+		canonicalHeaders,
+		signedHeaders,
+		s.PayloadHash,
+	}, "\n")
+}
+
+func (s *httpSigner) buildStringToSign(credentialScope, canonicalRequestString string) string {
+	return strings.Join([]string{
+		signingAlgorithm,
+		s.Time.TimeFormat(),
+		credentialScope,
+		hex.EncodeToString(makeHash(sha256.New(), []byte(canonicalRequestString))),
+	}, "\n")
+}
+
+func makeHash(hash hash.Hash, b []byte) []byte {
+	hash.Reset()
+	hash.Write(b)
+	return hash.Sum(nil)
+}
+
+func (s *httpSigner) buildSignature(strToSign string) (string, error) {
+	key := s.KeyDerivator.DeriveKey(s.Credentials, s.ServiceName, s.Region, s.Time)
+	return hex.EncodeToString(v4Internal.HMACSHA256(key, []byte(strToSign))), nil
+}
+
+func (s *httpSigner) setRequiredSigningFields(headers http.Header, query url.Values) {
+	amzDate := s.Time.TimeFormat()
+
+	if s.IsPreSign {
+		query.Set(v4Internal.AmzAlgorithmKey, signingAlgorithm)
+		sessionToken := s.Credentials.SessionToken
+		if !s.DisableSessionToken && len(sessionToken) > 0 {
+			query.Set("X-Amz-Security-Token", sessionToken)
+		}
+
+		query.Set(v4Internal.AmzDateKey, amzDate)
+		return
+	}
+
+	headers[v4Internal.AmzDateKey] = append(headers[v4Internal.AmzDateKey][:0], amzDate)
+
+	if !s.DisableSessionToken && len(s.Credentials.SessionToken) > 0 {
+		headers[v4Internal.AmzSecurityTokenKey] = append(headers[v4Internal.AmzSecurityTokenKey][:0], s.Credentials.SessionToken)
+	}
+}
+
+func logSigningInfo(ctx context.Context, options SignerOptions, request *signedRequest, isPresign bool) {
+	if !options.LogSigning {
+		return
+	}
+	signedURLMsg := ""
+	if isPresign {
+		signedURLMsg = fmt.Sprintf(logSignedURLMsg, request.Request.URL.String())
+	}
+	logger := logging.WithContext(ctx, options.Logger)
+	logger.Logf(logging.Debug, logSignInfoMsg, request.CanonicalString, request.StringToSign, signedURLMsg)
+}
+
+type signedRequest struct {
+	Request         *http.Request
+	SignedHeaders   http.Header
+	CanonicalString string
+	StringToSign    string
+	PreSigned       bool
+}
+
+const logSignInfoMsg = `Request Signature:
+---[ CANONICAL STRING  ]-----------------------------
+%s
+---[ STRING TO SIGN ]--------------------------------
+%s%s
+-----------------------------------------------------`
+const logSignedURLMsg = `
+---[ SIGNED URL ]------------------------------------
+%s`
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/aws/to_ptr.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/aws/to_ptr.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/aws/to_ptr.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/aws/to_ptr.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,297 @@
+// Code generated by aws/generate.go DO NOT EDIT.
+
+package aws
+
+import (
+	"github.com/aws/smithy-go/ptr"
+	"time"
+)
+
+// Bool returns a pointer value for the bool value passed in.
+func Bool(v bool) *bool {
+	return ptr.Bool(v)
+}
+
+// BoolSlice returns a slice of bool pointers from the values
+// passed in.
+func BoolSlice(vs []bool) []*bool {
+	return ptr.BoolSlice(vs)
+}
+
+// BoolMap returns a map of bool pointers from the values
+// passed in.
+func BoolMap(vs map[string]bool) map[string]*bool {
+	return ptr.BoolMap(vs)
+}
+
+// Byte returns a pointer value for the byte value passed in.
+func Byte(v byte) *byte {
+	return ptr.Byte(v)
+}
+
+// ByteSlice returns a slice of byte pointers from the values
+// passed in.
+func ByteSlice(vs []byte) []*byte {
+	return ptr.ByteSlice(vs)
+}
+
+// ByteMap returns a map of byte pointers from the values
+// passed in.
+func ByteMap(vs map[string]byte) map[string]*byte {
+	return ptr.ByteMap(vs)
+}
+
+// String returns a pointer value for the string value passed in.
+func String(v string) *string {
+	return ptr.String(v)
+}
+
+// StringSlice returns a slice of string pointers from the values
+// passed in.
+func StringSlice(vs []string) []*string {
+	return ptr.StringSlice(vs)
+}
+
+// StringMap returns a map of string pointers from the values
+// passed in.
+func StringMap(vs map[string]string) map[string]*string {
+	return ptr.StringMap(vs)
+}
+
+// Int returns a pointer value for the int value passed in.
+func Int(v int) *int {
+	return ptr.Int(v)
+}
+
+// IntSlice returns a slice of int pointers from the values
+// passed in.
+func IntSlice(vs []int) []*int {
+	return ptr.IntSlice(vs)
+}
+
+// IntMap returns a map of int pointers from the values
+// passed in.
+func IntMap(vs map[string]int) map[string]*int {
+	return ptr.IntMap(vs)
+}
+
+// Int8 returns a pointer value for the int8 value passed in.
+func Int8(v int8) *int8 {
+	return ptr.Int8(v)
+}
+
+// Int8Slice returns a slice of int8 pointers from the values
+// passed in.
+func Int8Slice(vs []int8) []*int8 {
+	return ptr.Int8Slice(vs)
+}
+
+// Int8Map returns a map of int8 pointers from the values
+// passed in.
+func Int8Map(vs map[string]int8) map[string]*int8 {
+	return ptr.Int8Map(vs)
+}
+
+// Int16 returns a pointer value for the int16 value passed in.
+func Int16(v int16) *int16 {
+	return ptr.Int16(v)
+}
+
+// Int16Slice returns a slice of int16 pointers from the values
+// passed in.
+func Int16Slice(vs []int16) []*int16 {
+	return ptr.Int16Slice(vs)
+}
+
+// Int16Map returns a map of int16 pointers from the values
+// passed in.
+func Int16Map(vs map[string]int16) map[string]*int16 {
+	return ptr.Int16Map(vs)
+}
+
+// Int32 returns a pointer value for the int32 value passed in.
+func Int32(v int32) *int32 {
+	return ptr.Int32(v)
+}
+
+// Int32Slice returns a slice of int32 pointers from the values
+// passed in.
+func Int32Slice(vs []int32) []*int32 {
+	return ptr.Int32Slice(vs)
+}
+
+// Int32Map returns a map of int32 pointers from the values
+// passed in.
+func Int32Map(vs map[string]int32) map[string]*int32 {
+	return ptr.Int32Map(vs)
+}
+
+// Int64 returns a pointer value for the int64 value passed in.
+func Int64(v int64) *int64 {
+	return ptr.Int64(v)
+}
+
+// Int64Slice returns a slice of int64 pointers from the values
+// passed in.
+func Int64Slice(vs []int64) []*int64 {
+	return ptr.Int64Slice(vs)
+}
+
+// Int64Map returns a map of int64 pointers from the values
+// passed in.
+func Int64Map(vs map[string]int64) map[string]*int64 {
+	return ptr.Int64Map(vs)
+}
+
+// Uint returns a pointer value for the uint value passed in.
+func Uint(v uint) *uint {
+	return ptr.Uint(v)
+}
+
+// UintSlice returns a slice of uint pointers from the values
+// passed in.
+func UintSlice(vs []uint) []*uint {
+	return ptr.UintSlice(vs)
+}
+
+// UintMap returns a map of uint pointers from the values
+// passed in.
+func UintMap(vs map[string]uint) map[string]*uint {
+	return ptr.UintMap(vs)
+}
+
+// Uint8 returns a pointer value for the uint8 value passed in.
+func Uint8(v uint8) *uint8 {
+	return ptr.Uint8(v)
+}
+
+// Uint8Slice returns a slice of uint8 pointers from the values
+// passed in.
+func Uint8Slice(vs []uint8) []*uint8 {
+	return ptr.Uint8Slice(vs)
+}
+
+// Uint8Map returns a map of uint8 pointers from the values
+// passed in.
+func Uint8Map(vs map[string]uint8) map[string]*uint8 {
+	return ptr.Uint8Map(vs)
+}
+
+// Uint16 returns a pointer value for the uint16 value passed in.
+func Uint16(v uint16) *uint16 {
+	return ptr.Uint16(v)
+}
+
+// Uint16Slice returns a slice of uint16 pointers from the values
+// passed in.
+func Uint16Slice(vs []uint16) []*uint16 {
+	return ptr.Uint16Slice(vs)
+}
+
+// Uint16Map returns a map of uint16 pointers from the values
+// passed in.
+func Uint16Map(vs map[string]uint16) map[string]*uint16 {
+	return ptr.Uint16Map(vs)
+}
+
+// Uint32 returns a pointer value for the uint32 value passed in.
+func Uint32(v uint32) *uint32 {
+	return ptr.Uint32(v)
+}
+
+// Uint32Slice returns a slice of uint32 pointers from the values
+// passed in.
+func Uint32Slice(vs []uint32) []*uint32 {
+	return ptr.Uint32Slice(vs)
+}
+
+// Uint32Map returns a map of uint32 pointers from the values
+// passed in.
+func Uint32Map(vs map[string]uint32) map[string]*uint32 {
+	return ptr.Uint32Map(vs)
+}
+
+// Uint64 returns a pointer value for the uint64 value passed in.
+func Uint64(v uint64) *uint64 {
+	return ptr.Uint64(v)
+}
+
+// Uint64Slice returns a slice of uint64 pointers from the values
+// passed in.
+func Uint64Slice(vs []uint64) []*uint64 {
+	return ptr.Uint64Slice(vs)
+}
+
+// Uint64Map returns a map of uint64 pointers from the values
+// passed in.
+func Uint64Map(vs map[string]uint64) map[string]*uint64 {
+	return ptr.Uint64Map(vs)
+}
+
+// Float32 returns a pointer value for the float32 value passed in.
+func Float32(v float32) *float32 {
+	return ptr.Float32(v)
+}
+
+// Float32Slice returns a slice of float32 pointers from the values
+// passed in.
+func Float32Slice(vs []float32) []*float32 {
+	return ptr.Float32Slice(vs)
+}
+
+// Float32Map returns a map of float32 pointers from the values
+// passed in.
+func Float32Map(vs map[string]float32) map[string]*float32 {
+	return ptr.Float32Map(vs)
+}
+
+// Float64 returns a pointer value for the float64 value passed in.
+func Float64(v float64) *float64 {
+	return ptr.Float64(v)
+}
+
+// Float64Slice returns a slice of float64 pointers from the values
+// passed in.
+func Float64Slice(vs []float64) []*float64 {
+	return ptr.Float64Slice(vs)
+}
+
+// Float64Map returns a map of float64 pointers from the values
+// passed in.
+func Float64Map(vs map[string]float64) map[string]*float64 {
+	return ptr.Float64Map(vs)
+}
+
+// Time returns a pointer value for the time.Time value passed in.
+func Time(v time.Time) *time.Time {
+	return ptr.Time(v)
+}
+
+// TimeSlice returns a slice of time.Time pointers from the values
+// passed in.
+func TimeSlice(vs []time.Time) []*time.Time {
+	return ptr.TimeSlice(vs)
+}
+
+// TimeMap returns a map of time.Time pointers from the values
+// passed in.
+func TimeMap(vs map[string]time.Time) map[string]*time.Time {
+	return ptr.TimeMap(vs)
+}
+
+// Duration returns a pointer value for the time.Duration value passed in.
+func Duration(v time.Duration) *time.Duration {
+	return ptr.Duration(v)
+}
+
+// DurationSlice returns a slice of time.Duration pointers from the values
+// passed in.
+func DurationSlice(vs []time.Duration) []*time.Duration {
+	return ptr.DurationSlice(vs)
+}
+
+// DurationMap returns a map of time.Duration pointers from the values
+// passed in.
+func DurationMap(vs map[string]time.Duration) map[string]*time.Duration {
+	return ptr.DurationMap(vs)
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/client.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/client.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/client.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/client.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,310 @@
+package http
+
+import (
+	"crypto/tls"
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"net"
+	"net/http"
+	"reflect"
+	"sync"
+	"time"
+)
+
+// Defaults for the HTTPTransportBuilder.
+var (
+	// Default connection pool options
+	DefaultHTTPTransportMaxIdleConns        = 100
+	DefaultHTTPTransportMaxIdleConnsPerHost = 10
+
+	// Default connection timeouts
+	DefaultHTTPTransportIdleConnTimeout       = 90 * time.Second
+	DefaultHTTPTransportTLSHandleshakeTimeout = 10 * time.Second
+	DefaultHTTPTransportExpectContinueTimeout = 1 * time.Second
+
+	// Default to TLS 1.2 for all HTTPS requests.
+	DefaultHTTPTransportTLSMinVersion uint16 = tls.VersionTLS12
+)
+
+// Timeouts for net.Dialer's network connection.
+var (
+	DefaultDialConnectTimeout   = 30 * time.Second
+	DefaultDialKeepAliveTimeout = 30 * time.Second
+)
+
+// BuildableClient provides a HTTPClient implementation with options to
+// create copies of the HTTPClient when additional configuration is provided.
+//
+// The client's methods will not share the http.Transport value between copies
+// of the BuildableClient. Only exported member values of the Transport and
+// optional Dialer will be copied between copies of BuildableClient.
+type BuildableClient struct {
+	transport *http.Transport
+	dialer    *net.Dialer
+
+	initOnce sync.Once
+
+	clientTimeout time.Duration
+	client        *http.Client
+}
+
+// NewBuildableClient returns an initialized client for invoking HTTP
+// requests.
+func NewBuildableClient() *BuildableClient {
+	return &BuildableClient{}
+}
+
+// Do implements the HTTPClient interface's Do method to invoke a HTTP request,
+// and receive the response. Uses the BuildableClient's current
+// configuration to invoke the http.Request.
+//
+// If connection pooling is enabled (aka HTTP KeepAlive) the client will only
+// share pooled connections with its own instance. Copies of the
+// BuildableClient will have their own connection pools.
+//
+// Redirect (3xx) responses will not be followed, the HTTP response received
+// will returned instead.
+func (b *BuildableClient) Do(req *http.Request) (*http.Response, error) {
+	b.initOnce.Do(b.build)
+
+	return b.client.Do(req)
+}
+
+// Freeze returns a frozen aws.HTTPClient implementation that is no longer a BuildableClient.
+// Use this to prevent the SDK from applying DefaultMode configuration values to a buildable client.
+func (b *BuildableClient) Freeze() aws.HTTPClient {
+	cpy := b.clone()
+	cpy.build()
+	return cpy.client
+}
+
+func (b *BuildableClient) build() {
+	b.client = wrapWithLimitedRedirect(&http.Client{
+		Timeout:   b.clientTimeout,
+		Transport: b.GetTransport(),
+	})
+}
+
+func (b *BuildableClient) clone() *BuildableClient {
+	cpy := NewBuildableClient()
+	cpy.transport = b.GetTransport()
+	cpy.dialer = b.GetDialer()
+	cpy.clientTimeout = b.clientTimeout
+
+	return cpy
+}
+
+// WithTransportOptions copies the BuildableClient and returns it with the
+// http.Transport options applied.
+//
+// If a non (*http.Transport) was set as the round tripper, the round tripper
+// will be replaced with a default Transport value before invoking the option
+// functions.
+func (b *BuildableClient) WithTransportOptions(opts ...func(*http.Transport)) *BuildableClient {
+	cpy := b.clone()
+
+	tr := cpy.GetTransport()
+	for _, opt := range opts {
+		opt(tr)
+	}
+	cpy.transport = tr
+
+	return cpy
+}
+
+// WithDialerOptions copies the BuildableClient and returns it with the
+// net.Dialer options applied. Will set the client's http.Transport DialContext
+// member.
+func (b *BuildableClient) WithDialerOptions(opts ...func(*net.Dialer)) *BuildableClient {
+	cpy := b.clone()
+
+	dialer := cpy.GetDialer()
+	for _, opt := range opts {
+		opt(dialer)
+	}
+	cpy.dialer = dialer
+
+	tr := cpy.GetTransport()
+	tr.DialContext = cpy.dialer.DialContext
+	cpy.transport = tr
+
+	return cpy
+}
+
+// WithTimeout Sets the timeout used by the client for all requests.
+func (b *BuildableClient) WithTimeout(timeout time.Duration) *BuildableClient {
+	cpy := b.clone()
+	cpy.clientTimeout = timeout
+	return cpy
+}
+
+// GetTransport returns a copy of the client's HTTP Transport.
+func (b *BuildableClient) GetTransport() *http.Transport {
+	var tr *http.Transport
+	if b.transport != nil {
+		tr = b.transport.Clone()
+	} else {
+		tr = defaultHTTPTransport()
+	}
+
+	return tr
+}
+
+// GetDialer returns a copy of the client's network dialer.
+func (b *BuildableClient) GetDialer() *net.Dialer {
+	var dialer *net.Dialer
+	if b.dialer != nil {
+		dialer = shallowCopyStruct(b.dialer).(*net.Dialer)
+	} else {
+		dialer = defaultDialer()
+	}
+
+	return dialer
+}
+
+// GetTimeout returns a copy of the client's timeout to cancel requests with.
+func (b *BuildableClient) GetTimeout() time.Duration {
+	return b.clientTimeout
+}
+
+func defaultDialer() *net.Dialer {
+	return &net.Dialer{
+		Timeout:   DefaultDialConnectTimeout,
+		KeepAlive: DefaultDialKeepAliveTimeout,
+		DualStack: true,
+	}
+}
+
+func defaultHTTPTransport() *http.Transport {
+	dialer := defaultDialer()
+
+	tr := &http.Transport{
+		Proxy:                 http.ProxyFromEnvironment,
+		DialContext:           dialer.DialContext,
+		TLSHandshakeTimeout:   DefaultHTTPTransportTLSHandleshakeTimeout,
+		MaxIdleConns:          DefaultHTTPTransportMaxIdleConns,
+		MaxIdleConnsPerHost:   DefaultHTTPTransportMaxIdleConnsPerHost,
+		IdleConnTimeout:       DefaultHTTPTransportIdleConnTimeout,
+		ExpectContinueTimeout: DefaultHTTPTransportExpectContinueTimeout,
+		ForceAttemptHTTP2:     true,
+		TLSClientConfig: &tls.Config{
+			MinVersion: DefaultHTTPTransportTLSMinVersion,
+		},
+	}
+
+	return tr
+}
+
+// shallowCopyStruct creates a shallow copy of the passed in source struct, and
+// returns that copy of the same struct type.
+func shallowCopyStruct(src interface{}) interface{} {
+	srcVal := reflect.ValueOf(src)
+	srcValType := srcVal.Type()
+
+	var returnAsPtr bool
+	if srcValType.Kind() == reflect.Ptr {
+		srcVal = srcVal.Elem()
+		srcValType = srcValType.Elem()
+		returnAsPtr = true
+	}
+	dstVal := reflect.New(srcValType).Elem()
+
+	for i := 0; i < srcValType.NumField(); i++ {
+		ft := srcValType.Field(i)
+		if len(ft.PkgPath) != 0 {
+			// unexported fields have a PkgPath
+			continue
+		}
+
+		dstVal.Field(i).Set(srcVal.Field(i))
+	}
+
+	if returnAsPtr {
+		dstVal = dstVal.Addr()
+	}
+
+	return dstVal.Interface()
+}
+
+// wrapWithLimitedRedirect updates the Client's Transport and CheckRedirect to
+// not follow any redirect other than 307 and 308. No other redirect will be
+// followed.
+//
+// If the client does not have a Transport defined will use a new SDK default
+// http.Transport configuration.
+func wrapWithLimitedRedirect(c *http.Client) *http.Client {
+	tr := c.Transport
+	if tr == nil {
+		tr = defaultHTTPTransport()
+	}
+
+	cc := *c
+	cc.CheckRedirect = limitedRedirect
+	cc.Transport = suppressBadHTTPRedirectTransport{
+		tr: tr,
+	}
+
+	return &cc
+}
+
+// limitedRedirect is a CheckRedirect that prevents the client from following
+// any non 307/308 HTTP status code redirects.
+//
+// The 307 and 308 redirects are allowed because the client must use the
+// original HTTP method for the redirected to location. Whereas 301 and 302
+// allow the client to switch to GET for the redirect.
+//
+// Suppresses all redirect requests with a URL of badHTTPRedirectLocation.
+func limitedRedirect(r *http.Request, via []*http.Request) error {
+	// Request.Response, in CheckRedirect is the response that is triggering
+	// the redirect.
+	resp := r.Response
+	if r.URL.String() == badHTTPRedirectLocation {
+		resp.Header.Del(badHTTPRedirectLocation)
+		return http.ErrUseLastResponse
+	}
+
+	switch resp.StatusCode {
+	case 307, 308:
+		// Only allow 307 and 308 redirects as they preserve the method.
+		return nil
+	}
+
+	return http.ErrUseLastResponse
+}
+
+// suppressBadHTTPRedirectTransport provides an http.RoundTripper
+// implementation that wraps another http.RoundTripper to prevent HTTP client
+// receiving 301 and 302 HTTP responses redirects without the required location
+// header.
+//
+// Clients using this utility must have a CheckRedirect, e.g. limitedRedirect,
+// that check for responses with having a URL of baseHTTPRedirectLocation, and
+// suppress the redirect.
+type suppressBadHTTPRedirectTransport struct {
+	tr http.RoundTripper
+}
+
+const badHTTPRedirectLocation = `https://amazonaws.com/badhttpredirectlocation`
+
+// RoundTrip backfills a stub location when a 301/302 response is received
+// without a location. This stub location is used by limitedRedirect to prevent
+// the HTTP client from failing attempting to use follow a redirect without a
+// location value.
+func (t suppressBadHTTPRedirectTransport) RoundTrip(r *http.Request) (*http.Response, error) {
+	resp, err := t.tr.RoundTrip(r)
+	if err != nil {
+		return resp, err
+	}
+
+	// S3 is the only known service to return 301 without location header.
+	// The Go standard library HTTP client will return an opaque error if it
+	// tries to follow a 301/302 response missing the location header.
+	switch resp.StatusCode {
+	case 301, 302:
+		if v := resp.Header.Get("Location"); len(v) == 0 {
+			resp.Header.Set("Location", badHTTPRedirectLocation)
+		}
+	}
+
+	return resp, err
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/content_type.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/content_type.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/content_type.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/content_type.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,42 @@
+package http
+
+import (
+	"context"
+	"fmt"
+	"github.com/aws/smithy-go/middleware"
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// removeContentTypeHeader is a build middleware that removes
+// content type header if content-length header is unset or
+// is set to zero,
+type removeContentTypeHeader struct {
+}
+
+// ID the name of the middleware.
+func (m *removeContentTypeHeader) ID() string {
+	return "RemoveContentTypeHeader"
+}
+
+// HandleBuild adds or appends the constructed user agent to the request.
+func (m *removeContentTypeHeader) HandleBuild(ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler) (
+	out middleware.BuildOutput, metadata middleware.Metadata, err error,
+) {
+	req, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, fmt.Errorf("unknown transport type %T", in)
+	}
+
+	// remove contentTypeHeader when content-length is zero
+	if req.ContentLength == 0 {
+		req.Header.Del("content-type")
+	}
+
+	return next.HandleBuild(ctx, in)
+}
+
+// RemoveContentTypeHeader removes content-type header if
+// content length is unset or equal to zero.
+func RemoveContentTypeHeader(stack *middleware.Stack) error {
+	return stack.Build.Add(&removeContentTypeHeader{}, middleware.After)
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/response_error.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/response_error.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/response_error.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/response_error.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,33 @@
+package http
+
+import (
+	"errors"
+	"fmt"
+
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// ResponseError provides the HTTP centric error type wrapping the underlying error
+// with the HTTP response value and the deserialized RequestID.
+type ResponseError struct {
+	*smithyhttp.ResponseError
+
+	// RequestID associated with response error
+	RequestID string
+}
+
+// ServiceRequestID returns the request id associated with Response Error
+func (e *ResponseError) ServiceRequestID() string { return e.RequestID }
+
+// Error returns the formatted error
+func (e *ResponseError) Error() string {
+	return fmt.Sprintf(
+		"https response error StatusCode: %d, RequestID: %s, %v",
+		e.Response.StatusCode, e.RequestID, e.Err)
+}
+
+// As populates target and returns true if the type of target is a error type that
+// the ResponseError embeds, (e.g.AWS HTTP ResponseError)
+func (e *ResponseError) As(target interface{}) bool {
+	return errors.As(e.ResponseError, target)
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/response_error_middleware.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/response_error_middleware.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/response_error_middleware.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/response_error_middleware.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,56 @@
+package http
+
+import (
+	"context"
+
+	awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+	"github.com/aws/smithy-go/middleware"
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// AddResponseErrorMiddleware adds response error wrapper middleware
+func AddResponseErrorMiddleware(stack *middleware.Stack) error {
+	// add error wrapper middleware before request id retriever middleware so that it can wrap the error response
+	// returned by operation deserializers
+	return stack.Deserialize.Insert(&ResponseErrorWrapper{}, "RequestIDRetriever", middleware.Before)
+}
+
+// ResponseErrorWrapper wraps operation errors with ResponseError.
+type ResponseErrorWrapper struct {
+}
+
+// ID returns the middleware identifier
+func (m *ResponseErrorWrapper) ID() string {
+	return "ResponseErrorWrapper"
+}
+
+// HandleDeserialize wraps the stack error with smithyhttp.ResponseError.
+func (m *ResponseErrorWrapper) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+	out, metadata, err = next.HandleDeserialize(ctx, in)
+	if err == nil {
+		// Nothing to do when there is no error.
+		return out, metadata, err
+	}
+
+	resp, ok := out.RawResponse.(*smithyhttp.Response)
+	if !ok {
+		// No raw response to wrap with.
+		return out, metadata, err
+	}
+
+	// look for request id in metadata
+	reqID, _ := awsmiddleware.GetRequestIDMetadata(metadata)
+
+	// Wrap the returned smithy error with the request id retrieved from the metadata
+	err = &ResponseError{
+		ResponseError: &smithyhttp.ResponseError{
+			Response: resp,
+			Err:      err,
+		},
+		RequestID: reqID,
+	}
+
+	return out, metadata, err
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/timeout_read_closer.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/timeout_read_closer.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/timeout_read_closer.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/timeout_read_closer.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,104 @@
+package http
+
+import (
+	"context"
+	"fmt"
+	"io"
+	"time"
+
+	"github.com/aws/smithy-go"
+	"github.com/aws/smithy-go/middleware"
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+type readResult struct {
+	n   int
+	err error
+}
+
+// ResponseTimeoutError is an error when the reads from the response are
+// delayed longer than the timeout the read was configured for.
+type ResponseTimeoutError struct {
+	TimeoutDur time.Duration
+}
+
+// Timeout returns that the error is was caused by a timeout, and can be
+// retried.
+func (*ResponseTimeoutError) Timeout() bool { return true }
+
+func (e *ResponseTimeoutError) Error() string {
+	return fmt.Sprintf("read on body reach timeout limit, %v", e.TimeoutDur)
+}
+
+// timeoutReadCloser will handle body reads that take too long.
+// We will return a ErrReadTimeout error if a timeout occurs.
+type timeoutReadCloser struct {
+	reader   io.ReadCloser
+	duration time.Duration
+}
+
+// Read will spin off a goroutine to call the reader's Read method. We will
+// select on the timer's channel or the read's channel. Whoever completes first
+// will be returned.
+func (r *timeoutReadCloser) Read(b []byte) (int, error) {
+	timer := time.NewTimer(r.duration)
+	c := make(chan readResult, 1)
+
+	go func() {
+		n, err := r.reader.Read(b)
+		timer.Stop()
+		c <- readResult{n: n, err: err}
+	}()
+
+	select {
+	case data := <-c:
+		return data.n, data.err
+	case <-timer.C:
+		return 0, &ResponseTimeoutError{TimeoutDur: r.duration}
+	}
+}
+
+func (r *timeoutReadCloser) Close() error {
+	return r.reader.Close()
+}
+
+// AddResponseReadTimeoutMiddleware adds a middleware to the stack that wraps the
+// response body so that a read that takes too long will return an error.
+func AddResponseReadTimeoutMiddleware(stack *middleware.Stack, duration time.Duration) error {
+	return stack.Deserialize.Add(&readTimeout{duration: duration}, middleware.After)
+}
+
+// readTimeout wraps the response body with a timeoutReadCloser
+type readTimeout struct {
+	duration time.Duration
+}
+
+// ID returns the id of the middleware
+func (*readTimeout) ID() string {
+	return "ReadResponseTimeout"
+}
+
+// HandleDeserialize implements the DeserializeMiddleware interface
+func (m *readTimeout) HandleDeserialize(
+	ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler,
+) (
+	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+	out, metadata, err = next.HandleDeserialize(ctx, in)
+	if err != nil {
+		return out, metadata, err
+	}
+
+	response, ok := out.RawResponse.(*smithyhttp.Response)
+	if !ok {
+		return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+	}
+
+	response.Body = &timeoutReadCloser{
+		reader:   response.Body,
+		duration: m.duration,
+	}
+	out.RawResponse = response
+
+	return out, metadata, err
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/aws/types.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/aws/types.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/aws/types.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/aws/types.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,42 @@
+package aws
+
+import (
+	"fmt"
+)
+
+// Ternary is an enum allowing an unknown or none state in addition to a bool's
+// true and false.
+type Ternary int
+
+func (t Ternary) String() string {
+	switch t {
+	case UnknownTernary:
+		return "unknown"
+	case FalseTernary:
+		return "false"
+	case TrueTernary:
+		return "true"
+	default:
+		return fmt.Sprintf("unknown value, %d", int(t))
+	}
+}
+
+// Bool returns true if the value is TrueTernary, false otherwise.
+func (t Ternary) Bool() bool {
+	return t == TrueTernary
+}
+
+// Enumerations for the values of the Ternary type.
+const (
+	UnknownTernary Ternary = iota
+	FalseTernary
+	TrueTernary
+)
+
+// BoolTernary returns a true or false Ternary value for the bool provided.
+func BoolTernary(v bool) Ternary {
+	if v {
+		return TrueTernary
+	}
+	return FalseTernary
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/aws/version.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/aws/version.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/aws/version.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/aws/version.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,8 @@
+// Package aws provides core functionality for making requests to AWS services.
+package aws
+
+// SDKName is the name of this AWS SDK
+const SDKName = "aws-sdk-go-v2"
+
+// SDKVersion is the version of this SDK
+const SDKVersion = goModuleVersion
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/config/CHANGELOG.md 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/config/CHANGELOG.md
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/config/CHANGELOG.md	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/config/CHANGELOG.md	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,678 @@
+# v1.27.27 (2024-07-18)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.27.26 (2024-07-10.2)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.27.25 (2024-07-10)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.27.24 (2024-07-03)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.27.23 (2024-06-28)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.27.22 (2024-06-26)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.27.21 (2024-06-19)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.27.20 (2024-06-18)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.27.19 (2024-06-17)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.27.18 (2024-06-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.27.17 (2024-06-03)
+
+* **Documentation**: Add deprecation docs to global endpoint resolution interfaces. These APIs were previously deprecated with the introduction of service-specific endpoint resolution (EndpointResolverV2 and BaseEndpoint on service client options).
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.27.16 (2024-05-23)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.27.15 (2024-05-16)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.27.14 (2024-05-15)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.27.13 (2024-05-10)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.27.12 (2024-05-08)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.27.11 (2024-04-05)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.27.10 (2024-03-29)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.27.9 (2024-03-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.27.8 (2024-03-18)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.27.7 (2024-03-07)
+
+* **Bug Fix**: Remove dependency on go-cmp.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.27.6 (2024-03-05)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.27.5 (2024-03-04)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.27.4 (2024-02-23)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.27.3 (2024-02-22)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.27.2 (2024-02-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.27.1 (2024-02-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.27.0 (2024-02-13)
+
+* **Feature**: Bump minimum Go version to 1.20 per our language support policy.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.26.6 (2024-01-22)
+
+* **Bug Fix**: Remove invalid escaping of shared config values. All values in the shared config file will now be interpreted literally, save for fully-quoted strings which are unwrapped for legacy reasons.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.26.5 (2024-01-18)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.26.4 (2024-01-16)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.26.3 (2024-01-04)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.26.2 (2023-12-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.26.1 (2023-12-08)
+
+* **Bug Fix**: Correct loading of [services *] sections into shared config.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.26.0 (2023-12-07)
+
+* **Feature**: Support modeled request compression. The only algorithm supported at this time is `gzip`.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.25.12 (2023-12-06)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.25.11 (2023-12-01)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.25.10 (2023-11-30)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.25.9 (2023-11-29)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.25.8 (2023-11-28.3)
+
+* **Bug Fix**: Correct resolution of S3Express auth disable toggle.
+
+# v1.25.7 (2023-11-28.2)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.25.6 (2023-11-28)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.25.5 (2023-11-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.25.4 (2023-11-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.25.3 (2023-11-17)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.25.2 (2023-11-16)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.25.1 (2023-11-15)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.25.0 (2023-11-14)
+
+* **Feature**: Add support for dynamic auth token from file and EKS container host in absolute/relative URIs in the HTTP credential provider.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.24.0 (2023-11-13)
+
+* **Feature**: Replace the legacy config parser with a modern, less-strict implementation. Parsing failures within a section will now simply ignore the invalid line rather than silently drop the entire section.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.23.0 (2023-11-09.2)
+
+* **Feature**: BREAKFIX: In order to support subproperty parsing, invalid property definitions must not be ignored
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.22.3 (2023-11-09)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.22.2 (2023-11-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.22.1 (2023-11-06)
+
+* No change notes available for this release.
+
+# v1.22.0 (2023-11-02)
+
+* **Feature**: Add env and shared config settings for disabling IMDSv1 fallback.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.21.0 (2023-11-01)
+
+* **Feature**: Adds support for configured endpoints via environment variables and the AWS shared configuration file.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.20.0 (2023-10-31)
+
+* **Feature**: **BREAKING CHANGE**: Bump minimum go version to 1.19 per the revised [go version support policy](https://aws.amazon.com/blogs/developer/aws-sdk-for-go-aligns-with-go-release-policy-on-supported-runtimes/).
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.19.1 (2023-10-24)
+
+* No change notes available for this release.
+
+# v1.19.0 (2023-10-16)
+
+* **Feature**: Modify logic of retrieving user agent appID from env config
+
+# v1.18.45 (2023-10-12)
+
+* **Bug Fix**: Fail to load config if an explicitly provided profile doesn't exist.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.44 (2023-10-06)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.43 (2023-10-02)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.42 (2023-09-22)
+
+* **Bug Fix**: Fixed a bug where merging `max_attempts` or `duration_seconds` fields across shared config files with invalid values would silently default them to 0.
+* **Bug Fix**: Move type assertion of config values out of the parsing stage, which resolves an issue where the contents of a profile would silently be dropped with certain numeric formats.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.41 (2023-09-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.40 (2023-09-18)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.39 (2023-09-05)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.38 (2023-08-31)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.37 (2023-08-23)
+
+* No change notes available for this release.
+
+# v1.18.36 (2023-08-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.35 (2023-08-18)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.34 (2023-08-17)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.33 (2023-08-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.32 (2023-08-01)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.31 (2023-07-31)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.30 (2023-07-28)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.29 (2023-07-25)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.28 (2023-07-13)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.27 (2023-06-15)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.26 (2023-06-13)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.25 (2023-05-09)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.24 (2023-05-08)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.23 (2023-05-04)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.22 (2023-04-24)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.21 (2023-04-10)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.20 (2023-04-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.19 (2023-03-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.18 (2023-03-16)
+
+* **Bug Fix**: Allow RoleARN to be set as functional option on STS WebIdentityRoleOptions. Fixes aws/aws-sdk-go-v2#2015.
+
+# v1.18.17 (2023-03-14)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.16 (2023-03-10)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.15 (2023-02-22)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.14 (2023-02-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.13 (2023-02-15)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.12 (2023-02-03)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.11 (2023-02-01)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.10 (2023-01-25)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.9 (2023-01-23)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.8 (2023-01-05)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.7 (2022-12-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.6 (2022-12-19)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.5 (2022-12-15)
+
+* **Bug Fix**: Unify logic between shared config and in finding home directory
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.4 (2022-12-02)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.3 (2022-11-22)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.2 (2022-11-17)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.1 (2022-11-16)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.0 (2022-11-11)
+
+* **Announcement**: When using the SSOTokenProvider, a previous implementation incorrectly compensated for invalid SSOTokenProvider configurations in the shared profile. This has been fixed via PR #1903 and tracked in issue #1846
+* **Feature**: Adds token refresh support (via SSOTokenProvider) when using the SSOCredentialProvider
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.11 (2022-11-10)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.10 (2022-10-24)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.9 (2022-10-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.8 (2022-09-30)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.7 (2022-09-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.6 (2022-09-14)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.5 (2022-09-02)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.4 (2022-08-31)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.3 (2022-08-30)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.2 (2022-08-29)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.1 (2022-08-15)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.0 (2022-08-14)
+
+* **Feature**: Add alternative mechanism for determning the users `$HOME` or `%USERPROFILE%` location when the environment variables are not present.
+
+# v1.16.1 (2022-08-11)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.0 (2022-08-10)
+
+* **Feature**: Adds support for the following settings in the `~/.aws/credentials` file: `sso_account_id`, `sso_region`, `sso_role_name`, `sso_start_url`, and `ca_bundle`.
+
+# v1.15.17 (2022-08-09)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.16 (2022-08-08)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.15 (2022-08-01)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.14 (2022-07-11)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.13 (2022-07-05)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.12 (2022-06-29)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.11 (2022-06-16)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.10 (2022-06-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.9 (2022-05-26)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.8 (2022-05-25)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.7 (2022-05-17)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.6 (2022-05-16)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.5 (2022-05-09)
+
+* **Bug Fix**: Fixes a bug in LoadDefaultConfig to correctly assign ConfigSources so all config resolvers have access to the config sources. This fixes the feature/ec2/imds client not having configuration applied via config.LoadOptions such as EC2IMDSClientEnableState. PR [#1682](https://github.com/aws/aws-sdk-go-v2/pull/1682)
+
+# v1.15.4 (2022-04-25)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.3 (2022-03-30)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.2 (2022-03-24)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.1 (2022-03-23)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.0 (2022-03-08)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.14.0 (2022-02-24)
+
+* **Feature**: Adds support for loading RetryMaxAttempts and RetryMod from the environment and shared configuration files. These parameters drive how the SDK's API client will initialize its default retryer, if custome retryer has not been specified. See [config](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/config) module and [aws.Config](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/aws#Config) for more information about and how to use these new options.
+* **Feature**: Adds support for the `ca_bundle` parameter in shared config and credentials files. The usage of the file is the same as environment variable, `AWS_CA_BUNDLE`, but sourced from shared config. Fixes [#1589](https://github.com/aws/aws-sdk-go-v2/issues/1589)
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.1 (2022-01-28)
+
+* **Bug Fix**: Fixes LoadDefaultConfig handling of errors returned by passed in functional options. Previously errors returned from the LoadOptions passed into LoadDefaultConfig were incorrectly ignored. [#1562](https://github.com/aws/aws-sdk-go-v2/pull/1562). Thanks to [Pinglei Guo](https://github.com/pingleig) for submitting this PR.
+* **Bug Fix**: Fixes the SDK's handling of `duration_sections` in the shared credentials file or specified in multiple shared config and shared credentials files under the same profile. [#1568](https://github.com/aws/aws-sdk-go-v2/pull/1568). Thanks to [Amir Szekely](https://github.com/kichik) for help reproduce this bug.
+* **Bug Fix**: Updates `config` module to use os.UserHomeDir instead of hard coded environment variable for OS. [#1563](https://github.com/aws/aws-sdk-go-v2/pull/1563)
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.0 (2022-01-14)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.0 (2022-01-07)
+
+* **Feature**: Add load option for CredentialCache. Adds a new member to the LoadOptions struct, CredentialsCacheOptions. This member allows specifying a function that will be used to configure the CredentialsCache. The CredentialsCacheOptions will only be used if the configuration loader will wrap the underlying credential provider in the CredentialsCache.
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.1 (2021-12-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.0 (2021-12-02)
+
+* **Feature**: Add support for specifying `EndpointResolverWithOptions` on `LoadOptions`, and associated `WithEndpointResolverWithOptions`.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.10.3 (2021-11-30)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.10.2 (2021-11-19)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.10.1 (2021-11-12)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.10.0 (2021-11-06)
+
+* **Feature**: The SDK now supports configuration of FIPS and DualStack endpoints using environment variables, shared configuration, or programmatically.
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.9.0 (2021-10-21)
+
+* **Feature**: Updated  to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.8.3 (2021-10-11)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.8.2 (2021-09-17)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.8.1 (2021-09-10)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.8.0 (2021-09-02)
+
+* **Feature**: Add support for S3 Multi-Region Access Point ARNs.
+
+# v1.7.0 (2021-08-27)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.6.1 (2021-08-19)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.6.0 (2021-08-04)
+
+* **Feature**: adds error handling for defered close calls
+* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.5.0 (2021-07-15)
+
+* **Feature**: Support has been added for EC2 IPv6-enabled Instance Metadata Service Endpoints.
+* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.4.1 (2021-07-01)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.4.0 (2021-06-25)
+
+* **Feature**: Adds configuration setting for enabling endpoint discovery.
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.0 (2021-05-20)
+
+* **Feature**: SSO credentials can now be defined alongside other credential providers within the same configuration profile.
+* **Bug Fix**: Profile names were incorrectly normalized to lower-case, which could result in unexpected profile configurations.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.2.0 (2021-05-14)
+
+* **Feature**: Constant has been added to modules to enable runtime version inspection for reporting.
+* **Dependency Update**: Updated to the latest SDK module versions
+
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/config/LICENSE.txt 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/config/LICENSE.txt
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/config/LICENSE.txt	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/config/LICENSE.txt	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/config/config.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/config/config.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/config/config.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/config/config.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,222 @@
+package config
+
+import (
+	"context"
+	"os"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+)
+
+// defaultAWSConfigResolvers are a slice of functions that will resolve external
+// configuration values into AWS configuration values.
+//
+// This will setup the AWS configuration's Region,
+var defaultAWSConfigResolvers = []awsConfigResolver{
+	// Resolves the default configuration the SDK's aws.Config will be
+	// initialized with.
+	resolveDefaultAWSConfig,
+
+	// Sets the logger to be used. Could be user provided logger, and client
+	// logging mode.
+	resolveLogger,
+	resolveClientLogMode,
+
+	// Sets the HTTP client and configuration to use for making requests using
+	// the HTTP transport.
+	resolveHTTPClient,
+	resolveCustomCABundle,
+
+	// Sets the endpoint resolving behavior the API Clients will use for making
+	// requests to. Clients default to their own clients this allows overrides
+	// to be specified. The resolveEndpointResolver option is deprecated, but
+	// we still need to set it for backwards compatibility on config
+	// construction.
+	resolveEndpointResolver,
+	resolveEndpointResolverWithOptions,
+
+	// Sets the retry behavior API clients will use within their retry attempt
+	// middleware. Defaults to unset, allowing API clients to define their own
+	// retry behavior.
+	resolveRetryer,
+
+	// Sets the region the API Clients should use for making requests to.
+	resolveRegion,
+	resolveEC2IMDSRegion,
+	resolveDefaultRegion,
+
+	// Sets the additional set of middleware stack mutators that will custom
+	// API client request pipeline middleware.
+	resolveAPIOptions,
+
+	// Resolves the DefaultsMode that should be used by SDK clients. If this
+	// mode is set to DefaultsModeAuto.
+	//
+	// Comes after HTTPClient and CustomCABundle to ensure the HTTP client is
+	// configured if provided before invoking IMDS if mode is auto. Comes
+	// before resolving credentials so that those subsequent clients use the
+	// configured auto mode.
+	resolveDefaultsModeOptions,
+
+	// Sets the resolved credentials the API clients will use for
+	// authentication. Provides the SDK's default credential chain.
+	//
+	// Should probably be the last step in the resolve chain to ensure that all
+	// other configurations are resolved first in case downstream credentials
+	// implementations depend on or can be configured with earlier resolved
+	// configuration options.
+	resolveCredentials,
+
+	// Sets the resolved bearer authentication token API clients will use for
+	// httpBearerAuth authentication scheme.
+	resolveBearerAuthToken,
+
+	// Sets the sdk app ID if present in env var or shared config profile
+	resolveAppID,
+
+	resolveBaseEndpoint,
+
+	// Sets the DisableRequestCompression if present in env var or shared config profile
+	resolveDisableRequestCompression,
+
+	// Sets the RequestMinCompressSizeBytes if present in env var or shared config profile
+	resolveRequestMinCompressSizeBytes,
+
+	// Sets the AccountIDEndpointMode if present in env var or shared config profile
+	resolveAccountIDEndpointMode,
+}
+
+// A Config represents a generic configuration value or set of values. This type
+// will be used by the AWSConfigResolvers to extract
+//
+// General the Config type will use type assertion against the Provider interfaces
+// to extract specific data from the Config.
+type Config interface{}
+
+// A loader is used to load external configuration data and returns it as
+// a generic Config type.
+//
+// The loader should return an error if it fails to load the external configuration
+// or the configuration data is malformed, or required components missing.
+type loader func(context.Context, configs) (Config, error)
+
+// An awsConfigResolver will extract configuration data from the configs slice
+// using the provider interfaces to extract specific functionality. The extracted
+// configuration values will be written to the AWS Config value.
+//
+// The resolver should return an error if it it fails to extract the data, the
+// data is malformed, or incomplete.
+type awsConfigResolver func(ctx context.Context, cfg *aws.Config, configs configs) error
+
+// configs is a slice of Config values. These values will be used by the
+// AWSConfigResolvers to extract external configuration values to populate the
+// AWS Config type.
+//
+// Use AppendFromLoaders to add additional external Config values that are
+// loaded from external sources.
+//
+// Use ResolveAWSConfig after external Config values have been added or loaded
+// to extract the loaded configuration values into the AWS Config.
+type configs []Config
+
+// AppendFromLoaders iterates over the slice of loaders passed in calling each
+// loader function in order. The external config value returned by the loader
+// will be added to the returned configs slice.
+//
+// If a loader returns an error this method will stop iterating and return
+// that error.
+func (cs configs) AppendFromLoaders(ctx context.Context, loaders []loader) (configs, error) {
+	for _, fn := range loaders {
+		cfg, err := fn(ctx, cs)
+		if err != nil {
+			return nil, err
+		}
+
+		cs = append(cs, cfg)
+	}
+
+	return cs, nil
+}
+
+// ResolveAWSConfig returns a AWS configuration populated with values by calling
+// the resolvers slice passed in. Each resolver is called in order. Any resolver
+// may overwrite the AWS Configuration value of a previous resolver.
+//
+// If an resolver returns an error this method will return that error, and stop
+// iterating over the resolvers.
+func (cs configs) ResolveAWSConfig(ctx context.Context, resolvers []awsConfigResolver) (aws.Config, error) {
+	var cfg aws.Config
+
+	for _, fn := range resolvers {
+		if err := fn(ctx, &cfg, cs); err != nil {
+			return aws.Config{}, err
+		}
+	}
+
+	return cfg, nil
+}
+
+// ResolveConfig calls the provide function passing slice of configuration sources.
+// This implements the aws.ConfigResolver interface.
+func (cs configs) ResolveConfig(f func(configs []interface{}) error) error {
+	var cfgs []interface{}
+	for i := range cs {
+		cfgs = append(cfgs, cs[i])
+	}
+	return f(cfgs)
+}
+
+// LoadDefaultConfig reads the SDK's default external configurations, and
+// populates an AWS Config with the values from the external configurations.
+//
+// An optional variadic set of additional Config values can be provided as input
+// that will be prepended to the configs slice. Use this to add custom configuration.
+// The custom configurations must satisfy the respective providers for their data
+// or the custom data will be ignored by the resolvers and config loaders.
+//
+//	cfg, err := config.LoadDefaultConfig( context.TODO(),
+//	   config.WithSharedConfigProfile("test-profile"),
+//	)
+//	if err != nil {
+//	   panic(fmt.Sprintf("failed loading config, %v", err))
+//	}
+//
+// The default configuration sources are:
+// * Environment Variables
+// * Shared Configuration and Shared Credentials files.
+func LoadDefaultConfig(ctx context.Context, optFns ...func(*LoadOptions) error) (cfg aws.Config, err error) {
+	var options LoadOptions
+	for _, optFn := range optFns {
+		if err := optFn(&options); err != nil {
+			return aws.Config{}, err
+		}
+	}
+
+	// assign Load Options to configs
+	var cfgCpy = configs{options}
+
+	cfgCpy, err = cfgCpy.AppendFromLoaders(ctx, resolveConfigLoaders(&options))
+	if err != nil {
+		return aws.Config{}, err
+	}
+
+	cfg, err = cfgCpy.ResolveAWSConfig(ctx, defaultAWSConfigResolvers)
+	if err != nil {
+		return aws.Config{}, err
+	}
+
+	return cfg, nil
+}
+
+func resolveConfigLoaders(options *LoadOptions) []loader {
+	loaders := make([]loader, 2)
+	loaders[0] = loadEnvConfig
+
+	// specification of a profile should cause a load failure if it doesn't exist
+	if os.Getenv(awsProfileEnvVar) != "" || options.SharedConfigProfile != "" {
+		loaders[1] = loadSharedConfig
+	} else {
+		loaders[1] = loadSharedConfigIgnoreNotExist
+	}
+
+	return loaders
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/config/defaultsmode.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/config/defaultsmode.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/config/defaultsmode.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/config/defaultsmode.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,47 @@
+package config
+
+import (
+	"context"
+	"os"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/aws/aws-sdk-go-v2/feature/ec2/imds"
+)
+
+const execEnvVar = "AWS_EXECUTION_ENV"
+
+// DefaultsModeOptions is the set of options that are used to configure
+type DefaultsModeOptions struct {
+	// The SDK configuration defaults mode. Defaults to legacy if not specified.
+	//
+	// Supported modes are: auto, cross-region, in-region, legacy, mobile, standard
+	Mode aws.DefaultsMode
+
+	// The EC2 Instance Metadata Client that should be used when performing environment
+	// discovery when aws.DefaultsModeAuto is set.
+	//
+	// If not specified the SDK will construct a client if the instance metadata service has not been disabled by
+	// the AWS_EC2_METADATA_DISABLED environment variable.
+	IMDSClient *imds.Client
+}
+
+func resolveDefaultsModeRuntimeEnvironment(ctx context.Context, envConfig *EnvConfig, client *imds.Client) (aws.RuntimeEnvironment, error) {
+	getRegionOutput, err := client.GetRegion(ctx, &imds.GetRegionInput{})
+	// honor context timeouts, but if we couldn't talk to IMDS don't fail runtime environment introspection.
+	select {
+	case <-ctx.Done():
+		return aws.RuntimeEnvironment{}, err
+	default:
+	}
+
+	var imdsRegion string
+	if err == nil {
+		imdsRegion = getRegionOutput.Region
+	}
+
+	return aws.RuntimeEnvironment{
+		EnvironmentIdentifier:     aws.ExecutionEnvironmentID(os.Getenv(execEnvVar)),
+		Region:                    envConfig.Region,
+		EC2InstanceMetadataRegion: imdsRegion,
+	}, nil
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/config/doc.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/config/doc.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/config/doc.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/config/doc.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,20 @@
+// Package config provides utilities for loading configuration from multiple
+// sources that can be used to configure the SDK's API clients, and utilities.
+//
+// The config package will load configuration from environment variables, AWS
+// shared configuration file (~/.aws/config), and AWS shared credentials file
+// (~/.aws/credentials).
+//
+// Use the LoadDefaultConfig to load configuration from all the SDK's supported
+// sources, and resolve credentials using the SDK's default credential chain.
+//
+// LoadDefaultConfig allows for a variadic list of additional Config sources that can
+// provide one or more configuration values which can be used to programmatically control the resolution
+// of a specific value, or allow for broader range of additional configuration sources not supported by the SDK.
+// A Config source implements one or more provider interfaces defined in this package. Config sources passed in will
+// take precedence over the default environment and shared config sources used by the SDK. If one or more Config sources
+// implement the same provider interface, priority will be handled by the order in which the sources were passed in.
+//
+// A number of helpers (prefixed by "With") are provided in this package that implement their respective provider
+// interface. These helpers should be used for overriding configuration programmatically at runtime.
+package config
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/config/env_config.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/config/env_config.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/config/env_config.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/config/env_config.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,856 @@
+package config
+
+import (
+	"bytes"
+	"context"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+	"strconv"
+	"strings"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/aws/aws-sdk-go-v2/feature/ec2/imds"
+	smithyrequestcompression "github.com/aws/smithy-go/private/requestcompression"
+)
+
+// CredentialsSourceName provides a name of the provider when config is
+// loaded from environment.
+const CredentialsSourceName = "EnvConfigCredentials"
+
+// Environment variables that will be read for configuration values.
+const (
+	awsAccessKeyIDEnvVar = "AWS_ACCESS_KEY_ID"
+	awsAccessKeyEnvVar   = "AWS_ACCESS_KEY"
+
+	awsSecretAccessKeyEnvVar = "AWS_SECRET_ACCESS_KEY"
+	awsSecretKeyEnvVar       = "AWS_SECRET_KEY"
+
+	awsSessionTokenEnvVar = "AWS_SESSION_TOKEN"
+
+	awsContainerCredentialsEndpointEnvVar     = "AWS_CONTAINER_CREDENTIALS_FULL_URI"
+	awsContainerCredentialsRelativePathEnvVar = "AWS_CONTAINER_CREDENTIALS_RELATIVE_URI"
+	awsContainerPProviderAuthorizationEnvVar  = "AWS_CONTAINER_AUTHORIZATION_TOKEN"
+
+	awsRegionEnvVar        = "AWS_REGION"
+	awsDefaultRegionEnvVar = "AWS_DEFAULT_REGION"
+
+	awsProfileEnvVar        = "AWS_PROFILE"
+	awsDefaultProfileEnvVar = "AWS_DEFAULT_PROFILE"
+
+	awsSharedCredentialsFileEnvVar = "AWS_SHARED_CREDENTIALS_FILE"
+
+	awsConfigFileEnvVar = "AWS_CONFIG_FILE"
+
+	awsCustomCABundleEnvVar = "AWS_CA_BUNDLE"
+
+	awsWebIdentityTokenFilePathEnvVar = "AWS_WEB_IDENTITY_TOKEN_FILE"
+
+	awsRoleARNEnvVar         = "AWS_ROLE_ARN"
+	awsRoleSessionNameEnvVar = "AWS_ROLE_SESSION_NAME"
+
+	awsEnableEndpointDiscoveryEnvVar = "AWS_ENABLE_ENDPOINT_DISCOVERY"
+
+	awsS3UseARNRegionEnvVar = "AWS_S3_USE_ARN_REGION"
+
+	awsEc2MetadataServiceEndpointModeEnvVar = "AWS_EC2_METADATA_SERVICE_ENDPOINT_MODE"
+
+	awsEc2MetadataServiceEndpointEnvVar = "AWS_EC2_METADATA_SERVICE_ENDPOINT"
+
+	awsEc2MetadataDisabled         = "AWS_EC2_METADATA_DISABLED"
+	awsEc2MetadataV1DisabledEnvVar = "AWS_EC2_METADATA_V1_DISABLED"
+
+	awsS3DisableMultiRegionAccessPointEnvVar = "AWS_S3_DISABLE_MULTIREGION_ACCESS_POINTS"
+
+	awsUseDualStackEndpoint = "AWS_USE_DUALSTACK_ENDPOINT"
+
+	awsUseFIPSEndpoint = "AWS_USE_FIPS_ENDPOINT"
+
+	awsDefaultMode = "AWS_DEFAULTS_MODE"
+
+	awsRetryMaxAttempts = "AWS_MAX_ATTEMPTS"
+	awsRetryMode        = "AWS_RETRY_MODE"
+	awsSdkAppID         = "AWS_SDK_UA_APP_ID"
+
+	awsIgnoreConfiguredEndpoints = "AWS_IGNORE_CONFIGURED_ENDPOINT_URLS"
+	awsEndpointURL               = "AWS_ENDPOINT_URL"
+
+	awsDisableRequestCompression      = "AWS_DISABLE_REQUEST_COMPRESSION"
+	awsRequestMinCompressionSizeBytes = "AWS_REQUEST_MIN_COMPRESSION_SIZE_BYTES"
+
+	awsS3DisableExpressSessionAuthEnv = "AWS_S3_DISABLE_EXPRESS_SESSION_AUTH"
+
+	awsAccountIDEnv             = "AWS_ACCOUNT_ID"
+	awsAccountIDEndpointModeEnv = "AWS_ACCOUNT_ID_ENDPOINT_MODE"
+)
+
+var (
+	credAccessEnvKeys = []string{
+		awsAccessKeyIDEnvVar,
+		awsAccessKeyEnvVar,
+	}
+	credSecretEnvKeys = []string{
+		awsSecretAccessKeyEnvVar,
+		awsSecretKeyEnvVar,
+	}
+	regionEnvKeys = []string{
+		awsRegionEnvVar,
+		awsDefaultRegionEnvVar,
+	}
+	profileEnvKeys = []string{
+		awsProfileEnvVar,
+		awsDefaultProfileEnvVar,
+	}
+)
+
+// EnvConfig is a collection of environment values the SDK will read
+// setup config from. All environment values are optional. But some values
+// such as credentials require multiple values to be complete or the values
+// will be ignored.
+type EnvConfig struct {
+	// Environment configuration values. If set both Access Key ID and Secret Access
+	// Key must be provided. Session Token can optionally also be provided, but is
+	// not required.
+	//
+	//	# Access Key ID
+	//	AWS_ACCESS_KEY_ID=AKID
+	//	AWS_ACCESS_KEY=AKID # only read if AWS_ACCESS_KEY_ID is not set.
+	//
+	//	# Secret Access Key
+	//	AWS_SECRET_ACCESS_KEY=SECRET
+	//	AWS_SECRET_KEY=SECRET # only read if AWS_SECRET_ACCESS_KEY is not set.
+	//
+	//	# Session Token
+	//	AWS_SESSION_TOKEN=TOKEN
+	Credentials aws.Credentials
+
+	// ContainerCredentialsEndpoint value is the HTTP enabled endpoint to retrieve credentials
+	// using the endpointcreds.Provider
+	ContainerCredentialsEndpoint string
+
+	// ContainerCredentialsRelativePath is the relative URI path that will be used when attempting to retrieve
+	// credentials from the container endpoint.
+	ContainerCredentialsRelativePath string
+
+	// ContainerAuthorizationToken is the authorization token that will be included in the HTTP Authorization
+	// header when attempting to retrieve credentials from the container credentials endpoint.
+	ContainerAuthorizationToken string
+
+	// Region value will instruct the SDK where to make service API requests to. If is
+	// not provided in the environment the region must be provided before a service
+	// client request is made.
+	//
+	//	AWS_REGION=us-west-2
+	//	AWS_DEFAULT_REGION=us-west-2
+	Region string
+
+	// Profile name the SDK should use when loading shared configuration from the
+	// shared configuration files. If not provided "default" will be used as the
+	// profile name.
+	//
+	//	AWS_PROFILE=my_profile
+	//	AWS_DEFAULT_PROFILE=my_profile
+	SharedConfigProfile string
+
+	// Shared credentials file path can be set to instruct the SDK to use an alternate
+	// file for the shared credentials. If not set the file will be loaded from
+	// $HOME/.aws/credentials on Linux/Unix based systems, and
+	// %USERPROFILE%\.aws\credentials on Windows.
+	//
+	//	AWS_SHARED_CREDENTIALS_FILE=$HOME/my_shared_credentials
+	SharedCredentialsFile string
+
+	// Shared config file path can be set to instruct the SDK to use an alternate
+	// file for the shared config. If not set the file will be loaded from
+	// $HOME/.aws/config on Linux/Unix based systems, and
+	// %USERPROFILE%\.aws\config on Windows.
+	//
+	//	AWS_CONFIG_FILE=$HOME/my_shared_config
+	SharedConfigFile string
+
+	// Sets the path to a custom Certificate Authority (CA) Bundle PEM file
+	// that the SDK will use instead of the system's root CA bundle.
+	// Only use this if you want to configure the SDK to use a custom set
+	// of CAs.
+	//
+	// Enabling this option will attempt to merge the Transport
+	// into the SDK's HTTP client. If the client's Transport is
+	// not a http.Transport an error will be returned. If the
+	// Transport's TLS config is set this option will cause the
+	// SDK to overwrite the Transport's TLS config's RootCAs value.
+	//
+	// Setting a custom HTTPClient in the aws.Config options will override this setting.
+	// To use this option and custom HTTP client, the HTTP client needs to be provided
+	// when creating the config. Not the service client.
+	//
+	//  AWS_CA_BUNDLE=$HOME/my_custom_ca_bundle
+	CustomCABundle string
+
+	// Enables endpoint discovery via environment variables.
+	//
+	//	AWS_ENABLE_ENDPOINT_DISCOVERY=true
+	EnableEndpointDiscovery aws.EndpointDiscoveryEnableState
+
+	// Specifies the WebIdentity token the SDK should use to assume a role
+	// with.
+	//
+	//  AWS_WEB_IDENTITY_TOKEN_FILE=file_path
+	WebIdentityTokenFilePath string
+
+	// Specifies the IAM role arn to use when assuming an role.
+	//
+	//  AWS_ROLE_ARN=role_arn
+	RoleARN string
+
+	// Specifies the IAM role session name to use when assuming a role.
+	//
+	//  AWS_ROLE_SESSION_NAME=session_name
+	RoleSessionName string
+
+	// Specifies if the S3 service should allow ARNs to direct the region
+	// the client's requests are sent to.
+	//
+	// AWS_S3_USE_ARN_REGION=true
+	S3UseARNRegion *bool
+
+	// Specifies if the EC2 IMDS service client is enabled.
+	//
+	// AWS_EC2_METADATA_DISABLED=true
+	EC2IMDSClientEnableState imds.ClientEnableState
+
+	// Specifies if EC2 IMDSv1 fallback is disabled.
+	//
+	// AWS_EC2_METADATA_V1_DISABLED=true
+	EC2IMDSv1Disabled *bool
+
+	// Specifies the EC2 Instance Metadata Service default endpoint selection mode (IPv4 or IPv6)
+	//
+	// AWS_EC2_METADATA_SERVICE_ENDPOINT_MODE=IPv6
+	EC2IMDSEndpointMode imds.EndpointModeState
+
+	// Specifies the EC2 Instance Metadata Service endpoint to use. If specified it overrides EC2IMDSEndpointMode.
+	//
+	// AWS_EC2_METADATA_SERVICE_ENDPOINT=http://fd00:ec2::254
+	EC2IMDSEndpoint string
+
+	// Specifies if the S3 service should disable multi-region access points
+	// support.
+	//
+	// AWS_S3_DISABLE_MULTIREGION_ACCESS_POINTS=true
+	S3DisableMultiRegionAccessPoints *bool
+
+	// Specifies that SDK clients must resolve a dual-stack endpoint for
+	// services.
+	//
+	// AWS_USE_DUALSTACK_ENDPOINT=true
+	UseDualStackEndpoint aws.DualStackEndpointState
+
+	// Specifies that SDK clients must resolve a FIPS endpoint for
+	// services.
+	//
+	// AWS_USE_FIPS_ENDPOINT=true
+	UseFIPSEndpoint aws.FIPSEndpointState
+
+	// Specifies the SDK Defaults Mode used by services.
+	//
+	// AWS_DEFAULTS_MODE=standard
+	DefaultsMode aws.DefaultsMode
+
+	// Specifies the maximum number attempts an API client will call an
+	// operation that fails with a retryable error.
+	//
+	// AWS_MAX_ATTEMPTS=3
+	RetryMaxAttempts int
+
+	// Specifies the retry model the API client will be created with.
+	//
+	// aws_retry_mode=standard
+	RetryMode aws.RetryMode
+
+	// aws sdk app ID that can be added to user agent header string
+	AppID string
+
+	// Flag used to disable configured endpoints.
+	IgnoreConfiguredEndpoints *bool
+
+	// Value to contain configured endpoints to be propagated to
+	// corresponding endpoint resolution field.
+	BaseEndpoint string
+
+	// determine if request compression is allowed, default to false
+	// retrieved from env var AWS_DISABLE_REQUEST_COMPRESSION
+	DisableRequestCompression *bool
+
+	// inclusive threshold request body size to trigger compression,
+	// default to 10240 and must be within 0 and 10485760 bytes inclusive
+	// retrieved from env var AWS_REQUEST_MIN_COMPRESSION_SIZE_BYTES
+	RequestMinCompressSizeBytes *int64
+
+	// Whether S3Express auth is disabled.
+	//
+	// This will NOT prevent requests from being made to S3Express buckets, it
+	// will only bypass the modified endpoint routing and signing behaviors
+	// associated with the feature.
+	S3DisableExpressAuth *bool
+
+	// Indicates whether account ID will be required/ignored in endpoint2.0 routing
+	AccountIDEndpointMode aws.AccountIDEndpointMode
+}
+
+// loadEnvConfig reads configuration values from the OS's environment variables.
+// Returning a Config typed EnvConfig to satisfy the ConfigLoader func type.
+func loadEnvConfig(ctx context.Context, cfgs configs) (Config, error) {
+	return NewEnvConfig()
+}
+
+// NewEnvConfig retrieves the SDK's environment configuration.
+// See `EnvConfig` for the values that will be retrieved.
+func NewEnvConfig() (EnvConfig, error) {
+	var cfg EnvConfig
+
+	creds := aws.Credentials{
+		Source: CredentialsSourceName,
+	}
+	setStringFromEnvVal(&creds.AccessKeyID, credAccessEnvKeys)
+	setStringFromEnvVal(&creds.SecretAccessKey, credSecretEnvKeys)
+	if creds.HasKeys() {
+		creds.AccountID = os.Getenv(awsAccountIDEnv)
+		creds.SessionToken = os.Getenv(awsSessionTokenEnvVar)
+		cfg.Credentials = creds
+	}
+
+	cfg.ContainerCredentialsEndpoint = os.Getenv(awsContainerCredentialsEndpointEnvVar)
+	cfg.ContainerCredentialsRelativePath = os.Getenv(awsContainerCredentialsRelativePathEnvVar)
+	cfg.ContainerAuthorizationToken = os.Getenv(awsContainerPProviderAuthorizationEnvVar)
+
+	setStringFromEnvVal(&cfg.Region, regionEnvKeys)
+	setStringFromEnvVal(&cfg.SharedConfigProfile, profileEnvKeys)
+
+	cfg.SharedCredentialsFile = os.Getenv(awsSharedCredentialsFileEnvVar)
+	cfg.SharedConfigFile = os.Getenv(awsConfigFileEnvVar)
+
+	cfg.CustomCABundle = os.Getenv(awsCustomCABundleEnvVar)
+
+	cfg.WebIdentityTokenFilePath = os.Getenv(awsWebIdentityTokenFilePathEnvVar)
+
+	cfg.RoleARN = os.Getenv(awsRoleARNEnvVar)
+	cfg.RoleSessionName = os.Getenv(awsRoleSessionNameEnvVar)
+
+	cfg.AppID = os.Getenv(awsSdkAppID)
+
+	if err := setBoolPtrFromEnvVal(&cfg.DisableRequestCompression, []string{awsDisableRequestCompression}); err != nil {
+		return cfg, err
+	}
+	if err := setInt64PtrFromEnvVal(&cfg.RequestMinCompressSizeBytes, []string{awsRequestMinCompressionSizeBytes}, smithyrequestcompression.MaxRequestMinCompressSizeBytes); err != nil {
+		return cfg, err
+	}
+
+	if err := setEndpointDiscoveryTypeFromEnvVal(&cfg.EnableEndpointDiscovery, []string{awsEnableEndpointDiscoveryEnvVar}); err != nil {
+		return cfg, err
+	}
+
+	if err := setBoolPtrFromEnvVal(&cfg.S3UseARNRegion, []string{awsS3UseARNRegionEnvVar}); err != nil {
+		return cfg, err
+	}
+
+	setEC2IMDSClientEnableState(&cfg.EC2IMDSClientEnableState, []string{awsEc2MetadataDisabled})
+	if err := setEC2IMDSEndpointMode(&cfg.EC2IMDSEndpointMode, []string{awsEc2MetadataServiceEndpointModeEnvVar}); err != nil {
+		return cfg, err
+	}
+	cfg.EC2IMDSEndpoint = os.Getenv(awsEc2MetadataServiceEndpointEnvVar)
+	if err := setBoolPtrFromEnvVal(&cfg.EC2IMDSv1Disabled, []string{awsEc2MetadataV1DisabledEnvVar}); err != nil {
+		return cfg, err
+	}
+
+	if err := setBoolPtrFromEnvVal(&cfg.S3DisableMultiRegionAccessPoints, []string{awsS3DisableMultiRegionAccessPointEnvVar}); err != nil {
+		return cfg, err
+	}
+
+	if err := setUseDualStackEndpointFromEnvVal(&cfg.UseDualStackEndpoint, []string{awsUseDualStackEndpoint}); err != nil {
+		return cfg, err
+	}
+
+	if err := setUseFIPSEndpointFromEnvVal(&cfg.UseFIPSEndpoint, []string{awsUseFIPSEndpoint}); err != nil {
+		return cfg, err
+	}
+
+	if err := setDefaultsModeFromEnvVal(&cfg.DefaultsMode, []string{awsDefaultMode}); err != nil {
+		return cfg, err
+	}
+
+	if err := setIntFromEnvVal(&cfg.RetryMaxAttempts, []string{awsRetryMaxAttempts}); err != nil {
+		return cfg, err
+	}
+	if err := setRetryModeFromEnvVal(&cfg.RetryMode, []string{awsRetryMode}); err != nil {
+		return cfg, err
+	}
+
+	setStringFromEnvVal(&cfg.BaseEndpoint, []string{awsEndpointURL})
+
+	if err := setBoolPtrFromEnvVal(&cfg.IgnoreConfiguredEndpoints, []string{awsIgnoreConfiguredEndpoints}); err != nil {
+		return cfg, err
+	}
+
+	if err := setBoolPtrFromEnvVal(&cfg.S3DisableExpressAuth, []string{awsS3DisableExpressSessionAuthEnv}); err != nil {
+		return cfg, err
+	}
+
+	if err := setAIDEndPointModeFromEnvVal(&cfg.AccountIDEndpointMode, []string{awsAccountIDEndpointModeEnv}); err != nil {
+		return cfg, err
+	}
+
+	return cfg, nil
+}
+
+func (c EnvConfig) getDefaultsMode(ctx context.Context) (aws.DefaultsMode, bool, error) {
+	if len(c.DefaultsMode) == 0 {
+		return "", false, nil
+	}
+	return c.DefaultsMode, true, nil
+}
+
+func (c EnvConfig) getAppID(context.Context) (string, bool, error) {
+	return c.AppID, len(c.AppID) > 0, nil
+}
+
+func (c EnvConfig) getDisableRequestCompression(context.Context) (bool, bool, error) {
+	if c.DisableRequestCompression == nil {
+		return false, false, nil
+	}
+	return *c.DisableRequestCompression, true, nil
+}
+
+func (c EnvConfig) getRequestMinCompressSizeBytes(context.Context) (int64, bool, error) {
+	if c.RequestMinCompressSizeBytes == nil {
+		return 0, false, nil
+	}
+	return *c.RequestMinCompressSizeBytes, true, nil
+}
+
+func (c EnvConfig) getAccountIDEndpointMode(context.Context) (aws.AccountIDEndpointMode, bool, error) {
+	return c.AccountIDEndpointMode, len(c.AccountIDEndpointMode) > 0, nil
+}
+
+// GetRetryMaxAttempts returns the value of AWS_MAX_ATTEMPTS if was specified,
+// and not 0.
+func (c EnvConfig) GetRetryMaxAttempts(ctx context.Context) (int, bool, error) {
+	if c.RetryMaxAttempts == 0 {
+		return 0, false, nil
+	}
+	return c.RetryMaxAttempts, true, nil
+}
+
+// GetRetryMode returns the RetryMode of AWS_RETRY_MODE if was specified, and a
+// valid value.
+func (c EnvConfig) GetRetryMode(ctx context.Context) (aws.RetryMode, bool, error) {
+	if len(c.RetryMode) == 0 {
+		return "", false, nil
+	}
+	return c.RetryMode, true, nil
+}
+
+func setEC2IMDSClientEnableState(state *imds.ClientEnableState, keys []string) {
+	for _, k := range keys {
+		value := os.Getenv(k)
+		if len(value) == 0 {
+			continue
+		}
+		switch {
+		case strings.EqualFold(value, "true"):
+			*state = imds.ClientDisabled
+		case strings.EqualFold(value, "false"):
+			*state = imds.ClientEnabled
+		default:
+			continue
+		}
+		break
+	}
+}
+
+func setDefaultsModeFromEnvVal(mode *aws.DefaultsMode, keys []string) error {
+	for _, k := range keys {
+		if value := os.Getenv(k); len(value) > 0 {
+			if ok := mode.SetFromString(value); !ok {
+				return fmt.Errorf("invalid %s value: %s", k, value)
+			}
+			break
+		}
+	}
+	return nil
+}
+
+func setRetryModeFromEnvVal(mode *aws.RetryMode, keys []string) (err error) {
+	for _, k := range keys {
+		if value := os.Getenv(k); len(value) > 0 {
+			*mode, err = aws.ParseRetryMode(value)
+			if err != nil {
+				return fmt.Errorf("invalid %s value, %w", k, err)
+			}
+			break
+		}
+	}
+	return nil
+}
+
+func setEC2IMDSEndpointMode(mode *imds.EndpointModeState, keys []string) error {
+	for _, k := range keys {
+		value := os.Getenv(k)
+		if len(value) == 0 {
+			continue
+		}
+		if err := mode.SetFromString(value); err != nil {
+			return fmt.Errorf("invalid value for environment variable, %s=%s, %v", k, value, err)
+		}
+	}
+	return nil
+}
+
+func setAIDEndPointModeFromEnvVal(m *aws.AccountIDEndpointMode, keys []string) error {
+	for _, k := range keys {
+		value := os.Getenv(k)
+		if len(value) == 0 {
+			continue
+		}
+
+		switch value {
+		case "preferred":
+			*m = aws.AccountIDEndpointModePreferred
+		case "required":
+			*m = aws.AccountIDEndpointModeRequired
+		case "disabled":
+			*m = aws.AccountIDEndpointModeDisabled
+		default:
+			return fmt.Errorf("invalid value for environment variable, %s=%s, must be preferred/required/disabled", k, value)
+		}
+		break
+	}
+	return nil
+}
+
+// GetRegion returns the AWS Region if set in the environment. Returns an empty
+// string if not set.
+func (c EnvConfig) getRegion(ctx context.Context) (string, bool, error) {
+	if len(c.Region) == 0 {
+		return "", false, nil
+	}
+	return c.Region, true, nil
+}
+
+// GetSharedConfigProfile returns the shared config profile if set in the
+// environment. Returns an empty string if not set.
+func (c EnvConfig) getSharedConfigProfile(ctx context.Context) (string, bool, error) {
+	if len(c.SharedConfigProfile) == 0 {
+		return "", false, nil
+	}
+
+	return c.SharedConfigProfile, true, nil
+}
+
+// getSharedConfigFiles returns a slice of filenames set in the environment.
+//
+// Will return the filenames in the order of:
+// * Shared Config
+func (c EnvConfig) getSharedConfigFiles(context.Context) ([]string, bool, error) {
+	var files []string
+	if v := c.SharedConfigFile; len(v) > 0 {
+		files = append(files, v)
+	}
+
+	if len(files) == 0 {
+		return nil, false, nil
+	}
+	return files, true, nil
+}
+
+// getSharedCredentialsFiles returns a slice of filenames set in the environment.
+//
+// Will return the filenames in the order of:
+// * Shared Credentials
+func (c EnvConfig) getSharedCredentialsFiles(context.Context) ([]string, bool, error) {
+	var files []string
+	if v := c.SharedCredentialsFile; len(v) > 0 {
+		files = append(files, v)
+	}
+	if len(files) == 0 {
+		return nil, false, nil
+	}
+	return files, true, nil
+}
+
+// GetCustomCABundle returns the custom CA bundle's PEM bytes if the file path was set in the environment.
+func (c EnvConfig) getCustomCABundle(context.Context) (io.Reader, bool, error) {
+	if len(c.CustomCABundle) == 0 {
+		return nil, false, nil
+	}
+
+	b, err := ioutil.ReadFile(c.CustomCABundle)
+	if err != nil {
+		return nil, false, err
+	}
+	return bytes.NewReader(b), true, nil
+}
+
+// GetIgnoreConfiguredEndpoints is used in knowing when to disable configured
+// endpoints feature.
+func (c EnvConfig) GetIgnoreConfiguredEndpoints(context.Context) (bool, bool, error) {
+	if c.IgnoreConfiguredEndpoints == nil {
+		return false, false, nil
+	}
+
+	return *c.IgnoreConfiguredEndpoints, true, nil
+}
+
+func (c EnvConfig) getBaseEndpoint(context.Context) (string, bool, error) {
+	return c.BaseEndpoint, len(c.BaseEndpoint) > 0, nil
+}
+
+// GetServiceBaseEndpoint is used to retrieve a normalized SDK ID for use
+// with configured endpoints.
+func (c EnvConfig) GetServiceBaseEndpoint(ctx context.Context, sdkID string) (string, bool, error) {
+	if endpt := os.Getenv(fmt.Sprintf("%s_%s", awsEndpointURL, normalizeEnv(sdkID))); endpt != "" {
+		return endpt, true, nil
+	}
+	return "", false, nil
+}
+
+func normalizeEnv(sdkID string) string {
+	upper := strings.ToUpper(sdkID)
+	return strings.ReplaceAll(upper, " ", "_")
+}
+
+// GetS3UseARNRegion returns whether to allow ARNs to direct the region
+// the S3 client's requests are sent to.
+func (c EnvConfig) GetS3UseARNRegion(ctx context.Context) (value, ok bool, err error) {
+	if c.S3UseARNRegion == nil {
+		return false, false, nil
+	}
+
+	return *c.S3UseARNRegion, true, nil
+}
+
+// GetS3DisableMultiRegionAccessPoints returns whether to disable multi-region access point
+// support for the S3 client.
+func (c EnvConfig) GetS3DisableMultiRegionAccessPoints(ctx context.Context) (value, ok bool, err error) {
+	if c.S3DisableMultiRegionAccessPoints == nil {
+		return false, false, nil
+	}
+
+	return *c.S3DisableMultiRegionAccessPoints, true, nil
+}
+
+// GetUseDualStackEndpoint returns whether the service's dual-stack endpoint should be
+// used for requests.
+func (c EnvConfig) GetUseDualStackEndpoint(ctx context.Context) (value aws.DualStackEndpointState, found bool, err error) {
+	if c.UseDualStackEndpoint == aws.DualStackEndpointStateUnset {
+		return aws.DualStackEndpointStateUnset, false, nil
+	}
+
+	return c.UseDualStackEndpoint, true, nil
+}
+
+// GetUseFIPSEndpoint returns whether the service's FIPS endpoint should be
+// used for requests.
+func (c EnvConfig) GetUseFIPSEndpoint(ctx context.Context) (value aws.FIPSEndpointState, found bool, err error) {
+	if c.UseFIPSEndpoint == aws.FIPSEndpointStateUnset {
+		return aws.FIPSEndpointStateUnset, false, nil
+	}
+
+	return c.UseFIPSEndpoint, true, nil
+}
+
+func setStringFromEnvVal(dst *string, keys []string) {
+	for _, k := range keys {
+		if v := os.Getenv(k); len(v) > 0 {
+			*dst = v
+			break
+		}
+	}
+}
+
+func setIntFromEnvVal(dst *int, keys []string) error {
+	for _, k := range keys {
+		if v := os.Getenv(k); len(v) > 0 {
+			i, err := strconv.ParseInt(v, 10, 64)
+			if err != nil {
+				return fmt.Errorf("invalid value %s=%s, %w", k, v, err)
+			}
+			*dst = int(i)
+			break
+		}
+	}
+
+	return nil
+}
+
+func setBoolPtrFromEnvVal(dst **bool, keys []string) error {
+	for _, k := range keys {
+		value := os.Getenv(k)
+		if len(value) == 0 {
+			continue
+		}
+
+		if *dst == nil {
+			*dst = new(bool)
+		}
+
+		switch {
+		case strings.EqualFold(value, "false"):
+			**dst = false
+		case strings.EqualFold(value, "true"):
+			**dst = true
+		default:
+			return fmt.Errorf(
+				"invalid value for environment variable, %s=%s, need true or false",
+				k, value)
+		}
+		break
+	}
+
+	return nil
+}
+
+func setInt64PtrFromEnvVal(dst **int64, keys []string, max int64) error {
+	for _, k := range keys {
+		value := os.Getenv(k)
+		if len(value) == 0 {
+			continue
+		}
+
+		v, err := strconv.ParseInt(value, 10, 64)
+		if err != nil {
+			return fmt.Errorf("invalid value for env var, %s=%s, need int64", k, value)
+		} else if v < 0 || v > max {
+			return fmt.Errorf("invalid range for env var min request compression size bytes %q, must be within 0 and 10485760 inclusively", v)
+		}
+		if *dst == nil {
+			*dst = new(int64)
+		}
+
+		**dst = v
+		break
+	}
+
+	return nil
+}
+
+func setEndpointDiscoveryTypeFromEnvVal(dst *aws.EndpointDiscoveryEnableState, keys []string) error {
+	for _, k := range keys {
+		value := os.Getenv(k)
+		if len(value) == 0 {
+			continue // skip if empty
+		}
+
+		switch {
+		case strings.EqualFold(value, endpointDiscoveryDisabled):
+			*dst = aws.EndpointDiscoveryDisabled
+		case strings.EqualFold(value, endpointDiscoveryEnabled):
+			*dst = aws.EndpointDiscoveryEnabled
+		case strings.EqualFold(value, endpointDiscoveryAuto):
+			*dst = aws.EndpointDiscoveryAuto
+		default:
+			return fmt.Errorf(
+				"invalid value for environment variable, %s=%s, need true, false or auto",
+				k, value)
+		}
+	}
+	return nil
+}
+
+func setUseDualStackEndpointFromEnvVal(dst *aws.DualStackEndpointState, keys []string) error {
+	for _, k := range keys {
+		value := os.Getenv(k)
+		if len(value) == 0 {
+			continue // skip if empty
+		}
+
+		switch {
+		case strings.EqualFold(value, "true"):
+			*dst = aws.DualStackEndpointStateEnabled
+		case strings.EqualFold(value, "false"):
+			*dst = aws.DualStackEndpointStateDisabled
+		default:
+			return fmt.Errorf(
+				"invalid value for environment variable, %s=%s, need true, false",
+				k, value)
+		}
+	}
+	return nil
+}
+
+func setUseFIPSEndpointFromEnvVal(dst *aws.FIPSEndpointState, keys []string) error {
+	for _, k := range keys {
+		value := os.Getenv(k)
+		if len(value) == 0 {
+			continue // skip if empty
+		}
+
+		switch {
+		case strings.EqualFold(value, "true"):
+			*dst = aws.FIPSEndpointStateEnabled
+		case strings.EqualFold(value, "false"):
+			*dst = aws.FIPSEndpointStateDisabled
+		default:
+			return fmt.Errorf(
+				"invalid value for environment variable, %s=%s, need true, false",
+				k, value)
+		}
+	}
+	return nil
+}
+
+// GetEnableEndpointDiscovery returns resolved value for EnableEndpointDiscovery env variable setting.
+func (c EnvConfig) GetEnableEndpointDiscovery(ctx context.Context) (value aws.EndpointDiscoveryEnableState, found bool, err error) {
+	if c.EnableEndpointDiscovery == aws.EndpointDiscoveryUnset {
+		return aws.EndpointDiscoveryUnset, false, nil
+	}
+
+	return c.EnableEndpointDiscovery, true, nil
+}
+
+// GetEC2IMDSClientEnableState implements a EC2IMDSClientEnableState options resolver interface.
+func (c EnvConfig) GetEC2IMDSClientEnableState() (imds.ClientEnableState, bool, error) {
+	if c.EC2IMDSClientEnableState == imds.ClientDefaultEnableState {
+		return imds.ClientDefaultEnableState, false, nil
+	}
+
+	return c.EC2IMDSClientEnableState, true, nil
+}
+
+// GetEC2IMDSEndpointMode implements a EC2IMDSEndpointMode option resolver interface.
+func (c EnvConfig) GetEC2IMDSEndpointMode() (imds.EndpointModeState, bool, error) {
+	if c.EC2IMDSEndpointMode == imds.EndpointModeStateUnset {
+		return imds.EndpointModeStateUnset, false, nil
+	}
+
+	return c.EC2IMDSEndpointMode, true, nil
+}
+
+// GetEC2IMDSEndpoint implements a EC2IMDSEndpoint option resolver interface.
+func (c EnvConfig) GetEC2IMDSEndpoint() (string, bool, error) {
+	if len(c.EC2IMDSEndpoint) == 0 {
+		return "", false, nil
+	}
+
+	return c.EC2IMDSEndpoint, true, nil
+}
+
+// GetEC2IMDSV1FallbackDisabled implements an EC2IMDSV1FallbackDisabled option
+// resolver interface.
+func (c EnvConfig) GetEC2IMDSV1FallbackDisabled() (bool, bool) {
+	if c.EC2IMDSv1Disabled == nil {
+		return false, false
+	}
+
+	return *c.EC2IMDSv1Disabled, true
+}
+
+// GetS3DisableExpressAuth returns the configured value for
+// [EnvConfig.S3DisableExpressAuth].
+func (c EnvConfig) GetS3DisableExpressAuth() (value, ok bool) {
+	if c.S3DisableExpressAuth == nil {
+		return false, false
+	}
+
+	return *c.S3DisableExpressAuth, true
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/config/generate.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/config/generate.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/config/generate.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/config/generate.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,4 @@
+package config
+
+//go:generate go run -tags codegen ./codegen -output=provider_assert_test.go
+//go:generate gofmt -s -w ./
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/config/go_module_metadata.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/config/go_module_metadata.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/config/go_module_metadata.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/config/go_module_metadata.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,6 @@
+// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT.
+
+package config
+
+// goModuleVersion is the tagged release for this module
+const goModuleVersion = "1.27.27"
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/config/load_options.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/config/load_options.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/config/load_options.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/config/load_options.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,1141 @@
+package config
+
+import (
+	"context"
+	"io"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds"
+	"github.com/aws/aws-sdk-go-v2/credentials/endpointcreds"
+	"github.com/aws/aws-sdk-go-v2/credentials/processcreds"
+	"github.com/aws/aws-sdk-go-v2/credentials/ssocreds"
+	"github.com/aws/aws-sdk-go-v2/credentials/stscreds"
+	"github.com/aws/aws-sdk-go-v2/feature/ec2/imds"
+	smithybearer "github.com/aws/smithy-go/auth/bearer"
+	"github.com/aws/smithy-go/logging"
+	"github.com/aws/smithy-go/middleware"
+)
+
+// LoadOptionsFunc is a type alias for LoadOptions functional option
+type LoadOptionsFunc func(*LoadOptions) error
+
+// LoadOptions are discrete set of options that are valid for loading the
+// configuration
+type LoadOptions struct {
+
+	// Region is the region to send requests to.
+	Region string
+
+	// Credentials object to use when signing requests.
+	Credentials aws.CredentialsProvider
+
+	// Token provider for authentication operations with bearer authentication.
+	BearerAuthTokenProvider smithybearer.TokenProvider
+
+	// HTTPClient the SDK's API clients will use to invoke HTTP requests.
+	HTTPClient HTTPClient
+
+	// EndpointResolver that can be used to provide or override an endpoint for
+	// the given service and region.
+	//
+	// See the `aws.EndpointResolver` documentation on usage.
+	//
+	// Deprecated: See EndpointResolverWithOptions
+	EndpointResolver aws.EndpointResolver
+
+	// EndpointResolverWithOptions that can be used to provide or override an
+	// endpoint for the given service and region.
+	//
+	// See the `aws.EndpointResolverWithOptions` documentation on usage.
+	EndpointResolverWithOptions aws.EndpointResolverWithOptions
+
+	// RetryMaxAttempts specifies the maximum number attempts an API client
+	// will call an operation that fails with a retryable error.
+	//
+	// This value will only be used if Retryer option is nil.
+	RetryMaxAttempts int
+
+	// RetryMode specifies the retry model the API client will be created with.
+	//
+	// This value will only be used if Retryer option is nil.
+	RetryMode aws.RetryMode
+
+	// Retryer is a function that provides a Retryer implementation. A Retryer
+	// guides how HTTP requests should be retried in case of recoverable
+	// failures.
+	//
+	// If not nil, RetryMaxAttempts, and RetryMode will be ignored.
+	Retryer func() aws.Retryer
+
+	// APIOptions provides the set of middleware mutations modify how the API
+	// client requests will be handled. This is useful for adding additional
+	// tracing data to a request, or changing behavior of the SDK's client.
+	APIOptions []func(*middleware.Stack) error
+
+	// Logger writer interface to write logging messages to.
+	Logger logging.Logger
+
+	// ClientLogMode is used to configure the events that will be sent to the
+	// configured logger. This can be used to configure the logging of signing,
+	// retries, request, and responses of the SDK clients.
+	//
+	// See the ClientLogMode type documentation for the complete set of logging
+	// modes and available configuration.
+	ClientLogMode *aws.ClientLogMode
+
+	// SharedConfigProfile is the profile to be used when loading the SharedConfig
+	SharedConfigProfile string
+
+	// SharedConfigFiles is the slice of custom shared config files to use when
+	// loading the SharedConfig. A non-default profile used within config file
+	// must have name defined with prefix 'profile '. eg [profile xyz]
+	// indicates a profile with name 'xyz'. To read more on the format of the
+	// config file, please refer the documentation at
+	// https://docs.aws.amazon.com/credref/latest/refdocs/file-format.html#file-format-config
+	//
+	// If duplicate profiles are provided within the same, or across multiple
+	// shared config files, the next parsed profile will override only the
+	// properties that conflict with the previously defined profile. Note that
+	// if duplicate profiles are provided within the SharedCredentialsFiles and
+	// SharedConfigFiles, the properties defined in shared credentials file
+	// take precedence.
+	SharedConfigFiles []string
+
+	// SharedCredentialsFile is the slice of custom shared credentials files to
+	// use when loading the SharedConfig. The profile name used within
+// credentials file must not be prefixed with 'profile '. eg [xyz] indicates a
+	// profile with name 'xyz'. Profile declared as [profile xyz] will be
+	// ignored. To read more on the format of the credentials file, please
+	// refer the documentation at
+	// https://docs.aws.amazon.com/credref/latest/refdocs/file-format.html#file-format-creds
+	//
+// If duplicate profiles are provided within the same, or across multiple
+	// shared credentials files, the next parsed profile will override only
+	// properties that conflict with the previously defined profile. Note that
+	// if duplicate profiles are provided within the SharedCredentialsFiles and
+	// SharedConfigFiles, the properties defined in shared credentials file
+	// take precedence.
+	SharedCredentialsFiles []string
+
+	// CustomCABundle is CA bundle PEM bytes reader
+	CustomCABundle io.Reader
+
+	// DefaultRegion is the fall back region, used if a region was not resolved
+	// from other sources
+	DefaultRegion string
+
+	// UseEC2IMDSRegion indicates if SDK should retrieve the region
+	// from the EC2 Metadata service
+	UseEC2IMDSRegion *UseEC2IMDSRegion
+
+	// CredentialsCacheOptions is a function for setting the
+	// aws.CredentialsCacheOptions
+	CredentialsCacheOptions func(*aws.CredentialsCacheOptions)
+
+	// BearerAuthTokenCacheOptions is a function for setting the smithy-go
+	// auth/bearer#TokenCacheOptions
+	BearerAuthTokenCacheOptions func(*smithybearer.TokenCacheOptions)
+
+	// SSOTokenProviderOptions is a function for setting the
+	// credentials/ssocreds.SSOTokenProviderOptions
+	SSOTokenProviderOptions func(*ssocreds.SSOTokenProviderOptions)
+
+	// ProcessCredentialOptions is a function for setting
+	// the processcreds.Options
+	ProcessCredentialOptions func(*processcreds.Options)
+
+	// EC2RoleCredentialOptions is a function for setting
+	// the ec2rolecreds.Options
+	EC2RoleCredentialOptions func(*ec2rolecreds.Options)
+
+	// EndpointCredentialOptions is a function for setting
+	// the endpointcreds.Options
+	EndpointCredentialOptions func(*endpointcreds.Options)
+
+	// WebIdentityRoleCredentialOptions is a function for setting
+	// the stscreds.WebIdentityRoleOptions
+	WebIdentityRoleCredentialOptions func(*stscreds.WebIdentityRoleOptions)
+
+	// AssumeRoleCredentialOptions is a function for setting the
+	// stscreds.AssumeRoleOptions
+	AssumeRoleCredentialOptions func(*stscreds.AssumeRoleOptions)
+
+	// SSOProviderOptions is a function for setting
+	// the ssocreds.Options
+	SSOProviderOptions func(options *ssocreds.Options)
+
+	// LogConfigurationWarnings when set to true, enables logging
+	// configuration warnings
+	LogConfigurationWarnings *bool
+
+	// S3UseARNRegion specifies if the S3 service should allow ARNs to direct
+	// the region, the client's requests are sent to.
+	S3UseARNRegion *bool
+
+	// S3DisableMultiRegionAccessPoints specifies if the S3 service should disable
+	// the S3 Multi-Region access points feature.
+	S3DisableMultiRegionAccessPoints *bool
+
+	// EnableEndpointDiscovery specifies if endpoint discovery is enable for
+	// the client.
+	EnableEndpointDiscovery aws.EndpointDiscoveryEnableState
+
+	// Specifies if the EC2 IMDS service client is enabled.
+	//
+	// AWS_EC2_METADATA_DISABLED=true
+	EC2IMDSClientEnableState imds.ClientEnableState
+
+	// Specifies the EC2 Instance Metadata Service default endpoint selection
+	// mode (IPv4 or IPv6)
+	EC2IMDSEndpointMode imds.EndpointModeState
+
+	// Specifies the EC2 Instance Metadata Service endpoint to use. If
+	// specified it overrides EC2IMDSEndpointMode.
+	EC2IMDSEndpoint string
+
+	// Specifies that SDK clients must resolve a dual-stack endpoint for
+	// services.
+	UseDualStackEndpoint aws.DualStackEndpointState
+
+	// Specifies that SDK clients must resolve a FIPS endpoint for
+	// services.
+	UseFIPSEndpoint aws.FIPSEndpointState
+
+	// Specifies the SDK configuration mode for defaults.
+	DefaultsModeOptions DefaultsModeOptions
+
+	// The sdk app ID retrieved from env var or shared config to be added to request user agent header
+	AppID string
+
+	// Specifies whether an operation request could be compressed
+	DisableRequestCompression *bool
+
+	// The inclusive min bytes of a request body that could be compressed
+	RequestMinCompressSizeBytes *int64
+
+	// Whether S3 Express auth is disabled.
+	S3DisableExpressAuth *bool
+
+	AccountIDEndpointMode aws.AccountIDEndpointMode
+}
+
+func (o LoadOptions) getDefaultsMode(ctx context.Context) (aws.DefaultsMode, bool, error) {
+	if len(o.DefaultsModeOptions.Mode) == 0 {
+		return "", false, nil
+	}
+	return o.DefaultsModeOptions.Mode, true, nil
+}
+
+// GetRetryMaxAttempts returns the RetryMaxAttempts if specified in the
+// LoadOptions and not 0.
+func (o LoadOptions) GetRetryMaxAttempts(ctx context.Context) (int, bool, error) {
+	if o.RetryMaxAttempts == 0 {
+		return 0, false, nil
+	}
+	return o.RetryMaxAttempts, true, nil
+}
+
+// GetRetryMode returns the RetryMode specified in the LoadOptions.
+func (o LoadOptions) GetRetryMode(ctx context.Context) (aws.RetryMode, bool, error) {
+	if len(o.RetryMode) == 0 {
+		return "", false, nil
+	}
+	return o.RetryMode, true, nil
+}
+
+func (o LoadOptions) getDefaultsModeIMDSClient(ctx context.Context) (*imds.Client, bool, error) {
+	if o.DefaultsModeOptions.IMDSClient == nil {
+		return nil, false, nil
+	}
+	return o.DefaultsModeOptions.IMDSClient, true, nil
+}
+
+// getRegion returns Region from config's LoadOptions
+func (o LoadOptions) getRegion(ctx context.Context) (string, bool, error) {
+	if len(o.Region) == 0 {
+		return "", false, nil
+	}
+
+	return o.Region, true, nil
+}
+
+// getAppID returns AppID from config's LoadOptions
+func (o LoadOptions) getAppID(ctx context.Context) (string, bool, error) {
+	return o.AppID, len(o.AppID) > 0, nil
+}
+
+// getDisableRequestCompression returns DisableRequestCompression from config's LoadOptions
+func (o LoadOptions) getDisableRequestCompression(ctx context.Context) (bool, bool, error) {
+	if o.DisableRequestCompression == nil {
+		return false, false, nil
+	}
+	return *o.DisableRequestCompression, true, nil
+}
+
+// getRequestMinCompressSizeBytes returns RequestMinCompressSizeBytes from config's LoadOptions
+func (o LoadOptions) getRequestMinCompressSizeBytes(ctx context.Context) (int64, bool, error) {
+	if o.RequestMinCompressSizeBytes == nil {
+		return 0, false, nil
+	}
+	return *o.RequestMinCompressSizeBytes, true, nil
+}
+
+func (o LoadOptions) getAccountIDEndpointMode(ctx context.Context) (aws.AccountIDEndpointMode, bool, error) {
+	return o.AccountIDEndpointMode, len(o.AccountIDEndpointMode) > 0, nil
+}
+
+// WithRegion is a helper function to construct functional options
+// that sets Region on config's LoadOptions. Setting the region to
+// an empty string, will result in the region value being ignored.
+// If multiple WithRegion calls are made, the last call overrides
+// the previous call values.
+func WithRegion(v string) LoadOptionsFunc {
+	return func(o *LoadOptions) error {
+		o.Region = v
+		return nil
+	}
+}
+
+// WithAppID is a helper function to construct functional options
+// that sets AppID on config's LoadOptions.
+func WithAppID(ID string) LoadOptionsFunc {
+	return func(o *LoadOptions) error {
+		o.AppID = ID
+		return nil
+	}
+}
+
+// WithDisableRequestCompression is a helper function to construct functional options
+// that sets DisableRequestCompression on config's LoadOptions.
+func WithDisableRequestCompression(DisableRequestCompression *bool) LoadOptionsFunc {
+	return func(o *LoadOptions) error {
+		if DisableRequestCompression == nil {
+			return nil
+		}
+		o.DisableRequestCompression = DisableRequestCompression
+		return nil
+	}
+}
+
+// WithRequestMinCompressSizeBytes is a helper function to construct functional options
+// that sets RequestMinCompressSizeBytes on config's LoadOptions.
+func WithRequestMinCompressSizeBytes(RequestMinCompressSizeBytes *int64) LoadOptionsFunc {
+	return func(o *LoadOptions) error {
+		if RequestMinCompressSizeBytes == nil {
+			return nil
+		}
+		o.RequestMinCompressSizeBytes = RequestMinCompressSizeBytes
+		return nil
+	}
+}
+
+// WithAccountIDEndpointMode is a helper function to construct functional options
+// that sets AccountIDEndpointMode on config's LoadOptions
+func WithAccountIDEndpointMode(m aws.AccountIDEndpointMode) LoadOptionsFunc {
+	return func(o *LoadOptions) error {
+		if m != "" {
+			o.AccountIDEndpointMode = m
+		}
+		return nil
+	}
+}
+
+// getDefaultRegion returns DefaultRegion from config's LoadOptions
+func (o LoadOptions) getDefaultRegion(ctx context.Context) (string, bool, error) {
+	if len(o.DefaultRegion) == 0 {
+		return "", false, nil
+	}
+
+	return o.DefaultRegion, true, nil
+}
+
+// WithDefaultRegion is a helper function to construct functional options
+// that sets a DefaultRegion on config's LoadOptions. Setting the default
+// region to an empty string, will result in the default region value
+// being ignored. If multiple WithDefaultRegion calls are made, the last
+// call overrides the previous call values. Note that both WithRegion and
+// WithEC2IMDSRegion call takes precedence over WithDefaultRegion call
+// when resolving region.
+func WithDefaultRegion(v string) LoadOptionsFunc {
+	return func(o *LoadOptions) error {
+		o.DefaultRegion = v
+		return nil
+	}
+}
+
+// getSharedConfigProfile returns SharedConfigProfile from config's LoadOptions
+func (o LoadOptions) getSharedConfigProfile(ctx context.Context) (string, bool, error) {
+	if len(o.SharedConfigProfile) == 0 {
+		return "", false, nil
+	}
+
+	return o.SharedConfigProfile, true, nil
+}
+
+// WithSharedConfigProfile is a helper function to construct functional options
+// that sets SharedConfigProfile on config's LoadOptions. Setting the shared
+// config profile to an empty string, will result in the shared config profile
+// value being ignored.
+// If multiple WithSharedConfigProfile calls are made, the last call overrides
+// the previous call values.
+func WithSharedConfigProfile(v string) LoadOptionsFunc {
+	return func(o *LoadOptions) error {
+		o.SharedConfigProfile = v
+		return nil
+	}
+}
+
+// getSharedConfigFiles returns SharedConfigFiles set on config's LoadOptions
+func (o LoadOptions) getSharedConfigFiles(ctx context.Context) ([]string, bool, error) {
+	if o.SharedConfigFiles == nil {
+		return nil, false, nil
+	}
+
+	return o.SharedConfigFiles, true, nil
+}
+
+// WithSharedConfigFiles is a helper function to construct functional options
+// that sets slice of SharedConfigFiles on config's LoadOptions.
+// Setting the shared config files to a nil string slice will result in the
+// shared config files value being ignored.
+// If multiple WithSharedConfigFiles calls are made, the last call overrides
+// the previous call values.
+func WithSharedConfigFiles(v []string) LoadOptionsFunc {
+	return func(o *LoadOptions) error {
+		o.SharedConfigFiles = v
+		return nil
+	}
+}
+
+// getSharedCredentialsFiles returns SharedCredentialsFiles set on config's LoadOptions
+func (o LoadOptions) getSharedCredentialsFiles(ctx context.Context) ([]string, bool, error) {
+	if o.SharedCredentialsFiles == nil {
+		return nil, false, nil
+	}
+
+	return o.SharedCredentialsFiles, true, nil
+}
+
+// WithSharedCredentialsFiles is a helper function to construct functional options
+// that sets slice of SharedCredentialsFiles on config's LoadOptions.
+// Setting the shared credentials files to a nil string slice will result in the
+// shared credentials files value being ignored.
+// If multiple WithSharedCredentialsFiles calls are made, the last call overrides
+// the previous call values.
+func WithSharedCredentialsFiles(v []string) LoadOptionsFunc {
+	return func(o *LoadOptions) error {
+		o.SharedCredentialsFiles = v
+		return nil
+	}
+}
+
+// getCustomCABundle returns CustomCABundle from LoadOptions
+func (o LoadOptions) getCustomCABundle(ctx context.Context) (io.Reader, bool, error) {
+	if o.CustomCABundle == nil {
+		return nil, false, nil
+	}
+
+	return o.CustomCABundle, true, nil
+}
+
+// WithCustomCABundle is a helper function to construct functional options
+// that sets CustomCABundle on config's LoadOptions. Setting the custom CA Bundle
+// to nil will result in custom CA Bundle value being ignored.
+// If multiple WithCustomCABundle calls are made, the last call overrides the
+// previous call values.
+func WithCustomCABundle(v io.Reader) LoadOptionsFunc {
+	return func(o *LoadOptions) error {
+		o.CustomCABundle = v
+		return nil
+	}
+}
+
+// UseEC2IMDSRegion provides a regionProvider that retrieves the region
+// from the EC2 Metadata service.
+type UseEC2IMDSRegion struct {
+	// If unset will default to generic EC2 IMDS client.
+	Client *imds.Client
+}
+
+// getRegion attempts to retrieve the region from EC2 Metadata service.
+func (p *UseEC2IMDSRegion) getRegion(ctx context.Context) (string, bool, error) {
+	if ctx == nil {
+		ctx = context.Background()
+	}
+
+	client := p.Client
+	if client == nil {
+		client = imds.New(imds.Options{})
+	}
+
+	result, err := client.GetRegion(ctx, nil)
+	if err != nil {
+		return "", false, err
+	}
+	if len(result.Region) != 0 {
+		return result.Region, true, nil
+	}
+	return "", false, nil
+}
+
+// getEC2IMDSRegion returns the value of EC2 IMDS region.
+func (o LoadOptions) getEC2IMDSRegion(ctx context.Context) (string, bool, error) {
+	if o.UseEC2IMDSRegion == nil {
+		return "", false, nil
+	}
+
+	return o.UseEC2IMDSRegion.getRegion(ctx)
+}
+
+// WithEC2IMDSRegion is a helper function to construct functional options
+// that enables resolving EC2IMDS region. The function takes
+// in a UseEC2IMDSRegion functional option, and can be used to set the
+// EC2IMDS client which will be used to resolve EC2IMDSRegion.
+// If no functional option is provided, an EC2IMDS client is built and used
+// by the resolver. If multiple WithEC2IMDSRegion calls are made, the last
+// call overrides the previous call values. Note that the WithRegion calls takes
+// precedence over WithEC2IMDSRegion when resolving region.
+func WithEC2IMDSRegion(fnOpts ...func(o *UseEC2IMDSRegion)) LoadOptionsFunc {
+	return func(o *LoadOptions) error {
+		o.UseEC2IMDSRegion = &UseEC2IMDSRegion{}
+
+		for _, fn := range fnOpts {
+			fn(o.UseEC2IMDSRegion)
+		}
+		return nil
+	}
+}
+
+// getCredentialsProvider returns the credentials value
+func (o LoadOptions) getCredentialsProvider(ctx context.Context) (aws.CredentialsProvider, bool, error) {
+	if o.Credentials == nil {
+		return nil, false, nil
+	}
+
+	return o.Credentials, true, nil
+}
+
+// WithCredentialsProvider is a helper function to construct functional options
+// that sets Credential provider value on config's LoadOptions. If credentials
+// provider is set to nil, the credentials provider value will be ignored.
+// If multiple WithCredentialsProvider calls are made, the last call overrides
+// the previous call values.
+func WithCredentialsProvider(v aws.CredentialsProvider) LoadOptionsFunc {
+	return func(o *LoadOptions) error {
+		o.Credentials = v
+		return nil
+	}
+}
+
+// getCredentialsCacheOptionsProvider returns the wrapped function to set aws.CredentialsCacheOptions
+func (o LoadOptions) getCredentialsCacheOptions(ctx context.Context) (func(*aws.CredentialsCacheOptions), bool, error) {
+	if o.CredentialsCacheOptions == nil {
+		return nil, false, nil
+	}
+
+	return o.CredentialsCacheOptions, true, nil
+}
+
+// WithCredentialsCacheOptions is a helper function to construct functional
+// options that sets a function to modify the aws.CredentialsCacheOptions the
+// aws.CredentialsCache will be configured with, if the CredentialsCache is used
+// by the configuration loader.
+//
+// If multiple WithCredentialsCacheOptions calls are made, the last call
+// overrides the previous call values.
+func WithCredentialsCacheOptions(v func(*aws.CredentialsCacheOptions)) LoadOptionsFunc {
+	return func(o *LoadOptions) error {
+		o.CredentialsCacheOptions = v
+		return nil
+	}
+}
+
+// getBearerAuthTokenProvider returns the credentials value
+func (o LoadOptions) getBearerAuthTokenProvider(ctx context.Context) (smithybearer.TokenProvider, bool, error) {
+	if o.BearerAuthTokenProvider == nil {
+		return nil, false, nil
+	}
+
+	return o.BearerAuthTokenProvider, true, nil
+}
+
+// WithBearerAuthTokenProvider is a helper function to construct functional options
+// that sets the bearer auth token provider on config's LoadOptions. If the
+// provider is set to nil, the bearer auth token provider value will be ignored.
+// If multiple WithBearerAuthTokenProvider calls are made, the last call overrides
+// the previous call values.
+func WithBearerAuthTokenProvider(v smithybearer.TokenProvider) LoadOptionsFunc {
+	return func(o *LoadOptions) error {
+		o.BearerAuthTokenProvider = v
+		return nil
+	}
+}
+
+// getBearerAuthTokenCacheOptionsProvider returns the wrapped function to set smithybearer.TokenCacheOptions
+func (o LoadOptions) getBearerAuthTokenCacheOptions(ctx context.Context) (func(*smithybearer.TokenCacheOptions), bool, error) {
+	if o.BearerAuthTokenCacheOptions == nil {
+		return nil, false, nil
+	}
+
+	return o.BearerAuthTokenCacheOptions, true, nil
+}
+
+// WithBearerAuthTokenCacheOptions is a helper function to construct functional options
+// that sets a function to modify the TokenCacheOptions the smithy-go
+// auth/bearer#TokenCache will be configured with, if the TokenCache is used by
+// the configuration loader.
+//
+// If multiple WithBearerAuthTokenCacheOptions calls are made, the last call overrides
+// the previous call values.
+func WithBearerAuthTokenCacheOptions(v func(*smithybearer.TokenCacheOptions)) LoadOptionsFunc {
+	return func(o *LoadOptions) error {
+		o.BearerAuthTokenCacheOptions = v
+		return nil
+	}
+}
+
+// getSSOTokenProviderOptionsProvider returns the wrapped function to set smithybearer.TokenCacheOptions
+func (o LoadOptions) getSSOTokenProviderOptions(ctx context.Context) (func(*ssocreds.SSOTokenProviderOptions), bool, error) {
+	if o.SSOTokenProviderOptions == nil {
+		return nil, false, nil
+	}
+
+	return o.SSOTokenProviderOptions, true, nil
+}
+
+// WithSSOTokenProviderOptions is a helper function to construct functional
+// options that sets a function to modify the SSOTokenProviderOptions the SDK's
+// credentials/ssocreds#SSOProvider will be configured with, if the
+// SSOTokenProvider is used by the configuration loader.
+//
+// If multiple WithSSOTokenProviderOptions calls are made, the last call overrides
+// the previous call values.
+func WithSSOTokenProviderOptions(v func(*ssocreds.SSOTokenProviderOptions)) LoadOptionsFunc {
+	return func(o *LoadOptions) error {
+		o.SSOTokenProviderOptions = v
+		return nil
+	}
+}
+
+// getProcessCredentialOptions returns the wrapped function to set processcreds.Options
+func (o LoadOptions) getProcessCredentialOptions(ctx context.Context) (func(*processcreds.Options), bool, error) {
+	if o.ProcessCredentialOptions == nil {
+		return nil, false, nil
+	}
+
+	return o.ProcessCredentialOptions, true, nil
+}
+
+// WithProcessCredentialOptions is a helper function to construct functional options
+// that sets a function to use processcreds.Options on config's LoadOptions.
+// If process credential options is set to nil, the process credential value will
+// be ignored. If multiple WithProcessCredentialOptions calls are made, the last call
+// overrides the previous call values.
+func WithProcessCredentialOptions(v func(*processcreds.Options)) LoadOptionsFunc {
+	return func(o *LoadOptions) error {
+		o.ProcessCredentialOptions = v
+		return nil
+	}
+}
+
+// getEC2RoleCredentialOptions returns the wrapped function to set the ec2rolecreds.Options
+func (o LoadOptions) getEC2RoleCredentialOptions(ctx context.Context) (func(*ec2rolecreds.Options), bool, error) {
+	if o.EC2RoleCredentialOptions == nil {
+		return nil, false, nil
+	}
+
+	return o.EC2RoleCredentialOptions, true, nil
+}
+
+// WithEC2RoleCredentialOptions is a helper function to construct functional options
+// that sets a function to use ec2rolecreds.Options on config's LoadOptions. If
+// EC2 role credential options is set to nil, the EC2 role credential options value
+// will be ignored. If multiple WithEC2RoleCredentialOptions calls are made,
+// the last call overrides the previous call values.
+func WithEC2RoleCredentialOptions(v func(*ec2rolecreds.Options)) LoadOptionsFunc {
+	return func(o *LoadOptions) error {
+		o.EC2RoleCredentialOptions = v
+		return nil
+	}
+}
+
+// getEndpointCredentialOptions returns the wrapped function to set endpointcreds.Options
+func (o LoadOptions) getEndpointCredentialOptions(context.Context) (func(*endpointcreds.Options), bool, error) {
+	if o.EndpointCredentialOptions == nil {
+		return nil, false, nil
+	}
+
+	return o.EndpointCredentialOptions, true, nil
+}
+
+// WithEndpointCredentialOptions is a helper function to construct functional options
+// that sets a function to use endpointcreds.Options on config's LoadOptions. If
+// endpoint credential options is set to nil, the endpoint credential options
+// value will be ignored. If multiple WithEndpointCredentialOptions calls are made,
+// the last call overrides the previous call values.
+func WithEndpointCredentialOptions(v func(*endpointcreds.Options)) LoadOptionsFunc {
+	return func(o *LoadOptions) error {
+		o.EndpointCredentialOptions = v
+		return nil
+	}
+}
+
+// getWebIdentityRoleCredentialOptions returns the wrapped function
+func (o LoadOptions) getWebIdentityRoleCredentialOptions(context.Context) (func(*stscreds.WebIdentityRoleOptions), bool, error) {
+	if o.WebIdentityRoleCredentialOptions == nil {
+		return nil, false, nil
+	}
+
+	return o.WebIdentityRoleCredentialOptions, true, nil
+}
+
+// WithWebIdentityRoleCredentialOptions is a helper function to construct
+// functional options that sets a function to use stscreds.WebIdentityRoleOptions
+// on config's LoadOptions. If web identity role credentials options is set to nil,
+// the web identity role credentials value will be ignored. If multiple
+// WithWebIdentityRoleCredentialOptions calls are made, the last call
+// overrides the previous call values.
+func WithWebIdentityRoleCredentialOptions(v func(*stscreds.WebIdentityRoleOptions)) LoadOptionsFunc {
+	return func(o *LoadOptions) error {
+		o.WebIdentityRoleCredentialOptions = v
+		return nil
+	}
+}
+
+// getAssumeRoleCredentialOptions returns AssumeRoleCredentialOptions from LoadOptions
+func (o LoadOptions) getAssumeRoleCredentialOptions(context.Context) (func(options *stscreds.AssumeRoleOptions), bool, error) {
+	if o.AssumeRoleCredentialOptions == nil {
+		return nil, false, nil
+	}
+
+	return o.AssumeRoleCredentialOptions, true, nil
+}
+
+// WithAssumeRoleCredentialOptions is a helper function to construct
+// functional options that sets a function to use stscreds.AssumeRoleOptions
+// on config's LoadOptions. If assume role credentials options is set to nil,
+// the assume role credentials value will be ignored. If multiple
+// WithAssumeRoleCredentialOptions calls are made, the last call overrides
+// the previous call values.
+func WithAssumeRoleCredentialOptions(v func(*stscreds.AssumeRoleOptions)) LoadOptionsFunc {
+	return func(o *LoadOptions) error {
+		o.AssumeRoleCredentialOptions = v
+		return nil
+	}
+}
+
+func (o LoadOptions) getHTTPClient(ctx context.Context) (HTTPClient, bool, error) {
+	if o.HTTPClient == nil {
+		return nil, false, nil
+	}
+
+	return o.HTTPClient, true, nil
+}
+
+// WithHTTPClient is a helper function to construct functional options
+// that sets HTTPClient on LoadOptions. If HTTPClient is set to nil,
+// the HTTPClient value will be ignored.
+// If multiple WithHTTPClient calls are made, the last call overrides
+// the previous call values.
+func WithHTTPClient(v HTTPClient) LoadOptionsFunc {
+	return func(o *LoadOptions) error {
+		o.HTTPClient = v
+		return nil
+	}
+}
+
+func (o LoadOptions) getAPIOptions(ctx context.Context) ([]func(*middleware.Stack) error, bool, error) {
+	if o.APIOptions == nil {
+		return nil, false, nil
+	}
+
+	return o.APIOptions, true, nil
+}
+
+// WithAPIOptions is a helper function to construct functional options
+// that sets APIOptions on LoadOptions. If APIOptions is set to nil, the
+// APIOptions value is ignored. If multiple WithAPIOptions calls are
+// made, the last call overrides the previous call values.
+func WithAPIOptions(v []func(*middleware.Stack) error) LoadOptionsFunc {
+	return func(o *LoadOptions) error {
+		if v == nil {
+			return nil
+		}
+
+		o.APIOptions = append(o.APIOptions, v...)
+		return nil
+	}
+}
+
+func (o LoadOptions) getRetryMaxAttempts(ctx context.Context) (int, bool, error) {
+	if o.RetryMaxAttempts == 0 {
+		return 0, false, nil
+	}
+
+	return o.RetryMaxAttempts, true, nil
+}
+
+// WithRetryMaxAttempts is a helper function to construct functional options that sets
+// RetryMaxAttempts on LoadOptions. If RetryMaxAttempts is unset, the RetryMaxAttempts value is
+// ignored. If multiple WithRetryMaxAttempts calls are made, the last call overrides
+// the previous call values.
+//
+// Will be ignored if LoadOptions.Retryer or WithRetryer are used.
+func WithRetryMaxAttempts(v int) LoadOptionsFunc {
+	return func(o *LoadOptions) error {
+		o.RetryMaxAttempts = v
+		return nil
+	}
+}
+
+func (o LoadOptions) getRetryMode(ctx context.Context) (aws.RetryMode, bool, error) {
+	if o.RetryMode == "" {
+		return "", false, nil
+	}
+
+	return o.RetryMode, true, nil
+}
+
+// WithRetryMode is a helper function to construct functional options that sets
+// RetryMode on LoadOptions. If RetryMode is unset, the RetryMode value is
+// ignored. If multiple WithRetryMode calls are made, the last call overrides
+// the previous call values.
+//
+// Will be ignored if LoadOptions.Retryer or WithRetryer are used.
+func WithRetryMode(v aws.RetryMode) LoadOptionsFunc {
+	return func(o *LoadOptions) error {
+		o.RetryMode = v
+		return nil
+	}
+}
+
+func (o LoadOptions) getRetryer(ctx context.Context) (func() aws.Retryer, bool, error) {
+	if o.Retryer == nil {
+		return nil, false, nil
+	}
+
+	return o.Retryer, true, nil
+}
+
+// WithRetryer is a helper function to construct functional options
+// that sets Retryer on LoadOptions. If Retryer is set to nil, the
+// Retryer value is ignored. If multiple WithRetryer calls are
+// made, the last call overrides the previous call values.
+func WithRetryer(v func() aws.Retryer) LoadOptionsFunc {
+	return func(o *LoadOptions) error {
+		o.Retryer = v
+		return nil
+	}
+}
+
+func (o LoadOptions) getEndpointResolver(ctx context.Context) (aws.EndpointResolver, bool, error) {
+	if o.EndpointResolver == nil {
+		return nil, false, nil
+	}
+
+	return o.EndpointResolver, true, nil
+}
+
+// WithEndpointResolver is a helper function to construct functional options
+// that sets the EndpointResolver on LoadOptions. If the EndpointResolver is set to nil,
+// the EndpointResolver value is ignored. If multiple WithEndpointResolver calls
+// are made, the last call overrides the previous call values.
+//
+// Deprecated: The global endpoint resolution interface is deprecated. The API
+// for endpoint resolution is now unique to each service and is set via the
+// EndpointResolverV2 field on service client options. Use of
+// WithEndpointResolver or WithEndpointResolverWithOptions will prevent you
+// from using any endpoint-related service features released after the
+// introduction of EndpointResolverV2. You may also encounter broken or
+// unexpected behavior when using the old global interface with services that
+// use many endpoint-related customizations such as S3.
+func WithEndpointResolver(v aws.EndpointResolver) LoadOptionsFunc {
+	return func(o *LoadOptions) error {
+		o.EndpointResolver = v
+		return nil
+	}
+}
+
+func (o LoadOptions) getEndpointResolverWithOptions(ctx context.Context) (aws.EndpointResolverWithOptions, bool, error) {
+	if o.EndpointResolverWithOptions == nil {
+		return nil, false, nil
+	}
+
+	return o.EndpointResolverWithOptions, true, nil
+}
+
+// WithEndpointResolverWithOptions is a helper function to construct functional options
+// that sets the EndpointResolverWithOptions on LoadOptions. If the EndpointResolverWithOptions is set to nil,
+// the EndpointResolver value is ignored. If multiple WithEndpointResolver calls
+// are made, the last call overrides the previous call values.
+//
+// Deprecated: The global endpoint resolution interface is deprecated. See
+// deprecation docs on [WithEndpointResolver].
+func WithEndpointResolverWithOptions(v aws.EndpointResolverWithOptions) LoadOptionsFunc {
+	return func(o *LoadOptions) error {
+		o.EndpointResolverWithOptions = v
+		return nil
+	}
+}
+
+func (o LoadOptions) getLogger(ctx context.Context) (logging.Logger, bool, error) {
+	if o.Logger == nil {
+		return nil, false, nil
+	}
+
+	return o.Logger, true, nil
+}
+
+// WithLogger is a helper function to construct functional options
+// that sets Logger on LoadOptions. If Logger is set to nil, the
+// Logger value will be ignored. If multiple WithLogger calls are made,
+// the last call overrides the previous call values.
+func WithLogger(v logging.Logger) LoadOptionsFunc {
+	return func(o *LoadOptions) error {
+		o.Logger = v
+		return nil
+	}
+}
+
+func (o LoadOptions) getClientLogMode(ctx context.Context) (aws.ClientLogMode, bool, error) {
+	if o.ClientLogMode == nil {
+		return 0, false, nil
+	}
+
+	return *o.ClientLogMode, true, nil
+}
+
+// WithClientLogMode is a helper function to construct functional options
+// that sets client log mode on LoadOptions. If client log mode is set to nil,
+// the client log mode value will be ignored. If multiple WithClientLogMode calls are made,
+// the last call overrides the previous call values.
+func WithClientLogMode(v aws.ClientLogMode) LoadOptionsFunc {
+	return func(o *LoadOptions) error {
+		o.ClientLogMode = &v
+		return nil
+	}
+}
+
+func (o LoadOptions) getLogConfigurationWarnings(ctx context.Context) (v bool, found bool, err error) {
+	if o.LogConfigurationWarnings == nil {
+		return false, false, nil
+	}
+	return *o.LogConfigurationWarnings, true, nil
+}
+
+// WithLogConfigurationWarnings is a helper function to construct
+// functional options that can be used to set LogConfigurationWarnings
+// on LoadOptions.
+//
+// If multiple WithLogConfigurationWarnings calls are made, the last call
+// overrides the previous call values.
+func WithLogConfigurationWarnings(v bool) LoadOptionsFunc {
+	return func(o *LoadOptions) error {
+		o.LogConfigurationWarnings = &v
+		return nil
+	}
+}
+
+// GetS3UseARNRegion returns whether to allow ARNs to direct the region
+// the S3 client's requests are sent to.
+func (o LoadOptions) GetS3UseARNRegion(ctx context.Context) (v bool, found bool, err error) {
+	if o.S3UseARNRegion == nil {
+		return false, false, nil
+	}
+	return *o.S3UseARNRegion, true, nil
+}
+
+// WithS3UseARNRegion is a helper function to construct functional options
+// that can be used to set S3UseARNRegion on LoadOptions.
+// If multiple WithS3UseARNRegion calls are made, the last call overrides
+// the previous call values.
+func WithS3UseARNRegion(v bool) LoadOptionsFunc {
+	return func(o *LoadOptions) error {
+		o.S3UseARNRegion = &v
+		return nil
+	}
+}
+
+// GetS3DisableMultiRegionAccessPoints returns whether to disable
+// the S3 multi-region access points feature.
+func (o LoadOptions) GetS3DisableMultiRegionAccessPoints(ctx context.Context) (v bool, found bool, err error) {
+	if o.S3DisableMultiRegionAccessPoints == nil {
+		return false, false, nil
+	}
+	return *o.S3DisableMultiRegionAccessPoints, true, nil
+}
+
+// WithS3DisableMultiRegionAccessPoints is a helper function to construct functional options
+// that can be used to set S3DisableMultiRegionAccessPoints on LoadOptions.
+// If multiple WithS3DisableMultiRegionAccessPoints calls are made, the last call overrides
+// the previous call values.
+func WithS3DisableMultiRegionAccessPoints(v bool) LoadOptionsFunc {
+	return func(o *LoadOptions) error {
+		o.S3DisableMultiRegionAccessPoints = &v
+		return nil
+	}
+}
+
+// GetEnableEndpointDiscovery returns if the EnableEndpointDiscovery flag is set.
+func (o LoadOptions) GetEnableEndpointDiscovery(ctx context.Context) (value aws.EndpointDiscoveryEnableState, ok bool, err error) {
+	if o.EnableEndpointDiscovery == aws.EndpointDiscoveryUnset {
+		return aws.EndpointDiscoveryUnset, false, nil
+	}
+	return o.EnableEndpointDiscovery, true, nil
+}
+
+// WithEndpointDiscovery is a helper function to construct functional options
+// that can be used to enable endpoint discovery on LoadOptions for supported clients.
+// If multiple WithEndpointDiscovery calls are made, the last call overrides
+// the previous call values.
+func WithEndpointDiscovery(v aws.EndpointDiscoveryEnableState) LoadOptionsFunc {
+	return func(o *LoadOptions) error {
+		o.EnableEndpointDiscovery = v
+		return nil
+	}
+}
+
+// getSSOProviderOptions returns AssumeRoleCredentialOptions from LoadOptions
+func (o LoadOptions) getSSOProviderOptions(context.Context) (func(options *ssocreds.Options), bool, error) {
+	if o.SSOProviderOptions == nil {
+		return nil, false, nil
+	}
+
+	return o.SSOProviderOptions, true, nil
+}
+
+// WithSSOProviderOptions is a helper function to construct
+// functional options that sets a function to use ssocreds.Options
+// on config's LoadOptions. If the SSO credential provider options is set to nil,
+// the sso provider options value will be ignored. If multiple
+// WithSSOProviderOptions calls are made, the last call overrides
+// the previous call values.
+func WithSSOProviderOptions(v func(*ssocreds.Options)) LoadOptionsFunc {
+	return func(o *LoadOptions) error {
+		o.SSOProviderOptions = v
+		return nil
+	}
+}
+
+// GetEC2IMDSClientEnableState implements a EC2IMDSClientEnableState options resolver interface.
+func (o LoadOptions) GetEC2IMDSClientEnableState() (imds.ClientEnableState, bool, error) {
+	if o.EC2IMDSClientEnableState == imds.ClientDefaultEnableState {
+		return imds.ClientDefaultEnableState, false, nil
+	}
+
+	return o.EC2IMDSClientEnableState, true, nil
+}
+
+// GetEC2IMDSEndpointMode implements a EC2IMDSEndpointMode option resolver interface.
+func (o LoadOptions) GetEC2IMDSEndpointMode() (imds.EndpointModeState, bool, error) {
+	if o.EC2IMDSEndpointMode == imds.EndpointModeStateUnset {
+		return imds.EndpointModeStateUnset, false, nil
+	}
+
+	return o.EC2IMDSEndpointMode, true, nil
+}
+
+// GetEC2IMDSEndpoint implements a EC2IMDSEndpoint option resolver interface.
+func (o LoadOptions) GetEC2IMDSEndpoint() (string, bool, error) {
+	if len(o.EC2IMDSEndpoint) == 0 {
+		return "", false, nil
+	}
+
+	return o.EC2IMDSEndpoint, true, nil
+}
+
+// WithEC2IMDSClientEnableState is a helper function to construct functional options that sets the EC2IMDSClientEnableState.
+func WithEC2IMDSClientEnableState(v imds.ClientEnableState) LoadOptionsFunc {
+	return func(o *LoadOptions) error {
+		o.EC2IMDSClientEnableState = v
+		return nil
+	}
+}
+
+// WithEC2IMDSEndpointMode is a helper function to construct functional options that sets the EC2IMDSEndpointMode.
+func WithEC2IMDSEndpointMode(v imds.EndpointModeState) LoadOptionsFunc {
+	return func(o *LoadOptions) error {
+		o.EC2IMDSEndpointMode = v
+		return nil
+	}
+}
+
+// WithEC2IMDSEndpoint is a helper function to construct functional options that sets the EC2IMDSEndpoint.
+func WithEC2IMDSEndpoint(v string) LoadOptionsFunc {
+	return func(o *LoadOptions) error {
+		o.EC2IMDSEndpoint = v
+		return nil
+	}
+}
+
+// WithUseDualStackEndpoint is a helper function to construct
+// functional options that can be used to set UseDualStackEndpoint on LoadOptions.
+func WithUseDualStackEndpoint(v aws.DualStackEndpointState) LoadOptionsFunc {
+	return func(o *LoadOptions) error {
+		o.UseDualStackEndpoint = v
+		return nil
+	}
+}
+
+// GetUseDualStackEndpoint returns whether the service's dual-stack endpoint should be
+// used for requests.
+func (o LoadOptions) GetUseDualStackEndpoint(ctx context.Context) (value aws.DualStackEndpointState, found bool, err error) {
+	if o.UseDualStackEndpoint == aws.DualStackEndpointStateUnset {
+		return aws.DualStackEndpointStateUnset, false, nil
+	}
+	return o.UseDualStackEndpoint, true, nil
+}
+
+// WithUseFIPSEndpoint is a helper function to construct
+// functional options that can be used to set UseFIPSEndpoint on LoadOptions.
+func WithUseFIPSEndpoint(v aws.FIPSEndpointState) LoadOptionsFunc {
+	return func(o *LoadOptions) error {
+		o.UseFIPSEndpoint = v
+		return nil
+	}
+}
+
+// GetUseFIPSEndpoint returns whether the service's FIPS endpoint should be
+// used for requests.
+func (o LoadOptions) GetUseFIPSEndpoint(ctx context.Context) (value aws.FIPSEndpointState, found bool, err error) {
+	if o.UseFIPSEndpoint == aws.FIPSEndpointStateUnset {
+		return aws.FIPSEndpointStateUnset, false, nil
+	}
+	return o.UseFIPSEndpoint, true, nil
+}
+
+// WithDefaultsMode sets the SDK defaults configuration mode to the value provided.
+//
+// Zero or more functional options can be provided to provide configuration options for performing
+// environment discovery when using aws.DefaultsModeAuto.
+func WithDefaultsMode(mode aws.DefaultsMode, optFns ...func(options *DefaultsModeOptions)) LoadOptionsFunc {
+	do := DefaultsModeOptions{
+		Mode: mode,
+	}
+	for _, fn := range optFns {
+		fn(&do)
+	}
+	return func(options *LoadOptions) error {
+		options.DefaultsModeOptions = do
+		return nil
+	}
+}
+
+// GetS3DisableExpressAuth returns the configured value for
+// [EnvConfig.S3DisableExpressAuth].
+func (o LoadOptions) GetS3DisableExpressAuth() (value, ok bool) {
+	if o.S3DisableExpressAuth == nil {
+		return false, false
+	}
+
+	return *o.S3DisableExpressAuth, true
+}
+
+// WithS3DisableExpressAuth sets [LoadOptions.S3DisableExpressAuth]
+// to the value provided.
+func WithS3DisableExpressAuth(v bool) LoadOptionsFunc {
+	return func(o *LoadOptions) error {
+		o.S3DisableExpressAuth = &v
+		return nil
+	}
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/config/local.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/config/local.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/config/local.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/config/local.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,51 @@
+package config
+
+import (
+	"fmt"
+	"net"
+	"net/url"
+)
+
+var lookupHostFn = net.LookupHost
+
+func isLoopbackHost(host string) (bool, error) {
+	ip := net.ParseIP(host)
+	if ip != nil {
+		return ip.IsLoopback(), nil
+	}
+
+	// Host is not an ip, perform lookup
+	addrs, err := lookupHostFn(host)
+	if err != nil {
+		return false, err
+	}
+	if len(addrs) == 0 {
+		return false, fmt.Errorf("no addrs found for host, %s", host)
+	}
+
+	for _, addr := range addrs {
+		if !net.ParseIP(addr).IsLoopback() {
+			return false, nil
+		}
+	}
+
+	return true, nil
+}
+
+func validateLocalURL(v string) error {
+	u, err := url.Parse(v)
+	if err != nil {
+		return err
+	}
+
+	host := u.Hostname()
+	if len(host) == 0 {
+		return fmt.Errorf("unable to parse host from local HTTP cred provider URL")
+	} else if isLoopback, err := isLoopbackHost(host); err != nil {
+		return fmt.Errorf("failed to resolve host %q, %v", host, err)
+	} else if !isLoopback {
+		return fmt.Errorf("invalid endpoint host, %q, only host resolving to loopback addresses are allowed", host)
+	}
+
+	return nil
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/config/provider.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/config/provider.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/config/provider.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/config/provider.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,721 @@
+package config
+
+import (
+	"context"
+	"io"
+	"net/http"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds"
+	"github.com/aws/aws-sdk-go-v2/credentials/endpointcreds"
+	"github.com/aws/aws-sdk-go-v2/credentials/processcreds"
+	"github.com/aws/aws-sdk-go-v2/credentials/ssocreds"
+	"github.com/aws/aws-sdk-go-v2/credentials/stscreds"
+	"github.com/aws/aws-sdk-go-v2/feature/ec2/imds"
+	smithybearer "github.com/aws/smithy-go/auth/bearer"
+	"github.com/aws/smithy-go/logging"
+	"github.com/aws/smithy-go/middleware"
+)
+
+// sharedConfigProfileProvider provides access to the shared config profile
+// name external configuration value.
+type sharedConfigProfileProvider interface {
+	getSharedConfigProfile(ctx context.Context) (string, bool, error)
+}
+
+// getSharedConfigProfile searches the configs for a sharedConfigProfileProvider
+// and returns the value if found. Returns an error if a provider fails before a
+// value is found.
+func getSharedConfigProfile(ctx context.Context, configs configs) (value string, found bool, err error) {
+	for _, cfg := range configs {
+		if p, ok := cfg.(sharedConfigProfileProvider); ok {
+			value, found, err = p.getSharedConfigProfile(ctx)
+			if err != nil || found {
+				break
+			}
+		}
+	}
+	return
+}
+
+// sharedConfigFilesProvider provides access to the shared config filesnames
+// external configuration value.
+type sharedConfigFilesProvider interface {
+	getSharedConfigFiles(ctx context.Context) ([]string, bool, error)
+}
+
+// getSharedConfigFiles searches the configs for a sharedConfigFilesProvider
+// and returns the value if found. Returns an error if a provider fails before a
+// value is found.
+func getSharedConfigFiles(ctx context.Context, configs configs) (value []string, found bool, err error) {
+	for _, cfg := range configs {
+		if p, ok := cfg.(sharedConfigFilesProvider); ok {
+			value, found, err = p.getSharedConfigFiles(ctx)
+			if err != nil || found {
+				break
+			}
+		}
+	}
+
+	return
+}
+
+// sharedCredentialsFilesProvider provides access to the shared credentials filesnames
+// external configuration value.
+type sharedCredentialsFilesProvider interface {
+	getSharedCredentialsFiles(ctx context.Context) ([]string, bool, error)
+}
+
+// getSharedCredentialsFiles searches the configs for a sharedCredentialsFilesProvider
+// and returns the value if found. Returns an error if a provider fails before a
+// value is found.
+func getSharedCredentialsFiles(ctx context.Context, configs configs) (value []string, found bool, err error) {
+	for _, cfg := range configs {
+		if p, ok := cfg.(sharedCredentialsFilesProvider); ok {
+			value, found, err = p.getSharedCredentialsFiles(ctx)
+			if err != nil || found {
+				break
+			}
+		}
+	}
+
+	return
+}
+
+// customCABundleProvider provides access to the custom CA bundle PEM bytes.
+type customCABundleProvider interface {
+	getCustomCABundle(ctx context.Context) (io.Reader, bool, error)
+}
+
+// getCustomCABundle searches the configs for a customCABundleProvider
+// and returns the value if found. Returns an error if a provider fails before a
+// value is found.
+func getCustomCABundle(ctx context.Context, configs configs) (value io.Reader, found bool, err error) {
+	for _, cfg := range configs {
+		if p, ok := cfg.(customCABundleProvider); ok {
+			value, found, err = p.getCustomCABundle(ctx)
+			if err != nil || found {
+				break
+			}
+		}
+	}
+
+	return
+}
+
+// regionProvider provides access to the region external configuration value.
+type regionProvider interface {
+	getRegion(ctx context.Context) (string, bool, error)
+}
+
+// getRegion searches the configs for a regionProvider and returns the value
+// if found. Returns an error if a provider fails before a value is found.
+func getRegion(ctx context.Context, configs configs) (value string, found bool, err error) {
+	for _, cfg := range configs {
+		if p, ok := cfg.(regionProvider); ok {
+			value, found, err = p.getRegion(ctx)
+			if err != nil || found {
+				break
+			}
+		}
+	}
+	return
+}
+
+// IgnoreConfiguredEndpointsProvider is needed to search for all providers
+// that provide a flag to disable configured endpoints.
+type IgnoreConfiguredEndpointsProvider interface {
+	GetIgnoreConfiguredEndpoints(ctx context.Context) (bool, bool, error)
+}
+
+// GetIgnoreConfiguredEndpoints is used in knowing when to disable configured
+// endpoints feature.
+func GetIgnoreConfiguredEndpoints(ctx context.Context, configs []interface{}) (value bool, found bool, err error) {
+	for _, cfg := range configs {
+		if p, ok := cfg.(IgnoreConfiguredEndpointsProvider); ok {
+			value, found, err = p.GetIgnoreConfiguredEndpoints(ctx)
+			if err != nil || found {
+				break
+			}
+		}
+	}
+	return
+}
+
+type baseEndpointProvider interface {
+	getBaseEndpoint(ctx context.Context) (string, bool, error)
+}
+
+func getBaseEndpoint(ctx context.Context, configs configs) (value string, found bool, err error) {
+	for _, cfg := range configs {
+		if p, ok := cfg.(baseEndpointProvider); ok {
+			value, found, err = p.getBaseEndpoint(ctx)
+			if err != nil || found {
+				break
+			}
+		}
+	}
+	return
+}
+
+type servicesObjectProvider interface {
+	getServicesObject(ctx context.Context) (map[string]map[string]string, bool, error)
+}
+
+func getServicesObject(ctx context.Context, configs configs) (value map[string]map[string]string, found bool, err error) {
+	for _, cfg := range configs {
+		if p, ok := cfg.(servicesObjectProvider); ok {
+			value, found, err = p.getServicesObject(ctx)
+			if err != nil || found {
+				break
+			}
+		}
+	}
+	return
+}
+
+// appIDProvider provides access to the sdk app ID value
+type appIDProvider interface {
+	getAppID(ctx context.Context) (string, bool, error)
+}
+
+func getAppID(ctx context.Context, configs configs) (value string, found bool, err error) {
+	for _, cfg := range configs {
+		if p, ok := cfg.(appIDProvider); ok {
+			value, found, err = p.getAppID(ctx)
+			if err != nil || found {
+				break
+			}
+		}
+	}
+	return
+}
+
+// disableRequestCompressionProvider provides access to the DisableRequestCompression
+type disableRequestCompressionProvider interface {
+	getDisableRequestCompression(context.Context) (bool, bool, error)
+}
+
+func getDisableRequestCompression(ctx context.Context, configs configs) (value bool, found bool, err error) {
+	for _, cfg := range configs {
+		if p, ok := cfg.(disableRequestCompressionProvider); ok {
+			value, found, err = p.getDisableRequestCompression(ctx)
+			if err != nil || found {
+				break
+			}
+		}
+	}
+	return
+}
+
+// requestMinCompressSizeBytesProvider provides access to the MinCompressSizeBytes
+type requestMinCompressSizeBytesProvider interface {
+	getRequestMinCompressSizeBytes(context.Context) (int64, bool, error)
+}
+
+func getRequestMinCompressSizeBytes(ctx context.Context, configs configs) (value int64, found bool, err error) {
+	for _, cfg := range configs {
+		if p, ok := cfg.(requestMinCompressSizeBytesProvider); ok {
+			value, found, err = p.getRequestMinCompressSizeBytes(ctx)
+			if err != nil || found {
+				break
+			}
+		}
+	}
+	return
+}
+
+// accountIDEndpointModeProvider provides access to the AccountIDEndpointMode
+type accountIDEndpointModeProvider interface {
+	getAccountIDEndpointMode(context.Context) (aws.AccountIDEndpointMode, bool, error)
+}
+
+func getAccountIDEndpointMode(ctx context.Context, configs configs) (value aws.AccountIDEndpointMode, found bool, err error) {
+	for _, cfg := range configs {
+		if p, ok := cfg.(accountIDEndpointModeProvider); ok {
+			value, found, err = p.getAccountIDEndpointMode(ctx)
+			if err != nil || found {
+				break
+			}
+		}
+	}
+	return
+}
+
+// ec2IMDSRegionProvider provides access to the ec2 imds region
+// configuration value
+type ec2IMDSRegionProvider interface {
+	getEC2IMDSRegion(ctx context.Context) (string, bool, error)
+}
+
+// getEC2IMDSRegion searches the configs for a ec2IMDSRegionProvider and
+// returns the value if found. Returns an error if a provider fails before
+// a value is found.
+func getEC2IMDSRegion(ctx context.Context, configs configs) (region string, found bool, err error) {
+	for _, cfg := range configs {
+		if provider, ok := cfg.(ec2IMDSRegionProvider); ok {
+			region, found, err = provider.getEC2IMDSRegion(ctx)
+			if err != nil || found {
+				break
+			}
+		}
+	}
+	return
+}
+
+// credentialsProviderProvider provides access to the credentials external
+// configuration value.
+type credentialsProviderProvider interface {
+	getCredentialsProvider(ctx context.Context) (aws.CredentialsProvider, bool, error)
+}
+
+// getCredentialsProvider searches the configs for a credentialsProviderProvider
+// and returns the value if found. Returns an error if a provider fails before a
+// value is found.
+func getCredentialsProvider(ctx context.Context, configs configs) (p aws.CredentialsProvider, found bool, err error) {
+	for _, cfg := range configs {
+		if provider, ok := cfg.(credentialsProviderProvider); ok {
+			p, found, err = provider.getCredentialsProvider(ctx)
+			if err != nil || found {
+				break
+			}
+		}
+	}
+	return
+}
+
+// credentialsCacheOptionsProvider is an interface for retrieving a function for setting
+// the aws.CredentialsCacheOptions.
+type credentialsCacheOptionsProvider interface {
+	getCredentialsCacheOptions(ctx context.Context) (func(*aws.CredentialsCacheOptions), bool, error)
+}
+
+// getCredentialsCacheOptionsProvider is an interface for retrieving a function for setting
+// the aws.CredentialsCacheOptions.
+func getCredentialsCacheOptionsProvider(ctx context.Context, configs configs) (
+	f func(*aws.CredentialsCacheOptions), found bool, err error,
+) {
+	for _, config := range configs {
+		if p, ok := config.(credentialsCacheOptionsProvider); ok {
+			f, found, err = p.getCredentialsCacheOptions(ctx)
+			if err != nil || found {
+				break
+			}
+		}
+	}
+	return
+}
+
+// bearerAuthTokenProviderProvider provides access to the bearer authentication
+// token external configuration value.
+type bearerAuthTokenProviderProvider interface {
+	getBearerAuthTokenProvider(context.Context) (smithybearer.TokenProvider, bool, error)
+}
+
+// getBearerAuthTokenProvider searches the config sources for a
+// bearerAuthTokenProviderProvider and returns the value if found. Returns an
+// error if a provider fails before a value is found.
+func getBearerAuthTokenProvider(ctx context.Context, configs configs) (p smithybearer.TokenProvider, found bool, err error) {
+	for _, cfg := range configs {
+		if provider, ok := cfg.(bearerAuthTokenProviderProvider); ok {
+			p, found, err = provider.getBearerAuthTokenProvider(ctx)
+			if err != nil || found {
+				break
+			}
+		}
+	}
+	return
+}
+
+// bearerAuthTokenCacheOptionsProvider is an interface for retrieving a function for
+// setting the smithy-go auth/bearer#TokenCacheOptions.
+type bearerAuthTokenCacheOptionsProvider interface {
+	getBearerAuthTokenCacheOptions(context.Context) (func(*smithybearer.TokenCacheOptions), bool, error)
+}
+
+// getBearerAuthTokenCacheOptionsProvider is an interface for retrieving a function for
+// setting the smithy-go auth/bearer#TokenCacheOptions.
+func getBearerAuthTokenCacheOptions(ctx context.Context, configs configs) (
+	f func(*smithybearer.TokenCacheOptions), found bool, err error,
+) {
+	for _, config := range configs {
+		if p, ok := config.(bearerAuthTokenCacheOptionsProvider); ok {
+			f, found, err = p.getBearerAuthTokenCacheOptions(ctx)
+			if err != nil || found {
+				break
+			}
+		}
+	}
+	return
+}
+
+// ssoTokenProviderOptionsProvider is an interface for retrieving a function for
+// setting the SDK's credentials/ssocreds#SSOTokenProviderOptions.
+type ssoTokenProviderOptionsProvider interface {
+	getSSOTokenProviderOptions(context.Context) (func(*ssocreds.SSOTokenProviderOptions), bool, error)
+}
+
+// getSSOTokenProviderOptions is an interface for retrieving a function for
+// setting the SDK's credentials/ssocreds#SSOTokenProviderOptions.
+func getSSOTokenProviderOptions(ctx context.Context, configs configs) (
+	f func(*ssocreds.SSOTokenProviderOptions), found bool, err error,
+) {
+	for _, config := range configs {
+		if p, ok := config.(ssoTokenProviderOptionsProvider); ok {
+			f, found, err = p.getSSOTokenProviderOptions(ctx)
+			if err != nil || found {
+				break
+			}
+		}
+	}
+	return
+}
+
+// ssoTokenProviderOptionsProvider
+
+// processCredentialOptions is an interface for retrieving a function for setting
+// the processcreds.Options.
+type processCredentialOptions interface {
+	getProcessCredentialOptions(ctx context.Context) (func(*processcreds.Options), bool, error)
+}
+
+// getProcessCredentialOptions searches the slice of configs and returns the first function found
+func getProcessCredentialOptions(ctx context.Context, configs configs) (f func(*processcreds.Options), found bool, err error) {
+	for _, config := range configs {
+		if p, ok := config.(processCredentialOptions); ok {
+			f, found, err = p.getProcessCredentialOptions(ctx)
+			if err != nil || found {
+				break
+			}
+		}
+	}
+	return
+}
+
+// ec2RoleCredentialOptionsProvider is an interface for retrieving a function
+// for setting the ec2rolecreds.Provider options.
+type ec2RoleCredentialOptionsProvider interface {
+	getEC2RoleCredentialOptions(ctx context.Context) (func(*ec2rolecreds.Options), bool, error)
+}
+
+// getEC2RoleCredentialProviderOptions searches the slice of configs and returns the first function found
+func getEC2RoleCredentialProviderOptions(ctx context.Context, configs configs) (f func(*ec2rolecreds.Options), found bool, err error) {
+	for _, config := range configs {
+		if p, ok := config.(ec2RoleCredentialOptionsProvider); ok {
+			f, found, err = p.getEC2RoleCredentialOptions(ctx)
+			if err != nil || found {
+				break
+			}
+		}
+	}
+	return
+}
+
+// defaultRegionProvider is an interface for retrieving a default region if a region was not resolved from other sources
+type defaultRegionProvider interface {
+	getDefaultRegion(ctx context.Context) (string, bool, error)
+}
+
+// getDefaultRegion searches the slice of configs and returns the first fallback region found
+func getDefaultRegion(ctx context.Context, configs configs) (value string, found bool, err error) {
+	for _, config := range configs {
+		if p, ok := config.(defaultRegionProvider); ok {
+			value, found, err = p.getDefaultRegion(ctx)
+			if err != nil || found {
+				break
+			}
+		}
+	}
+	return
+}
+
+// endpointCredentialOptionsProvider is an interface for retrieving a function for setting
+// the endpointcreds.ProviderOptions.
+type endpointCredentialOptionsProvider interface {
+	getEndpointCredentialOptions(ctx context.Context) (func(*endpointcreds.Options), bool, error)
+}
+
+// getEndpointCredentialProviderOptions searches the slice of configs and returns the first function found
+func getEndpointCredentialProviderOptions(ctx context.Context, configs configs) (f func(*endpointcreds.Options), found bool, err error) {
+	for _, config := range configs {
+		if p, ok := config.(endpointCredentialOptionsProvider); ok {
+			f, found, err = p.getEndpointCredentialOptions(ctx)
+			if err != nil || found {
+				break
+			}
+		}
+	}
+	return
+}
+
+// webIdentityRoleCredentialOptionsProvider is an interface for retrieving a function for setting
+// the stscreds.WebIdentityRoleProvider.
+type webIdentityRoleCredentialOptionsProvider interface {
+	getWebIdentityRoleCredentialOptions(ctx context.Context) (func(*stscreds.WebIdentityRoleOptions), bool, error)
+}
+
+// getWebIdentityCredentialProviderOptions searches the slice of configs and returns the first function found
+func getWebIdentityCredentialProviderOptions(ctx context.Context, configs configs) (f func(*stscreds.WebIdentityRoleOptions), found bool, err error) {
+	for _, config := range configs {
+		if p, ok := config.(webIdentityRoleCredentialOptionsProvider); ok {
+			f, found, err = p.getWebIdentityRoleCredentialOptions(ctx)
+			if err != nil || found {
+				break
+			}
+		}
+	}
+	return
+}
+
+// assumeRoleCredentialOptionsProvider is an interface for retrieving a function for setting
+// the stscreds.AssumeRoleOptions.
+type assumeRoleCredentialOptionsProvider interface {
+	getAssumeRoleCredentialOptions(ctx context.Context) (func(*stscreds.AssumeRoleOptions), bool, error)
+}
+
+// getAssumeRoleCredentialProviderOptions searches the slice of configs and returns the first function found
+func getAssumeRoleCredentialProviderOptions(ctx context.Context, configs configs) (f func(*stscreds.AssumeRoleOptions), found bool, err error) {
+	for _, config := range configs {
+		if p, ok := config.(assumeRoleCredentialOptionsProvider); ok {
+			f, found, err = p.getAssumeRoleCredentialOptions(ctx)
+			if err != nil || found {
+				break
+			}
+		}
+	}
+	return
+}
+
+// HTTPClient is an HTTP client implementation
+type HTTPClient interface {
+	Do(*http.Request) (*http.Response, error)
+}
+
+// httpClientProvider is an interface for retrieving HTTPClient
+type httpClientProvider interface {
+	getHTTPClient(ctx context.Context) (HTTPClient, bool, error)
+}
+
+// getHTTPClient searches the slice of configs and returns the HTTPClient set on configs
+func getHTTPClient(ctx context.Context, configs configs) (client HTTPClient, found bool, err error) {
+	for _, config := range configs {
+		if p, ok := config.(httpClientProvider); ok {
+			client, found, err = p.getHTTPClient(ctx)
+			if err != nil || found {
+				break
+			}
+		}
+	}
+	return
+}
+
+// apiOptionsProvider is an interface for retrieving APIOptions
+type apiOptionsProvider interface {
+	getAPIOptions(ctx context.Context) ([]func(*middleware.Stack) error, bool, error)
+}
+
+// getAPIOptions searches the slice of configs and returns the APIOptions set on configs
+func getAPIOptions(ctx context.Context, configs configs) (apiOptions []func(*middleware.Stack) error, found bool, err error) {
+	for _, config := range configs {
+		if p, ok := config.(apiOptionsProvider); ok {
+			// retrieve APIOptions from configs and set it on cfg
+			apiOptions, found, err = p.getAPIOptions(ctx)
+			if err != nil || found {
+				break
+			}
+		}
+	}
+	return
+}
+
+// endpointResolverProvider is an interface for retrieving an aws.EndpointResolver from a configuration source
+type endpointResolverProvider interface {
+	getEndpointResolver(ctx context.Context) (aws.EndpointResolver, bool, error)
+}
+
+// getEndpointResolver searches the provided config sources for a EndpointResolverFunc that can be used
+// to configure the aws.Config.EndpointResolver value.
+func getEndpointResolver(ctx context.Context, configs configs) (f aws.EndpointResolver, found bool, err error) {
+	for _, c := range configs {
+		if p, ok := c.(endpointResolverProvider); ok {
+			f, found, err = p.getEndpointResolver(ctx)
+			if err != nil || found {
+				break
+			}
+		}
+	}
+	return
+}
+
+// endpointResolverWithOptionsProvider is an interface for retrieving an aws.EndpointResolverWithOptions from a configuration source
+type endpointResolverWithOptionsProvider interface {
+	getEndpointResolverWithOptions(ctx context.Context) (aws.EndpointResolverWithOptions, bool, error)
+}
+
+// getEndpointResolver searches the provided config sources for a EndpointResolverFunc that can be used
+// to configure the aws.Config.EndpointResolver value.
+func getEndpointResolverWithOptions(ctx context.Context, configs configs) (f aws.EndpointResolverWithOptions, found bool, err error) {
+	for _, c := range configs {
+		if p, ok := c.(endpointResolverWithOptionsProvider); ok {
+			f, found, err = p.getEndpointResolverWithOptions(ctx)
+			if err != nil || found {
+				break
+			}
+		}
+	}
+	return
+}
+
+// loggerProvider is an interface for retrieving a logging.Logger from a configuration source.
+type loggerProvider interface {
+	getLogger(ctx context.Context) (logging.Logger, bool, error)
+}
+
+// getLogger searches the provided config sources for a logging.Logger that can be used
+// to configure the aws.Config.Logger value.
+func getLogger(ctx context.Context, configs configs) (l logging.Logger, found bool, err error) {
+	for _, c := range configs {
+		if p, ok := c.(loggerProvider); ok {
+			l, found, err = p.getLogger(ctx)
+			if err != nil || found {
+				break
+			}
+		}
+	}
+	return
+}
+
+// clientLogModeProvider is an interface for retrieving the aws.ClientLogMode from a configuration source.
+type clientLogModeProvider interface {
+	getClientLogMode(ctx context.Context) (aws.ClientLogMode, bool, error)
+}
+
+func getClientLogMode(ctx context.Context, configs configs) (m aws.ClientLogMode, found bool, err error) {
+	for _, c := range configs {
+		if p, ok := c.(clientLogModeProvider); ok {
+			m, found, err = p.getClientLogMode(ctx)
+			if err != nil || found {
+				break
+			}
+		}
+	}
+	return
+}
+
+// retryProvider is an configuration provider for custom Retryer.
+type retryProvider interface {
+	getRetryer(ctx context.Context) (func() aws.Retryer, bool, error)
+}
+
+func getRetryer(ctx context.Context, configs configs) (v func() aws.Retryer, found bool, err error) {
+	for _, c := range configs {
+		if p, ok := c.(retryProvider); ok {
+			v, found, err = p.getRetryer(ctx)
+			if err != nil || found {
+				break
+			}
+		}
+	}
+	return
+}
+
+// logConfigurationWarningsProvider is an configuration provider for
+// retrieving a boolean indicating whether configuration issues should
+// be logged when loading from config sources
+type logConfigurationWarningsProvider interface {
+	getLogConfigurationWarnings(ctx context.Context) (bool, bool, error)
+}
+
+func getLogConfigurationWarnings(ctx context.Context, configs configs) (v bool, found bool, err error) {
+	for _, c := range configs {
+		if p, ok := c.(logConfigurationWarningsProvider); ok {
+			v, found, err = p.getLogConfigurationWarnings(ctx)
+			if err != nil || found {
+				break
+			}
+		}
+	}
+	return
+}
+
+// ssoCredentialOptionsProvider is an interface for retrieving a function for setting
+// the ssocreds.Options.
+type ssoCredentialOptionsProvider interface {
+	getSSOProviderOptions(context.Context) (func(*ssocreds.Options), bool, error)
+}
+
+func getSSOProviderOptions(ctx context.Context, configs configs) (v func(options *ssocreds.Options), found bool, err error) {
+	for _, c := range configs {
+		if p, ok := c.(ssoCredentialOptionsProvider); ok {
+			v, found, err = p.getSSOProviderOptions(ctx)
+			if err != nil || found {
+				break
+			}
+		}
+	}
+	return v, found, err
+}
+
+type defaultsModeIMDSClientProvider interface {
+	getDefaultsModeIMDSClient(context.Context) (*imds.Client, bool, error)
+}
+
+func getDefaultsModeIMDSClient(ctx context.Context, configs configs) (v *imds.Client, found bool, err error) {
+	for _, c := range configs {
+		if p, ok := c.(defaultsModeIMDSClientProvider); ok {
+			v, found, err = p.getDefaultsModeIMDSClient(ctx)
+			if err != nil || found {
+				break
+			}
+		}
+	}
+	return v, found, err
+}
+
+type defaultsModeProvider interface {
+	getDefaultsMode(context.Context) (aws.DefaultsMode, bool, error)
+}
+
+func getDefaultsMode(ctx context.Context, configs configs) (v aws.DefaultsMode, found bool, err error) {
+	for _, c := range configs {
+		if p, ok := c.(defaultsModeProvider); ok {
+			v, found, err = p.getDefaultsMode(ctx)
+			if err != nil || found {
+				break
+			}
+		}
+	}
+	return v, found, err
+}
+
+type retryMaxAttemptsProvider interface {
+	GetRetryMaxAttempts(context.Context) (int, bool, error)
+}
+
+func getRetryMaxAttempts(ctx context.Context, configs configs) (v int, found bool, err error) {
+	for _, c := range configs {
+		if p, ok := c.(retryMaxAttemptsProvider); ok {
+			v, found, err = p.GetRetryMaxAttempts(ctx)
+			if err != nil || found {
+				break
+			}
+		}
+	}
+	return v, found, err
+}
+
+type retryModeProvider interface {
+	GetRetryMode(context.Context) (aws.RetryMode, bool, error)
+}
+
+func getRetryMode(ctx context.Context, configs configs) (v aws.RetryMode, found bool, err error) {
+	for _, c := range configs {
+		if p, ok := c.(retryModeProvider); ok {
+			v, found, err = p.GetRetryMode(ctx)
+			if err != nil || found {
+				break
+			}
+		}
+	}
+	return v, found, err
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/config/resolve.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/config/resolve.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/config/resolve.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/config/resolve.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,383 @@
+package config
+
+import (
+	"context"
+	"crypto/tls"
+	"crypto/x509"
+	"fmt"
+	"io/ioutil"
+	"net/http"
+	"os"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+	awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http"
+	"github.com/aws/aws-sdk-go-v2/feature/ec2/imds"
+	"github.com/aws/smithy-go/logging"
+)
+
+// resolveDefaultAWSConfig will write default configuration values into the cfg
+// value. It will write the default values, overwriting any previous value.
+//
+// This should be used as the first resolver in the slice of resolvers when
+// resolving external configuration.
+func resolveDefaultAWSConfig(ctx context.Context, cfg *aws.Config, cfgs configs) error {
+	var sources []interface{}
+	for _, s := range cfgs {
+		sources = append(sources, s)
+	}
+
+	*cfg = aws.Config{
+		Logger:        logging.NewStandardLogger(os.Stderr),
+		ConfigSources: sources,
+	}
+	return nil
+}
+
+// resolveCustomCABundle extracts the first instance of a custom CA bundle filename
+// from the external configurations. It will update the HTTP Client's builder
+// to be configured with the custom CA bundle.
+//
+// Config provider used:
+// * customCABundleProvider
+func resolveCustomCABundle(ctx context.Context, cfg *aws.Config, cfgs configs) error {
+	pemCerts, found, err := getCustomCABundle(ctx, cfgs)
+	if err != nil {
+		// TODO error handling, What is the best way to handle this?
+		// capture previous errors continue. error out if all errors
+		return err
+	}
+	if !found {
+		return nil
+	}
+
+	if cfg.HTTPClient == nil {
+		cfg.HTTPClient = awshttp.NewBuildableClient()
+	}
+
+	trOpts, ok := cfg.HTTPClient.(*awshttp.BuildableClient)
+	if !ok {
+		return fmt.Errorf("unable to add custom RootCAs HTTPClient, "+
+			"has no WithTransportOptions, %T", cfg.HTTPClient)
+	}
+
+	var appendErr error
+	client := trOpts.WithTransportOptions(func(tr *http.Transport) {
+		if tr.TLSClientConfig == nil {
+			tr.TLSClientConfig = &tls.Config{}
+		}
+		if tr.TLSClientConfig.RootCAs == nil {
+			tr.TLSClientConfig.RootCAs = x509.NewCertPool()
+		}
+
+		b, err := ioutil.ReadAll(pemCerts)
+		if err != nil {
+			appendErr = fmt.Errorf("failed to read custom CA bundle PEM file")
+		}
+
+		if !tr.TLSClientConfig.RootCAs.AppendCertsFromPEM(b) {
+			appendErr = fmt.Errorf("failed to load custom CA bundle PEM file")
+		}
+	})
+	if appendErr != nil {
+		return appendErr
+	}
+
+	cfg.HTTPClient = client
+	return err
+}
+
+// resolveRegion extracts the first instance of a Region from the configs slice.
+//
+// Config providers used:
+// * regionProvider
+func resolveRegion(ctx context.Context, cfg *aws.Config, configs configs) error {
+	v, found, err := getRegion(ctx, configs)
+	if err != nil {
+		// TODO error handling, What is the best way to handle this?
+		// capture previous errors continue. error out if all errors
+		return err
+	}
+	if !found {
+		return nil
+	}
+
+	cfg.Region = v
+	return nil
+}
+
+func resolveBaseEndpoint(ctx context.Context, cfg *aws.Config, configs configs) error {
+	var downcastCfgSources []interface{}
+	for _, cs := range configs {
+		downcastCfgSources = append(downcastCfgSources, interface{}(cs))
+	}
+
+	if val, found, err := GetIgnoreConfiguredEndpoints(ctx, downcastCfgSources); found && val && err == nil {
+		cfg.BaseEndpoint = nil
+		return nil
+	}
+
+	v, found, err := getBaseEndpoint(ctx, configs)
+	if err != nil {
+		return err
+	}
+
+	if !found {
+		return nil
+	}
+	cfg.BaseEndpoint = aws.String(v)
+	return nil
+}
+
+// resolveAppID extracts the sdk app ID from the configs slice's SharedConfig or env var
+func resolveAppID(ctx context.Context, cfg *aws.Config, configs configs) error {
+	ID, _, err := getAppID(ctx, configs)
+	if err != nil {
+		return err
+	}
+
+	cfg.AppID = ID
+	return nil
+}
+
+// resolveDisableRequestCompression extracts the DisableRequestCompression from the configs slice's
+// SharedConfig or EnvConfig
+func resolveDisableRequestCompression(ctx context.Context, cfg *aws.Config, configs configs) error {
+	disable, _, err := getDisableRequestCompression(ctx, configs)
+	if err != nil {
+		return err
+	}
+
+	cfg.DisableRequestCompression = disable
+	return nil
+}
+
+// resolveRequestMinCompressSizeBytes extracts the RequestMinCompressSizeBytes from the configs slice's
+// SharedConfig or EnvConfig
+func resolveRequestMinCompressSizeBytes(ctx context.Context, cfg *aws.Config, configs configs) error {
+	minBytes, found, err := getRequestMinCompressSizeBytes(ctx, configs)
+	if err != nil {
+		return err
+	}
+	// must set a default min size 10240 if not configured
+	if !found {
+		minBytes = 10240
+	}
+	cfg.RequestMinCompressSizeBytes = minBytes
+	return nil
+}
+
+// resolveAccountIDEndpointMode extracts the AccountIDEndpointMode from the configs slice's
+// SharedConfig or EnvConfig
+func resolveAccountIDEndpointMode(ctx context.Context, cfg *aws.Config, configs configs) error {
+	m, found, err := getAccountIDEndpointMode(ctx, configs)
+	if err != nil {
+		return err
+	}
+
+	if !found {
+		m = aws.AccountIDEndpointModePreferred
+	}
+
+	cfg.AccountIDEndpointMode = m
+	return nil
+}
+
+// resolveDefaultRegion extracts the first instance of a default region and sets `aws.Config.Region` to the default
+// region if region had not been resolved from other sources.
+func resolveDefaultRegion(ctx context.Context, cfg *aws.Config, configs configs) error {
+	if len(cfg.Region) > 0 {
+		return nil
+	}
+
+	v, found, err := getDefaultRegion(ctx, configs)
+	if err != nil {
+		return err
+	}
+	if !found {
+		return nil
+	}
+
+	cfg.Region = v
+
+	return nil
+}
+
+// resolveHTTPClient extracts the first instance of a HTTPClient and sets `aws.Config.HTTPClient` to the HTTPClient instance
+// if one has not been resolved from other sources.
+func resolveHTTPClient(ctx context.Context, cfg *aws.Config, configs configs) error {
+	c, found, err := getHTTPClient(ctx, configs)
+	if err != nil {
+		return err
+	}
+	if !found {
+		return nil
+	}
+
+	cfg.HTTPClient = c
+	return nil
+}
+
+// resolveAPIOptions extracts the first instance of APIOptions and sets `aws.Config.APIOptions` to the resolved API options
+// if one has not been resolved from other sources.
+func resolveAPIOptions(ctx context.Context, cfg *aws.Config, configs configs) error {
+	o, found, err := getAPIOptions(ctx, configs)
+	if err != nil {
+		return err
+	}
+	if !found {
+		return nil
+	}
+
+	cfg.APIOptions = o
+
+	return nil
+}
+
+// resolveEndpointResolver extracts the first instance of a EndpointResolverFunc from the config slice
+// and sets the functions result on the aws.Config.EndpointResolver
+func resolveEndpointResolver(ctx context.Context, cfg *aws.Config, configs configs) error {
+	endpointResolver, found, err := getEndpointResolver(ctx, configs)
+	if err != nil {
+		return err
+	}
+	if !found {
+		return nil
+	}
+
+	cfg.EndpointResolver = endpointResolver
+
+	return nil
+}
+
+// resolveEndpointResolver extracts the first instance of a EndpointResolverFunc from the config slice
+// and sets the functions result on the aws.Config.EndpointResolver
+func resolveEndpointResolverWithOptions(ctx context.Context, cfg *aws.Config, configs configs) error {
+	endpointResolver, found, err := getEndpointResolverWithOptions(ctx, configs)
+	if err != nil {
+		return err
+	}
+	if !found {
+		return nil
+	}
+
+	cfg.EndpointResolverWithOptions = endpointResolver
+
+	return nil
+}
+
+func resolveLogger(ctx context.Context, cfg *aws.Config, configs configs) error {
+	logger, found, err := getLogger(ctx, configs)
+	if err != nil {
+		return err
+	}
+	if !found {
+		return nil
+	}
+
+	cfg.Logger = logger
+
+	return nil
+}
+
+func resolveClientLogMode(ctx context.Context, cfg *aws.Config, configs configs) error {
+	mode, found, err := getClientLogMode(ctx, configs)
+	if err != nil {
+		return err
+	}
+	if !found {
+		return nil
+	}
+
+	cfg.ClientLogMode = mode
+
+	return nil
+}
+
+func resolveRetryer(ctx context.Context, cfg *aws.Config, configs configs) error {
+	retryer, found, err := getRetryer(ctx, configs)
+	if err != nil {
+		return err
+	}
+
+	if found {
+		cfg.Retryer = retryer
+		return nil
+	}
+
+	// Only load the retry options if a custom retryer has not be specified.
+	if err = resolveRetryMaxAttempts(ctx, cfg, configs); err != nil {
+		return err
+	}
+	return resolveRetryMode(ctx, cfg, configs)
+}
+
+func resolveEC2IMDSRegion(ctx context.Context, cfg *aws.Config, configs configs) error {
+	if len(cfg.Region) > 0 {
+		return nil
+	}
+
+	region, found, err := getEC2IMDSRegion(ctx, configs)
+	if err != nil {
+		return err
+	}
+	if !found {
+		return nil
+	}
+
+	cfg.Region = region
+
+	return nil
+}
+
+func resolveDefaultsModeOptions(ctx context.Context, cfg *aws.Config, configs configs) error {
+	defaultsMode, found, err := getDefaultsMode(ctx, configs)
+	if err != nil {
+		return err
+	}
+	if !found {
+		defaultsMode = aws.DefaultsModeLegacy
+	}
+
+	var environment aws.RuntimeEnvironment
+	if defaultsMode == aws.DefaultsModeAuto {
+		envConfig, _, _ := getAWSConfigSources(configs)
+
+		client, found, err := getDefaultsModeIMDSClient(ctx, configs)
+		if err != nil {
+			return err
+		}
+		if !found {
+			client = imds.NewFromConfig(*cfg)
+		}
+
+		environment, err = resolveDefaultsModeRuntimeEnvironment(ctx, envConfig, client)
+		if err != nil {
+			return err
+		}
+	}
+
+	cfg.DefaultsMode = defaultsMode
+	cfg.RuntimeEnvironment = environment
+
+	return nil
+}
+
+func resolveRetryMaxAttempts(ctx context.Context, cfg *aws.Config, configs configs) error {
+	maxAttempts, found, err := getRetryMaxAttempts(ctx, configs)
+	if err != nil || !found {
+		return err
+	}
+	cfg.RetryMaxAttempts = maxAttempts
+
+	return nil
+}
+
+func resolveRetryMode(ctx context.Context, cfg *aws.Config, configs configs) error {
+	retryMode, found, err := getRetryMode(ctx, configs)
+	if err != nil || !found {
+		return err
+	}
+	cfg.RetryMode = retryMode
+
+	return nil
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/config/resolve_bearer_token.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/config/resolve_bearer_token.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/config/resolve_bearer_token.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/config/resolve_bearer_token.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,122 @@
+package config
+
+import (
+	"context"
+	"fmt"
+	"time"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/aws/aws-sdk-go-v2/credentials/ssocreds"
+	"github.com/aws/aws-sdk-go-v2/service/ssooidc"
+	smithybearer "github.com/aws/smithy-go/auth/bearer"
+)
+
+// resolveBearerAuthToken extracts a token provider from the config sources.
+//
+// If an explicit bearer authentication token provider is not found the
+// resolver will fallback to resolving token provider via other config sources
+// such as SharedConfig.
+func resolveBearerAuthToken(ctx context.Context, cfg *aws.Config, configs configs) error {
+	found, err := resolveBearerAuthTokenProvider(ctx, cfg, configs)
+	if found || err != nil {
+		return err
+	}
+
+	return resolveBearerAuthTokenProviderChain(ctx, cfg, configs)
+}
+
+// resolveBearerAuthTokenProvider extracts the first instance of
+// BearerAuthTokenProvider from the config sources.
+//
+// The resolved BearerAuthTokenProvider will be wrapped in a cache to ensure
+// the Token is only refreshed when needed. This also protects the
+// TokenProvider so it can be used concurrently.
+//
+// Config providers used:
+// * bearerAuthTokenProviderProvider
+func resolveBearerAuthTokenProvider(ctx context.Context, cfg *aws.Config, configs configs) (bool, error) {
+	tokenProvider, found, err := getBearerAuthTokenProvider(ctx, configs)
+	if !found || err != nil {
+		return false, err
+	}
+
+	cfg.BearerAuthTokenProvider, err = wrapWithBearerAuthTokenCache(
+		ctx, configs, tokenProvider)
+	if err != nil {
+		return false, err
+	}
+
+	return true, nil
+}
+
+func resolveBearerAuthTokenProviderChain(ctx context.Context, cfg *aws.Config, configs configs) (err error) {
+	_, sharedConfig, _ := getAWSConfigSources(configs)
+
+	var provider smithybearer.TokenProvider
+
+	if sharedConfig.SSOSession != nil {
+		provider, err = resolveBearerAuthSSOTokenProvider(
+			ctx, cfg, sharedConfig.SSOSession, configs)
+	}
+
+	if err == nil && provider != nil {
+		cfg.BearerAuthTokenProvider, err = wrapWithBearerAuthTokenCache(
+			ctx, configs, provider)
+	}
+
+	return err
+}
+
+func resolveBearerAuthSSOTokenProvider(ctx context.Context, cfg *aws.Config, session *SSOSession, configs configs) (*ssocreds.SSOTokenProvider, error) {
+	ssoTokenProviderOptionsFn, found, err := getSSOTokenProviderOptions(ctx, configs)
+	if err != nil {
+		return nil, fmt.Errorf("failed to get SSOTokenProviderOptions from config sources, %w", err)
+	}
+
+	var optFns []func(*ssocreds.SSOTokenProviderOptions)
+	if found {
+		optFns = append(optFns, ssoTokenProviderOptionsFn)
+	}
+
+	cachePath, err := ssocreds.StandardCachedTokenFilepath(session.Name)
+	if err != nil {
+		return nil, fmt.Errorf("failed to get SSOTokenProvider's cache path, %w", err)
+	}
+
+	client := ssooidc.NewFromConfig(*cfg)
+	provider := ssocreds.NewSSOTokenProvider(client, cachePath, optFns...)
+
+	return provider, nil
+}
+
+// wrapWithBearerAuthTokenCache will wrap provider with an smithy-go
+// bearer/auth#TokenCache with the provided options if the provider is not
+// already a TokenCache.
+func wrapWithBearerAuthTokenCache(
+	ctx context.Context,
+	cfgs configs,
+	provider smithybearer.TokenProvider,
+	optFns ...func(*smithybearer.TokenCacheOptions),
+) (smithybearer.TokenProvider, error) {
+	_, ok := provider.(*smithybearer.TokenCache)
+	if ok {
+		return provider, nil
+	}
+
+	tokenCacheConfigOptions, optionsFound, err := getBearerAuthTokenCacheOptions(ctx, cfgs)
+	if err != nil {
+		return nil, err
+	}
+
+	opts := make([]func(*smithybearer.TokenCacheOptions), 0, 2+len(optFns))
+	opts = append(opts, func(o *smithybearer.TokenCacheOptions) {
+		o.RefreshBeforeExpires = 5 * time.Minute
+		o.RetrieveBearerTokenTimeout = 30 * time.Second
+	})
+	opts = append(opts, optFns...)
+	if optionsFound {
+		opts = append(opts, tokenCacheConfigOptions)
+	}
+
+	return smithybearer.NewTokenCache(provider, opts...), nil
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/config/resolve_credentials.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/config/resolve_credentials.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/config/resolve_credentials.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/config/resolve_credentials.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,566 @@
+package config
+
+import (
+	"context"
+	"fmt"
+	"io/ioutil"
+	"net"
+	"net/url"
+	"os"
+	"time"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/aws/aws-sdk-go-v2/credentials"
+	"github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds"
+	"github.com/aws/aws-sdk-go-v2/credentials/endpointcreds"
+	"github.com/aws/aws-sdk-go-v2/credentials/processcreds"
+	"github.com/aws/aws-sdk-go-v2/credentials/ssocreds"
+	"github.com/aws/aws-sdk-go-v2/credentials/stscreds"
+	"github.com/aws/aws-sdk-go-v2/feature/ec2/imds"
+	"github.com/aws/aws-sdk-go-v2/service/sso"
+	"github.com/aws/aws-sdk-go-v2/service/ssooidc"
+	"github.com/aws/aws-sdk-go-v2/service/sts"
+)
+
+const (
+	// valid credential source values
+	credSourceEc2Metadata      = "Ec2InstanceMetadata"
+	credSourceEnvironment      = "Environment"
+	credSourceECSContainer     = "EcsContainer"
+	httpProviderAuthFileEnvVar = "AWS_CONTAINER_AUTHORIZATION_TOKEN_FILE"
+)
+
+// direct representation of the IPv4 address for the ECS container
+// "169.254.170.2"
+var ecsContainerIPv4 net.IP = []byte{
+	169, 254, 170, 2,
+}
+
+// direct representation of the IPv4 address for the EKS container
+// "169.254.170.23"
+var eksContainerIPv4 net.IP = []byte{
+	169, 254, 170, 23,
+}
+
+// direct representation of the IPv6 address for the EKS container
+// "fd00:ec2::23"
+var eksContainerIPv6 net.IP = []byte{
+	0xFD, 0, 0xE, 0xC2,
+	0, 0, 0, 0,
+	0, 0, 0, 0,
+	0, 0, 0, 0x23,
+}
+
+var (
+	ecsContainerEndpoint = "http://169.254.170.2" // not constant to allow for swapping during unit-testing
+)
+
+// resolveCredentials extracts a credential provider from slice of config
+// sources.
+//
+// If an explicit credential provider is not found the resolver will fallback
+// to resolving credentials by extracting a credential provider from EnvConfig
+// and SharedConfig.
+func resolveCredentials(ctx context.Context, cfg *aws.Config, configs configs) error {
+	found, err := resolveCredentialProvider(ctx, cfg, configs)
+	if found || err != nil {
+		return err
+	}
+
+	return resolveCredentialChain(ctx, cfg, configs)
+}
+
+// resolveCredentialProvider extracts the first instance of Credentials from the
+// config slices.
+//
+// The resolved CredentialProvider will be wrapped in a cache to ensure the
+// credentials are only refreshed when needed. This also protects the
+// credential provider to be used concurrently.
+//
+// Config providers used:
+// * credentialsProviderProvider
+func resolveCredentialProvider(ctx context.Context, cfg *aws.Config, configs configs) (bool, error) {
+	credProvider, found, err := getCredentialsProvider(ctx, configs)
+	if !found || err != nil {
+		return false, err
+	}
+
+	cfg.Credentials, err = wrapWithCredentialsCache(ctx, configs, credProvider)
+	if err != nil {
+		return false, err
+	}
+
+	return true, nil
+}
+
+// resolveCredentialChain resolves a credential provider chain using EnvConfig
+// and SharedConfig if present in the slice of provided configs.
+//
+// The resolved CredentialProvider will be wrapped in a cache to ensure the
+// credentials are only refreshed when needed. This also protects the
+// credential provider to be used concurrently.
+func resolveCredentialChain(ctx context.Context, cfg *aws.Config, configs configs) (err error) {
+	envConfig, sharedConfig, other := getAWSConfigSources(configs)
+
+	// When checking if a profile was specified programmatically we should only consider the "other"
+	// configuration sources that have been provided. This ensures we correctly honor the expected credential
+	// hierarchy.
+	_, sharedProfileSet, err := getSharedConfigProfile(ctx, other)
+	if err != nil {
+		return err
+	}
+
+	switch {
+	case sharedProfileSet:
+		err = resolveCredsFromProfile(ctx, cfg, envConfig, sharedConfig, other)
+	case envConfig.Credentials.HasKeys():
+		cfg.Credentials = credentials.StaticCredentialsProvider{Value: envConfig.Credentials}
+	case len(envConfig.WebIdentityTokenFilePath) > 0:
+		err = assumeWebIdentity(ctx, cfg, envConfig.WebIdentityTokenFilePath, envConfig.RoleARN, envConfig.RoleSessionName, configs)
+	default:
+		err = resolveCredsFromProfile(ctx, cfg, envConfig, sharedConfig, other)
+	}
+	if err != nil {
+		return err
+	}
+
+	// Wrap the resolved provider in a cache so the SDK will cache credentials.
+	cfg.Credentials, err = wrapWithCredentialsCache(ctx, configs, cfg.Credentials)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func resolveCredsFromProfile(ctx context.Context, cfg *aws.Config, envConfig *EnvConfig, sharedConfig *SharedConfig, configs configs) (err error) {
+
+	switch {
+	case sharedConfig.Source != nil:
+		// Assume IAM role with credentials source from a different profile.
+		err = resolveCredsFromProfile(ctx, cfg, envConfig, sharedConfig.Source, configs)
+
+	case sharedConfig.Credentials.HasKeys():
+		// Static Credentials from Shared Config/Credentials file.
+		cfg.Credentials = credentials.StaticCredentialsProvider{
+			Value: sharedConfig.Credentials,
+		}
+
+	case len(sharedConfig.CredentialSource) != 0:
+		err = resolveCredsFromSource(ctx, cfg, envConfig, sharedConfig, configs)
+
+	case len(sharedConfig.WebIdentityTokenFile) != 0:
+		// Credentials from Assume Web Identity token require an IAM Role, and
+		// that roll will be assumed. May be wrapped with another assume role
+		// via SourceProfile.
+		return assumeWebIdentity(ctx, cfg, sharedConfig.WebIdentityTokenFile, sharedConfig.RoleARN, sharedConfig.RoleSessionName, configs)
+
+	case sharedConfig.hasSSOConfiguration():
+		err = resolveSSOCredentials(ctx, cfg, sharedConfig, configs)
+
+	case len(sharedConfig.CredentialProcess) != 0:
+		// Get credentials from CredentialProcess
+		err = processCredentials(ctx, cfg, sharedConfig, configs)
+
+	case len(envConfig.ContainerCredentialsEndpoint) != 0:
+		err = resolveLocalHTTPCredProvider(ctx, cfg, envConfig.ContainerCredentialsEndpoint, envConfig.ContainerAuthorizationToken, configs)
+
+	case len(envConfig.ContainerCredentialsRelativePath) != 0:
+		err = resolveHTTPCredProvider(ctx, cfg, ecsContainerURI(envConfig.ContainerCredentialsRelativePath), envConfig.ContainerAuthorizationToken, configs)
+
+	default:
+		err = resolveEC2RoleCredentials(ctx, cfg, configs)
+	}
+	if err != nil {
+		return err
+	}
+
+	if len(sharedConfig.RoleARN) > 0 {
+		return credsFromAssumeRole(ctx, cfg, sharedConfig, configs)
+	}
+
+	return nil
+}
+
+func resolveSSOCredentials(ctx context.Context, cfg *aws.Config, sharedConfig *SharedConfig, configs configs) error {
+	if err := sharedConfig.validateSSOConfiguration(); err != nil {
+		return err
+	}
+
+	var options []func(*ssocreds.Options)
+	v, found, err := getSSOProviderOptions(ctx, configs)
+	if err != nil {
+		return err
+	}
+	if found {
+		options = append(options, v)
+	}
+
+	cfgCopy := cfg.Copy()
+
+	if sharedConfig.SSOSession != nil {
+		ssoTokenProviderOptionsFn, found, err := getSSOTokenProviderOptions(ctx, configs)
+		if err != nil {
+			return fmt.Errorf("failed to get SSOTokenProviderOptions from config sources, %w", err)
+		}
+		var optFns []func(*ssocreds.SSOTokenProviderOptions)
+		if found {
+			optFns = append(optFns, ssoTokenProviderOptionsFn)
+		}
+		cfgCopy.Region = sharedConfig.SSOSession.SSORegion
+		cachedPath, err := ssocreds.StandardCachedTokenFilepath(sharedConfig.SSOSession.Name)
+		if err != nil {
+			return err
+		}
+		oidcClient := ssooidc.NewFromConfig(cfgCopy)
+		tokenProvider := ssocreds.NewSSOTokenProvider(oidcClient, cachedPath, optFns...)
+		options = append(options, func(o *ssocreds.Options) {
+			o.SSOTokenProvider = tokenProvider
+			o.CachedTokenFilepath = cachedPath
+		})
+	} else {
+		cfgCopy.Region = sharedConfig.SSORegion
+	}
+
+	cfg.Credentials = ssocreds.New(sso.NewFromConfig(cfgCopy), sharedConfig.SSOAccountID, sharedConfig.SSORoleName, sharedConfig.SSOStartURL, options...)
+
+	return nil
+}
+
+func ecsContainerURI(path string) string {
+	return fmt.Sprintf("%s%s", ecsContainerEndpoint, path)
+}
+
+func processCredentials(ctx context.Context, cfg *aws.Config, sharedConfig *SharedConfig, configs configs) error {
+	var opts []func(*processcreds.Options)
+
+	options, found, err := getProcessCredentialOptions(ctx, configs)
+	if err != nil {
+		return err
+	}
+	if found {
+		opts = append(opts, options)
+	}
+
+	cfg.Credentials = processcreds.NewProvider(sharedConfig.CredentialProcess, opts...)
+
+	return nil
+}
+
+// isAllowedHost allows host to be loopback or known ECS/EKS container IPs
+//
+// host can either be an IP address OR an unresolved hostname - resolution will
+// be automatically performed in the latter case
+func isAllowedHost(host string) (bool, error) {
+	if ip := net.ParseIP(host); ip != nil {
+		return isIPAllowed(ip), nil
+	}
+
+	addrs, err := lookupHostFn(host)
+	if err != nil {
+		return false, err
+	}
+
+	for _, addr := range addrs {
+		if ip := net.ParseIP(addr); ip == nil || !isIPAllowed(ip) {
+			return false, nil
+		}
+	}
+
+	return true, nil
+}
+
+func isIPAllowed(ip net.IP) bool {
+	return ip.IsLoopback() ||
+		ip.Equal(ecsContainerIPv4) ||
+		ip.Equal(eksContainerIPv4) ||
+		ip.Equal(eksContainerIPv6)
+}
+
+func resolveLocalHTTPCredProvider(ctx context.Context, cfg *aws.Config, endpointURL, authToken string, configs configs) error {
+	var resolveErr error
+
+	parsed, err := url.Parse(endpointURL)
+	if err != nil {
+		resolveErr = fmt.Errorf("invalid URL, %w", err)
+	} else {
+		host := parsed.Hostname()
+		if len(host) == 0 {
+			resolveErr = fmt.Errorf("unable to parse host from local HTTP cred provider URL")
+		} else if parsed.Scheme == "http" {
+			if isAllowedHost, allowHostErr := isAllowedHost(host); allowHostErr != nil {
+				resolveErr = fmt.Errorf("failed to resolve host %q, %v", host, allowHostErr)
+			} else if !isAllowedHost {
+				resolveErr = fmt.Errorf("invalid endpoint host, %q, only loopback/ecs/eks hosts are allowed", host)
+			}
+		}
+	}
+
+	if resolveErr != nil {
+		return resolveErr
+	}
+
+	return resolveHTTPCredProvider(ctx, cfg, endpointURL, authToken, configs)
+}
+
+func resolveHTTPCredProvider(ctx context.Context, cfg *aws.Config, url, authToken string, configs configs) error {
+	optFns := []func(*endpointcreds.Options){
+		func(options *endpointcreds.Options) {
+			if len(authToken) != 0 {
+				options.AuthorizationToken = authToken
+			}
+			if authFilePath := os.Getenv(httpProviderAuthFileEnvVar); authFilePath != "" {
+				options.AuthorizationTokenProvider = endpointcreds.TokenProviderFunc(func() (string, error) {
+					var contents []byte
+					var err error
+					if contents, err = ioutil.ReadFile(authFilePath); err != nil {
+						return "", fmt.Errorf("failed to read authorization token from %v: %v", authFilePath, err)
+					}
+					return string(contents), nil
+				})
+			}
+			options.APIOptions = cfg.APIOptions
+			if cfg.Retryer != nil {
+				options.Retryer = cfg.Retryer()
+			}
+		},
+	}
+
+	optFn, found, err := getEndpointCredentialProviderOptions(ctx, configs)
+	if err != nil {
+		return err
+	}
+	if found {
+		optFns = append(optFns, optFn)
+	}
+
+	provider := endpointcreds.New(url, optFns...)
+
+	cfg.Credentials, err = wrapWithCredentialsCache(ctx, configs, provider, func(options *aws.CredentialsCacheOptions) {
+		options.ExpiryWindow = 5 * time.Minute
+	})
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func resolveCredsFromSource(ctx context.Context, cfg *aws.Config, envConfig *EnvConfig, sharedCfg *SharedConfig, configs configs) (err error) {
+	switch sharedCfg.CredentialSource {
+	case credSourceEc2Metadata:
+		return resolveEC2RoleCredentials(ctx, cfg, configs)
+
+	case credSourceEnvironment:
+		cfg.Credentials = credentials.StaticCredentialsProvider{Value: envConfig.Credentials}
+
+	case credSourceECSContainer:
+		if len(envConfig.ContainerCredentialsRelativePath) == 0 {
+			return fmt.Errorf("EcsContainer was specified as the credential_source, but 'AWS_CONTAINER_CREDENTIALS_RELATIVE_URI' was not set")
+		}
+		return resolveHTTPCredProvider(ctx, cfg, ecsContainerURI(envConfig.ContainerCredentialsRelativePath), envConfig.ContainerAuthorizationToken, configs)
+
+	default:
+		return fmt.Errorf("credential_source values must be EcsContainer, Ec2InstanceMetadata, or Environment")
+	}
+
+	return nil
+}
+
+// resolveEC2RoleCredentials configures cfg.Credentials with an EC2
+// instance-role provider backed by IMDS, applying any externally supplied
+// ec2rolecreds options first and wrapping the resulting provider in a
+// credentials cache.
+func resolveEC2RoleCredentials(ctx context.Context, cfg *aws.Config, configs configs) error {
+	optFns := make([]func(*ec2rolecreds.Options), 0, 2)
+
+	optFn, found, err := getEC2RoleCredentialProviderOptions(ctx, configs)
+	if err != nil {
+		return err
+	}
+	if found {
+		optFns = append(optFns, optFn)
+	}
+
+	// Appended last so it can observe a Client set by the external option
+	// function above and only fill in a default when none was provided.
+	optFns = append(optFns, func(o *ec2rolecreds.Options) {
+		// Only define a client from config if not already defined.
+		if o.Client == nil {
+			o.Client = imds.NewFromConfig(*cfg)
+		}
+	})
+
+	provider := ec2rolecreds.New(optFns...)
+
+	cfg.Credentials, err = wrapWithCredentialsCache(ctx, configs, provider)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// getAWSConfigSources partitions the loaded config sources into the first
+// EnvConfig found, the first SharedConfig found, and all remaining sources
+// (in their original order). Missing env/shared configs are replaced with
+// zero-value placeholders so callers never receive nil.
+func getAWSConfigSources(cfgs configs) (*EnvConfig, *SharedConfig, configs) {
+	var (
+		envConfig    *EnvConfig
+		sharedConfig *SharedConfig
+		other        configs
+	)
+
+	for i := range cfgs {
+		switch c := cfgs[i].(type) {
+		case EnvConfig:
+			if envConfig == nil {
+				envConfig = &c
+			}
+		case *EnvConfig:
+			if envConfig == nil {
+				envConfig = c
+			}
+		case SharedConfig:
+			if sharedConfig == nil {
+				sharedConfig = &c
+			}
+		case *SharedConfig:
+			// Fix: previously this case guarded on envConfig == nil, which
+			// dropped a *SharedConfig source whenever an EnvConfig had
+			// already been seen. Keep only the first SharedConfig instead.
+			if sharedConfig == nil {
+				sharedConfig = c
+			}
+		default:
+			other = append(other, c)
+		}
+	}
+
+	if envConfig == nil {
+		envConfig = &EnvConfig{}
+	}
+
+	if sharedConfig == nil {
+		sharedConfig = &SharedConfig{}
+	}
+
+	return envConfig, sharedConfig, other
+}
+
+// AssumeRoleTokenProviderNotSetError is an error returned when creating a
+// session when the MFAToken option is not set when shared config is
+// configured to load/assume a role with an MFA token.
+type AssumeRoleTokenProviderNotSetError struct{}
+
+// Error is the error message
+func (e AssumeRoleTokenProviderNotSetError) Error() string {
+	// The message contains no formatting verbs, so return the literal
+	// directly instead of fmt.Sprintf (go vet / staticcheck S1039).
+	return "assume role with MFA enabled, but AssumeRoleTokenProvider session option not set."
+}
+
+func assumeWebIdentity(ctx context.Context, cfg *aws.Config, filepath string, roleARN, sessionName string, configs configs) error {
+	if len(filepath) == 0 {
+		return fmt.Errorf("token file path is not set")
+	}
+
+	optFns := []func(*stscreds.WebIdentityRoleOptions){
+		func(options *stscreds.WebIdentityRoleOptions) {
+			options.RoleSessionName = sessionName
+		},
+	}
+
+	optFn, found, err := getWebIdentityCredentialProviderOptions(ctx, configs)
+	if err != nil {
+		return err
+	}
+
+	if found {
+		optFns = append(optFns, optFn)
+	}
+
+	opts := stscreds.WebIdentityRoleOptions{
+		RoleARN: roleARN,
+	}
+
+	for _, fn := range optFns {
+		fn(&opts)
+	}
+
+	if len(opts.RoleARN) == 0 {
+		return fmt.Errorf("role ARN is not set")
+	}
+
+	client := opts.Client
+	if client == nil {
+		client = sts.NewFromConfig(*cfg)
+	}
+
+	provider := stscreds.NewWebIdentityRoleProvider(client, roleARN, stscreds.IdentityTokenFile(filepath), optFns...)
+
+	cfg.Credentials = provider
+
+	return nil
+}
+
+func credsFromAssumeRole(ctx context.Context, cfg *aws.Config, sharedCfg *SharedConfig, configs configs) (err error) {
+	optFns := []func(*stscreds.AssumeRoleOptions){
+		func(options *stscreds.AssumeRoleOptions) {
+			options.RoleSessionName = sharedCfg.RoleSessionName
+			if sharedCfg.RoleDurationSeconds != nil {
+				if *sharedCfg.RoleDurationSeconds/time.Minute > 15 {
+					options.Duration = *sharedCfg.RoleDurationSeconds
+				}
+			}
+			// Assume role with external ID
+			if len(sharedCfg.ExternalID) > 0 {
+				options.ExternalID = aws.String(sharedCfg.ExternalID)
+			}
+
+			// Assume role with MFA
+			if len(sharedCfg.MFASerial) != 0 {
+				options.SerialNumber = aws.String(sharedCfg.MFASerial)
+			}
+		},
+	}
+
+	optFn, found, err := getAssumeRoleCredentialProviderOptions(ctx, configs)
+	if err != nil {
+		return err
+	}
+	if found {
+		optFns = append(optFns, optFn)
+	}
+
+	{
+		// Synthesize options early to validate configuration errors sooner to ensure a token provider
+		// is present if the SerialNumber was set.
+		var o stscreds.AssumeRoleOptions
+		for _, fn := range optFns {
+			fn(&o)
+		}
+		if o.TokenProvider == nil && o.SerialNumber != nil {
+			return AssumeRoleTokenProviderNotSetError{}
+		}
+	}
+
+	cfg.Credentials = stscreds.NewAssumeRoleProvider(sts.NewFromConfig(*cfg), sharedCfg.RoleARN, optFns...)
+
+	return nil
+}
+
+// wrapWithCredentialsCache will wrap provider with an aws.CredentialsCache
+// with the provided options if the provider is not already a
+// aws.CredentialsCache.
+func wrapWithCredentialsCache(
+	ctx context.Context,
+	cfgs configs,
+	provider aws.CredentialsProvider,
+	optFns ...func(options *aws.CredentialsCacheOptions),
+) (aws.CredentialsProvider, error) {
+	_, ok := provider.(*aws.CredentialsCache)
+	if ok {
+		return provider, nil
+	}
+
+	credCacheOptions, optionsFound, err := getCredentialsCacheOptionsProvider(ctx, cfgs)
+	if err != nil {
+		return nil, err
+	}
+
+	// force allocation of a new slice if the additional options are
+	// needed, to prevent overwriting the passed in slice of options.
+	optFns = optFns[:len(optFns):len(optFns)]
+	if optionsFound {
+		optFns = append(optFns, credCacheOptions)
+	}
+
+	return aws.NewCredentialsCache(provider, optFns...), nil
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/config/shared_config.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/config/shared_config.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/config/shared_config.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/config/shared_config.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,1618 @@
+package config
+
+import (
+	"bytes"
+	"context"
+	"errors"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"strings"
+	"time"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/aws/aws-sdk-go-v2/feature/ec2/imds"
+	"github.com/aws/aws-sdk-go-v2/internal/ini"
+	"github.com/aws/aws-sdk-go-v2/internal/shareddefaults"
+	"github.com/aws/smithy-go/logging"
+	smithyrequestcompression "github.com/aws/smithy-go/private/requestcompression"
+)
+
+const (
+	// Prefix to use for filtering profiles. The profile prefix should only
+	// exist in the shared config file, not the credentials file.
+	profilePrefix = `profile `
+
+	// Prefix to be used for SSO sections. These are supposed to only exist in
+	// the shared config file, not the credentials file.
+	ssoSectionPrefix = `sso-session `
+
+	// Prefix for services section. It is referenced in profile via the services
+	// parameter to configure clients for service-specific parameters.
+	servicesPrefix = `services `
+
+	// string equivalent for boolean
+	endpointDiscoveryDisabled = `false`
+	endpointDiscoveryEnabled  = `true`
+	endpointDiscoveryAuto     = `auto`
+
+	// Static Credentials group
+	accessKeyIDKey  = `aws_access_key_id`     // group required
+	secretAccessKey = `aws_secret_access_key` // group required
+	sessionTokenKey = `aws_session_token`     // optional
+
+	// Assume Role Credentials group
+	roleArnKey             = `role_arn`          // group required
+	sourceProfileKey       = `source_profile`    // group required
+	credentialSourceKey    = `credential_source` // group required (or source_profile)
+	externalIDKey          = `external_id`       // optional
+	mfaSerialKey           = `mfa_serial`        // optional
+	roleSessionNameKey     = `role_session_name` // optional
+	roleDurationSecondsKey = "duration_seconds"  // optional
+
+	// AWS Single Sign-On (AWS SSO) group
+	ssoSessionNameKey = "sso_session"
+
+	ssoRegionKey   = "sso_region"
+	ssoStartURLKey = "sso_start_url"
+
+	ssoAccountIDKey = "sso_account_id"
+	ssoRoleNameKey  = "sso_role_name"
+
+	// Additional Config fields
+	regionKey = `region`
+
+	// endpoint discovery group
+	enableEndpointDiscoveryKey = `endpoint_discovery_enabled` // optional
+
+	// External Credential process
+	credentialProcessKey = `credential_process` // optional
+
+	// Web Identity Token File
+	webIdentityTokenFileKey = `web_identity_token_file` // optional
+
+	// S3 ARN Region Usage
+	s3UseARNRegionKey = "s3_use_arn_region"
+
+	ec2MetadataServiceEndpointModeKey = "ec2_metadata_service_endpoint_mode"
+
+	ec2MetadataServiceEndpointKey = "ec2_metadata_service_endpoint"
+
+	ec2MetadataV1DisabledKey = "ec2_metadata_v1_disabled"
+
+	// Use DualStack Endpoint Resolution
+	useDualStackEndpoint = "use_dualstack_endpoint"
+
+	// DefaultSharedConfigProfile is the default profile to be used when
+	// loading configuration from the config files if another profile name
+	// is not provided.
+	DefaultSharedConfigProfile = `default`
+
+	// S3 Disable Multi-Region AccessPoints
+	s3DisableMultiRegionAccessPointsKey = `s3_disable_multiregion_access_points`
+
+	useFIPSEndpointKey = "use_fips_endpoint"
+
+	defaultsModeKey = "defaults_mode"
+
+	// Retry options
+	retryMaxAttemptsKey = "max_attempts"
+	retryModeKey        = "retry_mode"
+
+	caBundleKey = "ca_bundle"
+
+	sdkAppID = "sdk_ua_app_id"
+
+	ignoreConfiguredEndpoints = "ignore_configured_endpoint_urls"
+
+	endpointURL = "endpoint_url"
+
+	servicesSectionKey = "services"
+
+	disableRequestCompression      = "disable_request_compression"
+	requestMinCompressionSizeBytes = "request_min_compression_size_bytes"
+
+	s3DisableExpressSessionAuthKey = "s3_disable_express_session_auth"
+
+	accountIDKey          = "aws_account_id"
+	accountIDEndpointMode = "account_id_endpoint_mode"
+)
+
+// defaultSharedConfigProfile allows for swapping the default profile for testing
+var defaultSharedConfigProfile = DefaultSharedConfigProfile
+
+// DefaultSharedCredentialsFilename returns the SDK's default file path
+// for the shared credentials file.
+//
+// Builds the shared config file path based on the OS's platform.
+//
+//   - Linux/Unix: $HOME/.aws/credentials
+//   - Windows: %USERPROFILE%\.aws\credentials
+func DefaultSharedCredentialsFilename() string {
+	return filepath.Join(shareddefaults.UserHomeDir(), ".aws", "credentials")
+}
+
+// DefaultSharedConfigFilename returns the SDK's default file path for
+// the shared config file.
+//
+// Builds the shared config file path based on the OS's platform.
+//
+//   - Linux/Unix: $HOME/.aws/config
+//   - Windows: %USERPROFILE%\.aws\config
+func DefaultSharedConfigFilename() string {
+	return filepath.Join(shareddefaults.UserHomeDir(), ".aws", "config")
+}
+
+// DefaultSharedConfigFiles is a slice of the default shared config files that
+// will be used in order to load the SharedConfig.
+var DefaultSharedConfigFiles = []string{
+	DefaultSharedConfigFilename(),
+}
+
+// DefaultSharedCredentialsFiles is a slice of the default shared credentials
+// files that will be used in order to load the SharedConfig.
+var DefaultSharedCredentialsFiles = []string{
+	DefaultSharedCredentialsFilename(),
+}
+
+// SSOSession provides the shared configuration parameters of the sso-session
+// section.
+type SSOSession struct {
+	Name        string
+	SSORegion   string
+	SSOStartURL string
+}
+
+func (s *SSOSession) setFromIniSection(section ini.Section) {
+	updateString(&s.Name, section, ssoSessionNameKey)
+	updateString(&s.SSORegion, section, ssoRegionKey)
+	updateString(&s.SSOStartURL, section, ssoStartURLKey)
+}
+
+// Services contains values configured in the services section
+// of the AWS configuration file.
+type Services struct {
+	// Services section values
+	// {"serviceId": {"key": "value"}}
+	// e.g. {"s3": {"endpoint_url": "example.com"}}
+	ServiceValues map[string]map[string]string
+}
+
+func (s *Services) setFromIniSection(section ini.Section) {
+	if s.ServiceValues == nil {
+		s.ServiceValues = make(map[string]map[string]string)
+	}
+	for _, service := range section.List() {
+		s.ServiceValues[service] = section.Map(service)
+	}
+}
+
+// SharedConfig represents the configuration fields of the SDK config files.
+type SharedConfig struct {
+	Profile string
+
+	// Credentials values from the config file. Both aws_access_key_id
+	// and aws_secret_access_key must be provided together in the same file
+	// to be considered valid. The values will be ignored if not a complete group.
+	// aws_session_token is an optional field that can be provided if both of the
+	// other two fields are also provided.
+	//
+	//	aws_access_key_id
+	//	aws_secret_access_key
+	//	aws_session_token
+	Credentials aws.Credentials
+
+	CredentialSource     string
+	CredentialProcess    string
+	WebIdentityTokenFile string
+
+	// SSO session options
+	SSOSessionName string
+	SSOSession     *SSOSession
+
+	// Legacy SSO session options
+	SSORegion   string
+	SSOStartURL string
+
+	// SSO fields not used
+	SSOAccountID string
+	SSORoleName  string
+
+	RoleARN             string
+	ExternalID          string
+	MFASerial           string
+	RoleSessionName     string
+	RoleDurationSeconds *time.Duration
+
+	SourceProfileName string
+	Source            *SharedConfig
+
+	// Region is the region the SDK should use for looking up AWS service endpoints
+	// and signing requests.
+	//
+	//	region = us-west-2
+	Region string
+
+	// EnableEndpointDiscovery can be enabled or disabled in the shared config
+	// by setting endpoint_discovery_enabled to true, or false respectively.
+	//
+	//	endpoint_discovery_enabled = true
+	EnableEndpointDiscovery aws.EndpointDiscoveryEnableState
+
+	// Specifies if the S3 service should allow ARNs to direct the region
+	// the client's requests are sent to.
+	//
+	// s3_use_arn_region=true
+	S3UseARNRegion *bool
+
+	// Specifies the EC2 Instance Metadata Service default endpoint selection
+	// mode (IPv4 or IPv6)
+	//
+	// ec2_metadata_service_endpoint_mode=IPv6
+	EC2IMDSEndpointMode imds.EndpointModeState
+
+	// Specifies the EC2 Instance Metadata Service endpoint to use. If
+	// specified it overrides EC2IMDSEndpointMode.
+	//
+	// ec2_metadata_service_endpoint=http://fd00:ec2::254
+	EC2IMDSEndpoint string
+
+	// Specifies that IMDS clients should not fallback to IMDSv1 if token
+	// requests fail.
+	//
+	// ec2_metadata_v1_disabled=true
+	EC2IMDSv1Disabled *bool
+
+	// Specifies if the S3 service should disable support for Multi-Region
+	// access-points
+	//
+	// s3_disable_multiregion_access_points=true
+	S3DisableMultiRegionAccessPoints *bool
+
+	// Specifies that SDK clients must resolve a dual-stack endpoint for
+	// services.
+	//
+	// use_dualstack_endpoint=true
+	UseDualStackEndpoint aws.DualStackEndpointState
+
+	// Specifies that SDK clients must resolve a FIPS endpoint for
+	// services.
+	//
+	// use_fips_endpoint=true
+	UseFIPSEndpoint aws.FIPSEndpointState
+
+	// Specifies which defaults mode should be used by services.
+	//
+	// defaults_mode=standard
+	DefaultsMode aws.DefaultsMode
+
+	// Specifies the maximum number attempts an API client will call an
+	// operation that fails with a retryable error.
+	//
+	// max_attempts=3
+	RetryMaxAttempts int
+
+	// Specifies the retry model the API client will be created with.
+	//
+	// retry_mode=standard
+	RetryMode aws.RetryMode
+
+	// Sets the path to a custom Certificate Authority (CA) Bundle PEM file
+	// that the SDK will use instead of the system's root CA bundle. Only use
+	// this if you want to configure the SDK to use a custom set of CAs.
+	//
+	// Enabling this option will attempt to merge the Transport into the SDK's
+	// HTTP client. If the client's Transport is not a http.Transport an error
+	// will be returned. If the Transport's TLS config is set this option will
+	// cause the SDK to overwrite the Transport's TLS config's  RootCAs value.
+	//
+	// Setting a custom HTTPClient in the aws.Config options will override this
+	// setting. To use this option and custom HTTP client, the HTTP client
+	// needs to be provided when creating the config. Not the service client.
+	//
+	//  ca_bundle=$HOME/my_custom_ca_bundle
+	CustomCABundle string
+
+	// aws sdk app ID that can be added to user agent header string
+	AppID string
+
+	// Flag used to disable configured endpoints.
+	IgnoreConfiguredEndpoints *bool
+
+	// Value to contain configured endpoints to be propagated to
+	// corresponding endpoint resolution field.
+	BaseEndpoint string
+
+	// Services section config.
+	ServicesSectionName string
+	Services            Services
+
+	// determine if request compression is allowed, default to false
+	// retrieved from config file's profile field disable_request_compression
+	DisableRequestCompression *bool
+
+	// inclusive threshold request body size to trigger compression,
+	// default to 10240 and must be within 0 and 10485760 bytes inclusive
+	// retrieved from config file's profile field request_min_compression_size_bytes
+	RequestMinCompressSizeBytes *int64
+
+	// Whether S3Express auth is disabled.
+	//
+	// This will NOT prevent requests from being made to S3Express buckets, it
+	// will only bypass the modified endpoint routing and signing behaviors
+	// associated with the feature.
+	S3DisableExpressAuth *bool
+
+	AccountIDEndpointMode aws.AccountIDEndpointMode
+}
+
+func (c SharedConfig) getDefaultsMode(ctx context.Context) (value aws.DefaultsMode, ok bool, err error) {
+	if len(c.DefaultsMode) == 0 {
+		return "", false, nil
+	}
+
+	return c.DefaultsMode, true, nil
+}
+
+// GetRetryMaxAttempts returns the maximum number of attempts an API client
+// created Retryer should attempt an operation call before failing.
+func (c SharedConfig) GetRetryMaxAttempts(ctx context.Context) (value int, ok bool, err error) {
+	if c.RetryMaxAttempts == 0 {
+		return 0, false, nil
+	}
+
+	return c.RetryMaxAttempts, true, nil
+}
+
+// GetRetryMode returns the model the API client should create its Retryer in.
+func (c SharedConfig) GetRetryMode(ctx context.Context) (value aws.RetryMode, ok bool, err error) {
+	if len(c.RetryMode) == 0 {
+		return "", false, nil
+	}
+
+	return c.RetryMode, true, nil
+}
+
+// GetS3UseARNRegion returns if the S3 service should allow ARNs to direct the region
+// the client's requests are sent to.
+func (c SharedConfig) GetS3UseARNRegion(ctx context.Context) (value, ok bool, err error) {
+	if c.S3UseARNRegion == nil {
+		return false, false, nil
+	}
+
+	return *c.S3UseARNRegion, true, nil
+}
+
+// GetEnableEndpointDiscovery returns if the enable_endpoint_discovery is set.
+func (c SharedConfig) GetEnableEndpointDiscovery(ctx context.Context) (value aws.EndpointDiscoveryEnableState, ok bool, err error) {
+	if c.EnableEndpointDiscovery == aws.EndpointDiscoveryUnset {
+		return aws.EndpointDiscoveryUnset, false, nil
+	}
+
+	return c.EnableEndpointDiscovery, true, nil
+}
+
+// GetS3DisableMultiRegionAccessPoints returns if the S3 service should disable support for Multi-Region
+// access-points.
+func (c SharedConfig) GetS3DisableMultiRegionAccessPoints(ctx context.Context) (value, ok bool, err error) {
+	if c.S3DisableMultiRegionAccessPoints == nil {
+		return false, false, nil
+	}
+
+	return *c.S3DisableMultiRegionAccessPoints, true, nil
+}
+
+// GetRegion returns the region for the profile if a region is set.
+func (c SharedConfig) getRegion(ctx context.Context) (string, bool, error) {
+	if len(c.Region) == 0 {
+		return "", false, nil
+	}
+	return c.Region, true, nil
+}
+
+// GetCredentialsProvider returns the credentials for a profile if they were set.
+func (c SharedConfig) getCredentialsProvider() (aws.Credentials, bool, error) {
+	return c.Credentials, true, nil
+}
+
+// GetEC2IMDSEndpointMode implements a EC2IMDSEndpointMode option resolver interface.
+func (c SharedConfig) GetEC2IMDSEndpointMode() (imds.EndpointModeState, bool, error) {
+	if c.EC2IMDSEndpointMode == imds.EndpointModeStateUnset {
+		return imds.EndpointModeStateUnset, false, nil
+	}
+
+	return c.EC2IMDSEndpointMode, true, nil
+}
+
+// GetEC2IMDSEndpoint implements a EC2IMDSEndpoint option resolver interface.
+func (c SharedConfig) GetEC2IMDSEndpoint() (string, bool, error) {
+	if len(c.EC2IMDSEndpoint) == 0 {
+		return "", false, nil
+	}
+
+	return c.EC2IMDSEndpoint, true, nil
+}
+
+// GetEC2IMDSV1FallbackDisabled implements an EC2IMDSV1FallbackDisabled option
+// resolver interface.
+func (c SharedConfig) GetEC2IMDSV1FallbackDisabled() (bool, bool) {
+	if c.EC2IMDSv1Disabled == nil {
+		return false, false
+	}
+
+	return *c.EC2IMDSv1Disabled, true
+}
+
+// GetUseDualStackEndpoint returns whether the service's dual-stack endpoint should be
+// used for requests.
+func (c SharedConfig) GetUseDualStackEndpoint(ctx context.Context) (value aws.DualStackEndpointState, found bool, err error) {
+	if c.UseDualStackEndpoint == aws.DualStackEndpointStateUnset {
+		return aws.DualStackEndpointStateUnset, false, nil
+	}
+
+	return c.UseDualStackEndpoint, true, nil
+}
+
+// GetUseFIPSEndpoint returns whether the service's FIPS endpoint should be
+// used for requests.
+func (c SharedConfig) GetUseFIPSEndpoint(ctx context.Context) (value aws.FIPSEndpointState, found bool, err error) {
+	if c.UseFIPSEndpoint == aws.FIPSEndpointStateUnset {
+		return aws.FIPSEndpointStateUnset, false, nil
+	}
+
+	return c.UseFIPSEndpoint, true, nil
+}
+
+// GetS3DisableExpressAuth returns the configured value for
+// [SharedConfig.S3DisableExpressAuth].
+func (c SharedConfig) GetS3DisableExpressAuth() (value, ok bool) {
+	if c.S3DisableExpressAuth == nil {
+		return false, false
+	}
+
+	return *c.S3DisableExpressAuth, true
+}
+
+// getCustomCABundle returns the custom CA bundle's PEM bytes if the file path was set.
+func (c SharedConfig) getCustomCABundle(context.Context) (io.Reader, bool, error) {
+	if len(c.CustomCABundle) == 0 {
+		return nil, false, nil
+	}
+
+	b, err := ioutil.ReadFile(c.CustomCABundle)
+	if err != nil {
+		return nil, false, err
+	}
+	return bytes.NewReader(b), true, nil
+}
+
+// getAppID returns the sdk app ID if set in shared config profile
+func (c SharedConfig) getAppID(context.Context) (string, bool, error) {
+	return c.AppID, len(c.AppID) > 0, nil
+}
+
+// GetIgnoreConfiguredEndpoints is used in knowing when to disable configured
+// endpoints feature.
+func (c SharedConfig) GetIgnoreConfiguredEndpoints(context.Context) (bool, bool, error) {
+	if c.IgnoreConfiguredEndpoints == nil {
+		return false, false, nil
+	}
+
+	return *c.IgnoreConfiguredEndpoints, true, nil
+}
+
+func (c SharedConfig) getBaseEndpoint(context.Context) (string, bool, error) {
+	return c.BaseEndpoint, len(c.BaseEndpoint) > 0, nil
+}
+
+// GetServiceBaseEndpoint is used to retrieve a normalized SDK ID for use
+// with configured endpoints.
+func (c SharedConfig) GetServiceBaseEndpoint(ctx context.Context, sdkID string) (string, bool, error) {
+	if service, ok := c.Services.ServiceValues[normalizeShared(sdkID)]; ok {
+		if endpt, ok := service[endpointURL]; ok {
+			return endpt, true, nil
+		}
+	}
+	return "", false, nil
+}
+
+func normalizeShared(sdkID string) string {
+	lower := strings.ToLower(sdkID)
+	return strings.ReplaceAll(lower, " ", "_")
+}
+
+func (c SharedConfig) getServicesObject(context.Context) (map[string]map[string]string, bool, error) {
+	return c.Services.ServiceValues, c.Services.ServiceValues != nil, nil
+}
+
+// loadSharedConfigIgnoreNotExist is an alias for loadSharedConfig with the
+// addition of ignoring when none of the files exist or when the profile
+// is not found in any of the files.
+func loadSharedConfigIgnoreNotExist(ctx context.Context, configs configs) (Config, error) {
+	cfg, err := loadSharedConfig(ctx, configs)
+	if err != nil {
+		if _, ok := err.(SharedConfigProfileNotExistError); ok {
+			return SharedConfig{}, nil
+		}
+		return nil, err
+	}
+
+	return cfg, nil
+}
+
+// loadSharedConfig uses the configs passed in to load the SharedConfig from file
+// The file names and profile name are sourced from the configs.
+//
+// If profile name is not provided DefaultSharedConfigProfile (default) will
+// be used.
+//
+// If shared config filenames are not provided DefaultSharedConfigFiles will
+// be used.
+//
+// Config providers used:
+// * sharedConfigProfileProvider
+// * sharedConfigFilesProvider
+func loadSharedConfig(ctx context.Context, configs configs) (Config, error) {
+	var profile string
+	var configFiles []string
+	var credentialsFiles []string
+	var ok bool
+	var err error
+
+	profile, ok, err = getSharedConfigProfile(ctx, configs)
+	if err != nil {
+		return nil, err
+	}
+	if !ok {
+		profile = defaultSharedConfigProfile
+	}
+
+	configFiles, ok, err = getSharedConfigFiles(ctx, configs)
+	if err != nil {
+		return nil, err
+	}
+
+	credentialsFiles, ok, err = getSharedCredentialsFiles(ctx, configs)
+	if err != nil {
+		return nil, err
+	}
+
+	// set up logger if logging of configuration warnings is enabled
+	var logger logging.Logger
+	logWarnings, found, err := getLogConfigurationWarnings(ctx, configs)
+	if err != nil {
+		return SharedConfig{}, err
+	}
+	if found && logWarnings {
+		logger, found, err = getLogger(ctx, configs)
+		if err != nil {
+			return SharedConfig{}, err
+		}
+		if !found {
+			logger = logging.NewStandardLogger(os.Stderr)
+		}
+	}
+
+	return LoadSharedConfigProfile(ctx, profile,
+		func(o *LoadSharedConfigOptions) {
+			o.Logger = logger
+			o.ConfigFiles = configFiles
+			o.CredentialsFiles = credentialsFiles
+		},
+	)
+}
+
+// LoadSharedConfigOptions struct contains optional values that can be used to load the config.
+type LoadSharedConfigOptions struct {
+
+	// CredentialsFiles are the shared credentials files
+	CredentialsFiles []string
+
+	// ConfigFiles are the shared config files
+	ConfigFiles []string
+
+	// Logger is the logger used to log shared config behavior
+	Logger logging.Logger
+}
+
+// LoadSharedConfigProfile retrieves the configuration from the list of files
+// using the profile provided. The order the files are listed will determine
+// precedence. Values in subsequent files will overwrite values defined in
+// earlier files.
+//
+// For example, given two files A and B. Both define credentials. If the order
+// of the files are A then B, B's credential values will be used instead of A's.
+//
+// If config files are not set, SDK will default to using a file at location `.aws/config` if present.
+// If credentials files are not set, SDK will default to using a file at location `.aws/credentials` if present.
+// No default files are set, if files set to an empty slice.
+//
+// You can read more about shared config and credentials file location at
+// https://docs.aws.amazon.com/credref/latest/refdocs/file-location.html#file-location
+func LoadSharedConfigProfile(ctx context.Context, profile string, optFns ...func(*LoadSharedConfigOptions)) (SharedConfig, error) {
+	var option LoadSharedConfigOptions
+	for _, fn := range optFns {
+		fn(&option)
+	}
+
+	if option.ConfigFiles == nil {
+		option.ConfigFiles = DefaultSharedConfigFiles
+	}
+
+	if option.CredentialsFiles == nil {
+		option.CredentialsFiles = DefaultSharedCredentialsFiles
+	}
+
+	// load shared configuration sections from shared configuration INI options
+	configSections, err := loadIniFiles(option.ConfigFiles)
+	if err != nil {
+		return SharedConfig{}, err
+	}
+
+	// check for profile prefix and drop duplicates or invalid profiles
+	err = processConfigSections(ctx, &configSections, option.Logger)
+	if err != nil {
+		return SharedConfig{}, err
+	}
+
+	// load shared credentials sections from shared credentials INI options
+	credentialsSections, err := loadIniFiles(option.CredentialsFiles)
+	if err != nil {
+		return SharedConfig{}, err
+	}
+
+	// check for profile prefix and drop duplicates or invalid profiles
+	err = processCredentialsSections(ctx, &credentialsSections, option.Logger)
+	if err != nil {
+		return SharedConfig{}, err
+	}
+
+	err = mergeSections(&configSections, credentialsSections)
+	if err != nil {
+		return SharedConfig{}, err
+	}
+
+	cfg := SharedConfig{}
+	profiles := map[string]struct{}{}
+
+	if err = cfg.setFromIniSections(profiles, profile, configSections, option.Logger); err != nil {
+		return SharedConfig{}, err
+	}
+
+	return cfg, nil
+}
+
+// processConfigSections normalizes the sections parsed from the shared
+// config file: "profile "-prefixed sections are renamed to their bare
+// profile name (matching the credentials-file naming), "sso-session " and
+// "services " sections plus "default" are kept as-is, and any other section
+// is dropped as an invalid profile name, with an optional debug log entry.
+func processConfigSections(ctx context.Context, sections *ini.Sections, logger logging.Logger) error {
+	// Names produced by renaming are recorded here so that a later bare
+	// section with the same name is not processed again (the rename already
+	// resolved the conflict in favor of the prefixed section).
+	skipSections := map[string]struct{}{}
+
+	for _, section := range sections.List() {
+		if _, ok := skipSections[section]; ok {
+			continue
+		}
+
+		// drop sections from config file that do not have expected prefixes.
+		switch {
+		case strings.HasPrefix(section, profilePrefix):
+			// Rename sections to remove "profile " prefixing to match with
+			// credentials file. If default is already present, it will be
+			// dropped.
+			newName, err := renameProfileSection(section, sections, logger)
+			if err != nil {
+				return fmt.Errorf("failed to rename profile section, %w", err)
+			}
+			skipSections[newName] = struct{}{}
+
+		case strings.HasPrefix(section, ssoSectionPrefix):
+		case strings.HasPrefix(section, servicesPrefix):
+		case strings.EqualFold(section, "default"):
+		default:
+			// drop this section, as invalid profile name
+			sections.DeleteSection(section)
+
+			if logger != nil {
+				logger.Logf(logging.Debug, "A profile defined with name `%v` is ignored. "+
+					"For use within a shared configuration file, "+
+					"a non-default profile must have `profile ` "+
+					"prefixed to the profile name.",
+					section,
+				)
+			}
+		}
+	}
+	return nil
+}
+
+func renameProfileSection(section string, sections *ini.Sections, logger logging.Logger) (string, error) {
+	v, ok := sections.GetSection(section)
+	if !ok {
+		return "", fmt.Errorf("error processing profiles within the shared configuration files")
+	}
+
+	// delete section with profile as prefix
+	sections.DeleteSection(section)
+
+	// set the value to non-prefixed name in sections.
+	section = strings.TrimPrefix(section, profilePrefix)
+	if sections.HasSection(section) {
+		oldSection, _ := sections.GetSection(section)
+		v.Logs = append(v.Logs,
+			fmt.Sprintf("A non-default profile not prefixed with `profile ` found in %s, "+
+				"overriding non-default profile from %s",
+				v.SourceFile, oldSection.SourceFile))
+		sections.DeleteSection(section)
+	}
+
+	// assign non-prefixed name to section
+	v.Name = section
+	sections.SetSection(section, v)
+
+	return section, nil
+}
+
+func processCredentialsSections(ctx context.Context, sections *ini.Sections, logger logging.Logger) error {
+	for _, section := range sections.List() {
+		// drop profiles with prefix for credential files
+		if strings.HasPrefix(section, profilePrefix) {
+			// drop this section, as invalid profile name
+			sections.DeleteSection(section)
+
+			if logger != nil {
+				logger.Logf(logging.Debug,
+					"The profile defined with name `%v` is ignored. A profile with the `profile ` prefix is invalid "+
+						"for the shared credentials file.\n",
+					section,
+				)
+			}
+		}
+	}
+	return nil
+}
+
+func loadIniFiles(filenames []string) (ini.Sections, error) {
+	mergedSections := ini.NewSections()
+
+	for _, filename := range filenames {
+		sections, err := ini.OpenFile(filename)
+		var v *ini.UnableToReadFile
+		if ok := errors.As(err, &v); ok {
+			// Skip files which can't be opened and read for whatever reason.
+			// We treat such files as empty, and do not fall back to other locations.
+			continue
+		} else if err != nil {
+			return ini.Sections{}, SharedConfigLoadError{Filename: filename, Err: err}
+		}
+
+		// mergeSections into mergedSections
+		err = mergeSections(&mergedSections, sections)
+		if err != nil {
+			return ini.Sections{}, SharedConfigLoadError{Filename: filename, Err: err}
+		}
+	}
+
+	return mergedSections, nil
+}
+
+// mergeSections merges source section properties into destination section properties
+func mergeSections(dst *ini.Sections, src ini.Sections) error {
+	for _, sectionName := range src.List() {
+		srcSection, _ := src.GetSection(sectionName)
+
+		if (!srcSection.Has(accessKeyIDKey) && srcSection.Has(secretAccessKey)) ||
+			(srcSection.Has(accessKeyIDKey) && !srcSection.Has(secretAccessKey)) {
+			srcSection.Errors = append(srcSection.Errors,
+				fmt.Errorf("partial credentials found for profile %v", sectionName))
+		}
+
+		if !dst.HasSection(sectionName) {
+			dst.SetSection(sectionName, srcSection)
+			continue
+		}
+
+		// merge with destination srcSection
+		dstSection, _ := dst.GetSection(sectionName)
+
+		// errors should be overridden if any
+		dstSection.Errors = srcSection.Errors
+
+		// Access key id update
+		if srcSection.Has(accessKeyIDKey) && srcSection.Has(secretAccessKey) {
+			accessKey := srcSection.String(accessKeyIDKey)
+			secretKey := srcSection.String(secretAccessKey)
+
+			if dstSection.Has(accessKeyIDKey) {
+				dstSection.Logs = append(dstSection.Logs, newMergeKeyLogMessage(sectionName, accessKeyIDKey,
+					dstSection.SourceFile[accessKeyIDKey], srcSection.SourceFile[accessKeyIDKey]))
+			}
+
+			// update access key
+			v, err := ini.NewStringValue(accessKey)
+			if err != nil {
+				return fmt.Errorf("error merging access key, %w", err)
+			}
+			dstSection.UpdateValue(accessKeyIDKey, v)
+
+			// update secret key
+			v, err = ini.NewStringValue(secretKey)
+			if err != nil {
+				return fmt.Errorf("error merging secret key, %w", err)
+			}
+			dstSection.UpdateValue(secretAccessKey, v)
+
+			// update session token
+			if err = mergeStringKey(&srcSection, &dstSection, sectionName, sessionTokenKey); err != nil {
+				return err
+			}
+
+			// update source file to reflect where the static creds came from
+			dstSection.UpdateSourceFile(accessKeyIDKey, srcSection.SourceFile[accessKeyIDKey])
+			dstSection.UpdateSourceFile(secretAccessKey, srcSection.SourceFile[secretAccessKey])
+		}
+
+		stringKeys := []string{
+			roleArnKey,
+			sourceProfileKey,
+			credentialSourceKey,
+			externalIDKey,
+			mfaSerialKey,
+			roleSessionNameKey,
+			regionKey,
+			enableEndpointDiscoveryKey,
+			credentialProcessKey,
+			webIdentityTokenFileKey,
+			s3UseARNRegionKey,
+			s3DisableMultiRegionAccessPointsKey,
+			ec2MetadataServiceEndpointModeKey,
+			ec2MetadataServiceEndpointKey,
+			ec2MetadataV1DisabledKey,
+			useDualStackEndpoint,
+			useFIPSEndpointKey,
+			defaultsModeKey,
+			retryModeKey,
+			caBundleKey,
+			roleDurationSecondsKey,
+			retryMaxAttemptsKey,
+
+			ssoSessionNameKey,
+			ssoAccountIDKey,
+			ssoRegionKey,
+			ssoRoleNameKey,
+			ssoStartURLKey,
+		}
+		for i := range stringKeys {
+			if err := mergeStringKey(&srcSection, &dstSection, sectionName, stringKeys[i]); err != nil {
+				return err
+			}
+		}
+
+		// set srcSection on dst srcSection
+		*dst = dst.SetSection(sectionName, dstSection)
+	}
+
+	return nil
+}
+
+func mergeStringKey(srcSection *ini.Section, dstSection *ini.Section, sectionName, key string) error {
+	if srcSection.Has(key) {
+		srcValue := srcSection.String(key)
+		val, err := ini.NewStringValue(srcValue)
+		if err != nil {
+			return fmt.Errorf("error merging %s, %w", key, err)
+		}
+
+		if dstSection.Has(key) {
+			dstSection.Logs = append(dstSection.Logs, newMergeKeyLogMessage(sectionName, key,
+				dstSection.SourceFile[key], srcSection.SourceFile[key]))
+		}
+
+		dstSection.UpdateValue(key, val)
+		dstSection.UpdateSourceFile(key, srcSection.SourceFile[key])
+	}
+	return nil
+}
+
+func newMergeKeyLogMessage(sectionName, key, dstSourceFile, srcSourceFile string) string {
+	return fmt.Sprintf("For profile: %v, overriding %v value, defined in %v "+
+		"with a %v value found in a duplicate profile defined at file %v. \n",
+		sectionName, key, dstSourceFile, key, srcSourceFile)
+}
+
+// Returns an error if all of the files fail to load. If at least one file is
+// successfully loaded and contains the profile, no error will be returned.
+func (c *SharedConfig) setFromIniSections(profiles map[string]struct{}, profile string,
+	sections ini.Sections, logger logging.Logger) error {
+	c.Profile = profile
+
+	section, ok := sections.GetSection(profile)
+	if !ok {
+		return SharedConfigProfileNotExistError{
+			Profile: profile,
+		}
+	}
+
+	// if logs are appended to the section, log them
+	if section.Logs != nil && logger != nil {
+		for _, log := range section.Logs {
+			logger.Logf(logging.Debug, log)
+		}
+	}
+
+	// set config from the provided INI section
+	err := c.setFromIniSection(profile, section)
+	if err != nil {
+		return fmt.Errorf("error fetching config from profile, %v, %w", profile, err)
+	}
+
+	if _, ok := profiles[profile]; ok {
+		// if this is the second instance of the profile the Assume Role
+		// options must be cleared because they are only valid for the
+		// first reference of a profile. The self linked instance of the
+		// profile only have credential provider options.
+		c.clearAssumeRoleOptions()
+	} else {
+		// First time a profile has been seen. Assert if the credential type
+		// requires a role ARN, the ARN is also set
+		if err := c.validateCredentialsConfig(profile); err != nil {
+			return err
+		}
+	}
+
+	// if not top level profile and has credentials, return with credentials.
+	if len(profiles) != 0 && c.Credentials.HasKeys() {
+		return nil
+	}
+
+	profiles[profile] = struct{}{}
+
+	// validate no colliding credentials type are present
+	if err := c.validateCredentialType(); err != nil {
+		return err
+	}
+
+	// Link source profiles for assume roles
+	if len(c.SourceProfileName) != 0 {
+		// Linked profile via source_profile ignore credential provider
+		// options, the source profile must provide the credentials.
+		c.clearCredentialOptions()
+
+		srcCfg := &SharedConfig{}
+		err := srcCfg.setFromIniSections(profiles, c.SourceProfileName, sections, logger)
+		if err != nil {
+			// SourceProfileName that doesn't exist is an error in configuration.
+			if _, ok := err.(SharedConfigProfileNotExistError); ok {
+				err = SharedConfigAssumeRoleError{
+					RoleARN: c.RoleARN,
+					Profile: c.SourceProfileName,
+					Err:     err,
+				}
+			}
+			return err
+		}
+
+		if !srcCfg.hasCredentials() {
+			return SharedConfigAssumeRoleError{
+				RoleARN: c.RoleARN,
+				Profile: c.SourceProfileName,
+			}
+		}
+
+		c.Source = srcCfg
+	}
+
+	// If the profile contains an SSO session parameter, the session MUST exist
+	// as a section in the config file. Load the SSO session using the name
+	// provided. If the session section is not found or incomplete an error
+	// will be returned.
+	if c.hasSSOTokenProviderConfiguration() {
+		section, ok := sections.GetSection(ssoSectionPrefix + strings.TrimSpace(c.SSOSessionName))
+		if !ok {
+			return fmt.Errorf("failed to find SSO session section, %v", c.SSOSessionName)
+		}
+		var ssoSession SSOSession
+		ssoSession.setFromIniSection(section)
+		ssoSession.Name = c.SSOSessionName
+		c.SSOSession = &ssoSession
+	}
+
+	if len(c.ServicesSectionName) > 0 {
+		if section, ok := sections.GetSection(servicesPrefix + c.ServicesSectionName); ok {
+			var svcs Services
+			svcs.setFromIniSection(section)
+			c.Services = svcs
+		}
+	}
+
+	return nil
+}
+
+// setFromIniSection loads the configuration from the profile section defined in
+// the provided INI file. A SharedConfig pointer type value is used so that
+// multiple config file loadings can be chained.
+//
+// Only loads complete logically grouped values, and will not set fields in cfg
+// for incomplete grouped values in the config. Such as credentials. For example
+// if a config file only includes aws_access_key_id but no aws_secret_access_key
+// the aws_access_key_id will be ignored.
+func (c *SharedConfig) setFromIniSection(profile string, section ini.Section) error {
+	if len(section.Name) == 0 {
+		sources := make([]string, 0)
+		for _, v := range section.SourceFile {
+			sources = append(sources, v)
+		}
+
+		return fmt.Errorf("parsing error : could not find profile section name after processing files: %v", sources)
+	}
+
+	if len(section.Errors) != 0 {
+		var errStatement string
+		for i, e := range section.Errors {
+			errStatement = fmt.Sprintf("%d, %v\n", i+1, e.Error())
+		}
+		return fmt.Errorf("Error using profile: \n %v", errStatement)
+	}
+
+	// Assume Role
+	updateString(&c.RoleARN, section, roleArnKey)
+	updateString(&c.ExternalID, section, externalIDKey)
+	updateString(&c.MFASerial, section, mfaSerialKey)
+	updateString(&c.RoleSessionName, section, roleSessionNameKey)
+	updateString(&c.SourceProfileName, section, sourceProfileKey)
+	updateString(&c.CredentialSource, section, credentialSourceKey)
+	updateString(&c.Region, section, regionKey)
+
+	// AWS Single Sign-On (AWS SSO)
+	// SSO session options
+	updateString(&c.SSOSessionName, section, ssoSessionNameKey)
+
+	// Legacy SSO session options
+	updateString(&c.SSORegion, section, ssoRegionKey)
+	updateString(&c.SSOStartURL, section, ssoStartURLKey)
+
+	// SSO fields not used
+	updateString(&c.SSOAccountID, section, ssoAccountIDKey)
+	updateString(&c.SSORoleName, section, ssoRoleNameKey)
+
+	// we're retaining a behavioral quirk with this field that existed before
+	// the removal of literal parsing for #2276:
+	//   - if the key is missing, the config field will not be set
+	//   - if the key is set to a non-numeric, the config field will be set to 0
+	if section.Has(roleDurationSecondsKey) {
+		if v, ok := section.Int(roleDurationSecondsKey); ok {
+			c.RoleDurationSeconds = aws.Duration(time.Duration(v) * time.Second)
+		} else {
+			c.RoleDurationSeconds = aws.Duration(time.Duration(0))
+		}
+	}
+
+	updateString(&c.CredentialProcess, section, credentialProcessKey)
+	updateString(&c.WebIdentityTokenFile, section, webIdentityTokenFileKey)
+
+	updateEndpointDiscoveryType(&c.EnableEndpointDiscovery, section, enableEndpointDiscoveryKey)
+	updateBoolPtr(&c.S3UseARNRegion, section, s3UseARNRegionKey)
+	updateBoolPtr(&c.S3DisableMultiRegionAccessPoints, section, s3DisableMultiRegionAccessPointsKey)
+	updateBoolPtr(&c.S3DisableExpressAuth, section, s3DisableExpressSessionAuthKey)
+
+	if err := updateEC2MetadataServiceEndpointMode(&c.EC2IMDSEndpointMode, section, ec2MetadataServiceEndpointModeKey); err != nil {
+		return fmt.Errorf("failed to load %s from shared config, %v", ec2MetadataServiceEndpointModeKey, err)
+	}
+	updateString(&c.EC2IMDSEndpoint, section, ec2MetadataServiceEndpointKey)
+	updateBoolPtr(&c.EC2IMDSv1Disabled, section, ec2MetadataV1DisabledKey)
+
+	updateUseDualStackEndpoint(&c.UseDualStackEndpoint, section, useDualStackEndpoint)
+	updateUseFIPSEndpoint(&c.UseFIPSEndpoint, section, useFIPSEndpointKey)
+
+	if err := updateDefaultsMode(&c.DefaultsMode, section, defaultsModeKey); err != nil {
+		return fmt.Errorf("failed to load %s from shared config, %w", defaultsModeKey, err)
+	}
+
+	if err := updateInt(&c.RetryMaxAttempts, section, retryMaxAttemptsKey); err != nil {
+		return fmt.Errorf("failed to load %s from shared config, %w", retryMaxAttemptsKey, err)
+	}
+	if err := updateRetryMode(&c.RetryMode, section, retryModeKey); err != nil {
+		return fmt.Errorf("failed to load %s from shared config, %w", retryModeKey, err)
+	}
+
+	updateString(&c.CustomCABundle, section, caBundleKey)
+
+	// user agent app ID added to request User-Agent header
+	updateString(&c.AppID, section, sdkAppID)
+
+	updateBoolPtr(&c.IgnoreConfiguredEndpoints, section, ignoreConfiguredEndpoints)
+
+	updateString(&c.BaseEndpoint, section, endpointURL)
+
+	if err := updateDisableRequestCompression(&c.DisableRequestCompression, section, disableRequestCompression); err != nil {
+		return fmt.Errorf("failed to load %s from shared config, %w", disableRequestCompression, err)
+	}
+	if err := updateRequestMinCompressSizeBytes(&c.RequestMinCompressSizeBytes, section, requestMinCompressionSizeBytes); err != nil {
+		return fmt.Errorf("failed to load %s from shared config, %w", requestMinCompressionSizeBytes, err)
+	}
+
+	if err := updateAIDEndpointMode(&c.AccountIDEndpointMode, section, accountIDEndpointMode); err != nil {
+		return fmt.Errorf("failed to load %s from shared config, %w", accountIDEndpointMode, err)
+	}
+
+	// Shared Credentials
+	creds := aws.Credentials{
+		AccessKeyID:     section.String(accessKeyIDKey),
+		SecretAccessKey: section.String(secretAccessKey),
+		SessionToken:    section.String(sessionTokenKey),
+		Source:          fmt.Sprintf("SharedConfigCredentials: %s", section.SourceFile[accessKeyIDKey]),
+		AccountID:       section.String(accountIDKey),
+	}
+
+	if creds.HasKeys() {
+		c.Credentials = creds
+	}
+
+	updateString(&c.ServicesSectionName, section, servicesSectionKey)
+
+	return nil
+}
+
+func updateRequestMinCompressSizeBytes(bytes **int64, sec ini.Section, key string) error {
+	if !sec.Has(key) {
+		return nil
+	}
+
+	v, ok := sec.Int(key)
+	if !ok {
+		return fmt.Errorf("invalid value for min request compression size bytes %s, need int64", sec.String(key))
+	}
+	if v < 0 || v > smithyrequestcompression.MaxRequestMinCompressSizeBytes {
+		return fmt.Errorf("invalid range for min request compression size bytes %d, must be within 0 and 10485760 inclusively", v)
+	}
+	*bytes = new(int64)
+	**bytes = v
+	return nil
+}
+
+func updateDisableRequestCompression(disable **bool, sec ini.Section, key string) error {
+	if !sec.Has(key) {
+		return nil
+	}
+
+	v := sec.String(key)
+	switch {
+	case v == "true":
+		*disable = new(bool)
+		**disable = true
+	case v == "false":
+		*disable = new(bool)
+		**disable = false
+	default:
+		return fmt.Errorf("invalid value for shared config profile field, %s=%s, need true or false", key, v)
+	}
+	return nil
+}
+
+func updateAIDEndpointMode(m *aws.AccountIDEndpointMode, sec ini.Section, key string) error {
+	if !sec.Has(key) {
+		return nil
+	}
+
+	v := sec.String(key)
+	switch v {
+	case "preferred":
+		*m = aws.AccountIDEndpointModePreferred
+	case "required":
+		*m = aws.AccountIDEndpointModeRequired
+	case "disabled":
+		*m = aws.AccountIDEndpointModeDisabled
+	default:
+		return fmt.Errorf("invalid value for shared config profile field, %s=%s, must be preferred/required/disabled", key, v)
+	}
+
+	return nil
+}
+
+func (c SharedConfig) getRequestMinCompressSizeBytes(ctx context.Context) (int64, bool, error) {
+	if c.RequestMinCompressSizeBytes == nil {
+		return 0, false, nil
+	}
+	return *c.RequestMinCompressSizeBytes, true, nil
+}
+
+func (c SharedConfig) getDisableRequestCompression(ctx context.Context) (bool, bool, error) {
+	if c.DisableRequestCompression == nil {
+		return false, false, nil
+	}
+	return *c.DisableRequestCompression, true, nil
+}
+
+func (c SharedConfig) getAccountIDEndpointMode(ctx context.Context) (aws.AccountIDEndpointMode, bool, error) {
+	return c.AccountIDEndpointMode, len(c.AccountIDEndpointMode) > 0, nil
+}
+
+func updateDefaultsMode(mode *aws.DefaultsMode, section ini.Section, key string) error {
+	if !section.Has(key) {
+		return nil
+	}
+	value := section.String(key)
+	if ok := mode.SetFromString(value); !ok {
+		return fmt.Errorf("invalid value: %s", value)
+	}
+	return nil
+}
+
+func updateRetryMode(mode *aws.RetryMode, section ini.Section, key string) (err error) {
+	if !section.Has(key) {
+		return nil
+	}
+	value := section.String(key)
+	if *mode, err = aws.ParseRetryMode(value); err != nil {
+		return err
+	}
+	return nil
+}
+
+func updateEC2MetadataServiceEndpointMode(endpointMode *imds.EndpointModeState, section ini.Section, key string) error {
+	if !section.Has(key) {
+		return nil
+	}
+	value := section.String(key)
+	return endpointMode.SetFromString(value)
+}
+
+func (c *SharedConfig) validateCredentialsConfig(profile string) error {
+	if err := c.validateCredentialsRequireARN(profile); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (c *SharedConfig) validateCredentialsRequireARN(profile string) error {
+	var credSource string
+
+	switch {
+	case len(c.SourceProfileName) != 0:
+		credSource = sourceProfileKey
+	case len(c.CredentialSource) != 0:
+		credSource = credentialSourceKey
+	case len(c.WebIdentityTokenFile) != 0:
+		credSource = webIdentityTokenFileKey
+	}
+
+	if len(credSource) != 0 && len(c.RoleARN) == 0 {
+		return CredentialRequiresARNError{
+			Type:    credSource,
+			Profile: profile,
+		}
+	}
+
+	return nil
+}
+
+func (c *SharedConfig) validateCredentialType() error {
+	// Only one or no credential type can be defined.
+	if !oneOrNone(
+		len(c.SourceProfileName) != 0,
+		len(c.CredentialSource) != 0,
+		len(c.CredentialProcess) != 0,
+		len(c.WebIdentityTokenFile) != 0,
+	) {
+		return fmt.Errorf("only one credential type may be specified per profile: source profile, credential source, credential process, web identity token")
+	}
+
+	return nil
+}
+
+func (c *SharedConfig) validateSSOConfiguration() error {
+	if c.hasSSOTokenProviderConfiguration() {
+		err := c.validateSSOTokenProviderConfiguration()
+		if err != nil {
+			return err
+		}
+		return nil
+	}
+
+	if c.hasLegacySSOConfiguration() {
+		err := c.validateLegacySSOConfiguration()
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (c *SharedConfig) validateSSOTokenProviderConfiguration() error {
+	var missing []string
+
+	if len(c.SSOSessionName) == 0 {
+		missing = append(missing, ssoSessionNameKey)
+	}
+
+	if c.SSOSession == nil {
+		missing = append(missing, ssoSectionPrefix)
+	} else {
+		if len(c.SSOSession.SSORegion) == 0 {
+			missing = append(missing, ssoRegionKey)
+		}
+
+		if len(c.SSOSession.SSOStartURL) == 0 {
+			missing = append(missing, ssoStartURLKey)
+		}
+	}
+
+	if len(missing) > 0 {
+		return fmt.Errorf("profile %q is configured to use SSO but is missing required configuration: %s",
+			c.Profile, strings.Join(missing, ", "))
+	}
+
+	if len(c.SSORegion) > 0 && c.SSORegion != c.SSOSession.SSORegion {
+		return fmt.Errorf("%s in profile %q must match %s in %s", ssoRegionKey, c.Profile, ssoRegionKey, ssoSectionPrefix)
+	}
+
+	if len(c.SSOStartURL) > 0 && c.SSOStartURL != c.SSOSession.SSOStartURL {
+		return fmt.Errorf("%s in profile %q must match %s in %s", ssoStartURLKey, c.Profile, ssoStartURLKey, ssoSectionPrefix)
+	}
+
+	return nil
+}
+
+func (c *SharedConfig) validateLegacySSOConfiguration() error {
+	var missing []string
+
+	if len(c.SSORegion) == 0 {
+		missing = append(missing, ssoRegionKey)
+	}
+
+	if len(c.SSOStartURL) == 0 {
+		missing = append(missing, ssoStartURLKey)
+	}
+
+	if len(c.SSOAccountID) == 0 {
+		missing = append(missing, ssoAccountIDKey)
+	}
+
+	if len(c.SSORoleName) == 0 {
+		missing = append(missing, ssoRoleNameKey)
+	}
+
+	if len(missing) > 0 {
+		return fmt.Errorf("profile %q is configured to use SSO but is missing required configuration: %s",
+			c.Profile, strings.Join(missing, ", "))
+	}
+	return nil
+}
+
+func (c *SharedConfig) hasCredentials() bool {
+	switch {
+	case len(c.SourceProfileName) != 0:
+	case len(c.CredentialSource) != 0:
+	case len(c.CredentialProcess) != 0:
+	case len(c.WebIdentityTokenFile) != 0:
+	case c.hasSSOConfiguration():
+	case c.Credentials.HasKeys():
+	default:
+		return false
+	}
+
+	return true
+}
+
+func (c *SharedConfig) hasSSOConfiguration() bool {
+	return c.hasSSOTokenProviderConfiguration() || c.hasLegacySSOConfiguration()
+}
+
+func (c *SharedConfig) hasSSOTokenProviderConfiguration() bool {
+	return len(c.SSOSessionName) > 0
+}
+
+func (c *SharedConfig) hasLegacySSOConfiguration() bool {
+	return len(c.SSORegion) > 0 || len(c.SSOAccountID) > 0 || len(c.SSOStartURL) > 0 || len(c.SSORoleName) > 0
+}
+
+func (c *SharedConfig) clearAssumeRoleOptions() {
+	c.RoleARN = ""
+	c.ExternalID = ""
+	c.MFASerial = ""
+	c.RoleSessionName = ""
+	c.SourceProfileName = ""
+}
+
+func (c *SharedConfig) clearCredentialOptions() {
+	c.CredentialSource = ""
+	c.CredentialProcess = ""
+	c.WebIdentityTokenFile = ""
+	c.Credentials = aws.Credentials{}
+	c.SSOAccountID = ""
+	c.SSORegion = ""
+	c.SSORoleName = ""
+	c.SSOStartURL = ""
+}
+
+// SharedConfigLoadError is an error for the shared config file failed to load.
+type SharedConfigLoadError struct {
+	Filename string
+	Err      error
+}
+
+// Unwrap returns the underlying error that caused the failure.
+func (e SharedConfigLoadError) Unwrap() error {
+	return e.Err
+}
+
+func (e SharedConfigLoadError) Error() string {
+	return fmt.Sprintf("failed to load shared config file, %s, %v", e.Filename, e.Err)
+}
+
+// SharedConfigProfileNotExistError is an error for the shared config when
+// the profile was not find in the config file.
+type SharedConfigProfileNotExistError struct {
+	Filename []string
+	Profile  string
+	Err      error
+}
+
+// Unwrap returns the underlying error that caused the failure.
+func (e SharedConfigProfileNotExistError) Unwrap() error {
+	return e.Err
+}
+
+func (e SharedConfigProfileNotExistError) Error() string {
+	return fmt.Sprintf("failed to get shared config profile, %s", e.Profile)
+}
+
+// SharedConfigAssumeRoleError is an error for the shared config when the
+// profile contains assume role information, but that information is invalid
+// or not complete.
+type SharedConfigAssumeRoleError struct {
+	Profile string
+	RoleARN string
+	Err     error
+}
+
+// Unwrap returns the underlying error that caused the failure.
+func (e SharedConfigAssumeRoleError) Unwrap() error {
+	return e.Err
+}
+
+func (e SharedConfigAssumeRoleError) Error() string {
+	return fmt.Sprintf("failed to load assume role %s, of profile %s, %v",
+		e.RoleARN, e.Profile, e.Err)
+}
+
+// CredentialRequiresARNError provides the error for shared config credentials
+// that are incorrectly configured in the shared config or credentials file.
+type CredentialRequiresARNError struct {
+	// type of credentials that were configured.
+	Type string
+
+	// Profile name the credentials were in.
+	Profile string
+}
+
+// Error satisfies the error interface.
+func (e CredentialRequiresARNError) Error() string {
+	return fmt.Sprintf(
+		"credential type %s requires role_arn, profile %s",
+		e.Type, e.Profile,
+	)
+}
+
+func oneOrNone(bs ...bool) bool {
+	var count int
+
+	for _, b := range bs {
+		if b {
+			count++
+			if count > 1 {
+				return false
+			}
+		}
+	}
+
+	return true
+}
+
+// updateString will only update the dst with the value in the section key, key
+// is present in the section.
+func updateString(dst *string, section ini.Section, key string) {
+	if !section.Has(key) {
+		return
+	}
+	*dst = section.String(key)
+}
+
+// updateInt will only update the dst with the value in the section key, key
+// is present in the section.
+//
+// Down casts the INI integer value from a int64 to an int, which could be
+// different bit size depending on platform.
+func updateInt(dst *int, section ini.Section, key string) error {
+	if !section.Has(key) {
+		return nil
+	}
+
+	v, ok := section.Int(key)
+	if !ok {
+		return fmt.Errorf("invalid value %s=%s, expect integer", key, section.String(key))
+	}
+
+	*dst = int(v)
+	return nil
+}
+
+// updateBool will only update the dst with the value in the section key, key
+// is present in the section.
+func updateBool(dst *bool, section ini.Section, key string) {
+	if !section.Has(key) {
+		return
+	}
+
+	// retains pre-#2276 behavior where non-bool value would resolve to false
+	v, _ := section.Bool(key)
+	*dst = v
+}
+
+// updateBoolPtr will only update the dst with the value in the section key,
+// key is present in the section.
+func updateBoolPtr(dst **bool, section ini.Section, key string) {
+	if !section.Has(key) {
+		return
+	}
+
+	// retains pre-#2276 behavior where non-bool value would resolve to false
+	v, _ := section.Bool(key)
+	*dst = new(bool)
+	**dst = v
+}
+
+// updateEndpointDiscoveryType will only update the dst with the value in the section, if
+// a valid key and corresponding EndpointDiscoveryType is found.
+func updateEndpointDiscoveryType(dst *aws.EndpointDiscoveryEnableState, section ini.Section, key string) {
+	if !section.Has(key) {
+		return
+	}
+
+	value := section.String(key)
+	if len(value) == 0 {
+		return
+	}
+
+	switch {
+	case strings.EqualFold(value, endpointDiscoveryDisabled):
+		*dst = aws.EndpointDiscoveryDisabled
+	case strings.EqualFold(value, endpointDiscoveryEnabled):
+		*dst = aws.EndpointDiscoveryEnabled
+	case strings.EqualFold(value, endpointDiscoveryAuto):
+		*dst = aws.EndpointDiscoveryAuto
+	}
+}
+
+// updateEndpointDiscoveryType will only update the dst with the value in the section, if
+// a valid key and corresponding EndpointDiscoveryType is found.
+func updateUseDualStackEndpoint(dst *aws.DualStackEndpointState, section ini.Section, key string) {
+	if !section.Has(key) {
+		return
+	}
+
+	// retains pre-#2276 behavior where non-bool value would resolve to false
+	if v, _ := section.Bool(key); v {
+		*dst = aws.DualStackEndpointStateEnabled
+	} else {
+		*dst = aws.DualStackEndpointStateDisabled
+	}
+
+	return
+}
+
+// updateEndpointDiscoveryType will only update the dst with the value in the section, if
+// a valid key and corresponding EndpointDiscoveryType is found.
+func updateUseFIPSEndpoint(dst *aws.FIPSEndpointState, section ini.Section, key string) {
+	if !section.Has(key) {
+		return
+	}
+
+	// retains pre-#2276 behavior where non-bool value would resolve to false
+	if v, _ := section.Bool(key); v {
+		*dst = aws.FIPSEndpointStateEnabled
+	} else {
+		*dst = aws.FIPSEndpointStateDisabled
+	}
+
+	return
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/credentials/CHANGELOG.md 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/credentials/CHANGELOG.md
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/credentials/CHANGELOG.md	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/credentials/CHANGELOG.md	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,591 @@
+# v1.17.27 (2024-07-18)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.26 (2024-07-10.2)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.25 (2024-07-10)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.24 (2024-07-03)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.23 (2024-06-28)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.22 (2024-06-26)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.21 (2024-06-19)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.20 (2024-06-18)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.19 (2024-06-17)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.18 (2024-06-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.17 (2024-06-03)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.16 (2024-05-23)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.15 (2024-05-16)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.14 (2024-05-15)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.13 (2024-05-10)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.12 (2024-05-08)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.11 (2024-04-05)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.10 (2024-03-29)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.9 (2024-03-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.8 (2024-03-18)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.7 (2024-03-07)
+
+* **Bug Fix**: Remove dependency on go-cmp.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.6 (2024-03-05)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.5 (2024-03-04)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.4 (2024-02-23)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.3 (2024-02-22)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.2 (2024-02-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.1 (2024-02-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.0 (2024-02-13)
+
+* **Feature**: Bump minimum Go version to 1.20 per our language support policy.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.16 (2024-01-18)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.15 (2024-01-16)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.14 (2024-01-04)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.13 (2023-12-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.12 (2023-12-08)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.11 (2023-12-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.10 (2023-12-06)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.9 (2023-12-01)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.8 (2023-11-30)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.7 (2023-11-29)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.6 (2023-11-28.2)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.5 (2023-11-28)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.4 (2023-11-21)
+
+* **Bug Fix**: Don't expect error responses to have a JSON payload in the endpointcreds provider.
+
+# v1.16.3 (2023-11-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.2 (2023-11-17)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.1 (2023-11-15)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.0 (2023-11-14)
+
+* **Feature**: Add support for dynamic auth token from file and EKS container host in absolute/relative URIs in the HTTP credential provider.
+
+# v1.15.2 (2023-11-09)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.1 (2023-11-02)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.0 (2023-11-01)
+
+* **Feature**: Adds support for configured endpoints via environment variables and the AWS shared configuration file.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.14.0 (2023-10-31)
+
+* **Feature**: **BREAKING CHANGE**: Bump minimum go version to 1.19 per the revised [go version support policy](https://aws.amazon.com/blogs/developer/aws-sdk-for-go-aligns-with-go-release-policy-on-supported-runtimes/).
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.43 (2023-10-12)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.42 (2023-10-06)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.41 (2023-10-02)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.40 (2023-09-22)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.39 (2023-09-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.38 (2023-09-18)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.37 (2023-09-05)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.36 (2023-08-31)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.35 (2023-08-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.34 (2023-08-18)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.33 (2023-08-17)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.32 (2023-08-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.31 (2023-08-01)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.30 (2023-07-31)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.29 (2023-07-28)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.28 (2023-07-25)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.27 (2023-07-13)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.26 (2023-06-15)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.25 (2023-06-13)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.24 (2023-05-09)
+
+* No change notes available for this release.
+
+# v1.13.23 (2023-05-08)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.22 (2023-05-04)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.21 (2023-04-24)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.20 (2023-04-10)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.19 (2023-04-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.18 (2023-03-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.17 (2023-03-14)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.16 (2023-03-10)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.15 (2023-02-22)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.14 (2023-02-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.13 (2023-02-15)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.12 (2023-02-03)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.11 (2023-02-01)
+
+* No change notes available for this release.
+
+# v1.13.10 (2023-01-25)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.9 (2023-01-23)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.8 (2023-01-05)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.7 (2022-12-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.6 (2022-12-19)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.5 (2022-12-15)
+
+* **Bug Fix**: Unify logic between shared config and in finding home directory
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.4 (2022-12-02)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.3 (2022-11-22)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.2 (2022-11-17)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.1 (2022-11-16)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.0 (2022-11-11)
+
+* **Announcement**: When using the SSOTokenProvider, a previous implementation incorrectly compensated for invalid SSOTokenProvider configurations in the shared profile. This has been fixed via PR #1903 and tracked in issue #1846
+* **Feature**: Adds token refresh support (via SSOTokenProvider) when using the SSOCredentialProvider
+
+# v1.12.24 (2022-11-10)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.23 (2022-10-24)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.22 (2022-10-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.21 (2022-09-30)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.20 (2022-09-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.19 (2022-09-14)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.18 (2022-09-02)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.17 (2022-08-31)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.16 (2022-08-30)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.15 (2022-08-29)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.14 (2022-08-15)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.13 (2022-08-11)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.12 (2022-08-09)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.11 (2022-08-08)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.10 (2022-08-01)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.9 (2022-07-11)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.8 (2022-07-05)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.7 (2022-06-29)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.6 (2022-06-16)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.5 (2022-06-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.4 (2022-05-26)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.3 (2022-05-25)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.2 (2022-05-17)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.1 (2022-05-16)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.0 (2022-04-25)
+
+* **Feature**: Adds Duration and Policy options that can be used when creating stscreds.WebIdentityRoleProvider credentials provider.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.2 (2022-03-30)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.1 (2022-03-24)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.0 (2022-03-23)
+
+* **Feature**: Update `ec2rolecreds` package's `Provider` to implement support for CredentialsCache new optional caching strategy interfaces, HandleFailRefreshCredentialsCacheStrategy and AdjustExpiresByCredentialsCacheStrategy.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.10.0 (2022-03-08)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.9.0 (2022-02-24)
+
+* **Feature**: Adds support for `SourceIdentity` to `stscreds.AssumeRoleProvider` [#1588](https://github.com/aws/aws-sdk-go-v2/pull/1588). Fixes [#1575](https://github.com/aws/aws-sdk-go-v2/issues/1575)
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.8.0 (2022-01-14)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.7.0 (2022-01-07)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.6.5 (2021-12-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.6.4 (2021-12-02)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.6.3 (2021-11-30)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.6.2 (2021-11-19)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.6.1 (2021-11-12)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.6.0 (2021-11-06)
+
+* **Feature**: The SDK now supports configuration of FIPS and DualStack endpoints using environment variables, shared configuration, or programmatically.
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.5.0 (2021-10-21)
+
+* **Feature**: Updated to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.4.3 (2021-10-11)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.4.2 (2021-09-17)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.4.1 (2021-09-10)
+
+* **Documentation**: Fixes the AssumeRoleProvider's documentation for using custom TokenProviders.
+
+# v1.4.0 (2021-08-27)
+
+* **Feature**: Adds support for Tags and TransitiveTagKeys to stscreds.AssumeRoleProvider. Closes https://github.com/aws/aws-sdk-go-v2/issues/723
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.3 (2021-08-19)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.2 (2021-08-04)
+
+* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.1 (2021-07-15)
+
+* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.0 (2021-06-25)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Bug Fix**: Fixed example usages of aws.CredentialsCache ([#1275](https://github.com/aws/aws-sdk-go-v2/pull/1275))
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.2.1 (2021-05-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.2.0 (2021-05-14)
+
+* **Feature**: Constant has been added to modules to enable runtime version inspection for reporting.
+* **Dependency Update**: Updated to the latest SDK module versions
+
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/credentials/LICENSE.txt 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/credentials/LICENSE.txt
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/credentials/LICENSE.txt	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/credentials/LICENSE.txt	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/credentials/doc.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/credentials/doc.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/credentials/doc.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/credentials/doc.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,4 @@
+/*
+Package credentials provides types for retrieving credentials from credentials sources.
+*/
+package credentials
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds/doc.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds/doc.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds/doc.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds/doc.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,58 @@
+// Package ec2rolecreds provides the credentials provider implementation for
+// retrieving AWS credentials from Amazon EC2 Instance Roles via Amazon EC2 IMDS.
+//
+// # Concurrency and caching
+//
+// The Provider is not safe to be used concurrently, and does not provide any
+// caching of credentials retrieved. You should wrap the Provider with a
+// `aws.CredentialsCache` to provide concurrency safety, and caching of
+// credentials.
+//
+// # Loading credentials with the SDK's AWS Config
+//
+// The EC2 Instance role credentials provider will automatically be the resolved
+// credential provider in the credential chain if no other credential provider is
+// resolved first.
+//
+// To explicitly instruct the SDK's credentials resolving to use the EC2 Instance
+// role for credentials, you specify a `credentials_source` property in the config
+// profile the SDK will load.
+//
+//	[default]
+//	credential_source = Ec2InstanceMetadata
+//
+// # Loading credentials with the Provider directly
+//
+// Another way to use the EC2 Instance role credentials provider is to create it
+// directly and assign it as the credentials provider for an API client.
+//
+// The following example creates a credentials provider for a command, and wraps
+// it with the CredentialsCache before assigning the provider to the Amazon S3 API
+// client's Credentials option.
+//
+//	provider := imds.New(imds.Options{})
+//
+//	// Create the service client value configured for credentials.
+//	svc := s3.New(s3.Options{
+//	  Credentials: aws.NewCredentialsCache(provider),
+//	})
+//
+// If you need more control, you can set the configuration options on the
+// credentials provider using the imds.Options type to configure the EC2 IMDS
+// API Client and ExpiryWindow of the retrieved credentials.
+//
+//	provider := imds.New(imds.Options{
+//		// See imds.Options type's documentation for more options available.
+//		Client: imds.New(Options{
+//			HTTPClient: customHTTPClient,
+//		}),
+//
+//		// Modify how soon credentials expire prior to their original expiry time.
+//		ExpiryWindow: 5 * time.Minute,
+//	})
+//
+// # EC2 IMDS API Client
+//
+// See the github.com/aws/aws-sdk-go-v2/feature/ec2/imds module for more details on
+// configuring the client, and options available.
+package ec2rolecreds
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds/provider.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds/provider.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds/provider.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds/provider.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,229 @@
+package ec2rolecreds
+
+import (
+	"bufio"
+	"context"
+	"encoding/json"
+	"fmt"
+	"math"
+	"path"
+	"strings"
+	"time"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/aws/aws-sdk-go-v2/feature/ec2/imds"
+	sdkrand "github.com/aws/aws-sdk-go-v2/internal/rand"
+	"github.com/aws/aws-sdk-go-v2/internal/sdk"
+	"github.com/aws/smithy-go"
+	"github.com/aws/smithy-go/logging"
+	"github.com/aws/smithy-go/middleware"
+)
+
+// ProviderName provides a name of EC2Role provider
+const ProviderName = "EC2RoleProvider"
+
+// GetMetadataAPIClient provides the interface for an EC2 IMDS API client for the
+// GetMetadata operation.
+type GetMetadataAPIClient interface {
+	GetMetadata(context.Context, *imds.GetMetadataInput, ...func(*imds.Options)) (*imds.GetMetadataOutput, error)
+}
+
+// A Provider retrieves credentials from the EC2 service, and keeps track if
+// those credentials are expired.
+//
+// The New function must be used to create the Provider with a custom EC2 IMDS client.
+//
+//	p := ec2rolecreds.New(func(o *ec2rolecreds.Options) {
+//	     o.Client = imds.New(imds.Options{/* custom options */})
+//	})
+type Provider struct {
+	options Options
+}
+
+// Options is a list of user settable options for setting the behavior of the Provider.
+type Options struct {
+	// The API client that will be used by the provider to make GetMetadata API
+	// calls to EC2 IMDS.
+	//
+	// If nil, the provider will default to the EC2 IMDS client.
+	Client GetMetadataAPIClient
+}
+
+// New returns an initialized Provider value configured to retrieve
+// credentials from EC2 Instance Metadata service.
+func New(optFns ...func(*Options)) *Provider {
+	options := Options{}
+
+	for _, fn := range optFns {
+		fn(&options)
+	}
+
+	if options.Client == nil {
+		options.Client = imds.New(imds.Options{})
+	}
+
+	return &Provider{
+		options: options,
+	}
+}
+
+// Retrieve retrieves credentials from the EC2 service. Error will be returned
+// if the request fails, or unable to extract the desired credentials.
+func (p *Provider) Retrieve(ctx context.Context) (aws.Credentials, error) {
+	credsList, err := requestCredList(ctx, p.options.Client)
+	if err != nil {
+		return aws.Credentials{Source: ProviderName}, err
+	}
+
+	if len(credsList) == 0 {
+		return aws.Credentials{Source: ProviderName},
+			fmt.Errorf("unexpected empty EC2 IMDS role list")
+	}
+	credsName := credsList[0]
+
+	roleCreds, err := requestCred(ctx, p.options.Client, credsName)
+	if err != nil {
+		return aws.Credentials{Source: ProviderName}, err
+	}
+
+	creds := aws.Credentials{
+		AccessKeyID:     roleCreds.AccessKeyID,
+		SecretAccessKey: roleCreds.SecretAccessKey,
+		SessionToken:    roleCreds.Token,
+		Source:          ProviderName,
+
+		CanExpire: true,
+		Expires:   roleCreds.Expiration,
+	}
+
+	// Cap role credentials Expires to 1 hour so they can be refreshed more
+	// often. Jitter will be applied by the credentials cache if being used.
+	if anHour := sdk.NowTime().Add(1 * time.Hour); creds.Expires.After(anHour) {
+		creds.Expires = anHour
+	}
+
+	return creds, nil
+}
+
+// HandleFailToRefresh will extend the credentials Expires time if it is
+// expired. If the credentials will not expire within the minimum time, they
+// will be returned.
+//
+// If the credentials cannot expire, the original error will be returned.
+func (p *Provider) HandleFailToRefresh(ctx context.Context, prevCreds aws.Credentials, err error) (
+	aws.Credentials, error,
+) {
+	if !prevCreds.CanExpire {
+		return aws.Credentials{}, err
+	}
+
+	if prevCreds.Expires.After(sdk.NowTime().Add(5 * time.Minute)) {
+		return prevCreds, nil
+	}
+
+	newCreds := prevCreds
+	randFloat64, err := sdkrand.CryptoRandFloat64()
+	if err != nil {
+		return aws.Credentials{}, fmt.Errorf("failed to get random float, %w", err)
+	}
+
+	// Random distribution of [5,15) minutes.
+	expireOffset := time.Duration(randFloat64*float64(10*time.Minute)) + 5*time.Minute
+	newCreds.Expires = sdk.NowTime().Add(expireOffset)
+
+	logger := middleware.GetLogger(ctx)
+	logger.Logf(logging.Warn, "Attempting credential expiration extension due to a credential service availability issue. A refresh of these credentials will be attempted again in %v minutes.", math.Floor(expireOffset.Minutes()))
+
+	return newCreds, nil
+}
+
+// AdjustExpiresBy will add the passed in duration to the passed in
+// credential's Expires time, unless the time until Expires is less than 15
+// minutes. Returns the credentials, even if not updated.
+func (p *Provider) AdjustExpiresBy(creds aws.Credentials, dur time.Duration) (
+	aws.Credentials, error,
+) {
+	if !creds.CanExpire {
+		return creds, nil
+	}
+	if creds.Expires.Before(sdk.NowTime().Add(15 * time.Minute)) {
+		return creds, nil
+	}
+
+	creds.Expires = creds.Expires.Add(dur)
+	return creds, nil
+}
+
+// ec2RoleCredRespBody provides the shape for unmarshaling credential
+// request responses.
+type ec2RoleCredRespBody struct {
+	// Success State
+	Expiration      time.Time
+	AccessKeyID     string
+	SecretAccessKey string
+	Token           string
+
+	// Error state
+	Code    string
+	Message string
+}
+
+const iamSecurityCredsPath = "/iam/security-credentials/"
+
+// requestCredList requests a list of credentials from the EC2 service. If
+// there are no credentials, or there is an error making or receiving the
+// request, an error will be returned.
+func requestCredList(ctx context.Context, client GetMetadataAPIClient) ([]string, error) {
+	resp, err := client.GetMetadata(ctx, &imds.GetMetadataInput{
+		Path: iamSecurityCredsPath,
+	})
+	if err != nil {
+		return nil, fmt.Errorf("no EC2 IMDS role found, %w", err)
+	}
+	defer resp.Content.Close()
+
+	credsList := []string{}
+	s := bufio.NewScanner(resp.Content)
+	for s.Scan() {
+		credsList = append(credsList, s.Text())
+	}
+
+	if err := s.Err(); err != nil {
+		return nil, fmt.Errorf("failed to read EC2 IMDS role, %w", err)
+	}
+
+	return credsList, nil
+}
+
+// requestCred requests the credentials for a specific role from the EC2 service.
+//
+// If the credentials cannot be found, or there is an error reading the response
+// an error will be returned.
+func requestCred(ctx context.Context, client GetMetadataAPIClient, credsName string) (ec2RoleCredRespBody, error) {
+	resp, err := client.GetMetadata(ctx, &imds.GetMetadataInput{
+		Path: path.Join(iamSecurityCredsPath, credsName),
+	})
+	if err != nil {
+		return ec2RoleCredRespBody{},
+			fmt.Errorf("failed to get %s EC2 IMDS role credentials, %w",
+				credsName, err)
+	}
+	defer resp.Content.Close()
+
+	var respCreds ec2RoleCredRespBody
+	if err := json.NewDecoder(resp.Content).Decode(&respCreds); err != nil {
+		return ec2RoleCredRespBody{},
+			fmt.Errorf("failed to decode %s EC2 IMDS role credentials, %w",
+				credsName, err)
+	}
+
+	if !strings.EqualFold(respCreds.Code, "Success") {
+		// If an error code was returned something failed requesting the role.
+		return ec2RoleCredRespBody{},
+			fmt.Errorf("failed to get %s EC2 IMDS role credentials, %w",
+				credsName,
+				&smithy.GenericAPIError{Code: respCreds.Code, Message: respCreds.Message})
+	}
+
+	return respCreds, nil
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client/auth.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client/auth.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client/auth.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client/auth.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,48 @@
+package client
+
+import (
+	"context"
+	"github.com/aws/smithy-go/middleware"
+)
+
+type getIdentityMiddleware struct {
+	options Options
+}
+
+func (*getIdentityMiddleware) ID() string {
+	return "GetIdentity"
+}
+
+func (m *getIdentityMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (
+	out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
+) {
+	return next.HandleFinalize(ctx, in)
+}
+
+type signRequestMiddleware struct {
+}
+
+func (*signRequestMiddleware) ID() string {
+	return "Signing"
+}
+
+func (m *signRequestMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (
+	out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
+) {
+	return next.HandleFinalize(ctx, in)
+}
+
+type resolveAuthSchemeMiddleware struct {
+	operation string
+	options   Options
+}
+
+func (*resolveAuthSchemeMiddleware) ID() string {
+	return "ResolveAuthScheme"
+}
+
+func (m *resolveAuthSchemeMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (
+	out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
+) {
+	return next.HandleFinalize(ctx, in)
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client/client.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client/client.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client/client.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client/client.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,165 @@
+package client
+
+import (
+	"context"
+	"fmt"
+	"net/http"
+	"time"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/aws/aws-sdk-go-v2/aws/middleware"
+	"github.com/aws/aws-sdk-go-v2/aws/retry"
+	awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http"
+	"github.com/aws/smithy-go"
+	smithymiddleware "github.com/aws/smithy-go/middleware"
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// ServiceID is the client identifer
+const ServiceID = "endpoint-credentials"
+
+// HTTPClient is a client for sending HTTP requests
+type HTTPClient interface {
+	Do(*http.Request) (*http.Response, error)
+}
+
+// Options is the endpoint client configurable options
+type Options struct {
+	// The endpoint to retrieve credentials from
+	Endpoint string
+
+	// The HTTP client to invoke API calls with. Defaults to client's default HTTP
+	// implementation if nil.
+	HTTPClient HTTPClient
+
+	// Retryer guides how HTTP requests should be retried in case of recoverable
+	// failures. When nil the API client will use a default retryer.
+	Retryer aws.Retryer
+
+	// Set of options to modify how the credentials operation is invoked.
+	APIOptions []func(*smithymiddleware.Stack) error
+}
+
+// Copy creates a copy of the API options.
+func (o Options) Copy() Options {
+	to := o
+	to.APIOptions = make([]func(*smithymiddleware.Stack) error, len(o.APIOptions))
+	copy(to.APIOptions, o.APIOptions)
+	return to
+}
+
+// Client is an client for retrieving AWS credentials from an endpoint
+type Client struct {
+	options Options
+}
+
+// New constructs a new Client from the given options
+func New(options Options, optFns ...func(*Options)) *Client {
+	options = options.Copy()
+
+	if options.HTTPClient == nil {
+		options.HTTPClient = awshttp.NewBuildableClient()
+	}
+
+	if options.Retryer == nil {
+		// Amazon-owned implementations of this endpoint are known to sometimes
+		// return plaintext responses (i.e. no Code) like normal, add a few
+		// additional status codes
+		options.Retryer = retry.NewStandard(func(o *retry.StandardOptions) {
+			o.Retryables = append(o.Retryables, retry.RetryableHTTPStatusCode{
+				Codes: map[int]struct{}{
+					http.StatusTooManyRequests: {},
+				},
+			})
+		})
+	}
+
+	for _, fn := range optFns {
+		fn(&options)
+	}
+
+	client := &Client{
+		options: options,
+	}
+
+	return client
+}
+
+// GetCredentialsInput is the input to send with the endpoint service to receive credentials.
+type GetCredentialsInput struct {
+	AuthorizationToken string
+}
+
+// GetCredentials retrieves credentials from credential endpoint
+func (c *Client) GetCredentials(ctx context.Context, params *GetCredentialsInput, optFns ...func(*Options)) (*GetCredentialsOutput, error) {
+	stack := smithymiddleware.NewStack("GetCredentials", smithyhttp.NewStackRequest)
+	options := c.options.Copy()
+	for _, fn := range optFns {
+		fn(&options)
+	}
+
+	stack.Serialize.Add(&serializeOpGetCredential{}, smithymiddleware.After)
+	stack.Build.Add(&buildEndpoint{Endpoint: options.Endpoint}, smithymiddleware.After)
+	stack.Deserialize.Add(&deserializeOpGetCredential{}, smithymiddleware.After)
+	addProtocolFinalizerMiddlewares(stack, options, "GetCredentials")
+	retry.AddRetryMiddlewares(stack, retry.AddRetryMiddlewaresOptions{Retryer: options.Retryer})
+	middleware.AddSDKAgentKey(middleware.FeatureMetadata, ServiceID)
+	smithyhttp.AddErrorCloseResponseBodyMiddleware(stack)
+	smithyhttp.AddCloseResponseBodyMiddleware(stack)
+
+	for _, fn := range options.APIOptions {
+		if err := fn(stack); err != nil {
+			return nil, err
+		}
+	}
+
+	handler := smithymiddleware.DecorateHandler(smithyhttp.NewClientHandler(options.HTTPClient), stack)
+	result, _, err := handler.Handle(ctx, params)
+	if err != nil {
+		return nil, err
+	}
+
+	return result.(*GetCredentialsOutput), err
+}
+
+// GetCredentialsOutput is the response from the credential endpoint
+type GetCredentialsOutput struct {
+	Expiration      *time.Time
+	AccessKeyID     string
+	SecretAccessKey string
+	Token           string
+	AccountID       string
+}
+
+// EndpointError is an error returned from the endpoint service
+type EndpointError struct {
+	Code       string            `json:"code"`
+	Message    string            `json:"message"`
+	Fault      smithy.ErrorFault `json:"-"`
+	statusCode int               `json:"-"`
+}
+
+// Error is the error mesage string
+func (e *EndpointError) Error() string {
+	return fmt.Sprintf("%s: %s", e.Code, e.Message)
+}
+
+// ErrorCode is the error code returned by the endpoint
+func (e *EndpointError) ErrorCode() string {
+	return e.Code
+}
+
+// ErrorMessage is the error message returned by the endpoint
+func (e *EndpointError) ErrorMessage() string {
+	return e.Message
+}
+
+// ErrorFault indicates error fault classification
+func (e *EndpointError) ErrorFault() smithy.ErrorFault {
+	return e.Fault
+}
+
+// HTTPStatusCode implements retry.HTTPStatusCode.
+func (e *EndpointError) HTTPStatusCode() int {
+	return e.statusCode
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client/endpoints.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client/endpoints.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client/endpoints.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client/endpoints.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,20 @@
+package client
+
+import (
+	"context"
+	"github.com/aws/smithy-go/middleware"
+)
+
+type resolveEndpointV2Middleware struct {
+	options Options
+}
+
+func (*resolveEndpointV2Middleware) ID() string {
+	return "ResolveEndpointV2"
+}
+
+func (m *resolveEndpointV2Middleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (
+	out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
+) {
+	return next.HandleFinalize(ctx, in)
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client/middleware.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client/middleware.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client/middleware.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client/middleware.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,164 @@
+package client
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"io"
+	"net/url"
+
+	"github.com/aws/smithy-go"
+	smithymiddleware "github.com/aws/smithy-go/middleware"
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+type buildEndpoint struct {
+	Endpoint string
+}
+
+func (b *buildEndpoint) ID() string {
+	return "BuildEndpoint"
+}
+
+func (b *buildEndpoint) HandleBuild(ctx context.Context, in smithymiddleware.BuildInput, next smithymiddleware.BuildHandler) (
+	out smithymiddleware.BuildOutput, metadata smithymiddleware.Metadata, err error,
+) {
+	request, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, fmt.Errorf("unknown transport, %T", in.Request)
+	}
+
+	if len(b.Endpoint) == 0 {
+		return out, metadata, fmt.Errorf("endpoint not provided")
+	}
+
+	parsed, err := url.Parse(b.Endpoint)
+	if err != nil {
+		return out, metadata, fmt.Errorf("failed to parse endpoint, %w", err)
+	}
+
+	request.URL = parsed
+
+	return next.HandleBuild(ctx, in)
+}
+
+type serializeOpGetCredential struct{}
+
+func (s *serializeOpGetCredential) ID() string {
+	return "OperationSerializer"
+}
+
+func (s *serializeOpGetCredential) HandleSerialize(ctx context.Context, in smithymiddleware.SerializeInput, next smithymiddleware.SerializeHandler) (
+	out smithymiddleware.SerializeOutput, metadata smithymiddleware.Metadata, err error,
+) {
+	request, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, fmt.Errorf("unknown transport type, %T", in.Request)
+	}
+
+	params, ok := in.Parameters.(*GetCredentialsInput)
+	if !ok {
+		return out, metadata, fmt.Errorf("unknown input parameters, %T", in.Parameters)
+	}
+
+	const acceptHeader = "Accept"
+	request.Header[acceptHeader] = append(request.Header[acceptHeader][:0], "application/json")
+
+	if len(params.AuthorizationToken) > 0 {
+		const authHeader = "Authorization"
+		request.Header[authHeader] = append(request.Header[authHeader][:0], params.AuthorizationToken)
+	}
+
+	return next.HandleSerialize(ctx, in)
+}
+
+type deserializeOpGetCredential struct{}
+
+func (d *deserializeOpGetCredential) ID() string {
+	return "OperationDeserializer"
+}
+
+func (d *deserializeOpGetCredential) HandleDeserialize(ctx context.Context, in smithymiddleware.DeserializeInput, next smithymiddleware.DeserializeHandler) (
+	out smithymiddleware.DeserializeOutput, metadata smithymiddleware.Metadata, err error,
+) {
+	out, metadata, err = next.HandleDeserialize(ctx, in)
+	if err != nil {
+		return out, metadata, err
+	}
+
+	response, ok := out.RawResponse.(*smithyhttp.Response)
+	if !ok {
+		return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+	}
+
+	if response.StatusCode < 200 || response.StatusCode >= 300 {
+		return out, metadata, deserializeError(response)
+	}
+
+	var shape *GetCredentialsOutput
+	if err = json.NewDecoder(response.Body).Decode(&shape); err != nil {
+		return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to deserialize json response, %w", err)}
+	}
+
+	out.Result = shape
+	return out, metadata, err
+}
+
+func deserializeError(response *smithyhttp.Response) error {
+	// we could be talking to anything, json isn't guaranteed
+	// see https://github.com/aws/aws-sdk-go-v2/issues/2316
+	if response.Header.Get("Content-Type") == "application/json" {
+		return deserializeJSONError(response)
+	}
+
+	msg, err := io.ReadAll(response.Body)
+	if err != nil {
+		return &smithy.DeserializationError{
+			Err: fmt.Errorf("read response, %w", err),
+		}
+	}
+
+	return &EndpointError{
+		// no sensible value for Code
+		Message:    string(msg),
+		Fault:      stof(response.StatusCode),
+		statusCode: response.StatusCode,
+	}
+}
+
+func deserializeJSONError(response *smithyhttp.Response) error {
+	var errShape *EndpointError
+	if err := json.NewDecoder(response.Body).Decode(&errShape); err != nil {
+		return &smithy.DeserializationError{
+			Err: fmt.Errorf("failed to decode error message, %w", err),
+		}
+	}
+
+	errShape.Fault = stof(response.StatusCode)
+	errShape.statusCode = response.StatusCode
+	return errShape
+}
+
+// maps HTTP status code to smithy ErrorFault
+func stof(code int) smithy.ErrorFault {
+	if code >= 500 {
+		return smithy.FaultServer
+	}
+	return smithy.FaultClient
+}
+
+func addProtocolFinalizerMiddlewares(stack *smithymiddleware.Stack, options Options, operation string) error {
+	if err := stack.Finalize.Add(&resolveAuthSchemeMiddleware{operation: operation, options: options}, smithymiddleware.Before); err != nil {
+		return fmt.Errorf("add ResolveAuthScheme: %w", err)
+	}
+	if err := stack.Finalize.Insert(&getIdentityMiddleware{options: options}, "ResolveAuthScheme", smithymiddleware.After); err != nil {
+		return fmt.Errorf("add GetIdentity: %w", err)
+	}
+	if err := stack.Finalize.Insert(&resolveEndpointV2Middleware{options: options}, "GetIdentity", smithymiddleware.After); err != nil {
+		return fmt.Errorf("add ResolveEndpointV2: %w", err)
+	}
+	if err := stack.Finalize.Insert(&signRequestMiddleware{}, "ResolveEndpointV2", smithymiddleware.After); err != nil {
+		return fmt.Errorf("add Signing: %w", err)
+	}
+	return nil
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/provider.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/provider.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/provider.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/provider.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,193 @@
+// Package endpointcreds provides support for retrieving credentials from an
+// arbitrary HTTP endpoint.
+//
+// The credentials endpoint Provider can receive both static and refreshable
+// credentials that will expire. Credentials are static when an "Expiration"
+// value is not provided in the endpoint's response.
+//
+// Static credentials will never expire once they have been retrieved. The format
+// of the static credentials response:
+//
+//	{
+//	    "AccessKeyId" : "MUA...",
+//	    "SecretAccessKey" : "/7PC5om....",
+//	}
+//
+// Refreshable credentials will expire within the "ExpiryWindow" of the Expiration
+// value in the response. The format of the refreshable credentials response:
+//
+//	{
+//	    "AccessKeyId" : "MUA...",
+//	    "SecretAccessKey" : "/7PC5om....",
+//	    "Token" : "AQoDY....=",
+//	    "Expiration" : "2016-02-25T06:03:31Z"
+//	}
+//
+// Errors should be returned in the following format and only returned with 400
+// or 500 HTTP status codes.
+//
+//	{
+//	    "code": "ErrorCode",
+//	    "message": "Helpful error message."
+//	}
+package endpointcreds
+
+import (
+	"context"
+	"fmt"
+	"net/http"
+	"strings"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client"
+	"github.com/aws/smithy-go/middleware"
+)
+
+// ProviderName is the name of the credentials provider.
+const ProviderName = `CredentialsEndpointProvider`
+
+type getCredentialsAPIClient interface {
+	GetCredentials(context.Context, *client.GetCredentialsInput, ...func(*client.Options)) (*client.GetCredentialsOutput, error)
+}
+
+// Provider satisfies the aws.CredentialsProvider interface, and is a client to
+// retrieve credentials from an arbitrary endpoint.
+type Provider struct {
+	// The AWS Client to make HTTP requests to the endpoint with. The endpoint
+	// the request will be made to is provided by the aws.Config's
+	// EndpointResolver.
+	client getCredentialsAPIClient
+
+	options Options
+}
+
+// HTTPClient is a client for sending HTTP requests
+type HTTPClient interface {
+	Do(*http.Request) (*http.Response, error)
+}
+
+// Options is structure of configurable options for Provider
+type Options struct {
+	// Endpoint to retrieve credentials from. Required
+	Endpoint string
+
+	// HTTPClient to handle sending HTTP requests to the target endpoint.
+	HTTPClient HTTPClient
+
+	// Set of options to modify how the credentials operation is invoked.
+	APIOptions []func(*middleware.Stack) error
+
+	// The Retryer to be used for determining whether a failed requested should be retried
+	Retryer aws.Retryer
+
+	// Optional authorization token value if set will be used as the value of
+	// the Authorization header of the endpoint credential request.
+	//
+	// When constructed from environment, the provider will use the value of
+	// AWS_CONTAINER_AUTHORIZATION_TOKEN environment variable as the token
+	//
+	// Will be overridden if AuthorizationTokenProvider is configured
+	AuthorizationToken string
+
+	// Optional auth provider func to dynamically load the auth token from a file
+	// everytime a credential is retrieved
+	//
+	// When constructed from environment, the provider will read and use the content
+	// of the file pointed to by AWS_CONTAINER_AUTHORIZATION_TOKEN_FILE environment variable
+	// as the auth token everytime credentials are retrieved
+	//
+	// Will override AuthorizationToken if configured
+	AuthorizationTokenProvider AuthTokenProvider
+}
+
+// AuthTokenProvider defines an interface to dynamically load a value to be passed
+// for the Authorization header of a credentials request.
+type AuthTokenProvider interface {
+	GetToken() (string, error)
+}
+
+// TokenProviderFunc is a func type implementing AuthTokenProvider interface
+// and enables customizing token provider behavior
+type TokenProviderFunc func() (string, error)
+
+// GetToken func retrieves auth token according to TokenProviderFunc implementation
+func (p TokenProviderFunc) GetToken() (string, error) {
+	return p()
+}
+
+// New returns a credentials Provider for retrieving AWS credentials
+// from arbitrary endpoint.
+func New(endpoint string, optFns ...func(*Options)) *Provider {
+	o := Options{
+		Endpoint: endpoint,
+	}
+
+	for _, fn := range optFns {
+		fn(&o)
+	}
+
+	p := &Provider{
+		client: client.New(client.Options{
+			HTTPClient: o.HTTPClient,
+			Endpoint:   o.Endpoint,
+			APIOptions: o.APIOptions,
+			Retryer:    o.Retryer,
+		}),
+		options: o,
+	}
+
+	return p
+}
+
+// Retrieve will attempt to request the credentials from the endpoint the Provider
+// was configured for. And error will be returned if the retrieval fails.
+func (p *Provider) Retrieve(ctx context.Context) (aws.Credentials, error) {
+	resp, err := p.getCredentials(ctx)
+	if err != nil {
+		return aws.Credentials{}, fmt.Errorf("failed to load credentials, %w", err)
+	}
+
+	creds := aws.Credentials{
+		AccessKeyID:     resp.AccessKeyID,
+		SecretAccessKey: resp.SecretAccessKey,
+		SessionToken:    resp.Token,
+		Source:          ProviderName,
+		AccountID:       resp.AccountID,
+	}
+
+	if resp.Expiration != nil {
+		creds.CanExpire = true
+		creds.Expires = *resp.Expiration
+	}
+
+	return creds, nil
+}
+
+func (p *Provider) getCredentials(ctx context.Context) (*client.GetCredentialsOutput, error) {
+	authToken, err := p.resolveAuthToken()
+	if err != nil {
+		return nil, fmt.Errorf("resolve auth token: %v", err)
+	}
+
+	return p.client.GetCredentials(ctx, &client.GetCredentialsInput{
+		AuthorizationToken: authToken,
+	})
+}
+
+func (p *Provider) resolveAuthToken() (string, error) {
+	authToken := p.options.AuthorizationToken
+
+	var err error
+	if p.options.AuthorizationTokenProvider != nil {
+		authToken, err = p.options.AuthorizationTokenProvider.GetToken()
+		if err != nil {
+			return "", err
+		}
+	}
+
+	if strings.ContainsAny(authToken, "\r\n") {
+		return "", fmt.Errorf("authorization token contains invalid newline sequence")
+	}
+
+	return authToken, nil
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/credentials/go_module_metadata.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/credentials/go_module_metadata.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/credentials/go_module_metadata.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/credentials/go_module_metadata.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,6 @@
+// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT.
+
+package credentials
+
+// goModuleVersion is the tagged release for this module
+const goModuleVersion = "1.17.27"
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/credentials/processcreds/doc.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/credentials/processcreds/doc.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/credentials/processcreds/doc.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/credentials/processcreds/doc.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,92 @@
+// Package processcreds is a credentials provider to retrieve credentials from a
+// external CLI invoked process.
+//
+// WARNING: The following describes a method of sourcing credentials from an external
+// process. This can potentially be dangerous, so proceed with caution. Other
+// credential providers should be preferred if at all possible. If using this
+// option, you should make sure that the config file is as locked down as possible
+// using security best practices for your operating system.
+//
+// # Concurrency and caching
+//
+// The Provider is not safe to be used concurrently, and does not provide any
+// caching of credentials retrieved. You should wrap the Provider with a
+// `aws.CredentialsCache` to provide concurrency safety, and caching of
+// credentials.
+//
+// # Loading credentials with the SDKs AWS Config
+//
+// You can use credentials from a AWS shared config `credential_process` in a
+// variety of ways.
+//
+// One way is to setup your shared config file, located in the default
+// location, with the `credential_process` key and the command you want to be
+// called. You also need to set the AWS_SDK_LOAD_CONFIG environment variable
+// (e.g., `export AWS_SDK_LOAD_CONFIG=1`) to use the shared config file.
+//
+//	[default]
+//	credential_process = /command/to/call
+//
+// Loading configuration using external will use the credential process to
+// retrieve credentials. NOTE: If there are credentials in the profile you are
+// using, the credential process will not be used.
+//
+//	// Initialize a session to load credentials.
+//	cfg, _ := config.LoadDefaultConfig(context.TODO())
+//
+//	// Create S3 service client to use the credentials.
+//	svc := s3.NewFromConfig(cfg)
+//
+// # Loading credentials with the Provider directly
+//
+// Another way to use the credentials process provider is by using the
+// `NewProvider` constructor to create the provider and providing a it with a
+// command to be executed to retrieve credentials.
+//
+// The following example creates a credentials provider for a command, and wraps
+// it with the CredentialsCache before assigning the provider to the Amazon S3 API
+// client's Credentials option.
+//
+//	 // Create credentials using the Provider.
+//		provider := processcreds.NewProvider("/path/to/command")
+//
+//	 // Create the service client value configured for credentials.
+//	 svc := s3.New(s3.Options{
+//	   Credentials: aws.NewCredentialsCache(provider),
+//	 })
+//
+// If you need more control, you can set any configurable options in the
+// credentials using one or more option functions.
+//
+//	provider := processcreds.NewProvider("/path/to/command",
+//	    func(o *processcreds.Options) {
+//	      // Override the provider's default timeout
+//	      o.Timeout = 2 * time.Minute
+//	    })
+//
+// You can also use your own `exec.Cmd` value by satisfying a value that satisfies
+// the `NewCommandBuilder` interface and use the `NewProviderCommand` constructor.
+//
+//	// Create an exec.Cmd
+//	cmdBuilder := processcreds.NewCommandBuilderFunc(
+//		func(ctx context.Context) (*exec.Cmd, error) {
+//			cmd := exec.CommandContext(ctx,
+//				"customCLICommand",
+//				"-a", "argument",
+//			)
+//			cmd.Env = []string{
+//				"ENV_VAR_FOO=value",
+//				"ENV_VAR_BAR=other_value",
+//			}
+//
+//			return cmd, nil
+//		},
+//	)
+//
+//	// Create credentials using your exec.Cmd and custom timeout
+//	provider := processcreds.NewProviderCommand(cmdBuilder,
+//		func(opt *processcreds.Provider) {
+//			// optionally override the provider's default timeout
+//			opt.Timeout = 1 * time.Second
+//		})
+package processcreds
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/credentials/processcreds/provider.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/credentials/processcreds/provider.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/credentials/processcreds/provider.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/credentials/processcreds/provider.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,285 @@
+package processcreds
+
+import (
+	"bytes"
+	"context"
+	"encoding/json"
+	"fmt"
+	"io"
+	"os"
+	"os/exec"
+	"runtime"
+	"time"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/aws/aws-sdk-go-v2/internal/sdkio"
+)
+
+const (
+	// ProviderName is the name this credentials provider will label any
+	// returned credentials Value with.
+	ProviderName = `ProcessProvider`
+
+	// DefaultTimeout default limit on time a process can run.
+	DefaultTimeout = time.Duration(1) * time.Minute
+)
+
+// ProviderError is an error indicating failure initializing or executing the
+// process credentials provider
+type ProviderError struct {
+	Err error
+}
+
+// Error returns the error message.
+func (e *ProviderError) Error() string {
+	return fmt.Sprintf("process provider error: %v", e.Err)
+}
+
+// Unwrap returns the underlying error the provider error wraps.
+func (e *ProviderError) Unwrap() error {
+	return e.Err
+}
+
+// Provider satisfies the credentials.Provider interface, and is a
+// client to retrieve credentials from a process.
+type Provider struct {
+	// Provides a constructor for exec.Cmd that are invoked by the provider for
+	// retrieving credentials. Use this to provide custom creation of exec.Cmd
+	// with things like environment variables, or other configuration.
+	//
+	// The provider defaults to the DefaultNewCommand function.
+	commandBuilder NewCommandBuilder
+
+	options Options
+}
+
+// Options is the configuration options for configuring the Provider.
+type Options struct {
+	// Timeout limits the time a process can run.
+	Timeout time.Duration
+}
+
+// NewCommandBuilder provides the interface for specifying how command will be
+// created that the Provider will use to retrieve credentials with.
+type NewCommandBuilder interface {
+	NewCommand(context.Context) (*exec.Cmd, error)
+}
+
+// NewCommandBuilderFunc provides a wrapper type around a function pointer to
+// satisfy the NewCommandBuilder interface.
+type NewCommandBuilderFunc func(context.Context) (*exec.Cmd, error)
+
+// NewCommand calls the underlying function pointer the builder was initialized with.
+func (fn NewCommandBuilderFunc) NewCommand(ctx context.Context) (*exec.Cmd, error) {
+	return fn(ctx)
+}
+
+// DefaultNewCommandBuilder provides the default NewCommandBuilder
+// implementation used by the provider. It takes a command and arguments to
+// invoke. The command will also be initialized with the current process
+// environment variables, stderr, and stdin pipes.
+type DefaultNewCommandBuilder struct {
+	Args []string
+}
+
+// NewCommand returns an initialized exec.Cmd with the builder's initialized
+// Args. The command is also initialized current process environment variables,
+// stderr, and stdin pipes.
+func (b DefaultNewCommandBuilder) NewCommand(ctx context.Context) (*exec.Cmd, error) {
+	var cmdArgs []string
+	if runtime.GOOS == "windows" {
+		cmdArgs = []string{"cmd.exe", "/C"}
+	} else {
+		cmdArgs = []string{"sh", "-c"}
+	}
+
+	if len(b.Args) == 0 {
+		return nil, &ProviderError{
+			Err: fmt.Errorf("failed to prepare command: command must not be empty"),
+		}
+	}
+
+	cmdArgs = append(cmdArgs, b.Args...)
+	cmd := exec.CommandContext(ctx, cmdArgs[0], cmdArgs[1:]...)
+	cmd.Env = os.Environ()
+
+	cmd.Stderr = os.Stderr // display stderr on console for MFA
+	cmd.Stdin = os.Stdin   // enable stdin for MFA
+
+	return cmd, nil
+}
+
+// NewProvider returns a pointer to a new Credentials object wrapping the
+// Provider.
+//
+// The provider defaults to the DefaultNewCommandBuilder for creating command
+// the Provider will use to retrieve credentials with.
+func NewProvider(command string, options ...func(*Options)) *Provider {
+	var args []string
+
+	// Ensure that the command arguments are not set if the provided command is
+	// empty. This will error out when the command is executed since no
+	// arguments are specified.
+	if len(command) > 0 {
+		args = []string{command}
+	}
+
+	commanBuilder := DefaultNewCommandBuilder{
+		Args: args,
+	}
+	return NewProviderCommand(commanBuilder, options...)
+}
+
+// NewProviderCommand returns a pointer to a new Credentials object with the
+// specified command, and default timeout duration. Use this to provide custom
+// creation of exec.Cmd for options like environment variables, or other
+// configuration.
+func NewProviderCommand(builder NewCommandBuilder, options ...func(*Options)) *Provider {
+	p := &Provider{
+		commandBuilder: builder,
+		options: Options{
+			Timeout: DefaultTimeout,
+		},
+	}
+
+	for _, option := range options {
+		option(&p.options)
+	}
+
+	return p
+}
+
+// A CredentialProcessResponse is the AWS credentials format that must be
+// returned when executing an external credential_process.
+type CredentialProcessResponse struct {
+	// As of this writing, the Version key must be set to 1. This might
+	// increment over time as the structure evolves.
+	Version int
+
+	// The access key ID that identifies the temporary security credentials.
+	AccessKeyID string `json:"AccessKeyId"`
+
+	// The secret access key that can be used to sign requests.
+	SecretAccessKey string
+
+	// The token that users must pass to the service API to use the temporary credentials.
+	SessionToken string
+
+	// The date on which the current credentials expire.
+	Expiration *time.Time
+
+	// The ID of the account for credentials
+	AccountID string `json:"AccountId"`
+}
+
+// Retrieve executes the credential process command and returns the
+// credentials, or error if the command fails.
+func (p *Provider) Retrieve(ctx context.Context) (aws.Credentials, error) {
+	out, err := p.executeCredentialProcess(ctx)
+	if err != nil {
+		return aws.Credentials{Source: ProviderName}, err
+	}
+
+	// Serialize and validate response
+	resp := &CredentialProcessResponse{}
+	if err = json.Unmarshal(out, resp); err != nil {
+		return aws.Credentials{Source: ProviderName}, &ProviderError{
+			Err: fmt.Errorf("parse failed of process output: %s, error: %w", out, err),
+		}
+	}
+
+	if resp.Version != 1 {
+		return aws.Credentials{Source: ProviderName}, &ProviderError{
+			Err: fmt.Errorf("wrong version in process output (not 1)"),
+		}
+	}
+
+	if len(resp.AccessKeyID) == 0 {
+		return aws.Credentials{Source: ProviderName}, &ProviderError{
+			Err: fmt.Errorf("missing AccessKeyId in process output"),
+		}
+	}
+
+	if len(resp.SecretAccessKey) == 0 {
+		return aws.Credentials{Source: ProviderName}, &ProviderError{
+			Err: fmt.Errorf("missing SecretAccessKey in process output"),
+		}
+	}
+
+	creds := aws.Credentials{
+		Source:          ProviderName,
+		AccessKeyID:     resp.AccessKeyID,
+		SecretAccessKey: resp.SecretAccessKey,
+		SessionToken:    resp.SessionToken,
+		AccountID:       resp.AccountID,
+	}
+
+	// Handle expiration
+	if resp.Expiration != nil {
+		creds.CanExpire = true
+		creds.Expires = *resp.Expiration
+	}
+
+	return creds, nil
+}
+
+// executeCredentialProcess starts the credential process on the OS and
+// returns the results or an error.
+func (p *Provider) executeCredentialProcess(ctx context.Context) ([]byte, error) {
+	if p.options.Timeout >= 0 {
+		var cancelFunc func()
+		ctx, cancelFunc = context.WithTimeout(ctx, p.options.Timeout)
+		defer cancelFunc()
+	}
+
+	cmd, err := p.commandBuilder.NewCommand(ctx)
+	if err != nil {
+		return nil, err
+	}
+
+	// get creds json on process's stdout
+	output := bytes.NewBuffer(make([]byte, 0, int(8*sdkio.KibiByte)))
+	if cmd.Stdout != nil {
+		cmd.Stdout = io.MultiWriter(cmd.Stdout, output)
+	} else {
+		cmd.Stdout = output
+	}
+
+	execCh := make(chan error, 1)
+	go executeCommand(cmd, execCh)
+
+	select {
+	case execError := <-execCh:
+		if execError == nil {
+			break
+		}
+		select {
+		case <-ctx.Done():
+			return output.Bytes(), &ProviderError{
+				Err: fmt.Errorf("credential process timed out: %w", execError),
+			}
+		default:
+			return output.Bytes(), &ProviderError{
+				Err: fmt.Errorf("error in credential_process: %w", execError),
+			}
+		}
+	}
+
+	out := output.Bytes()
+	if runtime.GOOS == "windows" {
+		// windows adds slashes to quotes
+		out = bytes.ReplaceAll(out, []byte(`\"`), []byte(`"`))
+	}
+
+	return out, nil
+}
+
+func executeCommand(cmd *exec.Cmd, exec chan error) {
+	// Start the command
+	err := cmd.Start()
+	if err == nil {
+		err = cmd.Wait()
+	}
+
+	exec <- err
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/doc.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/doc.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/doc.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/doc.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,81 @@
+// Package ssocreds provides a credential provider for retrieving temporary AWS
+// credentials using an SSO access token.
+//
+// IMPORTANT: The provider in this package does not initiate or perform the AWS
+// SSO login flow. The SDK provider expects that you have already performed the
+// SSO login flow using AWS CLI using the "aws sso login" command, or by some
+// other mechanism. The provider must find a valid non-expired access token for
+// the AWS SSO user portal URL in ~/.aws/sso/cache. If a cached token is not
+// found, it is expired, or the file is malformed, an error will be returned.
+//
+// # Loading AWS SSO credentials with the AWS shared configuration file
+//
+// You can configure AWS SSO credentials from the AWS shared configuration file by
+// specifying the required keys in the profile and referencing an sso-session:
+//
+//	sso_session
+//	sso_account_id
+//	sso_role_name
+//
+// For example, the following defines a profile "devsso" and specifies the AWS
+// SSO parameters that defines the target account, role, sign-on portal, and
+// the region where the user portal is located. Note: all SSO arguments must be
+// provided, or an error will be returned.
+//
+//	[profile devsso]
+//	sso_session = dev-session
+//	sso_role_name = SSOReadOnlyRole
+//	sso_account_id = 123456789012
+//
+//	[sso-session dev-session]
+//	sso_start_url = https://my-sso-portal.awsapps.com/start
+//	sso_region = us-east-1
+//	sso_registration_scopes = sso:account:access
+//
+// Using the config module, you can load the AWS SDK shared configuration, and
+// specify that this profile be used to retrieve credentials. For example:
+//
+//	config, err := config.LoadDefaultConfig(context.TODO(), config.WithSharedConfigProfile("devsso"))
+//	if err != nil {
+//	    return err
+//	}
+//
+// # Programmatically loading AWS SSO credentials directly
+//
+// You can programmatically construct the AWS SSO Provider in your application,
+// and provide the necessary information to load and retrieve temporary
+// credentials using an access token from ~/.aws/sso/cache.
+//
+//	ssoClient := sso.NewFromConfig(cfg)
+//	ssoOidcClient := ssooidc.NewFromConfig(cfg)
+//	tokenPath, err := ssocreds.StandardCachedTokenFilepath("dev-session")
+//	if err != nil {
+//	    return err
+//	}
+//
+//	var provider aws.CredentialsProvider
+//	provider = ssocreds.New(ssoClient, "123456789012", "SSOReadOnlyRole", "https://my-sso-portal.awsapps.com/start", func(options *ssocreds.Options) {
+//	  options.SSOTokenProvider = ssocreds.NewSSOTokenProvider(ssoOidcClient, tokenPath)
+//	})
+//
+//	// Wrap the provider with aws.CredentialsCache to cache the credentials until their expire time
+//	provider = aws.NewCredentialsCache(provider)
+//
+//	credentials, err := provider.Retrieve(context.TODO())
+//	if err != nil {
+//	    return err
+//	}
+//
+// It is important that you wrap the Provider with aws.CredentialsCache if you
+// are programmatically constructing the provider directly. This prevents your
+// application from accessing the cached access token and requesting new
+// credentials each time the credentials are used.
+//
+// # Additional Resources
+//
+// Configuring the AWS CLI to use AWS Single Sign-On:
+// https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-sso.html
+//
+// AWS Single Sign-On User Guide:
+// https://docs.aws.amazon.com/singlesignon/latest/userguide/what-is.html
+package ssocreds
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/sso_cached_token.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/sso_cached_token.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/sso_cached_token.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/sso_cached_token.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,233 @@
+package ssocreds
+
+import (
+	"crypto/sha1"
+	"encoding/hex"
+	"encoding/json"
+	"fmt"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/aws/aws-sdk-go-v2/internal/sdk"
+	"github.com/aws/aws-sdk-go-v2/internal/shareddefaults"
+)
+
+var osUserHomeDur = shareddefaults.UserHomeDir
+
+// StandardCachedTokenFilepath returns the filepath for the cached SSO token file, or
+// an error if unable to derive the path. The key will be used to compute a SHA1
+// value that is hex encoded.
+//
+// Derives the filepath using the Key as:
+//
+//	~/.aws/sso/cache/<sha1-hex-encoded-key>.json
+func StandardCachedTokenFilepath(key string) (string, error) {
+	homeDir := osUserHomeDur()
+	if len(homeDir) == 0 {
+		return "", fmt.Errorf("unable to get USER's home directory for cached token")
+	}
+	hash := sha1.New()
+	if _, err := hash.Write([]byte(key)); err != nil {
+		return "", fmt.Errorf("unable to compute cached token filepath key SHA1 hash, %w", err)
+	}
+
+	cacheFilename := strings.ToLower(hex.EncodeToString(hash.Sum(nil))) + ".json"
+
+	return filepath.Join(homeDir, ".aws", "sso", "cache", cacheFilename), nil
+}
+
+type tokenKnownFields struct {
+	AccessToken string   `json:"accessToken,omitempty"`
+	ExpiresAt   *rfc3339 `json:"expiresAt,omitempty"`
+
+	RefreshToken string `json:"refreshToken,omitempty"`
+	ClientID     string `json:"clientId,omitempty"`
+	ClientSecret string `json:"clientSecret,omitempty"`
+}
+
+type token struct {
+	tokenKnownFields
+	UnknownFields map[string]interface{} `json:"-"`
+}
+
+func (t token) MarshalJSON() ([]byte, error) {
+	fields := map[string]interface{}{}
+
+	setTokenFieldString(fields, "accessToken", t.AccessToken)
+	setTokenFieldRFC3339(fields, "expiresAt", t.ExpiresAt)
+
+	setTokenFieldString(fields, "refreshToken", t.RefreshToken)
+	setTokenFieldString(fields, "clientId", t.ClientID)
+	setTokenFieldString(fields, "clientSecret", t.ClientSecret)
+
+	for k, v := range t.UnknownFields {
+		if _, ok := fields[k]; ok {
+			return nil, fmt.Errorf("unknown token field %v, duplicates known field", k)
+		}
+		fields[k] = v
+	}
+
+	return json.Marshal(fields)
+}
+
+func setTokenFieldString(fields map[string]interface{}, key, value string) {
+	if value == "" {
+		return
+	}
+	fields[key] = value
+}
+func setTokenFieldRFC3339(fields map[string]interface{}, key string, value *rfc3339) {
+	if value == nil {
+		return
+	}
+	fields[key] = value
+}
+
+func (t *token) UnmarshalJSON(b []byte) error {
+	var fields map[string]interface{}
+	if err := json.Unmarshal(b, &fields); err != nil {
+		return nil
+	}
+
+	t.UnknownFields = map[string]interface{}{}
+
+	for k, v := range fields {
+		var err error
+		switch k {
+		case "accessToken":
+			err = getTokenFieldString(v, &t.AccessToken)
+		case "expiresAt":
+			err = getTokenFieldRFC3339(v, &t.ExpiresAt)
+		case "refreshToken":
+			err = getTokenFieldString(v, &t.RefreshToken)
+		case "clientId":
+			err = getTokenFieldString(v, &t.ClientID)
+		case "clientSecret":
+			err = getTokenFieldString(v, &t.ClientSecret)
+		default:
+			t.UnknownFields[k] = v
+		}
+
+		if err != nil {
+			return fmt.Errorf("field %q, %w", k, err)
+		}
+	}
+
+	return nil
+}
+
+func getTokenFieldString(v interface{}, value *string) error {
+	var ok bool
+	*value, ok = v.(string)
+	if !ok {
+		return fmt.Errorf("expect value to be string, got %T", v)
+	}
+	return nil
+}
+
+func getTokenFieldRFC3339(v interface{}, value **rfc3339) error {
+	var stringValue string
+	if err := getTokenFieldString(v, &stringValue); err != nil {
+		return err
+	}
+
+	timeValue, err := parseRFC3339(stringValue)
+	if err != nil {
+		return err
+	}
+
+	*value = &timeValue
+	return nil
+}
+
+func loadCachedToken(filename string) (token, error) {
+	fileBytes, err := ioutil.ReadFile(filename)
+	if err != nil {
+		return token{}, fmt.Errorf("failed to read cached SSO token file, %w", err)
+	}
+
+	var t token
+	if err := json.Unmarshal(fileBytes, &t); err != nil {
+		return token{}, fmt.Errorf("failed to parse cached SSO token file, %w", err)
+	}
+
+	if len(t.AccessToken) == 0 || t.ExpiresAt == nil || time.Time(*t.ExpiresAt).IsZero() {
+		return token{}, fmt.Errorf(
+			"cached SSO token must contain accessToken and expiresAt fields")
+	}
+
+	return t, nil
+}
+
+func storeCachedToken(filename string, t token, fileMode os.FileMode) (err error) {
+	tmpFilename := filename + ".tmp-" + strconv.FormatInt(sdk.NowTime().UnixNano(), 10)
+	if err := writeCacheFile(tmpFilename, fileMode, t); err != nil {
+		return err
+	}
+
+	if err := os.Rename(tmpFilename, filename); err != nil {
+		return fmt.Errorf("failed to replace old cached SSO token file, %w", err)
+	}
+
+	return nil
+}
+
+func writeCacheFile(filename string, fileMode os.FileMode, t token) (err error) {
+	var f *os.File
+	f, err = os.OpenFile(filename, os.O_CREATE|os.O_TRUNC|os.O_RDWR, fileMode)
+	if err != nil {
+		return fmt.Errorf("failed to create cached SSO token file %w", err)
+	}
+
+	defer func() {
+		closeErr := f.Close()
+		if err == nil && closeErr != nil {
+			err = fmt.Errorf("failed to close cached SSO token file, %w", closeErr)
+		}
+	}()
+
+	encoder := json.NewEncoder(f)
+
+	if err = encoder.Encode(t); err != nil {
+		return fmt.Errorf("failed to serialize cached SSO token, %w", err)
+	}
+
+	return nil
+}
+
+type rfc3339 time.Time
+
+func parseRFC3339(v string) (rfc3339, error) {
+	parsed, err := time.Parse(time.RFC3339, v)
+	if err != nil {
+		return rfc3339{}, fmt.Errorf("expected RFC3339 timestamp: %w", err)
+	}
+
+	return rfc3339(parsed), nil
+}
+
+func (r *rfc3339) UnmarshalJSON(bytes []byte) (err error) {
+	var value string
+
+	// Use JSON unmarshal to unescape the quoted value making use of JSON's
+	// unquoting rules.
+	if err = json.Unmarshal(bytes, &value); err != nil {
+		return err
+	}
+
+	*r, err = parseRFC3339(value)
+
+	return nil
+}
+
+func (r *rfc3339) MarshalJSON() ([]byte, error) {
+	value := time.Time(*r).Format(time.RFC3339)
+
+	// Use JSON unmarshal to unescape the quoted value making use of JSON's
+	// quoting rules.
+	return json.Marshal(value)
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/sso_credentials_provider.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/sso_credentials_provider.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/sso_credentials_provider.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/sso_credentials_provider.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,153 @@
+package ssocreds
+
+import (
+	"context"
+	"time"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/aws/aws-sdk-go-v2/internal/sdk"
+	"github.com/aws/aws-sdk-go-v2/service/sso"
+)
+
+// ProviderName is the name of the provider used to specify the source of
+// credentials.
+const ProviderName = "SSOProvider"
+
+// GetRoleCredentialsAPIClient is a API client that implements the
+// GetRoleCredentials operation.
+type GetRoleCredentialsAPIClient interface {
+	GetRoleCredentials(context.Context, *sso.GetRoleCredentialsInput, ...func(*sso.Options)) (
+		*sso.GetRoleCredentialsOutput, error,
+	)
+}
+
+// Options is the Provider options structure.
+type Options struct {
+	// The Client which is configured for the AWS Region where the AWS SSO user
+	// portal is located.
+	Client GetRoleCredentialsAPIClient
+
+	// The AWS account that is assigned to the user.
+	AccountID string
+
+	// The role name that is assigned to the user.
+	RoleName string
+
+	// The URL that points to the organization's AWS Single Sign-On (AWS SSO)
+	// user portal.
+	StartURL string
+
+	// The filepath the cached token will be retrieved from. If unset, the Provider
+	// will use the startURL to determine the filepath:
+	//
+	//    ~/.aws/sso/cache/<sha1-hex-encoded-startURL>.json
+	//
+	// If custom cached token filepath is used, the Provider's startUrl
+	// parameter will be ignored.
+	CachedTokenFilepath string
+
+	// Used by the SSOCredentialProvider if a token configuration
+	// profile is used in the shared config
+	SSOTokenProvider *SSOTokenProvider
+}
+
+// Provider is an AWS credential provider that retrieves temporary AWS
+// credentials by exchanging an SSO login token.
+type Provider struct {
+	options Options
+
+	cachedTokenFilepath string
+}
+
+// New returns a new AWS Single Sign-On (AWS SSO) credential provider. The
+// provided client is expected to be configured for the AWS Region where the
+// AWS SSO user portal is located.
+func New(client GetRoleCredentialsAPIClient, accountID, roleName, startURL string, optFns ...func(options *Options)) *Provider {
+	options := Options{
+		Client:    client,
+		AccountID: accountID,
+		RoleName:  roleName,
+		StartURL:  startURL,
+	}
+
+	for _, fn := range optFns {
+		fn(&options)
+	}
+
+	return &Provider{
+		options:             options,
+		cachedTokenFilepath: options.CachedTokenFilepath,
+	}
+}
+
+// Retrieve retrieves temporary AWS credentials from the configured Amazon
+// Single Sign-On (AWS SSO) user portal by exchanging the accessToken present
+// in ~/.aws/sso/cache. However, if a token provider configuration exists
+// in the shared config, then we ought to use the token provider rather than
+// directly accessing the cached token.
+func (p *Provider) Retrieve(ctx context.Context) (aws.Credentials, error) {
+	var accessToken *string
+	if p.options.SSOTokenProvider != nil {
+		token, err := p.options.SSOTokenProvider.RetrieveBearerToken(ctx)
+		if err != nil {
+			return aws.Credentials{}, err
+		}
+		accessToken = &token.Value
+	} else {
+		if p.cachedTokenFilepath == "" {
+			cachedTokenFilepath, err := StandardCachedTokenFilepath(p.options.StartURL)
+			if err != nil {
+				return aws.Credentials{}, &InvalidTokenError{Err: err}
+			}
+			p.cachedTokenFilepath = cachedTokenFilepath
+		}
+
+		tokenFile, err := loadCachedToken(p.cachedTokenFilepath)
+		if err != nil {
+			return aws.Credentials{}, &InvalidTokenError{Err: err}
+		}
+
+		if tokenFile.ExpiresAt == nil || sdk.NowTime().After(time.Time(*tokenFile.ExpiresAt)) {
+			return aws.Credentials{}, &InvalidTokenError{}
+		}
+		accessToken = &tokenFile.AccessToken
+	}
+
+	output, err := p.options.Client.GetRoleCredentials(ctx, &sso.GetRoleCredentialsInput{
+		AccessToken: accessToken,
+		AccountId:   &p.options.AccountID,
+		RoleName:    &p.options.RoleName,
+	})
+	if err != nil {
+		return aws.Credentials{}, err
+	}
+
+	return aws.Credentials{
+		AccessKeyID:     aws.ToString(output.RoleCredentials.AccessKeyId),
+		SecretAccessKey: aws.ToString(output.RoleCredentials.SecretAccessKey),
+		SessionToken:    aws.ToString(output.RoleCredentials.SessionToken),
+		CanExpire:       true,
+		Expires:         time.Unix(0, output.RoleCredentials.Expiration*int64(time.Millisecond)).UTC(),
+		Source:          ProviderName,
+		AccountID:       p.options.AccountID,
+	}, nil
+}
+
+// InvalidTokenError is the error type that is returned if loaded token has
+// expired or is otherwise invalid. To refresh the SSO session run AWS SSO
+// login with the corresponding profile.
+type InvalidTokenError struct {
+	Err error
+}
+
+func (i *InvalidTokenError) Unwrap() error {
+	return i.Err
+}
+
+func (i *InvalidTokenError) Error() string {
+	const msg = "the SSO session has expired or is invalid"
+	if i.Err == nil {
+		return msg
+	}
+	return msg + ": " + i.Err.Error()
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/sso_token_provider.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/sso_token_provider.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/sso_token_provider.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/sso_token_provider.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,147 @@
+package ssocreds
+
+import (
+	"context"
+	"fmt"
+	"os"
+	"time"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/aws/aws-sdk-go-v2/internal/sdk"
+	"github.com/aws/aws-sdk-go-v2/service/ssooidc"
+	"github.com/aws/smithy-go/auth/bearer"
+)
+
+// CreateTokenAPIClient provides the interface for the SSOTokenProvider's API
+// client for calling CreateToken operation to refresh the SSO token.
+type CreateTokenAPIClient interface {
+	CreateToken(context.Context, *ssooidc.CreateTokenInput, ...func(*ssooidc.Options)) (
+		*ssooidc.CreateTokenOutput, error,
+	)
+}
+
+// SSOTokenProviderOptions provides the options for configuring the
+// SSOTokenProvider.
+type SSOTokenProviderOptions struct {
+	// Client that can be overridden
+	Client CreateTokenAPIClient
+
+	// The set of API Client options to be applied when invoking the
+	// CreateToken operation.
+	ClientOptions []func(*ssooidc.Options)
+
+	// The path to the file containing the cached SSO token to be read from.
+	// Initialized by the NewSSOTokenProvider's cachedTokenFilepath parameter.
+	CachedTokenFilepath string
+}
+
+// SSOTokenProvider provides a utility for refreshing SSO AccessTokens for
+// Bearer Authentication. The SSOTokenProvider can only be used to refresh
+// already cached SSO Tokens. This utility cannot perform the initial SSO
+// create token.
+//
+// The SSOTokenProvider is not safe to use concurrently. It must be wrapped in
+// a utility such as smithy-go's auth/bearer#TokenCache. The SDK's
+// config.LoadDefaultConfig will automatically wrap the SSOTokenProvider with
+// the smithy-go TokenCache, if the external configuration loaded configured
+// for an SSO session.
+//
+// The initial SSO create token should be performed with the AWS CLI before the
+// Go application using the SSOTokenProvider will need to retrieve the SSO
+// token. If the AWS CLI has not created the token cache file, this provider
+// will return an error when attempting to retrieve the cached token.
+//
+// This provider will attempt to refresh the cached SSO token periodically if
+// needed when RetrieveBearerToken is called.
+//
+// A utility such as the AWS CLI must be used to initially create the SSO
+// session and cached token file.
+// https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-sso.html
+type SSOTokenProvider struct {
+	options SSOTokenProviderOptions
+}
+
+var _ bearer.TokenProvider = (*SSOTokenProvider)(nil)
+
+// NewSSOTokenProvider returns an initialized SSOTokenProvider that will
+// periodically refresh the SSO token cache stored in the cachedTokenFilepath.
+// The cachedTokenFilepath file's content will be rewritten by the token
+// provider when the token is refreshed.
+//
+// The client must be configured for the AWS region the SSO token was created for.
+func NewSSOTokenProvider(client CreateTokenAPIClient, cachedTokenFilepath string, optFns ...func(o *SSOTokenProviderOptions)) *SSOTokenProvider {
+	options := SSOTokenProviderOptions{
+		Client:              client,
+		CachedTokenFilepath: cachedTokenFilepath,
+	}
+	for _, fn := range optFns {
+		fn(&options)
+	}
+
+	provider := &SSOTokenProvider{
+		options: options,
+	}
+
+	return provider
+}
+
+// RetrieveBearerToken returns the SSO token stored in the cachedTokenFilepath
+// the SSOTokenProvider was created with. If the token has expired
+// RetrieveBearerToken will attempt to refresh it. If the token cannot be
+// refreshed or is not present an error will be returned.
+//
+// A utility such as the AWS CLI must be used to initially create the SSO
+// session and cached token file. https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-sso.html
+func (p SSOTokenProvider) RetrieveBearerToken(ctx context.Context) (bearer.Token, error) {
+	cachedToken, err := loadCachedToken(p.options.CachedTokenFilepath)
+	if err != nil {
+		return bearer.Token{}, err
+	}
+
+	if cachedToken.ExpiresAt != nil && sdk.NowTime().After(time.Time(*cachedToken.ExpiresAt)) {
+		cachedToken, err = p.refreshToken(ctx, cachedToken)
+		if err != nil {
+			return bearer.Token{}, fmt.Errorf("refresh cached SSO token failed, %w", err)
+		}
+	}
+
+	expiresAt := aws.ToTime((*time.Time)(cachedToken.ExpiresAt))
+	return bearer.Token{
+		Value:     cachedToken.AccessToken,
+		CanExpire: !expiresAt.IsZero(),
+		Expires:   expiresAt,
+	}, nil
+}
+
+func (p SSOTokenProvider) refreshToken(ctx context.Context, cachedToken token) (token, error) {
+	if cachedToken.ClientSecret == "" || cachedToken.ClientID == "" || cachedToken.RefreshToken == "" {
+		return token{}, fmt.Errorf("cached SSO token is expired, or not present, and cannot be refreshed")
+	}
+
+	createResult, err := p.options.Client.CreateToken(ctx, &ssooidc.CreateTokenInput{
+		ClientId:     &cachedToken.ClientID,
+		ClientSecret: &cachedToken.ClientSecret,
+		RefreshToken: &cachedToken.RefreshToken,
+		GrantType:    aws.String("refresh_token"),
+	}, p.options.ClientOptions...)
+	if err != nil {
+		return token{}, fmt.Errorf("unable to refresh SSO token, %w", err)
+	}
+
+	expiresAt := sdk.NowTime().Add(time.Duration(createResult.ExpiresIn) * time.Second)
+
+	cachedToken.AccessToken = aws.ToString(createResult.AccessToken)
+	cachedToken.ExpiresAt = (*rfc3339)(&expiresAt)
+	cachedToken.RefreshToken = aws.ToString(createResult.RefreshToken)
+
+	fileInfo, err := os.Stat(p.options.CachedTokenFilepath)
+	if err != nil {
+		return token{}, fmt.Errorf("failed to stat cached SSO token file %w", err)
+	}
+
+	if err = storeCachedToken(p.options.CachedTokenFilepath, cachedToken, fileInfo.Mode()); err != nil {
+		return token{}, fmt.Errorf("unable to cache refreshed SSO token, %w", err)
+	}
+
+	return cachedToken, nil
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/credentials/static_provider.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/credentials/static_provider.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/credentials/static_provider.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/credentials/static_provider.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,53 @@
+package credentials
+
+import (
+	"context"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+)
+
+const (
+	// StaticCredentialsName provides a name of Static provider
+	StaticCredentialsName = "StaticCredentials"
+)
+
+// StaticCredentialsEmptyError is emitted when static credentials are empty.
+type StaticCredentialsEmptyError struct{}
+
+func (*StaticCredentialsEmptyError) Error() string {
+	return "static credentials are empty"
+}
+
+// A StaticCredentialsProvider is a set of credentials which are set, and will
+// never expire.
+type StaticCredentialsProvider struct {
+	Value aws.Credentials
+}
+
+// NewStaticCredentialsProvider return a StaticCredentialsProvider initialized with the AWS
+// credentials passed in.
+func NewStaticCredentialsProvider(key, secret, session string) StaticCredentialsProvider {
+	return StaticCredentialsProvider{
+		Value: aws.Credentials{
+			AccessKeyID:     key,
+			SecretAccessKey: secret,
+			SessionToken:    session,
+		},
+	}
+}
+
+// Retrieve returns the credentials or error if the credentials are invalid.
+func (s StaticCredentialsProvider) Retrieve(_ context.Context) (aws.Credentials, error) {
+	v := s.Value
+	if v.AccessKeyID == "" || v.SecretAccessKey == "" {
+		return aws.Credentials{
+			Source: StaticCredentialsName,
+		}, &StaticCredentialsEmptyError{}
+	}
+
+	if len(v.Source) == 0 {
+		v.Source = StaticCredentialsName
+	}
+
+	return v, nil
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/credentials/stscreds/assume_role_provider.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/credentials/stscreds/assume_role_provider.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/credentials/stscreds/assume_role_provider.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/credentials/stscreds/assume_role_provider.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,326 @@
+// Package stscreds are credential Providers to retrieve STS AWS credentials.
+//
+// STS provides multiple ways to retrieve credentials which can be used when making
+// future AWS service API operation calls.
+//
+// The SDK will ensure that per instance of credentials.Credentials all requests
+// to refresh the credentials will be synchronized. But, the SDK is unable to
+// ensure synchronous usage of the AssumeRoleProvider if the value is shared
+// between multiple Credentials or service clients.
+//
+// # Assume Role
+//
+// To assume an IAM role using STS with the SDK you can create a new Credentials
+// with the SDKs's stscreds package.
+//
+//	// Initial credentials loaded from SDK's default credential chain. Such as
+//	// the environment, shared credentials (~/.aws/credentials), or EC2 Instance
+//	// Role. These credentials will be used to make the STS Assume Role API.
+//	cfg, err := config.LoadDefaultConfig(context.TODO())
+//	if err != nil {
+//		panic(err)
+//	}
+//
+//	// Create the credentials from AssumeRoleProvider to assume the role
+//	// referenced by the "myRoleARN" ARN.
+//	stsSvc := sts.NewFromConfig(cfg)
+//	creds := stscreds.NewAssumeRoleProvider(stsSvc, "myRoleArn")
+//
+//	cfg.Credentials = aws.NewCredentialsCache(creds)
+//
+//	// Create service client value configured for credentials
+//	// from assumed role.
+//	svc := s3.NewFromConfig(cfg)
+//
+// # Assume Role with custom MFA Token provider
+//
+// To assume an IAM role with a MFA token you can either specify a custom MFA
+// token provider or use the SDK's built in StdinTokenProvider that will prompt
+// the user for a token code each time the credentials need to be refreshed.
+// Specifying a custom token provider allows you to control where the token
+// code is retrieved from, and how it is refreshed.
+//
+// With a custom token provider, the provider is responsible for refreshing the
+// token code when called.
+//
+//		cfg, err := config.LoadDefaultConfig(context.TODO())
+//		if err != nil {
+//			panic(err)
+//		}
+//
+//	 staticTokenProvider := func() (string, error) {
+//	     return someTokenCode, nil
+//	 }
+//
+//		// Create the credentials from AssumeRoleProvider to assume the role
+//		// referenced by the "myRoleARN" ARN using the MFA token code provided.
+//		creds := stscreds.NewAssumeRoleProvider(sts.NewFromConfig(cfg), "myRoleArn", func(o *stscreds.AssumeRoleOptions) {
+//			o.SerialNumber = aws.String("myTokenSerialNumber")
+//			o.TokenProvider = staticTokenProvider
+//		})
+//
+//		cfg.Credentials = aws.NewCredentialsCache(creds)
+//
+//		// Create service client value configured for credentials
+//		// from assumed role.
+//		svc := s3.NewFromConfig(cfg)
+//
+// # Assume Role with MFA Token Provider
+//
+// To assume an IAM role with MFA for longer running tasks where the credentials
+// may need to be refreshed setting the TokenProvider field of AssumeRoleProvider
+// will allow the credential provider to prompt for new MFA token code when the
+// role's credentials need to be refreshed.
+//
+// The StdinTokenProvider function is available to prompt on stdin to retrieve
+// the MFA token code from the user. You can also implement custom prompts by
+// satisfying the TokenProvider function signature.
+//
+// Using StdinTokenProvider with multiple AssumeRoleProviders, or Credentials will
+// have undesirable results as the StdinTokenProvider will not be synchronized. A
+// single Credentials with an AssumeRoleProvider can be shared safely.
+//
+//	cfg, err := config.LoadDefaultConfig(context.TODO())
+//	if err != nil {
+//		panic(err)
+//	}
+//
+//	// Create the credentials from AssumeRoleProvider to assume the role
+//	// referenced by the "myRoleARN" ARN using the MFA token code provided.
+//	creds := stscreds.NewAssumeRoleProvider(sts.NewFromConfig(cfg), "myRoleArn", func(o *stscreds.AssumeRoleOptions) {
+//		o.SerialNumber = aws.String("myTokenSerialNumber")
+//		o.TokenProvider = stscreds.StdinTokenProvider
+//	})
+//
+//	cfg.Credentials = aws.NewCredentialsCache(creds)
+//
+//	// Create service client value configured for credentials
+//	// from assumed role.
+//	svc := s3.NewFromConfig(cfg)
+package stscreds
+
+import (
+	"context"
+	"fmt"
+	"time"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/aws/aws-sdk-go-v2/service/sts"
+	"github.com/aws/aws-sdk-go-v2/service/sts/types"
+)
+
+// StdinTokenProvider will prompt on stdout and read from stdin for a string value.
+// An error is returned if reading from stdin fails.
+//
+// Use this function to read MFA tokens from stdin. The function makes no attempt
+// to make atomic prompts from stdin across multiple goroutines.
+//
+// Using StdinTokenProvider with multiple AssumeRoleProviders, or Credentials will
+// have undesirable results as the StdinTokenProvider will not be synchronized. A
+// single Credentials with an AssumeRoleProvider can be shared safely
+//
+// Will wait forever until something is provided on the stdin.
+func StdinTokenProvider() (string, error) {
+	var v string
+	fmt.Printf("Assume Role MFA token code: ")
+	_, err := fmt.Scanln(&v)
+
+	return v, err
+}
+
+// ProviderName provides a name of AssumeRole provider
+const ProviderName = "AssumeRoleProvider"
+
+// AssumeRoleAPIClient is a client capable of the STS AssumeRole operation.
+type AssumeRoleAPIClient interface {
+	AssumeRole(ctx context.Context, params *sts.AssumeRoleInput, optFns ...func(*sts.Options)) (*sts.AssumeRoleOutput, error)
+}
+
+// DefaultDuration is the default amount of time in minutes that the
+// credentials will be valid for. This value is only used by AssumeRoleProvider
+// for specifying the default expiry duration of an assume role.
+//
+// Other providers such as WebIdentityRoleProvider do not use this value, and
+// instead rely on STS API's default parameter handling to assign a default
+// value.
+var DefaultDuration = time.Duration(15) * time.Minute
+
+// AssumeRoleProvider retrieves temporary credentials from the STS service, and
+// keeps track of their expiration time.
+//
+// This credential provider will be used by the SDK's default credential chain
+// when shared configuration is enabled, and the shared config or shared credentials
+// file configure assume role. See Session docs for how to do this.
+//
+// AssumeRoleProvider does not provide any synchronization and it is not safe
+// to share this value across multiple Credentials, Sessions, or service clients
+// without also sharing the same Credentials instance.
+type AssumeRoleProvider struct {
+	options AssumeRoleOptions
+}
+
+// AssumeRoleOptions is the configurable options for AssumeRoleProvider
+type AssumeRoleOptions struct {
+	// Client implementation of the AssumeRole operation. Required
+	Client AssumeRoleAPIClient
+
+	// IAM Role ARN to be assumed. Required
+	RoleARN string
+
+	// Session name, if you wish to uniquely identify this session.
+	RoleSessionName string
+
+	// Expiry duration of the STS credentials. Defaults to 15 minutes if not set.
+	Duration time.Duration
+
+	// Optional ExternalID to pass along, defaults to nil if not set.
+	ExternalID *string
+
+	// The policy plain text must be 2048 bytes or shorter. However, an internal
+	// conversion compresses it into a packed binary format with a separate limit.
+	// The PackedPolicySize response element indicates by percentage how close to
+	// the upper size limit the policy is, with 100% equaling the maximum allowed
+	// size.
+	Policy *string
+
+	// The ARNs of IAM managed policies you want to use as managed session policies.
+	// The policies must exist in the same account as the role.
+	//
+	// This parameter is optional. You can provide up to 10 managed policy ARNs.
+	// However, the plain text that you use for both inline and managed session
+	// policies can't exceed 2,048 characters.
+	//
+	// An AWS conversion compresses the passed session policies and session tags
+	// into a packed binary format that has a separate limit. Your request can fail
+	// for this limit even if your plain text meets the other requirements. The
+	// PackedPolicySize response element indicates by percentage how close the policies
+	// and tags for your request are to the upper size limit.
+	//
+	// Passing policies to this operation returns new temporary credentials. The
+	// resulting session's permissions are the intersection of the role's identity-based
+	// policy and the session policies. You can use the role's temporary credentials
+	// in subsequent AWS API calls to access resources in the account that owns
+	// the role. You cannot use session policies to grant more permissions than
+	// those allowed by the identity-based policy of the role that is being assumed.
+	// For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
+	// in the IAM User Guide.
+	PolicyARNs []types.PolicyDescriptorType
+
+	// The identification number of the MFA device that is associated with the user
+	// who is making the AssumeRole call. Specify this value if the trust policy
+	// of the role being assumed includes a condition that requires MFA authentication.
+	// The value is either the serial number for a hardware device (such as GAHT12345678)
+	// or an Amazon Resource Name (ARN) for a virtual device (such as arn:aws:iam::123456789012:mfa/user).
+	SerialNumber *string
+
+	// The source identity specified by the principal that is calling the AssumeRole
+	// operation. You can require users to specify a source identity when they assume a
+	// role. You do this by using the sts:SourceIdentity condition key in a role trust
+	// policy. You can use source identity information in CloudTrail logs to determine
+	// who took actions with a role. You can use the aws:SourceIdentity condition key
+	// to further control access to Amazon Web Services resources based on the value of
+	// source identity. For more information about using source identity, see Monitor
+	// and control actions taken with assumed roles
+	// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html)
+	// in the IAM User Guide.
+	SourceIdentity *string
+
+	// Async method of providing MFA token code for assuming an IAM role with MFA.
+	// The value returned by the function will be used as the TokenCode in the Retrieve
+	// call. See StdinTokenProvider for a provider that prompts and reads from stdin.
+	//
+	// This token provider will be called whenever the assumed role's
+	// credentials need to be refreshed when SerialNumber is set.
+	TokenProvider func() (string, error)
+
+	// A list of session tags that you want to pass. Each session tag consists of a key
+	// name and an associated value. For more information about session tags, see
+	// Tagging STS Sessions
+	// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) in the
+	// IAM User Guide. This parameter is optional. You can pass up to 50 session tags.
+	Tags []types.Tag
+
+	// A list of keys for session tags that you want to set as transitive. If you set a
+	// tag key as transitive, the corresponding key and value passes to subsequent
+	// sessions in a role chain. For more information, see Chaining Roles with Session
+	// Tags
+	// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining)
+	// in the IAM User Guide. This parameter is optional.
+	TransitiveTagKeys []string
+}
+
+// NewAssumeRoleProvider constructs and returns a credentials provider that
+// will retrieve credentials by assuming an IAM role using STS.
+func NewAssumeRoleProvider(client AssumeRoleAPIClient, roleARN string, optFns ...func(*AssumeRoleOptions)) *AssumeRoleProvider {
+	o := AssumeRoleOptions{
+		Client:  client,
+		RoleARN: roleARN,
+	}
+
+	for _, fn := range optFns {
+		fn(&o)
+	}
+
+	return &AssumeRoleProvider{
+		options: o,
+	}
+}
+
+// Retrieve generates a new set of temporary credentials using STS.
+func (p *AssumeRoleProvider) Retrieve(ctx context.Context) (aws.Credentials, error) {
+	// Apply defaults where parameters are not set.
+	if len(p.options.RoleSessionName) == 0 {
+		// Try to work out a role name that will hopefully end up unique.
+		p.options.RoleSessionName = fmt.Sprintf("aws-go-sdk-%d", time.Now().UTC().UnixNano())
+	}
+	if p.options.Duration == 0 {
+		// Expire as often as AWS permits.
+		p.options.Duration = DefaultDuration
+	}
+	input := &sts.AssumeRoleInput{
+		DurationSeconds:   aws.Int32(int32(p.options.Duration / time.Second)),
+		PolicyArns:        p.options.PolicyARNs,
+		RoleArn:           aws.String(p.options.RoleARN),
+		RoleSessionName:   aws.String(p.options.RoleSessionName),
+		ExternalId:        p.options.ExternalID,
+		SourceIdentity:    p.options.SourceIdentity,
+		Tags:              p.options.Tags,
+		TransitiveTagKeys: p.options.TransitiveTagKeys,
+	}
+	if p.options.Policy != nil {
+		input.Policy = p.options.Policy
+	}
+	if p.options.SerialNumber != nil {
+		if p.options.TokenProvider != nil {
+			input.SerialNumber = p.options.SerialNumber
+			code, err := p.options.TokenProvider()
+			if err != nil {
+				return aws.Credentials{}, err
+			}
+			input.TokenCode = aws.String(code)
+		} else {
+			return aws.Credentials{}, fmt.Errorf("assume role with MFA enabled, but TokenProvider is not set")
+		}
+	}
+
+	resp, err := p.options.Client.AssumeRole(ctx, input)
+	if err != nil {
+		return aws.Credentials{Source: ProviderName}, err
+	}
+
+	var accountID string
+	if resp.AssumedRoleUser != nil {
+		accountID = getAccountID(resp.AssumedRoleUser)
+	}
+
+	return aws.Credentials{
+		AccessKeyID:     *resp.Credentials.AccessKeyId,
+		SecretAccessKey: *resp.Credentials.SecretAccessKey,
+		SessionToken:    *resp.Credentials.SessionToken,
+		Source:          ProviderName,
+
+		CanExpire: true,
+		Expires:   *resp.Credentials.Expiration,
+		AccountID: accountID,
+	}, nil
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/credentials/stscreds/web_identity_provider.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/credentials/stscreds/web_identity_provider.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/credentials/stscreds/web_identity_provider.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/credentials/stscreds/web_identity_provider.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,169 @@
+package stscreds
+
+import (
+	"context"
+	"fmt"
+	"io/ioutil"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/aws/aws-sdk-go-v2/aws/retry"
+	"github.com/aws/aws-sdk-go-v2/internal/sdk"
+	"github.com/aws/aws-sdk-go-v2/service/sts"
+	"github.com/aws/aws-sdk-go-v2/service/sts/types"
+)
+
+var invalidIdentityTokenExceptionCode = (&types.InvalidIdentityTokenException{}).ErrorCode()
+
+const (
+	// WebIdentityProviderName is the web identity provider name
+	WebIdentityProviderName = "WebIdentityCredentials"
+)
+
+// AssumeRoleWithWebIdentityAPIClient is a client capable of the STS AssumeRoleWithWebIdentity operation.
+type AssumeRoleWithWebIdentityAPIClient interface {
+	AssumeRoleWithWebIdentity(ctx context.Context, params *sts.AssumeRoleWithWebIdentityInput, optFns ...func(*sts.Options)) (*sts.AssumeRoleWithWebIdentityOutput, error)
+}
+
+// WebIdentityRoleProvider is used to retrieve credentials using
+// an OIDC token.
+type WebIdentityRoleProvider struct {
+	options WebIdentityRoleOptions
+}
+
+// WebIdentityRoleOptions is a structure of configurable options for WebIdentityRoleProvider
+type WebIdentityRoleOptions struct {
+	// Client implementation of the AssumeRoleWithWebIdentity operation. Required
+	Client AssumeRoleWithWebIdentityAPIClient
+
+	// JWT Token Provider. Required
+	TokenRetriever IdentityTokenRetriever
+
+	// IAM Role ARN to assume. Required
+	RoleARN string
+
+	// Session name, if you wish to uniquely identify this session.
+	RoleSessionName string
+
+	// Expiry duration of the STS credentials. STS will assign a default expiry
+	// duration if this value is unset. This is different from the Duration
+	// option of AssumeRoleProvider, which automatically assigns 15 minutes if
+	// Duration is unset.
+	//
+	// See the STS AssumeRoleWithWebIdentity API reference guide for more
+	// information on defaults.
+	// https://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRoleWithWebIdentity.html
+	Duration time.Duration
+
+	// An IAM policy in JSON format that you want to use as an inline session policy.
+	Policy *string
+
+	// The Amazon Resource Names (ARNs) of the IAM managed policies that you
+	// want to use as managed session policies.  The policies must exist in the
+	// same account as the role.
+	PolicyARNs []types.PolicyDescriptorType
+}
+
+// IdentityTokenRetriever is an interface for retrieving a JWT
+type IdentityTokenRetriever interface {
+	GetIdentityToken() ([]byte, error)
+}
+
+// IdentityTokenFile is for retrieving an identity token from the given file name
+type IdentityTokenFile string
+
+// GetIdentityToken retrieves the JWT token from the file and returns the contents as a []byte
+func (j IdentityTokenFile) GetIdentityToken() ([]byte, error) {
+	b, err := ioutil.ReadFile(string(j))
+	if err != nil {
+		return nil, fmt.Errorf("unable to read file at %s: %v", string(j), err)
+	}
+
+	return b, nil
+}
+
+// NewWebIdentityRoleProvider will return a new WebIdentityRoleProvider with the
+// provided stsiface.ClientAPI
+func NewWebIdentityRoleProvider(client AssumeRoleWithWebIdentityAPIClient, roleARN string, tokenRetriever IdentityTokenRetriever, optFns ...func(*WebIdentityRoleOptions)) *WebIdentityRoleProvider {
+	o := WebIdentityRoleOptions{
+		Client:         client,
+		RoleARN:        roleARN,
+		TokenRetriever: tokenRetriever,
+	}
+
+	for _, fn := range optFns {
+		fn(&o)
+	}
+
+	return &WebIdentityRoleProvider{options: o}
+}
+
+// Retrieve will attempt to assume a role from a token which is located at
+// 'WebIdentityTokenFilePath' specified destination and if that is empty an
+// error will be returned.
+func (p *WebIdentityRoleProvider) Retrieve(ctx context.Context) (aws.Credentials, error) {
+	b, err := p.options.TokenRetriever.GetIdentityToken()
+	if err != nil {
+		return aws.Credentials{}, fmt.Errorf("failed to retrieve jwt from provide source, %w", err)
+	}
+
+	sessionName := p.options.RoleSessionName
+	if len(sessionName) == 0 {
+		// session name is used to uniquely identify a session. This simply
+		// uses unix time in nanoseconds to uniquely identify sessions.
+		sessionName = strconv.FormatInt(sdk.NowTime().UnixNano(), 10)
+	}
+	input := &sts.AssumeRoleWithWebIdentityInput{
+		PolicyArns:       p.options.PolicyARNs,
+		RoleArn:          &p.options.RoleARN,
+		RoleSessionName:  &sessionName,
+		WebIdentityToken: aws.String(string(b)),
+	}
+	if p.options.Duration != 0 {
+		// If set use the value, otherwise STS will assign a default expiration duration.
+		input.DurationSeconds = aws.Int32(int32(p.options.Duration / time.Second))
+	}
+	if p.options.Policy != nil {
+		input.Policy = p.options.Policy
+	}
+
+	resp, err := p.options.Client.AssumeRoleWithWebIdentity(ctx, input, func(options *sts.Options) {
+		options.Retryer = retry.AddWithErrorCodes(options.Retryer, invalidIdentityTokenExceptionCode)
+	})
+	if err != nil {
+		return aws.Credentials{}, fmt.Errorf("failed to retrieve credentials, %w", err)
+	}
+
+	var accountID string
+	if resp.AssumedRoleUser != nil {
+		accountID = getAccountID(resp.AssumedRoleUser)
+	}
+
+	// InvalidIdentityToken error is a temporary error that can occur
+	// when assuming an Role with a JWT web identity token.
+
+	value := aws.Credentials{
+		AccessKeyID:     aws.ToString(resp.Credentials.AccessKeyId),
+		SecretAccessKey: aws.ToString(resp.Credentials.SecretAccessKey),
+		SessionToken:    aws.ToString(resp.Credentials.SessionToken),
+		Source:          WebIdentityProviderName,
+		CanExpire:       true,
+		Expires:         *resp.Credentials.Expiration,
+		AccountID:       accountID,
+	}
+	return value, nil
+}
+
+// extract accountID from arn with format "arn:partition:service:region:account-id:[resource-section]"
+func getAccountID(u *types.AssumedRoleUser) string {
+	if u.Arn == nil {
+		return ""
+	}
+	parts := strings.Split(*u.Arn, ":")
+	if len(parts) < 5 {
+		return ""
+	}
+	return parts[4]
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/CHANGELOG.md 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/CHANGELOG.md
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/CHANGELOG.md	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/CHANGELOG.md	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,355 @@
+# v1.16.11 (2024-07-10.2)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.10 (2024-07-10)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.9 (2024-06-28)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.8 (2024-06-19)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.7 (2024-06-18)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.6 (2024-06-17)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.5 (2024-06-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.4 (2024-06-03)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.3 (2024-05-16)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.2 (2024-05-15)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.1 (2024-03-29)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.0 (2024-03-21)
+
+* **Feature**: Add config switch `DisableDefaultTimeout` that allows you to disable the default operation timeout (5 seconds) for IMDS calls.
+
+# v1.15.4 (2024-03-18)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.3 (2024-03-07)
+
+* **Bug Fix**: Remove dependency on go-cmp.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.2 (2024-02-23)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.1 (2024-02-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.0 (2024-02-13)
+
+* **Feature**: Bump minimum Go version to 1.20 per our language support policy.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.14.11 (2024-01-04)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.14.10 (2023-12-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.14.9 (2023-12-01)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.14.8 (2023-11-30)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.14.7 (2023-11-29)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.14.6 (2023-11-28.2)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.14.5 (2023-11-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.14.4 (2023-11-15)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.14.3 (2023-11-09)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.14.2 (2023-11-02)
+
+* No change notes available for this release.
+
+# v1.14.1 (2023-11-01)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.14.0 (2023-10-31)
+
+* **Feature**: **BREAKING CHANGE**: Bump minimum go version to 1.19 per the revised [go version support policy](https://aws.amazon.com/blogs/developer/aws-sdk-for-go-aligns-with-go-release-policy-on-supported-runtimes/).
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.13 (2023-10-12)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.12 (2023-10-06)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.11 (2023-08-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.10 (2023-08-18)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.9 (2023-08-17)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.8 (2023-08-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.7 (2023-07-31)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.6 (2023-07-28)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.5 (2023-07-13)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.4 (2023-06-13)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.3 (2023-04-24)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.2 (2023-04-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.1 (2023-03-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.0 (2023-03-14)
+
+* **Feature**: Add flag to disable IMDSv1 fallback
+
+# v1.12.24 (2023-03-10)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.23 (2023-02-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.22 (2023-02-03)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.21 (2022-12-15)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.20 (2022-12-02)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.19 (2022-10-24)
+
+* **Bug Fix**: Fixes an issue that prevented logging of the API request or responses when the respective log modes were enabled.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.18 (2022-10-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.17 (2022-09-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.16 (2022-09-14)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.15 (2022-09-02)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.14 (2022-08-31)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.13 (2022-08-29)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.12 (2022-08-11)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.11 (2022-08-09)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.10 (2022-08-08)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.9 (2022-08-01)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.8 (2022-07-05)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.7 (2022-06-29)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.6 (2022-06-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.5 (2022-05-17)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.4 (2022-04-25)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.3 (2022-03-30)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.2 (2022-03-24)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.1 (2022-03-23)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.0 (2022-03-08)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.0 (2022-02-24)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.10.0 (2022-01-14)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.9.0 (2022-01-07)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.8.2 (2021-12-02)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.8.1 (2021-11-19)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.8.0 (2021-11-06)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.7.0 (2021-10-21)
+
+* **Feature**: Updated  to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.6.0 (2021-10-11)
+
+* **Feature**: Respect passed in Context Deadline/Timeout. Updates the IMDS Client operations to not override the passed in Context's Deadline or Timeout options. If an Client operation is called with a Context with a Deadline or Timeout, the client will no longer override it with the client's default timeout.
+* **Bug Fix**: Fix IMDS client's response handling and operation timeout race. Fixes #1253
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.5.1 (2021-09-17)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.5.0 (2021-08-27)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.4.1 (2021-08-19)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.4.0 (2021-08-04)
+
+* **Feature**: adds error handling for deferred close calls
+* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.0 (2021-07-15)
+
+* **Feature**: Support has been added for EC2 IPv6-enabled Instance Metadata Service Endpoints.
+* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.2.0 (2021-06-25)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.1 (2021-05-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.0 (2021-05-14)
+
+* **Feature**: Constant has been added to modules to enable runtime version inspection for reporting.
+* **Dependency Update**: Updated to the latest SDK module versions
+
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/LICENSE.txt 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/LICENSE.txt
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/LICENSE.txt	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/LICENSE.txt	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_client.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_client.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_client.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_client.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,352 @@
+package imds
+
+import (
+	"context"
+	"fmt"
+	"net"
+	"net/http"
+	"os"
+	"strings"
+	"time"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/aws/aws-sdk-go-v2/aws/retry"
+	awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http"
+	internalconfig "github.com/aws/aws-sdk-go-v2/feature/ec2/imds/internal/config"
+	"github.com/aws/smithy-go"
+	"github.com/aws/smithy-go/logging"
+	"github.com/aws/smithy-go/middleware"
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// ServiceID provides the unique name of this API client
+const ServiceID = "ec2imds"
+
+// Client provides the API client for interacting with the Amazon EC2 Instance
+// Metadata Service API.
+type Client struct {
+	options Options
+}
+
+// ClientEnableState provides an enumeration if the client is enabled,
+// disabled, or default behavior.
+type ClientEnableState = internalconfig.ClientEnableState
+
+// Enumeration values for ClientEnableState
+const (
+	ClientDefaultEnableState ClientEnableState = internalconfig.ClientDefaultEnableState // default behavior
+	ClientDisabled           ClientEnableState = internalconfig.ClientDisabled           // client disabled
+	ClientEnabled            ClientEnableState = internalconfig.ClientEnabled            // client enabled
+)
+
+// EndpointModeState is an enum configuration variable describing the client endpoint mode.
+// Not configurable directly, but used when using the NewFromConfig.
+type EndpointModeState = internalconfig.EndpointModeState
+
+// Enumeration values for EndpointModeState
+const (
+	EndpointModeStateUnset EndpointModeState = internalconfig.EndpointModeStateUnset
+	EndpointModeStateIPv4  EndpointModeState = internalconfig.EndpointModeStateIPv4
+	EndpointModeStateIPv6  EndpointModeState = internalconfig.EndpointModeStateIPv6
+)
+
+const (
+	disableClientEnvVar = "AWS_EC2_METADATA_DISABLED"
+
+	// Client endpoint options
+	endpointEnvVar = "AWS_EC2_METADATA_SERVICE_ENDPOINT"
+
+	defaultIPv4Endpoint = "http://169.254.169.254"
+	defaultIPv6Endpoint = "http://[fd00:ec2::254]"
+)
+
+// New returns an initialized Client based on the functional options. Provide
+// additional functional options to further configure the behavior of the client,
+// such as changing the client's endpoint or adding custom middleware behavior.
+func New(options Options, optFns ...func(*Options)) *Client {
+	options = options.Copy()
+
+	for _, fn := range optFns {
+		fn(&options)
+	}
+
+	options.HTTPClient = resolveHTTPClient(options.HTTPClient)
+
+	if options.Retryer == nil {
+		options.Retryer = retry.NewStandard()
+	}
+	options.Retryer = retry.AddWithMaxBackoffDelay(options.Retryer, 1*time.Second)
+
+	if options.ClientEnableState == ClientDefaultEnableState {
+		if v := os.Getenv(disableClientEnvVar); strings.EqualFold(v, "true") {
+			options.ClientEnableState = ClientDisabled
+		}
+	}
+
+	if len(options.Endpoint) == 0 {
+		if v := os.Getenv(endpointEnvVar); len(v) != 0 {
+			options.Endpoint = v
+		}
+	}
+
+	client := &Client{
+		options: options,
+	}
+
+	if client.options.tokenProvider == nil && !client.options.disableAPIToken {
+		client.options.tokenProvider = newTokenProvider(client, defaultTokenTTL)
+	}
+
+	return client
+}
+
+// NewFromConfig returns an initialized Client based the AWS SDK config, and
+// functional options. Provide additional functional options to further
+// configure the behavior of the client, such as changing the client's endpoint
+// or adding custom middleware behavior.
+func NewFromConfig(cfg aws.Config, optFns ...func(*Options)) *Client {
+	opts := Options{
+		APIOptions:    append([]func(*middleware.Stack) error{}, cfg.APIOptions...),
+		HTTPClient:    cfg.HTTPClient,
+		ClientLogMode: cfg.ClientLogMode,
+		Logger:        cfg.Logger,
+	}
+
+	if cfg.Retryer != nil {
+		opts.Retryer = cfg.Retryer()
+	}
+
+	resolveClientEnableState(cfg, &opts)
+	resolveEndpointConfig(cfg, &opts)
+	resolveEndpointModeConfig(cfg, &opts)
+	resolveEnableFallback(cfg, &opts)
+
+	return New(opts, optFns...)
+}
+
+// Options provides the fields for configuring the API client's behavior.
+type Options struct {
+	// Set of options to modify how an operation is invoked. These apply to all
+	// operations invoked for this client. Use functional options on operation
+	// call to modify this list for per operation behavior.
+	APIOptions []func(*middleware.Stack) error
+
+	// The endpoint the client will use to retrieve EC2 instance metadata.
+	//
+	// Specifies the EC2 Instance Metadata Service endpoint to use. If specified it overrides EndpointMode.
+	//
+	// If unset, and the environment variable AWS_EC2_METADATA_SERVICE_ENDPOINT
+	// has a value the client will use the value of the environment variable as
+	// the endpoint for operation calls.
+	//
+	//    AWS_EC2_METADATA_SERVICE_ENDPOINT=http://[::1]
+	Endpoint string
+
+	// The endpoint selection mode the client will use if no explicit endpoint is provided using the Endpoint field.
+	//
+	// Setting EndpointMode to EndpointModeStateIPv4 will configure the client to use the default EC2 IPv4 endpoint.
+	// Setting EndpointMode to EndpointModeStateIPv6 will configure the client to use the default EC2 IPv6 endpoint.
+	//
+	// By default if EndpointMode is not set (EndpointModeStateUnset) than the default endpoint selection mode EndpointModeStateIPv4.
+	EndpointMode EndpointModeState
+
+	// The HTTP client to invoke API calls with. Defaults to client's default
+	// HTTP implementation if nil.
+	HTTPClient HTTPClient
+
+	// Retryer guides how HTTP requests should be retried in case of recoverable
+	// failures. When nil the API client will use a default retryer.
+	Retryer aws.Retryer
+
+	// Changes if the EC2 Instance Metadata client is enabled or not. Client
+	// will default to enabled if not set to ClientDisabled. When the client is
+	// disabled it will return an error for all operation calls.
+	//
+	// If ClientEnableState value is ClientDefaultEnableState (default value),
+	// and the environment variable "AWS_EC2_METADATA_DISABLED" is set to
+	// "true", the client will be disabled.
+	//
+	//    AWS_EC2_METADATA_DISABLED=true
+	ClientEnableState ClientEnableState
+
+	// Configures the events that will be sent to the configured logger.
+	ClientLogMode aws.ClientLogMode
+
+	// The logger writer interface to write logging messages to.
+	Logger logging.Logger
+
+	// Configure IMDSv1 fallback behavior. By default, the client will attempt
+	// to fall back to IMDSv1 as needed for backwards compatibility. When set to [aws.FalseTernary]
+	// the client will return any errors encountered from attempting to fetch a token
+	// instead of silently using the insecure data flow of IMDSv1.
+	//
+	// See [configuring IMDS] for more information.
+	//
+	// [configuring IMDS]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/configuring-instance-metadata-service.html
+	EnableFallback aws.Ternary
+
+	// By default, all IMDS client operations enforce a 5-second timeout. You
+	// can disable that behavior with this setting.
+	DisableDefaultTimeout bool
+
+	// provides the caching of API tokens used for operation calls. If unset,
+	// the API token will not be retrieved for the operation.
+	tokenProvider *tokenProvider
+
+	// option to disable the API token provider for testing.
+	disableAPIToken bool
+}
+
+// HTTPClient provides the interface for a client making HTTP requests with the
+// API.
+type HTTPClient interface {
+	Do(*http.Request) (*http.Response, error)
+}
+
+// Copy creates a copy of the API options.
+func (o Options) Copy() Options {
+	to := o
+	to.APIOptions = append([]func(*middleware.Stack) error{}, o.APIOptions...)
+	return to
+}
+
+// WithAPIOptions wraps the API middleware functions, as a functional option
+// for the API Client Options. Use this helper to add additional functional
+// options to the API client, or operation calls.
+func WithAPIOptions(optFns ...func(*middleware.Stack) error) func(*Options) {
+	return func(o *Options) {
+		o.APIOptions = append(o.APIOptions, optFns...)
+	}
+}
+
+func (c *Client) invokeOperation(
+	ctx context.Context, opID string, params interface{}, optFns []func(*Options),
+	stackFns ...func(*middleware.Stack, Options) error,
+) (
+	result interface{}, metadata middleware.Metadata, err error,
+) {
+	stack := middleware.NewStack(opID, smithyhttp.NewStackRequest)
+	options := c.options.Copy()
+	for _, fn := range optFns {
+		fn(&options)
+	}
+
+	if options.ClientEnableState == ClientDisabled {
+		return nil, metadata, &smithy.OperationError{
+			ServiceID:     ServiceID,
+			OperationName: opID,
+			Err: fmt.Errorf(
+				"access disabled to EC2 IMDS via client option, or %q environment variable",
+				disableClientEnvVar),
+		}
+	}
+
+	for _, fn := range stackFns {
+		if err := fn(stack, options); err != nil {
+			return nil, metadata, err
+		}
+	}
+
+	for _, fn := range options.APIOptions {
+		if err := fn(stack); err != nil {
+			return nil, metadata, err
+		}
+	}
+
+	handler := middleware.DecorateHandler(smithyhttp.NewClientHandler(options.HTTPClient), stack)
+	result, metadata, err = handler.Handle(ctx, params)
+	if err != nil {
+		return nil, metadata, &smithy.OperationError{
+			ServiceID:     ServiceID,
+			OperationName: opID,
+			Err:           err,
+		}
+	}
+
+	return result, metadata, err
+}
+
+const (
+	// HTTP client constants
+	defaultDialerTimeout         = 250 * time.Millisecond
+	defaultResponseHeaderTimeout = 500 * time.Millisecond
+)
+
+func resolveHTTPClient(client HTTPClient) HTTPClient {
+	if client == nil {
+		client = awshttp.NewBuildableClient()
+	}
+
+	if c, ok := client.(*awshttp.BuildableClient); ok {
+		client = c.
+			WithDialerOptions(func(d *net.Dialer) {
+				// Use a custom Dial timeout for the EC2 Metadata service to account
+				// for the possibility the application might not be running in an
+				// environment with the service present. The client should fail fast in
+				// this case.
+				d.Timeout = defaultDialerTimeout
+			}).
+			WithTransportOptions(func(tr *http.Transport) {
+				// Use a custom Transport timeout for the EC2 Metadata service to
+				// account for the possibility that the application might be running in
+				// a container, and EC2Metadata service drops the connection after a
+				// single IP Hop. The client should fail fast in this case.
+				tr.ResponseHeaderTimeout = defaultResponseHeaderTimeout
+			})
+	}
+
+	return client
+}
+
+func resolveClientEnableState(cfg aws.Config, options *Options) error {
+	if options.ClientEnableState != ClientDefaultEnableState {
+		return nil
+	}
+	value, found, err := internalconfig.ResolveClientEnableState(cfg.ConfigSources)
+	if err != nil || !found {
+		return err
+	}
+	options.ClientEnableState = value
+	return nil
+}
+
+func resolveEndpointModeConfig(cfg aws.Config, options *Options) error {
+	if options.EndpointMode != EndpointModeStateUnset {
+		return nil
+	}
+	value, found, err := internalconfig.ResolveEndpointModeConfig(cfg.ConfigSources)
+	if err != nil || !found {
+		return err
+	}
+	options.EndpointMode = value
+	return nil
+}
+
+func resolveEndpointConfig(cfg aws.Config, options *Options) error {
+	if len(options.Endpoint) != 0 {
+		return nil
+	}
+	value, found, err := internalconfig.ResolveEndpointConfig(cfg.ConfigSources)
+	if err != nil || !found {
+		return err
+	}
+	options.Endpoint = value
+	return nil
+}
+
+func resolveEnableFallback(cfg aws.Config, options *Options) {
+	if options.EnableFallback != aws.UnknownTernary {
+		return
+	}
+
+	disabled, ok := internalconfig.ResolveV1FallbackDisabled(cfg.ConfigSources)
+	if !ok {
+		return
+	}
+
+	if disabled {
+		options.EnableFallback = aws.FalseTernary
+	} else {
+		options.EnableFallback = aws.TrueTernary
+	}
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetDynamicData.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetDynamicData.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetDynamicData.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetDynamicData.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,77 @@
+package imds
+
+import (
+	"context"
+	"fmt"
+	"io"
+
+	"github.com/aws/smithy-go/middleware"
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+const getDynamicDataPath = "/latest/dynamic"
+
+// GetDynamicData uses the path provided to request information from the EC2
+// instance metadata service for dynamic data. The content will be returned
+// as a string, or error if the request failed.
+func (c *Client) GetDynamicData(ctx context.Context, params *GetDynamicDataInput, optFns ...func(*Options)) (*GetDynamicDataOutput, error) {
+	if params == nil {
+		params = &GetDynamicDataInput{}
+	}
+
+	result, metadata, err := c.invokeOperation(ctx, "GetDynamicData", params, optFns,
+		addGetDynamicDataMiddleware,
+	)
+	if err != nil {
+		return nil, err
+	}
+
+	out := result.(*GetDynamicDataOutput)
+	out.ResultMetadata = metadata
+	return out, nil
+}
+
+// GetDynamicDataInput provides the input parameters for the GetDynamicData
+// operation.
+type GetDynamicDataInput struct {
+	// The relative dynamic data path to retrieve. Can be empty string to
+	// retrieve a response containing a new line separated list of dynamic data
+	// resources available.
+	//
+	// Must not include the dynamic data base path.
+	//
+	// May include leading slash. If Path includes trailing slash the trailing
+	// slash will be included in the request for the resource.
+	Path string
+}
+
+// GetDynamicDataOutput provides the output parameters for the GetDynamicData
+// operation.
+type GetDynamicDataOutput struct {
+	Content io.ReadCloser
+
+	ResultMetadata middleware.Metadata
+}
+
+func addGetDynamicDataMiddleware(stack *middleware.Stack, options Options) error {
+	return addAPIRequestMiddleware(stack,
+		options,
+		"GetDynamicData",
+		buildGetDynamicDataPath,
+		buildGetDynamicDataOutput)
+}
+
+func buildGetDynamicDataPath(params interface{}) (string, error) {
+	p, ok := params.(*GetDynamicDataInput)
+	if !ok {
+		return "", fmt.Errorf("unknown parameter type %T", params)
+	}
+
+	return appendURIPath(getDynamicDataPath, p.Path), nil
+}
+
+func buildGetDynamicDataOutput(resp *smithyhttp.Response) (interface{}, error) {
+	return &GetDynamicDataOutput{
+		Content: resp.Body,
+	}, nil
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetIAMInfo.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetIAMInfo.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetIAMInfo.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetIAMInfo.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,103 @@
+package imds
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"io"
+	"strings"
+	"time"
+
+	"github.com/aws/smithy-go"
+	smithyio "github.com/aws/smithy-go/io"
+	"github.com/aws/smithy-go/middleware"
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+const getIAMInfoPath = getMetadataPath + "/iam/info"
+
+// GetIAMInfo retrieves an identity document describing an
+// instance. Error is returned if the request fails or is unable to parse
+// the response.
+func (c *Client) GetIAMInfo(
+	ctx context.Context, params *GetIAMInfoInput, optFns ...func(*Options),
+) (
+	*GetIAMInfoOutput, error,
+) {
+	if params == nil {
+		params = &GetIAMInfoInput{}
+	}
+
+	result, metadata, err := c.invokeOperation(ctx, "GetIAMInfo", params, optFns,
+		addGetIAMInfoMiddleware,
+	)
+	if err != nil {
+		return nil, err
+	}
+
+	out := result.(*GetIAMInfoOutput)
+	out.ResultMetadata = metadata
+	return out, nil
+}
+
+// GetIAMInfoInput provides the input parameters for GetIAMInfo operation.
+type GetIAMInfoInput struct{}
+
+// GetIAMInfoOutput provides the output parameters for GetIAMInfo operation.
+type GetIAMInfoOutput struct {
+	IAMInfo
+
+	ResultMetadata middleware.Metadata
+}
+
+func addGetIAMInfoMiddleware(stack *middleware.Stack, options Options) error {
+	return addAPIRequestMiddleware(stack,
+		options,
+		"GetIAMInfo",
+		buildGetIAMInfoPath,
+		buildGetIAMInfoOutput,
+	)
+}
+
+func buildGetIAMInfoPath(params interface{}) (string, error) {
+	return getIAMInfoPath, nil
+}
+
+func buildGetIAMInfoOutput(resp *smithyhttp.Response) (v interface{}, err error) {
+	defer func() {
+		closeErr := resp.Body.Close()
+		if err == nil {
+			err = closeErr
+		} else if closeErr != nil {
+			err = fmt.Errorf("response body close error: %v, original error: %w", closeErr, err)
+		}
+	}()
+
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+	body := io.TeeReader(resp.Body, ringBuffer)
+
+	imdsResult := &GetIAMInfoOutput{}
+	if err = json.NewDecoder(body).Decode(&imdsResult.IAMInfo); err != nil {
+		return nil, &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode instance identity document, %w", err),
+			Snapshot: ringBuffer.Bytes(),
+		}
+	}
+	// Any code other success is an error
+	if !strings.EqualFold(imdsResult.Code, "success") {
+		return nil, fmt.Errorf("failed to get EC2 IMDS IAM info, %s",
+			imdsResult.Code)
+	}
+
+	return imdsResult, nil
+}
+
+// IAMInfo provides the shape for unmarshaling an IAM info from the metadata
+// API.
+type IAMInfo struct {
+	Code               string
+	LastUpdated        time.Time
+	InstanceProfileArn string
+	InstanceProfileID  string
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetInstanceIdentityDocument.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetInstanceIdentityDocument.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetInstanceIdentityDocument.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetInstanceIdentityDocument.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,110 @@
+package imds
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"io"
+	"time"
+
+	"github.com/aws/smithy-go"
+	smithyio "github.com/aws/smithy-go/io"
+	"github.com/aws/smithy-go/middleware"
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+const getInstanceIdentityDocumentPath = getDynamicDataPath + "/instance-identity/document"
+
+// GetInstanceIdentityDocument retrieves an identity document describing an
+// instance. Error is returned if the request fails or is unable to parse
+// the response.
+func (c *Client) GetInstanceIdentityDocument(
+	ctx context.Context, params *GetInstanceIdentityDocumentInput, optFns ...func(*Options),
+) (
+	*GetInstanceIdentityDocumentOutput, error,
+) {
+	if params == nil {
+		params = &GetInstanceIdentityDocumentInput{}
+	}
+
+	result, metadata, err := c.invokeOperation(ctx, "GetInstanceIdentityDocument", params, optFns,
+		addGetInstanceIdentityDocumentMiddleware,
+	)
+	if err != nil {
+		return nil, err
+	}
+
+	out := result.(*GetInstanceIdentityDocumentOutput)
+	out.ResultMetadata = metadata
+	return out, nil
+}
+
+// GetInstanceIdentityDocumentInput provides the input parameters for
+// GetInstanceIdentityDocument operation.
+type GetInstanceIdentityDocumentInput struct{}
+
+// GetInstanceIdentityDocumentOutput provides the output parameters for
+// GetInstanceIdentityDocument operation.
+type GetInstanceIdentityDocumentOutput struct {
+	InstanceIdentityDocument
+
+	ResultMetadata middleware.Metadata
+}
+
+func addGetInstanceIdentityDocumentMiddleware(stack *middleware.Stack, options Options) error {
+	return addAPIRequestMiddleware(stack,
+		options,
+		"GetInstanceIdentityDocument",
+		buildGetInstanceIdentityDocumentPath,
+		buildGetInstanceIdentityDocumentOutput,
+	)
+}
+
+func buildGetInstanceIdentityDocumentPath(params interface{}) (string, error) {
+	return getInstanceIdentityDocumentPath, nil
+}
+
+func buildGetInstanceIdentityDocumentOutput(resp *smithyhttp.Response) (v interface{}, err error) {
+	defer func() {
+		closeErr := resp.Body.Close()
+		if err == nil {
+			err = closeErr
+		} else if closeErr != nil {
+			err = fmt.Errorf("response body close error: %v, original error: %w", closeErr, err)
+		}
+	}()
+
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+	body := io.TeeReader(resp.Body, ringBuffer)
+
+	output := &GetInstanceIdentityDocumentOutput{}
+	if err = json.NewDecoder(body).Decode(&output.InstanceIdentityDocument); err != nil {
+		return nil, &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode instance identity document, %w", err),
+			Snapshot: ringBuffer.Bytes(),
+		}
+	}
+
+	return output, nil
+}
+
+// InstanceIdentityDocument provides the shape for unmarshaling
+// an instance identity document
+type InstanceIdentityDocument struct {
+	DevpayProductCodes      []string  `json:"devpayProductCodes"`
+	MarketplaceProductCodes []string  `json:"marketplaceProductCodes"`
+	AvailabilityZone        string    `json:"availabilityZone"`
+	PrivateIP               string    `json:"privateIp"`
+	Version                 string    `json:"version"`
+	Region                  string    `json:"region"`
+	InstanceID              string    `json:"instanceId"`
+	BillingProducts         []string  `json:"billingProducts"`
+	InstanceType            string    `json:"instanceType"`
+	AccountID               string    `json:"accountId"`
+	PendingTime             time.Time `json:"pendingTime"`
+	ImageID                 string    `json:"imageId"`
+	KernelID                string    `json:"kernelId"`
+	RamdiskID               string    `json:"ramdiskId"`
+	Architecture            string    `json:"architecture"`
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetMetadata.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetMetadata.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetMetadata.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetMetadata.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,77 @@
+package imds
+
+import (
+	"context"
+	"fmt"
+	"io"
+
+	"github.com/aws/smithy-go/middleware"
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+const getMetadataPath = "/latest/meta-data"
+
+// GetMetadata uses the path provided to request information from the Amazon
+// EC2 Instance Metadata Service. The content will be returned as a string, or
+// error if the request failed.
+func (c *Client) GetMetadata(ctx context.Context, params *GetMetadataInput, optFns ...func(*Options)) (*GetMetadataOutput, error) {
+	if params == nil {
+		params = &GetMetadataInput{}
+	}
+
+	result, metadata, err := c.invokeOperation(ctx, "GetMetadata", params, optFns,
+		addGetMetadataMiddleware,
+	)
+	if err != nil {
+		return nil, err
+	}
+
+	out := result.(*GetMetadataOutput)
+	out.ResultMetadata = metadata
+	return out, nil
+}
+
+// GetMetadataInput provides the input parameters for the GetMetadata
+// operation.
+type GetMetadataInput struct {
+	// The relative metadata path to retrieve. Can be empty string to retrieve
+	// a response containing a new line separated list of metadata resources
+	// available.
+	//
+	// Must not include the metadata base path.
+	//
+	// May include leading slash. If Path includes trailing slash the trailing slash
+	// will be included in the request for the resource.
+	Path string
+}
+
+// GetMetadataOutput provides the output parameters for the GetMetadata
+// operation.
+type GetMetadataOutput struct {
+	Content io.ReadCloser
+
+	ResultMetadata middleware.Metadata
+}
+
+func addGetMetadataMiddleware(stack *middleware.Stack, options Options) error {
+	return addAPIRequestMiddleware(stack,
+		options,
+		"GetMetadata",
+		buildGetMetadataPath,
+		buildGetMetadataOutput)
+}
+
+func buildGetMetadataPath(params interface{}) (string, error) {
+	p, ok := params.(*GetMetadataInput)
+	if !ok {
+		return "", fmt.Errorf("unknown parameter type %T", params)
+	}
+
+	return appendURIPath(getMetadataPath, p.Path), nil
+}
+
+func buildGetMetadataOutput(resp *smithyhttp.Response) (interface{}, error) {
+	return &GetMetadataOutput{
+		Content: resp.Body,
+	}, nil
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetRegion.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetRegion.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetRegion.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetRegion.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,73 @@
+package imds
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/aws/smithy-go/middleware"
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// GetRegion retrieves an identity document describing an
+// instance. Error is returned if the request fails or is unable to parse
+// the response.
+func (c *Client) GetRegion(
+	ctx context.Context, params *GetRegionInput, optFns ...func(*Options),
+) (
+	*GetRegionOutput, error,
+) {
+	if params == nil {
+		params = &GetRegionInput{}
+	}
+
+	result, metadata, err := c.invokeOperation(ctx, "GetRegion", params, optFns,
+		addGetRegionMiddleware,
+	)
+	if err != nil {
+		return nil, err
+	}
+
+	out := result.(*GetRegionOutput)
+	out.ResultMetadata = metadata
+	return out, nil
+}
+
+// GetRegionInput provides the input parameters for GetRegion operation.
+type GetRegionInput struct{}
+
+// GetRegionOutput provides the output parameters for GetRegion operation.
+type GetRegionOutput struct {
+	Region string
+
+	ResultMetadata middleware.Metadata
+}
+
+func addGetRegionMiddleware(stack *middleware.Stack, options Options) error {
+	return addAPIRequestMiddleware(stack,
+		options,
+		"GetRegion",
+		buildGetInstanceIdentityDocumentPath,
+		buildGetRegionOutput,
+	)
+}
+
+func buildGetRegionOutput(resp *smithyhttp.Response) (interface{}, error) {
+	out, err := buildGetInstanceIdentityDocumentOutput(resp)
+	if err != nil {
+		return nil, err
+	}
+
+	result, ok := out.(*GetInstanceIdentityDocumentOutput)
+	if !ok {
+		return nil, fmt.Errorf("unexpected instance identity document type, %T", out)
+	}
+
+	region := result.Region
+	if len(region) == 0 {
+		return "", fmt.Errorf("instance metadata did not return a region value")
+	}
+
+	return &GetRegionOutput{
+		Region: region,
+	}, nil
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetToken.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetToken.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetToken.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetToken.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,119 @@
+package imds
+
+import (
+	"context"
+	"fmt"
+	"io"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/aws/smithy-go/middleware"
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+const getTokenPath = "/latest/api/token"
+const tokenTTLHeader = "X-Aws-Ec2-Metadata-Token-Ttl-Seconds"
+
+// getToken uses the duration to return a token for EC2 IMDS, or an error if
+// the request failed.
+func (c *Client) getToken(ctx context.Context, params *getTokenInput, optFns ...func(*Options)) (*getTokenOutput, error) {
+	if params == nil {
+		params = &getTokenInput{}
+	}
+
+	result, metadata, err := c.invokeOperation(ctx, "getToken", params, optFns,
+		addGetTokenMiddleware,
+	)
+	if err != nil {
+		return nil, err
+	}
+
+	out := result.(*getTokenOutput)
+	out.ResultMetadata = metadata
+	return out, nil
+}
+
+type getTokenInput struct {
+	TokenTTL time.Duration
+}
+
+type getTokenOutput struct {
+	Token    string
+	TokenTTL time.Duration
+
+	ResultMetadata middleware.Metadata
+}
+
+func addGetTokenMiddleware(stack *middleware.Stack, options Options) error {
+	err := addRequestMiddleware(stack,
+		options,
+		"PUT",
+		"GetToken",
+		buildGetTokenPath,
+		buildGetTokenOutput)
+	if err != nil {
+		return err
+	}
+
+	err = stack.Serialize.Add(&tokenTTLRequestHeader{}, middleware.After)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func buildGetTokenPath(interface{}) (string, error) {
+	return getTokenPath, nil
+}
+
+func buildGetTokenOutput(resp *smithyhttp.Response) (v interface{}, err error) {
+	defer func() {
+		closeErr := resp.Body.Close()
+		if err == nil {
+			err = closeErr
+		} else if closeErr != nil {
+			err = fmt.Errorf("response body close error: %v, original error: %w", closeErr, err)
+		}
+	}()
+
+	ttlHeader := resp.Header.Get(tokenTTLHeader)
+	tokenTTL, err := strconv.ParseInt(ttlHeader, 10, 64)
+	if err != nil {
+		return nil, fmt.Errorf("unable to parse API token, %w", err)
+	}
+
+	var token strings.Builder
+	if _, err = io.Copy(&token, resp.Body); err != nil {
+		return nil, fmt.Errorf("unable to read API token, %w", err)
+	}
+
+	return &getTokenOutput{
+		Token:    token.String(),
+		TokenTTL: time.Duration(tokenTTL) * time.Second,
+	}, nil
+}
+
+type tokenTTLRequestHeader struct{}
+
+func (*tokenTTLRequestHeader) ID() string { return "tokenTTLRequestHeader" }
+func (*tokenTTLRequestHeader) HandleSerialize(
+	ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler,
+) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	req, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, fmt.Errorf("expect HTTP transport, got %T", in.Request)
+	}
+
+	input, ok := in.Parameters.(*getTokenInput)
+	if !ok {
+		return out, metadata, fmt.Errorf("expect getTokenInput, got %T", in.Parameters)
+	}
+
+	req.Header.Set(tokenTTLHeader, strconv.Itoa(int(input.TokenTTL/time.Second)))
+
+	return next.HandleSerialize(ctx, in)
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetUserData.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetUserData.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetUserData.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetUserData.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,61 @@
+package imds
+
+import (
+	"context"
+	"io"
+
+	"github.com/aws/smithy-go/middleware"
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+const getUserDataPath = "/latest/user-data"
+
+// GetUserData uses the path provided to request information from the EC2
+// instance metadata service for dynamic data. The content will be returned
+// as a string, or error if the request failed.
+func (c *Client) GetUserData(ctx context.Context, params *GetUserDataInput, optFns ...func(*Options)) (*GetUserDataOutput, error) {
+	if params == nil {
+		params = &GetUserDataInput{}
+	}
+
+	result, metadata, err := c.invokeOperation(ctx, "GetUserData", params, optFns,
+		addGetUserDataMiddleware,
+	)
+	if err != nil {
+		return nil, err
+	}
+
+	out := result.(*GetUserDataOutput)
+	out.ResultMetadata = metadata
+	return out, nil
+}
+
+// GetUserDataInput provides the input parameters for the GetUserData
+// operation.
+type GetUserDataInput struct{}
+
+// GetUserDataOutput provides the output parameters for the GetUserData
+// operation.
+type GetUserDataOutput struct {
+	Content io.ReadCloser
+
+	ResultMetadata middleware.Metadata
+}
+
+func addGetUserDataMiddleware(stack *middleware.Stack, options Options) error {
+	return addAPIRequestMiddleware(stack,
+		options,
+		"GetUserData",
+		buildGetUserDataPath,
+		buildGetUserDataOutput)
+}
+
+func buildGetUserDataPath(params interface{}) (string, error) {
+	return getUserDataPath, nil
+}
+
+func buildGetUserDataOutput(resp *smithyhttp.Response) (interface{}, error) {
+	return &GetUserDataOutput{
+		Content: resp.Body,
+	}, nil
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/auth.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/auth.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/auth.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/auth.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,48 @@
+package imds
+
+import (
+	"context"
+	"github.com/aws/smithy-go/middleware"
+)
+
+type getIdentityMiddleware struct {
+	options Options
+}
+
+func (*getIdentityMiddleware) ID() string {
+	return "GetIdentity"
+}
+
+func (m *getIdentityMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (
+	out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
+) {
+	return next.HandleFinalize(ctx, in)
+}
+
+type signRequestMiddleware struct {
+}
+
+func (*signRequestMiddleware) ID() string {
+	return "Signing"
+}
+
+func (m *signRequestMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (
+	out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
+) {
+	return next.HandleFinalize(ctx, in)
+}
+
+type resolveAuthSchemeMiddleware struct {
+	operation string
+	options   Options
+}
+
+func (*resolveAuthSchemeMiddleware) ID() string {
+	return "ResolveAuthScheme"
+}
+
+func (m *resolveAuthSchemeMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (
+	out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
+) {
+	return next.HandleFinalize(ctx, in)
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/doc.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/doc.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/doc.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/doc.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,12 @@
+// Package imds provides the API client for interacting with the Amazon EC2
+// Instance Metadata Service.
+//
+// All Client operation calls have a default timeout. If the operation is not
+// completed before this timeout expires, the operation will be canceled. This
+// timeout can be overridden through the following:
+//   - Set the options flag DisableDefaultTimeout
+//   - Provide a Context with a timeout or deadline with calling the client's operations.
+//
+// See the EC2 IMDS user guide for more information on using the API.
+// https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html
+package imds
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/endpoints.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/endpoints.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/endpoints.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/endpoints.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,20 @@
+package imds
+
+import (
+	"context"
+	"github.com/aws/smithy-go/middleware"
+)
+
+type resolveEndpointV2Middleware struct {
+	options Options
+}
+
+func (*resolveEndpointV2Middleware) ID() string {
+	return "ResolveEndpointV2"
+}
+
+func (m *resolveEndpointV2Middleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (
+	out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
+) {
+	return next.HandleFinalize(ctx, in)
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/go_module_metadata.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/go_module_metadata.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/go_module_metadata.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/go_module_metadata.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,6 @@
+// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT.
+
+package imds
+
+// goModuleVersion is the tagged release for this module
+const goModuleVersion = "1.16.11"
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/internal/config/resolvers.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/internal/config/resolvers.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/internal/config/resolvers.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/internal/config/resolvers.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,114 @@
+package config
+
+import (
+	"fmt"
+	"strings"
+)
+
+// ClientEnableState provides an enumeration if the client is enabled,
+// disabled, or default behavior.
+type ClientEnableState uint
+
+// Enumeration values for ClientEnableState
+const (
+	ClientDefaultEnableState ClientEnableState = iota
+	ClientDisabled
+	ClientEnabled
+)
+
+// EndpointModeState is the EC2 IMDS Endpoint Configuration Mode
+type EndpointModeState uint
+
+// Enumeration values for ClientEnableState
+const (
+	EndpointModeStateUnset EndpointModeState = iota
+	EndpointModeStateIPv4
+	EndpointModeStateIPv6
+)
+
+// SetFromString sets the EndpointModeState based on the provided string value. Unknown values will default to EndpointModeStateUnset
+func (e *EndpointModeState) SetFromString(v string) error {
+	v = strings.TrimSpace(v)
+
+	switch {
+	case len(v) == 0:
+		*e = EndpointModeStateUnset
+	case strings.EqualFold(v, "IPv6"):
+		*e = EndpointModeStateIPv6
+	case strings.EqualFold(v, "IPv4"):
+		*e = EndpointModeStateIPv4
+	default:
+		return fmt.Errorf("unknown EC2 IMDS endpoint mode, must be either IPv6 or IPv4")
+	}
+	return nil
+}
+
+// ClientEnableStateResolver is a config resolver interface for retrieving whether the IMDS client is disabled.
+type ClientEnableStateResolver interface {
+	GetEC2IMDSClientEnableState() (ClientEnableState, bool, error)
+}
+
+// EndpointModeResolver is a config resolver interface for retrieving the EndpointModeState configuration.
+type EndpointModeResolver interface {
+	GetEC2IMDSEndpointMode() (EndpointModeState, bool, error)
+}
+
+// EndpointResolver is a config resolver interface for retrieving the endpoint.
+type EndpointResolver interface {
+	GetEC2IMDSEndpoint() (string, bool, error)
+}
+
+type v1FallbackDisabledResolver interface {
+	GetEC2IMDSV1FallbackDisabled() (bool, bool)
+}
+
+// ResolveClientEnableState resolves the ClientEnableState from a list of configuration sources.
+func ResolveClientEnableState(sources []interface{}) (value ClientEnableState, found bool, err error) {
+	for _, source := range sources {
+		if resolver, ok := source.(ClientEnableStateResolver); ok {
+			value, found, err = resolver.GetEC2IMDSClientEnableState()
+			if err != nil || found {
+				return value, found, err
+			}
+		}
+	}
+	return value, found, err
+}
+
+// ResolveEndpointModeConfig resolves the EndpointModeState from a list of configuration sources.
+func ResolveEndpointModeConfig(sources []interface{}) (value EndpointModeState, found bool, err error) {
+	for _, source := range sources {
+		if resolver, ok := source.(EndpointModeResolver); ok {
+			value, found, err = resolver.GetEC2IMDSEndpointMode()
+			if err != nil || found {
+				return value, found, err
+			}
+		}
+	}
+	return value, found, err
+}
+
+// ResolveEndpointConfig resolves the endpoint from a list of configuration sources.
+func ResolveEndpointConfig(sources []interface{}) (value string, found bool, err error) {
+	for _, source := range sources {
+		if resolver, ok := source.(EndpointResolver); ok {
+			value, found, err = resolver.GetEC2IMDSEndpoint()
+			if err != nil || found {
+				return value, found, err
+			}
+		}
+	}
+	return value, found, err
+}
+
+// ResolveV1FallbackDisabled ...
+func ResolveV1FallbackDisabled(sources []interface{}) (bool, bool) {
+	for _, source := range sources {
+		if resolver, ok := source.(v1FallbackDisabledResolver); ok {
+			if v, found := resolver.GetEC2IMDSV1FallbackDisabled(); found {
+				return v, true
+			}
+		}
+	}
+	return false, false
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/request_middleware.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/request_middleware.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/request_middleware.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/request_middleware.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,313 @@
+package imds
+
+import (
+	"bytes"
+	"context"
+	"fmt"
+	"io/ioutil"
+	"net/url"
+	"path"
+	"time"
+
+	awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+	"github.com/aws/aws-sdk-go-v2/aws/retry"
+	"github.com/aws/smithy-go/middleware"
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+func addAPIRequestMiddleware(stack *middleware.Stack,
+	options Options,
+	operation string,
+	getPath func(interface{}) (string, error),
+	getOutput func(*smithyhttp.Response) (interface{}, error),
+) (err error) {
+	err = addRequestMiddleware(stack, options, "GET", operation, getPath, getOutput)
+	if err != nil {
+		return err
+	}
+
+	// Token Serializer build and state management.
+	if !options.disableAPIToken {
+		err = stack.Finalize.Insert(options.tokenProvider, (*retry.Attempt)(nil).ID(), middleware.After)
+		if err != nil {
+			return err
+		}
+
+		err = stack.Deserialize.Insert(options.tokenProvider, "OperationDeserializer", middleware.Before)
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func addRequestMiddleware(stack *middleware.Stack,
+	options Options,
+	method string,
+	operation string,
+	getPath func(interface{}) (string, error),
+	getOutput func(*smithyhttp.Response) (interface{}, error),
+) (err error) {
+	err = awsmiddleware.AddSDKAgentKey(awsmiddleware.FeatureMetadata, "ec2-imds")(stack)
+	if err != nil {
+		return err
+	}
+
+	// Operation timeout
+	err = stack.Initialize.Add(&operationTimeout{
+		Disabled:       options.DisableDefaultTimeout,
+		DefaultTimeout: defaultOperationTimeout,
+	}, middleware.Before)
+	if err != nil {
+		return err
+	}
+
+	// Operation Serializer
+	err = stack.Serialize.Add(&serializeRequest{
+		GetPath: getPath,
+		Method:  method,
+	}, middleware.After)
+	if err != nil {
+		return err
+	}
+
+	// Operation endpoint resolver
+	err = stack.Serialize.Insert(&resolveEndpoint{
+		Endpoint:     options.Endpoint,
+		EndpointMode: options.EndpointMode,
+	}, "OperationSerializer", middleware.Before)
+	if err != nil {
+		return err
+	}
+
+	// Operation Deserializer
+	err = stack.Deserialize.Add(&deserializeResponse{
+		GetOutput: getOutput,
+	}, middleware.After)
+	if err != nil {
+		return err
+	}
+
+	err = stack.Deserialize.Add(&smithyhttp.RequestResponseLogger{
+		LogRequest:          options.ClientLogMode.IsRequest(),
+		LogRequestWithBody:  options.ClientLogMode.IsRequestWithBody(),
+		LogResponse:         options.ClientLogMode.IsResponse(),
+		LogResponseWithBody: options.ClientLogMode.IsResponseWithBody(),
+	}, middleware.After)
+	if err != nil {
+		return err
+	}
+
+	err = addSetLoggerMiddleware(stack, options)
+	if err != nil {
+		return err
+	}
+
+	if err := addProtocolFinalizerMiddlewares(stack, options, operation); err != nil {
+		return fmt.Errorf("add protocol finalizers: %w", err)
+	}
+
+	// Retry support
+	return retry.AddRetryMiddlewares(stack, retry.AddRetryMiddlewaresOptions{
+		Retryer:          options.Retryer,
+		LogRetryAttempts: options.ClientLogMode.IsRetries(),
+	})
+}
+
+func addSetLoggerMiddleware(stack *middleware.Stack, o Options) error {
+	return middleware.AddSetLoggerMiddleware(stack, o.Logger)
+}
+
+type serializeRequest struct {
+	GetPath func(interface{}) (string, error)
+	Method  string
+}
+
+func (*serializeRequest) ID() string {
+	return "OperationSerializer"
+}
+
+func (m *serializeRequest) HandleSerialize(
+	ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler,
+) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	request, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, fmt.Errorf("unknown transport type %T", in.Request)
+	}
+
+	reqPath, err := m.GetPath(in.Parameters)
+	if err != nil {
+		return out, metadata, fmt.Errorf("unable to get request URL path, %w", err)
+	}
+
+	request.Request.URL.Path = reqPath
+	request.Request.Method = m.Method
+
+	return next.HandleSerialize(ctx, in)
+}
+
+type deserializeResponse struct {
+	GetOutput func(*smithyhttp.Response) (interface{}, error)
+}
+
+func (*deserializeResponse) ID() string {
+	return "OperationDeserializer"
+}
+
+func (m *deserializeResponse) HandleDeserialize(
+	ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler,
+) (
+	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+	out, metadata, err = next.HandleDeserialize(ctx, in)
+	if err != nil {
+		return out, metadata, err
+	}
+
+	resp, ok := out.RawResponse.(*smithyhttp.Response)
+	if !ok {
+		return out, metadata, fmt.Errorf(
+			"unexpected transport response type, %T, want %T", out.RawResponse, resp)
+	}
+	defer resp.Body.Close()
+
+	// read the full body so that any operation timeouts cleanup will not race
+	// the body being read.
+	body, err := ioutil.ReadAll(resp.Body)
+	if err != nil {
+		return out, metadata, fmt.Errorf("read response body failed, %w", err)
+	}
+	resp.Body = ioutil.NopCloser(bytes.NewReader(body))
+
+	// Anything that's not 200 |< 300 is error
+	if resp.StatusCode < 200 || resp.StatusCode >= 300 {
+		return out, metadata, &smithyhttp.ResponseError{
+			Response: resp,
+			Err:      fmt.Errorf("request to EC2 IMDS failed"),
+		}
+	}
+
+	result, err := m.GetOutput(resp)
+	if err != nil {
+		return out, metadata, fmt.Errorf(
+			"unable to get deserialized result for response, %w", err,
+		)
+	}
+	out.Result = result
+
+	return out, metadata, err
+}
+
+type resolveEndpoint struct {
+	Endpoint     string
+	EndpointMode EndpointModeState
+}
+
+func (*resolveEndpoint) ID() string {
+	return "ResolveEndpoint"
+}
+
+func (m *resolveEndpoint) HandleSerialize(
+	ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler,
+) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+
+	req, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, fmt.Errorf("unknown transport type %T", in.Request)
+	}
+
+	var endpoint string
+	if len(m.Endpoint) > 0 {
+		endpoint = m.Endpoint
+	} else {
+		switch m.EndpointMode {
+		case EndpointModeStateIPv6:
+			endpoint = defaultIPv6Endpoint
+		case EndpointModeStateIPv4:
+			fallthrough
+		case EndpointModeStateUnset:
+			endpoint = defaultIPv4Endpoint
+		default:
+			return out, metadata, fmt.Errorf("unsupported IMDS endpoint mode")
+		}
+	}
+
+	req.URL, err = url.Parse(endpoint)
+	if err != nil {
+		return out, metadata, fmt.Errorf("failed to parse endpoint URL: %w", err)
+	}
+
+	return next.HandleSerialize(ctx, in)
+}
+
+const (
+	defaultOperationTimeout = 5 * time.Second
+)
+
+// operationTimeout adds a timeout on the middleware stack if the Context the
+// stack was called with does not have a deadline. The next middleware must
+// complete before the timeout, or the context will be canceled.
+//
+// If DefaultTimeout is zero, no default timeout will be used if the Context
+// does not have a timeout.
+//
+// The next middleware must also ensure that any resources that are also
+// canceled by the stack's context are completely consumed before returning.
+// Otherwise the timeout cleanup will race the resource being consumed
+// upstream.
+type operationTimeout struct {
+	Disabled       bool
+	DefaultTimeout time.Duration
+}
+
+func (*operationTimeout) ID() string { return "OperationTimeout" }
+
+func (m *operationTimeout) HandleInitialize(
+	ctx context.Context, input middleware.InitializeInput, next middleware.InitializeHandler,
+) (
+	output middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+	if m.Disabled {
+		return next.HandleInitialize(ctx, input)
+	}
+
+	if _, ok := ctx.Deadline(); !ok && m.DefaultTimeout != 0 {
+		var cancelFn func()
+		ctx, cancelFn = context.WithTimeout(ctx, m.DefaultTimeout)
+		defer cancelFn()
+	}
+
+	return next.HandleInitialize(ctx, input)
+}
+
+// appendURIPath joins a URI path component to the existing path with `/`
+// separators between the path components. If the path being added ends with a
+// trailing `/` that slash will be maintained.
+func appendURIPath(base, add string) string {
+	reqPath := path.Join(base, add)
+	if len(add) != 0 && add[len(add)-1] == '/' {
+		reqPath += "/"
+	}
+	return reqPath
+}
+
+func addProtocolFinalizerMiddlewares(stack *middleware.Stack, options Options, operation string) error {
+	if err := stack.Finalize.Add(&resolveAuthSchemeMiddleware{operation: operation, options: options}, middleware.Before); err != nil {
+		return fmt.Errorf("add ResolveAuthScheme: %w", err)
+	}
+	if err := stack.Finalize.Insert(&getIdentityMiddleware{options: options}, "ResolveAuthScheme", middleware.After); err != nil {
+		return fmt.Errorf("add GetIdentity: %w", err)
+	}
+	if err := stack.Finalize.Insert(&resolveEndpointV2Middleware{options: options}, "GetIdentity", middleware.After); err != nil {
+		return fmt.Errorf("add ResolveEndpointV2: %w", err)
+	}
+	if err := stack.Finalize.Insert(&signRequestMiddleware{}, "ResolveEndpointV2", middleware.After); err != nil {
+		return fmt.Errorf("add Signing: %w", err)
+	}
+	return nil
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/token_provider.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/token_provider.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/token_provider.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/token_provider.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,261 @@
+package imds
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/aws/smithy-go"
+	"github.com/aws/smithy-go/logging"
+	"net/http"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"github.com/aws/smithy-go/middleware"
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+const (
+	// Headers for Token and TTL
+	tokenHeader     = "x-aws-ec2-metadata-token"
+	defaultTokenTTL = 5 * time.Minute
+)
+
+type tokenProvider struct {
+	client   *Client
+	tokenTTL time.Duration
+
+	token    *apiToken
+	tokenMux sync.RWMutex
+
+	disabled uint32 // Atomic updated
+}
+
+func newTokenProvider(client *Client, ttl time.Duration) *tokenProvider {
+	return &tokenProvider{
+		client:   client,
+		tokenTTL: ttl,
+	}
+}
+
+// apiToken provides the API token used by all operation calls for th EC2
+// Instance metadata service.
+type apiToken struct {
+	token   string
+	expires time.Time
+}
+
+var timeNow = time.Now
+
+// Expired returns if the token is expired.
+func (t *apiToken) Expired() bool {
+	// Calling Round(0) on the current time will truncate the monotonic reading only. Ensures credential expiry
+	// time is always based on reported wall-clock time.
+	return timeNow().Round(0).After(t.expires)
+}
+
+func (t *tokenProvider) ID() string { return "APITokenProvider" }
+
+// HandleFinalize is the finalize stack middleware, that if the token provider is
+// enabled, will attempt to add the cached API token to the request. If the API
+// token is not cached, it will be retrieved in a separate API call, getToken.
+//
+// For retry attempts, handler must be added after attempt retryer.
+//
+// If request for getToken fails the token provider may be disabled from future
+// requests, depending on the response status code.
+func (t *tokenProvider) HandleFinalize(
+	ctx context.Context, input middleware.FinalizeInput, next middleware.FinalizeHandler,
+) (
+	out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
+) {
+	if t.fallbackEnabled() && !t.enabled() {
+		// short-circuits to insecure data flow if token provider is disabled.
+		return next.HandleFinalize(ctx, input)
+	}
+
+	req, ok := input.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, fmt.Errorf("unexpected transport request type %T", input.Request)
+	}
+
+	tok, err := t.getToken(ctx)
+	if err != nil {
+		// If the error allows the token to downgrade to insecure flow allow that.
+		var bypassErr *bypassTokenRetrievalError
+		if errors.As(err, &bypassErr) {
+			return next.HandleFinalize(ctx, input)
+		}
+
+		return out, metadata, fmt.Errorf("failed to get API token, %w", err)
+	}
+
+	req.Header.Set(tokenHeader, tok.token)
+
+	return next.HandleFinalize(ctx, input)
+}
+
+// HandleDeserialize is the deserialize stack middleware for determining if the
+// operation the token provider is decorating failed because of a 401
+// unauthorized status code. If the operation failed for that reason the token
+// provider needs to be re-enabled so that it can start adding the API token to
+// operation calls.
+func (t *tokenProvider) HandleDeserialize(
+	ctx context.Context, input middleware.DeserializeInput, next middleware.DeserializeHandler,
+) (
+	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+	out, metadata, err = next.HandleDeserialize(ctx, input)
+	if err == nil {
+		return out, metadata, err
+	}
+
+	resp, ok := out.RawResponse.(*smithyhttp.Response)
+	if !ok {
+		return out, metadata, fmt.Errorf("expect HTTP transport, got %T", out.RawResponse)
+	}
+
+	if resp.StatusCode == http.StatusUnauthorized { // unauthorized
+		t.enable()
+		err = &retryableError{Err: err, isRetryable: true}
+	}
+
+	return out, metadata, err
+}
+
+func (t *tokenProvider) getToken(ctx context.Context) (tok *apiToken, err error) {
+	if t.fallbackEnabled() && !t.enabled() {
+		return nil, &bypassTokenRetrievalError{
+			Err: fmt.Errorf("cannot get API token, provider disabled"),
+		}
+	}
+
+	t.tokenMux.RLock()
+	tok = t.token
+	t.tokenMux.RUnlock()
+
+	if tok != nil && !tok.Expired() {
+		return tok, nil
+	}
+
+	tok, err = t.updateToken(ctx)
+	if err != nil {
+		return nil, err
+	}
+
+	return tok, nil
+}
+
+func (t *tokenProvider) updateToken(ctx context.Context) (*apiToken, error) {
+	t.tokenMux.Lock()
+	defer t.tokenMux.Unlock()
+
+	// Prevent multiple requests to update retrieving the token.
+	if t.token != nil && !t.token.Expired() {
+		tok := t.token
+		return tok, nil
+	}
+
+	result, err := t.client.getToken(ctx, &getTokenInput{
+		TokenTTL: t.tokenTTL,
+	})
+	if err != nil {
+		var statusErr interface{ HTTPStatusCode() int }
+		if errors.As(err, &statusErr) {
+			switch statusErr.HTTPStatusCode() {
+			// Disable future get token if failed because of 403, 404, or 405
+			case http.StatusForbidden,
+				http.StatusNotFound,
+				http.StatusMethodNotAllowed:
+
+				if t.fallbackEnabled() {
+					logger := middleware.GetLogger(ctx)
+					logger.Logf(logging.Warn, "falling back to IMDSv1: %v", err)
+					t.disable()
+				}
+
+			// 400 errors are terminal, and need to be upstreamed
+			case http.StatusBadRequest:
+				return nil, err
+			}
+		}
+
+		// Disable if request send failed or timed out getting response
+		var re *smithyhttp.RequestSendError
+		var ce *smithy.CanceledError
+		if errors.As(err, &re) || errors.As(err, &ce) {
+			atomic.StoreUint32(&t.disabled, 1)
+		}
+
+		if !t.fallbackEnabled() {
+			// NOTE: getToken() is an implementation detail of some outer operation
+			// (e.g. GetMetadata). It has its own retries that have already been exhausted.
+			// Mark the underlying error as a terminal error.
+			err = &retryableError{Err: err, isRetryable: false}
+			return nil, err
+		}
+
+		// Token couldn't be retrieved, fallback to IMDSv1 insecure flow for this request
+		// and allow the request to proceed. Future requests _may_ re-attempt fetching a
+		// token if not disabled.
+		return nil, &bypassTokenRetrievalError{Err: err}
+	}
+
+	tok := &apiToken{
+		token:   result.Token,
+		expires: timeNow().Add(result.TokenTTL),
+	}
+	t.token = tok
+
+	return tok, nil
+}
+
+// enabled returns if the token provider is current enabled or not.
+func (t *tokenProvider) enabled() bool {
+	return atomic.LoadUint32(&t.disabled) == 0
+}
+
+// fallbackEnabled returns false if EnableFallback is [aws.FalseTernary], true otherwise
+func (t *tokenProvider) fallbackEnabled() bool {
+	switch t.client.options.EnableFallback {
+	case aws.FalseTernary:
+		return false
+	default:
+		return true
+	}
+}
+
+// disable disables the token provider and it will no longer attempt to inject
+// the token, nor request updates.
+func (t *tokenProvider) disable() {
+	atomic.StoreUint32(&t.disabled, 1)
+}
+
+// enable enables the token provide to start refreshing tokens, and adding them
+// to the pending request.
+func (t *tokenProvider) enable() {
+	t.tokenMux.Lock()
+	t.token = nil
+	t.tokenMux.Unlock()
+	atomic.StoreUint32(&t.disabled, 0)
+}
+
+type bypassTokenRetrievalError struct {
+	Err error
+}
+
+func (e *bypassTokenRetrievalError) Error() string {
+	return fmt.Sprintf("bypass token retrieval, %v", e.Err)
+}
+
+func (e *bypassTokenRetrievalError) Unwrap() error { return e.Err }
+
+type retryableError struct {
+	Err         error
+	isRetryable bool
+}
+
+func (e *retryableError) RetryableError() bool { return e.isRetryable }
+
+func (e *retryableError) Error() string { return e.Err.Error() }
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/auth.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/auth.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/auth.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/auth.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,45 @@
+package auth
+
+import (
+	"github.com/aws/smithy-go/auth"
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// HTTPAuthScheme is the SDK's internal implementation of smithyhttp.AuthScheme
+// for pre-existing implementations where the signer was added to client
+// config. SDK clients will key off of this type and ensure per-operation
+// updates to those signers persist on the scheme itself.
+type HTTPAuthScheme struct {
+	schemeID string
+	signer   smithyhttp.Signer
+}
+
+var _ smithyhttp.AuthScheme = (*HTTPAuthScheme)(nil)
+
+// NewHTTPAuthScheme returns an auth scheme instance with the given config.
+func NewHTTPAuthScheme(schemeID string, signer smithyhttp.Signer) *HTTPAuthScheme {
+	return &HTTPAuthScheme{
+		schemeID: schemeID,
+		signer:   signer,
+	}
+}
+
+// SchemeID identifies the auth scheme.
+func (s *HTTPAuthScheme) SchemeID() string {
+	return s.schemeID
+}
+
+// IdentityResolver gets the identity resolver for the auth scheme.
+func (s *HTTPAuthScheme) IdentityResolver(o auth.IdentityResolverOptions) auth.IdentityResolver {
+	return o.GetIdentityResolver(s.schemeID)
+}
+
+// Signer gets the signer for the auth scheme.
+func (s *HTTPAuthScheme) Signer() smithyhttp.Signer {
+	return s.signer
+}
+
+// WithSigner returns a new instance of the auth scheme with the updated signer.
+func (s *HTTPAuthScheme) WithSigner(signer smithyhttp.Signer) *HTTPAuthScheme {
+	return NewHTTPAuthScheme(s.schemeID, signer)
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/scheme.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/scheme.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/scheme.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/scheme.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,191 @@
+package auth
+
+import (
+	"context"
+	"fmt"
+
+	smithy "github.com/aws/smithy-go"
+	"github.com/aws/smithy-go/middleware"
+)
+
+// SigV4 is a constant representing
+// Authentication Scheme Signature Version 4
+const SigV4 = "sigv4"
+
+// SigV4A is a constant representing
+// Authentication Scheme Signature Version 4A
+const SigV4A = "sigv4a"
+
+// SigV4S3Express identifies the S3 S3Express auth scheme.
+const SigV4S3Express = "sigv4-s3express"
+
+// None is a constant representing the
+// None Authentication Scheme
+const None = "none"
+
+// SupportedSchemes is a data structure
+// that indicates the list of supported AWS
+// authentication schemes
+var SupportedSchemes = map[string]bool{
+	SigV4:          true,
+	SigV4A:         true,
+	SigV4S3Express: true,
+	None:           true,
+}
+
+// AuthenticationScheme is a representation of
+// AWS authentication schemes
+type AuthenticationScheme interface {
+	isAuthenticationScheme()
+}
+
+// AuthenticationSchemeV4 is a AWS SigV4 representation
+type AuthenticationSchemeV4 struct {
+	Name                  string
+	SigningName           *string
+	SigningRegion         *string
+	DisableDoubleEncoding *bool
+}
+
+func (a *AuthenticationSchemeV4) isAuthenticationScheme() {}
+
+// AuthenticationSchemeV4A is a AWS SigV4A representation
+type AuthenticationSchemeV4A struct {
+	Name                  string
+	SigningName           *string
+	SigningRegionSet      []string
+	DisableDoubleEncoding *bool
+}
+
+func (a *AuthenticationSchemeV4A) isAuthenticationScheme() {}
+
+// AuthenticationSchemeNone is a representation for the none auth scheme
+type AuthenticationSchemeNone struct{}
+
+func (a *AuthenticationSchemeNone) isAuthenticationScheme() {}
+
+// NoAuthenticationSchemesFoundError is used in signaling
+// that no authentication schemes have been specified.
+type NoAuthenticationSchemesFoundError struct{}
+
+func (e *NoAuthenticationSchemesFoundError) Error() string {
+	return fmt.Sprint("No authentication schemes specified.")
+}
+
+// UnSupportedAuthenticationSchemeSpecifiedError is used in
+// signaling that only unsupported authentication schemes
+// were specified.
+type UnSupportedAuthenticationSchemeSpecifiedError struct {
+	UnsupportedSchemes []string
+}
+
+func (e *UnSupportedAuthenticationSchemeSpecifiedError) Error() string {
+	return fmt.Sprint("Unsupported authentication scheme specified.")
+}
+
+// GetAuthenticationSchemes extracts the relevant authentication scheme data
+// into a custom strongly typed Go data structure.
+func GetAuthenticationSchemes(p *smithy.Properties) ([]AuthenticationScheme, error) {
+	var result []AuthenticationScheme
+	if !p.Has("authSchemes") {
+		return nil, &NoAuthenticationSchemesFoundError{}
+	}
+
+	authSchemes, _ := p.Get("authSchemes").([]interface{})
+
+	var unsupportedSchemes []string
+	for _, scheme := range authSchemes {
+		authScheme, _ := scheme.(map[string]interface{})
+
+		version := authScheme["name"].(string)
+		switch version {
+		case SigV4, SigV4S3Express:
+			v4Scheme := AuthenticationSchemeV4{
+				Name:                  version,
+				SigningName:           getSigningName(authScheme),
+				SigningRegion:         getSigningRegion(authScheme),
+				DisableDoubleEncoding: getDisableDoubleEncoding(authScheme),
+			}
+			result = append(result, AuthenticationScheme(&v4Scheme))
+		case SigV4A:
+			v4aScheme := AuthenticationSchemeV4A{
+				Name:                  SigV4A,
+				SigningName:           getSigningName(authScheme),
+				SigningRegionSet:      getSigningRegionSet(authScheme),
+				DisableDoubleEncoding: getDisableDoubleEncoding(authScheme),
+			}
+			result = append(result, AuthenticationScheme(&v4aScheme))
+		case None:
+			noneScheme := AuthenticationSchemeNone{}
+			result = append(result, AuthenticationScheme(&noneScheme))
+		default:
+			unsupportedSchemes = append(unsupportedSchemes, authScheme["name"].(string))
+			continue
+		}
+	}
+
+	if len(result) == 0 {
+		return nil, &UnSupportedAuthenticationSchemeSpecifiedError{
+			UnsupportedSchemes: unsupportedSchemes,
+		}
+	}
+
+	return result, nil
+}
+
+type disableDoubleEncoding struct{}
+
+// SetDisableDoubleEncoding sets or modifies the disable double encoding option
+// on the context.
+//
+// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues
+// to clear all stack values.
+func SetDisableDoubleEncoding(ctx context.Context, value bool) context.Context {
+	return middleware.WithStackValue(ctx, disableDoubleEncoding{}, value)
+}
+
+// GetDisableDoubleEncoding retrieves the disable double encoding option
+// from the context.
+//
+// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues
+// to clear all stack values.
+func GetDisableDoubleEncoding(ctx context.Context) (value bool, ok bool) {
+	value, ok = middleware.GetStackValue(ctx, disableDoubleEncoding{}).(bool)
+	return value, ok
+}
+
+func getSigningName(authScheme map[string]interface{}) *string {
+	signingName, ok := authScheme["signingName"].(string)
+	if !ok || signingName == "" {
+		return nil
+	}
+	return &signingName
+}
+
+func getSigningRegionSet(authScheme map[string]interface{}) []string {
+	untypedSigningRegionSet, ok := authScheme["signingRegionSet"].([]interface{})
+	if !ok {
+		return nil
+	}
+	signingRegionSet := []string{}
+	for _, item := range untypedSigningRegionSet {
+		signingRegionSet = append(signingRegionSet, item.(string))
+	}
+	return signingRegionSet
+}
+
+func getSigningRegion(authScheme map[string]interface{}) *string {
+	signingRegion, ok := authScheme["signingRegion"].(string)
+	if !ok || signingRegion == "" {
+		return nil
+	}
+	return &signingRegion
+}
+
+func getDisableDoubleEncoding(authScheme map[string]interface{}) *bool {
+	disableDoubleEncoding, ok := authScheme["disableDoubleEncoding"].(bool)
+	if !ok {
+		return nil
+	}
+	return &disableDoubleEncoding
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/smithy/bearer_token_adapter.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/smithy/bearer_token_adapter.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/smithy/bearer_token_adapter.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/smithy/bearer_token_adapter.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,43 @@
+package smithy
+
+import (
+	"context"
+	"fmt"
+	"time"
+
+	"github.com/aws/smithy-go"
+	"github.com/aws/smithy-go/auth"
+	"github.com/aws/smithy-go/auth/bearer"
+)
+
+// BearerTokenAdapter adapts smithy bearer.Token to smithy auth.Identity.
+type BearerTokenAdapter struct {
+	Token bearer.Token
+}
+
+var _ auth.Identity = (*BearerTokenAdapter)(nil)
+
+// Expiration returns the time of expiration for the token.
+func (v *BearerTokenAdapter) Expiration() time.Time {
+	return v.Token.Expires
+}
+
+// BearerTokenProviderAdapter adapts smithy bearer.TokenProvider to smithy
+// auth.IdentityResolver.
+type BearerTokenProviderAdapter struct {
+	Provider bearer.TokenProvider
+}
+
+var _ (auth.IdentityResolver) = (*BearerTokenProviderAdapter)(nil)
+
+// GetIdentity retrieves a bearer token using the underlying provider.
+func (v *BearerTokenProviderAdapter) GetIdentity(ctx context.Context, _ smithy.Properties) (
+	auth.Identity, error,
+) {
+	token, err := v.Provider.RetrieveBearerToken(ctx)
+	if err != nil {
+		return nil, fmt.Errorf("get token: %w", err)
+	}
+
+	return &BearerTokenAdapter{Token: token}, nil
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/smithy/bearer_token_signer_adapter.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/smithy/bearer_token_signer_adapter.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/smithy/bearer_token_signer_adapter.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/smithy/bearer_token_signer_adapter.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,35 @@
+package smithy
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/aws/smithy-go"
+	"github.com/aws/smithy-go/auth"
+	"github.com/aws/smithy-go/auth/bearer"
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// BearerTokenSignerAdapter adapts smithy bearer.Signer to smithy http
+// auth.Signer.
+type BearerTokenSignerAdapter struct {
+	Signer bearer.Signer
+}
+
+var _ (smithyhttp.Signer) = (*BearerTokenSignerAdapter)(nil)
+
+// SignRequest signs the request with the provided bearer token.
+func (v *BearerTokenSignerAdapter) SignRequest(ctx context.Context, r *smithyhttp.Request, identity auth.Identity, _ smithy.Properties) error {
+	ca, ok := identity.(*BearerTokenAdapter)
+	if !ok {
+		return fmt.Errorf("unexpected identity type: %T", identity)
+	}
+
+	signed, err := v.Signer.SignWithBearerToken(ctx, ca.Token, r)
+	if err != nil {
+		return fmt.Errorf("sign request: %w", err)
+	}
+
+	*r = *signed.(*smithyhttp.Request)
+	return nil
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/smithy/credentials_adapter.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/smithy/credentials_adapter.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/smithy/credentials_adapter.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/smithy/credentials_adapter.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,46 @@
+package smithy
+
+import (
+	"context"
+	"fmt"
+	"time"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/aws/smithy-go"
+	"github.com/aws/smithy-go/auth"
+)
+
+// CredentialsAdapter adapts aws.Credentials to auth.Identity.
+type CredentialsAdapter struct {
+	Credentials aws.Credentials
+}
+
+var _ auth.Identity = (*CredentialsAdapter)(nil)
+
+// Expiration returns the time of expiration for the credentials.
+func (v *CredentialsAdapter) Expiration() time.Time {
+	return v.Credentials.Expires
+}
+
+// CredentialsProviderAdapter adapts aws.CredentialsProvider to auth.IdentityResolver.
+type CredentialsProviderAdapter struct {
+	Provider aws.CredentialsProvider
+}
+
+var _ (auth.IdentityResolver) = (*CredentialsProviderAdapter)(nil)
+
+// GetIdentity retrieves AWS credentials using the underlying provider.
+func (v *CredentialsProviderAdapter) GetIdentity(ctx context.Context, _ smithy.Properties) (
+	auth.Identity, error,
+) {
+	if v.Provider == nil {
+		return &CredentialsAdapter{Credentials: aws.Credentials{}}, nil
+	}
+
+	creds, err := v.Provider.Retrieve(ctx)
+	if err != nil {
+		return nil, fmt.Errorf("get credentials: %w", err)
+	}
+
+	return &CredentialsAdapter{Credentials: creds}, nil
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/smithy/smithy.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/smithy/smithy.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/smithy/smithy.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/smithy/smithy.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,2 @@
+// Package smithy adapts concrete AWS auth and signing types to the generic smithy versions.
+package smithy
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/smithy/v4signer_adapter.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/smithy/v4signer_adapter.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/smithy/v4signer_adapter.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/smithy/v4signer_adapter.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,57 @@
+package smithy
+
+import (
+	"context"
+	"fmt"
+
+	v4 "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+	internalcontext "github.com/aws/aws-sdk-go-v2/internal/context"
+	"github.com/aws/aws-sdk-go-v2/internal/sdk"
+	"github.com/aws/smithy-go"
+	"github.com/aws/smithy-go/auth"
+	"github.com/aws/smithy-go/logging"
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// V4SignerAdapter adapts v4.HTTPSigner to smithy http.Signer.
+type V4SignerAdapter struct {
+	Signer     v4.HTTPSigner
+	Logger     logging.Logger
+	LogSigning bool
+}
+
+var _ (smithyhttp.Signer) = (*V4SignerAdapter)(nil)
+
+// SignRequest signs the request with the provided identity.
+func (v *V4SignerAdapter) SignRequest(ctx context.Context, r *smithyhttp.Request, identity auth.Identity, props smithy.Properties) error {
+	ca, ok := identity.(*CredentialsAdapter)
+	if !ok {
+		return fmt.Errorf("unexpected identity type: %T", identity)
+	}
+
+	name, ok := smithyhttp.GetSigV4SigningName(&props)
+	if !ok {
+		return fmt.Errorf("sigv4 signing name is required")
+	}
+
+	region, ok := smithyhttp.GetSigV4SigningRegion(&props)
+	if !ok {
+		return fmt.Errorf("sigv4 signing region is required")
+	}
+
+	hash := v4.GetPayloadHash(ctx)
+	signingTime := sdk.NowTime()
+	skew := internalcontext.GetAttemptSkewContext(ctx)
+	signingTime = signingTime.Add(skew)
+	err := v.Signer.SignHTTP(ctx, ca.Credentials, r.Request, hash, name, region, signingTime, func(o *v4.SignerOptions) {
+		o.DisableURIPathEscaping, _ = smithyhttp.GetDisableDoubleEncoding(&props)
+
+		o.Logger = v.Logger
+		o.LogSigning = v.LogSigning
+	})
+	if err != nil {
+		return fmt.Errorf("sign http: %w", err)
+	}
+
+	return nil
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/CHANGELOG.md 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/CHANGELOG.md
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/CHANGELOG.md	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/CHANGELOG.md	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,320 @@
+# v1.3.15 (2024-07-10.2)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.14 (2024-07-10)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.13 (2024-06-28)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.12 (2024-06-19)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.11 (2024-06-18)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.10 (2024-06-17)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.9 (2024-06-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.8 (2024-06-03)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.7 (2024-05-16)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.6 (2024-05-15)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.5 (2024-03-29)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.4 (2024-03-18)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.3 (2024-03-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.2 (2024-02-23)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.1 (2024-02-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.0 (2024-02-13)
+
+* **Feature**: Bump minimum Go version to 1.20 per our language support policy.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.2.10 (2024-01-04)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.2.9 (2023-12-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.2.8 (2023-12-01)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.2.7 (2023-11-30)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.2.6 (2023-11-29)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.2.5 (2023-11-28.2)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.2.4 (2023-11-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.2.3 (2023-11-15)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.2.2 (2023-11-09)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.2.1 (2023-11-01)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.2.0 (2023-10-31)
+
+* **Feature**: **BREAKING CHANGE**: Bump minimum go version to 1.19 per the revised [go version support policy](https://aws.amazon.com/blogs/developer/aws-sdk-for-go-aligns-with-go-release-policy-on-supported-runtimes/).
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.43 (2023-10-12)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.42 (2023-10-06)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.41 (2023-08-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.40 (2023-08-18)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.39 (2023-08-17)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.38 (2023-08-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.37 (2023-07-31)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.36 (2023-07-28)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.35 (2023-07-13)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.34 (2023-06-13)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.33 (2023-04-24)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.32 (2023-04-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.31 (2023-03-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.30 (2023-03-10)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.29 (2023-02-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.28 (2023-02-03)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.27 (2022-12-15)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.26 (2022-12-02)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.25 (2022-10-24)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.24 (2022-10-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.23 (2022-09-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.22 (2022-09-14)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.21 (2022-09-02)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.20 (2022-08-31)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.19 (2022-08-29)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.18 (2022-08-11)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.17 (2022-08-09)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.16 (2022-08-08)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.15 (2022-08-01)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.14 (2022-07-05)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.13 (2022-06-29)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.12 (2022-06-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.11 (2022-05-17)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.10 (2022-04-25)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.9 (2022-03-30)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.8 (2022-03-24)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.7 (2022-03-23)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.6 (2022-03-08)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.5 (2022-02-24)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.4 (2022-01-14)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.3 (2022-01-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.2 (2021-12-02)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.1 (2021-11-19)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.0 (2021-11-06)
+
+* **Feature**: The SDK now supports configuration of FIPS and DualStack endpoints using environment variables, shared configuration, or programmatically.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.0.7 (2021-10-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.0.6 (2021-10-11)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.0.5 (2021-09-17)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.0.4 (2021-08-27)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.0.3 (2021-08-19)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.0.2 (2021-08-04)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.0.1 (2021-07-15)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.0.0 (2021-06-25)
+
+* **Release**: Release new modules
+* **Dependency Update**: Updated to the latest SDK module versions
+
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/LICENSE.txt 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/LICENSE.txt
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/LICENSE.txt	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/LICENSE.txt	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/config.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/config.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/config.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/config.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,65 @@
+package configsources
+
+import (
+	"context"
+	"github.com/aws/aws-sdk-go-v2/aws"
+)
+
+// EnableEndpointDiscoveryProvider is an interface for retrieving external configuration value
+// for Enable Endpoint Discovery
+type EnableEndpointDiscoveryProvider interface {
+	GetEnableEndpointDiscovery(ctx context.Context) (value aws.EndpointDiscoveryEnableState, found bool, err error)
+}
+
+// ResolveEnableEndpointDiscovery extracts the first instance of a EnableEndpointDiscoveryProvider from the config slice.
+// Additionally returns a aws.EndpointDiscoveryEnableState to indicate if the value was found in provided configs,
+// and error if one is encountered.
+func ResolveEnableEndpointDiscovery(ctx context.Context, configs []interface{}) (value aws.EndpointDiscoveryEnableState, found bool, err error) {
+	for _, cfg := range configs {
+		if p, ok := cfg.(EnableEndpointDiscoveryProvider); ok {
+			value, found, err = p.GetEnableEndpointDiscovery(ctx)
+			if err != nil || found {
+				break
+			}
+		}
+	}
+	return
+}
+
+// UseDualStackEndpointProvider is an interface for retrieving external configuration values for UseDualStackEndpoint
+type UseDualStackEndpointProvider interface {
+	GetUseDualStackEndpoint(context.Context) (value aws.DualStackEndpointState, found bool, err error)
+}
+
+// ResolveUseDualStackEndpoint extracts the first instance of a UseDualStackEndpoint from the config slice.
+// Additionally returns a boolean to indicate if the value was found in provided configs, and error if one is encountered.
+func ResolveUseDualStackEndpoint(ctx context.Context, configs []interface{}) (value aws.DualStackEndpointState, found bool, err error) {
+	for _, cfg := range configs {
+		if p, ok := cfg.(UseDualStackEndpointProvider); ok {
+			value, found, err = p.GetUseDualStackEndpoint(ctx)
+			if err != nil || found {
+				break
+			}
+		}
+	}
+	return
+}
+
+// UseFIPSEndpointProvider is an interface for retrieving external configuration values for UseFIPSEndpoint
+type UseFIPSEndpointProvider interface {
+	GetUseFIPSEndpoint(context.Context) (value aws.FIPSEndpointState, found bool, err error)
+}
+
+// ResolveUseFIPSEndpoint extracts the first instance of a UseFIPSEndpointProvider from the config slice.
+// Additionally, returns a boolean to indicate if the value was found in provided configs, and error if one is encountered.
+func ResolveUseFIPSEndpoint(ctx context.Context, configs []interface{}) (value aws.FIPSEndpointState, found bool, err error) {
+	for _, cfg := range configs {
+		if p, ok := cfg.(UseFIPSEndpointProvider); ok {
+			value, found, err = p.GetUseFIPSEndpoint(ctx)
+			if err != nil || found {
+				break
+			}
+		}
+	}
+	return
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/endpoints.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/endpoints.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/endpoints.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/endpoints.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,57 @@
+package configsources
+
+import (
+	"context"
+)
+
+// ServiceBaseEndpointProvider is needed to search for all providers
+// that provide a configured service endpoint
+type ServiceBaseEndpointProvider interface {
+	GetServiceBaseEndpoint(ctx context.Context, sdkID string) (string, bool, error)
+}
+
+// IgnoreConfiguredEndpointsProvider is needed to search for all providers
+// that provide a flag to disable configured endpoints.
+//
+// Currently duplicated from github.com/aws/aws-sdk-go-v2/config because
+// service packages cannot import github.com/aws/aws-sdk-go-v2/config
+// due to result import cycle error.
+type IgnoreConfiguredEndpointsProvider interface {
+	GetIgnoreConfiguredEndpoints(ctx context.Context) (bool, bool, error)
+}
+
+// GetIgnoreConfiguredEndpoints is used in knowing when to disable configured
+// endpoints feature.
+//
+// Currently duplicated from github.com/aws/aws-sdk-go-v2/config because
+// service packages cannot import github.com/aws/aws-sdk-go-v2/config
+// due to result import cycle error.
+func GetIgnoreConfiguredEndpoints(ctx context.Context, configs []interface{}) (value bool, found bool, err error) {
+	for _, cfg := range configs {
+		if p, ok := cfg.(IgnoreConfiguredEndpointsProvider); ok {
+			value, found, err = p.GetIgnoreConfiguredEndpoints(ctx)
+			if err != nil || found {
+				break
+			}
+		}
+	}
+	return
+}
+
+// ResolveServiceBaseEndpoint is used to retrieve service endpoints from configured sources
+// while allowing for configured endpoints to be disabled
+func ResolveServiceBaseEndpoint(ctx context.Context, sdkID string, configs []interface{}) (value string, found bool, err error) {
+	if val, found, _ := GetIgnoreConfiguredEndpoints(ctx, configs); found && val {
+		return "", false, nil
+	}
+
+	for _, cs := range configs {
+		if p, ok := cs.(ServiceBaseEndpointProvider); ok {
+			value, found, err = p.GetServiceBaseEndpoint(context.Background(), sdkID)
+			if err != nil || found {
+				break
+			}
+		}
+	}
+	return
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/go_module_metadata.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/go_module_metadata.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/go_module_metadata.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/go_module_metadata.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,6 @@
+// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT.
+
+package configsources
+
+// goModuleVersion is the tagged release for this module
+const goModuleVersion = "1.3.15"
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/internal/context/context.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/internal/context/context.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/internal/context/context.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/internal/context/context.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,52 @@
+package context
+
+import (
+	"context"
+	"time"
+
+	"github.com/aws/smithy-go/middleware"
+)
+
+type s3BackendKey struct{}
+type checksumInputAlgorithmKey struct{}
+type clockSkew struct{}
+
+const (
+	// S3BackendS3Express identifies the S3Express backend
+	S3BackendS3Express = "S3Express"
+)
+
+// SetS3Backend stores the resolved endpoint backend within the request
+// context, which is required for a variety of custom S3 behaviors.
+func SetS3Backend(ctx context.Context, typ string) context.Context {
+	return middleware.WithStackValue(ctx, s3BackendKey{}, typ)
+}
+
+// GetS3Backend retrieves the stored endpoint backend within the context.
+func GetS3Backend(ctx context.Context) string {
+	v, _ := middleware.GetStackValue(ctx, s3BackendKey{}).(string)
+	return v
+}
+
+// SetChecksumInputAlgorithm sets the request checksum algorithm on the
+// context.
+func SetChecksumInputAlgorithm(ctx context.Context, value string) context.Context {
+	return middleware.WithStackValue(ctx, checksumInputAlgorithmKey{}, value)
+}
+
+// GetChecksumInputAlgorithm returns the checksum algorithm from the context.
+func GetChecksumInputAlgorithm(ctx context.Context) string {
+	v, _ := middleware.GetStackValue(ctx, checksumInputAlgorithmKey{}).(string)
+	return v
+}
+
+// SetAttemptSkewContext sets the clock skew value on the context
+func SetAttemptSkewContext(ctx context.Context, v time.Duration) context.Context {
+	return middleware.WithStackValue(ctx, clockSkew{}, v)
+}
+
+// GetAttemptSkewContext gets the clock skew value from the context
+func GetAttemptSkewContext(ctx context.Context) time.Duration {
+	x, _ := middleware.GetStackValue(ctx, clockSkew{}).(time.Duration)
+	return x
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/arn.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/arn.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/arn.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/arn.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,94 @@
+package awsrulesfn
+
+import (
+	"strings"
+)
+
+// ARN provides AWS ARN components broken out into a data structure.
+type ARN struct {
+	Partition  string
+	Service    string
+	Region     string
+	AccountId  string
+	ResourceId OptionalStringSlice
+}
+
+const (
+	arnDelimiters      = ":"
+	resourceDelimiters = "/:"
+	arnSections        = 6
+	arnPrefix          = "arn:"
+
+	// zero-indexed
+	sectionPartition = 1
+	sectionService   = 2
+	sectionRegion    = 3
+	sectionAccountID = 4
+	sectionResource  = 5
+)
+
+// ParseARN returns an [ARN] value parsed from the input string provided. If
+// the ARN cannot be parsed nil will be returned, and error added to
+// [ErrorCollector].
+func ParseARN(input string) *ARN {
+	if !strings.HasPrefix(input, arnPrefix) {
+		return nil
+	}
+
+	sections := strings.SplitN(input, arnDelimiters, arnSections)
+	if numSections := len(sections); numSections != arnSections {
+		return nil
+	}
+
+	if sections[sectionPartition] == "" {
+		return nil
+	}
+	if sections[sectionService] == "" {
+		return nil
+	}
+	if sections[sectionResource] == "" {
+		return nil
+	}
+
+	return &ARN{
+		Partition:  sections[sectionPartition],
+		Service:    sections[sectionService],
+		Region:     sections[sectionRegion],
+		AccountId:  sections[sectionAccountID],
+		ResourceId: splitResource(sections[sectionResource]),
+	}
+}
+
+// splitResource splits the resource components by the ARN resource delimiters.
+func splitResource(v string) []string {
+	var parts []string
+	var offset int
+
+	for offset <= len(v) {
+		idx := strings.IndexAny(v[offset:], "/:")
+		if idx < 0 {
+			parts = append(parts, v[offset:])
+			break
+		}
+		parts = append(parts, v[offset:idx+offset])
+		offset += idx + 1
+	}
+
+	return parts
+}
+
+// OptionalStringSlice provides a helper to safely get the index of a string
+// slice that may be out of bounds. Returns pointer to string if index is
+// valid. Otherwise returns nil.
+type OptionalStringSlice []string
+
+// Get returns a string pointer of the string at index i if the index is valid.
+// Otherwise returns nil.
+func (s OptionalStringSlice) Get(i int) *string {
+	if i < 0 || i >= len(s) {
+		return nil
+	}
+
+	v := s[i]
+	return &v
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/doc.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/doc.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/doc.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/doc.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,3 @@
+// Package awsrulesfn provides AWS focused endpoint rule functions for
+// evaluating endpoint resolution rules.
+package awsrulesfn
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/generate.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/generate.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/generate.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/generate.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,7 @@
+//go:build codegen
+// +build codegen
+
+package awsrulesfn
+
+//go:generate go run -tags codegen ./internal/partition/codegen.go -model partitions.json -output partitions.go
+//go:generate gofmt -w -s .
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/host.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/host.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/host.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/host.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,51 @@
+package awsrulesfn
+
+import (
+	"net"
+	"strings"
+
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// IsVirtualHostableS3Bucket returns if the input is a DNS compatible bucket
+// name and can be used with Amazon S3 virtual hosted style addressing. Similar
+// to [rulesfn.IsValidHostLabel] with the added restriction that the length of label
+// must be [3:63] characters long, all lowercase, and not formatted as an IP
+// address.
+func IsVirtualHostableS3Bucket(input string, allowSubDomains bool) bool {
+	// input should not be formatted as an IP address
+	// NOTE: this will technically trip up on IPv6 hosts with zone IDs, but
+	// validation further down will catch that anyway (it's guaranteed to have
+	// unfriendly characters % and : if that's the case)
+	if net.ParseIP(input) != nil {
+		return false
+	}
+
+	var labels []string
+	if allowSubDomains {
+		labels = strings.Split(input, ".")
+	} else {
+		labels = []string{input}
+	}
+
+	for _, label := range labels {
+		// validate special length constraints
+		if l := len(label); l < 3 || l > 63 {
+			return false
+		}
+
+		// Validate no capital letters
+		for _, r := range label {
+			if r >= 'A' && r <= 'Z' {
+				return false
+			}
+		}
+
+		// Validate valid host label
+		if !smithyhttp.ValidHostLabel(label) {
+			return false
+		}
+	}
+
+	return true
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partition.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partition.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partition.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partition.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,76 @@
+package awsrulesfn
+
+import "regexp"
+
+// Partition provides the metadata describing an AWS partition.
+type Partition struct {
+	ID            string                     `json:"id"`
+	Regions       map[string]RegionOverrides `json:"regions"`
+	RegionRegex   string                     `json:"regionRegex"`
+	DefaultConfig PartitionConfig            `json:"outputs"`
+}
+
+// PartitionConfig provides the endpoint metadata for an AWS region or partition.
+type PartitionConfig struct {
+	Name                 string `json:"name"`
+	DnsSuffix            string `json:"dnsSuffix"`
+	DualStackDnsSuffix   string `json:"dualStackDnsSuffix"`
+	SupportsFIPS         bool   `json:"supportsFIPS"`
+	SupportsDualStack    bool   `json:"supportsDualStack"`
+	ImplicitGlobalRegion string `json:"implicitGlobalRegion"`
+}
+
+type RegionOverrides struct {
+	Name               *string `json:"name"`
+	DnsSuffix          *string `json:"dnsSuffix"`
+	DualStackDnsSuffix *string `json:"dualStackDnsSuffix"`
+	SupportsFIPS       *bool   `json:"supportsFIPS"`
+	SupportsDualStack  *bool   `json:"supportsDualStack"`
+}
+
+const defaultPartition = "aws"
+
+func getPartition(partitions []Partition, region string) *PartitionConfig {
+	for _, partition := range partitions {
+		if v, ok := partition.Regions[region]; ok {
+			p := mergeOverrides(partition.DefaultConfig, v)
+			return &p
+		}
+	}
+
+	for _, partition := range partitions {
+		regionRegex := regexp.MustCompile(partition.RegionRegex)
+		if regionRegex.MatchString(region) {
+			v := partition.DefaultConfig
+			return &v
+		}
+	}
+
+	for _, partition := range partitions {
+		if partition.ID == defaultPartition {
+			v := partition.DefaultConfig
+			return &v
+		}
+	}
+
+	return nil
+}
+
+func mergeOverrides(into PartitionConfig, from RegionOverrides) PartitionConfig {
+	if from.Name != nil {
+		into.Name = *from.Name
+	}
+	if from.DnsSuffix != nil {
+		into.DnsSuffix = *from.DnsSuffix
+	}
+	if from.DualStackDnsSuffix != nil {
+		into.DualStackDnsSuffix = *from.DualStackDnsSuffix
+	}
+	if from.SupportsFIPS != nil {
+		into.SupportsFIPS = *from.SupportsFIPS
+	}
+	if from.SupportsDualStack != nil {
+		into.SupportsDualStack = *from.SupportsDualStack
+	}
+	return into
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,403 @@
+// Code generated by endpoint/awsrulesfn/internal/partition. DO NOT EDIT.
+
+package awsrulesfn
+
+// GetPartition returns an AWS [Partition] for the region provided. If the
+// partition cannot be determined nil will be returned.
+func GetPartition(region string) *PartitionConfig {
+	return getPartition(partitions, region)
+}
+
+var partitions = []Partition{
+	{
+		ID:          "aws",
+		RegionRegex: "^(us|eu|ap|sa|ca|me|af|il)\\-\\w+\\-\\d+$",
+		DefaultConfig: PartitionConfig{
+			Name:                 "aws",
+			DnsSuffix:            "amazonaws.com",
+			DualStackDnsSuffix:   "api.aws",
+			SupportsFIPS:         true,
+			SupportsDualStack:    true,
+			ImplicitGlobalRegion: "us-east-1",
+		},
+		Regions: map[string]RegionOverrides{
+			"af-south-1": {
+				Name:               nil,
+				DnsSuffix:          nil,
+				DualStackDnsSuffix: nil,
+				SupportsFIPS:       nil,
+				SupportsDualStack:  nil,
+			},
+			"ap-east-1": {
+				Name:               nil,
+				DnsSuffix:          nil,
+				DualStackDnsSuffix: nil,
+				SupportsFIPS:       nil,
+				SupportsDualStack:  nil,
+			},
+			"ap-northeast-1": {
+				Name:               nil,
+				DnsSuffix:          nil,
+				DualStackDnsSuffix: nil,
+				SupportsFIPS:       nil,
+				SupportsDualStack:  nil,
+			},
+			"ap-northeast-2": {
+				Name:               nil,
+				DnsSuffix:          nil,
+				DualStackDnsSuffix: nil,
+				SupportsFIPS:       nil,
+				SupportsDualStack:  nil,
+			},
+			"ap-northeast-3": {
+				Name:               nil,
+				DnsSuffix:          nil,
+				DualStackDnsSuffix: nil,
+				SupportsFIPS:       nil,
+				SupportsDualStack:  nil,
+			},
+			"ap-south-1": {
+				Name:               nil,
+				DnsSuffix:          nil,
+				DualStackDnsSuffix: nil,
+				SupportsFIPS:       nil,
+				SupportsDualStack:  nil,
+			},
+			"ap-south-2": {
+				Name:               nil,
+				DnsSuffix:          nil,
+				DualStackDnsSuffix: nil,
+				SupportsFIPS:       nil,
+				SupportsDualStack:  nil,
+			},
+			"ap-southeast-1": {
+				Name:               nil,
+				DnsSuffix:          nil,
+				DualStackDnsSuffix: nil,
+				SupportsFIPS:       nil,
+				SupportsDualStack:  nil,
+			},
+			"ap-southeast-2": {
+				Name:               nil,
+				DnsSuffix:          nil,
+				DualStackDnsSuffix: nil,
+				SupportsFIPS:       nil,
+				SupportsDualStack:  nil,
+			},
+			"ap-southeast-3": {
+				Name:               nil,
+				DnsSuffix:          nil,
+				DualStackDnsSuffix: nil,
+				SupportsFIPS:       nil,
+				SupportsDualStack:  nil,
+			},
+			"ap-southeast-4": {
+				Name:               nil,
+				DnsSuffix:          nil,
+				DualStackDnsSuffix: nil,
+				SupportsFIPS:       nil,
+				SupportsDualStack:  nil,
+			},
+			"aws-global": {
+				Name:               nil,
+				DnsSuffix:          nil,
+				DualStackDnsSuffix: nil,
+				SupportsFIPS:       nil,
+				SupportsDualStack:  nil,
+			},
+			"ca-central-1": {
+				Name:               nil,
+				DnsSuffix:          nil,
+				DualStackDnsSuffix: nil,
+				SupportsFIPS:       nil,
+				SupportsDualStack:  nil,
+			},
+			"ca-west-1": {
+				Name:               nil,
+				DnsSuffix:          nil,
+				DualStackDnsSuffix: nil,
+				SupportsFIPS:       nil,
+				SupportsDualStack:  nil,
+			},
+			"eu-central-1": {
+				Name:               nil,
+				DnsSuffix:          nil,
+				DualStackDnsSuffix: nil,
+				SupportsFIPS:       nil,
+				SupportsDualStack:  nil,
+			},
+			"eu-central-2": {
+				Name:               nil,
+				DnsSuffix:          nil,
+				DualStackDnsSuffix: nil,
+				SupportsFIPS:       nil,
+				SupportsDualStack:  nil,
+			},
+			"eu-north-1": {
+				Name:               nil,
+				DnsSuffix:          nil,
+				DualStackDnsSuffix: nil,
+				SupportsFIPS:       nil,
+				SupportsDualStack:  nil,
+			},
+			"eu-south-1": {
+				Name:               nil,
+				DnsSuffix:          nil,
+				DualStackDnsSuffix: nil,
+				SupportsFIPS:       nil,
+				SupportsDualStack:  nil,
+			},
+			"eu-south-2": {
+				Name:               nil,
+				DnsSuffix:          nil,
+				DualStackDnsSuffix: nil,
+				SupportsFIPS:       nil,
+				SupportsDualStack:  nil,
+			},
+			"eu-west-1": {
+				Name:               nil,
+				DnsSuffix:          nil,
+				DualStackDnsSuffix: nil,
+				SupportsFIPS:       nil,
+				SupportsDualStack:  nil,
+			},
+			"eu-west-2": {
+				Name:               nil,
+				DnsSuffix:          nil,
+				DualStackDnsSuffix: nil,
+				SupportsFIPS:       nil,
+				SupportsDualStack:  nil,
+			},
+			"eu-west-3": {
+				Name:               nil,
+				DnsSuffix:          nil,
+				DualStackDnsSuffix: nil,
+				SupportsFIPS:       nil,
+				SupportsDualStack:  nil,
+			},
+			"il-central-1": {
+				Name:               nil,
+				DnsSuffix:          nil,
+				DualStackDnsSuffix: nil,
+				SupportsFIPS:       nil,
+				SupportsDualStack:  nil,
+			},
+			"me-central-1": {
+				Name:               nil,
+				DnsSuffix:          nil,
+				DualStackDnsSuffix: nil,
+				SupportsFIPS:       nil,
+				SupportsDualStack:  nil,
+			},
+			"me-south-1": {
+				Name:               nil,
+				DnsSuffix:          nil,
+				DualStackDnsSuffix: nil,
+				SupportsFIPS:       nil,
+				SupportsDualStack:  nil,
+			},
+			"sa-east-1": {
+				Name:               nil,
+				DnsSuffix:          nil,
+				DualStackDnsSuffix: nil,
+				SupportsFIPS:       nil,
+				SupportsDualStack:  nil,
+			},
+			"us-east-1": {
+				Name:               nil,
+				DnsSuffix:          nil,
+				DualStackDnsSuffix: nil,
+				SupportsFIPS:       nil,
+				SupportsDualStack:  nil,
+			},
+			"us-east-2": {
+				Name:               nil,
+				DnsSuffix:          nil,
+				DualStackDnsSuffix: nil,
+				SupportsFIPS:       nil,
+				SupportsDualStack:  nil,
+			},
+			"us-west-1": {
+				Name:               nil,
+				DnsSuffix:          nil,
+				DualStackDnsSuffix: nil,
+				SupportsFIPS:       nil,
+				SupportsDualStack:  nil,
+			},
+			"us-west-2": {
+				Name:               nil,
+				DnsSuffix:          nil,
+				DualStackDnsSuffix: nil,
+				SupportsFIPS:       nil,
+				SupportsDualStack:  nil,
+			},
+		},
+	},
+	{
+		ID:          "aws-cn",
+		RegionRegex: "^cn\\-\\w+\\-\\d+$",
+		DefaultConfig: PartitionConfig{
+			Name:                 "aws-cn",
+			DnsSuffix:            "amazonaws.com.cn",
+			DualStackDnsSuffix:   "api.amazonwebservices.com.cn",
+			SupportsFIPS:         true,
+			SupportsDualStack:    true,
+			ImplicitGlobalRegion: "cn-northwest-1",
+		},
+		Regions: map[string]RegionOverrides{
+			"aws-cn-global": {
+				Name:               nil,
+				DnsSuffix:          nil,
+				DualStackDnsSuffix: nil,
+				SupportsFIPS:       nil,
+				SupportsDualStack:  nil,
+			},
+			"cn-north-1": {
+				Name:               nil,
+				DnsSuffix:          nil,
+				DualStackDnsSuffix: nil,
+				SupportsFIPS:       nil,
+				SupportsDualStack:  nil,
+			},
+			"cn-northwest-1": {
+				Name:               nil,
+				DnsSuffix:          nil,
+				DualStackDnsSuffix: nil,
+				SupportsFIPS:       nil,
+				SupportsDualStack:  nil,
+			},
+		},
+	},
+	{
+		ID:          "aws-us-gov",
+		RegionRegex: "^us\\-gov\\-\\w+\\-\\d+$",
+		DefaultConfig: PartitionConfig{
+			Name:                 "aws-us-gov",
+			DnsSuffix:            "amazonaws.com",
+			DualStackDnsSuffix:   "api.aws",
+			SupportsFIPS:         true,
+			SupportsDualStack:    true,
+			ImplicitGlobalRegion: "us-gov-west-1",
+		},
+		Regions: map[string]RegionOverrides{
+			"aws-us-gov-global": {
+				Name:               nil,
+				DnsSuffix:          nil,
+				DualStackDnsSuffix: nil,
+				SupportsFIPS:       nil,
+				SupportsDualStack:  nil,
+			},
+			"us-gov-east-1": {
+				Name:               nil,
+				DnsSuffix:          nil,
+				DualStackDnsSuffix: nil,
+				SupportsFIPS:       nil,
+				SupportsDualStack:  nil,
+			},
+			"us-gov-west-1": {
+				Name:               nil,
+				DnsSuffix:          nil,
+				DualStackDnsSuffix: nil,
+				SupportsFIPS:       nil,
+				SupportsDualStack:  nil,
+			},
+		},
+	},
+	{
+		ID:          "aws-iso",
+		RegionRegex: "^us\\-iso\\-\\w+\\-\\d+$",
+		DefaultConfig: PartitionConfig{
+			Name:                 "aws-iso",
+			DnsSuffix:            "c2s.ic.gov",
+			DualStackDnsSuffix:   "c2s.ic.gov",
+			SupportsFIPS:         true,
+			SupportsDualStack:    false,
+			ImplicitGlobalRegion: "us-iso-east-1",
+		},
+		Regions: map[string]RegionOverrides{
+			"aws-iso-global": {
+				Name:               nil,
+				DnsSuffix:          nil,
+				DualStackDnsSuffix: nil,
+				SupportsFIPS:       nil,
+				SupportsDualStack:  nil,
+			},
+			"us-iso-east-1": {
+				Name:               nil,
+				DnsSuffix:          nil,
+				DualStackDnsSuffix: nil,
+				SupportsFIPS:       nil,
+				SupportsDualStack:  nil,
+			},
+			"us-iso-west-1": {
+				Name:               nil,
+				DnsSuffix:          nil,
+				DualStackDnsSuffix: nil,
+				SupportsFIPS:       nil,
+				SupportsDualStack:  nil,
+			},
+		},
+	},
+	{
+		ID:          "aws-iso-b",
+		RegionRegex: "^us\\-isob\\-\\w+\\-\\d+$",
+		DefaultConfig: PartitionConfig{
+			Name:                 "aws-iso-b",
+			DnsSuffix:            "sc2s.sgov.gov",
+			DualStackDnsSuffix:   "sc2s.sgov.gov",
+			SupportsFIPS:         true,
+			SupportsDualStack:    false,
+			ImplicitGlobalRegion: "us-isob-east-1",
+		},
+		Regions: map[string]RegionOverrides{
+			"aws-iso-b-global": {
+				Name:               nil,
+				DnsSuffix:          nil,
+				DualStackDnsSuffix: nil,
+				SupportsFIPS:       nil,
+				SupportsDualStack:  nil,
+			},
+			"us-isob-east-1": {
+				Name:               nil,
+				DnsSuffix:          nil,
+				DualStackDnsSuffix: nil,
+				SupportsFIPS:       nil,
+				SupportsDualStack:  nil,
+			},
+		},
+	},
+	{
+		ID:          "aws-iso-e",
+		RegionRegex: "^eu\\-isoe\\-\\w+\\-\\d+$",
+		DefaultConfig: PartitionConfig{
+			Name:                 "aws-iso-e",
+			DnsSuffix:            "cloud.adc-e.uk",
+			DualStackDnsSuffix:   "cloud.adc-e.uk",
+			SupportsFIPS:         true,
+			SupportsDualStack:    false,
+			ImplicitGlobalRegion: "eu-isoe-west-1",
+		},
+		Regions: map[string]RegionOverrides{
+			"eu-isoe-west-1": {
+				Name:               nil,
+				DnsSuffix:          nil,
+				DualStackDnsSuffix: nil,
+				SupportsFIPS:       nil,
+				SupportsDualStack:  nil,
+			},
+		},
+	},
+	{
+		ID:          "aws-iso-f",
+		RegionRegex: "^us\\-isof\\-\\w+\\-\\d+$",
+		DefaultConfig: PartitionConfig{
+			Name:                 "aws-iso-f",
+			DnsSuffix:            "csp.hci.ic.gov",
+			DualStackDnsSuffix:   "csp.hci.ic.gov",
+			SupportsFIPS:         true,
+			SupportsDualStack:    false,
+			ImplicitGlobalRegion: "us-isof-south-1",
+		},
+		Regions: map[string]RegionOverrides{},
+	},
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.json 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.json
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.json	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.json	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,220 @@
+{
+  "partitions" : [ {
+    "id" : "aws",
+    "outputs" : {
+      "dnsSuffix" : "amazonaws.com",
+      "dualStackDnsSuffix" : "api.aws",
+      "implicitGlobalRegion" : "us-east-1",
+      "name" : "aws",
+      "supportsDualStack" : true,
+      "supportsFIPS" : true
+    },
+    "regionRegex" : "^(us|eu|ap|sa|ca|me|af|il)\\-\\w+\\-\\d+$",
+    "regions" : {
+      "af-south-1" : {
+        "description" : "Africa (Cape Town)"
+      },
+      "ap-east-1" : {
+        "description" : "Asia Pacific (Hong Kong)"
+      },
+      "ap-northeast-1" : {
+        "description" : "Asia Pacific (Tokyo)"
+      },
+      "ap-northeast-2" : {
+        "description" : "Asia Pacific (Seoul)"
+      },
+      "ap-northeast-3" : {
+        "description" : "Asia Pacific (Osaka)"
+      },
+      "ap-south-1" : {
+        "description" : "Asia Pacific (Mumbai)"
+      },
+      "ap-south-2" : {
+        "description" : "Asia Pacific (Hyderabad)"
+      },
+      "ap-southeast-1" : {
+        "description" : "Asia Pacific (Singapore)"
+      },
+      "ap-southeast-2" : {
+        "description" : "Asia Pacific (Sydney)"
+      },
+      "ap-southeast-3" : {
+        "description" : "Asia Pacific (Jakarta)"
+      },
+      "ap-southeast-4" : {
+        "description" : "Asia Pacific (Melbourne)"
+      },
+      "aws-global" : {
+        "description" : "AWS Standard global region"
+      },
+      "ca-central-1" : {
+        "description" : "Canada (Central)"
+      },
+      "ca-west-1" : {
+        "description" : "Canada West (Calgary)"
+      },
+      "eu-central-1" : {
+        "description" : "Europe (Frankfurt)"
+      },
+      "eu-central-2" : {
+        "description" : "Europe (Zurich)"
+      },
+      "eu-north-1" : {
+        "description" : "Europe (Stockholm)"
+      },
+      "eu-south-1" : {
+        "description" : "Europe (Milan)"
+      },
+      "eu-south-2" : {
+        "description" : "Europe (Spain)"
+      },
+      "eu-west-1" : {
+        "description" : "Europe (Ireland)"
+      },
+      "eu-west-2" : {
+        "description" : "Europe (London)"
+      },
+      "eu-west-3" : {
+        "description" : "Europe (Paris)"
+      },
+      "il-central-1" : {
+        "description" : "Israel (Tel Aviv)"
+      },
+      "me-central-1" : {
+        "description" : "Middle East (UAE)"
+      },
+      "me-south-1" : {
+        "description" : "Middle East (Bahrain)"
+      },
+      "sa-east-1" : {
+        "description" : "South America (Sao Paulo)"
+      },
+      "us-east-1" : {
+        "description" : "US East (N. Virginia)"
+      },
+      "us-east-2" : {
+        "description" : "US East (Ohio)"
+      },
+      "us-west-1" : {
+        "description" : "US West (N. California)"
+      },
+      "us-west-2" : {
+        "description" : "US West (Oregon)"
+      }
+    }
+  }, {
+    "id" : "aws-cn",
+    "outputs" : {
+      "dnsSuffix" : "amazonaws.com.cn",
+      "dualStackDnsSuffix" : "api.amazonwebservices.com.cn",
+      "implicitGlobalRegion" : "cn-northwest-1",
+      "name" : "aws-cn",
+      "supportsDualStack" : true,
+      "supportsFIPS" : true
+    },
+    "regionRegex" : "^cn\\-\\w+\\-\\d+$",
+    "regions" : {
+      "aws-cn-global" : {
+        "description" : "AWS China global region"
+      },
+      "cn-north-1" : {
+        "description" : "China (Beijing)"
+      },
+      "cn-northwest-1" : {
+        "description" : "China (Ningxia)"
+      }
+    }
+  }, {
+    "id" : "aws-us-gov",
+    "outputs" : {
+      "dnsSuffix" : "amazonaws.com",
+      "dualStackDnsSuffix" : "api.aws",
+      "implicitGlobalRegion" : "us-gov-west-1",
+      "name" : "aws-us-gov",
+      "supportsDualStack" : true,
+      "supportsFIPS" : true
+    },
+    "regionRegex" : "^us\\-gov\\-\\w+\\-\\d+$",
+    "regions" : {
+      "aws-us-gov-global" : {
+        "description" : "AWS GovCloud (US) global region"
+      },
+      "us-gov-east-1" : {
+        "description" : "AWS GovCloud (US-East)"
+      },
+      "us-gov-west-1" : {
+        "description" : "AWS GovCloud (US-West)"
+      }
+    }
+  }, {
+    "id" : "aws-iso",
+    "outputs" : {
+      "dnsSuffix" : "c2s.ic.gov",
+      "dualStackDnsSuffix" : "c2s.ic.gov",
+      "implicitGlobalRegion" : "us-iso-east-1",
+      "name" : "aws-iso",
+      "supportsDualStack" : false,
+      "supportsFIPS" : true
+    },
+    "regionRegex" : "^us\\-iso\\-\\w+\\-\\d+$",
+    "regions" : {
+      "aws-iso-global" : {
+        "description" : "AWS ISO (US) global region"
+      },
+      "us-iso-east-1" : {
+        "description" : "US ISO East"
+      },
+      "us-iso-west-1" : {
+        "description" : "US ISO WEST"
+      }
+    }
+  }, {
+    "id" : "aws-iso-b",
+    "outputs" : {
+      "dnsSuffix" : "sc2s.sgov.gov",
+      "dualStackDnsSuffix" : "sc2s.sgov.gov",
+      "implicitGlobalRegion" : "us-isob-east-1",
+      "name" : "aws-iso-b",
+      "supportsDualStack" : false,
+      "supportsFIPS" : true
+    },
+    "regionRegex" : "^us\\-isob\\-\\w+\\-\\d+$",
+    "regions" : {
+      "aws-iso-b-global" : {
+        "description" : "AWS ISOB (US) global region"
+      },
+      "us-isob-east-1" : {
+        "description" : "US ISOB East (Ohio)"
+      }
+    }
+  }, {
+    "id" : "aws-iso-e",
+    "outputs" : {
+      "dnsSuffix" : "cloud.adc-e.uk",
+      "dualStackDnsSuffix" : "cloud.adc-e.uk",
+      "implicitGlobalRegion" : "eu-isoe-west-1",
+      "name" : "aws-iso-e",
+      "supportsDualStack" : false,
+      "supportsFIPS" : true
+    },
+    "regionRegex" : "^eu\\-isoe\\-\\w+\\-\\d+$",
+    "regions" : {
+      "eu-isoe-west-1" : {
+        "description" : "EU ISOE West"
+      }
+    }
+  }, {
+    "id" : "aws-iso-f",
+    "outputs" : {
+      "dnsSuffix" : "csp.hci.ic.gov",
+      "dualStackDnsSuffix" : "csp.hci.ic.gov",
+      "implicitGlobalRegion" : "us-isof-south-1",
+      "name" : "aws-iso-f",
+      "supportsDualStack" : false,
+      "supportsFIPS" : true
+    },
+    "regionRegex" : "^us\\-isof\\-\\w+\\-\\d+$",
+    "regions" : { }
+  } ],
+  "version" : "1.1"
+}
\ No newline at end of file
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/endpoints.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/endpoints.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/endpoints.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/endpoints.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,201 @@
+package endpoints
+
+import (
+	"fmt"
+	"regexp"
+	"strings"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+)
+
+const (
+	defaultProtocol = "https"
+	defaultSigner   = "v4"
+)
+
+var (
+	protocolPriority = []string{"https", "http"}
+	signerPriority   = []string{"v4"}
+)
+
+// Options provide configuration needed to direct how endpoints are resolved.
+type Options struct {
+	// Disable usage of HTTPS (TLS / SSL)
+	DisableHTTPS bool
+}
+
+// Partitions is a slice of partition
+type Partitions []Partition
+
+// ResolveEndpoint resolves a service endpoint for the given region and options.
+func (ps Partitions) ResolveEndpoint(region string, opts Options) (aws.Endpoint, error) {
+	if len(ps) == 0 {
+		return aws.Endpoint{}, fmt.Errorf("no partitions found")
+	}
+
+	for i := 0; i < len(ps); i++ {
+		if !ps[i].canResolveEndpoint(region) {
+			continue
+		}
+
+		return ps[i].ResolveEndpoint(region, opts)
+	}
+
+	// fallback to first partition format to use when resolving the endpoint.
+	return ps[0].ResolveEndpoint(region, opts)
+}
+
+// Partition is an AWS partition description for a service and its region endpoints.
+type Partition struct {
+	ID                string
+	RegionRegex       *regexp.Regexp
+	PartitionEndpoint string
+	IsRegionalized    bool
+	Defaults          Endpoint
+	Endpoints         Endpoints
+}
+
+func (p Partition) canResolveEndpoint(region string) bool {
+	_, ok := p.Endpoints[region]
+	return ok || p.RegionRegex.MatchString(region)
+}
+
+// ResolveEndpoint resolves a service endpoint for the given region and options.
+func (p Partition) ResolveEndpoint(region string, options Options) (resolved aws.Endpoint, err error) {
+	if len(region) == 0 && len(p.PartitionEndpoint) != 0 {
+		region = p.PartitionEndpoint
+	}
+
+	e, _ := p.endpointForRegion(region)
+
+	return e.resolve(p.ID, region, p.Defaults, options), nil
+}
+
+func (p Partition) endpointForRegion(region string) (Endpoint, bool) {
+	if e, ok := p.Endpoints[region]; ok {
+		return e, true
+	}
+
+	if !p.IsRegionalized {
+		return p.Endpoints[p.PartitionEndpoint], region == p.PartitionEndpoint
+	}
+
+	// Unable to find any matching endpoint, return
+	// blank that will be used for generic endpoint creation.
+	return Endpoint{}, false
+}
+
+// Endpoints is a map of service config regions to endpoints
+type Endpoints map[string]Endpoint
+
+// CredentialScope is the credential scope of a region and service
+type CredentialScope struct {
+	Region  string
+	Service string
+}
+
+// Endpoint is a service endpoint description
+type Endpoint struct {
+	// True if the endpoint cannot be resolved for this partition/region/service
+	Unresolveable aws.Ternary
+
+	Hostname  string
+	Protocols []string
+
+	CredentialScope CredentialScope
+
+	SignatureVersions []string `json:"signatureVersions"`
+}
+
+func (e Endpoint) resolve(partition, region string, def Endpoint, options Options) aws.Endpoint {
+	var merged Endpoint
+	merged.mergeIn(def)
+	merged.mergeIn(e)
+	e = merged
+
+	var u string
+	if e.Unresolveable != aws.TrueTernary {
+		// Only attempt to resolve the endpoint if it can be resolved.
+		hostname := strings.Replace(e.Hostname, "{region}", region, 1)
+
+		scheme := getEndpointScheme(e.Protocols, options.DisableHTTPS)
+		u = scheme + "://" + hostname
+	}
+
+	signingRegion := e.CredentialScope.Region
+	if len(signingRegion) == 0 {
+		signingRegion = region
+	}
+	signingName := e.CredentialScope.Service
+
+	return aws.Endpoint{
+		URL:           u,
+		PartitionID:   partition,
+		SigningRegion: signingRegion,
+		SigningName:   signingName,
+		SigningMethod: getByPriority(e.SignatureVersions, signerPriority, defaultSigner),
+	}
+}
+
+func (e *Endpoint) mergeIn(other Endpoint) {
+	if other.Unresolveable != aws.UnknownTernary {
+		e.Unresolveable = other.Unresolveable
+	}
+	if len(other.Hostname) > 0 {
+		e.Hostname = other.Hostname
+	}
+	if len(other.Protocols) > 0 {
+		e.Protocols = other.Protocols
+	}
+	if len(other.CredentialScope.Region) > 0 {
+		e.CredentialScope.Region = other.CredentialScope.Region
+	}
+	if len(other.CredentialScope.Service) > 0 {
+		e.CredentialScope.Service = other.CredentialScope.Service
+	}
+	if len(other.SignatureVersions) > 0 {
+		e.SignatureVersions = other.SignatureVersions
+	}
+}
+
+func getEndpointScheme(protocols []string, disableHTTPS bool) string {
+	if disableHTTPS {
+		return "http"
+	}
+
+	return getByPriority(protocols, protocolPriority, defaultProtocol)
+}
+
+func getByPriority(s []string, p []string, def string) string {
+	if len(s) == 0 {
+		return def
+	}
+
+	for i := 0; i < len(p); i++ {
+		for j := 0; j < len(s); j++ {
+			if s[j] == p[i] {
+				return s[j]
+			}
+		}
+	}
+
+	return s[0]
+}
+
+// MapFIPSRegion extracts the intrinsic AWS region from one that may have an
+// embedded FIPS microformat.
+func MapFIPSRegion(region string) string {
+	const fipsInfix = "-fips-"
+	const fipsPrefix = "fips-"
+	const fipsSuffix = "-fips"
+
+	if strings.Contains(region, fipsInfix) ||
+		strings.Contains(region, fipsPrefix) ||
+		strings.Contains(region, fipsSuffix) {
+		region = strings.ReplaceAll(region, fipsInfix, "-")
+		region = strings.ReplaceAll(region, fipsPrefix, "")
+		region = strings.ReplaceAll(region, fipsSuffix, "")
+	}
+
+	return region
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/CHANGELOG.md 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/CHANGELOG.md
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/CHANGELOG.md	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/CHANGELOG.md	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,294 @@
+# v2.6.15 (2024-07-10.2)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.6.14 (2024-07-10)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.6.13 (2024-06-28)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.6.12 (2024-06-19)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.6.11 (2024-06-18)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.6.10 (2024-06-17)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.6.9 (2024-06-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.6.8 (2024-06-03)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.6.7 (2024-05-16)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.6.6 (2024-05-15)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.6.5 (2024-03-29)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.6.4 (2024-03-18)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.6.3 (2024-03-07)
+
+* **Bug Fix**: Remove dependency on go-cmp.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.6.2 (2024-02-23)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.6.1 (2024-02-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.6.0 (2024-02-13)
+
+* **Feature**: Bump minimum Go version to 1.20 per our language support policy.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.5.10 (2024-01-04)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.5.9 (2023-12-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.5.8 (2023-12-01)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.5.7 (2023-11-30)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.5.6 (2023-11-29)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.5.5 (2023-11-28.2)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.5.4 (2023-11-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.5.3 (2023-11-15)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.5.2 (2023-11-09)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.5.1 (2023-11-01)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.5.0 (2023-10-31)
+
+* **Feature**: **BREAKING CHANGE**: Bump minimum go version to 1.19 per the revised [go version support policy](https://aws.amazon.com/blogs/developer/aws-sdk-for-go-aligns-with-go-release-policy-on-supported-runtimes/).
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.4.37 (2023-10-12)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.4.36 (2023-10-06)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.4.35 (2023-08-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.4.34 (2023-08-18)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.4.33 (2023-08-17)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.4.32 (2023-08-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.4.31 (2023-07-31)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.4.30 (2023-07-28)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.4.29 (2023-07-13)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.4.28 (2023-06-13)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.4.27 (2023-04-24)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.4.26 (2023-04-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.4.25 (2023-03-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.4.24 (2023-03-10)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.4.23 (2023-02-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.4.22 (2023-02-03)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.4.21 (2022-12-15)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.4.20 (2022-12-02)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.4.19 (2022-10-24)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.4.18 (2022-10-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.4.17 (2022-09-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.4.16 (2022-09-14)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.4.15 (2022-09-02)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.4.14 (2022-08-31)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.4.13 (2022-08-29)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.4.12 (2022-08-11)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.4.11 (2022-08-09)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.4.10 (2022-08-08)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.4.9 (2022-08-01)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.4.8 (2022-07-05)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.4.7 (2022-06-29)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.4.6 (2022-06-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.4.5 (2022-05-17)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.4.4 (2022-04-25)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.4.3 (2022-03-30)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.4.2 (2022-03-24)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.4.1 (2022-03-23)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.4.0 (2022-03-08)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.3.0 (2022-02-24)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.2.0 (2022-01-14)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.1.0 (2022-01-07)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.0.2 (2021-12-02)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.0.1 (2021-11-19)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.0.0 (2021-11-06)
+
+* **Release**: Endpoint Variant Model Support
+* **Feature**: The SDK now supports configuration of FIPS and DualStack endpoints using environment variables, shared configuration, or programmatically.
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/LICENSE.txt 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/LICENSE.txt
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/LICENSE.txt	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/LICENSE.txt	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/endpoints.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/endpoints.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/endpoints.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/endpoints.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,302 @@
+package endpoints
+
+import (
+	"fmt"
+	"github.com/aws/smithy-go/logging"
+	"regexp"
+	"strings"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+)
+
+// DefaultKey is a compound map key of a variant and other values.
+type DefaultKey struct {
+	Variant        EndpointVariant
+	ServiceVariant ServiceVariant
+}
+
+// EndpointKey is a compound map key of a region and associated variant value.
+type EndpointKey struct {
+	Region         string
+	Variant        EndpointVariant
+	ServiceVariant ServiceVariant
+}
+
+// EndpointVariant is a bit field to describe the endpoints attributes.
+type EndpointVariant uint64
+
+const (
+	// FIPSVariant indicates that the endpoint is FIPS capable.
+	FIPSVariant EndpointVariant = 1 << (64 - 1 - iota)
+
+	// DualStackVariant indicates that the endpoint is DualStack capable.
+	DualStackVariant
+)
+
+// ServiceVariant is a bit field to describe the service endpoint attributes.
+type ServiceVariant uint64
+
+const (
+	defaultProtocol = "https"
+	defaultSigner   = "v4"
+)
+
+var (
+	protocolPriority = []string{"https", "http"}
+	signerPriority   = []string{"v4", "s3v4"}
+)
+
+// Options provide configuration needed to direct how endpoints are resolved.
+type Options struct {
+	// Logger is a logging implementation that log events should be sent to.
+	Logger logging.Logger
+
+	// LogDeprecated indicates that deprecated endpoints should be logged to the provided logger.
+	LogDeprecated bool
+
+	// ResolvedRegion is the resolved region string. If provided (non-zero length) it takes priority
+	// over the region name passed to the ResolveEndpoint call.
+	ResolvedRegion string
+
+	// Disable usage of HTTPS (TLS / SSL)
+	DisableHTTPS bool
+
+	// Instruct the resolver to use a service endpoint that supports dual-stack.
+	// If a service does not have a dual-stack endpoint an error will be returned by the resolver.
+	UseDualStackEndpoint aws.DualStackEndpointState
+
+	// Instruct the resolver to use a service endpoint that supports FIPS.
+	// If a service does not have a FIPS endpoint an error will be returned by the resolver.
+	UseFIPSEndpoint aws.FIPSEndpointState
+
+	// ServiceVariant is a bitfield of service specified endpoint variant data.
+	ServiceVariant ServiceVariant
+}
+
+// GetEndpointVariant returns the EndpointVariant for the variant associated options.
+func (o Options) GetEndpointVariant() (v EndpointVariant) {
+	if o.UseDualStackEndpoint == aws.DualStackEndpointStateEnabled {
+		v |= DualStackVariant
+	}
+	if o.UseFIPSEndpoint == aws.FIPSEndpointStateEnabled {
+		v |= FIPSVariant
+	}
+	return v
+}
+
+// Partitions is a slice of partition
+type Partitions []Partition
+
+// ResolveEndpoint resolves a service endpoint for the given region and options.
+func (ps Partitions) ResolveEndpoint(region string, opts Options) (aws.Endpoint, error) {
+	if len(ps) == 0 {
+		return aws.Endpoint{}, fmt.Errorf("no partitions found")
+	}
+
+	if opts.Logger == nil {
+		opts.Logger = logging.Nop{}
+	}
+
+	if len(opts.ResolvedRegion) > 0 {
+		region = opts.ResolvedRegion
+	}
+
+	for i := 0; i < len(ps); i++ {
+		if !ps[i].canResolveEndpoint(region, opts) {
+			continue
+		}
+
+		return ps[i].ResolveEndpoint(region, opts)
+	}
+
+	// fallback to first partition format to use when resolving the endpoint.
+	return ps[0].ResolveEndpoint(region, opts)
+}
+
+// Partition is an AWS partition description for a service and its region endpoints.
+type Partition struct {
+	ID                string
+	RegionRegex       *regexp.Regexp
+	PartitionEndpoint string
+	IsRegionalized    bool
+	Defaults          map[DefaultKey]Endpoint
+	Endpoints         Endpoints
+}
+
+func (p Partition) canResolveEndpoint(region string, opts Options) bool {
+	_, ok := p.Endpoints[EndpointKey{
+		Region:  region,
+		Variant: opts.GetEndpointVariant(),
+	}]
+	return ok || p.RegionRegex.MatchString(region)
+}
+
+// ResolveEndpoint resolves a service endpoint for the given region and options.
+func (p Partition) ResolveEndpoint(region string, options Options) (resolved aws.Endpoint, err error) {
+	if len(region) == 0 && len(p.PartitionEndpoint) != 0 {
+		region = p.PartitionEndpoint
+	}
+
+	endpoints := p.Endpoints
+
+	variant := options.GetEndpointVariant()
+	serviceVariant := options.ServiceVariant
+
+	defaults := p.Defaults[DefaultKey{
+		Variant:        variant,
+		ServiceVariant: serviceVariant,
+	}]
+
+	return p.endpointForRegion(region, variant, serviceVariant, endpoints).resolve(p.ID, region, defaults, options)
+}
+
+func (p Partition) endpointForRegion(region string, variant EndpointVariant, serviceVariant ServiceVariant, endpoints Endpoints) Endpoint {
+	key := EndpointKey{
+		Region:  region,
+		Variant: variant,
+	}
+
+	if e, ok := endpoints[key]; ok {
+		return e
+	}
+
+	if !p.IsRegionalized {
+		return endpoints[EndpointKey{
+			Region:         p.PartitionEndpoint,
+			Variant:        variant,
+			ServiceVariant: serviceVariant,
+		}]
+	}
+
+	// Unable to find any matching endpoint, return
+	// blank that will be used for generic endpoint creation.
+	return Endpoint{}
+}
+
+// Endpoints is a map of service config regions to endpoints
+type Endpoints map[EndpointKey]Endpoint
+
+// CredentialScope is the credential scope of a region and service
+type CredentialScope struct {
+	Region  string
+	Service string
+}
+
+// Endpoint is a service endpoint description
+type Endpoint struct {
+	// True if the endpoint cannot be resolved for this partition/region/service
+	Unresolveable aws.Ternary
+
+	Hostname  string
+	Protocols []string
+
+	CredentialScope CredentialScope
+
+	SignatureVersions []string
+
+	// Indicates that this endpoint is deprecated.
+	Deprecated aws.Ternary
+}
+
+// IsZero returns whether the endpoint structure is an empty (zero) value.
+func (e Endpoint) IsZero() bool {
+	switch {
+	case e.Unresolveable != aws.UnknownTernary:
+		return false
+	case len(e.Hostname) != 0:
+		return false
+	case len(e.Protocols) != 0:
+		return false
+	case e.CredentialScope != (CredentialScope{}):
+		return false
+	case len(e.SignatureVersions) != 0:
+		return false
+	}
+	return true
+}
+
+func (e Endpoint) resolve(partition, region string, def Endpoint, options Options) (aws.Endpoint, error) {
+	var merged Endpoint
+	merged.mergeIn(def)
+	merged.mergeIn(e)
+	e = merged
+
+	if e.IsZero() {
+		return aws.Endpoint{}, fmt.Errorf("unable to resolve endpoint for region: %v", region)
+	}
+
+	var u string
+	if e.Unresolveable != aws.TrueTernary {
+		// Only attempt to resolve the endpoint if it can be resolved.
+		hostname := strings.Replace(e.Hostname, "{region}", region, 1)
+
+		scheme := getEndpointScheme(e.Protocols, options.DisableHTTPS)
+		u = scheme + "://" + hostname
+	}
+
+	signingRegion := e.CredentialScope.Region
+	if len(signingRegion) == 0 {
+		signingRegion = region
+	}
+	signingName := e.CredentialScope.Service
+
+	if e.Deprecated == aws.TrueTernary && options.LogDeprecated {
+		options.Logger.Logf(logging.Warn, "endpoint identifier %q, url %q marked as deprecated", region, u)
+	}
+
+	return aws.Endpoint{
+		URL:           u,
+		PartitionID:   partition,
+		SigningRegion: signingRegion,
+		SigningName:   signingName,
+		SigningMethod: getByPriority(e.SignatureVersions, signerPriority, defaultSigner),
+	}, nil
+}
+
+func (e *Endpoint) mergeIn(other Endpoint) {
+	if other.Unresolveable != aws.UnknownTernary {
+		e.Unresolveable = other.Unresolveable
+	}
+	if len(other.Hostname) > 0 {
+		e.Hostname = other.Hostname
+	}
+	if len(other.Protocols) > 0 {
+		e.Protocols = other.Protocols
+	}
+	if len(other.CredentialScope.Region) > 0 {
+		e.CredentialScope.Region = other.CredentialScope.Region
+	}
+	if len(other.CredentialScope.Service) > 0 {
+		e.CredentialScope.Service = other.CredentialScope.Service
+	}
+	if len(other.SignatureVersions) > 0 {
+		e.SignatureVersions = other.SignatureVersions
+	}
+	if other.Deprecated != aws.UnknownTernary {
+		e.Deprecated = other.Deprecated
+	}
+}
+
+func getEndpointScheme(protocols []string, disableHTTPS bool) string {
+	if disableHTTPS {
+		return "http"
+	}
+
+	return getByPriority(protocols, protocolPriority, defaultProtocol)
+}
+
+func getByPriority(s []string, p []string, def string) string {
+	if len(s) == 0 {
+		return def
+	}
+
+	for i := 0; i < len(p); i++ {
+		for j := 0; j < len(s); j++ {
+			if s[j] == p[i] {
+				return s[j]
+			}
+		}
+	}
+
+	return s[0]
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/go_module_metadata.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/go_module_metadata.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/go_module_metadata.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/go_module_metadata.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,6 @@
+// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT.
+
+package endpoints
+
+// goModuleVersion is the tagged release for this module
+const goModuleVersion = "2.6.15"
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/CHANGELOG.md 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/CHANGELOG.md
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/CHANGELOG.md	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/CHANGELOG.md	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,271 @@
+# v1.8.0 (2024-02-13)
+
+* **Feature**: Bump minimum Go version to 1.20 per our language support policy.
+
+# v1.7.3 (2024-01-22)
+
+* **Bug Fix**: Remove invalid escaping of shared config values. All values in the shared config file will now be interpreted literally, save for fully-quoted strings which are unwrapped for legacy reasons.
+
+# v1.7.2 (2023-12-08)
+
+* **Bug Fix**: Correct loading of [services *] sections into shared config.
+
+# v1.7.1 (2023-11-16)
+
+* **Bug Fix**: Fix recognition of trailing comments in shared config properties. # or ; separators that aren't preceded by whitespace at the end of a property value should be considered part of it.
+
+# v1.7.0 (2023-11-13)
+
+* **Feature**: Replace the legacy config parser with a modern, less-strict implementation. Parsing failures within a section will now simply ignore the invalid line rather than silently drop the entire section.
+
+# v1.6.0 (2023-11-09.2)
+
+* **Feature**: BREAKFIX: In order to support subproperty parsing, invalid property definitions must not be ignored
+
+# v1.5.2 (2023-11-09)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.5.1 (2023-11-07)
+
+* **Bug Fix**: Fix subproperty performance regression
+
+# v1.5.0 (2023-11-01)
+
+* **Feature**: Adds support for configured endpoints via environment variables and the AWS shared configuration file.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.4.0 (2023-10-31)
+
+* **Feature**: **BREAKING CHANGE**: Bump minimum go version to 1.19 per the revised [go version support policy](https://aws.amazon.com/blogs/developer/aws-sdk-for-go-aligns-with-go-release-policy-on-supported-runtimes/).
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.45 (2023-10-12)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.44 (2023-10-06)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.43 (2023-09-22)
+
+* **Bug Fix**: Fixed a bug where merging `max_attempts` or `duration_seconds` fields across shared config files with invalid values would silently default them to 0.
+* **Bug Fix**: Move type assertion of config values out of the parsing stage, which resolves an issue where the contents of a profile would silently be dropped with certain numeric formats.
+
+# v1.3.42 (2023-08-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.41 (2023-08-18)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.40 (2023-08-17)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.39 (2023-08-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.38 (2023-07-31)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.37 (2023-07-28)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.36 (2023-07-13)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.35 (2023-06-13)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.34 (2023-04-24)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.33 (2023-04-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.32 (2023-03-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.31 (2023-03-10)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.30 (2023-02-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.29 (2023-02-03)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.28 (2022-12-15)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.27 (2022-12-02)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.26 (2022-10-24)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.25 (2022-10-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.24 (2022-09-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.23 (2022-09-14)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.22 (2022-09-02)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.21 (2022-08-31)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.20 (2022-08-29)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.19 (2022-08-11)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.18 (2022-08-09)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.17 (2022-08-08)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.16 (2022-08-01)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.15 (2022-07-05)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.14 (2022-06-29)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.13 (2022-06-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.12 (2022-05-17)
+
+* **Bug Fix**: Removes the fuzz testing files from the module, as they are invalid and not used.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.11 (2022-04-25)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.10 (2022-03-30)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.9 (2022-03-24)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.8 (2022-03-23)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.7 (2022-03-08)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.6 (2022-02-24)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.5 (2022-01-28)
+
+* **Bug Fix**: Fixes the SDK's handling of `duration_sections` in the shared credentials file or specified in multiple shared config and shared credentials files under the same profile. [#1568](https://github.com/aws/aws-sdk-go-v2/pull/1568). Thanks to [Amir Szekely](https://github.com/kichik) for help reproduce this bug.
+
+# v1.3.4 (2022-01-14)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.3 (2022-01-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.2 (2021-12-02)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.1 (2021-11-19)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.0 (2021-11-06)
+
+* **Feature**: The SDK now supports configuration of FIPS and DualStack endpoints using environment variables, shared configuration, or programmatically.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.2.5 (2021-10-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.2.4 (2021-10-11)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.2.3 (2021-09-17)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.2.2 (2021-08-27)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.2.1 (2021-08-19)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.2.0 (2021-08-04)
+
+* **Feature**: adds error handling for deferred close calls
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.1 (2021-07-15)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.0 (2021-07-01)
+
+* **Feature**: Support for `:`, `=`, `[`, `]` being present in expression values.
+
+# v1.0.1 (2021-06-25)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.0.0 (2021-05-20)
+
+* **Release**: The `github.com/aws/aws-sdk-go-v2/internal/ini` package is now a Go Module.
+* **Dependency Update**: Updated to the latest SDK module versions
+
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/LICENSE.txt 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/LICENSE.txt
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/LICENSE.txt	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/LICENSE.txt	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/errors.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/errors.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/errors.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/errors.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,22 @@
+package ini
+
+import "fmt"
+
+// UnableToReadFile is an error indicating that a ini file could not be read
+type UnableToReadFile struct {
+	Err error
+}
+
+// Error returns an error message and the underlying error message if present
+func (e *UnableToReadFile) Error() string {
+	base := "unable to read file"
+	if e.Err == nil {
+		return base
+	}
+	return fmt.Sprintf("%s: %v", base, e.Err)
+}
+
+// Unwrap returns the underlying error
+func (e *UnableToReadFile) Unwrap() error {
+	return e.Err
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/go_module_metadata.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/go_module_metadata.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/go_module_metadata.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/go_module_metadata.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,6 @@
+// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT.
+
+package ini
+
+// goModuleVersion is the tagged release for this module
+const goModuleVersion = "1.8.0"
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/ini.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/ini.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/ini.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/ini.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,56 @@
+// Package ini implements parsing of the AWS shared config file.
+//
+//	Example:
+//	sections, err := ini.OpenFile("/path/to/file")
+//	if err != nil {
+//		panic(err)
+//	}
+//
+//	profile := "foo"
+//	section, ok := sections.GetSection(profile)
+//	if !ok {
+//		fmt.Printf("section %q could not be found", profile)
+//	}
+package ini
+
+import (
+	"fmt"
+	"io"
+	"os"
+	"strings"
+)
+
+// OpenFile parses shared config from the given file path.
+func OpenFile(path string) (sections Sections, err error) {
+	f, oerr := os.Open(path)
+	if oerr != nil {
+		return Sections{}, &UnableToReadFile{Err: oerr}
+	}
+
+	defer func() {
+		closeErr := f.Close()
+		if err == nil {
+			err = closeErr
+		} else if closeErr != nil {
+			err = fmt.Errorf("close error: %v, original error: %w", closeErr, err)
+		}
+	}()
+
+	return Parse(f, path)
+}
+
+// Parse parses shared config from the given reader.
+func Parse(r io.Reader, path string) (Sections, error) {
+	contents, err := io.ReadAll(r)
+	if err != nil {
+		return Sections{}, fmt.Errorf("read all: %v", err)
+	}
+
+	lines := strings.Split(string(contents), "\n")
+	tokens, err := tokenize(lines)
+	if err != nil {
+		return Sections{}, fmt.Errorf("tokenize: %v", err)
+	}
+
+	return parse(tokens, path), nil
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/parse.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/parse.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/parse.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/parse.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,109 @@
+package ini
+
+import (
+	"fmt"
+	"strings"
+)
+
+func parse(tokens []lineToken, path string) Sections {
+	parser := &parser{
+		path:     path,
+		sections: NewSections(),
+	}
+	parser.parse(tokens)
+	return parser.sections
+}
+
+type parser struct {
+	csection, ckey string   // current state
+	path           string   // source file path
+	sections       Sections // parse result
+}
+
+func (p *parser) parse(tokens []lineToken) {
+	for _, otok := range tokens {
+		switch tok := otok.(type) {
+		case *lineTokenProfile:
+			p.handleProfile(tok)
+		case *lineTokenProperty:
+			p.handleProperty(tok)
+		case *lineTokenSubProperty:
+			p.handleSubProperty(tok)
+		case *lineTokenContinuation:
+			p.handleContinuation(tok)
+		}
+	}
+}
+
+func (p *parser) handleProfile(tok *lineTokenProfile) {
+	name := tok.Name
+	if tok.Type != "" {
+		name = fmt.Sprintf("%s %s", tok.Type, tok.Name)
+	}
+	p.ckey = ""
+	p.csection = name
+	if _, ok := p.sections.container[name]; !ok {
+		p.sections.container[name] = NewSection(name)
+	}
+}
+
+func (p *parser) handleProperty(tok *lineTokenProperty) {
+	if p.csection == "" {
+		return // LEGACY: don't error on "global" properties
+	}
+
+	p.ckey = tok.Key
+	if _, ok := p.sections.container[p.csection].values[tok.Key]; ok {
+		section := p.sections.container[p.csection]
+		section.Logs = append(p.sections.container[p.csection].Logs,
+			fmt.Sprintf(
+				"For profile: %v, overriding %v value, with a %v value found in a duplicate profile defined later in the same file %v. \n",
+				p.csection, tok.Key, tok.Key, p.path,
+			),
+		)
+		p.sections.container[p.csection] = section
+	}
+
+	p.sections.container[p.csection].values[tok.Key] = Value{
+		str: tok.Value,
+	}
+	p.sections.container[p.csection].SourceFile[tok.Key] = p.path
+}
+
+func (p *parser) handleSubProperty(tok *lineTokenSubProperty) {
+	if p.csection == "" {
+		return // LEGACY: don't error on "global" properties
+	}
+
+	if p.ckey == "" || p.sections.container[p.csection].values[p.ckey].str != "" {
+		// This is an "orphaned" subproperty, either because it's at
+		// the beginning of a section or because the last property's
+		// value isn't empty. Either way we're lenient here and
+		// "promote" this to a normal property.
+		p.handleProperty(&lineTokenProperty{
+			Key:   tok.Key,
+			Value: strings.TrimSpace(trimPropertyComment(tok.Value)),
+		})
+		return
+	}
+
+	if p.sections.container[p.csection].values[p.ckey].mp == nil {
+		p.sections.container[p.csection].values[p.ckey] = Value{
+			mp: map[string]string{},
+		}
+	}
+	p.sections.container[p.csection].values[p.ckey].mp[tok.Key] = tok.Value
+}
+
+func (p *parser) handleContinuation(tok *lineTokenContinuation) {
+	if p.ckey == "" {
+		return
+	}
+
+	value, _ := p.sections.container[p.csection].values[p.ckey]
+	if value.str != "" && value.mp == nil {
+		value.str = fmt.Sprintf("%s\n%s", value.str, tok.Value)
+	}
+
+	p.sections.container[p.csection].values[p.ckey] = value
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/sections.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/sections.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/sections.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/sections.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,157 @@
+package ini
+
+import (
+	"sort"
+)
+
+// Sections is a map of Section structures that represent
+// a configuration.
+type Sections struct {
+	container map[string]Section
+}
+
+// NewSections returns empty ini Sections
+func NewSections() Sections {
+	return Sections{
+		container: make(map[string]Section, 0),
+	}
+}
+
+// GetSection will return section p. If section p does not exist,
+// false will be returned in the second parameter.
+func (t Sections) GetSection(p string) (Section, bool) {
+	v, ok := t.container[p]
+	return v, ok
+}
+
+// HasSection denotes if Sections consist of a section with
+// provided name.
+func (t Sections) HasSection(p string) bool {
+	_, ok := t.container[p]
+	return ok
+}
+
+// SetSection sets a section value for provided section name.
+func (t Sections) SetSection(p string, v Section) Sections {
+	t.container[p] = v
+	return t
+}
+
+// DeleteSection deletes a section entry/value for provided section name.
+func (t Sections) DeleteSection(p string) {
+	delete(t.container, p)
+}
+
+// values represents a map of union values.
+type values map[string]Value
+
+// List will return a list of all sections that were successfully
+// parsed.
+func (t Sections) List() []string {
+	keys := make([]string, len(t.container))
+	i := 0
+	for k := range t.container {
+		keys[i] = k
+		i++
+	}
+
+	sort.Strings(keys)
+	return keys
+}
+
+// Section contains a name and values. This represent
+// a sectioned entry in a configuration file.
+type Section struct {
+	// Name is the Section profile name
+	Name string
+
+	// values are the values within parsed profile
+	values values
+
+	// Errors is the list of errors
+	Errors []error
+
+	// Logs is the list of logs
+	Logs []string
+
+	// SourceFile is the INI Source file from where this section
+	// was retrieved. The key is the property, value is the
+	// source file the property was retrieved from.
+	SourceFile map[string]string
+}
+
+// NewSection returns an initialized section for the name
+func NewSection(name string) Section {
+	return Section{
+		Name:       name,
+		values:     values{},
+		SourceFile: map[string]string{},
+	}
+}
+
+// List will return a list of all
+// services in values
+func (t Section) List() []string {
+	keys := make([]string, len(t.values))
+	i := 0
+	for k := range t.values {
+		keys[i] = k
+		i++
+	}
+
+	sort.Strings(keys)
+	return keys
+}
+
+// UpdateSourceFile updates source file for a property to provided filepath.
+func (t Section) UpdateSourceFile(property string, filepath string) {
+	t.SourceFile[property] = filepath
+}
+
+// UpdateValue updates value for a provided key with provided value
+func (t Section) UpdateValue(k string, v Value) error {
+	t.values[k] = v
+	return nil
+}
+
+// Has will return whether or not an entry exists in a given section
+func (t Section) Has(k string) bool {
+	_, ok := t.values[k]
+	return ok
+}
+
+// ValueType will return what type the union is set to. If
+// k was not found, the NoneType will be returned.
+func (t Section) ValueType(k string) (ValueType, bool) {
+	v, ok := t.values[k]
+	return v.Type, ok
+}
+
+// Bool returns a bool value at k
+func (t Section) Bool(k string) (bool, bool) {
+	return t.values[k].BoolValue()
+}
+
+// Int returns an integer value at k
+func (t Section) Int(k string) (int64, bool) {
+	return t.values[k].IntValue()
+}
+
+// Map returns a map value at k
+func (t Section) Map(k string) map[string]string {
+	return t.values[k].MapValue()
+}
+
+// Float64 returns a float value at k
+func (t Section) Float64(k string) (float64, bool) {
+	return t.values[k].FloatValue()
+}
+
+// String returns the string value at k
+func (t Section) String(k string) string {
+	_, ok := t.values[k]
+	if !ok {
+		return ""
+	}
+	return t.values[k].StringValue()
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/strings.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/strings.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/strings.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/strings.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,89 @@
+package ini
+
+import (
+	"strings"
+)
+
+func trimProfileComment(s string) string {
+	r, _, _ := strings.Cut(s, "#")
+	r, _, _ = strings.Cut(r, ";")
+	return r
+}
+
+func trimPropertyComment(s string) string {
+	r, _, _ := strings.Cut(s, " #")
+	r, _, _ = strings.Cut(r, " ;")
+	r, _, _ = strings.Cut(r, "\t#")
+	r, _, _ = strings.Cut(r, "\t;")
+	return r
+}
+
+// assumes no surrounding comment
+func splitProperty(s string) (string, string, bool) {
+	equalsi := strings.Index(s, "=")
+	coloni := strings.Index(s, ":") // LEGACY: also supported for property assignment
+	sep := "="
+	if equalsi == -1 || coloni != -1 && coloni < equalsi {
+		sep = ":"
+	}
+
+	k, v, ok := strings.Cut(s, sep)
+	if !ok {
+		return "", "", false
+	}
+	return strings.TrimSpace(k), strings.TrimSpace(v), true
+}
+
+// assumes no surrounding comment, whitespace, or profile brackets
+func splitProfile(s string) (string, string) {
+	var first int
+	for i, r := range s {
+		if isLineSpace(r) {
+			if first == 0 {
+				first = i
+			}
+		} else {
+			if first != 0 {
+				return s[:first], s[i:]
+			}
+		}
+	}
+	if first == 0 {
+		return "", s // type component is effectively blank
+	}
+	return "", ""
+}
+
+func isLineSpace(r rune) bool {
+	return r == ' ' || r == '\t'
+}
+
+func unquote(s string) string {
+	if isSingleQuoted(s) || isDoubleQuoted(s) {
+		return s[1 : len(s)-1]
+	}
+	return s
+}
+
+// applies various legacy conversions to property values:
+//   - remove wrapping single/doublequotes
+func legacyStrconv(s string) string {
+	s = unquote(s)
+	return s
+}
+
+func isSingleQuoted(s string) bool {
+	return hasAffixes(s, "'", "'")
+}
+
+func isDoubleQuoted(s string) bool {
+	return hasAffixes(s, `"`, `"`)
+}
+
+func isBracketed(s string) bool {
+	return hasAffixes(s, "[", "]")
+}
+
+func hasAffixes(s, left, right string) bool {
+	return strings.HasPrefix(s, left) && strings.HasSuffix(s, right)
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/token.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/token.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/token.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/token.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,32 @@
+package ini
+
+type lineToken interface {
+	isLineToken()
+}
+
+type lineTokenProfile struct {
+	Type string
+	Name string
+}
+
+func (*lineTokenProfile) isLineToken() {}
+
+type lineTokenProperty struct {
+	Key   string
+	Value string
+}
+
+func (*lineTokenProperty) isLineToken() {}
+
+type lineTokenContinuation struct {
+	Value string
+}
+
+func (*lineTokenContinuation) isLineToken() {}
+
+type lineTokenSubProperty struct {
+	Key   string
+	Value string
+}
+
+func (*lineTokenSubProperty) isLineToken() {}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/tokenize.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/tokenize.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/tokenize.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/tokenize.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,92 @@
+package ini
+
+import (
+	"strings"
+)
+
+func tokenize(lines []string) ([]lineToken, error) {
+	tokens := make([]lineToken, 0, len(lines))
+	for _, line := range lines {
+		if len(strings.TrimSpace(line)) == 0 || isLineComment(line) {
+			continue
+		}
+
+		if tok := asProfile(line); tok != nil {
+			tokens = append(tokens, tok)
+		} else if tok := asProperty(line); tok != nil {
+			tokens = append(tokens, tok)
+		} else if tok := asSubProperty(line); tok != nil {
+			tokens = append(tokens, tok)
+		} else if tok := asContinuation(line); tok != nil {
+			tokens = append(tokens, tok)
+		} // unrecognized tokens are effectively ignored
+	}
+	return tokens, nil
+}
+
+func isLineComment(line string) bool {
+	trimmed := strings.TrimLeft(line, " \t")
+	return strings.HasPrefix(trimmed, "#") || strings.HasPrefix(trimmed, ";")
+}
+
+func asProfile(line string) *lineTokenProfile { // " [ type name ] ; comment"
+	trimmed := strings.TrimSpace(trimProfileComment(line)) // "[ type name ]"
+	if !isBracketed(trimmed) {
+		return nil
+	}
+	trimmed = trimmed[1 : len(trimmed)-1] // " type name " (or just " name ")
+	trimmed = strings.TrimSpace(trimmed)  // "type name" / "name"
+	typ, name := splitProfile(trimmed)
+	return &lineTokenProfile{
+		Type: typ,
+		Name: name,
+	}
+}
+
+func asProperty(line string) *lineTokenProperty {
+	if isLineSpace(rune(line[0])) {
+		return nil
+	}
+
+	trimmed := trimPropertyComment(line)
+	trimmed = strings.TrimRight(trimmed, " \t")
+	k, v, ok := splitProperty(trimmed)
+	if !ok {
+		return nil
+	}
+
+	return &lineTokenProperty{
+		Key:   strings.ToLower(k), // LEGACY: normalize key case
+		Value: legacyStrconv(v),   // LEGACY: see func docs
+	}
+}
+
+func asSubProperty(line string) *lineTokenSubProperty {
+	if !isLineSpace(rune(line[0])) {
+		return nil
+	}
+
+	// comments on sub-properties are included in the value
+	trimmed := strings.TrimLeft(line, " \t")
+	k, v, ok := splitProperty(trimmed)
+	if !ok {
+		return nil
+	}
+
+	return &lineTokenSubProperty{ // same LEGACY constraints as in normal property
+		Key:   strings.ToLower(k),
+		Value: legacyStrconv(v),
+	}
+}
+
+func asContinuation(line string) *lineTokenContinuation {
+	if !isLineSpace(rune(line[0])) {
+		return nil
+	}
+
+	// includes comments like sub-properties
+	trimmed := strings.TrimLeft(line, " \t")
+	return &lineTokenContinuation{
+		Value: trimmed,
+	}
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/value.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/value.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/value.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/value.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,93 @@
+package ini
+
+import (
+	"fmt"
+	"strconv"
+	"strings"
+)
+
+// ValueType is an enum that will signify what type
+// the Value is
+type ValueType int
+
+func (v ValueType) String() string {
+	switch v {
+	case NoneType:
+		return "NONE"
+	case StringType:
+		return "STRING"
+	}
+
+	return ""
+}
+
+// ValueType enums
+const (
+	NoneType = ValueType(iota)
+	StringType
+	QuotedStringType
+)
+
+// Value is a union container
+type Value struct {
+	Type ValueType
+
+	str string
+	mp  map[string]string
+}
+
+// NewStringValue returns a Value type generated using a string input.
+func NewStringValue(str string) (Value, error) {
+	return Value{str: str}, nil
+}
+
+func (v Value) String() string {
+	switch v.Type {
+	case StringType:
+		return fmt.Sprintf("string: %s", string(v.str))
+	case QuotedStringType:
+		return fmt.Sprintf("quoted string: %s", string(v.str))
+	default:
+		return "union not set"
+	}
+}
+
+// MapValue returns a map value for sub properties
+func (v Value) MapValue() map[string]string {
+	return v.mp
+}
+
+// IntValue returns an integer value
+func (v Value) IntValue() (int64, bool) {
+	i, err := strconv.ParseInt(string(v.str), 0, 64)
+	if err != nil {
+		return 0, false
+	}
+	return i, true
+}
+
+// FloatValue returns a float value
+func (v Value) FloatValue() (float64, bool) {
+	f, err := strconv.ParseFloat(string(v.str), 64)
+	if err != nil {
+		return 0, false
+	}
+	return f, true
+}
+
+// BoolValue returns a bool value
+func (v Value) BoolValue() (bool, bool) {
+	// we don't use ParseBool as it recognizes more than what we've
+	// historically supported
+	if strings.EqualFold(v.str, "true") {
+		return true, true
+	} else if strings.EqualFold(v.str, "false") {
+		return false, true
+	}
+	return false, false
+}
+
+// StringValue returns the string value
+func (v Value) StringValue() string {
+	return v.str
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/internal/middleware/middleware.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/internal/middleware/middleware.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/internal/middleware/middleware.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/internal/middleware/middleware.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,42 @@
+package middleware
+
+import (
+	"context"
+	"sync/atomic"
+	"time"
+
+	internalcontext "github.com/aws/aws-sdk-go-v2/internal/context"
+	"github.com/aws/smithy-go/middleware"
+)
+
+// AddTimeOffsetMiddleware sets a value representing clock skew on the request context.
+// This can be read by other operations (such as signing) to correct the date value they send
+// on the request
+type AddTimeOffsetMiddleware struct {
+	Offset *atomic.Int64
+}
+
+// ID the identifier for AddTimeOffsetMiddleware
+func (m *AddTimeOffsetMiddleware) ID() string { return "AddTimeOffsetMiddleware" }
+
+// HandleBuild sets a value for attemptSkew on the request context if one is set on the client.
+func (m AddTimeOffsetMiddleware) HandleBuild(ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler) (
+	out middleware.BuildOutput, metadata middleware.Metadata, err error,
+) {
+	if m.Offset != nil {
+		offset := time.Duration(m.Offset.Load())
+		ctx = internalcontext.SetAttemptSkewContext(ctx, offset)
+	}
+	return next.HandleBuild(ctx, in)
+}
+
+// HandleDeserialize gets the clock skew context from the context, and if set, sets it on the pointer
+// held by AddTimeOffsetMiddleware
+func (m *AddTimeOffsetMiddleware) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+	if v := internalcontext.GetAttemptSkewContext(ctx); v != 0 {
+		m.Offset.Store(v.Nanoseconds())
+	}
+	return next.HandleDeserialize(ctx, in)
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/internal/rand/rand.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/internal/rand/rand.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/internal/rand/rand.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/internal/rand/rand.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,33 @@
+package rand
+
+import (
+	"crypto/rand"
+	"fmt"
+	"io"
+	"math/big"
+)
+
+func init() {
+	Reader = rand.Reader
+}
+
+// Reader provides a random reader that can reset during testing.
+var Reader io.Reader
+
+var floatMaxBigInt = big.NewInt(1 << 53)
+
+// Float64 returns a float64 read from an io.Reader source. The returned float will be between [0.0, 1.0).
+func Float64(reader io.Reader) (float64, error) {
+	bi, err := rand.Int(reader, floatMaxBigInt)
+	if err != nil {
+		return 0, fmt.Errorf("failed to read random value, %v", err)
+	}
+
+	return float64(bi.Int64()) / (1 << 53), nil
+}
+
+// CryptoRandFloat64 returns a random float64 obtained from the crypto rand
+// source.
+func CryptoRandFloat64() (float64, error) {
+	return Float64(Reader)
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/internal/sdk/interfaces.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/internal/sdk/interfaces.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/internal/sdk/interfaces.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/internal/sdk/interfaces.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,9 @@
+package sdk
+
+// Invalidator provides access to a type's invalidate method to make it
+// invalidate its cache.
+//
+// e.g. aws.SafeCredentialsProvider's Invalidate method.
+type Invalidator interface {
+	Invalidate()
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/internal/sdk/time.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/internal/sdk/time.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/internal/sdk/time.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/internal/sdk/time.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,74 @@
+package sdk
+
+import (
+	"context"
+	"time"
+)
+
+func init() {
+	NowTime = time.Now
+	Sleep = time.Sleep
+	SleepWithContext = sleepWithContext
+}
+
+// NowTime is a value for getting the current time. This value can be overridden
+// for testing mocking out current time.
+var NowTime func() time.Time
+
+// Sleep is a value for sleeping for a duration. This value can be overridden
+// for testing and mocking out sleep duration.
+var Sleep func(time.Duration)
+
+// SleepWithContext will wait for the timer duration to expire, or the context
+// is canceled. Whichever happens first. If the context is canceled the Context's
+// error will be returned.
+//
+// This value can be overridden for testing and mocking out sleep duration.
+var SleepWithContext func(context.Context, time.Duration) error
+
+// sleepWithContext will wait for the timer duration to expire, or the context
+// is canceled. Whichever happens first. If the context is canceled the
+// Context's error will be returned.
+func sleepWithContext(ctx context.Context, dur time.Duration) error {
+	t := time.NewTimer(dur)
+	defer t.Stop()
+
+	select {
+	case <-t.C:
+		break
+	case <-ctx.Done():
+		return ctx.Err()
+	}
+
+	return nil
+}
+
+// noOpSleepWithContext does nothing, returns immediately.
+func noOpSleepWithContext(context.Context, time.Duration) error {
+	return nil
+}
+
+func noOpSleep(time.Duration) {}
+
+// TestingUseNopSleep is a utility for disabling sleep across the SDK for
+// testing.
+func TestingUseNopSleep() func() {
+	SleepWithContext = noOpSleepWithContext
+	Sleep = noOpSleep
+
+	return func() {
+		SleepWithContext = sleepWithContext
+		Sleep = time.Sleep
+	}
+}
+
+// TestingUseReferenceTime is a utility for swapping the time function across the SDK to return a specific reference time
+// for testing purposes.
+func TestingUseReferenceTime(referenceTime time.Time) func() {
+	NowTime = func() time.Time {
+		return referenceTime
+	}
+	return func() {
+		NowTime = time.Now
+	}
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/internal/sdkio/byte.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/internal/sdkio/byte.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/internal/sdkio/byte.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/internal/sdkio/byte.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,12 @@
+package sdkio
+
+const (
+	// Byte is 8 bits
+	Byte int64 = 1
+	// KibiByte (KiB) is 1024 Bytes
+	KibiByte = Byte * 1024
+	// MebiByte (MiB) is 1024 KiB
+	MebiByte = KibiByte * 1024
+	// GibiByte (GiB) is 1024 MiB
+	GibiByte = MebiByte * 1024
+)
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/internal/shareddefaults/shared_config.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/internal/shareddefaults/shared_config.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/internal/shareddefaults/shared_config.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/internal/shareddefaults/shared_config.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,47 @@
+package shareddefaults
+
+import (
+	"os"
+	"os/user"
+	"path/filepath"
+)
+
+// SharedCredentialsFilename returns the SDK's default file path
+// for the shared credentials file.
+//
+// Builds the shared config file path based on the OS's platform.
+//
+//   - Linux/Unix: $HOME/.aws/credentials
+//   - Windows: %USERPROFILE%\.aws\credentials
+func SharedCredentialsFilename() string {
+	return filepath.Join(UserHomeDir(), ".aws", "credentials")
+}
+
+// SharedConfigFilename returns the SDK's default file path for
+// the shared config file.
+//
+// Builds the shared config file path based on the OS's platform.
+//
+//   - Linux/Unix: $HOME/.aws/config
+//   - Windows: %USERPROFILE%\.aws\config
+func SharedConfigFilename() string {
+	return filepath.Join(UserHomeDir(), ".aws", "config")
+}
+
+// UserHomeDir returns the home directory for the user the process is
+// running under.
+func UserHomeDir() string {
+	// Ignore errors since we only care about Windows and *nix.
+	home, _ := os.UserHomeDir()
+
+	if len(home) > 0 {
+		return home
+	}
+
+	currUser, _ := user.Current()
+	if currUser != nil {
+		home = currUser.HomeDir
+	}
+
+	return home
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/internal/strings/strings.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/internal/strings/strings.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/internal/strings/strings.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/internal/strings/strings.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,11 @@
+package strings
+
+import (
+	"strings"
+)
+
+// HasPrefixFold tests whether the string s begins with prefix, interpreted as UTF-8 strings,
+// under Unicode case-folding.
+func HasPrefixFold(s, prefix string) bool {
+	return len(s) >= len(prefix) && strings.EqualFold(s[0:len(prefix)], prefix)
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/internal/sync/singleflight/LICENSE 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/internal/sync/singleflight/LICENSE
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/internal/sync/singleflight/LICENSE	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/internal/sync/singleflight/LICENSE	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,28 @@
+Copyright (c) 2009 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+   * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+   * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/internal/sync/singleflight/docs.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/internal/sync/singleflight/docs.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/internal/sync/singleflight/docs.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/internal/sync/singleflight/docs.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,7 @@
+// Package singleflight provides a duplicate function call suppression
+// mechanism. This package is a fork of the Go golang.org/x/sync/singleflight
+// package. The package is forked, because the package is a part of the unstable
+// and unversioned golang.org/x/sync module.
+//
+// https://github.com/golang/sync/tree/67f06af15bc961c363a7260195bcd53487529a21/singleflight
+package singleflight
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/internal/sync/singleflight/singleflight.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/internal/sync/singleflight/singleflight.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/internal/sync/singleflight/singleflight.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/internal/sync/singleflight/singleflight.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,210 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package singleflight
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"runtime"
+	"runtime/debug"
+	"sync"
+)
+
+// errGoexit indicates the runtime.Goexit was called in
+// the user given function.
+var errGoexit = errors.New("runtime.Goexit was called")
+
+// A panicError is an arbitrary value recovered from a panic
+// with the stack trace during the execution of given function.
+type panicError struct {
+	value interface{}
+	stack []byte
+}
+
+// Error implements error interface.
+func (p *panicError) Error() string {
+	return fmt.Sprintf("%v\n\n%s", p.value, p.stack)
+}
+
+func newPanicError(v interface{}) error {
+	stack := debug.Stack()
+
+	// The first line of the stack trace is of the form "goroutine N [status]:"
+	// but by the time the panic reaches Do the goroutine may no longer exist
+	// and its status will have changed. Trim out the misleading line.
+	if line := bytes.IndexByte(stack[:], '\n'); line >= 0 {
+		stack = stack[line+1:]
+	}
+	return &panicError{value: v, stack: stack}
+}
+
+// call is an in-flight or completed singleflight.Do call
+type call struct {
+	wg sync.WaitGroup
+
+	// These fields are written once before the WaitGroup is done
+	// and are only read after the WaitGroup is done.
+	val interface{}
+	err error
+
+	// forgotten indicates whether Forget was called with this call's key
+	// while the call was still in flight.
+	forgotten bool
+
+	// These fields are read and written with the singleflight
+	// mutex held before the WaitGroup is done, and are read but
+	// not written after the WaitGroup is done.
+	dups  int
+	chans []chan<- Result
+}
+
+// Group represents a class of work and forms a namespace in
+// which units of work can be executed with duplicate suppression.
+type Group struct {
+	mu sync.Mutex       // protects m
+	m  map[string]*call // lazily initialized
+}
+
+// Result holds the results of Do, so they can be passed
+// on a channel.
+type Result struct {
+	Val    interface{}
+	Err    error
+	Shared bool
+}
+
+// Do executes and returns the results of the given function, making
+// sure that only one execution is in-flight for a given key at a
+// time. If a duplicate comes in, the duplicate caller waits for the
+// original to complete and receives the same results.
+// The return value shared indicates whether v was given to multiple callers.
+func (g *Group) Do(key string, fn func() (interface{}, error)) (v interface{}, err error, shared bool) {
+	g.mu.Lock()
+	if g.m == nil {
+		g.m = make(map[string]*call)
+	}
+	if c, ok := g.m[key]; ok {
+		c.dups++
+		g.mu.Unlock()
+		c.wg.Wait()
+
+		if e, ok := c.err.(*panicError); ok {
+			panic(e)
+		} else if c.err == errGoexit {
+			runtime.Goexit()
+		}
+		return c.val, c.err, true
+	}
+	c := new(call)
+	c.wg.Add(1)
+	g.m[key] = c
+	g.mu.Unlock()
+
+	g.doCall(c, key, fn)
+	return c.val, c.err, c.dups > 0
+}
+
+// DoChan is like Do but returns a channel that will receive the
+// results when they are ready.
+//
+// The returned channel will not be closed.
+func (g *Group) DoChan(key string, fn func() (interface{}, error)) <-chan Result {
+	ch := make(chan Result, 1)
+	g.mu.Lock()
+	if g.m == nil {
+		g.m = make(map[string]*call)
+	}
+	if c, ok := g.m[key]; ok {
+		c.dups++
+		c.chans = append(c.chans, ch)
+		g.mu.Unlock()
+		return ch
+	}
+	c := &call{chans: []chan<- Result{ch}}
+	c.wg.Add(1)
+	g.m[key] = c
+	g.mu.Unlock()
+
+	go g.doCall(c, key, fn)
+
+	return ch
+}
+
+// doCall handles the single call for a key.
+func (g *Group) doCall(c *call, key string, fn func() (interface{}, error)) {
+	normalReturn := false
+	recovered := false
+
+	// use double-defer to distinguish panic from runtime.Goexit,
+	// more details see https://golang.org/cl/134395
+	defer func() {
+		// the given function invoked runtime.Goexit
+		if !normalReturn && !recovered {
+			c.err = errGoexit
+		}
+
+		c.wg.Done()
+		g.mu.Lock()
+		defer g.mu.Unlock()
+		if !c.forgotten {
+			delete(g.m, key)
+		}
+
+		if e, ok := c.err.(*panicError); ok {
+			// In order to prevent the waiting channels from being blocked forever,
+			// needs to ensure that this panic cannot be recovered.
+			if len(c.chans) > 0 {
+				go panic(e)
+				select {} // Keep this goroutine around so that it will appear in the crash dump.
+			} else {
+				panic(e)
+			}
+		} else if c.err == errGoexit {
+			// Already in the process of goexit, no need to call again
+		} else {
+			// Normal return
+			for _, ch := range c.chans {
+				ch <- Result{c.val, c.err, c.dups > 0}
+			}
+		}
+	}()
+
+	func() {
+		defer func() {
+			if !normalReturn {
+				// Ideally, we would wait to take a stack trace until we've determined
+				// whether this is a panic or a runtime.Goexit.
+				//
+				// Unfortunately, the only way we can distinguish the two is to see
+				// whether the recover stopped the goroutine from terminating, and by
+				// the time we know that, the part of the stack trace relevant to the
+				// panic has been discarded.
+				if r := recover(); r != nil {
+					c.err = newPanicError(r)
+				}
+			}
+		}()
+
+		c.val, c.err = fn()
+		normalReturn = true
+	}()
+
+	if !normalReturn {
+		recovered = true
+	}
+}
+
+// Forget tells the singleflight to forget about a key.  Future calls
+// to Do for this key will call the function rather than waiting for
+// an earlier call to complete.
+func (g *Group) Forget(key string) {
+	g.mu.Lock()
+	if c, ok := g.m[key]; ok {
+		c.forgotten = true
+	}
+	delete(g.m, key)
+	g.mu.Unlock()
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/internal/timeconv/duration.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/internal/timeconv/duration.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/internal/timeconv/duration.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/internal/timeconv/duration.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,13 @@
+package timeconv
+
+import "time"
+
+// FloatSecondsDur converts a fractional seconds to duration.
+func FloatSecondsDur(v float64) time.Duration {
+	return time.Duration(v * float64(time.Second))
+}
+
+// DurSecondsFloat converts a duration into fractional seconds.
+func DurSecondsFloat(d time.Duration) float64 {
+	return float64(d) / float64(time.Second)
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/CHANGELOG.md 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/CHANGELOG.md
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/CHANGELOG.md	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/CHANGELOG.md	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,140 @@
+# v1.11.3 (2024-06-28)
+
+* No change notes available for this release.
+
+# v1.11.2 (2024-03-29)
+
+* No change notes available for this release.
+
+# v1.11.1 (2024-02-21)
+
+* No change notes available for this release.
+
+# v1.11.0 (2024-02-13)
+
+* **Feature**: Bump minimum Go version to 1.20 per our language support policy.
+
+# v1.10.4 (2023-12-07)
+
+* No change notes available for this release.
+
+# v1.10.3 (2023-11-30)
+
+* No change notes available for this release.
+
+# v1.10.2 (2023-11-29)
+
+* No change notes available for this release.
+
+# v1.10.1 (2023-11-15)
+
+* No change notes available for this release.
+
+# v1.10.0 (2023-10-31)
+
+* **Feature**: **BREAKING CHANGE**: Bump minimum go version to 1.19 per the revised [go version support policy](https://aws.amazon.com/blogs/developer/aws-sdk-for-go-aligns-with-go-release-policy-on-supported-runtimes/).
+
+# v1.9.15 (2023-10-06)
+
+* No change notes available for this release.
+
+# v1.9.14 (2023-08-18)
+
+* No change notes available for this release.
+
+# v1.9.13 (2023-08-07)
+
+* No change notes available for this release.
+
+# v1.9.12 (2023-07-31)
+
+* No change notes available for this release.
+
+# v1.9.11 (2022-12-02)
+
+* No change notes available for this release.
+
+# v1.9.10 (2022-10-24)
+
+* No change notes available for this release.
+
+# v1.9.9 (2022-09-14)
+
+* No change notes available for this release.
+
+# v1.9.8 (2022-09-02)
+
+* No change notes available for this release.
+
+# v1.9.7 (2022-08-31)
+
+* No change notes available for this release.
+
+# v1.9.6 (2022-08-29)
+
+* No change notes available for this release.
+
+# v1.9.5 (2022-08-11)
+
+* No change notes available for this release.
+
+# v1.9.4 (2022-08-09)
+
+* No change notes available for this release.
+
+# v1.9.3 (2022-06-29)
+
+* No change notes available for this release.
+
+# v1.9.2 (2022-06-07)
+
+* No change notes available for this release.
+
+# v1.9.1 (2022-03-24)
+
+* No change notes available for this release.
+
+# v1.9.0 (2022-03-08)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+
+# v1.8.0 (2022-02-24)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+
+# v1.7.0 (2022-01-14)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+
+# v1.6.0 (2022-01-07)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+
+# v1.5.0 (2021-11-06)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+
+# v1.4.0 (2021-10-21)
+
+* **Feature**: Updated  to latest version
+
+# v1.3.0 (2021-08-27)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+
+# v1.2.2 (2021-08-04)
+
+* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version.
+
+# v1.2.1 (2021-07-15)
+
+* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version
+
+# v1.2.0 (2021-06-25)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+
+# v1.1.0 (2021-05-14)
+
+* **Feature**: Constant has been added to modules to enable runtime version inspection for reporting.
+
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/LICENSE.txt 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/LICENSE.txt
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/LICENSE.txt	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/LICENSE.txt	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/accept_encoding_gzip.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/accept_encoding_gzip.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/accept_encoding_gzip.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/accept_encoding_gzip.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,176 @@
+package acceptencoding
+
+import (
+	"compress/gzip"
+	"context"
+	"fmt"
+	"io"
+
+	"github.com/aws/smithy-go"
+	"github.com/aws/smithy-go/middleware"
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+const acceptEncodingHeaderKey = "Accept-Encoding"
+const contentEncodingHeaderKey = "Content-Encoding"
+
+// AddAcceptEncodingGzipOptions provides the options for the
+// AddAcceptEncodingGzip middleware setup.
+type AddAcceptEncodingGzipOptions struct {
+	Enable bool
+}
+
+// AddAcceptEncodingGzip explicitly adds handling for accept-encoding GZIP
+// middleware to the operation stack. This allows checksums to be correctly
+// computed without disabling GZIP support.
+func AddAcceptEncodingGzip(stack *middleware.Stack, options AddAcceptEncodingGzipOptions) error {
+	if options.Enable {
+		if err := stack.Finalize.Add(&EnableGzip{}, middleware.Before); err != nil {
+			return err
+		}
+		if err := stack.Deserialize.Insert(&DecompressGzip{}, "OperationDeserializer", middleware.After); err != nil {
+			return err
+		}
+		return nil
+	}
+
+	return stack.Finalize.Add(&DisableGzip{}, middleware.Before)
+}
+
+// DisableGzip provides the middleware that will
+// disable the underlying http client automatically enabling for gzip
+// decompress content-encoding support.
+type DisableGzip struct{}
+
+// ID returns the id for the middleware.
+func (*DisableGzip) ID() string {
+	return "DisableAcceptEncodingGzip"
+}
+
+// HandleFinalize implements the FinalizeMiddleware interface.
+func (*DisableGzip) HandleFinalize(
+	ctx context.Context, input middleware.FinalizeInput, next middleware.FinalizeHandler,
+) (
+	output middleware.FinalizeOutput, metadata middleware.Metadata, err error,
+) {
+	req, ok := input.Request.(*smithyhttp.Request)
+	if !ok {
+		return output, metadata, &smithy.SerializationError{
+			Err: fmt.Errorf("unknown request type %T", input.Request),
+		}
+	}
+
+	// Explicitly enable gzip support, this will prevent the http client from
+	// auto extracting the zipped content.
+	req.Header.Set(acceptEncodingHeaderKey, "identity")
+
+	return next.HandleFinalize(ctx, input)
+}
+
+// EnableGzip provides a middleware to enable support for
+// gzip responses, with manual decompression. This prevents the underlying HTTP
+// client from performing the gzip decompression automatically.
+type EnableGzip struct{}
+
+// ID returns the id for the middleware.
+func (*EnableGzip) ID() string {
+	return "AcceptEncodingGzip"
+}
+
+// HandleFinalize implements the FinalizeMiddleware interface.
+func (*EnableGzip) HandleFinalize(
+	ctx context.Context, input middleware.FinalizeInput, next middleware.FinalizeHandler,
+) (
+	output middleware.FinalizeOutput, metadata middleware.Metadata, err error,
+) {
+	req, ok := input.Request.(*smithyhttp.Request)
+	if !ok {
+		return output, metadata, &smithy.SerializationError{
+			Err: fmt.Errorf("unknown request type %T", input.Request),
+		}
+	}
+
+	// Explicitly enable gzip support, this will prevent the http client from
+	// auto extracting the zipped content.
+	req.Header.Set(acceptEncodingHeaderKey, "gzip")
+
+	return next.HandleFinalize(ctx, input)
+}
+
+// DecompressGzip provides the middleware for decompressing a gzip
+// response from the service.
+type DecompressGzip struct{}
+
+// ID returns the id for the middleware.
+func (*DecompressGzip) ID() string {
+	return "DecompressGzip"
+}
+
+// HandleDeserialize implements the DeserializeMiddlware interface.
+func (*DecompressGzip) HandleDeserialize(
+	ctx context.Context, input middleware.DeserializeInput, next middleware.DeserializeHandler,
+) (
+	output middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+	output, metadata, err = next.HandleDeserialize(ctx, input)
+	if err != nil {
+		return output, metadata, err
+	}
+
+	resp, ok := output.RawResponse.(*smithyhttp.Response)
+	if !ok {
+		return output, metadata, &smithy.DeserializationError{
+			Err: fmt.Errorf("unknown response type %T", output.RawResponse),
+		}
+	}
+	if v := resp.Header.Get(contentEncodingHeaderKey); v != "gzip" {
+		return output, metadata, err
+	}
+
+	// Clear content length since it will no longer be valid once the response
+	// body is decompressed.
+	resp.Header.Del("Content-Length")
+	resp.ContentLength = -1
+
+	resp.Body = wrapGzipReader(resp.Body)
+
+	return output, metadata, err
+}
+
+type gzipReader struct {
+	reader io.ReadCloser
+	gzip   *gzip.Reader
+}
+
+func wrapGzipReader(reader io.ReadCloser) *gzipReader {
+	return &gzipReader{
+		reader: reader,
+	}
+}
+
+// Read wraps the gzip reader around the underlying io.Reader to extract the
+// response bytes on the fly.
+func (g *gzipReader) Read(b []byte) (n int, err error) {
+	if g.gzip == nil {
+		g.gzip, err = gzip.NewReader(g.reader)
+		if err != nil {
+			g.gzip = nil // ensure uninitialized gzip value isn't used in close.
+			return 0, fmt.Errorf("failed to decompress gzip response, %w", err)
+		}
+	}
+
+	return g.gzip.Read(b)
+}
+
+func (g *gzipReader) Close() error {
+	if g.gzip == nil {
+		return nil
+	}
+
+	if err := g.gzip.Close(); err != nil {
+		g.reader.Close()
+		return fmt.Errorf("failed to decompress gzip response, %w", err)
+	}
+
+	return g.reader.Close()
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/doc.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/doc.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/doc.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/doc.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,22 @@
+/*
+Package acceptencoding provides customizations associated with Accept Encoding Header.
+
+# Accept encoding gzip
+
+The Go HTTP client automatically supports accept-encoding and content-encoding
+gzip by default. This default behavior is not desired by the SDK, and prevents
+validating the response body's checksum. To prevent this the SDK must manually
+control usage of content-encoding gzip.
+
+To control content-encoding, the SDK must always set the `Accept-Encoding`
+header to a value. This prevents the HTTP client from using gzip automatically.
+When gzip is enabled on the API client, the SDK's customization will control
+decompressing the gzip data in order to not break the checksum validation. When
+gzip is disabled, the API client will disable gzip, preventing the HTTP
+client's default behavior.
+
+An `EnableAcceptEncodingGzip` option may or may not be present depending on the client using
+the below middleware. The option if present can be used to enable auto decompressing
+gzip by the SDK.
+*/
+package acceptencoding
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/go_module_metadata.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/go_module_metadata.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/go_module_metadata.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/go_module_metadata.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,6 @@
+// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT.
+
+package acceptencoding
+
+// goModuleVersion is the tagged release for this module
+const goModuleVersion = "1.11.3"
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/CHANGELOG.md 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/CHANGELOG.md
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/CHANGELOG.md	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/CHANGELOG.md	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,346 @@
+# v1.11.17 (2024-07-10.2)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.16 (2024-07-10)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.15 (2024-06-28)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.14 (2024-06-19)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.13 (2024-06-18)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.12 (2024-06-17)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.11 (2024-06-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.10 (2024-06-03)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.9 (2024-05-16)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.8 (2024-05-15)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.7 (2024-03-29)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.6 (2024-03-18)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.5 (2024-03-07)
+
+* **Bug Fix**: Remove dependency on go-cmp.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.4 (2024-03-05)
+
+* **Bug Fix**: Restore typo'd API `AddAsIsInternalPresigingMiddleware` as an alias for backwards compatibility.
+
+# v1.11.3 (2024-03-04)
+
+* **Bug Fix**: Correct a typo in internal AddAsIsPresigningMiddleware API.
+
+# v1.11.2 (2024-02-23)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.1 (2024-02-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.0 (2024-02-13)
+
+* **Feature**: Bump minimum Go version to 1.20 per our language support policy.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.10.10 (2024-01-04)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.10.9 (2023-12-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.10.8 (2023-12-01)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.10.7 (2023-11-30)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.10.6 (2023-11-29)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.10.5 (2023-11-28.2)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.10.4 (2023-11-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.10.3 (2023-11-15)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.10.2 (2023-11-09)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.10.1 (2023-11-01)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.10.0 (2023-10-31)
+
+* **Feature**: **BREAKING CHANGE**: Bump minimum go version to 1.19 per the revised [go version support policy](https://aws.amazon.com/blogs/developer/aws-sdk-for-go-aligns-with-go-release-policy-on-supported-runtimes/).
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.9.37 (2023-10-12)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.9.36 (2023-10-06)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.9.35 (2023-08-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.9.34 (2023-08-18)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.9.33 (2023-08-17)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.9.32 (2023-08-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.9.31 (2023-07-31)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.9.30 (2023-07-28)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.9.29 (2023-07-13)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.9.28 (2023-06-13)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.9.27 (2023-04-24)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.9.26 (2023-04-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.9.25 (2023-03-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.9.24 (2023-03-10)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.9.23 (2023-02-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.9.22 (2023-02-03)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.9.21 (2022-12-15)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.9.20 (2022-12-02)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.9.19 (2022-10-24)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.9.18 (2022-10-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.9.17 (2022-09-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.9.16 (2022-09-14)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.9.15 (2022-09-02)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.9.14 (2022-08-31)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.9.13 (2022-08-29)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.9.12 (2022-08-11)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.9.11 (2022-08-09)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.9.10 (2022-08-08)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.9.9 (2022-08-01)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.9.8 (2022-07-05)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.9.7 (2022-06-29)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.9.6 (2022-06-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.9.5 (2022-05-17)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.9.4 (2022-04-25)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.9.3 (2022-03-30)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.9.2 (2022-03-24)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.9.1 (2022-03-23)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.9.0 (2022-03-08)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.8.0 (2022-02-24)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.7.0 (2022-01-14)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.6.0 (2022-01-07)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.5.2 (2021-12-02)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.5.1 (2021-11-19)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.5.0 (2021-11-06)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.4.0 (2021-10-21)
+
+* **Feature**: Updated  to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.2 (2021-10-11)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.1 (2021-09-17)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.0 (2021-08-27)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.2.3 (2021-08-19)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.2.2 (2021-08-04)
+
+* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.2.1 (2021-07-15)
+
+* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.2.0 (2021-06-25)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.1 (2021-05-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.0 (2021-05-14)
+
+* **Feature**: Constant has been added to modules to enable runtime version inspection for reporting.
+* **Dependency Update**: Updated to the latest SDK module versions
+
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/LICENSE.txt 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/LICENSE.txt
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/LICENSE.txt	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/LICENSE.txt	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/context.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/context.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/context.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/context.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,56 @@
+package presignedurl
+
+import (
+	"context"
+
+	"github.com/aws/smithy-go/middleware"
+)
+
+// WithIsPresigning adds the isPresigning sentinel value to a context to signal
+// that the middleware stack is using the presign flow.
+//
+// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues
+// to clear all stack values.
+func WithIsPresigning(ctx context.Context) context.Context {
+	return middleware.WithStackValue(ctx, isPresigningKey{}, true)
+}
+
+// GetIsPresigning returns if the context contains the isPresigning sentinel
+// value for presigning flows.
+//
+// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues
+// to clear all stack values.
+func GetIsPresigning(ctx context.Context) bool {
+	v, _ := middleware.GetStackValue(ctx, isPresigningKey{}).(bool)
+	return v
+}
+
+type isPresigningKey struct{}
+
+// AddAsIsPresigningMiddleware adds a middleware to the head of the stack that
+// will update the stack's context to be flagged as being invoked for the
+// purpose of presigning.
+func AddAsIsPresigningMiddleware(stack *middleware.Stack) error {
+	return stack.Initialize.Add(asIsPresigningMiddleware{}, middleware.Before)
+}
+
+// AddAsIsPresigingMiddleware is an alias for backwards compatibility.
+//
+// Deprecated: This API was released with a typo. Use
+// [AddAsIsPresigningMiddleware] instead.
+func AddAsIsPresigingMiddleware(stack *middleware.Stack) error {
+	return AddAsIsPresigningMiddleware(stack)
+}
+
+type asIsPresigningMiddleware struct{}
+
+func (asIsPresigningMiddleware) ID() string { return "AsIsPresigningMiddleware" }
+
+func (asIsPresigningMiddleware) HandleInitialize(
+	ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler,
+) (
+	out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+	ctx = WithIsPresigning(ctx)
+	return next.HandleInitialize(ctx, in)
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/doc.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/doc.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/doc.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/doc.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,3 @@
+// Package presignedurl provides the customizations for API clients to fill in
+// presigned URLs into input parameters.
+package presignedurl
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/go_module_metadata.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/go_module_metadata.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/go_module_metadata.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/go_module_metadata.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,6 @@
+// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT.
+
+package presignedurl
+
+// goModuleVersion is the tagged release for this module
+const goModuleVersion = "1.11.17"
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/middleware.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/middleware.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/middleware.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/middleware.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,110 @@
+package presignedurl
+
+import (
+	"context"
+	"fmt"
+
+	awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+	v4 "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+
+	"github.com/aws/smithy-go/middleware"
+)
+
+// URLPresigner provides the interface to presign the input parameters in to a
+// presigned URL.
+type URLPresigner interface {
+	// PresignURL presigns a URL.
+	PresignURL(ctx context.Context, srcRegion string, params interface{}) (*v4.PresignedHTTPRequest, error)
+}
+
+// ParameterAccessor provides an collection of accessor to for retrieving and
+// setting the values needed to PresignedURL generation
+type ParameterAccessor struct {
+	// GetPresignedURL accessor points to a function that retrieves a presigned url if present
+	GetPresignedURL func(interface{}) (string, bool, error)
+
+	// GetSourceRegion accessor points to a function that retrieves source region for presigned url
+	GetSourceRegion func(interface{}) (string, bool, error)
+
+	// CopyInput accessor points to a function that takes in an input, and returns a copy.
+	CopyInput func(interface{}) (interface{}, error)
+
+	// SetDestinationRegion accessor points to a function that sets destination region on api input struct
+	SetDestinationRegion func(interface{}, string) error
+
+	// SetPresignedURL accessor points to a function that sets presigned url on api input struct
+	SetPresignedURL func(interface{}, string) error
+}
+
+// Options provides the set of options needed by the presigned URL middleware.
+type Options struct {
+	// Accessor are the parameter accessors used by this middleware
+	Accessor ParameterAccessor
+
+	// Presigner is the URLPresigner used by the middleware
+	Presigner URLPresigner
+}
+
+// AddMiddleware adds the Presign URL middleware to the middleware stack.
+func AddMiddleware(stack *middleware.Stack, opts Options) error {
+	return stack.Initialize.Add(&presign{options: opts}, middleware.Before)
+}
+
+// RemoveMiddleware removes the Presign URL middleware from the stack.
+func RemoveMiddleware(stack *middleware.Stack) error {
+	_, err := stack.Initialize.Remove((*presign)(nil).ID())
+	return err
+}
+
+type presign struct {
+	options Options
+}
+
+func (m *presign) ID() string { return "Presign" }
+
+func (m *presign) HandleInitialize(
+	ctx context.Context, input middleware.InitializeInput, next middleware.InitializeHandler,
+) (
+	out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+	// If PresignedURL is already set ignore middleware.
+	if _, ok, err := m.options.Accessor.GetPresignedURL(input.Parameters); err != nil {
+		return out, metadata, fmt.Errorf("presign middleware failed, %w", err)
+	} else if ok {
+		return next.HandleInitialize(ctx, input)
+	}
+
+	// If have source region is not set ignore middleware.
+	srcRegion, ok, err := m.options.Accessor.GetSourceRegion(input.Parameters)
+	if err != nil {
+		return out, metadata, fmt.Errorf("presign middleware failed, %w", err)
+	} else if !ok || len(srcRegion) == 0 {
+		return next.HandleInitialize(ctx, input)
+	}
+
+	// Create a copy of the original input so the destination region value can
+	// be added. This ensures that value does not leak into the original
+	// request parameters.
+	paramCpy, err := m.options.Accessor.CopyInput(input.Parameters)
+	if err != nil {
+		return out, metadata, fmt.Errorf("unable to create presigned URL, %w", err)
+	}
+
+	// Destination region is the API client's configured region.
+	dstRegion := awsmiddleware.GetRegion(ctx)
+	if err = m.options.Accessor.SetDestinationRegion(paramCpy, dstRegion); err != nil {
+		return out, metadata, fmt.Errorf("presign middleware failed, %w", err)
+	}
+
+	presignedReq, err := m.options.Presigner.PresignURL(ctx, srcRegion, paramCpy)
+	if err != nil {
+		return out, metadata, fmt.Errorf("unable to create presigned URL, %w", err)
+	}
+
+	// Update the original input with the presigned URL value.
+	if err = m.options.Accessor.SetPresignedURL(input.Parameters, presignedReq.URL); err != nil {
+		return out, metadata, fmt.Errorf("presign middleware failed, %w", err)
+	}
+
+	return next.HandleInitialize(ctx, input)
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/service/sso/CHANGELOG.md 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/service/sso/CHANGELOG.md
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/service/sso/CHANGELOG.md	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/service/sso/CHANGELOG.md	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,475 @@
+# v1.22.4 (2024-07-18)
+
+* No change notes available for this release.
+
+# v1.22.3 (2024-07-10.2)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.22.2 (2024-07-10)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.22.1 (2024-06-28)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.22.0 (2024-06-26)
+
+* **Feature**: Support list-of-string endpoint parameter.
+
+# v1.21.1 (2024-06-19)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.21.0 (2024-06-18)
+
+* **Feature**: Track usage of various AWS SDK features in user-agent string.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.20.12 (2024-06-17)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.20.11 (2024-06-07)
+
+* **Bug Fix**: Add clock skew correction on all service clients
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.20.10 (2024-06-03)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.20.9 (2024-05-23)
+
+* No change notes available for this release.
+
+# v1.20.8 (2024-05-16)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.20.7 (2024-05-15)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.20.6 (2024-05-08)
+
+* **Bug Fix**: GoDoc improvement
+
+# v1.20.5 (2024-04-05)
+
+* No change notes available for this release.
+
+# v1.20.4 (2024-03-29)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.20.3 (2024-03-18)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.20.2 (2024-03-07)
+
+* **Bug Fix**: Remove dependency on go-cmp.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.20.1 (2024-02-23)
+
+* **Bug Fix**: Move all common, SDK-side middleware stack ops into the service client module to prevent cross-module compatibility issues in the future.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.20.0 (2024-02-22)
+
+* **Feature**: Add middleware stack snapshot tests.
+
+# v1.19.2 (2024-02-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.19.1 (2024-02-20)
+
+* **Bug Fix**: When sourcing values for a service's `EndpointParameters`, the lack of a configured region (i.e. `options.Region == ""`) will now translate to a `nil` value for `EndpointParameters.Region` instead of a pointer to the empty string `""`. This will result in a much more explicit error when calling an operation instead of an obscure hostname lookup failure.
+
+# v1.19.0 (2024-02-13)
+
+* **Feature**: Bump minimum Go version to 1.20 per our language support policy.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.7 (2024-01-18)
+
+* No change notes available for this release.
+
+# v1.18.6 (2024-01-04)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.5 (2023-12-08)
+
+* **Bug Fix**: Reinstate presence of default Retryer in functional options, but still respect max attempts set therein.
+
+# v1.18.4 (2023-12-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.3 (2023-12-06)
+
+* **Bug Fix**: Restore pre-refactor auth behavior where all operations could technically be performed anonymously.
+
+# v1.18.2 (2023-12-01)
+
+* **Bug Fix**: Correct wrapping of errors in authentication workflow.
+* **Bug Fix**: Correctly recognize cache-wrapped instances of AnonymousCredentials at client construction.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.1 (2023-11-30)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.0 (2023-11-29)
+
+* **Feature**: Expose Options() accessor on service clients.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.5 (2023-11-28.2)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.4 (2023-11-28)
+
+* **Bug Fix**: Respect setting RetryMaxAttempts in functional options at client construction.
+
+# v1.17.3 (2023-11-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.2 (2023-11-15)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.1 (2023-11-09)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.0 (2023-11-01)
+
+* **Feature**: Adds support for configured endpoints via environment variables and the AWS shared configuration file.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.0 (2023-10-31)
+
+* **Feature**: **BREAKING CHANGE**: Bump minimum go version to 1.19 per the revised [go version support policy](https://aws.amazon.com/blogs/developer/aws-sdk-for-go-aligns-with-go-release-policy-on-supported-runtimes/).
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.2 (2023-10-12)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.1 (2023-10-06)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.0 (2023-10-02)
+
+* **Feature**: Fix FIPS Endpoints in aws-us-gov.
+
+# v1.14.1 (2023-09-22)
+
+* No change notes available for this release.
+
+# v1.14.0 (2023-09-18)
+
+* **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service.
+* **Feature**: Adds several endpoint ruleset changes across all models: smaller rulesets, removed non-unique regional endpoints, fixes FIPS and DualStack endpoints, and make region not required in SDK::Endpoint. Additional breakfix to cognito-sync field.
+
+# v1.13.6 (2023-08-31)
+
+* No change notes available for this release.
+
+# v1.13.5 (2023-08-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.4 (2023-08-18)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.3 (2023-08-17)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.2 (2023-08-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.1 (2023-08-01)
+
+* No change notes available for this release.
+
+# v1.13.0 (2023-07-31)
+
+* **Feature**: Adds support for smithy-modeled endpoint resolution. A new rules-based endpoint resolution will be added to the SDK which will supercede and deprecate existing endpoint resolution. Specifically, EndpointResolver will be deprecated while BaseEndpoint and EndpointResolverV2 will take its place. For more information, please see the Endpoints section in our Developer Guide.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.14 (2023-07-28)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.13 (2023-07-13)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.12 (2023-06-15)
+
+* No change notes available for this release.
+
+# v1.12.11 (2023-06-13)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.10 (2023-05-04)
+
+* No change notes available for this release.
+
+# v1.12.9 (2023-04-24)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.8 (2023-04-10)
+
+* No change notes available for this release.
+
+# v1.12.7 (2023-04-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.6 (2023-03-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.5 (2023-03-10)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.4 (2023-02-22)
+
+* **Bug Fix**: Prevent nil pointer dereference when retrieving error codes.
+
+# v1.12.3 (2023-02-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.2 (2023-02-15)
+
+* **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+* **Bug Fix**: Correct error type parsing for restJson services.
+
+# v1.12.1 (2023-02-03)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.0 (2023-01-05)
+
+* **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401).
+
+# v1.11.28 (2022-12-20)
+
+* No change notes available for this release.
+
+# v1.11.27 (2022-12-15)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.26 (2022-12-02)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.25 (2022-10-24)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.24 (2022-10-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.23 (2022-09-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.22 (2022-09-14)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.21 (2022-09-02)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.20 (2022-08-31)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.19 (2022-08-30)
+
+* **Documentation**: Documentation updates for the AWS IAM Identity Center Portal CLI Reference.
+
+# v1.11.18 (2022-08-29)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.17 (2022-08-15)
+
+* **Documentation**: Documentation updates to reflect service rename - AWS IAM Identity Center (successor to AWS Single Sign-On)
+
+# v1.11.16 (2022-08-11)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.15 (2022-08-09)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.14 (2022-08-08)
+
+* **Documentation**: Documentation updates to reflect service rename - AWS IAM Identity Center (successor to AWS Single Sign-On)
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.13 (2022-08-01)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.12 (2022-07-11)
+
+* No change notes available for this release.
+
+# v1.11.11 (2022-07-05)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.10 (2022-06-29)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.9 (2022-06-16)
+
+* No change notes available for this release.
+
+# v1.11.8 (2022-06-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.7 (2022-05-26)
+
+* No change notes available for this release.
+
+# v1.11.6 (2022-05-25)
+
+* No change notes available for this release.
+
+# v1.11.5 (2022-05-17)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.4 (2022-04-25)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.3 (2022-03-30)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.2 (2022-03-24)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.1 (2022-03-23)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.0 (2022-03-08)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.10.0 (2022-02-24)
+
+* **Feature**: API client updated
+* **Feature**: Adds RetryMaxAttempts and RetryMod to API client Options. This allows the API clients' default Retryer to be configured from the shared configuration files or environment variables. Adding a new Retry mode of `Adaptive`. `Adaptive` retry mode is an experimental mode, adding client rate limiting when throttles reponses are received from an API. See [retry.AdaptiveMode](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/aws/retry#AdaptiveMode) for more details, and configuration options.
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.9.0 (2022-01-14)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Documentation**: Updated API models
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.8.0 (2022-01-07)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.7.0 (2021-12-21)
+
+* **Feature**: API Paginators now support specifying the initial starting token, and support stopping on empty string tokens.
+
+# v1.6.2 (2021-12-02)
+
+* **Bug Fix**: Fixes a bug that prevented aws.EndpointResolverWithOptions from being used by the service client. ([#1514](https://github.com/aws/aws-sdk-go-v2/pull/1514))
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.6.1 (2021-11-19)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.6.0 (2021-11-06)
+
+* **Feature**: The SDK now supports configuration of FIPS and DualStack endpoints using environment variables, shared configuration, or programmatically.
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Feature**: Updated service to latest API model.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.5.0 (2021-10-21)
+
+* **Feature**: Updated  to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.4.2 (2021-10-11)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.4.1 (2021-09-17)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.4.0 (2021-08-27)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.3 (2021-08-19)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.2 (2021-08-04)
+
+* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.1 (2021-07-15)
+
+* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.0 (2021-06-25)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.2.1 (2021-05-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.2.0 (2021-05-14)
+
+* **Feature**: Constant has been added to modules to enable runtime version inspection for reporting.
+* **Dependency Update**: Updated to the latest SDK module versions
+
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/service/sso/LICENSE.txt 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/service/sso/LICENSE.txt
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/service/sso/LICENSE.txt	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/service/sso/LICENSE.txt	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_client.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_client.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_client.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_client.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,627 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package sso
+
+import (
+	"context"
+	"fmt"
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/aws/aws-sdk-go-v2/aws/defaults"
+	awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+	"github.com/aws/aws-sdk-go-v2/aws/retry"
+	"github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+	awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http"
+	internalauth "github.com/aws/aws-sdk-go-v2/internal/auth"
+	internalauthsmithy "github.com/aws/aws-sdk-go-v2/internal/auth/smithy"
+	internalConfig "github.com/aws/aws-sdk-go-v2/internal/configsources"
+	internalmiddleware "github.com/aws/aws-sdk-go-v2/internal/middleware"
+	smithy "github.com/aws/smithy-go"
+	smithyauth "github.com/aws/smithy-go/auth"
+	smithydocument "github.com/aws/smithy-go/document"
+	"github.com/aws/smithy-go/logging"
+	"github.com/aws/smithy-go/middleware"
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+	"net"
+	"net/http"
+	"sync/atomic"
+	"time"
+)
+
+const ServiceID = "SSO"
+const ServiceAPIVersion = "2019-06-10"
+
+// Client provides the API client to make operations call for AWS Single Sign-On.
+type Client struct {
+	options Options
+
+	// Difference between the time reported by the server and the client
+	timeOffset *atomic.Int64
+}
+
+// New returns an initialized Client based on the functional options. Provide
+// additional functional options to further configure the behavior of the client,
+// such as changing the client's endpoint or adding custom middleware behavior.
+func New(options Options, optFns ...func(*Options)) *Client {
+	options = options.Copy()
+
+	resolveDefaultLogger(&options)
+
+	setResolvedDefaultsMode(&options)
+
+	resolveRetryer(&options)
+
+	resolveHTTPClient(&options)
+
+	resolveHTTPSignerV4(&options)
+
+	resolveEndpointResolverV2(&options)
+
+	resolveAuthSchemeResolver(&options)
+
+	for _, fn := range optFns {
+		fn(&options)
+	}
+
+	finalizeRetryMaxAttempts(&options)
+
+	ignoreAnonymousAuth(&options)
+
+	wrapWithAnonymousAuth(&options)
+
+	resolveAuthSchemes(&options)
+
+	client := &Client{
+		options: options,
+	}
+
+	initializeTimeOffsetResolver(client)
+
+	return client
+}
+
+// Options returns a copy of the client configuration.
+//
+// Callers SHOULD NOT perform mutations on any inner structures within client
+// config. Config overrides should instead be made on a per-operation basis through
+// functional options.
+func (c *Client) Options() Options {
+	return c.options.Copy()
+}
+
+func (c *Client) invokeOperation(ctx context.Context, opID string, params interface{}, optFns []func(*Options), stackFns ...func(*middleware.Stack, Options) error) (result interface{}, metadata middleware.Metadata, err error) {
+	ctx = middleware.ClearStackValues(ctx)
+	stack := middleware.NewStack(opID, smithyhttp.NewStackRequest)
+	options := c.options.Copy()
+
+	for _, fn := range optFns {
+		fn(&options)
+	}
+
+	finalizeOperationRetryMaxAttempts(&options, *c)
+
+	finalizeClientEndpointResolverOptions(&options)
+
+	for _, fn := range stackFns {
+		if err := fn(stack, options); err != nil {
+			return nil, metadata, err
+		}
+	}
+
+	for _, fn := range options.APIOptions {
+		if err := fn(stack); err != nil {
+			return nil, metadata, err
+		}
+	}
+
+	handler := middleware.DecorateHandler(smithyhttp.NewClientHandler(options.HTTPClient), stack)
+	result, metadata, err = handler.Handle(ctx, params)
+	if err != nil {
+		err = &smithy.OperationError{
+			ServiceID:     ServiceID,
+			OperationName: opID,
+			Err:           err,
+		}
+	}
+	return result, metadata, err
+}
+
+type operationInputKey struct{}
+
+func setOperationInput(ctx context.Context, input interface{}) context.Context {
+	return middleware.WithStackValue(ctx, operationInputKey{}, input)
+}
+
+func getOperationInput(ctx context.Context) interface{} {
+	return middleware.GetStackValue(ctx, operationInputKey{})
+}
+
+type setOperationInputMiddleware struct {
+}
+
+func (*setOperationInputMiddleware) ID() string {
+	return "setOperationInput"
+}
+
+func (m *setOperationInputMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	ctx = setOperationInput(ctx, in.Parameters)
+	return next.HandleSerialize(ctx, in)
+}
+
+func addProtocolFinalizerMiddlewares(stack *middleware.Stack, options Options, operation string) error {
+	if err := stack.Finalize.Add(&resolveAuthSchemeMiddleware{operation: operation, options: options}, middleware.Before); err != nil {
+		return fmt.Errorf("add ResolveAuthScheme: %w", err)
+	}
+	if err := stack.Finalize.Insert(&getIdentityMiddleware{options: options}, "ResolveAuthScheme", middleware.After); err != nil {
+		return fmt.Errorf("add GetIdentity: %v", err)
+	}
+	if err := stack.Finalize.Insert(&resolveEndpointV2Middleware{options: options}, "GetIdentity", middleware.After); err != nil {
+		return fmt.Errorf("add ResolveEndpointV2: %v", err)
+	}
+	if err := stack.Finalize.Insert(&signRequestMiddleware{}, "ResolveEndpointV2", middleware.After); err != nil {
+		return fmt.Errorf("add Signing: %w", err)
+	}
+	return nil
+}
+func resolveAuthSchemeResolver(options *Options) {
+	if options.AuthSchemeResolver == nil {
+		options.AuthSchemeResolver = &defaultAuthSchemeResolver{}
+	}
+}
+
+func resolveAuthSchemes(options *Options) {
+	if options.AuthSchemes == nil {
+		options.AuthSchemes = []smithyhttp.AuthScheme{
+			internalauth.NewHTTPAuthScheme("aws.auth#sigv4", &internalauthsmithy.V4SignerAdapter{
+				Signer:     options.HTTPSignerV4,
+				Logger:     options.Logger,
+				LogSigning: options.ClientLogMode.IsSigning(),
+			}),
+		}
+	}
+}
+
+type noSmithyDocumentSerde = smithydocument.NoSerde
+
+type legacyEndpointContextSetter struct {
+	LegacyResolver EndpointResolver
+}
+
+func (*legacyEndpointContextSetter) ID() string {
+	return "legacyEndpointContextSetter"
+}
+
+func (m *legacyEndpointContextSetter) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+	out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+	if m.LegacyResolver != nil {
+		ctx = awsmiddleware.SetRequiresLegacyEndpoints(ctx, true)
+	}
+
+	return next.HandleInitialize(ctx, in)
+
+}
+func addlegacyEndpointContextSetter(stack *middleware.Stack, o Options) error {
+	return stack.Initialize.Add(&legacyEndpointContextSetter{
+		LegacyResolver: o.EndpointResolver,
+	}, middleware.Before)
+}
+
+func resolveDefaultLogger(o *Options) {
+	if o.Logger != nil {
+		return
+	}
+	o.Logger = logging.Nop{}
+}
+
+func addSetLoggerMiddleware(stack *middleware.Stack, o Options) error {
+	return middleware.AddSetLoggerMiddleware(stack, o.Logger)
+}
+
+func setResolvedDefaultsMode(o *Options) {
+	if len(o.resolvedDefaultsMode) > 0 {
+		return
+	}
+
+	var mode aws.DefaultsMode
+	mode.SetFromString(string(o.DefaultsMode))
+
+	if mode == aws.DefaultsModeAuto {
+		mode = defaults.ResolveDefaultsModeAuto(o.Region, o.RuntimeEnvironment)
+	}
+
+	o.resolvedDefaultsMode = mode
+}
+
+// NewFromConfig returns a new client from the provided config.
+func NewFromConfig(cfg aws.Config, optFns ...func(*Options)) *Client {
+	opts := Options{
+		Region:                cfg.Region,
+		DefaultsMode:          cfg.DefaultsMode,
+		RuntimeEnvironment:    cfg.RuntimeEnvironment,
+		HTTPClient:            cfg.HTTPClient,
+		Credentials:           cfg.Credentials,
+		APIOptions:            cfg.APIOptions,
+		Logger:                cfg.Logger,
+		ClientLogMode:         cfg.ClientLogMode,
+		AppID:                 cfg.AppID,
+		AccountIDEndpointMode: cfg.AccountIDEndpointMode,
+	}
+	resolveAWSRetryerProvider(cfg, &opts)
+	resolveAWSRetryMaxAttempts(cfg, &opts)
+	resolveAWSRetryMode(cfg, &opts)
+	resolveAWSEndpointResolver(cfg, &opts)
+	resolveUseDualStackEndpoint(cfg, &opts)
+	resolveUseFIPSEndpoint(cfg, &opts)
+	resolveBaseEndpoint(cfg, &opts)
+	return New(opts, optFns...)
+}
+
+func resolveHTTPClient(o *Options) {
+	var buildable *awshttp.BuildableClient
+
+	if o.HTTPClient != nil {
+		var ok bool
+		buildable, ok = o.HTTPClient.(*awshttp.BuildableClient)
+		if !ok {
+			return
+		}
+	} else {
+		buildable = awshttp.NewBuildableClient()
+	}
+
+	modeConfig, err := defaults.GetModeConfiguration(o.resolvedDefaultsMode)
+	if err == nil {
+		buildable = buildable.WithDialerOptions(func(dialer *net.Dialer) {
+			if dialerTimeout, ok := modeConfig.GetConnectTimeout(); ok {
+				dialer.Timeout = dialerTimeout
+			}
+		})
+
+		buildable = buildable.WithTransportOptions(func(transport *http.Transport) {
+			if tlsHandshakeTimeout, ok := modeConfig.GetTLSNegotiationTimeout(); ok {
+				transport.TLSHandshakeTimeout = tlsHandshakeTimeout
+			}
+		})
+	}
+
+	o.HTTPClient = buildable
+}
+
+func resolveRetryer(o *Options) {
+	if o.Retryer != nil {
+		return
+	}
+
+	if len(o.RetryMode) == 0 {
+		modeConfig, err := defaults.GetModeConfiguration(o.resolvedDefaultsMode)
+		if err == nil {
+			o.RetryMode = modeConfig.RetryMode
+		}
+	}
+	if len(o.RetryMode) == 0 {
+		o.RetryMode = aws.RetryModeStandard
+	}
+
+	var standardOptions []func(*retry.StandardOptions)
+	if v := o.RetryMaxAttempts; v != 0 {
+		standardOptions = append(standardOptions, func(so *retry.StandardOptions) {
+			so.MaxAttempts = v
+		})
+	}
+
+	switch o.RetryMode {
+	case aws.RetryModeAdaptive:
+		var adaptiveOptions []func(*retry.AdaptiveModeOptions)
+		if len(standardOptions) != 0 {
+			adaptiveOptions = append(adaptiveOptions, func(ao *retry.AdaptiveModeOptions) {
+				ao.StandardOptions = append(ao.StandardOptions, standardOptions...)
+			})
+		}
+		o.Retryer = retry.NewAdaptiveMode(adaptiveOptions...)
+
+	default:
+		o.Retryer = retry.NewStandard(standardOptions...)
+	}
+}
+
+func resolveAWSRetryerProvider(cfg aws.Config, o *Options) {
+	if cfg.Retryer == nil {
+		return
+	}
+	o.Retryer = cfg.Retryer()
+}
+
+func resolveAWSRetryMode(cfg aws.Config, o *Options) {
+	if len(cfg.RetryMode) == 0 {
+		return
+	}
+	o.RetryMode = cfg.RetryMode
+}
+func resolveAWSRetryMaxAttempts(cfg aws.Config, o *Options) {
+	if cfg.RetryMaxAttempts == 0 {
+		return
+	}
+	o.RetryMaxAttempts = cfg.RetryMaxAttempts
+}
+
+func finalizeRetryMaxAttempts(o *Options) {
+	if o.RetryMaxAttempts == 0 {
+		return
+	}
+
+	o.Retryer = retry.AddWithMaxAttempts(o.Retryer, o.RetryMaxAttempts)
+}
+
+func finalizeOperationRetryMaxAttempts(o *Options, client Client) {
+	if v := o.RetryMaxAttempts; v == 0 || v == client.options.RetryMaxAttempts {
+		return
+	}
+
+	o.Retryer = retry.AddWithMaxAttempts(o.Retryer, o.RetryMaxAttempts)
+}
+
+func resolveAWSEndpointResolver(cfg aws.Config, o *Options) {
+	if cfg.EndpointResolver == nil && cfg.EndpointResolverWithOptions == nil {
+		return
+	}
+	o.EndpointResolver = withEndpointResolver(cfg.EndpointResolver, cfg.EndpointResolverWithOptions)
+}
+
+func addClientUserAgent(stack *middleware.Stack, options Options) error {
+	ua, err := getOrAddRequestUserAgent(stack)
+	if err != nil {
+		return err
+	}
+
+	ua.AddSDKAgentKeyValue(awsmiddleware.APIMetadata, "sso", goModuleVersion)
+	if len(options.AppID) > 0 {
+		ua.AddSDKAgentKey(awsmiddleware.ApplicationIdentifier, options.AppID)
+	}
+
+	return nil
+}
+
+func getOrAddRequestUserAgent(stack *middleware.Stack) (*awsmiddleware.RequestUserAgent, error) {
+	id := (*awsmiddleware.RequestUserAgent)(nil).ID()
+	mw, ok := stack.Build.Get(id)
+	if !ok {
+		mw = awsmiddleware.NewRequestUserAgent()
+		if err := stack.Build.Add(mw, middleware.After); err != nil {
+			return nil, err
+		}
+	}
+
+	ua, ok := mw.(*awsmiddleware.RequestUserAgent)
+	if !ok {
+		return nil, fmt.Errorf("%T for %s middleware did not match expected type", mw, id)
+	}
+
+	return ua, nil
+}
+
+type HTTPSignerV4 interface {
+	SignHTTP(ctx context.Context, credentials aws.Credentials, r *http.Request, payloadHash string, service string, region string, signingTime time.Time, optFns ...func(*v4.SignerOptions)) error
+}
+
+func resolveHTTPSignerV4(o *Options) {
+	if o.HTTPSignerV4 != nil {
+		return
+	}
+	o.HTTPSignerV4 = newDefaultV4Signer(*o)
+}
+
+func newDefaultV4Signer(o Options) *v4.Signer {
+	return v4.NewSigner(func(so *v4.SignerOptions) {
+		so.Logger = o.Logger
+		so.LogSigning = o.ClientLogMode.IsSigning()
+	})
+}
+
+func addClientRequestID(stack *middleware.Stack) error {
+	return stack.Build.Add(&awsmiddleware.ClientRequestID{}, middleware.After)
+}
+
+func addComputeContentLength(stack *middleware.Stack) error {
+	return stack.Build.Add(&smithyhttp.ComputeContentLength{}, middleware.After)
+}
+
+func addRawResponseToMetadata(stack *middleware.Stack) error {
+	return stack.Deserialize.Add(&awsmiddleware.AddRawResponse{}, middleware.Before)
+}
+
+func addRecordResponseTiming(stack *middleware.Stack) error {
+	return stack.Deserialize.Add(&awsmiddleware.RecordResponseTiming{}, middleware.After)
+}
+func addStreamingEventsPayload(stack *middleware.Stack) error {
+	return stack.Finalize.Add(&v4.StreamingEventsPayload{}, middleware.Before)
+}
+
+func addUnsignedPayload(stack *middleware.Stack) error {
+	return stack.Finalize.Insert(&v4.UnsignedPayload{}, "ResolveEndpointV2", middleware.After)
+}
+
+func addComputePayloadSHA256(stack *middleware.Stack) error {
+	return stack.Finalize.Insert(&v4.ComputePayloadSHA256{}, "ResolveEndpointV2", middleware.After)
+}
+
+func addContentSHA256Header(stack *middleware.Stack) error {
+	return stack.Finalize.Insert(&v4.ContentSHA256Header{}, (*v4.ComputePayloadSHA256)(nil).ID(), middleware.After)
+}
+
+func addIsWaiterUserAgent(o *Options) {
+	o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error {
+		ua, err := getOrAddRequestUserAgent(stack)
+		if err != nil {
+			return err
+		}
+
+		ua.AddUserAgentFeature(awsmiddleware.UserAgentFeatureWaiter)
+		return nil
+	})
+}
+
+func addIsPaginatorUserAgent(o *Options) {
+	o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error {
+		ua, err := getOrAddRequestUserAgent(stack)
+		if err != nil {
+			return err
+		}
+
+		ua.AddUserAgentFeature(awsmiddleware.UserAgentFeaturePaginator)
+		return nil
+	})
+}
+
+func addRetry(stack *middleware.Stack, o Options) error {
+	attempt := retry.NewAttemptMiddleware(o.Retryer, smithyhttp.RequestCloner, func(m *retry.Attempt) {
+		m.LogAttempts = o.ClientLogMode.IsRetries()
+	})
+	if err := stack.Finalize.Insert(attempt, "Signing", middleware.Before); err != nil {
+		return err
+	}
+	if err := stack.Finalize.Insert(&retry.MetricsHeader{}, attempt.ID(), middleware.After); err != nil {
+		return err
+	}
+	return nil
+}
+
+// resolves dual-stack endpoint configuration
+func resolveUseDualStackEndpoint(cfg aws.Config, o *Options) error {
+	if len(cfg.ConfigSources) == 0 {
+		return nil
+	}
+	value, found, err := internalConfig.ResolveUseDualStackEndpoint(context.Background(), cfg.ConfigSources)
+	if err != nil {
+		return err
+	}
+	if found {
+		o.EndpointOptions.UseDualStackEndpoint = value
+	}
+	return nil
+}
+
+// resolves FIPS endpoint configuration
+func resolveUseFIPSEndpoint(cfg aws.Config, o *Options) error {
+	if len(cfg.ConfigSources) == 0 {
+		return nil
+	}
+	value, found, err := internalConfig.ResolveUseFIPSEndpoint(context.Background(), cfg.ConfigSources)
+	if err != nil {
+		return err
+	}
+	if found {
+		o.EndpointOptions.UseFIPSEndpoint = value
+	}
+	return nil
+}
+
+func resolveAccountID(identity smithyauth.Identity, mode aws.AccountIDEndpointMode) *string {
+	if mode == aws.AccountIDEndpointModeDisabled {
+		return nil
+	}
+
+	if ca, ok := identity.(*internalauthsmithy.CredentialsAdapter); ok && ca.Credentials.AccountID != "" {
+		return aws.String(ca.Credentials.AccountID)
+	}
+
+	return nil
+}
+
+func addTimeOffsetBuild(stack *middleware.Stack, c *Client) error {
+	mw := internalmiddleware.AddTimeOffsetMiddleware{Offset: c.timeOffset}
+	if err := stack.Build.Add(&mw, middleware.After); err != nil {
+		return err
+	}
+	return stack.Deserialize.Insert(&mw, "RecordResponseTiming", middleware.Before)
+}
+func initializeTimeOffsetResolver(c *Client) {
+	c.timeOffset = new(atomic.Int64)
+}
+
+func checkAccountID(identity smithyauth.Identity, mode aws.AccountIDEndpointMode) error {
+	switch mode {
+	case aws.AccountIDEndpointModeUnset:
+	case aws.AccountIDEndpointModePreferred:
+	case aws.AccountIDEndpointModeDisabled:
+	case aws.AccountIDEndpointModeRequired:
+		if ca, ok := identity.(*internalauthsmithy.CredentialsAdapter); !ok {
+			return fmt.Errorf("accountID is required but not set")
+		} else if ca.Credentials.AccountID == "" {
+			return fmt.Errorf("accountID is required but not set")
+		}
+	// default check in case invalid mode is configured through request config
+	default:
+		return fmt.Errorf("invalid accountID endpoint mode %s, must be preferred/required/disabled", mode)
+	}
+
+	return nil
+}
+
+func addUserAgentRetryMode(stack *middleware.Stack, options Options) error {
+	ua, err := getOrAddRequestUserAgent(stack)
+	if err != nil {
+		return err
+	}
+
+	switch options.Retryer.(type) {
+	case *retry.Standard:
+		ua.AddUserAgentFeature(awsmiddleware.UserAgentFeatureRetryModeStandard)
+	case *retry.AdaptiveMode:
+		ua.AddUserAgentFeature(awsmiddleware.UserAgentFeatureRetryModeAdaptive)
+	}
+	return nil
+}
+
+func addRecursionDetection(stack *middleware.Stack) error {
+	return stack.Build.Add(&awsmiddleware.RecursionDetection{}, middleware.After)
+}
+
+func addRequestIDRetrieverMiddleware(stack *middleware.Stack) error {
+	return stack.Deserialize.Insert(&awsmiddleware.RequestIDRetriever{}, "OperationDeserializer", middleware.Before)
+
+}
+
+func addResponseErrorMiddleware(stack *middleware.Stack) error {
+	return stack.Deserialize.Insert(&awshttp.ResponseErrorWrapper{}, "RequestIDRetriever", middleware.Before)
+
+}
+
+func addRequestResponseLogging(stack *middleware.Stack, o Options) error {
+	return stack.Deserialize.Add(&smithyhttp.RequestResponseLogger{
+		LogRequest:          o.ClientLogMode.IsRequest(),
+		LogRequestWithBody:  o.ClientLogMode.IsRequestWithBody(),
+		LogResponse:         o.ClientLogMode.IsResponse(),
+		LogResponseWithBody: o.ClientLogMode.IsResponseWithBody(),
+	}, middleware.After)
+}
+
+type disableHTTPSMiddleware struct {
+	DisableHTTPS bool
+}
+
+func (*disableHTTPSMiddleware) ID() string {
+	return "disableHTTPS"
+}
+
+func (m *disableHTTPSMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (
+	out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
+) {
+	req, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, fmt.Errorf("unknown transport type %T", in.Request)
+	}
+
+	if m.DisableHTTPS && !smithyhttp.GetHostnameImmutable(ctx) {
+		req.URL.Scheme = "http"
+	}
+
+	return next.HandleFinalize(ctx, in)
+}
+
+func addDisableHTTPSMiddleware(stack *middleware.Stack, o Options) error {
+	return stack.Finalize.Insert(&disableHTTPSMiddleware{
+		DisableHTTPS: o.EndpointOptions.DisableHTTPS,
+	}, "ResolveEndpointV2", middleware.After)
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_GetRoleCredentials.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_GetRoleCredentials.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_GetRoleCredentials.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_GetRoleCredentials.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,153 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package sso
+
+import (
+	"context"
+	"fmt"
+	awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+	"github.com/aws/aws-sdk-go-v2/service/sso/types"
+	"github.com/aws/smithy-go/middleware"
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Returns the STS short-term credentials for a given role name that is assigned
+// to the user.
+func (c *Client) GetRoleCredentials(ctx context.Context, params *GetRoleCredentialsInput, optFns ...func(*Options)) (*GetRoleCredentialsOutput, error) {
+	if params == nil {
+		params = &GetRoleCredentialsInput{}
+	}
+
+	result, metadata, err := c.invokeOperation(ctx, "GetRoleCredentials", params, optFns, c.addOperationGetRoleCredentialsMiddlewares)
+	if err != nil {
+		return nil, err
+	}
+
+	out := result.(*GetRoleCredentialsOutput)
+	out.ResultMetadata = metadata
+	return out, nil
+}
+
+type GetRoleCredentialsInput struct {
+
+	// The token issued by the CreateToken API call. For more information, see [CreateToken] in the
+	// IAM Identity Center OIDC API Reference Guide.
+	//
+	// [CreateToken]: https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html
+	//
+	// This member is required.
+	AccessToken *string
+
+	// The identifier for the AWS account that is assigned to the user.
+	//
+	// This member is required.
+	AccountId *string
+
+	// The friendly name of the role that is assigned to the user.
+	//
+	// This member is required.
+	RoleName *string
+
+	noSmithyDocumentSerde
+}
+
+type GetRoleCredentialsOutput struct {
+
+	// The credentials for the role that is assigned to the user.
+	RoleCredentials *types.RoleCredentials
+
+	// Metadata pertaining to the operation's result.
+	ResultMetadata middleware.Metadata
+
+	noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationGetRoleCredentialsMiddlewares(stack *middleware.Stack, options Options) (err error) {
+	if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+		return err
+	}
+	err = stack.Serialize.Add(&awsRestjson1_serializeOpGetRoleCredentials{}, middleware.After)
+	if err != nil {
+		return err
+	}
+	err = stack.Deserialize.Add(&awsRestjson1_deserializeOpGetRoleCredentials{}, middleware.After)
+	if err != nil {
+		return err
+	}
+	if err := addProtocolFinalizerMiddlewares(stack, options, "GetRoleCredentials"); err != nil {
+		return fmt.Errorf("add protocol finalizers: %v", err)
+	}
+
+	if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+		return err
+	}
+	if err = addSetLoggerMiddleware(stack, options); err != nil {
+		return err
+	}
+	if err = addClientRequestID(stack); err != nil {
+		return err
+	}
+	if err = addComputeContentLength(stack); err != nil {
+		return err
+	}
+	if err = addResolveEndpointMiddleware(stack, options); err != nil {
+		return err
+	}
+	if err = addRetry(stack, options); err != nil {
+		return err
+	}
+	if err = addRawResponseToMetadata(stack); err != nil {
+		return err
+	}
+	if err = addRecordResponseTiming(stack); err != nil {
+		return err
+	}
+	if err = addClientUserAgent(stack, options); err != nil {
+		return err
+	}
+	if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+		return err
+	}
+	if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+		return err
+	}
+	if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+		return err
+	}
+	if err = addTimeOffsetBuild(stack, c); err != nil {
+		return err
+	}
+	if err = addUserAgentRetryMode(stack, options); err != nil {
+		return err
+	}
+	if err = addOpGetRoleCredentialsValidationMiddleware(stack); err != nil {
+		return err
+	}
+	if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetRoleCredentials(options.Region), middleware.Before); err != nil {
+		return err
+	}
+	if err = addRecursionDetection(stack); err != nil {
+		return err
+	}
+	if err = addRequestIDRetrieverMiddleware(stack); err != nil {
+		return err
+	}
+	if err = addResponseErrorMiddleware(stack); err != nil {
+		return err
+	}
+	if err = addRequestResponseLogging(stack, options); err != nil {
+		return err
+	}
+	if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+		return err
+	}
+	return nil
+}
+
+func newServiceMetadataMiddleware_opGetRoleCredentials(region string) *awsmiddleware.RegisterServiceMetadata {
+	return &awsmiddleware.RegisterServiceMetadata{
+		Region:        region,
+		ServiceID:     ServiceID,
+		OperationName: "GetRoleCredentials",
+	}
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_ListAccountRoles.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_ListAccountRoles.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_ListAccountRoles.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_ListAccountRoles.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,251 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package sso
+
+import (
+	"context"
+	"fmt"
+	awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+	"github.com/aws/aws-sdk-go-v2/service/sso/types"
+	"github.com/aws/smithy-go/middleware"
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Lists all roles that are assigned to the user for a given AWS account.
+func (c *Client) ListAccountRoles(ctx context.Context, params *ListAccountRolesInput, optFns ...func(*Options)) (*ListAccountRolesOutput, error) {
+	if params == nil {
+		params = &ListAccountRolesInput{}
+	}
+
+	result, metadata, err := c.invokeOperation(ctx, "ListAccountRoles", params, optFns, c.addOperationListAccountRolesMiddlewares)
+	if err != nil {
+		return nil, err
+	}
+
+	out := result.(*ListAccountRolesOutput)
+	out.ResultMetadata = metadata
+	return out, nil
+}
+
+type ListAccountRolesInput struct {
+
+	// The token issued by the CreateToken API call. For more information, see [CreateToken] in the
+	// IAM Identity Center OIDC API Reference Guide.
+	//
+	// [CreateToken]: https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html
+	//
+	// This member is required.
+	AccessToken *string
+
+	// The identifier for the AWS account that is assigned to the user.
+	//
+	// This member is required.
+	AccountId *string
+
+	// The number of items that clients can request per page.
+	MaxResults *int32
+
+	// The page token from the previous response output when you request subsequent
+	// pages.
+	NextToken *string
+
+	noSmithyDocumentSerde
+}
+
+type ListAccountRolesOutput struct {
+
+	// The page token client that is used to retrieve the list of accounts.
+	NextToken *string
+
+	// A paginated response with the list of roles and the next token if more results
+	// are available.
+	RoleList []types.RoleInfo
+
+	// Metadata pertaining to the operation's result.
+	ResultMetadata middleware.Metadata
+
+	noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationListAccountRolesMiddlewares(stack *middleware.Stack, options Options) (err error) {
+	if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+		return err
+	}
+	err = stack.Serialize.Add(&awsRestjson1_serializeOpListAccountRoles{}, middleware.After)
+	if err != nil {
+		return err
+	}
+	err = stack.Deserialize.Add(&awsRestjson1_deserializeOpListAccountRoles{}, middleware.After)
+	if err != nil {
+		return err
+	}
+	if err := addProtocolFinalizerMiddlewares(stack, options, "ListAccountRoles"); err != nil {
+		return fmt.Errorf("add protocol finalizers: %v", err)
+	}
+
+	if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+		return err
+	}
+	if err = addSetLoggerMiddleware(stack, options); err != nil {
+		return err
+	}
+	if err = addClientRequestID(stack); err != nil {
+		return err
+	}
+	if err = addComputeContentLength(stack); err != nil {
+		return err
+	}
+	if err = addResolveEndpointMiddleware(stack, options); err != nil {
+		return err
+	}
+	if err = addRetry(stack, options); err != nil {
+		return err
+	}
+	if err = addRawResponseToMetadata(stack); err != nil {
+		return err
+	}
+	if err = addRecordResponseTiming(stack); err != nil {
+		return err
+	}
+	if err = addClientUserAgent(stack, options); err != nil {
+		return err
+	}
+	if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+		return err
+	}
+	if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+		return err
+	}
+	if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+		return err
+	}
+	if err = addTimeOffsetBuild(stack, c); err != nil {
+		return err
+	}
+	if err = addUserAgentRetryMode(stack, options); err != nil {
+		return err
+	}
+	if err = addOpListAccountRolesValidationMiddleware(stack); err != nil {
+		return err
+	}
+	if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListAccountRoles(options.Region), middleware.Before); err != nil {
+		return err
+	}
+	if err = addRecursionDetection(stack); err != nil {
+		return err
+	}
+	if err = addRequestIDRetrieverMiddleware(stack); err != nil {
+		return err
+	}
+	if err = addResponseErrorMiddleware(stack); err != nil {
+		return err
+	}
+	if err = addRequestResponseLogging(stack, options); err != nil {
+		return err
+	}
+	if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+		return err
+	}
+	return nil
+}
+
+// ListAccountRolesPaginatorOptions is the paginator options for ListAccountRoles
+type ListAccountRolesPaginatorOptions struct {
+	// The number of items that clients can request per page.
+	Limit int32
+
+	// Set to true if pagination should stop if the service returns a pagination token
+	// that matches the most recent token provided to the service.
+	StopOnDuplicateToken bool
+}
+
+// ListAccountRolesPaginator is a paginator for ListAccountRoles
+type ListAccountRolesPaginator struct {
+	options   ListAccountRolesPaginatorOptions
+	client    ListAccountRolesAPIClient
+	params    *ListAccountRolesInput
+	nextToken *string
+	firstPage bool
+}
+
+// NewListAccountRolesPaginator returns a new ListAccountRolesPaginator
+func NewListAccountRolesPaginator(client ListAccountRolesAPIClient, params *ListAccountRolesInput, optFns ...func(*ListAccountRolesPaginatorOptions)) *ListAccountRolesPaginator {
+	if params == nil {
+		params = &ListAccountRolesInput{}
+	}
+
+	options := ListAccountRolesPaginatorOptions{}
+	if params.MaxResults != nil {
+		options.Limit = *params.MaxResults
+	}
+
+	for _, fn := range optFns {
+		fn(&options)
+	}
+
+	return &ListAccountRolesPaginator{
+		options:   options,
+		client:    client,
+		params:    params,
+		firstPage: true,
+		nextToken: params.NextToken,
+	}
+}
+
+// HasMorePages returns a boolean indicating whether more pages are available
+func (p *ListAccountRolesPaginator) HasMorePages() bool {
+	return p.firstPage || (p.nextToken != nil && len(*p.nextToken) != 0)
+}
+
+// NextPage retrieves the next ListAccountRoles page.
+func (p *ListAccountRolesPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*ListAccountRolesOutput, error) {
+	if !p.HasMorePages() {
+		return nil, fmt.Errorf("no more pages available")
+	}
+
+	params := *p.params
+	params.NextToken = p.nextToken
+
+	var limit *int32
+	if p.options.Limit > 0 {
+		limit = &p.options.Limit
+	}
+	params.MaxResults = limit
+
+	optFns = append([]func(*Options){
+		addIsPaginatorUserAgent,
+	}, optFns...)
+	result, err := p.client.ListAccountRoles(ctx, &params, optFns...)
+	if err != nil {
+		return nil, err
+	}
+	p.firstPage = false
+
+	prevToken := p.nextToken
+	p.nextToken = result.NextToken
+
+	if p.options.StopOnDuplicateToken &&
+		prevToken != nil &&
+		p.nextToken != nil &&
+		*prevToken == *p.nextToken {
+		p.nextToken = nil
+	}
+
+	return result, nil
+}
+
+// ListAccountRolesAPIClient is a client that implements the ListAccountRoles
+// operation.
+type ListAccountRolesAPIClient interface {
+	ListAccountRoles(context.Context, *ListAccountRolesInput, ...func(*Options)) (*ListAccountRolesOutput, error)
+}
+
+var _ ListAccountRolesAPIClient = (*Client)(nil)
+
+func newServiceMetadataMiddleware_opListAccountRoles(region string) *awsmiddleware.RegisterServiceMetadata {
+	return &awsmiddleware.RegisterServiceMetadata{
+		Region:        region,
+		ServiceID:     ServiceID,
+		OperationName: "ListAccountRoles",
+	}
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_ListAccounts.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_ListAccounts.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_ListAccounts.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_ListAccounts.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,249 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package sso
+
+import (
+	"context"
+	"fmt"
+	awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+	"github.com/aws/aws-sdk-go-v2/service/sso/types"
+	"github.com/aws/smithy-go/middleware"
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Lists all AWS accounts assigned to the user. These AWS accounts are assigned by
+// the administrator of the account. For more information, see [Assign User Access]in the IAM Identity
+// Center User Guide. This operation returns a paginated response.
+//
+// [Assign User Access]: https://docs.aws.amazon.com/singlesignon/latest/userguide/useraccess.html#assignusers
+func (c *Client) ListAccounts(ctx context.Context, params *ListAccountsInput, optFns ...func(*Options)) (*ListAccountsOutput, error) {
+	if params == nil {
+		params = &ListAccountsInput{}
+	}
+
+	result, metadata, err := c.invokeOperation(ctx, "ListAccounts", params, optFns, c.addOperationListAccountsMiddlewares)
+	if err != nil {
+		return nil, err
+	}
+
+	out := result.(*ListAccountsOutput)
+	out.ResultMetadata = metadata
+	return out, nil
+}
+
+type ListAccountsInput struct {
+
+	// The token issued by the CreateToken API call. For more information, see [CreateToken] in the
+	// IAM Identity Center OIDC API Reference Guide.
+	//
+	// [CreateToken]: https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html
+	//
+	// This member is required.
+	AccessToken *string
+
+	// This is the number of items clients can request per page.
+	MaxResults *int32
+
+	// (Optional) When requesting subsequent pages, this is the page token from the
+	// previous response output.
+	NextToken *string
+
+	noSmithyDocumentSerde
+}
+
+type ListAccountsOutput struct {
+
+	// A paginated response with the list of account information and the next token if
+	// more results are available.
+	AccountList []types.AccountInfo
+
+	// The page token client that is used to retrieve the list of accounts.
+	NextToken *string
+
+	// Metadata pertaining to the operation's result.
+	ResultMetadata middleware.Metadata
+
+	noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationListAccountsMiddlewares(stack *middleware.Stack, options Options) (err error) {
+	if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+		return err
+	}
+	err = stack.Serialize.Add(&awsRestjson1_serializeOpListAccounts{}, middleware.After)
+	if err != nil {
+		return err
+	}
+	err = stack.Deserialize.Add(&awsRestjson1_deserializeOpListAccounts{}, middleware.After)
+	if err != nil {
+		return err
+	}
+	if err := addProtocolFinalizerMiddlewares(stack, options, "ListAccounts"); err != nil {
+		return fmt.Errorf("add protocol finalizers: %v", err)
+	}
+
+	if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+		return err
+	}
+	if err = addSetLoggerMiddleware(stack, options); err != nil {
+		return err
+	}
+	if err = addClientRequestID(stack); err != nil {
+		return err
+	}
+	if err = addComputeContentLength(stack); err != nil {
+		return err
+	}
+	if err = addResolveEndpointMiddleware(stack, options); err != nil {
+		return err
+	}
+	if err = addRetry(stack, options); err != nil {
+		return err
+	}
+	if err = addRawResponseToMetadata(stack); err != nil {
+		return err
+	}
+	if err = addRecordResponseTiming(stack); err != nil {
+		return err
+	}
+	if err = addClientUserAgent(stack, options); err != nil {
+		return err
+	}
+	if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+		return err
+	}
+	if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+		return err
+	}
+	if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+		return err
+	}
+	if err = addTimeOffsetBuild(stack, c); err != nil {
+		return err
+	}
+	if err = addUserAgentRetryMode(stack, options); err != nil {
+		return err
+	}
+	if err = addOpListAccountsValidationMiddleware(stack); err != nil {
+		return err
+	}
+	if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListAccounts(options.Region), middleware.Before); err != nil {
+		return err
+	}
+	if err = addRecursionDetection(stack); err != nil {
+		return err
+	}
+	if err = addRequestIDRetrieverMiddleware(stack); err != nil {
+		return err
+	}
+	if err = addResponseErrorMiddleware(stack); err != nil {
+		return err
+	}
+	if err = addRequestResponseLogging(stack, options); err != nil {
+		return err
+	}
+	if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+		return err
+	}
+	return nil
+}
+
+// ListAccountsPaginatorOptions is the paginator options for ListAccounts
+type ListAccountsPaginatorOptions struct {
+	// This is the number of items clients can request per page.
+	Limit int32
+
+	// Set to true if pagination should stop if the service returns a pagination token
+	// that matches the most recent token provided to the service.
+	StopOnDuplicateToken bool
+}
+
+// ListAccountsPaginator is a paginator for ListAccounts
+type ListAccountsPaginator struct {
+	options   ListAccountsPaginatorOptions
+	client    ListAccountsAPIClient
+	params    *ListAccountsInput
+	nextToken *string
+	firstPage bool
+}
+
+// NewListAccountsPaginator returns a new ListAccountsPaginator
+func NewListAccountsPaginator(client ListAccountsAPIClient, params *ListAccountsInput, optFns ...func(*ListAccountsPaginatorOptions)) *ListAccountsPaginator {
+	if params == nil {
+		params = &ListAccountsInput{}
+	}
+
+	options := ListAccountsPaginatorOptions{}
+	if params.MaxResults != nil {
+		options.Limit = *params.MaxResults
+	}
+
+	for _, fn := range optFns {
+		fn(&options)
+	}
+
+	return &ListAccountsPaginator{
+		options:   options,
+		client:    client,
+		params:    params,
+		firstPage: true,
+		nextToken: params.NextToken,
+	}
+}
+
+// HasMorePages returns a boolean indicating whether more pages are available
+func (p *ListAccountsPaginator) HasMorePages() bool {
+	return p.firstPage || (p.nextToken != nil && len(*p.nextToken) != 0)
+}
+
+// NextPage retrieves the next ListAccounts page.
+func (p *ListAccountsPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*ListAccountsOutput, error) {
+	if !p.HasMorePages() {
+		return nil, fmt.Errorf("no more pages available")
+	}
+
+	params := *p.params
+	params.NextToken = p.nextToken
+
+	var limit *int32
+	if p.options.Limit > 0 {
+		limit = &p.options.Limit
+	}
+	params.MaxResults = limit
+
+	optFns = append([]func(*Options){
+		addIsPaginatorUserAgent,
+	}, optFns...)
+	result, err := p.client.ListAccounts(ctx, &params, optFns...)
+	if err != nil {
+		return nil, err
+	}
+	p.firstPage = false
+
+	prevToken := p.nextToken
+	p.nextToken = result.NextToken
+
+	if p.options.StopOnDuplicateToken &&
+		prevToken != nil &&
+		p.nextToken != nil &&
+		*prevToken == *p.nextToken {
+		p.nextToken = nil
+	}
+
+	return result, nil
+}
+
+// ListAccountsAPIClient is a client that implements the ListAccounts operation.
+type ListAccountsAPIClient interface {
+	ListAccounts(context.Context, *ListAccountsInput, ...func(*Options)) (*ListAccountsOutput, error)
+}
+
+var _ ListAccountsAPIClient = (*Client)(nil)
+
+func newServiceMetadataMiddleware_opListAccounts(region string) *awsmiddleware.RegisterServiceMetadata {
+	return &awsmiddleware.RegisterServiceMetadata{
+		Region:        region,
+		ServiceID:     ServiceID,
+		OperationName: "ListAccounts",
+	}
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_Logout.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_Logout.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_Logout.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_Logout.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,152 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package sso
+
+import (
+	"context"
+	"fmt"
+	awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+	"github.com/aws/smithy-go/middleware"
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Removes the locally stored SSO tokens from the client-side cache and sends an
+// API call to the IAM Identity Center service to invalidate the corresponding
+// server-side IAM Identity Center sign in session.
+//
+// If a user uses IAM Identity Center to access the AWS CLI, the user’s IAM
+// Identity Center sign in session is used to obtain an IAM session, as specified
+// in the corresponding IAM Identity Center permission set. More specifically, IAM
+// Identity Center assumes an IAM role in the target account on behalf of the user,
+// and the corresponding temporary AWS credentials are returned to the client.
+//
+// After user logout, any existing IAM role sessions that were created by using
+// IAM Identity Center permission sets continue based on the duration configured in
+// the permission set. For more information, see [User authentications]in the IAM Identity Center User
+// Guide.
+//
+// [User authentications]: https://docs.aws.amazon.com/singlesignon/latest/userguide/authconcept.html
+func (c *Client) Logout(ctx context.Context, params *LogoutInput, optFns ...func(*Options)) (*LogoutOutput, error) {
+	if params == nil {
+		params = &LogoutInput{}
+	}
+
+	result, metadata, err := c.invokeOperation(ctx, "Logout", params, optFns, c.addOperationLogoutMiddlewares)
+	if err != nil {
+		return nil, err
+	}
+
+	out := result.(*LogoutOutput)
+	out.ResultMetadata = metadata
+	return out, nil
+}
+
+type LogoutInput struct {
+
+	// The token issued by the CreateToken API call. For more information, see [CreateToken] in the
+	// IAM Identity Center OIDC API Reference Guide.
+	//
+	// [CreateToken]: https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html
+	//
+	// This member is required.
+	AccessToken *string
+
+	noSmithyDocumentSerde
+}
+
+type LogoutOutput struct {
+	// Metadata pertaining to the operation's result.
+	ResultMetadata middleware.Metadata
+
+	noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationLogoutMiddlewares(stack *middleware.Stack, options Options) (err error) {
+	if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+		return err
+	}
+	err = stack.Serialize.Add(&awsRestjson1_serializeOpLogout{}, middleware.After)
+	if err != nil {
+		return err
+	}
+	err = stack.Deserialize.Add(&awsRestjson1_deserializeOpLogout{}, middleware.After)
+	if err != nil {
+		return err
+	}
+	if err := addProtocolFinalizerMiddlewares(stack, options, "Logout"); err != nil {
+		return fmt.Errorf("add protocol finalizers: %v", err)
+	}
+
+	if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+		return err
+	}
+	if err = addSetLoggerMiddleware(stack, options); err != nil {
+		return err
+	}
+	if err = addClientRequestID(stack); err != nil {
+		return err
+	}
+	if err = addComputeContentLength(stack); err != nil {
+		return err
+	}
+	if err = addResolveEndpointMiddleware(stack, options); err != nil {
+		return err
+	}
+	if err = addRetry(stack, options); err != nil {
+		return err
+	}
+	if err = addRawResponseToMetadata(stack); err != nil {
+		return err
+	}
+	if err = addRecordResponseTiming(stack); err != nil {
+		return err
+	}
+	if err = addClientUserAgent(stack, options); err != nil {
+		return err
+	}
+	if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+		return err
+	}
+	if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+		return err
+	}
+	if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+		return err
+	}
+	if err = addTimeOffsetBuild(stack, c); err != nil {
+		return err
+	}
+	if err = addUserAgentRetryMode(stack, options); err != nil {
+		return err
+	}
+	if err = addOpLogoutValidationMiddleware(stack); err != nil {
+		return err
+	}
+	if err = stack.Initialize.Add(newServiceMetadataMiddleware_opLogout(options.Region), middleware.Before); err != nil {
+		return err
+	}
+	if err = addRecursionDetection(stack); err != nil {
+		return err
+	}
+	if err = addRequestIDRetrieverMiddleware(stack); err != nil {
+		return err
+	}
+	if err = addResponseErrorMiddleware(stack); err != nil {
+		return err
+	}
+	if err = addRequestResponseLogging(stack, options); err != nil {
+		return err
+	}
+	if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+		return err
+	}
+	return nil
+}
+
+func newServiceMetadataMiddleware_opLogout(region string) *awsmiddleware.RegisterServiceMetadata {
+	return &awsmiddleware.RegisterServiceMetadata{
+		Region:        region,
+		ServiceID:     ServiceID,
+		OperationName: "Logout",
+	}
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/service/sso/auth.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/service/sso/auth.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/service/sso/auth.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/service/sso/auth.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,308 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package sso
+
+import (
+	"context"
+	"fmt"
+	awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+	smithy "github.com/aws/smithy-go"
+	smithyauth "github.com/aws/smithy-go/auth"
+	"github.com/aws/smithy-go/middleware"
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+func bindAuthParamsRegion(_ interface{}, params *AuthResolverParameters, _ interface{}, options Options) {
+	params.Region = options.Region
+}
+
+type setLegacyContextSigningOptionsMiddleware struct {
+}
+
+func (*setLegacyContextSigningOptionsMiddleware) ID() string {
+	return "setLegacyContextSigningOptions"
+}
+
+func (m *setLegacyContextSigningOptionsMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (
+	out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
+) {
+	rscheme := getResolvedAuthScheme(ctx)
+	schemeID := rscheme.Scheme.SchemeID()
+
+	if sn := awsmiddleware.GetSigningName(ctx); sn != "" {
+		if schemeID == "aws.auth#sigv4" {
+			smithyhttp.SetSigV4SigningName(&rscheme.SignerProperties, sn)
+		} else if schemeID == "aws.auth#sigv4a" {
+			smithyhttp.SetSigV4ASigningName(&rscheme.SignerProperties, sn)
+		}
+	}
+
+	if sr := awsmiddleware.GetSigningRegion(ctx); sr != "" {
+		if schemeID == "aws.auth#sigv4" {
+			smithyhttp.SetSigV4SigningRegion(&rscheme.SignerProperties, sr)
+		} else if schemeID == "aws.auth#sigv4a" {
+			smithyhttp.SetSigV4ASigningRegions(&rscheme.SignerProperties, []string{sr})
+		}
+	}
+
+	return next.HandleFinalize(ctx, in)
+}
+
+func addSetLegacyContextSigningOptionsMiddleware(stack *middleware.Stack) error {
+	return stack.Finalize.Insert(&setLegacyContextSigningOptionsMiddleware{}, "Signing", middleware.Before)
+}
+
+type withAnonymous struct {
+	resolver AuthSchemeResolver
+}
+
+var _ AuthSchemeResolver = (*withAnonymous)(nil)
+
+func (v *withAnonymous) ResolveAuthSchemes(ctx context.Context, params *AuthResolverParameters) ([]*smithyauth.Option, error) {
+	opts, err := v.resolver.ResolveAuthSchemes(ctx, params)
+	if err != nil {
+		return nil, err
+	}
+
+	opts = append(opts, &smithyauth.Option{
+		SchemeID: smithyauth.SchemeIDAnonymous,
+	})
+	return opts, nil
+}
+
+func wrapWithAnonymousAuth(options *Options) {
+	if _, ok := options.AuthSchemeResolver.(*defaultAuthSchemeResolver); !ok {
+		return
+	}
+
+	options.AuthSchemeResolver = &withAnonymous{
+		resolver: options.AuthSchemeResolver,
+	}
+}
+
+// AuthResolverParameters contains the set of inputs necessary for auth scheme
+// resolution.
+type AuthResolverParameters struct {
+	// The name of the operation being invoked.
+	Operation string
+
+	// The region in which the operation is being invoked.
+	Region string
+}
+
+func bindAuthResolverParams(ctx context.Context, operation string, input interface{}, options Options) *AuthResolverParameters {
+	params := &AuthResolverParameters{
+		Operation: operation,
+	}
+
+	bindAuthParamsRegion(ctx, params, input, options)
+
+	return params
+}
+
+// AuthSchemeResolver returns a set of possible authentication options for an
+// operation.
+type AuthSchemeResolver interface {
+	ResolveAuthSchemes(context.Context, *AuthResolverParameters) ([]*smithyauth.Option, error)
+}
+
+type defaultAuthSchemeResolver struct{}
+
+var _ AuthSchemeResolver = (*defaultAuthSchemeResolver)(nil)
+
+func (*defaultAuthSchemeResolver) ResolveAuthSchemes(ctx context.Context, params *AuthResolverParameters) ([]*smithyauth.Option, error) {
+	if overrides, ok := operationAuthOptions[params.Operation]; ok {
+		return overrides(params), nil
+	}
+	return serviceAuthOptions(params), nil
+}
+
+var operationAuthOptions = map[string]func(*AuthResolverParameters) []*smithyauth.Option{
+	"GetRoleCredentials": func(params *AuthResolverParameters) []*smithyauth.Option {
+		return []*smithyauth.Option{
+			{SchemeID: smithyauth.SchemeIDAnonymous},
+		}
+	},
+
+	"ListAccountRoles": func(params *AuthResolverParameters) []*smithyauth.Option {
+		return []*smithyauth.Option{
+			{SchemeID: smithyauth.SchemeIDAnonymous},
+		}
+	},
+
+	"ListAccounts": func(params *AuthResolverParameters) []*smithyauth.Option {
+		return []*smithyauth.Option{
+			{SchemeID: smithyauth.SchemeIDAnonymous},
+		}
+	},
+
+	"Logout": func(params *AuthResolverParameters) []*smithyauth.Option {
+		return []*smithyauth.Option{
+			{SchemeID: smithyauth.SchemeIDAnonymous},
+		}
+	},
+}
+
+func serviceAuthOptions(params *AuthResolverParameters) []*smithyauth.Option {
+	return []*smithyauth.Option{
+		{
+			SchemeID: smithyauth.SchemeIDSigV4,
+			SignerProperties: func() smithy.Properties {
+				var props smithy.Properties
+				smithyhttp.SetSigV4SigningName(&props, "awsssoportal")
+				smithyhttp.SetSigV4SigningRegion(&props, params.Region)
+				return props
+			}(),
+		},
+	}
+}
+
+type resolveAuthSchemeMiddleware struct {
+	operation string
+	options   Options
+}
+
+func (*resolveAuthSchemeMiddleware) ID() string {
+	return "ResolveAuthScheme"
+}
+
+func (m *resolveAuthSchemeMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (
+	out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
+) {
+	params := bindAuthResolverParams(ctx, m.operation, getOperationInput(ctx), m.options)
+	options, err := m.options.AuthSchemeResolver.ResolveAuthSchemes(ctx, params)
+	if err != nil {
+		return out, metadata, fmt.Errorf("resolve auth scheme: %w", err)
+	}
+
+	scheme, ok := m.selectScheme(options)
+	if !ok {
+		return out, metadata, fmt.Errorf("could not select an auth scheme")
+	}
+
+	ctx = setResolvedAuthScheme(ctx, scheme)
+	return next.HandleFinalize(ctx, in)
+}
+
+func (m *resolveAuthSchemeMiddleware) selectScheme(options []*smithyauth.Option) (*resolvedAuthScheme, bool) {
+	for _, option := range options {
+		if option.SchemeID == smithyauth.SchemeIDAnonymous {
+			return newResolvedAuthScheme(smithyhttp.NewAnonymousScheme(), option), true
+		}
+
+		for _, scheme := range m.options.AuthSchemes {
+			if scheme.SchemeID() != option.SchemeID {
+				continue
+			}
+
+			if scheme.IdentityResolver(m.options) != nil {
+				return newResolvedAuthScheme(scheme, option), true
+			}
+		}
+	}
+
+	return nil, false
+}
+
+type resolvedAuthSchemeKey struct{}
+
+type resolvedAuthScheme struct {
+	Scheme             smithyhttp.AuthScheme
+	IdentityProperties smithy.Properties
+	SignerProperties   smithy.Properties
+}
+
+func newResolvedAuthScheme(scheme smithyhttp.AuthScheme, option *smithyauth.Option) *resolvedAuthScheme {
+	return &resolvedAuthScheme{
+		Scheme:             scheme,
+		IdentityProperties: option.IdentityProperties,
+		SignerProperties:   option.SignerProperties,
+	}
+}
+
+func setResolvedAuthScheme(ctx context.Context, scheme *resolvedAuthScheme) context.Context {
+	return middleware.WithStackValue(ctx, resolvedAuthSchemeKey{}, scheme)
+}
+
+func getResolvedAuthScheme(ctx context.Context) *resolvedAuthScheme {
+	v, _ := middleware.GetStackValue(ctx, resolvedAuthSchemeKey{}).(*resolvedAuthScheme)
+	return v
+}
+
+type getIdentityMiddleware struct {
+	options Options
+}
+
+func (*getIdentityMiddleware) ID() string {
+	return "GetIdentity"
+}
+
+func (m *getIdentityMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (
+	out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
+) {
+	rscheme := getResolvedAuthScheme(ctx)
+	if rscheme == nil {
+		return out, metadata, fmt.Errorf("no resolved auth scheme")
+	}
+
+	resolver := rscheme.Scheme.IdentityResolver(m.options)
+	if resolver == nil {
+		return out, metadata, fmt.Errorf("no identity resolver")
+	}
+
+	identity, err := resolver.GetIdentity(ctx, rscheme.IdentityProperties)
+	if err != nil {
+		return out, metadata, fmt.Errorf("get identity: %w", err)
+	}
+
+	ctx = setIdentity(ctx, identity)
+	return next.HandleFinalize(ctx, in)
+}
+
+type identityKey struct{}
+
+func setIdentity(ctx context.Context, identity smithyauth.Identity) context.Context {
+	return middleware.WithStackValue(ctx, identityKey{}, identity)
+}
+
+func getIdentity(ctx context.Context) smithyauth.Identity {
+	v, _ := middleware.GetStackValue(ctx, identityKey{}).(smithyauth.Identity)
+	return v
+}
+
+type signRequestMiddleware struct {
+}
+
+func (*signRequestMiddleware) ID() string {
+	return "Signing"
+}
+
+func (m *signRequestMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (
+	out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
+) {
+	req, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, fmt.Errorf("unexpected transport type %T", in.Request)
+	}
+
+	rscheme := getResolvedAuthScheme(ctx)
+	if rscheme == nil {
+		return out, metadata, fmt.Errorf("no resolved auth scheme")
+	}
+
+	identity := getIdentity(ctx)
+	if identity == nil {
+		return out, metadata, fmt.Errorf("no identity")
+	}
+
+	signer := rscheme.Scheme.Signer()
+	if signer == nil {
+		return out, metadata, fmt.Errorf("no signer")
+	}
+
+	if err := signer.SignRequest(ctx, req, identity, rscheme.SignerProperties); err != nil {
+		return out, metadata, fmt.Errorf("sign request: %w", err)
+	}
+
+	return next.HandleFinalize(ctx, in)
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/service/sso/deserializers.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/service/sso/deserializers.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/service/sso/deserializers.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/service/sso/deserializers.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,1161 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package sso
+
+import (
+	"bytes"
+	"context"
+	"encoding/json"
+	"fmt"
+	"github.com/aws/aws-sdk-go-v2/aws/protocol/restjson"
+	"github.com/aws/aws-sdk-go-v2/service/sso/types"
+	smithy "github.com/aws/smithy-go"
+	smithyio "github.com/aws/smithy-go/io"
+	"github.com/aws/smithy-go/middleware"
+	"github.com/aws/smithy-go/ptr"
+	smithytime "github.com/aws/smithy-go/time"
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+	"io"
+	"io/ioutil"
+	"strings"
+	"time"
+)
+
+func deserializeS3Expires(v string) (*time.Time, error) {
+	t, err := smithytime.ParseHTTPDate(v)
+	if err != nil {
+		return nil, nil
+	}
+	return &t, nil
+}
+
+type awsRestjson1_deserializeOpGetRoleCredentials struct {
+}
+
+func (*awsRestjson1_deserializeOpGetRoleCredentials) ID() string {
+	return "OperationDeserializer"
+}
+
+func (m *awsRestjson1_deserializeOpGetRoleCredentials) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+	out, metadata, err = next.HandleDeserialize(ctx, in)
+	if err != nil {
+		return out, metadata, err
+	}
+
+	response, ok := out.RawResponse.(*smithyhttp.Response)
+	if !ok {
+		return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+	}
+
+	if response.StatusCode < 200 || response.StatusCode >= 300 {
+		return out, metadata, awsRestjson1_deserializeOpErrorGetRoleCredentials(response, &metadata)
+	}
+	output := &GetRoleCredentialsOutput{}
+	out.Result = output
+
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+	body := io.TeeReader(response.Body, ringBuffer)
+
+	decoder := json.NewDecoder(body)
+	decoder.UseNumber()
+	var shape interface{}
+	if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return out, metadata, err
+	}
+
+	err = awsRestjson1_deserializeOpDocumentGetRoleCredentialsOutput(&output, shape)
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		return out, metadata, &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body with invalid JSON, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+	}
+
+	return out, metadata, err
+}
+
+func awsRestjson1_deserializeOpErrorGetRoleCredentials(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+	var errorBuffer bytes.Buffer
+	if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+		return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+	}
+	errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+	errorCode := "UnknownError"
+	errorMessage := errorCode
+
+	headerCode := response.Header.Get("X-Amzn-ErrorType")
+	if len(headerCode) != 0 {
+		errorCode = restjson.SanitizeErrorCode(headerCode)
+	}
+
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+	body := io.TeeReader(errorBody, ringBuffer)
+	decoder := json.NewDecoder(body)
+	decoder.UseNumber()
+	jsonCode, message, err := restjson.GetErrorInfo(decoder)
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return err
+	}
+
+	errorBody.Seek(0, io.SeekStart)
+	if len(headerCode) == 0 && len(jsonCode) != 0 {
+		errorCode = restjson.SanitizeErrorCode(jsonCode)
+	}
+	if len(message) != 0 {
+		errorMessage = message
+	}
+
+	switch {
+	case strings.EqualFold("InvalidRequestException", errorCode):
+		return awsRestjson1_deserializeErrorInvalidRequestException(response, errorBody)
+
+	case strings.EqualFold("ResourceNotFoundException", errorCode):
+		return awsRestjson1_deserializeErrorResourceNotFoundException(response, errorBody)
+
+	case strings.EqualFold("TooManyRequestsException", errorCode):
+		return awsRestjson1_deserializeErrorTooManyRequestsException(response, errorBody)
+
+	case strings.EqualFold("UnauthorizedException", errorCode):
+		return awsRestjson1_deserializeErrorUnauthorizedException(response, errorBody)
+
+	default:
+		genericError := &smithy.GenericAPIError{
+			Code:    errorCode,
+			Message: errorMessage,
+		}
+		return genericError
+
+	}
+}
+
+func awsRestjson1_deserializeOpDocumentGetRoleCredentialsOutput(v **GetRoleCredentialsOutput, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var sv *GetRoleCredentialsOutput
+	if *v == nil {
+		sv = &GetRoleCredentialsOutput{}
+	} else {
+		sv = *v
+	}
+
+	for key, value := range shape {
+		switch key {
+		case "roleCredentials":
+			if err := awsRestjson1_deserializeDocumentRoleCredentials(&sv.RoleCredentials, value); err != nil {
+				return err
+			}
+
+		default:
+			_, _ = key, value
+
+		}
+	}
+	*v = sv
+	return nil
+}
+
+type awsRestjson1_deserializeOpListAccountRoles struct {
+}
+
+func (*awsRestjson1_deserializeOpListAccountRoles) ID() string {
+	return "OperationDeserializer"
+}
+
+func (m *awsRestjson1_deserializeOpListAccountRoles) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+	out, metadata, err = next.HandleDeserialize(ctx, in)
+	if err != nil {
+		return out, metadata, err
+	}
+
+	response, ok := out.RawResponse.(*smithyhttp.Response)
+	if !ok {
+		return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+	}
+
+	if response.StatusCode < 200 || response.StatusCode >= 300 {
+		return out, metadata, awsRestjson1_deserializeOpErrorListAccountRoles(response, &metadata)
+	}
+	output := &ListAccountRolesOutput{}
+	out.Result = output
+
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+	body := io.TeeReader(response.Body, ringBuffer)
+
+	decoder := json.NewDecoder(body)
+	decoder.UseNumber()
+	var shape interface{}
+	if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return out, metadata, err
+	}
+
+	err = awsRestjson1_deserializeOpDocumentListAccountRolesOutput(&output, shape)
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		return out, metadata, &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body with invalid JSON, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+	}
+
+	return out, metadata, err
+}
+
+func awsRestjson1_deserializeOpErrorListAccountRoles(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+	var errorBuffer bytes.Buffer
+	if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+		return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+	}
+	errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+	errorCode := "UnknownError"
+	errorMessage := errorCode
+
+	headerCode := response.Header.Get("X-Amzn-ErrorType")
+	if len(headerCode) != 0 {
+		errorCode = restjson.SanitizeErrorCode(headerCode)
+	}
+
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+	body := io.TeeReader(errorBody, ringBuffer)
+	decoder := json.NewDecoder(body)
+	decoder.UseNumber()
+	jsonCode, message, err := restjson.GetErrorInfo(decoder)
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return err
+	}
+
+	errorBody.Seek(0, io.SeekStart)
+	if len(headerCode) == 0 && len(jsonCode) != 0 {
+		errorCode = restjson.SanitizeErrorCode(jsonCode)
+	}
+	if len(message) != 0 {
+		errorMessage = message
+	}
+
+	switch {
+	case strings.EqualFold("InvalidRequestException", errorCode):
+		return awsRestjson1_deserializeErrorInvalidRequestException(response, errorBody)
+
+	case strings.EqualFold("ResourceNotFoundException", errorCode):
+		return awsRestjson1_deserializeErrorResourceNotFoundException(response, errorBody)
+
+	case strings.EqualFold("TooManyRequestsException", errorCode):
+		return awsRestjson1_deserializeErrorTooManyRequestsException(response, errorBody)
+
+	case strings.EqualFold("UnauthorizedException", errorCode):
+		return awsRestjson1_deserializeErrorUnauthorizedException(response, errorBody)
+
+	default:
+		genericError := &smithy.GenericAPIError{
+			Code:    errorCode,
+			Message: errorMessage,
+		}
+		return genericError
+
+	}
+}
+
+func awsRestjson1_deserializeOpDocumentListAccountRolesOutput(v **ListAccountRolesOutput, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var sv *ListAccountRolesOutput
+	if *v == nil {
+		sv = &ListAccountRolesOutput{}
+	} else {
+		sv = *v
+	}
+
+	for key, value := range shape {
+		switch key {
+		case "nextToken":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected NextTokenType to be of type string, got %T instead", value)
+				}
+				sv.NextToken = ptr.String(jtv)
+			}
+
+		case "roleList":
+			if err := awsRestjson1_deserializeDocumentRoleListType(&sv.RoleList, value); err != nil {
+				return err
+			}
+
+		default:
+			_, _ = key, value
+
+		}
+	}
+	*v = sv
+	return nil
+}
+
+type awsRestjson1_deserializeOpListAccounts struct {
+}
+
+func (*awsRestjson1_deserializeOpListAccounts) ID() string {
+	return "OperationDeserializer"
+}
+
+func (m *awsRestjson1_deserializeOpListAccounts) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+	out, metadata, err = next.HandleDeserialize(ctx, in)
+	if err != nil {
+		return out, metadata, err
+	}
+
+	response, ok := out.RawResponse.(*smithyhttp.Response)
+	if !ok {
+		return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+	}
+
+	if response.StatusCode < 200 || response.StatusCode >= 300 {
+		return out, metadata, awsRestjson1_deserializeOpErrorListAccounts(response, &metadata)
+	}
+	output := &ListAccountsOutput{}
+	out.Result = output
+
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+	body := io.TeeReader(response.Body, ringBuffer)
+
+	decoder := json.NewDecoder(body)
+	decoder.UseNumber()
+	var shape interface{}
+	if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return out, metadata, err
+	}
+
+	err = awsRestjson1_deserializeOpDocumentListAccountsOutput(&output, shape)
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		return out, metadata, &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body with invalid JSON, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+	}
+
+	return out, metadata, err
+}
+
+func awsRestjson1_deserializeOpErrorListAccounts(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+	var errorBuffer bytes.Buffer
+	if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+		return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+	}
+	errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+	errorCode := "UnknownError"
+	errorMessage := errorCode
+
+	headerCode := response.Header.Get("X-Amzn-ErrorType")
+	if len(headerCode) != 0 {
+		errorCode = restjson.SanitizeErrorCode(headerCode)
+	}
+
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+	body := io.TeeReader(errorBody, ringBuffer)
+	decoder := json.NewDecoder(body)
+	decoder.UseNumber()
+	jsonCode, message, err := restjson.GetErrorInfo(decoder)
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return err
+	}
+
+	errorBody.Seek(0, io.SeekStart)
+	if len(headerCode) == 0 && len(jsonCode) != 0 {
+		errorCode = restjson.SanitizeErrorCode(jsonCode)
+	}
+	if len(message) != 0 {
+		errorMessage = message
+	}
+
+	switch {
+	case strings.EqualFold("InvalidRequestException", errorCode):
+		return awsRestjson1_deserializeErrorInvalidRequestException(response, errorBody)
+
+	case strings.EqualFold("ResourceNotFoundException", errorCode):
+		return awsRestjson1_deserializeErrorResourceNotFoundException(response, errorBody)
+
+	case strings.EqualFold("TooManyRequestsException", errorCode):
+		return awsRestjson1_deserializeErrorTooManyRequestsException(response, errorBody)
+
+	case strings.EqualFold("UnauthorizedException", errorCode):
+		return awsRestjson1_deserializeErrorUnauthorizedException(response, errorBody)
+
+	default:
+		genericError := &smithy.GenericAPIError{
+			Code:    errorCode,
+			Message: errorMessage,
+		}
+		return genericError
+
+	}
+}
+
+func awsRestjson1_deserializeOpDocumentListAccountsOutput(v **ListAccountsOutput, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var sv *ListAccountsOutput
+	if *v == nil {
+		sv = &ListAccountsOutput{}
+	} else {
+		sv = *v
+	}
+
+	for key, value := range shape {
+		switch key {
+		case "accountList":
+			if err := awsRestjson1_deserializeDocumentAccountListType(&sv.AccountList, value); err != nil {
+				return err
+			}
+
+		case "nextToken":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected NextTokenType to be of type string, got %T instead", value)
+				}
+				sv.NextToken = ptr.String(jtv)
+			}
+
+		default:
+			_, _ = key, value
+
+		}
+	}
+	*v = sv
+	return nil
+}
+
+type awsRestjson1_deserializeOpLogout struct {
+}
+
+func (*awsRestjson1_deserializeOpLogout) ID() string {
+	return "OperationDeserializer"
+}
+
+func (m *awsRestjson1_deserializeOpLogout) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+	out, metadata, err = next.HandleDeserialize(ctx, in)
+	if err != nil {
+		return out, metadata, err
+	}
+
+	response, ok := out.RawResponse.(*smithyhttp.Response)
+	if !ok {
+		return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+	}
+
+	if response.StatusCode < 200 || response.StatusCode >= 300 {
+		return out, metadata, awsRestjson1_deserializeOpErrorLogout(response, &metadata)
+	}
+	output := &LogoutOutput{}
+	out.Result = output
+
+	if _, err = io.Copy(ioutil.Discard, response.Body); err != nil {
+		return out, metadata, &smithy.DeserializationError{
+			Err: fmt.Errorf("failed to discard response body, %w", err),
+		}
+	}
+
+	return out, metadata, err
+}
+
+func awsRestjson1_deserializeOpErrorLogout(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+	var errorBuffer bytes.Buffer
+	if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+		return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+	}
+	errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+	errorCode := "UnknownError"
+	errorMessage := errorCode
+
+	headerCode := response.Header.Get("X-Amzn-ErrorType")
+	if len(headerCode) != 0 {
+		errorCode = restjson.SanitizeErrorCode(headerCode)
+	}
+
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+	body := io.TeeReader(errorBody, ringBuffer)
+	decoder := json.NewDecoder(body)
+	decoder.UseNumber()
+	jsonCode, message, err := restjson.GetErrorInfo(decoder)
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return err
+	}
+
+	errorBody.Seek(0, io.SeekStart)
+	if len(headerCode) == 0 && len(jsonCode) != 0 {
+		errorCode = restjson.SanitizeErrorCode(jsonCode)
+	}
+	if len(message) != 0 {
+		errorMessage = message
+	}
+
+	switch {
+	case strings.EqualFold("InvalidRequestException", errorCode):
+		return awsRestjson1_deserializeErrorInvalidRequestException(response, errorBody)
+
+	case strings.EqualFold("TooManyRequestsException", errorCode):
+		return awsRestjson1_deserializeErrorTooManyRequestsException(response, errorBody)
+
+	case strings.EqualFold("UnauthorizedException", errorCode):
+		return awsRestjson1_deserializeErrorUnauthorizedException(response, errorBody)
+
+	default:
+		genericError := &smithy.GenericAPIError{
+			Code:    errorCode,
+			Message: errorMessage,
+		}
+		return genericError
+
+	}
+}
+
+func awsRestjson1_deserializeErrorInvalidRequestException(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+	output := &types.InvalidRequestException{}
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+	body := io.TeeReader(errorBody, ringBuffer)
+	decoder := json.NewDecoder(body)
+	decoder.UseNumber()
+	var shape interface{}
+	if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return err
+	}
+
+	err := awsRestjson1_deserializeDocumentInvalidRequestException(&output, shape)
+
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return err
+	}
+
+	errorBody.Seek(0, io.SeekStart)
+
+	return output
+}
+
+func awsRestjson1_deserializeErrorResourceNotFoundException(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+	output := &types.ResourceNotFoundException{}
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+	body := io.TeeReader(errorBody, ringBuffer)
+	decoder := json.NewDecoder(body)
+	decoder.UseNumber()
+	var shape interface{}
+	if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return err
+	}
+
+	err := awsRestjson1_deserializeDocumentResourceNotFoundException(&output, shape)
+
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return err
+	}
+
+	errorBody.Seek(0, io.SeekStart)
+
+	return output
+}
+
+func awsRestjson1_deserializeErrorTooManyRequestsException(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+	output := &types.TooManyRequestsException{}
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+	body := io.TeeReader(errorBody, ringBuffer)
+	decoder := json.NewDecoder(body)
+	decoder.UseNumber()
+	var shape interface{}
+	if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return err
+	}
+
+	err := awsRestjson1_deserializeDocumentTooManyRequestsException(&output, shape)
+
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return err
+	}
+
+	errorBody.Seek(0, io.SeekStart)
+
+	return output
+}
+
+func awsRestjson1_deserializeErrorUnauthorizedException(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+	output := &types.UnauthorizedException{}
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+	body := io.TeeReader(errorBody, ringBuffer)
+	decoder := json.NewDecoder(body)
+	decoder.UseNumber()
+	var shape interface{}
+	if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return err
+	}
+
+	err := awsRestjson1_deserializeDocumentUnauthorizedException(&output, shape)
+
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return err
+	}
+
+	errorBody.Seek(0, io.SeekStart)
+
+	return output
+}
+
+func awsRestjson1_deserializeDocumentAccountInfo(v **types.AccountInfo, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var sv *types.AccountInfo
+	if *v == nil {
+		sv = &types.AccountInfo{}
+	} else {
+		sv = *v
+	}
+
+	for key, value := range shape {
+		switch key {
+		case "accountId":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected AccountIdType to be of type string, got %T instead", value)
+				}
+				sv.AccountId = ptr.String(jtv)
+			}
+
+		case "accountName":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected AccountNameType to be of type string, got %T instead", value)
+				}
+				sv.AccountName = ptr.String(jtv)
+			}
+
+		case "emailAddress":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected EmailAddressType to be of type string, got %T instead", value)
+				}
+				sv.EmailAddress = ptr.String(jtv)
+			}
+
+		default:
+			_, _ = key, value
+
+		}
+	}
+	*v = sv
+	return nil
+}
+
+func awsRestjson1_deserializeDocumentAccountListType(v *[]types.AccountInfo, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.([]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var cv []types.AccountInfo
+	if *v == nil {
+		cv = []types.AccountInfo{}
+	} else {
+		cv = *v
+	}
+
+	for _, value := range shape {
+		var col types.AccountInfo
+		destAddr := &col
+		if err := awsRestjson1_deserializeDocumentAccountInfo(&destAddr, value); err != nil {
+			return err
+		}
+		col = *destAddr
+		cv = append(cv, col)
+
+	}
+	*v = cv
+	return nil
+}
+
+func awsRestjson1_deserializeDocumentInvalidRequestException(v **types.InvalidRequestException, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var sv *types.InvalidRequestException
+	if *v == nil {
+		sv = &types.InvalidRequestException{}
+	} else {
+		sv = *v
+	}
+
+	for key, value := range shape {
+		switch key {
+		case "message":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected ErrorDescription to be of type string, got %T instead", value)
+				}
+				sv.Message = ptr.String(jtv)
+			}
+
+		default:
+			_, _ = key, value
+
+		}
+	}
+	*v = sv
+	return nil
+}
+
+func awsRestjson1_deserializeDocumentResourceNotFoundException(v **types.ResourceNotFoundException, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var sv *types.ResourceNotFoundException
+	if *v == nil {
+		sv = &types.ResourceNotFoundException{}
+	} else {
+		sv = *v
+	}
+
+	for key, value := range shape {
+		switch key {
+		case "message":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected ErrorDescription to be of type string, got %T instead", value)
+				}
+				sv.Message = ptr.String(jtv)
+			}
+
+		default:
+			_, _ = key, value
+
+		}
+	}
+	*v = sv
+	return nil
+}
+
+func awsRestjson1_deserializeDocumentRoleCredentials(v **types.RoleCredentials, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var sv *types.RoleCredentials
+	if *v == nil {
+		sv = &types.RoleCredentials{}
+	} else {
+		sv = *v
+	}
+
+	for key, value := range shape {
+		switch key {
+		case "accessKeyId":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected AccessKeyType to be of type string, got %T instead", value)
+				}
+				sv.AccessKeyId = ptr.String(jtv)
+			}
+
+		case "expiration":
+			if value != nil {
+				jtv, ok := value.(json.Number)
+				if !ok {
+					return fmt.Errorf("expected ExpirationTimestampType to be json.Number, got %T instead", value)
+				}
+				i64, err := jtv.Int64()
+				if err != nil {
+					return err
+				}
+				sv.Expiration = i64
+			}
+
+		case "secretAccessKey":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected SecretAccessKeyType to be of type string, got %T instead", value)
+				}
+				sv.SecretAccessKey = ptr.String(jtv)
+			}
+
+		case "sessionToken":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected SessionTokenType to be of type string, got %T instead", value)
+				}
+				sv.SessionToken = ptr.String(jtv)
+			}
+
+		default:
+			_, _ = key, value
+
+		}
+	}
+	*v = sv
+	return nil
+}
+
+func awsRestjson1_deserializeDocumentRoleInfo(v **types.RoleInfo, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var sv *types.RoleInfo
+	if *v == nil {
+		sv = &types.RoleInfo{}
+	} else {
+		sv = *v
+	}
+
+	for key, value := range shape {
+		switch key {
+		case "accountId":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected AccountIdType to be of type string, got %T instead", value)
+				}
+				sv.AccountId = ptr.String(jtv)
+			}
+
+		case "roleName":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected RoleNameType to be of type string, got %T instead", value)
+				}
+				sv.RoleName = ptr.String(jtv)
+			}
+
+		default:
+			_, _ = key, value
+
+		}
+	}
+	*v = sv
+	return nil
+}
+
+func awsRestjson1_deserializeDocumentRoleListType(v *[]types.RoleInfo, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.([]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var cv []types.RoleInfo
+	if *v == nil {
+		cv = []types.RoleInfo{}
+	} else {
+		cv = *v
+	}
+
+	for _, value := range shape {
+		var col types.RoleInfo
+		destAddr := &col
+		if err := awsRestjson1_deserializeDocumentRoleInfo(&destAddr, value); err != nil {
+			return err
+		}
+		col = *destAddr
+		cv = append(cv, col)
+
+	}
+	*v = cv
+	return nil
+}
+
+func awsRestjson1_deserializeDocumentTooManyRequestsException(v **types.TooManyRequestsException, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var sv *types.TooManyRequestsException
+	if *v == nil {
+		sv = &types.TooManyRequestsException{}
+	} else {
+		sv = *v
+	}
+
+	for key, value := range shape {
+		switch key {
+		case "message":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected ErrorDescription to be of type string, got %T instead", value)
+				}
+				sv.Message = ptr.String(jtv)
+			}
+
+		default:
+			_, _ = key, value
+
+		}
+	}
+	*v = sv
+	return nil
+}
+
+func awsRestjson1_deserializeDocumentUnauthorizedException(v **types.UnauthorizedException, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var sv *types.UnauthorizedException
+	if *v == nil {
+		sv = &types.UnauthorizedException{}
+	} else {
+		sv = *v
+	}
+
+	for key, value := range shape {
+		switch key {
+		case "message":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected ErrorDescription to be of type string, got %T instead", value)
+				}
+				sv.Message = ptr.String(jtv)
+			}
+
+		default:
+			_, _ = key, value
+
+		}
+	}
+	*v = sv
+	return nil
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/service/sso/doc.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/service/sso/doc.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/service/sso/doc.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/service/sso/doc.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,27 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+// Package sso provides the API client, operations, and parameter types for AWS
+// Single Sign-On.
+//
+// AWS IAM Identity Center (successor to AWS Single Sign-On) Portal is a web
+// service that makes it easy for you to assign user access to IAM Identity Center
+// resources such as the AWS access portal. Users can get AWS account applications
+// and roles assigned to them and get federated into the application.
+//
+// Although AWS Single Sign-On was renamed, the sso and identitystore API
+// namespaces will continue to retain their original name for backward
+// compatibility purposes. For more information, see [IAM Identity Center rename].
+//
+// This reference guide describes the IAM Identity Center Portal operations that
+// you can call programatically and includes detailed information on data types and
+// errors.
+//
+// AWS provides SDKs that consist of libraries and sample code for various
+// programming languages and platforms, such as Java, Ruby, .Net, iOS, or Android.
+// The SDKs provide a convenient way to create programmatic access to IAM Identity
+// Center and other AWS services. For more information about the AWS SDKs,
+// including how to download and install them, see [Tools for Amazon Web Services].
+//
+// [Tools for Amazon Web Services]: http://aws.amazon.com/tools/
+// [IAM Identity Center rename]: https://docs.aws.amazon.com/singlesignon/latest/userguide/what-is.html#renamed
+package sso
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/service/sso/endpoints.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/service/sso/endpoints.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/service/sso/endpoints.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/service/sso/endpoints.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,550 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package sso
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"github.com/aws/aws-sdk-go-v2/aws"
+	awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+	internalConfig "github.com/aws/aws-sdk-go-v2/internal/configsources"
+	"github.com/aws/aws-sdk-go-v2/internal/endpoints"
+	"github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn"
+	internalendpoints "github.com/aws/aws-sdk-go-v2/service/sso/internal/endpoints"
+	smithyauth "github.com/aws/smithy-go/auth"
+	smithyendpoints "github.com/aws/smithy-go/endpoints"
+	"github.com/aws/smithy-go/middleware"
+	"github.com/aws/smithy-go/ptr"
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+	"net/http"
+	"net/url"
+	"os"
+	"strings"
+)
+
+// EndpointResolverOptions is the service endpoint resolver options
+type EndpointResolverOptions = internalendpoints.Options
+
+// EndpointResolver interface for resolving service endpoints.
+type EndpointResolver interface {
+	ResolveEndpoint(region string, options EndpointResolverOptions) (aws.Endpoint, error)
+}
+
+var _ EndpointResolver = &internalendpoints.Resolver{}
+
+// NewDefaultEndpointResolver constructs a new service endpoint resolver
+func NewDefaultEndpointResolver() *internalendpoints.Resolver {
+	return internalendpoints.New()
+}
+
+// EndpointResolverFunc is a helper utility that wraps a function so it satisfies
+// the EndpointResolver interface. This is useful when you want to add additional
+// endpoint resolving logic, or stub out specific endpoints with custom values.
+type EndpointResolverFunc func(region string, options EndpointResolverOptions) (aws.Endpoint, error)
+
+func (fn EndpointResolverFunc) ResolveEndpoint(region string, options EndpointResolverOptions) (endpoint aws.Endpoint, err error) {
+	return fn(region, options)
+}
+
+// EndpointResolverFromURL returns an EndpointResolver configured using the
+// provided endpoint url. By default, the resolved endpoint resolver uses the
+// client region as signing region, and the endpoint source is set to
+// EndpointSourceCustom.You can provide functional options to configure endpoint
+// values for the resolved endpoint.
+func EndpointResolverFromURL(url string, optFns ...func(*aws.Endpoint)) EndpointResolver {
+	e := aws.Endpoint{URL: url, Source: aws.EndpointSourceCustom}
+	for _, fn := range optFns {
+		fn(&e)
+	}
+
+	return EndpointResolverFunc(
+		func(region string, options EndpointResolverOptions) (aws.Endpoint, error) {
+			if len(e.SigningRegion) == 0 {
+				e.SigningRegion = region
+			}
+			return e, nil
+		},
+	)
+}
+
+type ResolveEndpoint struct {
+	Resolver EndpointResolver
+	Options  EndpointResolverOptions
+}
+
+func (*ResolveEndpoint) ID() string {
+	return "ResolveEndpoint"
+}
+
+func (m *ResolveEndpoint) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	if !awsmiddleware.GetRequiresLegacyEndpoints(ctx) {
+		return next.HandleSerialize(ctx, in)
+	}
+
+	req, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, fmt.Errorf("unknown transport type %T", in.Request)
+	}
+
+	if m.Resolver == nil {
+		return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil")
+	}
+
+	eo := m.Options
+	eo.Logger = middleware.GetLogger(ctx)
+
+	var endpoint aws.Endpoint
+	endpoint, err = m.Resolver.ResolveEndpoint(awsmiddleware.GetRegion(ctx), eo)
+	if err != nil {
+		nf := (&aws.EndpointNotFoundError{})
+		if errors.As(err, &nf) {
+			ctx = awsmiddleware.SetRequiresLegacyEndpoints(ctx, false)
+			return next.HandleSerialize(ctx, in)
+		}
+		return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err)
+	}
+
+	req.URL, err = url.Parse(endpoint.URL)
+	if err != nil {
+		return out, metadata, fmt.Errorf("failed to parse endpoint URL: %w", err)
+	}
+
+	if len(awsmiddleware.GetSigningName(ctx)) == 0 {
+		signingName := endpoint.SigningName
+		if len(signingName) == 0 {
+			signingName = "awsssoportal"
+		}
+		ctx = awsmiddleware.SetSigningName(ctx, signingName)
+	}
+	ctx = awsmiddleware.SetEndpointSource(ctx, endpoint.Source)
+	ctx = smithyhttp.SetHostnameImmutable(ctx, endpoint.HostnameImmutable)
+	ctx = awsmiddleware.SetSigningRegion(ctx, endpoint.SigningRegion)
+	ctx = awsmiddleware.SetPartitionID(ctx, endpoint.PartitionID)
+	return next.HandleSerialize(ctx, in)
+}
+func addResolveEndpointMiddleware(stack *middleware.Stack, o Options) error {
+	return stack.Serialize.Insert(&ResolveEndpoint{
+		Resolver: o.EndpointResolver,
+		Options:  o.EndpointOptions,
+	}, "OperationSerializer", middleware.Before)
+}
+
+func removeResolveEndpointMiddleware(stack *middleware.Stack) error {
+	_, err := stack.Serialize.Remove((&ResolveEndpoint{}).ID())
+	return err
+}
+
+type wrappedEndpointResolver struct {
+	awsResolver aws.EndpointResolverWithOptions
+}
+
+func (w *wrappedEndpointResolver) ResolveEndpoint(region string, options EndpointResolverOptions) (endpoint aws.Endpoint, err error) {
+	return w.awsResolver.ResolveEndpoint(ServiceID, region, options)
+}
+
+type awsEndpointResolverAdaptor func(service, region string) (aws.Endpoint, error)
+
+func (a awsEndpointResolverAdaptor) ResolveEndpoint(service, region string, options ...interface{}) (aws.Endpoint, error) {
+	return a(service, region)
+}
+
+var _ aws.EndpointResolverWithOptions = awsEndpointResolverAdaptor(nil)
+
+// withEndpointResolver returns an aws.EndpointResolverWithOptions that first delegates endpoint resolution to the awsResolver.
+// If awsResolver returns aws.EndpointNotFoundError error, the v1 resolver middleware will swallow the error,
+// and set an appropriate context flag such that fallback will occur when EndpointResolverV2 is invoked
+// via its middleware.
+//
+// If another error (besides aws.EndpointNotFoundError) is returned, then that error will be propagated.
+func withEndpointResolver(awsResolver aws.EndpointResolver, awsResolverWithOptions aws.EndpointResolverWithOptions) EndpointResolver {
+	var resolver aws.EndpointResolverWithOptions
+
+	if awsResolverWithOptions != nil {
+		resolver = awsResolverWithOptions
+	} else if awsResolver != nil {
+		resolver = awsEndpointResolverAdaptor(awsResolver.ResolveEndpoint)
+	}
+
+	return &wrappedEndpointResolver{
+		awsResolver: resolver,
+	}
+}
+
+func finalizeClientEndpointResolverOptions(options *Options) {
+	options.EndpointOptions.LogDeprecated = options.ClientLogMode.IsDeprecatedUsage()
+
+	if len(options.EndpointOptions.ResolvedRegion) == 0 {
+		const fipsInfix = "-fips-"
+		const fipsPrefix = "fips-"
+		const fipsSuffix = "-fips"
+
+		if strings.Contains(options.Region, fipsInfix) ||
+			strings.Contains(options.Region, fipsPrefix) ||
+			strings.Contains(options.Region, fipsSuffix) {
+			options.EndpointOptions.ResolvedRegion = strings.ReplaceAll(strings.ReplaceAll(strings.ReplaceAll(
+				options.Region, fipsInfix, "-"), fipsPrefix, ""), fipsSuffix, "")
+			options.EndpointOptions.UseFIPSEndpoint = aws.FIPSEndpointStateEnabled
+		}
+	}
+
+}
+
+func resolveEndpointResolverV2(options *Options) {
+	if options.EndpointResolverV2 == nil {
+		options.EndpointResolverV2 = NewDefaultEndpointResolverV2()
+	}
+}
+
+func resolveBaseEndpoint(cfg aws.Config, o *Options) {
+	if cfg.BaseEndpoint != nil {
+		o.BaseEndpoint = cfg.BaseEndpoint
+	}
+
+	_, g := os.LookupEnv("AWS_ENDPOINT_URL")
+	_, s := os.LookupEnv("AWS_ENDPOINT_URL_SSO")
+
+	if g && !s {
+		return
+	}
+
+	value, found, err := internalConfig.ResolveServiceBaseEndpoint(context.Background(), "SSO", cfg.ConfigSources)
+	if found && err == nil {
+		o.BaseEndpoint = &value
+	}
+}
+
+func bindRegion(region string) *string {
+	if region == "" {
+		return nil
+	}
+	return aws.String(endpoints.MapFIPSRegion(region))
+}
+
+// EndpointParameters provides the parameters that influence how endpoints are
+// resolved.
+type EndpointParameters struct {
+	// The AWS region used to dispatch the request.
+	//
+	// Parameter is
+	// required.
+	//
+	// AWS::Region
+	Region *string
+
+	// When true, use the dual-stack endpoint. If the configured endpoint does not
+	// support dual-stack, dispatching the request MAY return an error.
+	//
+	// Defaults to
+	// false if no value is provided.
+	//
+	// AWS::UseDualStack
+	UseDualStack *bool
+
+	// When true, send this request to the FIPS-compliant regional endpoint. If the
+	// configured endpoint does not have a FIPS compliant endpoint, dispatching the
+	// request will return an error.
+	//
+	// Defaults to false if no value is
+	// provided.
+	//
+	// AWS::UseFIPS
+	UseFIPS *bool
+
+	// Override the endpoint used to send this request
+	//
+	// Parameter is
+	// required.
+	//
+	// SDK::Endpoint
+	Endpoint *string
+}
+
+// ValidateRequired validates required parameters are set.
+func (p EndpointParameters) ValidateRequired() error {
+	if p.UseDualStack == nil {
+		return fmt.Errorf("parameter UseDualStack is required")
+	}
+
+	if p.UseFIPS == nil {
+		return fmt.Errorf("parameter UseFIPS is required")
+	}
+
+	return nil
+}
+
+// WithDefaults returns a shallow copy of EndpointParameterswith default values
+// applied to members where applicable.
+func (p EndpointParameters) WithDefaults() EndpointParameters {
+	if p.UseDualStack == nil {
+		p.UseDualStack = ptr.Bool(false)
+	}
+
+	if p.UseFIPS == nil {
+		p.UseFIPS = ptr.Bool(false)
+	}
+	return p
+}
+
+type stringSlice []string
+
+func (s stringSlice) Get(i int) *string {
+	if i < 0 || i >= len(s) {
+		return nil
+	}
+
+	v := s[i]
+	return &v
+}
+
+// EndpointResolverV2 provides the interface for resolving service endpoints.
+type EndpointResolverV2 interface {
+	// ResolveEndpoint attempts to resolve the endpoint with the provided options,
+	// returning the endpoint if found. Otherwise an error is returned.
+	ResolveEndpoint(ctx context.Context, params EndpointParameters) (
+		smithyendpoints.Endpoint, error,
+	)
+}
+
+// resolver provides the implementation for resolving endpoints.
+type resolver struct{}
+
+func NewDefaultEndpointResolverV2() EndpointResolverV2 {
+	return &resolver{}
+}
+
+// ResolveEndpoint attempts to resolve the endpoint with the provided options,
+// returning the endpoint if found. Otherwise an error is returned.
+func (r *resolver) ResolveEndpoint(
+	ctx context.Context, params EndpointParameters,
+) (
+	endpoint smithyendpoints.Endpoint, err error,
+) {
+	params = params.WithDefaults()
+	if err = params.ValidateRequired(); err != nil {
+		return endpoint, fmt.Errorf("endpoint parameters are not valid, %w", err)
+	}
+	_UseDualStack := *params.UseDualStack
+	_UseFIPS := *params.UseFIPS
+
+	if exprVal := params.Endpoint; exprVal != nil {
+		_Endpoint := *exprVal
+		_ = _Endpoint
+		if _UseFIPS == true {
+			return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid Configuration: FIPS and custom endpoint are not supported")
+		}
+		if _UseDualStack == true {
+			return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid Configuration: Dualstack and custom endpoint are not supported")
+		}
+		uriString := _Endpoint
+
+		uri, err := url.Parse(uriString)
+		if err != nil {
+			return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+		}
+
+		return smithyendpoints.Endpoint{
+			URI:     *uri,
+			Headers: http.Header{},
+		}, nil
+	}
+	if exprVal := params.Region; exprVal != nil {
+		_Region := *exprVal
+		_ = _Region
+		if exprVal := awsrulesfn.GetPartition(_Region); exprVal != nil {
+			_PartitionResult := *exprVal
+			_ = _PartitionResult
+			if _UseFIPS == true {
+				if _UseDualStack == true {
+					if true == _PartitionResult.SupportsFIPS {
+						if true == _PartitionResult.SupportsDualStack {
+							uriString := func() string {
+								var out strings.Builder
+								out.WriteString("https://portal.sso-fips.")
+								out.WriteString(_Region)
+								out.WriteString(".")
+								out.WriteString(_PartitionResult.DualStackDnsSuffix)
+								return out.String()
+							}()
+
+							uri, err := url.Parse(uriString)
+							if err != nil {
+								return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+							}
+
+							return smithyendpoints.Endpoint{
+								URI:     *uri,
+								Headers: http.Header{},
+							}, nil
+						}
+					}
+					return endpoint, fmt.Errorf("endpoint rule error, %s", "FIPS and DualStack are enabled, but this partition does not support one or both")
+				}
+			}
+			if _UseFIPS == true {
+				if true == _PartitionResult.SupportsFIPS {
+					if "aws-us-gov" == _PartitionResult.Name {
+						uriString := func() string {
+							var out strings.Builder
+							out.WriteString("https://portal.sso.")
+							out.WriteString(_Region)
+							out.WriteString(".amazonaws.com")
+							return out.String()
+						}()
+
+						uri, err := url.Parse(uriString)
+						if err != nil {
+							return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+						}
+
+						return smithyendpoints.Endpoint{
+							URI:     *uri,
+							Headers: http.Header{},
+						}, nil
+					}
+					uriString := func() string {
+						var out strings.Builder
+						out.WriteString("https://portal.sso-fips.")
+						out.WriteString(_Region)
+						out.WriteString(".")
+						out.WriteString(_PartitionResult.DnsSuffix)
+						return out.String()
+					}()
+
+					uri, err := url.Parse(uriString)
+					if err != nil {
+						return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+					}
+
+					return smithyendpoints.Endpoint{
+						URI:     *uri,
+						Headers: http.Header{},
+					}, nil
+				}
+				return endpoint, fmt.Errorf("endpoint rule error, %s", "FIPS is enabled but this partition does not support FIPS")
+			}
+			if _UseDualStack == true {
+				if true == _PartitionResult.SupportsDualStack {
+					uriString := func() string {
+						var out strings.Builder
+						out.WriteString("https://portal.sso.")
+						out.WriteString(_Region)
+						out.WriteString(".")
+						out.WriteString(_PartitionResult.DualStackDnsSuffix)
+						return out.String()
+					}()
+
+					uri, err := url.Parse(uriString)
+					if err != nil {
+						return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+					}
+
+					return smithyendpoints.Endpoint{
+						URI:     *uri,
+						Headers: http.Header{},
+					}, nil
+				}
+				return endpoint, fmt.Errorf("endpoint rule error, %s", "DualStack is enabled but this partition does not support DualStack")
+			}
+			uriString := func() string {
+				var out strings.Builder
+				out.WriteString("https://portal.sso.")
+				out.WriteString(_Region)
+				out.WriteString(".")
+				out.WriteString(_PartitionResult.DnsSuffix)
+				return out.String()
+			}()
+
+			uri, err := url.Parse(uriString)
+			if err != nil {
+				return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+			}
+
+			return smithyendpoints.Endpoint{
+				URI:     *uri,
+				Headers: http.Header{},
+			}, nil
+		}
+		return endpoint, fmt.Errorf("Endpoint resolution failed. Invalid operation or environment input.")
+	}
+	return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid Configuration: Missing Region")
+}
+
+type endpointParamsBinder interface {
+	bindEndpointParams(*EndpointParameters)
+}
+
+func bindEndpointParams(ctx context.Context, input interface{}, options Options) *EndpointParameters {
+	params := &EndpointParameters{}
+
+	params.Region = bindRegion(options.Region)
+	params.UseDualStack = aws.Bool(options.EndpointOptions.UseDualStackEndpoint == aws.DualStackEndpointStateEnabled)
+	params.UseFIPS = aws.Bool(options.EndpointOptions.UseFIPSEndpoint == aws.FIPSEndpointStateEnabled)
+	params.Endpoint = options.BaseEndpoint
+
+	if b, ok := input.(endpointParamsBinder); ok {
+		b.bindEndpointParams(params)
+	}
+
+	return params
+}
+
+type resolveEndpointV2Middleware struct {
+	options Options
+}
+
+func (*resolveEndpointV2Middleware) ID() string {
+	return "ResolveEndpointV2"
+}
+
+func (m *resolveEndpointV2Middleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (
+	out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
+) {
+	if awsmiddleware.GetRequiresLegacyEndpoints(ctx) {
+		return next.HandleFinalize(ctx, in)
+	}
+
+	if err := checkAccountID(getIdentity(ctx), m.options.AccountIDEndpointMode); err != nil {
+		return out, metadata, fmt.Errorf("invalid accountID set: %w", err)
+	}
+
+	req, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, fmt.Errorf("unknown transport type %T", in.Request)
+	}
+
+	if m.options.EndpointResolverV2 == nil {
+		return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil")
+	}
+
+	params := bindEndpointParams(ctx, getOperationInput(ctx), m.options)
+	endpt, err := m.options.EndpointResolverV2.ResolveEndpoint(ctx, *params)
+	if err != nil {
+		return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err)
+	}
+
+	if endpt.URI.RawPath == "" && req.URL.RawPath != "" {
+		endpt.URI.RawPath = endpt.URI.Path
+	}
+	req.URL.Scheme = endpt.URI.Scheme
+	req.URL.Host = endpt.URI.Host
+	req.URL.Path = smithyhttp.JoinPath(endpt.URI.Path, req.URL.Path)
+	req.URL.RawPath = smithyhttp.JoinPath(endpt.URI.RawPath, req.URL.RawPath)
+	for k := range endpt.Headers {
+		req.Header.Set(k, endpt.Headers.Get(k))
+	}
+
+	rscheme := getResolvedAuthScheme(ctx)
+	if rscheme == nil {
+		return out, metadata, fmt.Errorf("no resolved auth scheme")
+	}
+
+	opts, _ := smithyauth.GetAuthOptions(&endpt.Properties)
+	for _, o := range opts {
+		rscheme.SignerProperties.SetAll(&o.SignerProperties)
+	}
+
+	return next.HandleFinalize(ctx, in)
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/service/sso/generated.json 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/service/sso/generated.json
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/service/sso/generated.json	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/service/sso/generated.json	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,35 @@
+{
+    "dependencies": {
+        "github.com/aws/aws-sdk-go-v2": "v1.4.0",
+        "github.com/aws/aws-sdk-go-v2/internal/configsources": "v0.0.0-00010101000000-000000000000",
+        "github.com/aws/aws-sdk-go-v2/internal/endpoints/v2": "v2.0.0-00010101000000-000000000000",
+        "github.com/aws/smithy-go": "v1.4.0"
+    },
+    "files": [
+        "api_client.go",
+        "api_client_test.go",
+        "api_op_GetRoleCredentials.go",
+        "api_op_ListAccountRoles.go",
+        "api_op_ListAccounts.go",
+        "api_op_Logout.go",
+        "auth.go",
+        "deserializers.go",
+        "doc.go",
+        "endpoints.go",
+        "endpoints_config_test.go",
+        "endpoints_test.go",
+        "generated.json",
+        "internal/endpoints/endpoints.go",
+        "internal/endpoints/endpoints_test.go",
+        "options.go",
+        "protocol_test.go",
+        "serializers.go",
+        "snapshot_test.go",
+        "types/errors.go",
+        "types/types.go",
+        "validators.go"
+    ],
+    "go": "1.15",
+    "module": "github.com/aws/aws-sdk-go-v2/service/sso",
+    "unstable": false
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/service/sso/go_module_metadata.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/service/sso/go_module_metadata.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/service/sso/go_module_metadata.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/service/sso/go_module_metadata.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,6 @@
+// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT.
+
+package sso
+
+// goModuleVersion is the tagged release for this module
+const goModuleVersion = "1.22.4"
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/service/sso/internal/endpoints/endpoints.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/service/sso/internal/endpoints/endpoints.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/service/sso/internal/endpoints/endpoints.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/service/sso/internal/endpoints/endpoints.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,566 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package endpoints
+
+import (
+	"github.com/aws/aws-sdk-go-v2/aws"
+	endpoints "github.com/aws/aws-sdk-go-v2/internal/endpoints/v2"
+	"github.com/aws/smithy-go/logging"
+	"regexp"
+)
+
+// Options is the endpoint resolver configuration options
+type Options struct {
+	// Logger is a logging implementation that log events should be sent to.
+	Logger logging.Logger
+
+	// LogDeprecated indicates that deprecated endpoints should be logged to the
+	// provided logger.
+	LogDeprecated bool
+
+	// ResolvedRegion is used to override the region to be resolved, rather then the
+	// using the value passed to the ResolveEndpoint method. This value is used by the
+	// SDK to translate regions like fips-us-east-1 or us-east-1-fips to an alternative
+	// name. You must not set this value directly in your application.
+	ResolvedRegion string
+
+	// DisableHTTPS informs the resolver to return an endpoint that does not use the
+	// HTTPS scheme.
+	DisableHTTPS bool
+
+	// UseDualStackEndpoint specifies the resolver must resolve a dual-stack endpoint.
+	UseDualStackEndpoint aws.DualStackEndpointState
+
+	// UseFIPSEndpoint specifies the resolver must resolve a FIPS endpoint.
+	UseFIPSEndpoint aws.FIPSEndpointState
+}
+
+func (o Options) GetResolvedRegion() string {
+	return o.ResolvedRegion
+}
+
+func (o Options) GetDisableHTTPS() bool {
+	return o.DisableHTTPS
+}
+
+func (o Options) GetUseDualStackEndpoint() aws.DualStackEndpointState {
+	return o.UseDualStackEndpoint
+}
+
+func (o Options) GetUseFIPSEndpoint() aws.FIPSEndpointState {
+	return o.UseFIPSEndpoint
+}
+
+func transformToSharedOptions(options Options) endpoints.Options {
+	return endpoints.Options{
+		Logger:               options.Logger,
+		LogDeprecated:        options.LogDeprecated,
+		ResolvedRegion:       options.ResolvedRegion,
+		DisableHTTPS:         options.DisableHTTPS,
+		UseDualStackEndpoint: options.UseDualStackEndpoint,
+		UseFIPSEndpoint:      options.UseFIPSEndpoint,
+	}
+}
+
+// Resolver SSO endpoint resolver
+type Resolver struct {
+	partitions endpoints.Partitions
+}
+
+// ResolveEndpoint resolves the service endpoint for the given region and options
+func (r *Resolver) ResolveEndpoint(region string, options Options) (endpoint aws.Endpoint, err error) {
+	if len(region) == 0 {
+		return endpoint, &aws.MissingRegionError{}
+	}
+
+	opt := transformToSharedOptions(options)
+	return r.partitions.ResolveEndpoint(region, opt)
+}
+
+// New returns a new Resolver
+func New() *Resolver {
+	return &Resolver{
+		partitions: defaultPartitions,
+	}
+}
+
+var partitionRegexp = struct {
+	Aws      *regexp.Regexp
+	AwsCn    *regexp.Regexp
+	AwsIso   *regexp.Regexp
+	AwsIsoB  *regexp.Regexp
+	AwsIsoE  *regexp.Regexp
+	AwsIsoF  *regexp.Regexp
+	AwsUsGov *regexp.Regexp
+}{
+
+	Aws:      regexp.MustCompile("^(us|eu|ap|sa|ca|me|af|il)\\-\\w+\\-\\d+$"),
+	AwsCn:    regexp.MustCompile("^cn\\-\\w+\\-\\d+$"),
+	AwsIso:   regexp.MustCompile("^us\\-iso\\-\\w+\\-\\d+$"),
+	AwsIsoB:  regexp.MustCompile("^us\\-isob\\-\\w+\\-\\d+$"),
+	AwsIsoE:  regexp.MustCompile("^eu\\-isoe\\-\\w+\\-\\d+$"),
+	AwsIsoF:  regexp.MustCompile("^us\\-isof\\-\\w+\\-\\d+$"),
+	AwsUsGov: regexp.MustCompile("^us\\-gov\\-\\w+\\-\\d+$"),
+}
+
+var defaultPartitions = endpoints.Partitions{
+	{
+		ID: "aws",
+		Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{
+			{
+				Variant: endpoints.DualStackVariant,
+			}: {
+				Hostname:          "portal.sso.{region}.api.aws",
+				Protocols:         []string{"https"},
+				SignatureVersions: []string{"v4"},
+			},
+			{
+				Variant: endpoints.FIPSVariant,
+			}: {
+				Hostname:          "portal.sso-fips.{region}.amazonaws.com",
+				Protocols:         []string{"https"},
+				SignatureVersions: []string{"v4"},
+			},
+			{
+				Variant: endpoints.FIPSVariant | endpoints.DualStackVariant,
+			}: {
+				Hostname:          "portal.sso-fips.{region}.api.aws",
+				Protocols:         []string{"https"},
+				SignatureVersions: []string{"v4"},
+			},
+			{
+				Variant: 0,
+			}: {
+				Hostname:          "portal.sso.{region}.amazonaws.com",
+				Protocols:         []string{"https"},
+				SignatureVersions: []string{"v4"},
+			},
+		},
+		RegionRegex:    partitionRegexp.Aws,
+		IsRegionalized: true,
+		Endpoints: endpoints.Endpoints{
+			endpoints.EndpointKey{
+				Region: "af-south-1",
+			}: endpoints.Endpoint{
+				Hostname: "portal.sso.af-south-1.amazonaws.com",
+				CredentialScope: endpoints.CredentialScope{
+					Region: "af-south-1",
+				},
+			},
+			endpoints.EndpointKey{
+				Region: "ap-east-1",
+			}: endpoints.Endpoint{
+				Hostname: "portal.sso.ap-east-1.amazonaws.com",
+				CredentialScope: endpoints.CredentialScope{
+					Region: "ap-east-1",
+				},
+			},
+			endpoints.EndpointKey{
+				Region: "ap-northeast-1",
+			}: endpoints.Endpoint{
+				Hostname: "portal.sso.ap-northeast-1.amazonaws.com",
+				CredentialScope: endpoints.CredentialScope{
+					Region: "ap-northeast-1",
+				},
+			},
+			endpoints.EndpointKey{
+				Region: "ap-northeast-2",
+			}: endpoints.Endpoint{
+				Hostname: "portal.sso.ap-northeast-2.amazonaws.com",
+				CredentialScope: endpoints.CredentialScope{
+					Region: "ap-northeast-2",
+				},
+			},
+			endpoints.EndpointKey{
+				Region: "ap-northeast-3",
+			}: endpoints.Endpoint{
+				Hostname: "portal.sso.ap-northeast-3.amazonaws.com",
+				CredentialScope: endpoints.CredentialScope{
+					Region: "ap-northeast-3",
+				},
+			},
+			endpoints.EndpointKey{
+				Region: "ap-south-1",
+			}: endpoints.Endpoint{
+				Hostname: "portal.sso.ap-south-1.amazonaws.com",
+				CredentialScope: endpoints.CredentialScope{
+					Region: "ap-south-1",
+				},
+			},
+			endpoints.EndpointKey{
+				Region: "ap-south-2",
+			}: endpoints.Endpoint{
+				Hostname: "portal.sso.ap-south-2.amazonaws.com",
+				CredentialScope: endpoints.CredentialScope{
+					Region: "ap-south-2",
+				},
+			},
+			endpoints.EndpointKey{
+				Region: "ap-southeast-1",
+			}: endpoints.Endpoint{
+				Hostname: "portal.sso.ap-southeast-1.amazonaws.com",
+				CredentialScope: endpoints.CredentialScope{
+					Region: "ap-southeast-1",
+				},
+			},
+			endpoints.EndpointKey{
+				Region: "ap-southeast-2",
+			}: endpoints.Endpoint{
+				Hostname: "portal.sso.ap-southeast-2.amazonaws.com",
+				CredentialScope: endpoints.CredentialScope{
+					Region: "ap-southeast-2",
+				},
+			},
+			endpoints.EndpointKey{
+				Region: "ap-southeast-3",
+			}: endpoints.Endpoint{
+				Hostname: "portal.sso.ap-southeast-3.amazonaws.com",
+				CredentialScope: endpoints.CredentialScope{
+					Region: "ap-southeast-3",
+				},
+			},
+			endpoints.EndpointKey{
+				Region: "ap-southeast-4",
+			}: endpoints.Endpoint{
+				Hostname: "portal.sso.ap-southeast-4.amazonaws.com",
+				CredentialScope: endpoints.CredentialScope{
+					Region: "ap-southeast-4",
+				},
+			},
+			endpoints.EndpointKey{
+				Region: "ca-central-1",
+			}: endpoints.Endpoint{
+				Hostname: "portal.sso.ca-central-1.amazonaws.com",
+				CredentialScope: endpoints.CredentialScope{
+					Region: "ca-central-1",
+				},
+			},
+			endpoints.EndpointKey{
+				Region: "ca-west-1",
+			}: endpoints.Endpoint{
+				Hostname: "portal.sso.ca-west-1.amazonaws.com",
+				CredentialScope: endpoints.CredentialScope{
+					Region: "ca-west-1",
+				},
+			},
+			endpoints.EndpointKey{
+				Region: "eu-central-1",
+			}: endpoints.Endpoint{
+				Hostname: "portal.sso.eu-central-1.amazonaws.com",
+				CredentialScope: endpoints.CredentialScope{
+					Region: "eu-central-1",
+				},
+			},
+			endpoints.EndpointKey{
+				Region: "eu-central-2",
+			}: endpoints.Endpoint{
+				Hostname: "portal.sso.eu-central-2.amazonaws.com",
+				CredentialScope: endpoints.CredentialScope{
+					Region: "eu-central-2",
+				},
+			},
+			endpoints.EndpointKey{
+				Region: "eu-north-1",
+			}: endpoints.Endpoint{
+				Hostname: "portal.sso.eu-north-1.amazonaws.com",
+				CredentialScope: endpoints.CredentialScope{
+					Region: "eu-north-1",
+				},
+			},
+			endpoints.EndpointKey{
+				Region: "eu-south-1",
+			}: endpoints.Endpoint{
+				Hostname: "portal.sso.eu-south-1.amazonaws.com",
+				CredentialScope: endpoints.CredentialScope{
+					Region: "eu-south-1",
+				},
+			},
+			endpoints.EndpointKey{
+				Region: "eu-south-2",
+			}: endpoints.Endpoint{
+				Hostname: "portal.sso.eu-south-2.amazonaws.com",
+				CredentialScope: endpoints.CredentialScope{
+					Region: "eu-south-2",
+				},
+			},
+			endpoints.EndpointKey{
+				Region: "eu-west-1",
+			}: endpoints.Endpoint{
+				Hostname: "portal.sso.eu-west-1.amazonaws.com",
+				CredentialScope: endpoints.CredentialScope{
+					Region: "eu-west-1",
+				},
+			},
+			endpoints.EndpointKey{
+				Region: "eu-west-2",
+			}: endpoints.Endpoint{
+				Hostname: "portal.sso.eu-west-2.amazonaws.com",
+				CredentialScope: endpoints.CredentialScope{
+					Region: "eu-west-2",
+				},
+			},
+			endpoints.EndpointKey{
+				Region: "eu-west-3",
+			}: endpoints.Endpoint{
+				Hostname: "portal.sso.eu-west-3.amazonaws.com",
+				CredentialScope: endpoints.CredentialScope{
+					Region: "eu-west-3",
+				},
+			},
+			endpoints.EndpointKey{
+				Region: "il-central-1",
+			}: endpoints.Endpoint{
+				Hostname: "portal.sso.il-central-1.amazonaws.com",
+				CredentialScope: endpoints.CredentialScope{
+					Region: "il-central-1",
+				},
+			},
+			endpoints.EndpointKey{
+				Region: "me-central-1",
+			}: endpoints.Endpoint{
+				Hostname: "portal.sso.me-central-1.amazonaws.com",
+				CredentialScope: endpoints.CredentialScope{
+					Region: "me-central-1",
+				},
+			},
+			endpoints.EndpointKey{
+				Region: "me-south-1",
+			}: endpoints.Endpoint{
+				Hostname: "portal.sso.me-south-1.amazonaws.com",
+				CredentialScope: endpoints.CredentialScope{
+					Region: "me-south-1",
+				},
+			},
+			endpoints.EndpointKey{
+				Region: "sa-east-1",
+			}: endpoints.Endpoint{
+				Hostname: "portal.sso.sa-east-1.amazonaws.com",
+				CredentialScope: endpoints.CredentialScope{
+					Region: "sa-east-1",
+				},
+			},
+			endpoints.EndpointKey{
+				Region: "us-east-1",
+			}: endpoints.Endpoint{
+				Hostname: "portal.sso.us-east-1.amazonaws.com",
+				CredentialScope: endpoints.CredentialScope{
+					Region: "us-east-1",
+				},
+			},
+			endpoints.EndpointKey{
+				Region: "us-east-2",
+			}: endpoints.Endpoint{
+				Hostname: "portal.sso.us-east-2.amazonaws.com",
+				CredentialScope: endpoints.CredentialScope{
+					Region: "us-east-2",
+				},
+			},
+			endpoints.EndpointKey{
+				Region: "us-west-1",
+			}: endpoints.Endpoint{
+				Hostname: "portal.sso.us-west-1.amazonaws.com",
+				CredentialScope: endpoints.CredentialScope{
+					Region: "us-west-1",
+				},
+			},
+			endpoints.EndpointKey{
+				Region: "us-west-2",
+			}: endpoints.Endpoint{
+				Hostname: "portal.sso.us-west-2.amazonaws.com",
+				CredentialScope: endpoints.CredentialScope{
+					Region: "us-west-2",
+				},
+			},
+		},
+	},
+	{
+		ID: "aws-cn",
+		Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{
+			{
+				Variant: endpoints.DualStackVariant,
+			}: {
+				Hostname:          "portal.sso.{region}.api.amazonwebservices.com.cn",
+				Protocols:         []string{"https"},
+				SignatureVersions: []string{"v4"},
+			},
+			{
+				Variant: endpoints.FIPSVariant,
+			}: {
+				Hostname:          "portal.sso-fips.{region}.amazonaws.com.cn",
+				Protocols:         []string{"https"},
+				SignatureVersions: []string{"v4"},
+			},
+			{
+				Variant: endpoints.FIPSVariant | endpoints.DualStackVariant,
+			}: {
+				Hostname:          "portal.sso-fips.{region}.api.amazonwebservices.com.cn",
+				Protocols:         []string{"https"},
+				SignatureVersions: []string{"v4"},
+			},
+			{
+				Variant: 0,
+			}: {
+				Hostname:          "portal.sso.{region}.amazonaws.com.cn",
+				Protocols:         []string{"https"},
+				SignatureVersions: []string{"v4"},
+			},
+		},
+		RegionRegex:    partitionRegexp.AwsCn,
+		IsRegionalized: true,
+		Endpoints: endpoints.Endpoints{
+			endpoints.EndpointKey{
+				Region: "cn-north-1",
+			}: endpoints.Endpoint{
+				Hostname: "portal.sso.cn-north-1.amazonaws.com.cn",
+				CredentialScope: endpoints.CredentialScope{
+					Region: "cn-north-1",
+				},
+			},
+			endpoints.EndpointKey{
+				Region: "cn-northwest-1",
+			}: endpoints.Endpoint{
+				Hostname: "portal.sso.cn-northwest-1.amazonaws.com.cn",
+				CredentialScope: endpoints.CredentialScope{
+					Region: "cn-northwest-1",
+				},
+			},
+		},
+	},
+	{
+		ID: "aws-iso",
+		Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{
+			{
+				Variant: endpoints.FIPSVariant,
+			}: {
+				Hostname:          "portal.sso-fips.{region}.c2s.ic.gov",
+				Protocols:         []string{"https"},
+				SignatureVersions: []string{"v4"},
+			},
+			{
+				Variant: 0,
+			}: {
+				Hostname:          "portal.sso.{region}.c2s.ic.gov",
+				Protocols:         []string{"https"},
+				SignatureVersions: []string{"v4"},
+			},
+		},
+		RegionRegex:    partitionRegexp.AwsIso,
+		IsRegionalized: true,
+	},
+	{
+		ID: "aws-iso-b",
+		Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{
+			{
+				Variant: endpoints.FIPSVariant,
+			}: {
+				Hostname:          "portal.sso-fips.{region}.sc2s.sgov.gov",
+				Protocols:         []string{"https"},
+				SignatureVersions: []string{"v4"},
+			},
+			{
+				Variant: 0,
+			}: {
+				Hostname:          "portal.sso.{region}.sc2s.sgov.gov",
+				Protocols:         []string{"https"},
+				SignatureVersions: []string{"v4"},
+			},
+		},
+		RegionRegex:    partitionRegexp.AwsIsoB,
+		IsRegionalized: true,
+	},
+	{
+		ID: "aws-iso-e",
+		Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{
+			{
+				Variant: endpoints.FIPSVariant,
+			}: {
+				Hostname:          "portal.sso-fips.{region}.cloud.adc-e.uk",
+				Protocols:         []string{"https"},
+				SignatureVersions: []string{"v4"},
+			},
+			{
+				Variant: 0,
+			}: {
+				Hostname:          "portal.sso.{region}.cloud.adc-e.uk",
+				Protocols:         []string{"https"},
+				SignatureVersions: []string{"v4"},
+			},
+		},
+		RegionRegex:    partitionRegexp.AwsIsoE,
+		IsRegionalized: true,
+	},
+	{
+		ID: "aws-iso-f",
+		Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{
+			{
+				Variant: endpoints.FIPSVariant,
+			}: {
+				Hostname:          "portal.sso-fips.{region}.csp.hci.ic.gov",
+				Protocols:         []string{"https"},
+				SignatureVersions: []string{"v4"},
+			},
+			{
+				Variant: 0,
+			}: {
+				Hostname:          "portal.sso.{region}.csp.hci.ic.gov",
+				Protocols:         []string{"https"},
+				SignatureVersions: []string{"v4"},
+			},
+		},
+		RegionRegex:    partitionRegexp.AwsIsoF,
+		IsRegionalized: true,
+	},
+	{
+		ID: "aws-us-gov",
+		Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{
+			{
+				Variant: endpoints.DualStackVariant,
+			}: {
+				Hostname:          "portal.sso.{region}.api.aws",
+				Protocols:         []string{"https"},
+				SignatureVersions: []string{"v4"},
+			},
+			{
+				Variant: endpoints.FIPSVariant,
+			}: {
+				Hostname:          "portal.sso-fips.{region}.amazonaws.com",
+				Protocols:         []string{"https"},
+				SignatureVersions: []string{"v4"},
+			},
+			{
+				Variant: endpoints.FIPSVariant | endpoints.DualStackVariant,
+			}: {
+				Hostname:          "portal.sso-fips.{region}.api.aws",
+				Protocols:         []string{"https"},
+				SignatureVersions: []string{"v4"},
+			},
+			{
+				Variant: 0,
+			}: {
+				Hostname:          "portal.sso.{region}.amazonaws.com",
+				Protocols:         []string{"https"},
+				SignatureVersions: []string{"v4"},
+			},
+		},
+		RegionRegex:    partitionRegexp.AwsUsGov,
+		IsRegionalized: true,
+		Endpoints: endpoints.Endpoints{
+			endpoints.EndpointKey{
+				Region: "us-gov-east-1",
+			}: endpoints.Endpoint{
+				Hostname: "portal.sso.us-gov-east-1.amazonaws.com",
+				CredentialScope: endpoints.CredentialScope{
+					Region: "us-gov-east-1",
+				},
+			},
+			endpoints.EndpointKey{
+				Region: "us-gov-west-1",
+			}: endpoints.Endpoint{
+				Hostname: "portal.sso.us-gov-west-1.amazonaws.com",
+				CredentialScope: endpoints.CredentialScope{
+					Region: "us-gov-west-1",
+				},
+			},
+		},
+	},
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/service/sso/options.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/service/sso/options.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/service/sso/options.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/service/sso/options.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,227 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package sso
+
+import (
+	"context"
+	"github.com/aws/aws-sdk-go-v2/aws"
+	awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+	internalauthsmithy "github.com/aws/aws-sdk-go-v2/internal/auth/smithy"
+	smithyauth "github.com/aws/smithy-go/auth"
+	"github.com/aws/smithy-go/logging"
+	"github.com/aws/smithy-go/middleware"
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+	"net/http"
+)
+
+type HTTPClient interface {
+	Do(*http.Request) (*http.Response, error)
+}
+
+type Options struct {
+	// Set of options to modify how an operation is invoked. These apply to all
+	// operations invoked for this client. Use functional options on operation call to
+	// modify this list for per operation behavior.
+	APIOptions []func(*middleware.Stack) error
+
+	// Indicates how aws account ID is applied in endpoint2.0 routing
+	AccountIDEndpointMode aws.AccountIDEndpointMode
+
+	// The optional application specific identifier appended to the User-Agent header.
+	AppID string
+
+	// This endpoint will be given as input to an EndpointResolverV2. It is used for
+	// providing a custom base endpoint that is subject to modifications by the
+	// processing EndpointResolverV2.
+	BaseEndpoint *string
+
+	// Configures the events that will be sent to the configured logger.
+	ClientLogMode aws.ClientLogMode
+
+	// The credentials object to use when signing requests.
+	Credentials aws.CredentialsProvider
+
+	// The configuration DefaultsMode that the SDK should use when constructing the
+	// clients initial default settings.
+	DefaultsMode aws.DefaultsMode
+
+	// The endpoint options to be used when attempting to resolve an endpoint.
+	EndpointOptions EndpointResolverOptions
+
+	// The service endpoint resolver.
+	//
+	// Deprecated: Deprecated: EndpointResolver and WithEndpointResolver. Providing a
+	// value for this field will likely prevent you from using any endpoint-related
+	// service features released after the introduction of EndpointResolverV2 and
+	// BaseEndpoint.
+	//
+	// To migrate an EndpointResolver implementation that uses a custom endpoint, set
+	// the client option BaseEndpoint instead.
+	EndpointResolver EndpointResolver
+
+	// Resolves the endpoint used for a particular service operation. This should be
+	// used over the deprecated EndpointResolver.
+	EndpointResolverV2 EndpointResolverV2
+
+	// Signature Version 4 (SigV4) Signer
+	HTTPSignerV4 HTTPSignerV4
+
+	// The logger writer interface to write logging messages to.
+	Logger logging.Logger
+
+	// The region to send requests to. (Required)
+	Region string
+
+	// RetryMaxAttempts specifies the maximum number attempts an API client will call
+	// an operation that fails with a retryable error. A value of 0 is ignored, and
+	// will not be used to configure the API client created default retryer, or modify
+	// per operation call's retry max attempts.
+	//
+	// If specified in an operation call's functional options with a value that is
+	// different than the constructed client's Options, the Client's Retryer will be
+	// wrapped to use the operation's specific RetryMaxAttempts value.
+	RetryMaxAttempts int
+
+	// RetryMode specifies the retry mode the API client will be created with, if
+	// Retryer option is not also specified.
+	//
+	// When creating a new API Clients this member will only be used if the Retryer
+	// Options member is nil. This value will be ignored if Retryer is not nil.
+	//
+	// Currently does not support per operation call overrides, may in the future.
+	RetryMode aws.RetryMode
+
+	// Retryer guides how HTTP requests should be retried in case of recoverable
+	// failures. When nil the API client will use a default retryer. The kind of
+	// default retry created by the API client can be changed with the RetryMode
+	// option.
+	Retryer aws.Retryer
+
+	// The RuntimeEnvironment configuration, only populated if the DefaultsMode is set
+	// to DefaultsModeAuto and is initialized using config.LoadDefaultConfig . You
+	// should not populate this structure programmatically, or rely on the values here
+	// within your applications.
+	RuntimeEnvironment aws.RuntimeEnvironment
+
+	// The initial DefaultsMode used when the client options were constructed. If the
+	// DefaultsMode was set to aws.DefaultsModeAuto this will store what the resolved
+	// value was at that point in time.
+	//
+	// Currently does not support per operation call overrides, may in the future.
+	resolvedDefaultsMode aws.DefaultsMode
+
+	// The HTTP client to invoke API calls with. Defaults to client's default HTTP
+	// implementation if nil.
+	HTTPClient HTTPClient
+
+	// The auth scheme resolver which determines how to authenticate for each
+	// operation.
+	AuthSchemeResolver AuthSchemeResolver
+
+	// The list of auth schemes supported by the client.
+	AuthSchemes []smithyhttp.AuthScheme
+}
+
+// Copy creates a clone where the APIOptions list is deep copied.
+func (o Options) Copy() Options {
+	to := o
+	to.APIOptions = make([]func(*middleware.Stack) error, len(o.APIOptions))
+	copy(to.APIOptions, o.APIOptions)
+
+	return to
+}
+
+func (o Options) GetIdentityResolver(schemeID string) smithyauth.IdentityResolver {
+	if schemeID == "aws.auth#sigv4" {
+		return getSigV4IdentityResolver(o)
+	}
+	if schemeID == "smithy.api#noAuth" {
+		return &smithyauth.AnonymousIdentityResolver{}
+	}
+	return nil
+}
+
+// WithAPIOptions returns a functional option for setting the Client's APIOptions
+// option.
+func WithAPIOptions(optFns ...func(*middleware.Stack) error) func(*Options) {
+	return func(o *Options) {
+		o.APIOptions = append(o.APIOptions, optFns...)
+	}
+}
+
+// Deprecated: EndpointResolver and WithEndpointResolver. Providing a value for
+// this field will likely prevent you from using any endpoint-related service
+// features released after the introduction of EndpointResolverV2 and BaseEndpoint.
+//
+// To migrate an EndpointResolver implementation that uses a custom endpoint, set
+// the client option BaseEndpoint instead.
+func WithEndpointResolver(v EndpointResolver) func(*Options) {
+	return func(o *Options) {
+		o.EndpointResolver = v
+	}
+}
+
+// WithEndpointResolverV2 returns a functional option for setting the Client's
+// EndpointResolverV2 option.
+func WithEndpointResolverV2(v EndpointResolverV2) func(*Options) {
+	return func(o *Options) {
+		o.EndpointResolverV2 = v
+	}
+}
+
+func getSigV4IdentityResolver(o Options) smithyauth.IdentityResolver {
+	if o.Credentials != nil {
+		return &internalauthsmithy.CredentialsProviderAdapter{Provider: o.Credentials}
+	}
+	return nil
+}
+
+// WithSigV4SigningName applies an override to the authentication workflow to
+// use the given signing name for SigV4-authenticated operations.
+//
+// This is an advanced setting. The value here is FINAL, taking precedence over
+// the resolved signing name from both auth scheme resolution and endpoint
+// resolution.
+func WithSigV4SigningName(name string) func(*Options) {
+	fn := func(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+		out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+	) {
+		return next.HandleInitialize(awsmiddleware.SetSigningName(ctx, name), in)
+	}
+	return func(o *Options) {
+		o.APIOptions = append(o.APIOptions, func(s *middleware.Stack) error {
+			return s.Initialize.Add(
+				middleware.InitializeMiddlewareFunc("withSigV4SigningName", fn),
+				middleware.Before,
+			)
+		})
+	}
+}
+
+// WithSigV4SigningRegion applies an override to the authentication workflow to
+// use the given signing region for SigV4-authenticated operations.
+//
+// This is an advanced setting. The value here is FINAL, taking precedence over
+// the resolved signing region from both auth scheme resolution and endpoint
+// resolution.
+func WithSigV4SigningRegion(region string) func(*Options) {
+	fn := func(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+		out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+	) {
+		return next.HandleInitialize(awsmiddleware.SetSigningRegion(ctx, region), in)
+	}
+	return func(o *Options) {
+		o.APIOptions = append(o.APIOptions, func(s *middleware.Stack) error {
+			return s.Initialize.Add(
+				middleware.InitializeMiddlewareFunc("withSigV4SigningRegion", fn),
+				middleware.Before,
+			)
+		})
+	}
+}
+
+func ignoreAnonymousAuth(options *Options) {
+	if aws.IsCredentialsProvider(options.Credentials, (*aws.AnonymousCredentials)(nil)) {
+		options.Credentials = nil
+	}
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/service/sso/serializers.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/service/sso/serializers.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/service/sso/serializers.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/service/sso/serializers.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,284 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package sso
+
+import (
+	"context"
+	"fmt"
+	smithy "github.com/aws/smithy-go"
+	"github.com/aws/smithy-go/encoding/httpbinding"
+	"github.com/aws/smithy-go/middleware"
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+type awsRestjson1_serializeOpGetRoleCredentials struct {
+}
+
+func (*awsRestjson1_serializeOpGetRoleCredentials) ID() string {
+	return "OperationSerializer"
+}
+
+func (m *awsRestjson1_serializeOpGetRoleCredentials) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	request, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+	}
+
+	input, ok := in.Parameters.(*GetRoleCredentialsInput)
+	_ = input
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+	}
+
+	opPath, opQuery := httpbinding.SplitURI("/federation/credentials")
+	request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
+	request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
+	request.Method = "GET"
+	var restEncoder *httpbinding.Encoder
+	if request.URL.RawPath == "" {
+		restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	} else {
+		request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+		restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+	}
+
+	if err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if err := awsRestjson1_serializeOpHttpBindingsGetRoleCredentialsInput(input, restEncoder); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	in.Request = request
+
+	return next.HandleSerialize(ctx, in)
+}
+func awsRestjson1_serializeOpHttpBindingsGetRoleCredentialsInput(v *GetRoleCredentialsInput, encoder *httpbinding.Encoder) error {
+	if v == nil {
+		return fmt.Errorf("unsupported serialization of nil %T", v)
+	}
+
+	if v.AccessToken != nil && len(*v.AccessToken) > 0 {
+		locationName := "X-Amz-Sso_bearer_token"
+		encoder.SetHeader(locationName).String(*v.AccessToken)
+	}
+
+	if v.AccountId != nil {
+		encoder.SetQuery("account_id").String(*v.AccountId)
+	}
+
+	if v.RoleName != nil {
+		encoder.SetQuery("role_name").String(*v.RoleName)
+	}
+
+	return nil
+}
+
+type awsRestjson1_serializeOpListAccountRoles struct {
+}
+
+func (*awsRestjson1_serializeOpListAccountRoles) ID() string {
+	return "OperationSerializer"
+}
+
+func (m *awsRestjson1_serializeOpListAccountRoles) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	request, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+	}
+
+	input, ok := in.Parameters.(*ListAccountRolesInput)
+	_ = input
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+	}
+
+	opPath, opQuery := httpbinding.SplitURI("/assignment/roles")
+	request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
+	request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
+	request.Method = "GET"
+	var restEncoder *httpbinding.Encoder
+	if request.URL.RawPath == "" {
+		restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	} else {
+		request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+		restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+	}
+
+	if err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if err := awsRestjson1_serializeOpHttpBindingsListAccountRolesInput(input, restEncoder); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	in.Request = request
+
+	return next.HandleSerialize(ctx, in)
+}
+func awsRestjson1_serializeOpHttpBindingsListAccountRolesInput(v *ListAccountRolesInput, encoder *httpbinding.Encoder) error {
+	if v == nil {
+		return fmt.Errorf("unsupported serialization of nil %T", v)
+	}
+
+	if v.AccessToken != nil && len(*v.AccessToken) > 0 {
+		locationName := "X-Amz-Sso_bearer_token"
+		encoder.SetHeader(locationName).String(*v.AccessToken)
+	}
+
+	if v.AccountId != nil {
+		encoder.SetQuery("account_id").String(*v.AccountId)
+	}
+
+	if v.MaxResults != nil {
+		encoder.SetQuery("max_result").Integer(*v.MaxResults)
+	}
+
+	if v.NextToken != nil {
+		encoder.SetQuery("next_token").String(*v.NextToken)
+	}
+
+	return nil
+}
+
+type awsRestjson1_serializeOpListAccounts struct {
+}
+
+func (*awsRestjson1_serializeOpListAccounts) ID() string {
+	return "OperationSerializer"
+}
+
+func (m *awsRestjson1_serializeOpListAccounts) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	request, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+	}
+
+	input, ok := in.Parameters.(*ListAccountsInput)
+	_ = input
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+	}
+
+	opPath, opQuery := httpbinding.SplitURI("/assignment/accounts")
+	request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
+	request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
+	request.Method = "GET"
+	var restEncoder *httpbinding.Encoder
+	if request.URL.RawPath == "" {
+		restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	} else {
+		request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+		restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+	}
+
+	if err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if err := awsRestjson1_serializeOpHttpBindingsListAccountsInput(input, restEncoder); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	in.Request = request
+
+	return next.HandleSerialize(ctx, in)
+}
+func awsRestjson1_serializeOpHttpBindingsListAccountsInput(v *ListAccountsInput, encoder *httpbinding.Encoder) error {
+	if v == nil {
+		return fmt.Errorf("unsupported serialization of nil %T", v)
+	}
+
+	if v.AccessToken != nil && len(*v.AccessToken) > 0 {
+		locationName := "X-Amz-Sso_bearer_token"
+		encoder.SetHeader(locationName).String(*v.AccessToken)
+	}
+
+	if v.MaxResults != nil {
+		encoder.SetQuery("max_result").Integer(*v.MaxResults)
+	}
+
+	if v.NextToken != nil {
+		encoder.SetQuery("next_token").String(*v.NextToken)
+	}
+
+	return nil
+}
+
+type awsRestjson1_serializeOpLogout struct {
+}
+
+func (*awsRestjson1_serializeOpLogout) ID() string {
+	return "OperationSerializer"
+}
+
+func (m *awsRestjson1_serializeOpLogout) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	request, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+	}
+
+	input, ok := in.Parameters.(*LogoutInput)
+	_ = input
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+	}
+
+	opPath, opQuery := httpbinding.SplitURI("/logout")
+	request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
+	request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
+	request.Method = "POST"
+	var restEncoder *httpbinding.Encoder
+	if request.URL.RawPath == "" {
+		restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	} else {
+		request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+		restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+	}
+
+	if err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if err := awsRestjson1_serializeOpHttpBindingsLogoutInput(input, restEncoder); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	in.Request = request
+
+	return next.HandleSerialize(ctx, in)
+}
+func awsRestjson1_serializeOpHttpBindingsLogoutInput(v *LogoutInput, encoder *httpbinding.Encoder) error {
+	if v == nil {
+		return fmt.Errorf("unsupported serialization of nil %T", v)
+	}
+
+	if v.AccessToken != nil && len(*v.AccessToken) > 0 {
+		locationName := "X-Amz-Sso_bearer_token"
+		encoder.SetHeader(locationName).String(*v.AccessToken)
+	}
+
+	return nil
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/service/sso/types/errors.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/service/sso/types/errors.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/service/sso/types/errors.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/service/sso/types/errors.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,115 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package types
+
+import (
+	"fmt"
+	smithy "github.com/aws/smithy-go"
+)
+
+// Indicates that a problem occurred with the input to the request. For example, a
+// required parameter might be missing or out of range.
+type InvalidRequestException struct {
+	Message *string
+
+	ErrorCodeOverride *string
+
+	noSmithyDocumentSerde
+}
+
+func (e *InvalidRequestException) Error() string {
+	return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage())
+}
+func (e *InvalidRequestException) ErrorMessage() string {
+	if e.Message == nil {
+		return ""
+	}
+	return *e.Message
+}
+func (e *InvalidRequestException) ErrorCode() string {
+	if e == nil || e.ErrorCodeOverride == nil {
+		return "InvalidRequestException"
+	}
+	return *e.ErrorCodeOverride
+}
+func (e *InvalidRequestException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
+
+// The specified resource doesn't exist.
+type ResourceNotFoundException struct {
+	Message *string
+
+	ErrorCodeOverride *string
+
+	noSmithyDocumentSerde
+}
+
+func (e *ResourceNotFoundException) Error() string {
+	return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage())
+}
+func (e *ResourceNotFoundException) ErrorMessage() string {
+	if e.Message == nil {
+		return ""
+	}
+	return *e.Message
+}
+func (e *ResourceNotFoundException) ErrorCode() string {
+	if e == nil || e.ErrorCodeOverride == nil {
+		return "ResourceNotFoundException"
+	}
+	return *e.ErrorCodeOverride
+}
+func (e *ResourceNotFoundException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
+
+// Indicates that the request is being made too frequently and is more than what
+// the server can handle.
+type TooManyRequestsException struct {
+	Message *string
+
+	ErrorCodeOverride *string
+
+	noSmithyDocumentSerde
+}
+
+func (e *TooManyRequestsException) Error() string {
+	return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage())
+}
+func (e *TooManyRequestsException) ErrorMessage() string {
+	if e.Message == nil {
+		return ""
+	}
+	return *e.Message
+}
+func (e *TooManyRequestsException) ErrorCode() string {
+	if e == nil || e.ErrorCodeOverride == nil {
+		return "TooManyRequestsException"
+	}
+	return *e.ErrorCodeOverride
+}
+func (e *TooManyRequestsException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
+
+// Indicates that the request is not authorized. This can happen due to an invalid
+// access token in the request.
+type UnauthorizedException struct {
+	Message *string
+
+	ErrorCodeOverride *string
+
+	noSmithyDocumentSerde
+}
+
+func (e *UnauthorizedException) Error() string {
+	return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage())
+}
+func (e *UnauthorizedException) ErrorMessage() string {
+	if e.Message == nil {
+		return ""
+	}
+	return *e.Message
+}
+func (e *UnauthorizedException) ErrorCode() string {
+	if e == nil || e.ErrorCodeOverride == nil {
+		return "UnauthorizedException"
+	}
+	return *e.ErrorCodeOverride
+}
+func (e *UnauthorizedException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/service/sso/types/types.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/service/sso/types/types.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/service/sso/types/types.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/service/sso/types/types.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,63 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package types
+
+import (
+	smithydocument "github.com/aws/smithy-go/document"
+)
+
+// Provides information about your AWS account.
+type AccountInfo struct {
+
+	// The identifier of the AWS account that is assigned to the user.
+	AccountId *string
+
+	// The display name of the AWS account that is assigned to the user.
+	AccountName *string
+
+	// The email address of the AWS account that is assigned to the user.
+	EmailAddress *string
+
+	noSmithyDocumentSerde
+}
+
+// Provides information about the role credentials that are assigned to the user.
+type RoleCredentials struct {
+
+	// The identifier used for the temporary security credentials. For more
+	// information, see [Using Temporary Security Credentials to Request Access to AWS Resources]in the AWS IAM User Guide.
+	//
+	// [Using Temporary Security Credentials to Request Access to AWS Resources]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_use-resources.html
+	AccessKeyId *string
+
+	// The date on which temporary security credentials expire.
+	Expiration int64
+
+	// The key that is used to sign the request. For more information, see [Using Temporary Security Credentials to Request Access to AWS Resources] in the AWS
+	// IAM User Guide.
+	//
+	// [Using Temporary Security Credentials to Request Access to AWS Resources]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_use-resources.html
+	SecretAccessKey *string
+
+	// The token used for temporary credentials. For more information, see [Using Temporary Security Credentials to Request Access to AWS Resources] in the AWS
+	// IAM User Guide.
+	//
+	// [Using Temporary Security Credentials to Request Access to AWS Resources]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_use-resources.html
+	SessionToken *string
+
+	noSmithyDocumentSerde
+}
+
+// Provides information about the role that is assigned to the user.
+type RoleInfo struct {
+
+	// The identifier of the AWS account assigned to the user.
+	AccountId *string
+
+	// The friendly name of the role that is assigned to the user.
+	RoleName *string
+
+	noSmithyDocumentSerde
+}
+
+type noSmithyDocumentSerde = smithydocument.NoSerde
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/service/sso/validators.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/service/sso/validators.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/service/sso/validators.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/service/sso/validators.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,175 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package sso
+
+import (
+	"context"
+	"fmt"
+	smithy "github.com/aws/smithy-go"
+	"github.com/aws/smithy-go/middleware"
+)
+
+type validateOpGetRoleCredentials struct {
+}
+
+func (*validateOpGetRoleCredentials) ID() string {
+	return "OperationInputValidation"
+}
+
+func (m *validateOpGetRoleCredentials) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+	out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+	input, ok := in.Parameters.(*GetRoleCredentialsInput)
+	if !ok {
+		return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+	}
+	if err := validateOpGetRoleCredentialsInput(input); err != nil {
+		return out, metadata, err
+	}
+	return next.HandleInitialize(ctx, in)
+}
+
+type validateOpListAccountRoles struct {
+}
+
+func (*validateOpListAccountRoles) ID() string {
+	return "OperationInputValidation"
+}
+
+func (m *validateOpListAccountRoles) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+	out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+	input, ok := in.Parameters.(*ListAccountRolesInput)
+	if !ok {
+		return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+	}
+	if err := validateOpListAccountRolesInput(input); err != nil {
+		return out, metadata, err
+	}
+	return next.HandleInitialize(ctx, in)
+}
+
+type validateOpListAccounts struct {
+}
+
+func (*validateOpListAccounts) ID() string {
+	return "OperationInputValidation"
+}
+
+func (m *validateOpListAccounts) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+	out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+	input, ok := in.Parameters.(*ListAccountsInput)
+	if !ok {
+		return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+	}
+	if err := validateOpListAccountsInput(input); err != nil {
+		return out, metadata, err
+	}
+	return next.HandleInitialize(ctx, in)
+}
+
+type validateOpLogout struct {
+}
+
+func (*validateOpLogout) ID() string {
+	return "OperationInputValidation"
+}
+
+func (m *validateOpLogout) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+	out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+	input, ok := in.Parameters.(*LogoutInput)
+	if !ok {
+		return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+	}
+	if err := validateOpLogoutInput(input); err != nil {
+		return out, metadata, err
+	}
+	return next.HandleInitialize(ctx, in)
+}
+
+func addOpGetRoleCredentialsValidationMiddleware(stack *middleware.Stack) error {
+	return stack.Initialize.Add(&validateOpGetRoleCredentials{}, middleware.After)
+}
+
+func addOpListAccountRolesValidationMiddleware(stack *middleware.Stack) error {
+	return stack.Initialize.Add(&validateOpListAccountRoles{}, middleware.After)
+}
+
+func addOpListAccountsValidationMiddleware(stack *middleware.Stack) error {
+	return stack.Initialize.Add(&validateOpListAccounts{}, middleware.After)
+}
+
+func addOpLogoutValidationMiddleware(stack *middleware.Stack) error {
+	return stack.Initialize.Add(&validateOpLogout{}, middleware.After)
+}
+
+func validateOpGetRoleCredentialsInput(v *GetRoleCredentialsInput) error {
+	if v == nil {
+		return nil
+	}
+	invalidParams := smithy.InvalidParamsError{Context: "GetRoleCredentialsInput"}
+	if v.RoleName == nil {
+		invalidParams.Add(smithy.NewErrParamRequired("RoleName"))
+	}
+	if v.AccountId == nil {
+		invalidParams.Add(smithy.NewErrParamRequired("AccountId"))
+	}
+	if v.AccessToken == nil {
+		invalidParams.Add(smithy.NewErrParamRequired("AccessToken"))
+	}
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	} else {
+		return nil
+	}
+}
+
+func validateOpListAccountRolesInput(v *ListAccountRolesInput) error {
+	if v == nil {
+		return nil
+	}
+	invalidParams := smithy.InvalidParamsError{Context: "ListAccountRolesInput"}
+	if v.AccessToken == nil {
+		invalidParams.Add(smithy.NewErrParamRequired("AccessToken"))
+	}
+	if v.AccountId == nil {
+		invalidParams.Add(smithy.NewErrParamRequired("AccountId"))
+	}
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	} else {
+		return nil
+	}
+}
+
+func validateOpListAccountsInput(v *ListAccountsInput) error {
+	if v == nil {
+		return nil
+	}
+	invalidParams := smithy.InvalidParamsError{Context: "ListAccountsInput"}
+	if v.AccessToken == nil {
+		invalidParams.Add(smithy.NewErrParamRequired("AccessToken"))
+	}
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	} else {
+		return nil
+	}
+}
+
+func validateOpLogoutInput(v *LogoutInput) error {
+	if v == nil {
+		return nil
+	}
+	invalidParams := smithy.InvalidParamsError{Context: "LogoutInput"}
+	if v.AccessToken == nil {
+		invalidParams.Add(smithy.NewErrParamRequired("AccessToken"))
+	}
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	} else {
+		return nil
+	}
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/CHANGELOG.md 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/CHANGELOG.md
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/CHANGELOG.md	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/CHANGELOG.md	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,469 @@
+# v1.26.4 (2024-07-10.2)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.26.3 (2024-07-10)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.26.2 (2024-07-03)
+
+* No change notes available for this release.
+
+# v1.26.1 (2024-06-28)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.26.0 (2024-06-26)
+
+* **Feature**: Support list-of-string endpoint parameter.
+
+# v1.25.1 (2024-06-19)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.25.0 (2024-06-18)
+
+* **Feature**: Track usage of various AWS SDK features in user-agent string.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.24.6 (2024-06-17)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.24.5 (2024-06-07)
+
+* **Bug Fix**: Add clock skew correction on all service clients
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.24.4 (2024-06-03)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.24.3 (2024-05-23)
+
+* No change notes available for this release.
+
+# v1.24.2 (2024-05-16)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.24.1 (2024-05-15)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.24.0 (2024-05-10)
+
+* **Feature**: Updated request parameters for PKCE support.
+
+# v1.23.5 (2024-05-08)
+
+* **Bug Fix**: GoDoc improvement
+
+# v1.23.4 (2024-03-29)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.23.3 (2024-03-18)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.23.2 (2024-03-07)
+
+* **Bug Fix**: Remove dependency on go-cmp.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.23.1 (2024-02-23)
+
+* **Bug Fix**: Move all common, SDK-side middleware stack ops into the service client module to prevent cross-module compatibility issues in the future.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.23.0 (2024-02-22)
+
+* **Feature**: Add middleware stack snapshot tests.
+
+# v1.22.2 (2024-02-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.22.1 (2024-02-20)
+
+* **Bug Fix**: When sourcing values for a service's `EndpointParameters`, the lack of a configured region (i.e. `options.Region == ""`) will now translate to a `nil` value for `EndpointParameters.Region` instead of a pointer to the empty string `""`. This will result in a much more explicit error when calling an operation instead of an obscure hostname lookup failure.
+
+# v1.22.0 (2024-02-13)
+
+* **Feature**: Bump minimum Go version to 1.20 per our language support policy.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.21.7 (2024-01-16)
+
+* No change notes available for this release.
+
+# v1.21.6 (2024-01-04)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.21.5 (2023-12-08)
+
+* **Bug Fix**: Reinstate presence of default Retryer in functional options, but still respect max attempts set therein.
+
+# v1.21.4 (2023-12-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.21.3 (2023-12-06)
+
+* **Bug Fix**: Restore pre-refactor auth behavior where all operations could technically be performed anonymously.
+
+# v1.21.2 (2023-12-01)
+
+* **Bug Fix**: Correct wrapping of errors in authentication workflow.
+* **Bug Fix**: Correctly recognize cache-wrapped instances of AnonymousCredentials at client construction.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.21.1 (2023-11-30)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.21.0 (2023-11-29)
+
+* **Feature**: Expose Options() accessor on service clients.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.20.3 (2023-11-28.2)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.20.2 (2023-11-28)
+
+* **Bug Fix**: Respect setting RetryMaxAttempts in functional options at client construction.
+
+# v1.20.1 (2023-11-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.20.0 (2023-11-17)
+
+* **Feature**: Adding support for `sso-oauth:CreateTokenWithIAM`.
+
+# v1.19.2 (2023-11-15)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.19.1 (2023-11-09)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.19.0 (2023-11-01)
+
+* **Feature**: Adds support for configured endpoints via environment variables and the AWS shared configuration file.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.0 (2023-10-31)
+
+* **Feature**: **BREAKING CHANGE**: Bump minimum go version to 1.19 per the revised [go version support policy](https://aws.amazon.com/blogs/developer/aws-sdk-for-go-aligns-with-go-release-policy-on-supported-runtimes/).
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.3 (2023-10-12)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.2 (2023-10-06)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.1 (2023-09-22)
+
+* No change notes available for this release.
+
+# v1.17.0 (2023-09-20)
+
+* **Feature**: Update FIPS endpoints in aws-us-gov.
+
+# v1.16.0 (2023-09-18)
+
+* **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service.
+* **Feature**: Adds several endpoint ruleset changes across all models: smaller rulesets, removed non-unique regional endpoints, fixes FIPS and DualStack endpoints, and make region not required in SDK::Endpoint. Additional breakfix to cognito-sync field.
+
+# v1.15.6 (2023-09-05)
+
+* No change notes available for this release.
+
+# v1.15.5 (2023-08-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.4 (2023-08-18)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.3 (2023-08-17)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.2 (2023-08-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.1 (2023-08-01)
+
+* No change notes available for this release.
+
+# v1.15.0 (2023-07-31)
+
+* **Feature**: Adds support for smithy-modeled endpoint resolution. A new rules-based endpoint resolution will be added to the SDK which will supercede and deprecate existing endpoint resolution. Specifically, EndpointResolver will be deprecated while BaseEndpoint and EndpointResolverV2 will take its place. For more information, please see the Endpoints section in our Developer Guide.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.14.14 (2023-07-28)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.14.13 (2023-07-13)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.14.12 (2023-06-15)
+
+* No change notes available for this release.
+
+# v1.14.11 (2023-06-13)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.14.10 (2023-05-04)
+
+* No change notes available for this release.
+
+# v1.14.9 (2023-04-24)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.14.8 (2023-04-10)
+
+* No change notes available for this release.
+
+# v1.14.7 (2023-04-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.14.6 (2023-03-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.14.5 (2023-03-10)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.14.4 (2023-02-22)
+
+* **Bug Fix**: Prevent nil pointer dereference when retrieving error codes.
+
+# v1.14.3 (2023-02-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.14.2 (2023-02-15)
+
+* **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+* **Bug Fix**: Correct error type parsing for restJson services.
+
+# v1.14.1 (2023-02-03)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.14.0 (2023-01-05)
+
+* **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401).
+
+# v1.13.11 (2022-12-19)
+
+* No change notes available for this release.
+
+# v1.13.10 (2022-12-15)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.9 (2022-12-02)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.8 (2022-10-24)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.7 (2022-10-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.6 (2022-09-30)
+
+* **Documentation**: Documentation updates for the IAM Identity Center OIDC CLI Reference.
+
+# v1.13.5 (2022-09-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.4 (2022-09-14)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.3 (2022-09-02)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.2 (2022-08-31)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.1 (2022-08-29)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.0 (2022-08-25)
+
+* **Feature**: Updated required request parameters on IAM Identity Center's OIDC CreateToken action.
+
+# v1.12.14 (2022-08-11)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.13 (2022-08-09)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.12 (2022-08-08)
+
+* **Documentation**: Documentation updates to reflect service rename - AWS IAM Identity Center (successor to AWS Single Sign-On)
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.11 (2022-08-01)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.10 (2022-07-11)
+
+* No change notes available for this release.
+
+# v1.12.9 (2022-07-05)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.8 (2022-06-29)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.7 (2022-06-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.6 (2022-05-27)
+
+* No change notes available for this release.
+
+# v1.12.5 (2022-05-17)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.4 (2022-04-25)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.3 (2022-03-30)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.2 (2022-03-24)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.1 (2022-03-23)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.0 (2022-03-08)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.0 (2022-02-24)
+
+* **Feature**: API client updated
+* **Feature**: Adds RetryMaxAttempts and RetryMod to API client Options. This allows the API clients' default Retryer to be configured from the shared configuration files or environment variables. Adding a new Retry mode of `Adaptive`. `Adaptive` retry mode is an experimental mode, adding client rate limiting when throttles reponses are received from an API. See [retry.AdaptiveMode](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/aws/retry#AdaptiveMode) for more details, and configuration options.
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.10.0 (2022-01-14)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.9.0 (2022-01-07)
+
+* **Feature**: API client updated
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.8.2 (2021-12-02)
+
+* **Bug Fix**: Fixes a bug that prevented aws.EndpointResolverWithOptions from being used by the service client. ([#1514](https://github.com/aws/aws-sdk-go-v2/pull/1514))
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.8.1 (2021-11-19)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.8.0 (2021-11-06)
+
+* **Feature**: The SDK now supports configuration of FIPS and DualStack endpoints using environment variables, shared configuration, or programmatically.
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.7.0 (2021-10-21)
+
+* **Feature**: Updated  to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.6.0 (2021-10-11)
+
+* **Feature**: API client updated
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.5.0 (2021-09-17)
+
+* **Feature**: Updated API client and endpoints to latest revision.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.4.0 (2021-08-27)
+
+* **Feature**: Updated API model to latest revision.
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.3 (2021-08-19)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.2 (2021-08-04)
+
+* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.1 (2021-07-15)
+
+* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.0 (2021-06-25)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.2.1 (2021-05-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.2.0 (2021-05-14)
+
+* **Feature**: Constant has been added to modules to enable runtime version inspection for reporting.
+* **Dependency Update**: Updated to the latest SDK module versions
+
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/LICENSE.txt 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/LICENSE.txt
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/LICENSE.txt	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/LICENSE.txt	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_client.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_client.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_client.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_client.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,627 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package ssooidc
+
+import (
+	"context"
+	"fmt"
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/aws/aws-sdk-go-v2/aws/defaults"
+	awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+	"github.com/aws/aws-sdk-go-v2/aws/retry"
+	"github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+	awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http"
+	internalauth "github.com/aws/aws-sdk-go-v2/internal/auth"
+	internalauthsmithy "github.com/aws/aws-sdk-go-v2/internal/auth/smithy"
+	internalConfig "github.com/aws/aws-sdk-go-v2/internal/configsources"
+	internalmiddleware "github.com/aws/aws-sdk-go-v2/internal/middleware"
+	smithy "github.com/aws/smithy-go"
+	smithyauth "github.com/aws/smithy-go/auth"
+	smithydocument "github.com/aws/smithy-go/document"
+	"github.com/aws/smithy-go/logging"
+	"github.com/aws/smithy-go/middleware"
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+	"net"
+	"net/http"
+	"sync/atomic"
+	"time"
+)
+
+const ServiceID = "SSO OIDC"
+const ServiceAPIVersion = "2019-06-10"
+
+// Client provides the API client to make operations call for AWS SSO OIDC.
+type Client struct {
+	options Options
+
+	// Difference between the time reported by the server and the client
+	timeOffset *atomic.Int64
+}
+
+// New returns an initialized Client based on the functional options. Provide
+// additional functional options to further configure the behavior of the client,
+// such as changing the client's endpoint or adding custom middleware behavior.
+func New(options Options, optFns ...func(*Options)) *Client {
+	options = options.Copy()
+
+	resolveDefaultLogger(&options)
+
+	setResolvedDefaultsMode(&options)
+
+	resolveRetryer(&options)
+
+	resolveHTTPClient(&options)
+
+	resolveHTTPSignerV4(&options)
+
+	resolveEndpointResolverV2(&options)
+
+	resolveAuthSchemeResolver(&options)
+
+	for _, fn := range optFns {
+		fn(&options)
+	}
+
+	finalizeRetryMaxAttempts(&options)
+
+	ignoreAnonymousAuth(&options)
+
+	wrapWithAnonymousAuth(&options)
+
+	resolveAuthSchemes(&options)
+
+	client := &Client{
+		options: options,
+	}
+
+	initializeTimeOffsetResolver(client)
+
+	return client
+}
+
+// Options returns a copy of the client configuration.
+//
+// Callers SHOULD NOT perform mutations on any inner structures within client
+// config. Config overrides should instead be made on a per-operation basis through
+// functional options.
+func (c *Client) Options() Options {
+	return c.options.Copy()
+}
+
+func (c *Client) invokeOperation(ctx context.Context, opID string, params interface{}, optFns []func(*Options), stackFns ...func(*middleware.Stack, Options) error) (result interface{}, metadata middleware.Metadata, err error) {
+	ctx = middleware.ClearStackValues(ctx)
+	stack := middleware.NewStack(opID, smithyhttp.NewStackRequest)
+	options := c.options.Copy()
+
+	for _, fn := range optFns {
+		fn(&options)
+	}
+
+	finalizeOperationRetryMaxAttempts(&options, *c)
+
+	finalizeClientEndpointResolverOptions(&options)
+
+	for _, fn := range stackFns {
+		if err := fn(stack, options); err != nil {
+			return nil, metadata, err
+		}
+	}
+
+	for _, fn := range options.APIOptions {
+		if err := fn(stack); err != nil {
+			return nil, metadata, err
+		}
+	}
+
+	handler := middleware.DecorateHandler(smithyhttp.NewClientHandler(options.HTTPClient), stack)
+	result, metadata, err = handler.Handle(ctx, params)
+	if err != nil {
+		err = &smithy.OperationError{
+			ServiceID:     ServiceID,
+			OperationName: opID,
+			Err:           err,
+		}
+	}
+	return result, metadata, err
+}
+
+type operationInputKey struct{}
+
+func setOperationInput(ctx context.Context, input interface{}) context.Context {
+	return middleware.WithStackValue(ctx, operationInputKey{}, input)
+}
+
+func getOperationInput(ctx context.Context) interface{} {
+	return middleware.GetStackValue(ctx, operationInputKey{})
+}
+
+type setOperationInputMiddleware struct {
+}
+
+func (*setOperationInputMiddleware) ID() string {
+	return "setOperationInput"
+}
+
+func (m *setOperationInputMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	ctx = setOperationInput(ctx, in.Parameters)
+	return next.HandleSerialize(ctx, in)
+}
+
+func addProtocolFinalizerMiddlewares(stack *middleware.Stack, options Options, operation string) error {
+	if err := stack.Finalize.Add(&resolveAuthSchemeMiddleware{operation: operation, options: options}, middleware.Before); err != nil {
+		return fmt.Errorf("add ResolveAuthScheme: %w", err)
+	}
+	if err := stack.Finalize.Insert(&getIdentityMiddleware{options: options}, "ResolveAuthScheme", middleware.After); err != nil {
+		return fmt.Errorf("add GetIdentity: %v", err)
+	}
+	if err := stack.Finalize.Insert(&resolveEndpointV2Middleware{options: options}, "GetIdentity", middleware.After); err != nil {
+		return fmt.Errorf("add ResolveEndpointV2: %v", err)
+	}
+	if err := stack.Finalize.Insert(&signRequestMiddleware{}, "ResolveEndpointV2", middleware.After); err != nil {
+		return fmt.Errorf("add Signing: %w", err)
+	}
+	return nil
+}
+func resolveAuthSchemeResolver(options *Options) {
+	if options.AuthSchemeResolver == nil {
+		options.AuthSchemeResolver = &defaultAuthSchemeResolver{}
+	}
+}
+
+func resolveAuthSchemes(options *Options) {
+	if options.AuthSchemes == nil {
+		options.AuthSchemes = []smithyhttp.AuthScheme{
+			internalauth.NewHTTPAuthScheme("aws.auth#sigv4", &internalauthsmithy.V4SignerAdapter{
+				Signer:     options.HTTPSignerV4,
+				Logger:     options.Logger,
+				LogSigning: options.ClientLogMode.IsSigning(),
+			}),
+		}
+	}
+}
+
+type noSmithyDocumentSerde = smithydocument.NoSerde
+
+type legacyEndpointContextSetter struct {
+	LegacyResolver EndpointResolver
+}
+
+func (*legacyEndpointContextSetter) ID() string {
+	return "legacyEndpointContextSetter"
+}
+
+func (m *legacyEndpointContextSetter) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+	out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+	if m.LegacyResolver != nil {
+		ctx = awsmiddleware.SetRequiresLegacyEndpoints(ctx, true)
+	}
+
+	return next.HandleInitialize(ctx, in)
+
+}
+func addlegacyEndpointContextSetter(stack *middleware.Stack, o Options) error {
+	return stack.Initialize.Add(&legacyEndpointContextSetter{
+		LegacyResolver: o.EndpointResolver,
+	}, middleware.Before)
+}
+
+func resolveDefaultLogger(o *Options) {
+	if o.Logger != nil {
+		return
+	}
+	o.Logger = logging.Nop{}
+}
+
+func addSetLoggerMiddleware(stack *middleware.Stack, o Options) error {
+	return middleware.AddSetLoggerMiddleware(stack, o.Logger)
+}
+
+func setResolvedDefaultsMode(o *Options) {
+	if len(o.resolvedDefaultsMode) > 0 {
+		return
+	}
+
+	var mode aws.DefaultsMode
+	mode.SetFromString(string(o.DefaultsMode))
+
+	if mode == aws.DefaultsModeAuto {
+		mode = defaults.ResolveDefaultsModeAuto(o.Region, o.RuntimeEnvironment)
+	}
+
+	o.resolvedDefaultsMode = mode
+}
+
+// NewFromConfig returns a new client from the provided config.
+func NewFromConfig(cfg aws.Config, optFns ...func(*Options)) *Client {
+	opts := Options{
+		Region:                cfg.Region,
+		DefaultsMode:          cfg.DefaultsMode,
+		RuntimeEnvironment:    cfg.RuntimeEnvironment,
+		HTTPClient:            cfg.HTTPClient,
+		Credentials:           cfg.Credentials,
+		APIOptions:            cfg.APIOptions,
+		Logger:                cfg.Logger,
+		ClientLogMode:         cfg.ClientLogMode,
+		AppID:                 cfg.AppID,
+		AccountIDEndpointMode: cfg.AccountIDEndpointMode,
+	}
+	resolveAWSRetryerProvider(cfg, &opts)
+	resolveAWSRetryMaxAttempts(cfg, &opts)
+	resolveAWSRetryMode(cfg, &opts)
+	resolveAWSEndpointResolver(cfg, &opts)
+	resolveUseDualStackEndpoint(cfg, &opts)
+	resolveUseFIPSEndpoint(cfg, &opts)
+	resolveBaseEndpoint(cfg, &opts)
+	return New(opts, optFns...)
+}
+
+func resolveHTTPClient(o *Options) {
+	var buildable *awshttp.BuildableClient
+
+	if o.HTTPClient != nil {
+		var ok bool
+		buildable, ok = o.HTTPClient.(*awshttp.BuildableClient)
+		if !ok {
+			return
+		}
+	} else {
+		buildable = awshttp.NewBuildableClient()
+	}
+
+	modeConfig, err := defaults.GetModeConfiguration(o.resolvedDefaultsMode)
+	if err == nil {
+		buildable = buildable.WithDialerOptions(func(dialer *net.Dialer) {
+			if dialerTimeout, ok := modeConfig.GetConnectTimeout(); ok {
+				dialer.Timeout = dialerTimeout
+			}
+		})
+
+		buildable = buildable.WithTransportOptions(func(transport *http.Transport) {
+			if tlsHandshakeTimeout, ok := modeConfig.GetTLSNegotiationTimeout(); ok {
+				transport.TLSHandshakeTimeout = tlsHandshakeTimeout
+			}
+		})
+	}
+
+	o.HTTPClient = buildable
+}
+
+func resolveRetryer(o *Options) {
+	if o.Retryer != nil {
+		return
+	}
+
+	if len(o.RetryMode) == 0 {
+		modeConfig, err := defaults.GetModeConfiguration(o.resolvedDefaultsMode)
+		if err == nil {
+			o.RetryMode = modeConfig.RetryMode
+		}
+	}
+	if len(o.RetryMode) == 0 {
+		o.RetryMode = aws.RetryModeStandard
+	}
+
+	var standardOptions []func(*retry.StandardOptions)
+	if v := o.RetryMaxAttempts; v != 0 {
+		standardOptions = append(standardOptions, func(so *retry.StandardOptions) {
+			so.MaxAttempts = v
+		})
+	}
+
+	switch o.RetryMode {
+	case aws.RetryModeAdaptive:
+		var adaptiveOptions []func(*retry.AdaptiveModeOptions)
+		if len(standardOptions) != 0 {
+			adaptiveOptions = append(adaptiveOptions, func(ao *retry.AdaptiveModeOptions) {
+				ao.StandardOptions = append(ao.StandardOptions, standardOptions...)
+			})
+		}
+		o.Retryer = retry.NewAdaptiveMode(adaptiveOptions...)
+
+	default:
+		o.Retryer = retry.NewStandard(standardOptions...)
+	}
+}
+
+func resolveAWSRetryerProvider(cfg aws.Config, o *Options) {
+	if cfg.Retryer == nil {
+		return
+	}
+	o.Retryer = cfg.Retryer()
+}
+
+func resolveAWSRetryMode(cfg aws.Config, o *Options) {
+	if len(cfg.RetryMode) == 0 {
+		return
+	}
+	o.RetryMode = cfg.RetryMode
+}
+func resolveAWSRetryMaxAttempts(cfg aws.Config, o *Options) {
+	if cfg.RetryMaxAttempts == 0 {
+		return
+	}
+	o.RetryMaxAttempts = cfg.RetryMaxAttempts
+}
+
+func finalizeRetryMaxAttempts(o *Options) {
+	if o.RetryMaxAttempts == 0 {
+		return
+	}
+
+	o.Retryer = retry.AddWithMaxAttempts(o.Retryer, o.RetryMaxAttempts)
+}
+
+func finalizeOperationRetryMaxAttempts(o *Options, client Client) {
+	if v := o.RetryMaxAttempts; v == 0 || v == client.options.RetryMaxAttempts {
+		return
+	}
+
+	o.Retryer = retry.AddWithMaxAttempts(o.Retryer, o.RetryMaxAttempts)
+}
+
+func resolveAWSEndpointResolver(cfg aws.Config, o *Options) {
+	if cfg.EndpointResolver == nil && cfg.EndpointResolverWithOptions == nil {
+		return
+	}
+	o.EndpointResolver = withEndpointResolver(cfg.EndpointResolver, cfg.EndpointResolverWithOptions)
+}
+
+func addClientUserAgent(stack *middleware.Stack, options Options) error {
+	ua, err := getOrAddRequestUserAgent(stack)
+	if err != nil {
+		return err
+	}
+
+	ua.AddSDKAgentKeyValue(awsmiddleware.APIMetadata, "ssooidc", goModuleVersion)
+	if len(options.AppID) > 0 {
+		ua.AddSDKAgentKey(awsmiddleware.ApplicationIdentifier, options.AppID)
+	}
+
+	return nil
+}
+
+func getOrAddRequestUserAgent(stack *middleware.Stack) (*awsmiddleware.RequestUserAgent, error) {
+	id := (*awsmiddleware.RequestUserAgent)(nil).ID()
+	mw, ok := stack.Build.Get(id)
+	if !ok {
+		mw = awsmiddleware.NewRequestUserAgent()
+		if err := stack.Build.Add(mw, middleware.After); err != nil {
+			return nil, err
+		}
+	}
+
+	ua, ok := mw.(*awsmiddleware.RequestUserAgent)
+	if !ok {
+		return nil, fmt.Errorf("%T for %s middleware did not match expected type", mw, id)
+	}
+
+	return ua, nil
+}
+
+type HTTPSignerV4 interface {
+	SignHTTP(ctx context.Context, credentials aws.Credentials, r *http.Request, payloadHash string, service string, region string, signingTime time.Time, optFns ...func(*v4.SignerOptions)) error
+}
+
+func resolveHTTPSignerV4(o *Options) {
+	if o.HTTPSignerV4 != nil {
+		return
+	}
+	o.HTTPSignerV4 = newDefaultV4Signer(*o)
+}
+
+func newDefaultV4Signer(o Options) *v4.Signer {
+	return v4.NewSigner(func(so *v4.SignerOptions) {
+		so.Logger = o.Logger
+		so.LogSigning = o.ClientLogMode.IsSigning()
+	})
+}
+
+func addClientRequestID(stack *middleware.Stack) error {
+	return stack.Build.Add(&awsmiddleware.ClientRequestID{}, middleware.After)
+}
+
+func addComputeContentLength(stack *middleware.Stack) error {
+	return stack.Build.Add(&smithyhttp.ComputeContentLength{}, middleware.After)
+}
+
+func addRawResponseToMetadata(stack *middleware.Stack) error {
+	return stack.Deserialize.Add(&awsmiddleware.AddRawResponse{}, middleware.Before)
+}
+
+func addRecordResponseTiming(stack *middleware.Stack) error {
+	return stack.Deserialize.Add(&awsmiddleware.RecordResponseTiming{}, middleware.After)
+}
+func addStreamingEventsPayload(stack *middleware.Stack) error {
+	return stack.Finalize.Add(&v4.StreamingEventsPayload{}, middleware.Before)
+}
+
+func addUnsignedPayload(stack *middleware.Stack) error {
+	return stack.Finalize.Insert(&v4.UnsignedPayload{}, "ResolveEndpointV2", middleware.After)
+}
+
+func addComputePayloadSHA256(stack *middleware.Stack) error {
+	return stack.Finalize.Insert(&v4.ComputePayloadSHA256{}, "ResolveEndpointV2", middleware.After)
+}
+
+func addContentSHA256Header(stack *middleware.Stack) error {
+	return stack.Finalize.Insert(&v4.ContentSHA256Header{}, (*v4.ComputePayloadSHA256)(nil).ID(), middleware.After)
+}
+
+func addIsWaiterUserAgent(o *Options) {
+	o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error {
+		ua, err := getOrAddRequestUserAgent(stack)
+		if err != nil {
+			return err
+		}
+
+		ua.AddUserAgentFeature(awsmiddleware.UserAgentFeatureWaiter)
+		return nil
+	})
+}
+
+func addIsPaginatorUserAgent(o *Options) {
+	o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error {
+		ua, err := getOrAddRequestUserAgent(stack)
+		if err != nil {
+			return err
+		}
+
+		ua.AddUserAgentFeature(awsmiddleware.UserAgentFeaturePaginator)
+		return nil
+	})
+}
+
+func addRetry(stack *middleware.Stack, o Options) error {
+	attempt := retry.NewAttemptMiddleware(o.Retryer, smithyhttp.RequestCloner, func(m *retry.Attempt) {
+		m.LogAttempts = o.ClientLogMode.IsRetries()
+	})
+	if err := stack.Finalize.Insert(attempt, "Signing", middleware.Before); err != nil {
+		return err
+	}
+	if err := stack.Finalize.Insert(&retry.MetricsHeader{}, attempt.ID(), middleware.After); err != nil {
+		return err
+	}
+	return nil
+}
+
+// resolves dual-stack endpoint configuration
+func resolveUseDualStackEndpoint(cfg aws.Config, o *Options) error {
+	if len(cfg.ConfigSources) == 0 {
+		return nil
+	}
+	value, found, err := internalConfig.ResolveUseDualStackEndpoint(context.Background(), cfg.ConfigSources)
+	if err != nil {
+		return err
+	}
+	if found {
+		o.EndpointOptions.UseDualStackEndpoint = value
+	}
+	return nil
+}
+
+// resolves FIPS endpoint configuration
+func resolveUseFIPSEndpoint(cfg aws.Config, o *Options) error {
+	if len(cfg.ConfigSources) == 0 {
+		return nil
+	}
+	value, found, err := internalConfig.ResolveUseFIPSEndpoint(context.Background(), cfg.ConfigSources)
+	if err != nil {
+		return err
+	}
+	if found {
+		o.EndpointOptions.UseFIPSEndpoint = value
+	}
+	return nil
+}
+
+func resolveAccountID(identity smithyauth.Identity, mode aws.AccountIDEndpointMode) *string {
+	if mode == aws.AccountIDEndpointModeDisabled {
+		return nil
+	}
+
+	if ca, ok := identity.(*internalauthsmithy.CredentialsAdapter); ok && ca.Credentials.AccountID != "" {
+		return aws.String(ca.Credentials.AccountID)
+	}
+
+	return nil
+}
+
+func addTimeOffsetBuild(stack *middleware.Stack, c *Client) error {
+	mw := internalmiddleware.AddTimeOffsetMiddleware{Offset: c.timeOffset}
+	if err := stack.Build.Add(&mw, middleware.After); err != nil {
+		return err
+	}
+	return stack.Deserialize.Insert(&mw, "RecordResponseTiming", middleware.Before)
+}
+func initializeTimeOffsetResolver(c *Client) {
+	c.timeOffset = new(atomic.Int64)
+}
+
+func checkAccountID(identity smithyauth.Identity, mode aws.AccountIDEndpointMode) error {
+	switch mode {
+	case aws.AccountIDEndpointModeUnset:
+	case aws.AccountIDEndpointModePreferred:
+	case aws.AccountIDEndpointModeDisabled:
+	case aws.AccountIDEndpointModeRequired:
+		if ca, ok := identity.(*internalauthsmithy.CredentialsAdapter); !ok {
+			return fmt.Errorf("accountID is required but not set")
+		} else if ca.Credentials.AccountID == "" {
+			return fmt.Errorf("accountID is required but not set")
+		}
+	// default check in case invalid mode is configured through request config
+	default:
+		return fmt.Errorf("invalid accountID endpoint mode %s, must be preferred/required/disabled", mode)
+	}
+
+	return nil
+}
+
+func addUserAgentRetryMode(stack *middleware.Stack, options Options) error {
+	ua, err := getOrAddRequestUserAgent(stack)
+	if err != nil {
+		return err
+	}
+
+	switch options.Retryer.(type) {
+	case *retry.Standard:
+		ua.AddUserAgentFeature(awsmiddleware.UserAgentFeatureRetryModeStandard)
+	case *retry.AdaptiveMode:
+		ua.AddUserAgentFeature(awsmiddleware.UserAgentFeatureRetryModeAdaptive)
+	}
+	return nil
+}
+
+func addRecursionDetection(stack *middleware.Stack) error {
+	return stack.Build.Add(&awsmiddleware.RecursionDetection{}, middleware.After)
+}
+
+func addRequestIDRetrieverMiddleware(stack *middleware.Stack) error {
+	return stack.Deserialize.Insert(&awsmiddleware.RequestIDRetriever{}, "OperationDeserializer", middleware.Before)
+
+}
+
+func addResponseErrorMiddleware(stack *middleware.Stack) error {
+	return stack.Deserialize.Insert(&awshttp.ResponseErrorWrapper{}, "RequestIDRetriever", middleware.Before)
+
+}
+
+func addRequestResponseLogging(stack *middleware.Stack, o Options) error {
+	return stack.Deserialize.Add(&smithyhttp.RequestResponseLogger{
+		LogRequest:          o.ClientLogMode.IsRequest(),
+		LogRequestWithBody:  o.ClientLogMode.IsRequestWithBody(),
+		LogResponse:         o.ClientLogMode.IsResponse(),
+		LogResponseWithBody: o.ClientLogMode.IsResponseWithBody(),
+	}, middleware.After)
+}
+
+type disableHTTPSMiddleware struct {
+	DisableHTTPS bool
+}
+
+func (*disableHTTPSMiddleware) ID() string {
+	return "disableHTTPS"
+}
+
+func (m *disableHTTPSMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (
+	out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
+) {
+	req, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, fmt.Errorf("unknown transport type %T", in.Request)
+	}
+
+	if m.DisableHTTPS && !smithyhttp.GetHostnameImmutable(ctx) {
+		req.URL.Scheme = "http"
+	}
+
+	return next.HandleFinalize(ctx, in)
+}
+
+func addDisableHTTPSMiddleware(stack *middleware.Stack, o Options) error {
+	return stack.Finalize.Insert(&disableHTTPSMiddleware{
+		DisableHTTPS: o.EndpointOptions.DisableHTTPS,
+	}, "ResolveEndpointV2", middleware.After)
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_CreateToken.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_CreateToken.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_CreateToken.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_CreateToken.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,225 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package ssooidc
+
+import (
+	"context"
+	"fmt"
+	awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+	"github.com/aws/smithy-go/middleware"
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Creates and returns access and refresh tokens for clients that are
+// authenticated using client secrets. The access token can be used to fetch
+// short-term credentials for the assigned AWS accounts or to access application
+// APIs using bearer authentication.
+func (c *Client) CreateToken(ctx context.Context, params *CreateTokenInput, optFns ...func(*Options)) (*CreateTokenOutput, error) {
+	if params == nil {
+		params = &CreateTokenInput{}
+	}
+
+	result, metadata, err := c.invokeOperation(ctx, "CreateToken", params, optFns, c.addOperationCreateTokenMiddlewares)
+	if err != nil {
+		return nil, err
+	}
+
+	out := result.(*CreateTokenOutput)
+	out.ResultMetadata = metadata
+	return out, nil
+}
+
+type CreateTokenInput struct {
+
+	// The unique identifier string for the client or application. This value comes
+	// from the result of the RegisterClientAPI.
+	//
+	// This member is required.
+	ClientId *string
+
+	// A secret string generated for the client. This value should come from the
+	// persisted result of the RegisterClientAPI.
+	//
+	// This member is required.
+	ClientSecret *string
+
+	// Supports the following OAuth grant types: Device Code and Refresh Token.
+	// Specify either of the following values, depending on the grant type that you
+	// want:
+	//
+	// * Device Code - urn:ietf:params:oauth:grant-type:device_code
+	//
+	// * Refresh Token - refresh_token
+	//
+	// For information about how to obtain the device code, see the StartDeviceAuthorization topic.
+	//
+	// This member is required.
+	GrantType *string
+
+	// Used only when calling this API for the Authorization Code grant type. The
+	// short-term code is used to identify this authorization request. This grant type
+	// is currently unsupported for the CreateTokenAPI.
+	Code *string
+
+	// Used only when calling this API for the Authorization Code grant type. This
+	// value is generated by the client and presented to validate the original code
+	// challenge value the client passed at authorization time.
+	CodeVerifier *string
+
+	// Used only when calling this API for the Device Code grant type. This short-term
+	// code is used to identify this authorization request. This comes from the result
+	// of the StartDeviceAuthorizationAPI.
+	DeviceCode *string
+
+	// Used only when calling this API for the Authorization Code grant type. This
+	// value specifies the location of the client or application that has registered to
+	// receive the authorization code.
+	RedirectUri *string
+
+	// Used only when calling this API for the Refresh Token grant type. This token is
+	// used to refresh short-term tokens, such as the access token, that might expire.
+	//
+	// For more information about the features and limitations of the current IAM
+	// Identity Center OIDC implementation, see Considerations for Using this Guide in
+	// the [IAM Identity Center OIDC API Reference].
+	//
+	// [IAM Identity Center OIDC API Reference]: https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html
+	RefreshToken *string
+
+	// The list of scopes for which authorization is requested. The access token that
+	// is issued is limited to the scopes that are granted. If this value is not
+	// specified, IAM Identity Center authorizes all scopes that are configured for the
+	// client during the call to RegisterClient.
+	Scope []string
+
+	noSmithyDocumentSerde
+}
+
+type CreateTokenOutput struct {
+
+	// A bearer token to access Amazon Web Services accounts and applications assigned
+	// to a user.
+	AccessToken *string
+
+	// Indicates the time in seconds when an access token will expire.
+	ExpiresIn int32
+
+	// The idToken is not implemented or supported. For more information about the
+	// features and limitations of the current IAM Identity Center OIDC implementation,
+	// see Considerations for Using this Guide in the [IAM Identity Center OIDC API Reference].
+	//
+	// A JSON Web Token (JWT) that identifies who is associated with the issued access
+	// token.
+	//
+	// [IAM Identity Center OIDC API Reference]: https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html
+	IdToken *string
+
+	// A token that, if present, can be used to refresh a previously issued access
+	// token that might have expired.
+	//
+	// For more information about the features and limitations of the current IAM
+	// Identity Center OIDC implementation, see Considerations for Using this Guide in
+	// the [IAM Identity Center OIDC API Reference].
+	//
+	// [IAM Identity Center OIDC API Reference]: https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html
+	RefreshToken *string
+
+	// Used to notify the client that the returned token is an access token. The
+	// supported token type is Bearer .
+	TokenType *string
+
+	// Metadata pertaining to the operation's result.
+	ResultMetadata middleware.Metadata
+
+	noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationCreateTokenMiddlewares(stack *middleware.Stack, options Options) (err error) {
+	if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+		return err
+	}
+	err = stack.Serialize.Add(&awsRestjson1_serializeOpCreateToken{}, middleware.After)
+	if err != nil {
+		return err
+	}
+	err = stack.Deserialize.Add(&awsRestjson1_deserializeOpCreateToken{}, middleware.After)
+	if err != nil {
+		return err
+	}
+	if err := addProtocolFinalizerMiddlewares(stack, options, "CreateToken"); err != nil {
+		return fmt.Errorf("add protocol finalizers: %v", err)
+	}
+
+	if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+		return err
+	}
+	if err = addSetLoggerMiddleware(stack, options); err != nil {
+		return err
+	}
+	if err = addClientRequestID(stack); err != nil {
+		return err
+	}
+	if err = addComputeContentLength(stack); err != nil {
+		return err
+	}
+	if err = addResolveEndpointMiddleware(stack, options); err != nil {
+		return err
+	}
+	if err = addRetry(stack, options); err != nil {
+		return err
+	}
+	if err = addRawResponseToMetadata(stack); err != nil {
+		return err
+	}
+	if err = addRecordResponseTiming(stack); err != nil {
+		return err
+	}
+	if err = addClientUserAgent(stack, options); err != nil {
+		return err
+	}
+	if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+		return err
+	}
+	if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+		return err
+	}
+	if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+		return err
+	}
+	if err = addTimeOffsetBuild(stack, c); err != nil {
+		return err
+	}
+	if err = addUserAgentRetryMode(stack, options); err != nil {
+		return err
+	}
+	if err = addOpCreateTokenValidationMiddleware(stack); err != nil {
+		return err
+	}
+	if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCreateToken(options.Region), middleware.Before); err != nil {
+		return err
+	}
+	if err = addRecursionDetection(stack); err != nil {
+		return err
+	}
+	if err = addRequestIDRetrieverMiddleware(stack); err != nil {
+		return err
+	}
+	if err = addResponseErrorMiddleware(stack); err != nil {
+		return err
+	}
+	if err = addRequestResponseLogging(stack, options); err != nil {
+		return err
+	}
+	if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+		return err
+	}
+	return nil
+}
+
+func newServiceMetadataMiddleware_opCreateToken(region string) *awsmiddleware.RegisterServiceMetadata {
+	return &awsmiddleware.RegisterServiceMetadata{
+		Region:        region,
+		ServiceID:     ServiceID,
+		OperationName: "CreateToken",
+	}
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_CreateTokenWithIAM.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_CreateTokenWithIAM.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_CreateTokenWithIAM.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_CreateTokenWithIAM.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,256 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package ssooidc
+
+import (
+	"context"
+	"fmt"
+	awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+	"github.com/aws/smithy-go/middleware"
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Creates and returns access and refresh tokens for clients and applications that
+// are authenticated using IAM entities. The access token can be used to fetch
+// short-term credentials for the assigned Amazon Web Services accounts or to
+// access application APIs using bearer authentication.
+func (c *Client) CreateTokenWithIAM(ctx context.Context, params *CreateTokenWithIAMInput, optFns ...func(*Options)) (*CreateTokenWithIAMOutput, error) {
+	if params == nil {
+		params = &CreateTokenWithIAMInput{}
+	}
+
+	result, metadata, err := c.invokeOperation(ctx, "CreateTokenWithIAM", params, optFns, c.addOperationCreateTokenWithIAMMiddlewares)
+	if err != nil {
+		return nil, err
+	}
+
+	out := result.(*CreateTokenWithIAMOutput)
+	out.ResultMetadata = metadata
+	return out, nil
+}
+
+type CreateTokenWithIAMInput struct {
+
+	// The unique identifier string for the client or application. This value is an
+	// application ARN that has OAuth grants configured.
+	//
+	// This member is required.
+	ClientId *string
+
+	// Supports the following OAuth grant types: Authorization Code, Refresh Token,
+	// JWT Bearer, and Token Exchange. Specify one of the following values, depending
+	// on the grant type that you want:
+	//
+	// * Authorization Code - authorization_code
+	//
+	// * Refresh Token - refresh_token
+	//
+	// * JWT Bearer - urn:ietf:params:oauth:grant-type:jwt-bearer
+	//
+	// * Token Exchange - urn:ietf:params:oauth:grant-type:token-exchange
+	//
+	// This member is required.
+	GrantType *string
+
+	// Used only when calling this API for the JWT Bearer grant type. This value
+	// specifies the JSON Web Token (JWT) issued by a trusted token issuer. To
+	// authorize a trusted token issuer, configure the JWT Bearer GrantOptions for the
+	// application.
+	Assertion *string
+
+	// Used only when calling this API for the Authorization Code grant type. This
+	// short-term code is used to identify this authorization request. The code is
+	// obtained through a redirect from IAM Identity Center to a redirect URI persisted
+	// in the Authorization Code GrantOptions for the application.
+	Code *string
+
+	// Used only when calling this API for the Authorization Code grant type. This
+	// value is generated by the client and presented to validate the original code
+	// challenge value the client passed at authorization time.
+	CodeVerifier *string
+
+	// Used only when calling this API for the Authorization Code grant type. This
+	// value specifies the location of the client or application that has registered to
+	// receive the authorization code.
+	RedirectUri *string
+
+	// Used only when calling this API for the Refresh Token grant type. This token is
+	// used to refresh short-term tokens, such as the access token, that might expire.
+	//
+	// For more information about the features and limitations of the current IAM
+	// Identity Center OIDC implementation, see Considerations for Using this Guide in
+	// the [IAM Identity Center OIDC API Reference].
+	//
+	// [IAM Identity Center OIDC API Reference]: https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html
+	RefreshToken *string
+
+	// Used only when calling this API for the Token Exchange grant type. This value
+	// specifies the type of token that the requester can receive. The following values
+	// are supported:
+	//
+	// * Access Token - urn:ietf:params:oauth:token-type:access_token
+	//
+	// * Refresh Token - urn:ietf:params:oauth:token-type:refresh_token
+	RequestedTokenType *string
+
+	// The list of scopes for which authorization is requested. The access token that
+	// is issued is limited to the scopes that are granted. If the value is not
+	// specified, IAM Identity Center authorizes all scopes configured for the
+	// application, including the following default scopes: openid , aws ,
+	// sts:identity_context .
+	Scope []string
+
+	// Used only when calling this API for the Token Exchange grant type. This value
+	// specifies the subject of the exchange. The value of the subject token must be an
+	// access token issued by IAM Identity Center to a different client or application.
+	// The access token must have authorized scopes that indicate the requested
+	// application as a target audience.
+	SubjectToken *string
+
+	// Used only when calling this API for the Token Exchange grant type. This value
+	// specifies the type of token that is passed as the subject of the exchange. The
+	// following value is supported:
+	//
+	// * Access Token - urn:ietf:params:oauth:token-type:access_token
+	SubjectTokenType *string
+
+	noSmithyDocumentSerde
+}
+
+type CreateTokenWithIAMOutput struct {
+
+	// A bearer token to access Amazon Web Services accounts and applications assigned
+	// to a user.
+	AccessToken *string
+
+	// Indicates the time in seconds when an access token will expire.
+	ExpiresIn int32
+
+	// A JSON Web Token (JWT) that identifies the user associated with the issued
+	// access token.
+	IdToken *string
+
+	// Indicates the type of tokens that are issued by IAM Identity Center. The
+	// following values are supported:
+	//
+	// * Access Token - urn:ietf:params:oauth:token-type:access_token
+	//
+	// * Refresh Token - urn:ietf:params:oauth:token-type:refresh_token
+	IssuedTokenType *string
+
+	// A token that, if present, can be used to refresh a previously issued access
+	// token that might have expired.
+	//
+	// For more information about the features and limitations of the current IAM
+	// Identity Center OIDC implementation, see Considerations for Using this Guide in
+	// the [IAM Identity Center OIDC API Reference].
+	//
+	// [IAM Identity Center OIDC API Reference]: https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html
+	RefreshToken *string
+
+	// The list of scopes for which authorization is granted. The access token that is
+	// issued is limited to the scopes that are granted.
+	Scope []string
+
+	// Used to notify the requester that the returned token is an access token. The
+	// supported token type is Bearer .
+	TokenType *string
+
+	// Metadata pertaining to the operation's result.
+	ResultMetadata middleware.Metadata
+
+	noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationCreateTokenWithIAMMiddlewares(stack *middleware.Stack, options Options) (err error) {
+	if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+		return err
+	}
+	err = stack.Serialize.Add(&awsRestjson1_serializeOpCreateTokenWithIAM{}, middleware.After)
+	if err != nil {
+		return err
+	}
+	err = stack.Deserialize.Add(&awsRestjson1_deserializeOpCreateTokenWithIAM{}, middleware.After)
+	if err != nil {
+		return err
+	}
+	if err := addProtocolFinalizerMiddlewares(stack, options, "CreateTokenWithIAM"); err != nil {
+		return fmt.Errorf("add protocol finalizers: %v", err)
+	}
+
+	if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+		return err
+	}
+	if err = addSetLoggerMiddleware(stack, options); err != nil {
+		return err
+	}
+	if err = addClientRequestID(stack); err != nil {
+		return err
+	}
+	if err = addComputeContentLength(stack); err != nil {
+		return err
+	}
+	if err = addResolveEndpointMiddleware(stack, options); err != nil {
+		return err
+	}
+	if err = addComputePayloadSHA256(stack); err != nil {
+		return err
+	}
+	if err = addRetry(stack, options); err != nil {
+		return err
+	}
+	if err = addRawResponseToMetadata(stack); err != nil {
+		return err
+	}
+	if err = addRecordResponseTiming(stack); err != nil {
+		return err
+	}
+	if err = addClientUserAgent(stack, options); err != nil {
+		return err
+	}
+	if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+		return err
+	}
+	if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+		return err
+	}
+	if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+		return err
+	}
+	if err = addTimeOffsetBuild(stack, c); err != nil {
+		return err
+	}
+	if err = addUserAgentRetryMode(stack, options); err != nil {
+		return err
+	}
+	if err = addOpCreateTokenWithIAMValidationMiddleware(stack); err != nil {
+		return err
+	}
+	if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCreateTokenWithIAM(options.Region), middleware.Before); err != nil {
+		return err
+	}
+	if err = addRecursionDetection(stack); err != nil {
+		return err
+	}
+	if err = addRequestIDRetrieverMiddleware(stack); err != nil {
+		return err
+	}
+	if err = addResponseErrorMiddleware(stack); err != nil {
+		return err
+	}
+	if err = addRequestResponseLogging(stack, options); err != nil {
+		return err
+	}
+	if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+		return err
+	}
+	return nil
+}
+
+func newServiceMetadataMiddleware_opCreateTokenWithIAM(region string) *awsmiddleware.RegisterServiceMetadata {
+	return &awsmiddleware.RegisterServiceMetadata{
+		Region:        region,
+		ServiceID:     ServiceID,
+		OperationName: "CreateTokenWithIAM",
+	}
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_RegisterClient.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_RegisterClient.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_RegisterClient.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_RegisterClient.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,186 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package ssooidc
+
+import (
+	"context"
+	"fmt"
+	awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+	"github.com/aws/smithy-go/middleware"
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Registers a client with IAM Identity Center. This allows clients to initiate
+// device authorization. The output should be persisted for reuse through many
+// authentication requests.
+func (c *Client) RegisterClient(ctx context.Context, params *RegisterClientInput, optFns ...func(*Options)) (*RegisterClientOutput, error) {
+	if params == nil {
+		params = &RegisterClientInput{}
+	}
+
+	result, metadata, err := c.invokeOperation(ctx, "RegisterClient", params, optFns, c.addOperationRegisterClientMiddlewares)
+	if err != nil {
+		return nil, err
+	}
+
+	out := result.(*RegisterClientOutput)
+	out.ResultMetadata = metadata
+	return out, nil
+}
+
+type RegisterClientInput struct {
+
+	// The friendly name of the client.
+	//
+	// This member is required.
+	ClientName *string
+
+	// The type of client. The service supports only public as a client type. Anything
+	// other than public will be rejected by the service.
+	//
+	// This member is required.
+	ClientType *string
+
+	// This IAM Identity Center application ARN is used to define
+	// administrator-managed configuration for public client access to resources. At
+	// authorization, the scopes, grants, and redirect URI available to this client
+	// will be restricted by this application resource.
+	EntitledApplicationArn *string
+
+	// The list of OAuth 2.0 grant types that are defined by the client. This list is
+	// used to restrict the token granting flows available to the client.
+	GrantTypes []string
+
+	// The IAM Identity Center Issuer URL associated with an instance of IAM Identity
+	// Center. This value is needed for user access to resources through the client.
+	IssuerUrl *string
+
+	// The list of redirect URI that are defined by the client. At completion of
+	// authorization, this list is used to restrict what locations the user agent can
+	// be redirected back to.
+	RedirectUris []string
+
+	// The list of scopes that are defined by the client. Upon authorization, this
+	// list is used to restrict permissions when granting an access token.
+	Scopes []string
+
+	noSmithyDocumentSerde
+}
+
+type RegisterClientOutput struct {
+
+	// An endpoint that the client can use to request authorization.
+	AuthorizationEndpoint *string
+
+	// The unique identifier string for each client. This client uses this identifier
+	// to get authenticated by the service in subsequent calls.
+	ClientId *string
+
+	// Indicates the time at which the clientId and clientSecret were issued.
+	ClientIdIssuedAt int64
+
+	// A secret string generated for the client. The client will use this string to
+	// get authenticated by the service in subsequent calls.
+	ClientSecret *string
+
+	// Indicates the time at which the clientId and clientSecret will become invalid.
+	ClientSecretExpiresAt int64
+
+	// An endpoint that the client can use to create tokens.
+	TokenEndpoint *string
+
+	// Metadata pertaining to the operation's result.
+	ResultMetadata middleware.Metadata
+
+	noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationRegisterClientMiddlewares(stack *middleware.Stack, options Options) (err error) {
+	if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+		return err
+	}
+	err = stack.Serialize.Add(&awsRestjson1_serializeOpRegisterClient{}, middleware.After)
+	if err != nil {
+		return err
+	}
+	err = stack.Deserialize.Add(&awsRestjson1_deserializeOpRegisterClient{}, middleware.After)
+	if err != nil {
+		return err
+	}
+	if err := addProtocolFinalizerMiddlewares(stack, options, "RegisterClient"); err != nil {
+		return fmt.Errorf("add protocol finalizers: %v", err)
+	}
+
+	if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+		return err
+	}
+	if err = addSetLoggerMiddleware(stack, options); err != nil {
+		return err
+	}
+	if err = addClientRequestID(stack); err != nil {
+		return err
+	}
+	if err = addComputeContentLength(stack); err != nil {
+		return err
+	}
+	if err = addResolveEndpointMiddleware(stack, options); err != nil {
+		return err
+	}
+	if err = addRetry(stack, options); err != nil {
+		return err
+	}
+	if err = addRawResponseToMetadata(stack); err != nil {
+		return err
+	}
+	if err = addRecordResponseTiming(stack); err != nil {
+		return err
+	}
+	if err = addClientUserAgent(stack, options); err != nil {
+		return err
+	}
+	if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+		return err
+	}
+	if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+		return err
+	}
+	if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+		return err
+	}
+	if err = addTimeOffsetBuild(stack, c); err != nil {
+		return err
+	}
+	if err = addUserAgentRetryMode(stack, options); err != nil {
+		return err
+	}
+	if err = addOpRegisterClientValidationMiddleware(stack); err != nil {
+		return err
+	}
+	if err = stack.Initialize.Add(newServiceMetadataMiddleware_opRegisterClient(options.Region), middleware.Before); err != nil {
+		return err
+	}
+	if err = addRecursionDetection(stack); err != nil {
+		return err
+	}
+	if err = addRequestIDRetrieverMiddleware(stack); err != nil {
+		return err
+	}
+	if err = addResponseErrorMiddleware(stack); err != nil {
+		return err
+	}
+	if err = addRequestResponseLogging(stack, options); err != nil {
+		return err
+	}
+	if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+		return err
+	}
+	return nil
+}
+
+func newServiceMetadataMiddleware_opRegisterClient(region string) *awsmiddleware.RegisterServiceMetadata {
+	return &awsmiddleware.RegisterServiceMetadata{
+		Region:        region,
+		ServiceID:     ServiceID,
+		OperationName: "RegisterClient",
+	}
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_StartDeviceAuthorization.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_StartDeviceAuthorization.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_StartDeviceAuthorization.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_StartDeviceAuthorization.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,176 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package ssooidc
+
+import (
+	"context"
+	"fmt"
+	awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+	"github.com/aws/smithy-go/middleware"
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Initiates device authorization by requesting a pair of verification codes from
+// the authorization service.
+func (c *Client) StartDeviceAuthorization(ctx context.Context, params *StartDeviceAuthorizationInput, optFns ...func(*Options)) (*StartDeviceAuthorizationOutput, error) {
+	if params == nil {
+		params = &StartDeviceAuthorizationInput{}
+	}
+
+	result, metadata, err := c.invokeOperation(ctx, "StartDeviceAuthorization", params, optFns, c.addOperationStartDeviceAuthorizationMiddlewares)
+	if err != nil {
+		return nil, err
+	}
+
+	out := result.(*StartDeviceAuthorizationOutput)
+	out.ResultMetadata = metadata
+	return out, nil
+}
+
+type StartDeviceAuthorizationInput struct {
+
+	// The unique identifier string for the client that is registered with IAM
+	// Identity Center. This value should come from the persisted result of the RegisterClientAPI
+	// operation.
+	//
+	// This member is required.
+	ClientId *string
+
+	// A secret string that is generated for the client. This value should come from
+	// the persisted result of the RegisterClientAPI operation.
+	//
+	// This member is required.
+	ClientSecret *string
+
+	// The URL for the Amazon Web Services access portal. For more information, see [Using the Amazon Web Services access portal]
+	// in the IAM Identity Center User Guide.
+	//
+	// [Using the Amazon Web Services access portal]: https://docs.aws.amazon.com/singlesignon/latest/userguide/using-the-portal.html
+	//
+	// This member is required.
+	StartUrl *string
+
+	noSmithyDocumentSerde
+}
+
+type StartDeviceAuthorizationOutput struct {
+
+	// The short-lived code that is used by the device when polling for a session
+	// token.
+	DeviceCode *string
+
+	// Indicates the number of seconds in which the verification code will become
+	// invalid.
+	ExpiresIn int32
+
+	// Indicates the number of seconds the client must wait between attempts when
+	// polling for a session.
+	Interval int32
+
+	// A one-time user verification code. This is needed to authorize an in-use device.
+	UserCode *string
+
+	// The URI of the verification page that takes the userCode to authorize the
+	// device.
+	VerificationUri *string
+
+	// An alternate URL that the client can use to automatically launch a browser.
+	// This process skips the manual step in which the user visits the verification
+	// page and enters their code.
+	VerificationUriComplete *string
+
+	// Metadata pertaining to the operation's result.
+	ResultMetadata middleware.Metadata
+
+	noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationStartDeviceAuthorizationMiddlewares(stack *middleware.Stack, options Options) (err error) {
+	if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+		return err
+	}
+	err = stack.Serialize.Add(&awsRestjson1_serializeOpStartDeviceAuthorization{}, middleware.After)
+	if err != nil {
+		return err
+	}
+	err = stack.Deserialize.Add(&awsRestjson1_deserializeOpStartDeviceAuthorization{}, middleware.After)
+	if err != nil {
+		return err
+	}
+	if err := addProtocolFinalizerMiddlewares(stack, options, "StartDeviceAuthorization"); err != nil {
+		return fmt.Errorf("add protocol finalizers: %v", err)
+	}
+
+	if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+		return err
+	}
+	if err = addSetLoggerMiddleware(stack, options); err != nil {
+		return err
+	}
+	if err = addClientRequestID(stack); err != nil {
+		return err
+	}
+	if err = addComputeContentLength(stack); err != nil {
+		return err
+	}
+	if err = addResolveEndpointMiddleware(stack, options); err != nil {
+		return err
+	}
+	if err = addRetry(stack, options); err != nil {
+		return err
+	}
+	if err = addRawResponseToMetadata(stack); err != nil {
+		return err
+	}
+	if err = addRecordResponseTiming(stack); err != nil {
+		return err
+	}
+	if err = addClientUserAgent(stack, options); err != nil {
+		return err
+	}
+	if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+		return err
+	}
+	if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+		return err
+	}
+	if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+		return err
+	}
+	if err = addTimeOffsetBuild(stack, c); err != nil {
+		return err
+	}
+	if err = addUserAgentRetryMode(stack, options); err != nil {
+		return err
+	}
+	if err = addOpStartDeviceAuthorizationValidationMiddleware(stack); err != nil {
+		return err
+	}
+	if err = stack.Initialize.Add(newServiceMetadataMiddleware_opStartDeviceAuthorization(options.Region), middleware.Before); err != nil {
+		return err
+	}
+	if err = addRecursionDetection(stack); err != nil {
+		return err
+	}
+	if err = addRequestIDRetrieverMiddleware(stack); err != nil {
+		return err
+	}
+	if err = addResponseErrorMiddleware(stack); err != nil {
+		return err
+	}
+	if err = addRequestResponseLogging(stack, options); err != nil {
+		return err
+	}
+	if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+		return err
+	}
+	return nil
+}
+
+func newServiceMetadataMiddleware_opStartDeviceAuthorization(region string) *awsmiddleware.RegisterServiceMetadata {
+	return &awsmiddleware.RegisterServiceMetadata{
+		Region:        region,
+		ServiceID:     ServiceID,
+		OperationName: "StartDeviceAuthorization",
+	}
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/auth.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/auth.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/auth.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/auth.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,302 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package ssooidc
+
+import (
+	"context"
+	"fmt"
+	awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+	smithy "github.com/aws/smithy-go"
+	smithyauth "github.com/aws/smithy-go/auth"
+	"github.com/aws/smithy-go/middleware"
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+func bindAuthParamsRegion(_ interface{}, params *AuthResolverParameters, _ interface{}, options Options) {
+	params.Region = options.Region
+}
+
+type setLegacyContextSigningOptionsMiddleware struct {
+}
+
+func (*setLegacyContextSigningOptionsMiddleware) ID() string {
+	return "setLegacyContextSigningOptions"
+}
+
+func (m *setLegacyContextSigningOptionsMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (
+	out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
+) {
+	rscheme := getResolvedAuthScheme(ctx)
+	schemeID := rscheme.Scheme.SchemeID()
+
+	if sn := awsmiddleware.GetSigningName(ctx); sn != "" {
+		if schemeID == "aws.auth#sigv4" {
+			smithyhttp.SetSigV4SigningName(&rscheme.SignerProperties, sn)
+		} else if schemeID == "aws.auth#sigv4a" {
+			smithyhttp.SetSigV4ASigningName(&rscheme.SignerProperties, sn)
+		}
+	}
+
+	if sr := awsmiddleware.GetSigningRegion(ctx); sr != "" {
+		if schemeID == "aws.auth#sigv4" {
+			smithyhttp.SetSigV4SigningRegion(&rscheme.SignerProperties, sr)
+		} else if schemeID == "aws.auth#sigv4a" {
+			smithyhttp.SetSigV4ASigningRegions(&rscheme.SignerProperties, []string{sr})
+		}
+	}
+
+	return next.HandleFinalize(ctx, in)
+}
+
+func addSetLegacyContextSigningOptionsMiddleware(stack *middleware.Stack) error {
+	return stack.Finalize.Insert(&setLegacyContextSigningOptionsMiddleware{}, "Signing", middleware.Before)
+}
+
+type withAnonymous struct {
+	resolver AuthSchemeResolver
+}
+
+var _ AuthSchemeResolver = (*withAnonymous)(nil)
+
+func (v *withAnonymous) ResolveAuthSchemes(ctx context.Context, params *AuthResolverParameters) ([]*smithyauth.Option, error) {
+	opts, err := v.resolver.ResolveAuthSchemes(ctx, params)
+	if err != nil {
+		return nil, err
+	}
+
+	opts = append(opts, &smithyauth.Option{
+		SchemeID: smithyauth.SchemeIDAnonymous,
+	})
+	return opts, nil
+}
+
+func wrapWithAnonymousAuth(options *Options) {
+	if _, ok := options.AuthSchemeResolver.(*defaultAuthSchemeResolver); !ok {
+		return
+	}
+
+	options.AuthSchemeResolver = &withAnonymous{
+		resolver: options.AuthSchemeResolver,
+	}
+}
+
+// AuthResolverParameters contains the set of inputs necessary for auth scheme
+// resolution.
+type AuthResolverParameters struct {
+	// The name of the operation being invoked.
+	Operation string
+
+	// The region in which the operation is being invoked.
+	Region string
+}
+
+func bindAuthResolverParams(ctx context.Context, operation string, input interface{}, options Options) *AuthResolverParameters {
+	params := &AuthResolverParameters{
+		Operation: operation,
+	}
+
+	bindAuthParamsRegion(ctx, params, input, options)
+
+	return params
+}
+
+// AuthSchemeResolver returns a set of possible authentication options for an
+// operation.
+type AuthSchemeResolver interface {
+	ResolveAuthSchemes(context.Context, *AuthResolverParameters) ([]*smithyauth.Option, error)
+}
+
+type defaultAuthSchemeResolver struct{}
+
+var _ AuthSchemeResolver = (*defaultAuthSchemeResolver)(nil)
+
+func (*defaultAuthSchemeResolver) ResolveAuthSchemes(ctx context.Context, params *AuthResolverParameters) ([]*smithyauth.Option, error) {
+	if overrides, ok := operationAuthOptions[params.Operation]; ok {
+		return overrides(params), nil
+	}
+	return serviceAuthOptions(params), nil
+}
+
+var operationAuthOptions = map[string]func(*AuthResolverParameters) []*smithyauth.Option{
+	"CreateToken": func(params *AuthResolverParameters) []*smithyauth.Option {
+		return []*smithyauth.Option{
+			{SchemeID: smithyauth.SchemeIDAnonymous},
+		}
+	},
+
+	"RegisterClient": func(params *AuthResolverParameters) []*smithyauth.Option {
+		return []*smithyauth.Option{
+			{SchemeID: smithyauth.SchemeIDAnonymous},
+		}
+	},
+
+	"StartDeviceAuthorization": func(params *AuthResolverParameters) []*smithyauth.Option {
+		return []*smithyauth.Option{
+			{SchemeID: smithyauth.SchemeIDAnonymous},
+		}
+	},
+}
+
+func serviceAuthOptions(params *AuthResolverParameters) []*smithyauth.Option {
+	return []*smithyauth.Option{
+		{
+			SchemeID: smithyauth.SchemeIDSigV4,
+			SignerProperties: func() smithy.Properties {
+				var props smithy.Properties
+				smithyhttp.SetSigV4SigningName(&props, "sso-oauth")
+				smithyhttp.SetSigV4SigningRegion(&props, params.Region)
+				return props
+			}(),
+		},
+	}
+}
+
+type resolveAuthSchemeMiddleware struct {
+	operation string
+	options   Options
+}
+
+func (*resolveAuthSchemeMiddleware) ID() string {
+	return "ResolveAuthScheme"
+}
+
+func (m *resolveAuthSchemeMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (
+	out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
+) {
+	params := bindAuthResolverParams(ctx, m.operation, getOperationInput(ctx), m.options)
+	options, err := m.options.AuthSchemeResolver.ResolveAuthSchemes(ctx, params)
+	if err != nil {
+		return out, metadata, fmt.Errorf("resolve auth scheme: %w", err)
+	}
+
+	scheme, ok := m.selectScheme(options)
+	if !ok {
+		return out, metadata, fmt.Errorf("could not select an auth scheme")
+	}
+
+	ctx = setResolvedAuthScheme(ctx, scheme)
+	return next.HandleFinalize(ctx, in)
+}
+
+func (m *resolveAuthSchemeMiddleware) selectScheme(options []*smithyauth.Option) (*resolvedAuthScheme, bool) {
+	for _, option := range options {
+		if option.SchemeID == smithyauth.SchemeIDAnonymous {
+			return newResolvedAuthScheme(smithyhttp.NewAnonymousScheme(), option), true
+		}
+
+		for _, scheme := range m.options.AuthSchemes {
+			if scheme.SchemeID() != option.SchemeID {
+				continue
+			}
+
+			if scheme.IdentityResolver(m.options) != nil {
+				return newResolvedAuthScheme(scheme, option), true
+			}
+		}
+	}
+
+	return nil, false
+}
+
+type resolvedAuthSchemeKey struct{}
+
+type resolvedAuthScheme struct {
+	Scheme             smithyhttp.AuthScheme
+	IdentityProperties smithy.Properties
+	SignerProperties   smithy.Properties
+}
+
+func newResolvedAuthScheme(scheme smithyhttp.AuthScheme, option *smithyauth.Option) *resolvedAuthScheme {
+	return &resolvedAuthScheme{
+		Scheme:             scheme,
+		IdentityProperties: option.IdentityProperties,
+		SignerProperties:   option.SignerProperties,
+	}
+}
+
+func setResolvedAuthScheme(ctx context.Context, scheme *resolvedAuthScheme) context.Context {
+	return middleware.WithStackValue(ctx, resolvedAuthSchemeKey{}, scheme)
+}
+
+func getResolvedAuthScheme(ctx context.Context) *resolvedAuthScheme {
+	v, _ := middleware.GetStackValue(ctx, resolvedAuthSchemeKey{}).(*resolvedAuthScheme)
+	return v
+}
+
+type getIdentityMiddleware struct {
+	options Options
+}
+
+func (*getIdentityMiddleware) ID() string {
+	return "GetIdentity"
+}
+
+func (m *getIdentityMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (
+	out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
+) {
+	rscheme := getResolvedAuthScheme(ctx)
+	if rscheme == nil {
+		return out, metadata, fmt.Errorf("no resolved auth scheme")
+	}
+
+	resolver := rscheme.Scheme.IdentityResolver(m.options)
+	if resolver == nil {
+		return out, metadata, fmt.Errorf("no identity resolver")
+	}
+
+	identity, err := resolver.GetIdentity(ctx, rscheme.IdentityProperties)
+	if err != nil {
+		return out, metadata, fmt.Errorf("get identity: %w", err)
+	}
+
+	ctx = setIdentity(ctx, identity)
+	return next.HandleFinalize(ctx, in)
+}
+
+type identityKey struct{}
+
+func setIdentity(ctx context.Context, identity smithyauth.Identity) context.Context {
+	return middleware.WithStackValue(ctx, identityKey{}, identity)
+}
+
+func getIdentity(ctx context.Context) smithyauth.Identity {
+	v, _ := middleware.GetStackValue(ctx, identityKey{}).(smithyauth.Identity)
+	return v
+}
+
+type signRequestMiddleware struct {
+}
+
+func (*signRequestMiddleware) ID() string {
+	return "Signing"
+}
+
+func (m *signRequestMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (
+	out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
+) {
+	req, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, fmt.Errorf("unexpected transport type %T", in.Request)
+	}
+
+	rscheme := getResolvedAuthScheme(ctx)
+	if rscheme == nil {
+		return out, metadata, fmt.Errorf("no resolved auth scheme")
+	}
+
+	identity := getIdentity(ctx)
+	if identity == nil {
+		return out, metadata, fmt.Errorf("no identity")
+	}
+
+	signer := rscheme.Scheme.Signer()
+	if signer == nil {
+		return out, metadata, fmt.Errorf("no signer")
+	}
+
+	if err := signer.SignRequest(ctx, req, identity, rscheme.SignerProperties); err != nil {
+		return out, metadata, fmt.Errorf("sign request: %w", err)
+	}
+
+	return next.HandleFinalize(ctx, in)
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/deserializers.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/deserializers.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/deserializers.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/deserializers.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,2167 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package ssooidc
+
+import (
+	"bytes"
+	"context"
+	"encoding/json"
+	"fmt"
+	"github.com/aws/aws-sdk-go-v2/aws/protocol/restjson"
+	"github.com/aws/aws-sdk-go-v2/service/ssooidc/types"
+	smithy "github.com/aws/smithy-go"
+	smithyio "github.com/aws/smithy-go/io"
+	"github.com/aws/smithy-go/middleware"
+	"github.com/aws/smithy-go/ptr"
+	smithytime "github.com/aws/smithy-go/time"
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+	"io"
+	"strings"
+	"time"
+)
+
+func deserializeS3Expires(v string) (*time.Time, error) {
+	t, err := smithytime.ParseHTTPDate(v)
+	if err != nil {
+		return nil, nil
+	}
+	return &t, nil
+}
+
+type awsRestjson1_deserializeOpCreateToken struct {
+}
+
+func (*awsRestjson1_deserializeOpCreateToken) ID() string {
+	return "OperationDeserializer"
+}
+
+func (m *awsRestjson1_deserializeOpCreateToken) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+	out, metadata, err = next.HandleDeserialize(ctx, in)
+	if err != nil {
+		return out, metadata, err
+	}
+
+	response, ok := out.RawResponse.(*smithyhttp.Response)
+	if !ok {
+		return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+	}
+
+	if response.StatusCode < 200 || response.StatusCode >= 300 {
+		return out, metadata, awsRestjson1_deserializeOpErrorCreateToken(response, &metadata)
+	}
+	output := &CreateTokenOutput{}
+	out.Result = output
+
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+	body := io.TeeReader(response.Body, ringBuffer)
+
+	decoder := json.NewDecoder(body)
+	decoder.UseNumber()
+	var shape interface{}
+	if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return out, metadata, err
+	}
+
+	err = awsRestjson1_deserializeOpDocumentCreateTokenOutput(&output, shape)
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		return out, metadata, &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body with invalid JSON, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+	}
+
+	return out, metadata, err
+}
+
+func awsRestjson1_deserializeOpErrorCreateToken(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+	var errorBuffer bytes.Buffer
+	if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+		return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+	}
+	errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+	errorCode := "UnknownError"
+	errorMessage := errorCode
+
+	headerCode := response.Header.Get("X-Amzn-ErrorType")
+	if len(headerCode) != 0 {
+		errorCode = restjson.SanitizeErrorCode(headerCode)
+	}
+
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+	body := io.TeeReader(errorBody, ringBuffer)
+	decoder := json.NewDecoder(body)
+	decoder.UseNumber()
+	jsonCode, message, err := restjson.GetErrorInfo(decoder)
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return err
+	}
+
+	errorBody.Seek(0, io.SeekStart)
+	if len(headerCode) == 0 && len(jsonCode) != 0 {
+		errorCode = restjson.SanitizeErrorCode(jsonCode)
+	}
+	if len(message) != 0 {
+		errorMessage = message
+	}
+
+	switch {
+	case strings.EqualFold("AccessDeniedException", errorCode):
+		return awsRestjson1_deserializeErrorAccessDeniedException(response, errorBody)
+
+	case strings.EqualFold("AuthorizationPendingException", errorCode):
+		return awsRestjson1_deserializeErrorAuthorizationPendingException(response, errorBody)
+
+	case strings.EqualFold("ExpiredTokenException", errorCode):
+		return awsRestjson1_deserializeErrorExpiredTokenException(response, errorBody)
+
+	case strings.EqualFold("InternalServerException", errorCode):
+		return awsRestjson1_deserializeErrorInternalServerException(response, errorBody)
+
+	case strings.EqualFold("InvalidClientException", errorCode):
+		return awsRestjson1_deserializeErrorInvalidClientException(response, errorBody)
+
+	case strings.EqualFold("InvalidGrantException", errorCode):
+		return awsRestjson1_deserializeErrorInvalidGrantException(response, errorBody)
+
+	case strings.EqualFold("InvalidRequestException", errorCode):
+		return awsRestjson1_deserializeErrorInvalidRequestException(response, errorBody)
+
+	case strings.EqualFold("InvalidScopeException", errorCode):
+		return awsRestjson1_deserializeErrorInvalidScopeException(response, errorBody)
+
+	case strings.EqualFold("SlowDownException", errorCode):
+		return awsRestjson1_deserializeErrorSlowDownException(response, errorBody)
+
+	case strings.EqualFold("UnauthorizedClientException", errorCode):
+		return awsRestjson1_deserializeErrorUnauthorizedClientException(response, errorBody)
+
+	case strings.EqualFold("UnsupportedGrantTypeException", errorCode):
+		return awsRestjson1_deserializeErrorUnsupportedGrantTypeException(response, errorBody)
+
+	default:
+		genericError := &smithy.GenericAPIError{
+			Code:    errorCode,
+			Message: errorMessage,
+		}
+		return genericError
+
+	}
+}
+
+func awsRestjson1_deserializeOpDocumentCreateTokenOutput(v **CreateTokenOutput, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var sv *CreateTokenOutput
+	if *v == nil {
+		sv = &CreateTokenOutput{}
+	} else {
+		sv = *v
+	}
+
+	for key, value := range shape {
+		switch key {
+		case "accessToken":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected AccessToken to be of type string, got %T instead", value)
+				}
+				sv.AccessToken = ptr.String(jtv)
+			}
+
+		case "expiresIn":
+			if value != nil {
+				jtv, ok := value.(json.Number)
+				if !ok {
+					return fmt.Errorf("expected ExpirationInSeconds to be json.Number, got %T instead", value)
+				}
+				i64, err := jtv.Int64()
+				if err != nil {
+					return err
+				}
+				sv.ExpiresIn = int32(i64)
+			}
+
+		case "idToken":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected IdToken to be of type string, got %T instead", value)
+				}
+				sv.IdToken = ptr.String(jtv)
+			}
+
+		case "refreshToken":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected RefreshToken to be of type string, got %T instead", value)
+				}
+				sv.RefreshToken = ptr.String(jtv)
+			}
+
+		case "tokenType":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected TokenType to be of type string, got %T instead", value)
+				}
+				sv.TokenType = ptr.String(jtv)
+			}
+
+		default:
+			_, _ = key, value
+
+		}
+	}
+	*v = sv
+	return nil
+}
+
+type awsRestjson1_deserializeOpCreateTokenWithIAM struct {
+}
+
+func (*awsRestjson1_deserializeOpCreateTokenWithIAM) ID() string {
+	return "OperationDeserializer"
+}
+
+func (m *awsRestjson1_deserializeOpCreateTokenWithIAM) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+	out, metadata, err = next.HandleDeserialize(ctx, in)
+	if err != nil {
+		return out, metadata, err
+	}
+
+	response, ok := out.RawResponse.(*smithyhttp.Response)
+	if !ok {
+		return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+	}
+
+	if response.StatusCode < 200 || response.StatusCode >= 300 {
+		return out, metadata, awsRestjson1_deserializeOpErrorCreateTokenWithIAM(response, &metadata)
+	}
+	output := &CreateTokenWithIAMOutput{}
+	out.Result = output
+
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+	body := io.TeeReader(response.Body, ringBuffer)
+
+	decoder := json.NewDecoder(body)
+	decoder.UseNumber()
+	var shape interface{}
+	if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return out, metadata, err
+	}
+
+	err = awsRestjson1_deserializeOpDocumentCreateTokenWithIAMOutput(&output, shape)
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		return out, metadata, &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body with invalid JSON, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+	}
+
+	return out, metadata, err
+}
+
+func awsRestjson1_deserializeOpErrorCreateTokenWithIAM(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+	var errorBuffer bytes.Buffer
+	if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+		return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+	}
+	errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+	errorCode := "UnknownError"
+	errorMessage := errorCode
+
+	headerCode := response.Header.Get("X-Amzn-ErrorType")
+	if len(headerCode) != 0 {
+		errorCode = restjson.SanitizeErrorCode(headerCode)
+	}
+
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+	body := io.TeeReader(errorBody, ringBuffer)
+	decoder := json.NewDecoder(body)
+	decoder.UseNumber()
+	jsonCode, message, err := restjson.GetErrorInfo(decoder)
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return err
+	}
+
+	errorBody.Seek(0, io.SeekStart)
+	if len(headerCode) == 0 && len(jsonCode) != 0 {
+		errorCode = restjson.SanitizeErrorCode(jsonCode)
+	}
+	if len(message) != 0 {
+		errorMessage = message
+	}
+
+	switch {
+	case strings.EqualFold("AccessDeniedException", errorCode):
+		return awsRestjson1_deserializeErrorAccessDeniedException(response, errorBody)
+
+	case strings.EqualFold("AuthorizationPendingException", errorCode):
+		return awsRestjson1_deserializeErrorAuthorizationPendingException(response, errorBody)
+
+	case strings.EqualFold("ExpiredTokenException", errorCode):
+		return awsRestjson1_deserializeErrorExpiredTokenException(response, errorBody)
+
+	case strings.EqualFold("InternalServerException", errorCode):
+		return awsRestjson1_deserializeErrorInternalServerException(response, errorBody)
+
+	case strings.EqualFold("InvalidClientException", errorCode):
+		return awsRestjson1_deserializeErrorInvalidClientException(response, errorBody)
+
+	case strings.EqualFold("InvalidGrantException", errorCode):
+		return awsRestjson1_deserializeErrorInvalidGrantException(response, errorBody)
+
+	case strings.EqualFold("InvalidRequestException", errorCode):
+		return awsRestjson1_deserializeErrorInvalidRequestException(response, errorBody)
+
+	case strings.EqualFold("InvalidRequestRegionException", errorCode):
+		return awsRestjson1_deserializeErrorInvalidRequestRegionException(response, errorBody)
+
+	case strings.EqualFold("InvalidScopeException", errorCode):
+		return awsRestjson1_deserializeErrorInvalidScopeException(response, errorBody)
+
+	case strings.EqualFold("SlowDownException", errorCode):
+		return awsRestjson1_deserializeErrorSlowDownException(response, errorBody)
+
+	case strings.EqualFold("UnauthorizedClientException", errorCode):
+		return awsRestjson1_deserializeErrorUnauthorizedClientException(response, errorBody)
+
+	case strings.EqualFold("UnsupportedGrantTypeException", errorCode):
+		return awsRestjson1_deserializeErrorUnsupportedGrantTypeException(response, errorBody)
+
+	default:
+		genericError := &smithy.GenericAPIError{
+			Code:    errorCode,
+			Message: errorMessage,
+		}
+		return genericError
+
+	}
+}
+
+func awsRestjson1_deserializeOpDocumentCreateTokenWithIAMOutput(v **CreateTokenWithIAMOutput, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var sv *CreateTokenWithIAMOutput
+	if *v == nil {
+		sv = &CreateTokenWithIAMOutput{}
+	} else {
+		sv = *v
+	}
+
+	for key, value := range shape {
+		switch key {
+		case "accessToken":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected AccessToken to be of type string, got %T instead", value)
+				}
+				sv.AccessToken = ptr.String(jtv)
+			}
+
+		case "expiresIn":
+			if value != nil {
+				jtv, ok := value.(json.Number)
+				if !ok {
+					return fmt.Errorf("expected ExpirationInSeconds to be json.Number, got %T instead", value)
+				}
+				i64, err := jtv.Int64()
+				if err != nil {
+					return err
+				}
+				sv.ExpiresIn = int32(i64)
+			}
+
+		case "idToken":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected IdToken to be of type string, got %T instead", value)
+				}
+				sv.IdToken = ptr.String(jtv)
+			}
+
+		case "issuedTokenType":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected TokenTypeURI to be of type string, got %T instead", value)
+				}
+				sv.IssuedTokenType = ptr.String(jtv)
+			}
+
+		case "refreshToken":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected RefreshToken to be of type string, got %T instead", value)
+				}
+				sv.RefreshToken = ptr.String(jtv)
+			}
+
+		case "scope":
+			if err := awsRestjson1_deserializeDocumentScopes(&sv.Scope, value); err != nil {
+				return err
+			}
+
+		case "tokenType":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected TokenType to be of type string, got %T instead", value)
+				}
+				sv.TokenType = ptr.String(jtv)
+			}
+
+		default:
+			_, _ = key, value
+
+		}
+	}
+	*v = sv
+	return nil
+}
+
+type awsRestjson1_deserializeOpRegisterClient struct {
+}
+
+func (*awsRestjson1_deserializeOpRegisterClient) ID() string {
+	return "OperationDeserializer"
+}
+
+func (m *awsRestjson1_deserializeOpRegisterClient) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+	out, metadata, err = next.HandleDeserialize(ctx, in)
+	if err != nil {
+		return out, metadata, err
+	}
+
+	response, ok := out.RawResponse.(*smithyhttp.Response)
+	if !ok {
+		return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+	}
+
+	if response.StatusCode < 200 || response.StatusCode >= 300 {
+		return out, metadata, awsRestjson1_deserializeOpErrorRegisterClient(response, &metadata)
+	}
+	output := &RegisterClientOutput{}
+	out.Result = output
+
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+	body := io.TeeReader(response.Body, ringBuffer)
+
+	decoder := json.NewDecoder(body)
+	decoder.UseNumber()
+	var shape interface{}
+	if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return out, metadata, err
+	}
+
+	err = awsRestjson1_deserializeOpDocumentRegisterClientOutput(&output, shape)
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		return out, metadata, &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body with invalid JSON, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+	}
+
+	return out, metadata, err
+}
+
+func awsRestjson1_deserializeOpErrorRegisterClient(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+	var errorBuffer bytes.Buffer
+	if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+		return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+	}
+	errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+	errorCode := "UnknownError"
+	errorMessage := errorCode
+
+	headerCode := response.Header.Get("X-Amzn-ErrorType")
+	if len(headerCode) != 0 {
+		errorCode = restjson.SanitizeErrorCode(headerCode)
+	}
+
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+	body := io.TeeReader(errorBody, ringBuffer)
+	decoder := json.NewDecoder(body)
+	decoder.UseNumber()
+	jsonCode, message, err := restjson.GetErrorInfo(decoder)
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return err
+	}
+
+	errorBody.Seek(0, io.SeekStart)
+	if len(headerCode) == 0 && len(jsonCode) != 0 {
+		errorCode = restjson.SanitizeErrorCode(jsonCode)
+	}
+	if len(message) != 0 {
+		errorMessage = message
+	}
+
+	switch {
+	case strings.EqualFold("InternalServerException", errorCode):
+		return awsRestjson1_deserializeErrorInternalServerException(response, errorBody)
+
+	case strings.EqualFold("InvalidClientMetadataException", errorCode):
+		return awsRestjson1_deserializeErrorInvalidClientMetadataException(response, errorBody)
+
+	case strings.EqualFold("InvalidRedirectUriException", errorCode):
+		return awsRestjson1_deserializeErrorInvalidRedirectUriException(response, errorBody)
+
+	case strings.EqualFold("InvalidRequestException", errorCode):
+		return awsRestjson1_deserializeErrorInvalidRequestException(response, errorBody)
+
+	case strings.EqualFold("InvalidScopeException", errorCode):
+		return awsRestjson1_deserializeErrorInvalidScopeException(response, errorBody)
+
+	case strings.EqualFold("UnsupportedGrantTypeException", errorCode):
+		return awsRestjson1_deserializeErrorUnsupportedGrantTypeException(response, errorBody)
+
+	default:
+		genericError := &smithy.GenericAPIError{
+			Code:    errorCode,
+			Message: errorMessage,
+		}
+		return genericError
+
+	}
+}
+
+func awsRestjson1_deserializeOpDocumentRegisterClientOutput(v **RegisterClientOutput, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var sv *RegisterClientOutput
+	if *v == nil {
+		sv = &RegisterClientOutput{}
+	} else {
+		sv = *v
+	}
+
+	for key, value := range shape {
+		switch key {
+		case "authorizationEndpoint":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected URI to be of type string, got %T instead", value)
+				}
+				sv.AuthorizationEndpoint = ptr.String(jtv)
+			}
+
+		case "clientId":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected ClientId to be of type string, got %T instead", value)
+				}
+				sv.ClientId = ptr.String(jtv)
+			}
+
+		case "clientIdIssuedAt":
+			if value != nil {
+				jtv, ok := value.(json.Number)
+				if !ok {
+					return fmt.Errorf("expected LongTimeStampType to be json.Number, got %T instead", value)
+				}
+				i64, err := jtv.Int64()
+				if err != nil {
+					return err
+				}
+				sv.ClientIdIssuedAt = i64
+			}
+
+		case "clientSecret":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected ClientSecret to be of type string, got %T instead", value)
+				}
+				sv.ClientSecret = ptr.String(jtv)
+			}
+
+		case "clientSecretExpiresAt":
+			if value != nil {
+				jtv, ok := value.(json.Number)
+				if !ok {
+					return fmt.Errorf("expected LongTimeStampType to be json.Number, got %T instead", value)
+				}
+				i64, err := jtv.Int64()
+				if err != nil {
+					return err
+				}
+				sv.ClientSecretExpiresAt = i64
+			}
+
+		case "tokenEndpoint":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected URI to be of type string, got %T instead", value)
+				}
+				sv.TokenEndpoint = ptr.String(jtv)
+			}
+
+		default:
+			_, _ = key, value
+
+		}
+	}
+	*v = sv
+	return nil
+}
+
+type awsRestjson1_deserializeOpStartDeviceAuthorization struct {
+}
+
+func (*awsRestjson1_deserializeOpStartDeviceAuthorization) ID() string {
+	return "OperationDeserializer"
+}
+
+func (m *awsRestjson1_deserializeOpStartDeviceAuthorization) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+	out, metadata, err = next.HandleDeserialize(ctx, in)
+	if err != nil {
+		return out, metadata, err
+	}
+
+	response, ok := out.RawResponse.(*smithyhttp.Response)
+	if !ok {
+		return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+	}
+
+	if response.StatusCode < 200 || response.StatusCode >= 300 {
+		return out, metadata, awsRestjson1_deserializeOpErrorStartDeviceAuthorization(response, &metadata)
+	}
+	output := &StartDeviceAuthorizationOutput{}
+	out.Result = output
+
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+	body := io.TeeReader(response.Body, ringBuffer)
+
+	decoder := json.NewDecoder(body)
+	decoder.UseNumber()
+	var shape interface{}
+	if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return out, metadata, err
+	}
+
+	err = awsRestjson1_deserializeOpDocumentStartDeviceAuthorizationOutput(&output, shape)
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		return out, metadata, &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body with invalid JSON, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+	}
+
+	return out, metadata, err
+}
+
+func awsRestjson1_deserializeOpErrorStartDeviceAuthorization(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+	var errorBuffer bytes.Buffer
+	if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+		return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+	}
+	errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+	errorCode := "UnknownError"
+	errorMessage := errorCode
+
+	headerCode := response.Header.Get("X-Amzn-ErrorType")
+	if len(headerCode) != 0 {
+		errorCode = restjson.SanitizeErrorCode(headerCode)
+	}
+
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+	body := io.TeeReader(errorBody, ringBuffer)
+	decoder := json.NewDecoder(body)
+	decoder.UseNumber()
+	jsonCode, message, err := restjson.GetErrorInfo(decoder)
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return err
+	}
+
+	errorBody.Seek(0, io.SeekStart)
+	if len(headerCode) == 0 && len(jsonCode) != 0 {
+		errorCode = restjson.SanitizeErrorCode(jsonCode)
+	}
+	if len(message) != 0 {
+		errorMessage = message
+	}
+
+	switch {
+	case strings.EqualFold("InternalServerException", errorCode):
+		return awsRestjson1_deserializeErrorInternalServerException(response, errorBody)
+
+	case strings.EqualFold("InvalidClientException", errorCode):
+		return awsRestjson1_deserializeErrorInvalidClientException(response, errorBody)
+
+	case strings.EqualFold("InvalidRequestException", errorCode):
+		return awsRestjson1_deserializeErrorInvalidRequestException(response, errorBody)
+
+	case strings.EqualFold("SlowDownException", errorCode):
+		return awsRestjson1_deserializeErrorSlowDownException(response, errorBody)
+
+	case strings.EqualFold("UnauthorizedClientException", errorCode):
+		return awsRestjson1_deserializeErrorUnauthorizedClientException(response, errorBody)
+
+	default:
+		genericError := &smithy.GenericAPIError{
+			Code:    errorCode,
+			Message: errorMessage,
+		}
+		return genericError
+
+	}
+}
+
+func awsRestjson1_deserializeOpDocumentStartDeviceAuthorizationOutput(v **StartDeviceAuthorizationOutput, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var sv *StartDeviceAuthorizationOutput
+	if *v == nil {
+		sv = &StartDeviceAuthorizationOutput{}
+	} else {
+		sv = *v
+	}
+
+	for key, value := range shape {
+		switch key {
+		case "deviceCode":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected DeviceCode to be of type string, got %T instead", value)
+				}
+				sv.DeviceCode = ptr.String(jtv)
+			}
+
+		case "expiresIn":
+			if value != nil {
+				jtv, ok := value.(json.Number)
+				if !ok {
+					return fmt.Errorf("expected ExpirationInSeconds to be json.Number, got %T instead", value)
+				}
+				i64, err := jtv.Int64()
+				if err != nil {
+					return err
+				}
+				sv.ExpiresIn = int32(i64)
+			}
+
+		case "interval":
+			if value != nil {
+				jtv, ok := value.(json.Number)
+				if !ok {
+					return fmt.Errorf("expected IntervalInSeconds to be json.Number, got %T instead", value)
+				}
+				i64, err := jtv.Int64()
+				if err != nil {
+					return err
+				}
+				sv.Interval = int32(i64)
+			}
+
+		case "userCode":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected UserCode to be of type string, got %T instead", value)
+				}
+				sv.UserCode = ptr.String(jtv)
+			}
+
+		case "verificationUri":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected URI to be of type string, got %T instead", value)
+				}
+				sv.VerificationUri = ptr.String(jtv)
+			}
+
+		case "verificationUriComplete":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected URI to be of type string, got %T instead", value)
+				}
+				sv.VerificationUriComplete = ptr.String(jtv)
+			}
+
+		default:
+			_, _ = key, value
+
+		}
+	}
+	*v = sv
+	return nil
+}
+
+func awsRestjson1_deserializeErrorAccessDeniedException(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+	output := &types.AccessDeniedException{}
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+	body := io.TeeReader(errorBody, ringBuffer)
+	decoder := json.NewDecoder(body)
+	decoder.UseNumber()
+	var shape interface{}
+	if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return err
+	}
+
+	err := awsRestjson1_deserializeDocumentAccessDeniedException(&output, shape)
+
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return err
+	}
+
+	errorBody.Seek(0, io.SeekStart)
+
+	return output
+}
+
+func awsRestjson1_deserializeErrorAuthorizationPendingException(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+	output := &types.AuthorizationPendingException{}
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+	body := io.TeeReader(errorBody, ringBuffer)
+	decoder := json.NewDecoder(body)
+	decoder.UseNumber()
+	var shape interface{}
+	if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return err
+	}
+
+	err := awsRestjson1_deserializeDocumentAuthorizationPendingException(&output, shape)
+
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return err
+	}
+
+	errorBody.Seek(0, io.SeekStart)
+
+	return output
+}
+
+func awsRestjson1_deserializeErrorExpiredTokenException(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+	output := &types.ExpiredTokenException{}
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+	body := io.TeeReader(errorBody, ringBuffer)
+	decoder := json.NewDecoder(body)
+	decoder.UseNumber()
+	var shape interface{}
+	if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return err
+	}
+
+	err := awsRestjson1_deserializeDocumentExpiredTokenException(&output, shape)
+
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return err
+	}
+
+	errorBody.Seek(0, io.SeekStart)
+
+	return output
+}
+
+func awsRestjson1_deserializeErrorInternalServerException(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+	output := &types.InternalServerException{}
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+	body := io.TeeReader(errorBody, ringBuffer)
+	decoder := json.NewDecoder(body)
+	decoder.UseNumber()
+	var shape interface{}
+	if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return err
+	}
+
+	err := awsRestjson1_deserializeDocumentInternalServerException(&output, shape)
+
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return err
+	}
+
+	errorBody.Seek(0, io.SeekStart)
+
+	return output
+}
+
+func awsRestjson1_deserializeErrorInvalidClientException(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+	output := &types.InvalidClientException{}
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+	body := io.TeeReader(errorBody, ringBuffer)
+	decoder := json.NewDecoder(body)
+	decoder.UseNumber()
+	var shape interface{}
+	if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return err
+	}
+
+	err := awsRestjson1_deserializeDocumentInvalidClientException(&output, shape)
+
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return err
+	}
+
+	errorBody.Seek(0, io.SeekStart)
+
+	return output
+}
+
+func awsRestjson1_deserializeErrorInvalidClientMetadataException(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+	output := &types.InvalidClientMetadataException{}
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+	body := io.TeeReader(errorBody, ringBuffer)
+	decoder := json.NewDecoder(body)
+	decoder.UseNumber()
+	var shape interface{}
+	if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return err
+	}
+
+	err := awsRestjson1_deserializeDocumentInvalidClientMetadataException(&output, shape)
+
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return err
+	}
+
+	errorBody.Seek(0, io.SeekStart)
+
+	return output
+}
+
+func awsRestjson1_deserializeErrorInvalidGrantException(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+	output := &types.InvalidGrantException{}
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+	body := io.TeeReader(errorBody, ringBuffer)
+	decoder := json.NewDecoder(body)
+	decoder.UseNumber()
+	var shape interface{}
+	if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return err
+	}
+
+	err := awsRestjson1_deserializeDocumentInvalidGrantException(&output, shape)
+
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return err
+	}
+
+	errorBody.Seek(0, io.SeekStart)
+
+	return output
+}
+
+func awsRestjson1_deserializeErrorInvalidRedirectUriException(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+	output := &types.InvalidRedirectUriException{}
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+	body := io.TeeReader(errorBody, ringBuffer)
+	decoder := json.NewDecoder(body)
+	decoder.UseNumber()
+	var shape interface{}
+	if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return err
+	}
+
+	err := awsRestjson1_deserializeDocumentInvalidRedirectUriException(&output, shape)
+
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return err
+	}
+
+	errorBody.Seek(0, io.SeekStart)
+
+	return output
+}
+
+func awsRestjson1_deserializeErrorInvalidRequestException(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+	output := &types.InvalidRequestException{}
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+	body := io.TeeReader(errorBody, ringBuffer)
+	decoder := json.NewDecoder(body)
+	decoder.UseNumber()
+	var shape interface{}
+	if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return err
+	}
+
+	err := awsRestjson1_deserializeDocumentInvalidRequestException(&output, shape)
+
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return err
+	}
+
+	errorBody.Seek(0, io.SeekStart)
+
+	return output
+}
+
+func awsRestjson1_deserializeErrorInvalidRequestRegionException(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+	output := &types.InvalidRequestRegionException{}
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+	body := io.TeeReader(errorBody, ringBuffer)
+	decoder := json.NewDecoder(body)
+	decoder.UseNumber()
+	var shape interface{}
+	if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return err
+	}
+
+	err := awsRestjson1_deserializeDocumentInvalidRequestRegionException(&output, shape)
+
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return err
+	}
+
+	errorBody.Seek(0, io.SeekStart)
+
+	return output
+}
+
+func awsRestjson1_deserializeErrorInvalidScopeException(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+	output := &types.InvalidScopeException{}
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+	body := io.TeeReader(errorBody, ringBuffer)
+	decoder := json.NewDecoder(body)
+	decoder.UseNumber()
+	var shape interface{}
+	if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return err
+	}
+
+	err := awsRestjson1_deserializeDocumentInvalidScopeException(&output, shape)
+
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return err
+	}
+
+	errorBody.Seek(0, io.SeekStart)
+
+	return output
+}
+
+func awsRestjson1_deserializeErrorSlowDownException(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+	output := &types.SlowDownException{}
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+	body := io.TeeReader(errorBody, ringBuffer)
+	decoder := json.NewDecoder(body)
+	decoder.UseNumber()
+	var shape interface{}
+	if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return err
+	}
+
+	err := awsRestjson1_deserializeDocumentSlowDownException(&output, shape)
+
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return err
+	}
+
+	errorBody.Seek(0, io.SeekStart)
+
+	return output
+}
+
+func awsRestjson1_deserializeErrorUnauthorizedClientException(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+	output := &types.UnauthorizedClientException{}
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+	body := io.TeeReader(errorBody, ringBuffer)
+	decoder := json.NewDecoder(body)
+	decoder.UseNumber()
+	var shape interface{}
+	if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return err
+	}
+
+	err := awsRestjson1_deserializeDocumentUnauthorizedClientException(&output, shape)
+
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return err
+	}
+
+	errorBody.Seek(0, io.SeekStart)
+
+	return output
+}
+
+func awsRestjson1_deserializeErrorUnsupportedGrantTypeException(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+	output := &types.UnsupportedGrantTypeException{}
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+	body := io.TeeReader(errorBody, ringBuffer)
+	decoder := json.NewDecoder(body)
+	decoder.UseNumber()
+	var shape interface{}
+	if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return err
+	}
+
+	err := awsRestjson1_deserializeDocumentUnsupportedGrantTypeException(&output, shape)
+
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return err
+	}
+
+	errorBody.Seek(0, io.SeekStart)
+
+	return output
+}
+
+func awsRestjson1_deserializeDocumentAccessDeniedException(v **types.AccessDeniedException, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var sv *types.AccessDeniedException
+	if *v == nil {
+		sv = &types.AccessDeniedException{}
+	} else {
+		sv = *v
+	}
+
+	for key, value := range shape {
+		switch key {
+		case "error":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected Error to be of type string, got %T instead", value)
+				}
+				sv.Error_ = ptr.String(jtv)
+			}
+
+		case "error_description":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected ErrorDescription to be of type string, got %T instead", value)
+				}
+				sv.Error_description = ptr.String(jtv)
+			}
+
+		default:
+			_, _ = key, value
+
+		}
+	}
+	*v = sv
+	return nil
+}
+
+func awsRestjson1_deserializeDocumentAuthorizationPendingException(v **types.AuthorizationPendingException, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var sv *types.AuthorizationPendingException
+	if *v == nil {
+		sv = &types.AuthorizationPendingException{}
+	} else {
+		sv = *v
+	}
+
+	for key, value := range shape {
+		switch key {
+		case "error":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected Error to be of type string, got %T instead", value)
+				}
+				sv.Error_ = ptr.String(jtv)
+			}
+
+		case "error_description":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected ErrorDescription to be of type string, got %T instead", value)
+				}
+				sv.Error_description = ptr.String(jtv)
+			}
+
+		default:
+			_, _ = key, value
+
+		}
+	}
+	*v = sv
+	return nil
+}
+
+func awsRestjson1_deserializeDocumentExpiredTokenException(v **types.ExpiredTokenException, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var sv *types.ExpiredTokenException
+	if *v == nil {
+		sv = &types.ExpiredTokenException{}
+	} else {
+		sv = *v
+	}
+
+	for key, value := range shape {
+		switch key {
+		case "error":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected Error to be of type string, got %T instead", value)
+				}
+				sv.Error_ = ptr.String(jtv)
+			}
+
+		case "error_description":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected ErrorDescription to be of type string, got %T instead", value)
+				}
+				sv.Error_description = ptr.String(jtv)
+			}
+
+		default:
+			_, _ = key, value
+
+		}
+	}
+	*v = sv
+	return nil
+}
+
+func awsRestjson1_deserializeDocumentInternalServerException(v **types.InternalServerException, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var sv *types.InternalServerException
+	if *v == nil {
+		sv = &types.InternalServerException{}
+	} else {
+		sv = *v
+	}
+
+	for key, value := range shape {
+		switch key {
+		case "error":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected Error to be of type string, got %T instead", value)
+				}
+				sv.Error_ = ptr.String(jtv)
+			}
+
+		case "error_description":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected ErrorDescription to be of type string, got %T instead", value)
+				}
+				sv.Error_description = ptr.String(jtv)
+			}
+
+		default:
+			_, _ = key, value
+
+		}
+	}
+	*v = sv
+	return nil
+}
+
+func awsRestjson1_deserializeDocumentInvalidClientException(v **types.InvalidClientException, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var sv *types.InvalidClientException
+	if *v == nil {
+		sv = &types.InvalidClientException{}
+	} else {
+		sv = *v
+	}
+
+	for key, value := range shape {
+		switch key {
+		case "error":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected Error to be of type string, got %T instead", value)
+				}
+				sv.Error_ = ptr.String(jtv)
+			}
+
+		case "error_description":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected ErrorDescription to be of type string, got %T instead", value)
+				}
+				sv.Error_description = ptr.String(jtv)
+			}
+
+		default:
+			_, _ = key, value
+
+		}
+	}
+	*v = sv
+	return nil
+}
+
+func awsRestjson1_deserializeDocumentInvalidClientMetadataException(v **types.InvalidClientMetadataException, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var sv *types.InvalidClientMetadataException
+	if *v == nil {
+		sv = &types.InvalidClientMetadataException{}
+	} else {
+		sv = *v
+	}
+
+	for key, value := range shape {
+		switch key {
+		case "error":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected Error to be of type string, got %T instead", value)
+				}
+				sv.Error_ = ptr.String(jtv)
+			}
+
+		case "error_description":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected ErrorDescription to be of type string, got %T instead", value)
+				}
+				sv.Error_description = ptr.String(jtv)
+			}
+
+		default:
+			_, _ = key, value
+
+		}
+	}
+	*v = sv
+	return nil
+}
+
+func awsRestjson1_deserializeDocumentInvalidGrantException(v **types.InvalidGrantException, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var sv *types.InvalidGrantException
+	if *v == nil {
+		sv = &types.InvalidGrantException{}
+	} else {
+		sv = *v
+	}
+
+	for key, value := range shape {
+		switch key {
+		case "error":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected Error to be of type string, got %T instead", value)
+				}
+				sv.Error_ = ptr.String(jtv)
+			}
+
+		case "error_description":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected ErrorDescription to be of type string, got %T instead", value)
+				}
+				sv.Error_description = ptr.String(jtv)
+			}
+
+		default:
+			_, _ = key, value
+
+		}
+	}
+	*v = sv
+	return nil
+}
+
+func awsRestjson1_deserializeDocumentInvalidRedirectUriException(v **types.InvalidRedirectUriException, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var sv *types.InvalidRedirectUriException
+	if *v == nil {
+		sv = &types.InvalidRedirectUriException{}
+	} else {
+		sv = *v
+	}
+
+	for key, value := range shape {
+		switch key {
+		case "error":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected Error to be of type string, got %T instead", value)
+				}
+				sv.Error_ = ptr.String(jtv)
+			}
+
+		case "error_description":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected ErrorDescription to be of type string, got %T instead", value)
+				}
+				sv.Error_description = ptr.String(jtv)
+			}
+
+		default:
+			_, _ = key, value
+
+		}
+	}
+	*v = sv
+	return nil
+}
+
+func awsRestjson1_deserializeDocumentInvalidRequestException(v **types.InvalidRequestException, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var sv *types.InvalidRequestException
+	if *v == nil {
+		sv = &types.InvalidRequestException{}
+	} else {
+		sv = *v
+	}
+
+	for key, value := range shape {
+		switch key {
+		case "error":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected Error to be of type string, got %T instead", value)
+				}
+				sv.Error_ = ptr.String(jtv)
+			}
+
+		case "error_description":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected ErrorDescription to be of type string, got %T instead", value)
+				}
+				sv.Error_description = ptr.String(jtv)
+			}
+
+		default:
+			_, _ = key, value
+
+		}
+	}
+	*v = sv
+	return nil
+}
+
+func awsRestjson1_deserializeDocumentInvalidRequestRegionException(v **types.InvalidRequestRegionException, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var sv *types.InvalidRequestRegionException
+	if *v == nil {
+		sv = &types.InvalidRequestRegionException{}
+	} else {
+		sv = *v
+	}
+
+	for key, value := range shape {
+		switch key {
+		case "endpoint":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected Location to be of type string, got %T instead", value)
+				}
+				sv.Endpoint = ptr.String(jtv)
+			}
+
+		case "error":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected Error to be of type string, got %T instead", value)
+				}
+				sv.Error_ = ptr.String(jtv)
+			}
+
+		case "error_description":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected ErrorDescription to be of type string, got %T instead", value)
+				}
+				sv.Error_description = ptr.String(jtv)
+			}
+
+		case "region":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected Region to be of type string, got %T instead", value)
+				}
+				sv.Region = ptr.String(jtv)
+			}
+
+		default:
+			_, _ = key, value
+
+		}
+	}
+	*v = sv
+	return nil
+}
+
+func awsRestjson1_deserializeDocumentInvalidScopeException(v **types.InvalidScopeException, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var sv *types.InvalidScopeException
+	if *v == nil {
+		sv = &types.InvalidScopeException{}
+	} else {
+		sv = *v
+	}
+
+	for key, value := range shape {
+		switch key {
+		case "error":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected Error to be of type string, got %T instead", value)
+				}
+				sv.Error_ = ptr.String(jtv)
+			}
+
+		case "error_description":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected ErrorDescription to be of type string, got %T instead", value)
+				}
+				sv.Error_description = ptr.String(jtv)
+			}
+
+		default:
+			_, _ = key, value
+
+		}
+	}
+	*v = sv
+	return nil
+}
+
+func awsRestjson1_deserializeDocumentScopes(v *[]string, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.([]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var cv []string
+	if *v == nil {
+		cv = []string{}
+	} else {
+		cv = *v
+	}
+
+	for _, value := range shape {
+		var col string
+		if value != nil {
+			jtv, ok := value.(string)
+			if !ok {
+				return fmt.Errorf("expected Scope to be of type string, got %T instead", value)
+			}
+			col = jtv
+		}
+		cv = append(cv, col)
+
+	}
+	*v = cv
+	return nil
+}
+
+func awsRestjson1_deserializeDocumentSlowDownException(v **types.SlowDownException, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var sv *types.SlowDownException
+	if *v == nil {
+		sv = &types.SlowDownException{}
+	} else {
+		sv = *v
+	}
+
+	for key, value := range shape {
+		switch key {
+		case "error":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected Error to be of type string, got %T instead", value)
+				}
+				sv.Error_ = ptr.String(jtv)
+			}
+
+		case "error_description":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected ErrorDescription to be of type string, got %T instead", value)
+				}
+				sv.Error_description = ptr.String(jtv)
+			}
+
+		default:
+			_, _ = key, value
+
+		}
+	}
+	*v = sv
+	return nil
+}
+
+func awsRestjson1_deserializeDocumentUnauthorizedClientException(v **types.UnauthorizedClientException, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var sv *types.UnauthorizedClientException
+	if *v == nil {
+		sv = &types.UnauthorizedClientException{}
+	} else {
+		sv = *v
+	}
+
+	for key, value := range shape {
+		switch key {
+		case "error":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected Error to be of type string, got %T instead", value)
+				}
+				sv.Error_ = ptr.String(jtv)
+			}
+
+		case "error_description":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected ErrorDescription to be of type string, got %T instead", value)
+				}
+				sv.Error_description = ptr.String(jtv)
+			}
+
+		default:
+			_, _ = key, value
+
+		}
+	}
+	*v = sv
+	return nil
+}
+
+func awsRestjson1_deserializeDocumentUnsupportedGrantTypeException(v **types.UnsupportedGrantTypeException, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var sv *types.UnsupportedGrantTypeException
+	if *v == nil {
+		sv = &types.UnsupportedGrantTypeException{}
+	} else {
+		sv = *v
+	}
+
+	for key, value := range shape {
+		switch key {
+		case "error":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected Error to be of type string, got %T instead", value)
+				}
+				sv.Error_ = ptr.String(jtv)
+			}
+
+		case "error_description":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected ErrorDescription to be of type string, got %T instead", value)
+				}
+				sv.Error_description = ptr.String(jtv)
+			}
+
+		default:
+			_, _ = key, value
+
+		}
+	}
+	*v = sv
+	return nil
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/doc.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/doc.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/doc.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/doc.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,46 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+// Package ssooidc provides the API client, operations, and parameter types for
+// AWS SSO OIDC.
+//
+// IAM Identity Center OpenID Connect (OIDC) is a web service that enables a
+// client (such as CLI or a native application) to register with IAM Identity
+// Center. The service also enables the client to fetch the user’s access token
+// upon successful authentication and authorization with IAM Identity Center.
+//
+// IAM Identity Center uses the sso and identitystore API namespaces.
+//
+// # Considerations for Using This Guide
+//
+// Before you begin using this guide, we recommend that you first review the
+// following important information about how the IAM Identity Center OIDC service
+// works.
+//
+//   - The IAM Identity Center OIDC service currently implements only the portions
+//     of the OAuth 2.0 Device Authorization Grant standard ([https://tools.ietf.org/html/rfc8628] ) that are necessary to
+//     enable single sign-on authentication with the CLI.
+//
+//   - With older versions of the CLI, the service only emits OIDC access tokens,
+//     so to obtain a new token, users must explicitly re-authenticate. To access the
+//     OIDC flow that supports token refresh and doesn’t require re-authentication,
+//     update to the latest CLI version (1.27.10 for CLI V1 and 2.9.0 for CLI V2) with
+//     support for OIDC token refresh and configurable IAM Identity Center session
+//     durations. For more information, see [Configure Amazon Web Services access portal session duration].
+//
+//   - The access tokens provided by this service grant access to all Amazon Web
+//     Services account entitlements assigned to an IAM Identity Center user, not just
+//     a particular application.
+//
+//   - The documentation in this guide does not describe the mechanism to convert
+//     the access token into Amazon Web Services Auth (“sigv4”) credentials for use
+//     with IAM-protected Amazon Web Services service endpoints. For more information,
+//     see [GetRoleCredentials]in the IAM Identity Center Portal API Reference Guide.
+//
+// For general information about IAM Identity Center, see [What is IAM Identity Center?] in the IAM Identity
+// Center User Guide.
+//
+// [Configure Amazon Web Services access portal session duration]: https://docs.aws.amazon.com/singlesignon/latest/userguide/configure-user-session.html
+// [GetRoleCredentials]: https://docs.aws.amazon.com/singlesignon/latest/PortalAPIReference/API_GetRoleCredentials.html
+// [https://tools.ietf.org/html/rfc8628]: https://tools.ietf.org/html/rfc8628
+// [What is IAM Identity Center?]: https://docs.aws.amazon.com/singlesignon/latest/userguide/what-is.html
+package ssooidc
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/endpoints.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/endpoints.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/endpoints.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/endpoints.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,550 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package ssooidc
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"github.com/aws/aws-sdk-go-v2/aws"
+	awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+	internalConfig "github.com/aws/aws-sdk-go-v2/internal/configsources"
+	"github.com/aws/aws-sdk-go-v2/internal/endpoints"
+	"github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn"
+	internalendpoints "github.com/aws/aws-sdk-go-v2/service/ssooidc/internal/endpoints"
+	smithyauth "github.com/aws/smithy-go/auth"
+	smithyendpoints "github.com/aws/smithy-go/endpoints"
+	"github.com/aws/smithy-go/middleware"
+	"github.com/aws/smithy-go/ptr"
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+	"net/http"
+	"net/url"
+	"os"
+	"strings"
+)
+
+// EndpointResolverOptions is the service endpoint resolver options
+type EndpointResolverOptions = internalendpoints.Options
+
+// EndpointResolver interface for resolving service endpoints.
+type EndpointResolver interface {
+	ResolveEndpoint(region string, options EndpointResolverOptions) (aws.Endpoint, error)
+}
+
+var _ EndpointResolver = &internalendpoints.Resolver{}
+
+// NewDefaultEndpointResolver constructs a new service endpoint resolver
+func NewDefaultEndpointResolver() *internalendpoints.Resolver {
+	return internalendpoints.New()
+}
+
+// EndpointResolverFunc is a helper utility that wraps a function so it satisfies
+// the EndpointResolver interface. This is useful when you want to add additional
+// endpoint resolving logic, or stub out specific endpoints with custom values.
+type EndpointResolverFunc func(region string, options EndpointResolverOptions) (aws.Endpoint, error)
+
+func (fn EndpointResolverFunc) ResolveEndpoint(region string, options EndpointResolverOptions) (endpoint aws.Endpoint, err error) {
+	return fn(region, options)
+}
+
+// EndpointResolverFromURL returns an EndpointResolver configured using the
+// provided endpoint url. By default, the resolved endpoint resolver uses the
+// client region as signing region, and the endpoint source is set to
+// EndpointSourceCustom. You can provide functional options to configure endpoint
+// values for the resolved endpoint.
+func EndpointResolverFromURL(url string, optFns ...func(*aws.Endpoint)) EndpointResolver {
+	e := aws.Endpoint{URL: url, Source: aws.EndpointSourceCustom}
+	for _, fn := range optFns {
+		fn(&e)
+	}
+
+	return EndpointResolverFunc(
+		func(region string, options EndpointResolverOptions) (aws.Endpoint, error) {
+			if len(e.SigningRegion) == 0 {
+				e.SigningRegion = region
+			}
+			return e, nil
+		},
+	)
+}
+
+type ResolveEndpoint struct {
+	Resolver EndpointResolver
+	Options  EndpointResolverOptions
+}
+
+func (*ResolveEndpoint) ID() string {
+	return "ResolveEndpoint"
+}
+
+func (m *ResolveEndpoint) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	if !awsmiddleware.GetRequiresLegacyEndpoints(ctx) {
+		return next.HandleSerialize(ctx, in)
+	}
+
+	req, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, fmt.Errorf("unknown transport type %T", in.Request)
+	}
+
+	if m.Resolver == nil {
+		return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil")
+	}
+
+	eo := m.Options
+	eo.Logger = middleware.GetLogger(ctx)
+
+	var endpoint aws.Endpoint
+	endpoint, err = m.Resolver.ResolveEndpoint(awsmiddleware.GetRegion(ctx), eo)
+	if err != nil {
+		nf := (&aws.EndpointNotFoundError{})
+		if errors.As(err, &nf) {
+			ctx = awsmiddleware.SetRequiresLegacyEndpoints(ctx, false)
+			return next.HandleSerialize(ctx, in)
+		}
+		return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err)
+	}
+
+	req.URL, err = url.Parse(endpoint.URL)
+	if err != nil {
+		return out, metadata, fmt.Errorf("failed to parse endpoint URL: %w", err)
+	}
+
+	if len(awsmiddleware.GetSigningName(ctx)) == 0 {
+		signingName := endpoint.SigningName
+		if len(signingName) == 0 {
+			signingName = "sso-oauth"
+		}
+		ctx = awsmiddleware.SetSigningName(ctx, signingName)
+	}
+	ctx = awsmiddleware.SetEndpointSource(ctx, endpoint.Source)
+	ctx = smithyhttp.SetHostnameImmutable(ctx, endpoint.HostnameImmutable)
+	ctx = awsmiddleware.SetSigningRegion(ctx, endpoint.SigningRegion)
+	ctx = awsmiddleware.SetPartitionID(ctx, endpoint.PartitionID)
+	return next.HandleSerialize(ctx, in)
+}
+func addResolveEndpointMiddleware(stack *middleware.Stack, o Options) error {
+	return stack.Serialize.Insert(&ResolveEndpoint{
+		Resolver: o.EndpointResolver,
+		Options:  o.EndpointOptions,
+	}, "OperationSerializer", middleware.Before)
+}
+
+func removeResolveEndpointMiddleware(stack *middleware.Stack) error {
+	_, err := stack.Serialize.Remove((&ResolveEndpoint{}).ID())
+	return err
+}
+
+type wrappedEndpointResolver struct {
+	awsResolver aws.EndpointResolverWithOptions
+}
+
+func (w *wrappedEndpointResolver) ResolveEndpoint(region string, options EndpointResolverOptions) (endpoint aws.Endpoint, err error) {
+	return w.awsResolver.ResolveEndpoint(ServiceID, region, options)
+}
+
+type awsEndpointResolverAdaptor func(service, region string) (aws.Endpoint, error)
+
+func (a awsEndpointResolverAdaptor) ResolveEndpoint(service, region string, options ...interface{}) (aws.Endpoint, error) {
+	return a(service, region)
+}
+
+var _ aws.EndpointResolverWithOptions = awsEndpointResolverAdaptor(nil)
+
+// withEndpointResolver returns an aws.EndpointResolverWithOptions that first delegates endpoint resolution to the awsResolver.
+// If awsResolver returns aws.EndpointNotFoundError error, the v1 resolver middleware will swallow the error,
+// and set an appropriate context flag such that fallback will occur when EndpointResolverV2 is invoked
+// via its middleware.
+//
+// If another error (besides aws.EndpointNotFoundError) is returned, then that error will be propagated.
+func withEndpointResolver(awsResolver aws.EndpointResolver, awsResolverWithOptions aws.EndpointResolverWithOptions) EndpointResolver {
+	var resolver aws.EndpointResolverWithOptions
+
+	if awsResolverWithOptions != nil {
+		resolver = awsResolverWithOptions
+	} else if awsResolver != nil {
+		resolver = awsEndpointResolverAdaptor(awsResolver.ResolveEndpoint)
+	}
+
+	return &wrappedEndpointResolver{
+		awsResolver: resolver,
+	}
+}
+
+func finalizeClientEndpointResolverOptions(options *Options) {
+	options.EndpointOptions.LogDeprecated = options.ClientLogMode.IsDeprecatedUsage()
+
+	if len(options.EndpointOptions.ResolvedRegion) == 0 {
+		const fipsInfix = "-fips-"
+		const fipsPrefix = "fips-"
+		const fipsSuffix = "-fips"
+
+		if strings.Contains(options.Region, fipsInfix) ||
+			strings.Contains(options.Region, fipsPrefix) ||
+			strings.Contains(options.Region, fipsSuffix) {
+			options.EndpointOptions.ResolvedRegion = strings.ReplaceAll(strings.ReplaceAll(strings.ReplaceAll(
+				options.Region, fipsInfix, "-"), fipsPrefix, ""), fipsSuffix, "")
+			options.EndpointOptions.UseFIPSEndpoint = aws.FIPSEndpointStateEnabled
+		}
+	}
+
+}
+
+func resolveEndpointResolverV2(options *Options) {
+	if options.EndpointResolverV2 == nil {
+		options.EndpointResolverV2 = NewDefaultEndpointResolverV2()
+	}
+}
+
+func resolveBaseEndpoint(cfg aws.Config, o *Options) {
+	if cfg.BaseEndpoint != nil {
+		o.BaseEndpoint = cfg.BaseEndpoint
+	}
+
+	_, g := os.LookupEnv("AWS_ENDPOINT_URL")
+	_, s := os.LookupEnv("AWS_ENDPOINT_URL_SSO_OIDC")
+
+	if g && !s {
+		return
+	}
+
+	value, found, err := internalConfig.ResolveServiceBaseEndpoint(context.Background(), "SSO OIDC", cfg.ConfigSources)
+	if found && err == nil {
+		o.BaseEndpoint = &value
+	}
+}
+
+func bindRegion(region string) *string {
+	if region == "" {
+		return nil
+	}
+	return aws.String(endpoints.MapFIPSRegion(region))
+}
+
+// EndpointParameters provides the parameters that influence how endpoints are
+// resolved.
+type EndpointParameters struct {
+	// The AWS region used to dispatch the request.
+	//
+	// Parameter is
+	// required.
+	//
+	// AWS::Region
+	Region *string
+
+	// When true, use the dual-stack endpoint. If the configured endpoint does not
+	// support dual-stack, dispatching the request MAY return an error.
+	//
+	// Defaults to
+	// false if no value is provided.
+	//
+	// AWS::UseDualStack
+	UseDualStack *bool
+
+	// When true, send this request to the FIPS-compliant regional endpoint. If the
+	// configured endpoint does not have a FIPS compliant endpoint, dispatching the
+	// request will return an error.
+	//
+	// Defaults to false if no value is
+	// provided.
+	//
+	// AWS::UseFIPS
+	UseFIPS *bool
+
+	// Override the endpoint used to send this request
+	//
+	// Parameter is
+	// required.
+	//
+	// SDK::Endpoint
+	Endpoint *string
+}
+
+// ValidateRequired validates required parameters are set.
+func (p EndpointParameters) ValidateRequired() error {
+	if p.UseDualStack == nil {
+		return fmt.Errorf("parameter UseDualStack is required")
+	}
+
+	if p.UseFIPS == nil {
+		return fmt.Errorf("parameter UseFIPS is required")
+	}
+
+	return nil
+}
+
+// WithDefaults returns a shallow copy of EndpointParameters with default values
+// applied to members where applicable.
+func (p EndpointParameters) WithDefaults() EndpointParameters {
+	if p.UseDualStack == nil {
+		p.UseDualStack = ptr.Bool(false)
+	}
+
+	if p.UseFIPS == nil {
+		p.UseFIPS = ptr.Bool(false)
+	}
+	return p
+}
+
+type stringSlice []string
+
+func (s stringSlice) Get(i int) *string {
+	if i < 0 || i >= len(s) {
+		return nil
+	}
+
+	v := s[i]
+	return &v
+}
+
+// EndpointResolverV2 provides the interface for resolving service endpoints.
+type EndpointResolverV2 interface {
+	// ResolveEndpoint attempts to resolve the endpoint with the provided options,
+	// returning the endpoint if found. Otherwise an error is returned.
+	ResolveEndpoint(ctx context.Context, params EndpointParameters) (
+		smithyendpoints.Endpoint, error,
+	)
+}
+
+// resolver provides the implementation for resolving endpoints.
+type resolver struct{}
+
+func NewDefaultEndpointResolverV2() EndpointResolverV2 {
+	return &resolver{}
+}
+
+// ResolveEndpoint attempts to resolve the endpoint with the provided options,
+// returning the endpoint if found. Otherwise an error is returned.
+func (r *resolver) ResolveEndpoint(
+	ctx context.Context, params EndpointParameters,
+) (
+	endpoint smithyendpoints.Endpoint, err error,
+) {
+	params = params.WithDefaults()
+	if err = params.ValidateRequired(); err != nil {
+		return endpoint, fmt.Errorf("endpoint parameters are not valid, %w", err)
+	}
+	_UseDualStack := *params.UseDualStack
+	_UseFIPS := *params.UseFIPS
+
+	if exprVal := params.Endpoint; exprVal != nil {
+		_Endpoint := *exprVal
+		_ = _Endpoint
+		if _UseFIPS == true {
+			return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid Configuration: FIPS and custom endpoint are not supported")
+		}
+		if _UseDualStack == true {
+			return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid Configuration: Dualstack and custom endpoint are not supported")
+		}
+		uriString := _Endpoint
+
+		uri, err := url.Parse(uriString)
+		if err != nil {
+			return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+		}
+
+		return smithyendpoints.Endpoint{
+			URI:     *uri,
+			Headers: http.Header{},
+		}, nil
+	}
+	if exprVal := params.Region; exprVal != nil {
+		_Region := *exprVal
+		_ = _Region
+		if exprVal := awsrulesfn.GetPartition(_Region); exprVal != nil {
+			_PartitionResult := *exprVal
+			_ = _PartitionResult
+			if _UseFIPS == true {
+				if _UseDualStack == true {
+					if true == _PartitionResult.SupportsFIPS {
+						if true == _PartitionResult.SupportsDualStack {
+							uriString := func() string {
+								var out strings.Builder
+								out.WriteString("https://oidc-fips.")
+								out.WriteString(_Region)
+								out.WriteString(".")
+								out.WriteString(_PartitionResult.DualStackDnsSuffix)
+								return out.String()
+							}()
+
+							uri, err := url.Parse(uriString)
+							if err != nil {
+								return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+							}
+
+							return smithyendpoints.Endpoint{
+								URI:     *uri,
+								Headers: http.Header{},
+							}, nil
+						}
+					}
+					return endpoint, fmt.Errorf("endpoint rule error, %s", "FIPS and DualStack are enabled, but this partition does not support one or both")
+				}
+			}
+			if _UseFIPS == true {
+				if _PartitionResult.SupportsFIPS == true {
+					if _PartitionResult.Name == "aws-us-gov" {
+						uriString := func() string {
+							var out strings.Builder
+							out.WriteString("https://oidc.")
+							out.WriteString(_Region)
+							out.WriteString(".amazonaws.com")
+							return out.String()
+						}()
+
+						uri, err := url.Parse(uriString)
+						if err != nil {
+							return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+						}
+
+						return smithyendpoints.Endpoint{
+							URI:     *uri,
+							Headers: http.Header{},
+						}, nil
+					}
+					uriString := func() string {
+						var out strings.Builder
+						out.WriteString("https://oidc-fips.")
+						out.WriteString(_Region)
+						out.WriteString(".")
+						out.WriteString(_PartitionResult.DnsSuffix)
+						return out.String()
+					}()
+
+					uri, err := url.Parse(uriString)
+					if err != nil {
+						return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+					}
+
+					return smithyendpoints.Endpoint{
+						URI:     *uri,
+						Headers: http.Header{},
+					}, nil
+				}
+				return endpoint, fmt.Errorf("endpoint rule error, %s", "FIPS is enabled but this partition does not support FIPS")
+			}
+			if _UseDualStack == true {
+				if true == _PartitionResult.SupportsDualStack {
+					uriString := func() string {
+						var out strings.Builder
+						out.WriteString("https://oidc.")
+						out.WriteString(_Region)
+						out.WriteString(".")
+						out.WriteString(_PartitionResult.DualStackDnsSuffix)
+						return out.String()
+					}()
+
+					uri, err := url.Parse(uriString)
+					if err != nil {
+						return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+					}
+
+					return smithyendpoints.Endpoint{
+						URI:     *uri,
+						Headers: http.Header{},
+					}, nil
+				}
+				return endpoint, fmt.Errorf("endpoint rule error, %s", "DualStack is enabled but this partition does not support DualStack")
+			}
+			uriString := func() string {
+				var out strings.Builder
+				out.WriteString("https://oidc.")
+				out.WriteString(_Region)
+				out.WriteString(".")
+				out.WriteString(_PartitionResult.DnsSuffix)
+				return out.String()
+			}()
+
+			uri, err := url.Parse(uriString)
+			if err != nil {
+				return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+			}
+
+			return smithyendpoints.Endpoint{
+				URI:     *uri,
+				Headers: http.Header{},
+			}, nil
+		}
+		return endpoint, fmt.Errorf("Endpoint resolution failed. Invalid operation or environment input.")
+	}
+	return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid Configuration: Missing Region")
+}
+
+type endpointParamsBinder interface {
+	bindEndpointParams(*EndpointParameters)
+}
+
+func bindEndpointParams(ctx context.Context, input interface{}, options Options) *EndpointParameters {
+	params := &EndpointParameters{}
+
+	params.Region = bindRegion(options.Region)
+	params.UseDualStack = aws.Bool(options.EndpointOptions.UseDualStackEndpoint == aws.DualStackEndpointStateEnabled)
+	params.UseFIPS = aws.Bool(options.EndpointOptions.UseFIPSEndpoint == aws.FIPSEndpointStateEnabled)
+	params.Endpoint = options.BaseEndpoint
+
+	if b, ok := input.(endpointParamsBinder); ok {
+		b.bindEndpointParams(params)
+	}
+
+	return params
+}
+
+type resolveEndpointV2Middleware struct {
+	options Options
+}
+
+func (*resolveEndpointV2Middleware) ID() string {
+	return "ResolveEndpointV2"
+}
+
+func (m *resolveEndpointV2Middleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (
+	out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
+) {
+	if awsmiddleware.GetRequiresLegacyEndpoints(ctx) {
+		return next.HandleFinalize(ctx, in)
+	}
+
+	if err := checkAccountID(getIdentity(ctx), m.options.AccountIDEndpointMode); err != nil {
+		return out, metadata, fmt.Errorf("invalid accountID set: %w", err)
+	}
+
+	req, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, fmt.Errorf("unknown transport type %T", in.Request)
+	}
+
+	if m.options.EndpointResolverV2 == nil {
+		return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil")
+	}
+
+	params := bindEndpointParams(ctx, getOperationInput(ctx), m.options)
+	endpt, err := m.options.EndpointResolverV2.ResolveEndpoint(ctx, *params)
+	if err != nil {
+		return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err)
+	}
+
+	if endpt.URI.RawPath == "" && req.URL.RawPath != "" {
+		endpt.URI.RawPath = endpt.URI.Path
+	}
+	req.URL.Scheme = endpt.URI.Scheme
+	req.URL.Host = endpt.URI.Host
+	req.URL.Path = smithyhttp.JoinPath(endpt.URI.Path, req.URL.Path)
+	req.URL.RawPath = smithyhttp.JoinPath(endpt.URI.RawPath, req.URL.RawPath)
+	for k := range endpt.Headers {
+		req.Header.Set(k, endpt.Headers.Get(k))
+	}
+
+	rscheme := getResolvedAuthScheme(ctx)
+	if rscheme == nil {
+		return out, metadata, fmt.Errorf("no resolved auth scheme")
+	}
+
+	opts, _ := smithyauth.GetAuthOptions(&endpt.Properties)
+	for _, o := range opts {
+		rscheme.SignerProperties.SetAll(&o.SignerProperties)
+	}
+
+	return next.HandleFinalize(ctx, in)
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/generated.json 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/generated.json
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/generated.json	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/generated.json	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,35 @@
+{
+    "dependencies": {
+        "github.com/aws/aws-sdk-go-v2": "v1.4.0",
+        "github.com/aws/aws-sdk-go-v2/internal/configsources": "v0.0.0-00010101000000-000000000000",
+        "github.com/aws/aws-sdk-go-v2/internal/endpoints/v2": "v2.0.0-00010101000000-000000000000",
+        "github.com/aws/smithy-go": "v1.4.0"
+    },
+    "files": [
+        "api_client.go",
+        "api_client_test.go",
+        "api_op_CreateToken.go",
+        "api_op_CreateTokenWithIAM.go",
+        "api_op_RegisterClient.go",
+        "api_op_StartDeviceAuthorization.go",
+        "auth.go",
+        "deserializers.go",
+        "doc.go",
+        "endpoints.go",
+        "endpoints_config_test.go",
+        "endpoints_test.go",
+        "generated.json",
+        "internal/endpoints/endpoints.go",
+        "internal/endpoints/endpoints_test.go",
+        "options.go",
+        "protocol_test.go",
+        "serializers.go",
+        "snapshot_test.go",
+        "types/errors.go",
+        "types/types.go",
+        "validators.go"
+    ],
+    "go": "1.15",
+    "module": "github.com/aws/aws-sdk-go-v2/service/ssooidc",
+    "unstable": false
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/go_module_metadata.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/go_module_metadata.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/go_module_metadata.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/go_module_metadata.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,6 @@
+// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT.
+
+package ssooidc
+
+// goModuleVersion is the tagged release for this module
+const goModuleVersion = "1.26.4"
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/internal/endpoints/endpoints.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/internal/endpoints/endpoints.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/internal/endpoints/endpoints.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/internal/endpoints/endpoints.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,566 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package endpoints
+
+import (
+	"github.com/aws/aws-sdk-go-v2/aws"
+	endpoints "github.com/aws/aws-sdk-go-v2/internal/endpoints/v2"
+	"github.com/aws/smithy-go/logging"
+	"regexp"
+)
+
+// Options is the endpoint resolver configuration options
+type Options struct {
+	// Logger is a logging implementation that log events should be sent to.
+	Logger logging.Logger
+
+	// LogDeprecated indicates that deprecated endpoints should be logged to the
+	// provided logger.
+	LogDeprecated bool
+
+	// ResolvedRegion is used to override the region to be resolved, rather then the
+	// using the value passed to the ResolveEndpoint method. This value is used by the
+	// SDK to translate regions like fips-us-east-1 or us-east-1-fips to an alternative
+	// name. You must not set this value directly in your application.
+	ResolvedRegion string
+
+	// DisableHTTPS informs the resolver to return an endpoint that does not use the
+	// HTTPS scheme.
+	DisableHTTPS bool
+
+	// UseDualStackEndpoint specifies the resolver must resolve a dual-stack endpoint.
+	UseDualStackEndpoint aws.DualStackEndpointState
+
+	// UseFIPSEndpoint specifies the resolver must resolve a FIPS endpoint.
+	UseFIPSEndpoint aws.FIPSEndpointState
+}
+
+func (o Options) GetResolvedRegion() string {
+	return o.ResolvedRegion
+}
+
+func (o Options) GetDisableHTTPS() bool {
+	return o.DisableHTTPS
+}
+
+func (o Options) GetUseDualStackEndpoint() aws.DualStackEndpointState {
+	return o.UseDualStackEndpoint
+}
+
+func (o Options) GetUseFIPSEndpoint() aws.FIPSEndpointState {
+	return o.UseFIPSEndpoint
+}
+
+func transformToSharedOptions(options Options) endpoints.Options {
+	return endpoints.Options{
+		Logger:               options.Logger,
+		LogDeprecated:        options.LogDeprecated,
+		ResolvedRegion:       options.ResolvedRegion,
+		DisableHTTPS:         options.DisableHTTPS,
+		UseDualStackEndpoint: options.UseDualStackEndpoint,
+		UseFIPSEndpoint:      options.UseFIPSEndpoint,
+	}
+}
+
+// Resolver SSO OIDC endpoint resolver
+type Resolver struct {
+	partitions endpoints.Partitions
+}
+
+// ResolveEndpoint resolves the service endpoint for the given region and options
+func (r *Resolver) ResolveEndpoint(region string, options Options) (endpoint aws.Endpoint, err error) {
+	if len(region) == 0 {
+		return endpoint, &aws.MissingRegionError{}
+	}
+
+	opt := transformToSharedOptions(options)
+	return r.partitions.ResolveEndpoint(region, opt)
+}
+
+// New returns a new Resolver
+func New() *Resolver {
+	return &Resolver{
+		partitions: defaultPartitions,
+	}
+}
+
+var partitionRegexp = struct {
+	Aws      *regexp.Regexp
+	AwsCn    *regexp.Regexp
+	AwsIso   *regexp.Regexp
+	AwsIsoB  *regexp.Regexp
+	AwsIsoE  *regexp.Regexp
+	AwsIsoF  *regexp.Regexp
+	AwsUsGov *regexp.Regexp
+}{
+
+	Aws:      regexp.MustCompile("^(us|eu|ap|sa|ca|me|af|il)\\-\\w+\\-\\d+$"),
+	AwsCn:    regexp.MustCompile("^cn\\-\\w+\\-\\d+$"),
+	AwsIso:   regexp.MustCompile("^us\\-iso\\-\\w+\\-\\d+$"),
+	AwsIsoB:  regexp.MustCompile("^us\\-isob\\-\\w+\\-\\d+$"),
+	AwsIsoE:  regexp.MustCompile("^eu\\-isoe\\-\\w+\\-\\d+$"),
+	AwsIsoF:  regexp.MustCompile("^us\\-isof\\-\\w+\\-\\d+$"),
+	AwsUsGov: regexp.MustCompile("^us\\-gov\\-\\w+\\-\\d+$"),
+}
+
+var defaultPartitions = endpoints.Partitions{
+	{
+		ID: "aws",
+		Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{
+			{
+				Variant: endpoints.DualStackVariant,
+			}: {
+				Hostname:          "oidc.{region}.api.aws",
+				Protocols:         []string{"https"},
+				SignatureVersions: []string{"v4"},
+			},
+			{
+				Variant: endpoints.FIPSVariant,
+			}: {
+				Hostname:          "oidc-fips.{region}.amazonaws.com",
+				Protocols:         []string{"https"},
+				SignatureVersions: []string{"v4"},
+			},
+			{
+				Variant: endpoints.FIPSVariant | endpoints.DualStackVariant,
+			}: {
+				Hostname:          "oidc-fips.{region}.api.aws",
+				Protocols:         []string{"https"},
+				SignatureVersions: []string{"v4"},
+			},
+			{
+				Variant: 0,
+			}: {
+				Hostname:          "oidc.{region}.amazonaws.com",
+				Protocols:         []string{"https"},
+				SignatureVersions: []string{"v4"},
+			},
+		},
+		RegionRegex:    partitionRegexp.Aws,
+		IsRegionalized: true,
+		Endpoints: endpoints.Endpoints{
+			endpoints.EndpointKey{
+				Region: "af-south-1",
+			}: endpoints.Endpoint{
+				Hostname: "oidc.af-south-1.amazonaws.com",
+				CredentialScope: endpoints.CredentialScope{
+					Region: "af-south-1",
+				},
+			},
+			endpoints.EndpointKey{
+				Region: "ap-east-1",
+			}: endpoints.Endpoint{
+				Hostname: "oidc.ap-east-1.amazonaws.com",
+				CredentialScope: endpoints.CredentialScope{
+					Region: "ap-east-1",
+				},
+			},
+			endpoints.EndpointKey{
+				Region: "ap-northeast-1",
+			}: endpoints.Endpoint{
+				Hostname: "oidc.ap-northeast-1.amazonaws.com",
+				CredentialScope: endpoints.CredentialScope{
+					Region: "ap-northeast-1",
+				},
+			},
+			endpoints.EndpointKey{
+				Region: "ap-northeast-2",
+			}: endpoints.Endpoint{
+				Hostname: "oidc.ap-northeast-2.amazonaws.com",
+				CredentialScope: endpoints.CredentialScope{
+					Region: "ap-northeast-2",
+				},
+			},
+			endpoints.EndpointKey{
+				Region: "ap-northeast-3",
+			}: endpoints.Endpoint{
+				Hostname: "oidc.ap-northeast-3.amazonaws.com",
+				CredentialScope: endpoints.CredentialScope{
+					Region: "ap-northeast-3",
+				},
+			},
+			endpoints.EndpointKey{
+				Region: "ap-south-1",
+			}: endpoints.Endpoint{
+				Hostname: "oidc.ap-south-1.amazonaws.com",
+				CredentialScope: endpoints.CredentialScope{
+					Region: "ap-south-1",
+				},
+			},
+			endpoints.EndpointKey{
+				Region: "ap-south-2",
+			}: endpoints.Endpoint{
+				Hostname: "oidc.ap-south-2.amazonaws.com",
+				CredentialScope: endpoints.CredentialScope{
+					Region: "ap-south-2",
+				},
+			},
+			endpoints.EndpointKey{
+				Region: "ap-southeast-1",
+			}: endpoints.Endpoint{
+				Hostname: "oidc.ap-southeast-1.amazonaws.com",
+				CredentialScope: endpoints.CredentialScope{
+					Region: "ap-southeast-1",
+				},
+			},
+			endpoints.EndpointKey{
+				Region: "ap-southeast-2",
+			}: endpoints.Endpoint{
+				Hostname: "oidc.ap-southeast-2.amazonaws.com",
+				CredentialScope: endpoints.CredentialScope{
+					Region: "ap-southeast-2",
+				},
+			},
+			endpoints.EndpointKey{
+				Region: "ap-southeast-3",
+			}: endpoints.Endpoint{
+				Hostname: "oidc.ap-southeast-3.amazonaws.com",
+				CredentialScope: endpoints.CredentialScope{
+					Region: "ap-southeast-3",
+				},
+			},
+			endpoints.EndpointKey{
+				Region: "ap-southeast-4",
+			}: endpoints.Endpoint{
+				Hostname: "oidc.ap-southeast-4.amazonaws.com",
+				CredentialScope: endpoints.CredentialScope{
+					Region: "ap-southeast-4",
+				},
+			},
+			endpoints.EndpointKey{
+				Region: "ca-central-1",
+			}: endpoints.Endpoint{
+				Hostname: "oidc.ca-central-1.amazonaws.com",
+				CredentialScope: endpoints.CredentialScope{
+					Region: "ca-central-1",
+				},
+			},
+			endpoints.EndpointKey{
+				Region: "ca-west-1",
+			}: endpoints.Endpoint{
+				Hostname: "oidc.ca-west-1.amazonaws.com",
+				CredentialScope: endpoints.CredentialScope{
+					Region: "ca-west-1",
+				},
+			},
+			endpoints.EndpointKey{
+				Region: "eu-central-1",
+			}: endpoints.Endpoint{
+				Hostname: "oidc.eu-central-1.amazonaws.com",
+				CredentialScope: endpoints.CredentialScope{
+					Region: "eu-central-1",
+				},
+			},
+			endpoints.EndpointKey{
+				Region: "eu-central-2",
+			}: endpoints.Endpoint{
+				Hostname: "oidc.eu-central-2.amazonaws.com",
+				CredentialScope: endpoints.CredentialScope{
+					Region: "eu-central-2",
+				},
+			},
+			endpoints.EndpointKey{
+				Region: "eu-north-1",
+			}: endpoints.Endpoint{
+				Hostname: "oidc.eu-north-1.amazonaws.com",
+				CredentialScope: endpoints.CredentialScope{
+					Region: "eu-north-1",
+				},
+			},
+			endpoints.EndpointKey{
+				Region: "eu-south-1",
+			}: endpoints.Endpoint{
+				Hostname: "oidc.eu-south-1.amazonaws.com",
+				CredentialScope: endpoints.CredentialScope{
+					Region: "eu-south-1",
+				},
+			},
+			endpoints.EndpointKey{
+				Region: "eu-south-2",
+			}: endpoints.Endpoint{
+				Hostname: "oidc.eu-south-2.amazonaws.com",
+				CredentialScope: endpoints.CredentialScope{
+					Region: "eu-south-2",
+				},
+			},
+			endpoints.EndpointKey{
+				Region: "eu-west-1",
+			}: endpoints.Endpoint{
+				Hostname: "oidc.eu-west-1.amazonaws.com",
+				CredentialScope: endpoints.CredentialScope{
+					Region: "eu-west-1",
+				},
+			},
+			endpoints.EndpointKey{
+				Region: "eu-west-2",
+			}: endpoints.Endpoint{
+				Hostname: "oidc.eu-west-2.amazonaws.com",
+				CredentialScope: endpoints.CredentialScope{
+					Region: "eu-west-2",
+				},
+			},
+			endpoints.EndpointKey{
+				Region: "eu-west-3",
+			}: endpoints.Endpoint{
+				Hostname: "oidc.eu-west-3.amazonaws.com",
+				CredentialScope: endpoints.CredentialScope{
+					Region: "eu-west-3",
+				},
+			},
+			endpoints.EndpointKey{
+				Region: "il-central-1",
+			}: endpoints.Endpoint{
+				Hostname: "oidc.il-central-1.amazonaws.com",
+				CredentialScope: endpoints.CredentialScope{
+					Region: "il-central-1",
+				},
+			},
+			endpoints.EndpointKey{
+				Region: "me-central-1",
+			}: endpoints.Endpoint{
+				Hostname: "oidc.me-central-1.amazonaws.com",
+				CredentialScope: endpoints.CredentialScope{
+					Region: "me-central-1",
+				},
+			},
+			endpoints.EndpointKey{
+				Region: "me-south-1",
+			}: endpoints.Endpoint{
+				Hostname: "oidc.me-south-1.amazonaws.com",
+				CredentialScope: endpoints.CredentialScope{
+					Region: "me-south-1",
+				},
+			},
+			endpoints.EndpointKey{
+				Region: "sa-east-1",
+			}: endpoints.Endpoint{
+				Hostname: "oidc.sa-east-1.amazonaws.com",
+				CredentialScope: endpoints.CredentialScope{
+					Region: "sa-east-1",
+				},
+			},
+			endpoints.EndpointKey{
+				Region: "us-east-1",
+			}: endpoints.Endpoint{
+				Hostname: "oidc.us-east-1.amazonaws.com",
+				CredentialScope: endpoints.CredentialScope{
+					Region: "us-east-1",
+				},
+			},
+			endpoints.EndpointKey{
+				Region: "us-east-2",
+			}: endpoints.Endpoint{
+				Hostname: "oidc.us-east-2.amazonaws.com",
+				CredentialScope: endpoints.CredentialScope{
+					Region: "us-east-2",
+				},
+			},
+			endpoints.EndpointKey{
+				Region: "us-west-1",
+			}: endpoints.Endpoint{
+				Hostname: "oidc.us-west-1.amazonaws.com",
+				CredentialScope: endpoints.CredentialScope{
+					Region: "us-west-1",
+				},
+			},
+			endpoints.EndpointKey{
+				Region: "us-west-2",
+			}: endpoints.Endpoint{
+				Hostname: "oidc.us-west-2.amazonaws.com",
+				CredentialScope: endpoints.CredentialScope{
+					Region: "us-west-2",
+				},
+			},
+		},
+	},
+	{
+		ID: "aws-cn",
+		Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{
+			{
+				Variant: endpoints.DualStackVariant,
+			}: {
+				Hostname:          "oidc.{region}.api.amazonwebservices.com.cn",
+				Protocols:         []string{"https"},
+				SignatureVersions: []string{"v4"},
+			},
+			{
+				Variant: endpoints.FIPSVariant,
+			}: {
+				Hostname:          "oidc-fips.{region}.amazonaws.com.cn",
+				Protocols:         []string{"https"},
+				SignatureVersions: []string{"v4"},
+			},
+			{
+				Variant: endpoints.FIPSVariant | endpoints.DualStackVariant,
+			}: {
+				Hostname:          "oidc-fips.{region}.api.amazonwebservices.com.cn",
+				Protocols:         []string{"https"},
+				SignatureVersions: []string{"v4"},
+			},
+			{
+				Variant: 0,
+			}: {
+				Hostname:          "oidc.{region}.amazonaws.com.cn",
+				Protocols:         []string{"https"},
+				SignatureVersions: []string{"v4"},
+			},
+		},
+		RegionRegex:    partitionRegexp.AwsCn,
+		IsRegionalized: true,
+		Endpoints: endpoints.Endpoints{
+			endpoints.EndpointKey{
+				Region: "cn-north-1",
+			}: endpoints.Endpoint{
+				Hostname: "oidc.cn-north-1.amazonaws.com.cn",
+				CredentialScope: endpoints.CredentialScope{
+					Region: "cn-north-1",
+				},
+			},
+			endpoints.EndpointKey{
+				Region: "cn-northwest-1",
+			}: endpoints.Endpoint{
+				Hostname: "oidc.cn-northwest-1.amazonaws.com.cn",
+				CredentialScope: endpoints.CredentialScope{
+					Region: "cn-northwest-1",
+				},
+			},
+		},
+	},
+	{
+		ID: "aws-iso",
+		Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{
+			{
+				Variant: endpoints.FIPSVariant,
+			}: {
+				Hostname:          "oidc-fips.{region}.c2s.ic.gov",
+				Protocols:         []string{"https"},
+				SignatureVersions: []string{"v4"},
+			},
+			{
+				Variant: 0,
+			}: {
+				Hostname:          "oidc.{region}.c2s.ic.gov",
+				Protocols:         []string{"https"},
+				SignatureVersions: []string{"v4"},
+			},
+		},
+		RegionRegex:    partitionRegexp.AwsIso,
+		IsRegionalized: true,
+	},
+	{
+		ID: "aws-iso-b",
+		Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{
+			{
+				Variant: endpoints.FIPSVariant,
+			}: {
+				Hostname:          "oidc-fips.{region}.sc2s.sgov.gov",
+				Protocols:         []string{"https"},
+				SignatureVersions: []string{"v4"},
+			},
+			{
+				Variant: 0,
+			}: {
+				Hostname:          "oidc.{region}.sc2s.sgov.gov",
+				Protocols:         []string{"https"},
+				SignatureVersions: []string{"v4"},
+			},
+		},
+		RegionRegex:    partitionRegexp.AwsIsoB,
+		IsRegionalized: true,
+	},
+	{
+		ID: "aws-iso-e",
+		Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{
+			{
+				Variant: endpoints.FIPSVariant,
+			}: {
+				Hostname:          "oidc-fips.{region}.cloud.adc-e.uk",
+				Protocols:         []string{"https"},
+				SignatureVersions: []string{"v4"},
+			},
+			{
+				Variant: 0,
+			}: {
+				Hostname:          "oidc.{region}.cloud.adc-e.uk",
+				Protocols:         []string{"https"},
+				SignatureVersions: []string{"v4"},
+			},
+		},
+		RegionRegex:    partitionRegexp.AwsIsoE,
+		IsRegionalized: true,
+	},
+	{
+		ID: "aws-iso-f",
+		Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{
+			{
+				Variant: endpoints.FIPSVariant,
+			}: {
+				Hostname:          "oidc-fips.{region}.csp.hci.ic.gov",
+				Protocols:         []string{"https"},
+				SignatureVersions: []string{"v4"},
+			},
+			{
+				Variant: 0,
+			}: {
+				Hostname:          "oidc.{region}.csp.hci.ic.gov",
+				Protocols:         []string{"https"},
+				SignatureVersions: []string{"v4"},
+			},
+		},
+		RegionRegex:    partitionRegexp.AwsIsoF,
+		IsRegionalized: true,
+	},
+	{
+		ID: "aws-us-gov",
+		Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{
+			{
+				Variant: endpoints.DualStackVariant,
+			}: {
+				Hostname:          "oidc.{region}.api.aws",
+				Protocols:         []string{"https"},
+				SignatureVersions: []string{"v4"},
+			},
+			{
+				Variant: endpoints.FIPSVariant,
+			}: {
+				Hostname:          "oidc-fips.{region}.amazonaws.com",
+				Protocols:         []string{"https"},
+				SignatureVersions: []string{"v4"},
+			},
+			{
+				Variant: endpoints.FIPSVariant | endpoints.DualStackVariant,
+			}: {
+				Hostname:          "oidc-fips.{region}.api.aws",
+				Protocols:         []string{"https"},
+				SignatureVersions: []string{"v4"},
+			},
+			{
+				Variant: 0,
+			}: {
+				Hostname:          "oidc.{region}.amazonaws.com",
+				Protocols:         []string{"https"},
+				SignatureVersions: []string{"v4"},
+			},
+		},
+		RegionRegex:    partitionRegexp.AwsUsGov,
+		IsRegionalized: true,
+		Endpoints: endpoints.Endpoints{
+			endpoints.EndpointKey{
+				Region: "us-gov-east-1",
+			}: endpoints.Endpoint{
+				Hostname: "oidc.us-gov-east-1.amazonaws.com",
+				CredentialScope: endpoints.CredentialScope{
+					Region: "us-gov-east-1",
+				},
+			},
+			endpoints.EndpointKey{
+				Region: "us-gov-west-1",
+			}: endpoints.Endpoint{
+				Hostname: "oidc.us-gov-west-1.amazonaws.com",
+				CredentialScope: endpoints.CredentialScope{
+					Region: "us-gov-west-1",
+				},
+			},
+		},
+	},
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/options.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/options.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/options.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/options.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,227 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package ssooidc
+
+import (
+	"context"
+	"github.com/aws/aws-sdk-go-v2/aws"
+	awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+	internalauthsmithy "github.com/aws/aws-sdk-go-v2/internal/auth/smithy"
+	smithyauth "github.com/aws/smithy-go/auth"
+	"github.com/aws/smithy-go/logging"
+	"github.com/aws/smithy-go/middleware"
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+	"net/http"
+)
+
+type HTTPClient interface {
+	Do(*http.Request) (*http.Response, error)
+}
+
+type Options struct {
+	// Set of options to modify how an operation is invoked. These apply to all
+	// operations invoked for this client. Use functional options on operation call to
+	// modify this list for per operation behavior.
+	APIOptions []func(*middleware.Stack) error
+
+	// Indicates how aws account ID is applied in endpoint2.0 routing
+	AccountIDEndpointMode aws.AccountIDEndpointMode
+
+	// The optional application specific identifier appended to the User-Agent header.
+	AppID string
+
+	// This endpoint will be given as input to an EndpointResolverV2. It is used for
+	// providing a custom base endpoint that is subject to modifications by the
+	// processing EndpointResolverV2.
+	BaseEndpoint *string
+
+	// Configures the events that will be sent to the configured logger.
+	ClientLogMode aws.ClientLogMode
+
+	// The credentials object to use when signing requests.
+	Credentials aws.CredentialsProvider
+
+	// The configuration DefaultsMode that the SDK should use when constructing the
+	// clients initial default settings.
+	DefaultsMode aws.DefaultsMode
+
+	// The endpoint options to be used when attempting to resolve an endpoint.
+	EndpointOptions EndpointResolverOptions
+
+	// The service endpoint resolver.
+	//
+	// Deprecated: Deprecated: EndpointResolver and WithEndpointResolver. Providing a
+	// value for this field will likely prevent you from using any endpoint-related
+	// service features released after the introduction of EndpointResolverV2 and
+	// BaseEndpoint.
+	//
+	// To migrate an EndpointResolver implementation that uses a custom endpoint, set
+	// the client option BaseEndpoint instead.
+	EndpointResolver EndpointResolver
+
+	// Resolves the endpoint used for a particular service operation. This should be
+	// used over the deprecated EndpointResolver.
+	EndpointResolverV2 EndpointResolverV2
+
+	// Signature Version 4 (SigV4) Signer
+	HTTPSignerV4 HTTPSignerV4
+
+	// The logger writer interface to write logging messages to.
+	Logger logging.Logger
+
+	// The region to send requests to. (Required)
+	Region string
+
+	// RetryMaxAttempts specifies the maximum number attempts an API client will call
+	// an operation that fails with a retryable error. A value of 0 is ignored, and
+	// will not be used to configure the API client created default retryer, or modify
+	// per operation call's retry max attempts.
+	//
+	// If specified in an operation call's functional options with a value that is
+	// different than the constructed client's Options, the Client's Retryer will be
+	// wrapped to use the operation's specific RetryMaxAttempts value.
+	RetryMaxAttempts int
+
+	// RetryMode specifies the retry mode the API client will be created with, if
+	// Retryer option is not also specified.
+	//
+	// When creating a new API Clients this member will only be used if the Retryer
+	// Options member is nil. This value will be ignored if Retryer is not nil.
+	//
+	// Currently does not support per operation call overrides, may in the future.
+	RetryMode aws.RetryMode
+
+	// Retryer guides how HTTP requests should be retried in case of recoverable
+	// failures. When nil the API client will use a default retryer. The kind of
+	// default retry created by the API client can be changed with the RetryMode
+	// option.
+	Retryer aws.Retryer
+
+	// The RuntimeEnvironment configuration, only populated if the DefaultsMode is set
+	// to DefaultsModeAuto and is initialized using config.LoadDefaultConfig . You
+	// should not populate this structure programmatically, or rely on the values here
+	// within your applications.
+	RuntimeEnvironment aws.RuntimeEnvironment
+
+	// The initial DefaultsMode used when the client options were constructed. If the
+	// DefaultsMode was set to aws.DefaultsModeAuto this will store what the resolved
+	// value was at that point in time.
+	//
+	// Currently does not support per operation call overrides, may in the future.
+	resolvedDefaultsMode aws.DefaultsMode
+
+	// The HTTP client to invoke API calls with. Defaults to client's default HTTP
+	// implementation if nil.
+	HTTPClient HTTPClient
+
+	// The auth scheme resolver which determines how to authenticate for each
+	// operation.
+	AuthSchemeResolver AuthSchemeResolver
+
+	// The list of auth schemes supported by the client.
+	AuthSchemes []smithyhttp.AuthScheme
+}
+
+// Copy creates a clone where the APIOptions list is deep copied.
+func (o Options) Copy() Options {
+	to := o
+	to.APIOptions = make([]func(*middleware.Stack) error, len(o.APIOptions))
+	copy(to.APIOptions, o.APIOptions)
+
+	return to
+}
+
+func (o Options) GetIdentityResolver(schemeID string) smithyauth.IdentityResolver {
+	if schemeID == "aws.auth#sigv4" {
+		return getSigV4IdentityResolver(o)
+	}
+	if schemeID == "smithy.api#noAuth" {
+		return &smithyauth.AnonymousIdentityResolver{}
+	}
+	return nil
+}
+
+// WithAPIOptions returns a functional option for setting the Client's APIOptions
+// option.
+func WithAPIOptions(optFns ...func(*middleware.Stack) error) func(*Options) {
+	return func(o *Options) {
+		o.APIOptions = append(o.APIOptions, optFns...)
+	}
+}
+
+// Deprecated: EndpointResolver and WithEndpointResolver. Providing a value for
+// this field will likely prevent you from using any endpoint-related service
+// features released after the introduction of EndpointResolverV2 and BaseEndpoint.
+//
+// To migrate an EndpointResolver implementation that uses a custom endpoint, set
+// the client option BaseEndpoint instead.
+func WithEndpointResolver(v EndpointResolver) func(*Options) {
+	return func(o *Options) {
+		o.EndpointResolver = v
+	}
+}
+
+// WithEndpointResolverV2 returns a functional option for setting the Client's
+// EndpointResolverV2 option.
+func WithEndpointResolverV2(v EndpointResolverV2) func(*Options) {
+	return func(o *Options) {
+		o.EndpointResolverV2 = v
+	}
+}
+
+func getSigV4IdentityResolver(o Options) smithyauth.IdentityResolver {
+	if o.Credentials != nil {
+		return &internalauthsmithy.CredentialsProviderAdapter{Provider: o.Credentials}
+	}
+	return nil
+}
+
+// WithSigV4SigningName applies an override to the authentication workflow to
+// use the given signing name for SigV4-authenticated operations.
+//
+// This is an advanced setting. The value here is FINAL, taking precedence over
+// the resolved signing name from both auth scheme resolution and endpoint
+// resolution.
+func WithSigV4SigningName(name string) func(*Options) {
+	fn := func(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+		out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+	) {
+		return next.HandleInitialize(awsmiddleware.SetSigningName(ctx, name), in)
+	}
+	return func(o *Options) {
+		o.APIOptions = append(o.APIOptions, func(s *middleware.Stack) error {
+			return s.Initialize.Add(
+				middleware.InitializeMiddlewareFunc("withSigV4SigningName", fn),
+				middleware.Before,
+			)
+		})
+	}
+}
+
+// WithSigV4SigningRegion applies an override to the authentication workflow to
+// use the given signing region for SigV4-authenticated operations.
+//
+// This is an advanced setting. The value here is FINAL, taking precedence over
+// the resolved signing region from both auth scheme resolution and endpoint
+// resolution.
+func WithSigV4SigningRegion(region string) func(*Options) {
+	fn := func(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+		out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+	) {
+		return next.HandleInitialize(awsmiddleware.SetSigningRegion(ctx, region), in)
+	}
+	return func(o *Options) {
+		o.APIOptions = append(o.APIOptions, func(s *middleware.Stack) error {
+			return s.Initialize.Add(
+				middleware.InitializeMiddlewareFunc("withSigV4SigningRegion", fn),
+				middleware.Before,
+			)
+		})
+	}
+}
+
+func ignoreAnonymousAuth(options *Options) {
+	if aws.IsCredentialsProvider(options.Credentials, (*aws.AnonymousCredentials)(nil)) {
+		options.Credentials = nil
+	}
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/serializers.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/serializers.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/serializers.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/serializers.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,487 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package ssooidc
+
+import (
+	"bytes"
+	"context"
+	"fmt"
+	smithy "github.com/aws/smithy-go"
+	"github.com/aws/smithy-go/encoding/httpbinding"
+	smithyjson "github.com/aws/smithy-go/encoding/json"
+	"github.com/aws/smithy-go/middleware"
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+type awsRestjson1_serializeOpCreateToken struct {
+}
+
+func (*awsRestjson1_serializeOpCreateToken) ID() string {
+	return "OperationSerializer"
+}
+
+func (m *awsRestjson1_serializeOpCreateToken) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	request, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+	}
+
+	input, ok := in.Parameters.(*CreateTokenInput)
+	_ = input
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+	}
+
+	opPath, opQuery := httpbinding.SplitURI("/token")
+	request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
+	request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
+	request.Method = "POST"
+	var restEncoder *httpbinding.Encoder
+	if request.URL.RawPath == "" {
+		restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	} else {
+		request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+		restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+	}
+
+	if err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	restEncoder.SetHeader("Content-Type").String("application/json")
+
+	jsonEncoder := smithyjson.NewEncoder()
+	if err := awsRestjson1_serializeOpDocumentCreateTokenInput(input, jsonEncoder.Value); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	in.Request = request
+
+	return next.HandleSerialize(ctx, in)
+}
+func awsRestjson1_serializeOpHttpBindingsCreateTokenInput(v *CreateTokenInput, encoder *httpbinding.Encoder) error {
+	if v == nil {
+		return fmt.Errorf("unsupported serialization of nil %T", v)
+	}
+
+	return nil
+}
+
+func awsRestjson1_serializeOpDocumentCreateTokenInput(v *CreateTokenInput, value smithyjson.Value) error {
+	object := value.Object()
+	defer object.Close()
+
+	if v.ClientId != nil {
+		ok := object.Key("clientId")
+		ok.String(*v.ClientId)
+	}
+
+	if v.ClientSecret != nil {
+		ok := object.Key("clientSecret")
+		ok.String(*v.ClientSecret)
+	}
+
+	if v.Code != nil {
+		ok := object.Key("code")
+		ok.String(*v.Code)
+	}
+
+	if v.CodeVerifier != nil {
+		ok := object.Key("codeVerifier")
+		ok.String(*v.CodeVerifier)
+	}
+
+	if v.DeviceCode != nil {
+		ok := object.Key("deviceCode")
+		ok.String(*v.DeviceCode)
+	}
+
+	if v.GrantType != nil {
+		ok := object.Key("grantType")
+		ok.String(*v.GrantType)
+	}
+
+	if v.RedirectUri != nil {
+		ok := object.Key("redirectUri")
+		ok.String(*v.RedirectUri)
+	}
+
+	if v.RefreshToken != nil {
+		ok := object.Key("refreshToken")
+		ok.String(*v.RefreshToken)
+	}
+
+	if v.Scope != nil {
+		ok := object.Key("scope")
+		if err := awsRestjson1_serializeDocumentScopes(v.Scope, ok); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+type awsRestjson1_serializeOpCreateTokenWithIAM struct {
+}
+
+func (*awsRestjson1_serializeOpCreateTokenWithIAM) ID() string {
+	return "OperationSerializer"
+}
+
+func (m *awsRestjson1_serializeOpCreateTokenWithIAM) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	request, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+	}
+
+	input, ok := in.Parameters.(*CreateTokenWithIAMInput)
+	_ = input
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+	}
+
+	opPath, opQuery := httpbinding.SplitURI("/token?aws_iam=t")
+	request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
+	request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
+	request.Method = "POST"
+	var restEncoder *httpbinding.Encoder
+	if request.URL.RawPath == "" {
+		restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	} else {
+		request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+		restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+	}
+
+	if err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	restEncoder.SetHeader("Content-Type").String("application/json")
+
+	jsonEncoder := smithyjson.NewEncoder()
+	if err := awsRestjson1_serializeOpDocumentCreateTokenWithIAMInput(input, jsonEncoder.Value); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	in.Request = request
+
+	return next.HandleSerialize(ctx, in)
+}
+func awsRestjson1_serializeOpHttpBindingsCreateTokenWithIAMInput(v *CreateTokenWithIAMInput, encoder *httpbinding.Encoder) error {
+	if v == nil {
+		return fmt.Errorf("unsupported serialization of nil %T", v)
+	}
+
+	return nil
+}
+
+func awsRestjson1_serializeOpDocumentCreateTokenWithIAMInput(v *CreateTokenWithIAMInput, value smithyjson.Value) error {
+	object := value.Object()
+	defer object.Close()
+
+	if v.Assertion != nil {
+		ok := object.Key("assertion")
+		ok.String(*v.Assertion)
+	}
+
+	if v.ClientId != nil {
+		ok := object.Key("clientId")
+		ok.String(*v.ClientId)
+	}
+
+	if v.Code != nil {
+		ok := object.Key("code")
+		ok.String(*v.Code)
+	}
+
+	if v.CodeVerifier != nil {
+		ok := object.Key("codeVerifier")
+		ok.String(*v.CodeVerifier)
+	}
+
+	if v.GrantType != nil {
+		ok := object.Key("grantType")
+		ok.String(*v.GrantType)
+	}
+
+	if v.RedirectUri != nil {
+		ok := object.Key("redirectUri")
+		ok.String(*v.RedirectUri)
+	}
+
+	if v.RefreshToken != nil {
+		ok := object.Key("refreshToken")
+		ok.String(*v.RefreshToken)
+	}
+
+	if v.RequestedTokenType != nil {
+		ok := object.Key("requestedTokenType")
+		ok.String(*v.RequestedTokenType)
+	}
+
+	if v.Scope != nil {
+		ok := object.Key("scope")
+		if err := awsRestjson1_serializeDocumentScopes(v.Scope, ok); err != nil {
+			return err
+		}
+	}
+
+	if v.SubjectToken != nil {
+		ok := object.Key("subjectToken")
+		ok.String(*v.SubjectToken)
+	}
+
+	if v.SubjectTokenType != nil {
+		ok := object.Key("subjectTokenType")
+		ok.String(*v.SubjectTokenType)
+	}
+
+	return nil
+}
+
+type awsRestjson1_serializeOpRegisterClient struct {
+}
+
+func (*awsRestjson1_serializeOpRegisterClient) ID() string {
+	return "OperationSerializer"
+}
+
+func (m *awsRestjson1_serializeOpRegisterClient) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	request, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+	}
+
+	input, ok := in.Parameters.(*RegisterClientInput)
+	_ = input
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+	}
+
+	opPath, opQuery := httpbinding.SplitURI("/client/register")
+	request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
+	request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
+	request.Method = "POST"
+	var restEncoder *httpbinding.Encoder
+	if request.URL.RawPath == "" {
+		restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	} else {
+		request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+		restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+	}
+
+	if err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	restEncoder.SetHeader("Content-Type").String("application/json")
+
+	jsonEncoder := smithyjson.NewEncoder()
+	if err := awsRestjson1_serializeOpDocumentRegisterClientInput(input, jsonEncoder.Value); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	in.Request = request
+
+	return next.HandleSerialize(ctx, in)
+}
+func awsRestjson1_serializeOpHttpBindingsRegisterClientInput(v *RegisterClientInput, encoder *httpbinding.Encoder) error {
+	if v == nil {
+		return fmt.Errorf("unsupported serialization of nil %T", v)
+	}
+
+	return nil
+}
+
+func awsRestjson1_serializeOpDocumentRegisterClientInput(v *RegisterClientInput, value smithyjson.Value) error {
+	object := value.Object()
+	defer object.Close()
+
+	if v.ClientName != nil {
+		ok := object.Key("clientName")
+		ok.String(*v.ClientName)
+	}
+
+	if v.ClientType != nil {
+		ok := object.Key("clientType")
+		ok.String(*v.ClientType)
+	}
+
+	if v.EntitledApplicationArn != nil {
+		ok := object.Key("entitledApplicationArn")
+		ok.String(*v.EntitledApplicationArn)
+	}
+
+	if v.GrantTypes != nil {
+		ok := object.Key("grantTypes")
+		if err := awsRestjson1_serializeDocumentGrantTypes(v.GrantTypes, ok); err != nil {
+			return err
+		}
+	}
+
+	if v.IssuerUrl != nil {
+		ok := object.Key("issuerUrl")
+		ok.String(*v.IssuerUrl)
+	}
+
+	if v.RedirectUris != nil {
+		ok := object.Key("redirectUris")
+		if err := awsRestjson1_serializeDocumentRedirectUris(v.RedirectUris, ok); err != nil {
+			return err
+		}
+	}
+
+	if v.Scopes != nil {
+		ok := object.Key("scopes")
+		if err := awsRestjson1_serializeDocumentScopes(v.Scopes, ok); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+type awsRestjson1_serializeOpStartDeviceAuthorization struct {
+}
+
+func (*awsRestjson1_serializeOpStartDeviceAuthorization) ID() string {
+	return "OperationSerializer"
+}
+
+func (m *awsRestjson1_serializeOpStartDeviceAuthorization) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	request, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+	}
+
+	input, ok := in.Parameters.(*StartDeviceAuthorizationInput)
+	_ = input
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+	}
+
+	opPath, opQuery := httpbinding.SplitURI("/device_authorization")
+	request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
+	request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
+	request.Method = "POST"
+	var restEncoder *httpbinding.Encoder
+	if request.URL.RawPath == "" {
+		restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	} else {
+		request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+		restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+	}
+
+	if err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	restEncoder.SetHeader("Content-Type").String("application/json")
+
+	jsonEncoder := smithyjson.NewEncoder()
+	if err := awsRestjson1_serializeOpDocumentStartDeviceAuthorizationInput(input, jsonEncoder.Value); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	in.Request = request
+
+	return next.HandleSerialize(ctx, in)
+}
+func awsRestjson1_serializeOpHttpBindingsStartDeviceAuthorizationInput(v *StartDeviceAuthorizationInput, encoder *httpbinding.Encoder) error {
+	if v == nil {
+		return fmt.Errorf("unsupported serialization of nil %T", v)
+	}
+
+	return nil
+}
+
+func awsRestjson1_serializeOpDocumentStartDeviceAuthorizationInput(v *StartDeviceAuthorizationInput, value smithyjson.Value) error {
+	object := value.Object()
+	defer object.Close()
+
+	if v.ClientId != nil {
+		ok := object.Key("clientId")
+		ok.String(*v.ClientId)
+	}
+
+	if v.ClientSecret != nil {
+		ok := object.Key("clientSecret")
+		ok.String(*v.ClientSecret)
+	}
+
+	if v.StartUrl != nil {
+		ok := object.Key("startUrl")
+		ok.String(*v.StartUrl)
+	}
+
+	return nil
+}
+
+func awsRestjson1_serializeDocumentGrantTypes(v []string, value smithyjson.Value) error {
+	array := value.Array()
+	defer array.Close()
+
+	for i := range v {
+		av := array.Value()
+		av.String(v[i])
+	}
+	return nil
+}
+
+func awsRestjson1_serializeDocumentRedirectUris(v []string, value smithyjson.Value) error {
+	array := value.Array()
+	defer array.Close()
+
+	for i := range v {
+		av := array.Value()
+		av.String(v[i])
+	}
+	return nil
+}
+
+func awsRestjson1_serializeDocumentScopes(v []string, value smithyjson.Value) error {
+	array := value.Array()
+	defer array.Close()
+
+	for i := range v {
+		av := array.Value()
+		av.String(v[i])
+	}
+	return nil
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/types/errors.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/types/errors.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/types/errors.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/types/errors.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,428 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package types
+
+import (
+	"fmt"
+	smithy "github.com/aws/smithy-go"
+)
+
+// You do not have sufficient access to perform this action.
+type AccessDeniedException struct {
+	Message *string
+
+	ErrorCodeOverride *string
+
+	Error_            *string
+	Error_description *string
+
+	noSmithyDocumentSerde
+}
+
+func (e *AccessDeniedException) Error() string {
+	return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage())
+}
+func (e *AccessDeniedException) ErrorMessage() string {
+	if e.Message == nil {
+		return ""
+	}
+	return *e.Message
+}
+func (e *AccessDeniedException) ErrorCode() string {
+	if e == nil || e.ErrorCodeOverride == nil {
+		return "AccessDeniedException"
+	}
+	return *e.ErrorCodeOverride
+}
+func (e *AccessDeniedException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
+
+// Indicates that a request to authorize a client with an access user session
+// token is pending.
+type AuthorizationPendingException struct {
+	Message *string
+
+	ErrorCodeOverride *string
+
+	Error_            *string
+	Error_description *string
+
+	noSmithyDocumentSerde
+}
+
+func (e *AuthorizationPendingException) Error() string {
+	return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage())
+}
+func (e *AuthorizationPendingException) ErrorMessage() string {
+	if e.Message == nil {
+		return ""
+	}
+	return *e.Message
+}
+func (e *AuthorizationPendingException) ErrorCode() string {
+	if e == nil || e.ErrorCodeOverride == nil {
+		return "AuthorizationPendingException"
+	}
+	return *e.ErrorCodeOverride
+}
+func (e *AuthorizationPendingException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
+
+// Indicates that the token issued by the service is expired and is no longer
+// valid.
+type ExpiredTokenException struct {
+	Message *string
+
+	ErrorCodeOverride *string
+
+	Error_            *string
+	Error_description *string
+
+	noSmithyDocumentSerde
+}
+
+func (e *ExpiredTokenException) Error() string {
+	return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage())
+}
+func (e *ExpiredTokenException) ErrorMessage() string {
+	if e.Message == nil {
+		return ""
+	}
+	return *e.Message
+}
+func (e *ExpiredTokenException) ErrorCode() string {
+	if e == nil || e.ErrorCodeOverride == nil {
+		return "ExpiredTokenException"
+	}
+	return *e.ErrorCodeOverride
+}
+func (e *ExpiredTokenException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
+
+// Indicates that an error from the service occurred while trying to process a
+// request.
+type InternalServerException struct {
+	Message *string
+
+	ErrorCodeOverride *string
+
+	Error_            *string
+	Error_description *string
+
+	noSmithyDocumentSerde
+}
+
+func (e *InternalServerException) Error() string {
+	return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage())
+}
+func (e *InternalServerException) ErrorMessage() string {
+	if e.Message == nil {
+		return ""
+	}
+	return *e.Message
+}
+func (e *InternalServerException) ErrorCode() string {
+	if e == nil || e.ErrorCodeOverride == nil {
+		return "InternalServerException"
+	}
+	return *e.ErrorCodeOverride
+}
+func (e *InternalServerException) ErrorFault() smithy.ErrorFault { return smithy.FaultServer }
+
+// Indicates that the clientId or clientSecret in the request is invalid. For
+// example, this can occur when a client sends an incorrect clientId or an expired
+// clientSecret .
+type InvalidClientException struct {
+	Message *string
+
+	ErrorCodeOverride *string
+
+	Error_            *string
+	Error_description *string
+
+	noSmithyDocumentSerde
+}
+
+func (e *InvalidClientException) Error() string {
+	return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage())
+}
+func (e *InvalidClientException) ErrorMessage() string {
+	if e.Message == nil {
+		return ""
+	}
+	return *e.Message
+}
+func (e *InvalidClientException) ErrorCode() string {
+	if e == nil || e.ErrorCodeOverride == nil {
+		return "InvalidClientException"
+	}
+	return *e.ErrorCodeOverride
+}
+func (e *InvalidClientException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
+
+// Indicates that the client information sent in the request during registration
+// is invalid.
+type InvalidClientMetadataException struct {
+	Message *string
+
+	ErrorCodeOverride *string
+
+	Error_            *string
+	Error_description *string
+
+	noSmithyDocumentSerde
+}
+
+func (e *InvalidClientMetadataException) Error() string {
+	return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage())
+}
+func (e *InvalidClientMetadataException) ErrorMessage() string {
+	if e.Message == nil {
+		return ""
+	}
+	return *e.Message
+}
+func (e *InvalidClientMetadataException) ErrorCode() string {
+	if e == nil || e.ErrorCodeOverride == nil {
+		return "InvalidClientMetadataException"
+	}
+	return *e.ErrorCodeOverride
+}
+func (e *InvalidClientMetadataException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
+
+// Indicates that a request contains an invalid grant. This can occur if a client
+// makes a CreateTokenrequest with an invalid grant type.
+type InvalidGrantException struct {
+	Message *string
+
+	ErrorCodeOverride *string
+
+	Error_            *string
+	Error_description *string
+
+	noSmithyDocumentSerde
+}
+
+func (e *InvalidGrantException) Error() string {
+	return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage())
+}
+func (e *InvalidGrantException) ErrorMessage() string {
+	if e.Message == nil {
+		return ""
+	}
+	return *e.Message
+}
+func (e *InvalidGrantException) ErrorCode() string {
+	if e == nil || e.ErrorCodeOverride == nil {
+		return "InvalidGrantException"
+	}
+	return *e.ErrorCodeOverride
+}
+func (e *InvalidGrantException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
+
+// Indicates that one or more redirect URI in the request is not supported for
+// this operation.
+type InvalidRedirectUriException struct {
+	Message *string
+
+	ErrorCodeOverride *string
+
+	Error_            *string
+	Error_description *string
+
+	noSmithyDocumentSerde
+}
+
+func (e *InvalidRedirectUriException) Error() string {
+	return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage())
+}
+func (e *InvalidRedirectUriException) ErrorMessage() string {
+	if e.Message == nil {
+		return ""
+	}
+	return *e.Message
+}
+func (e *InvalidRedirectUriException) ErrorCode() string {
+	if e == nil || e.ErrorCodeOverride == nil {
+		return "InvalidRedirectUriException"
+	}
+	return *e.ErrorCodeOverride
+}
+func (e *InvalidRedirectUriException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
+
+// Indicates that something is wrong with the input to the request. For example, a
+// required parameter might be missing or out of range.
+type InvalidRequestException struct {
+	Message *string
+
+	ErrorCodeOverride *string
+
+	Error_            *string
+	Error_description *string
+
+	noSmithyDocumentSerde
+}
+
+func (e *InvalidRequestException) Error() string {
+	return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage())
+}
+func (e *InvalidRequestException) ErrorMessage() string {
+	if e.Message == nil {
+		return ""
+	}
+	return *e.Message
+}
+func (e *InvalidRequestException) ErrorCode() string {
+	if e == nil || e.ErrorCodeOverride == nil {
+		return "InvalidRequestException"
+	}
+	return *e.ErrorCodeOverride
+}
+func (e *InvalidRequestException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
+
+// Indicates that a token provided as input to the request was issued by and is
+// only usable by calling IAM Identity Center endpoints in another region.
+type InvalidRequestRegionException struct {
+	Message *string
+
+	ErrorCodeOverride *string
+
+	Error_            *string
+	Error_description *string
+	Endpoint          *string
+	Region            *string
+
+	noSmithyDocumentSerde
+}
+
+func (e *InvalidRequestRegionException) Error() string {
+	return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage())
+}
+func (e *InvalidRequestRegionException) ErrorMessage() string {
+	if e.Message == nil {
+		return ""
+	}
+	return *e.Message
+}
+func (e *InvalidRequestRegionException) ErrorCode() string {
+	if e == nil || e.ErrorCodeOverride == nil {
+		return "InvalidRequestRegionException"
+	}
+	return *e.ErrorCodeOverride
+}
+func (e *InvalidRequestRegionException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
+
+// Indicates that the scope provided in the request is invalid.
+type InvalidScopeException struct {
+	Message *string
+
+	ErrorCodeOverride *string
+
+	Error_            *string
+	Error_description *string
+
+	noSmithyDocumentSerde
+}
+
+func (e *InvalidScopeException) Error() string {
+	return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage())
+}
+func (e *InvalidScopeException) ErrorMessage() string {
+	if e.Message == nil {
+		return ""
+	}
+	return *e.Message
+}
+func (e *InvalidScopeException) ErrorCode() string {
+	if e == nil || e.ErrorCodeOverride == nil {
+		return "InvalidScopeException"
+	}
+	return *e.ErrorCodeOverride
+}
+func (e *InvalidScopeException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
+
+// Indicates that the client is making the request too frequently and is more than
+// the service can handle.
+type SlowDownException struct {
+	Message *string
+
+	ErrorCodeOverride *string
+
+	Error_            *string
+	Error_description *string
+
+	noSmithyDocumentSerde
+}
+
+func (e *SlowDownException) Error() string {
+	return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage())
+}
+func (e *SlowDownException) ErrorMessage() string {
+	if e.Message == nil {
+		return ""
+	}
+	return *e.Message
+}
+func (e *SlowDownException) ErrorCode() string {
+	if e == nil || e.ErrorCodeOverride == nil {
+		return "SlowDownException"
+	}
+	return *e.ErrorCodeOverride
+}
+func (e *SlowDownException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
+
+// Indicates that the client is not currently authorized to make the request. This
+// can happen when a clientId is not issued for a public client.
+type UnauthorizedClientException struct {
+	Message *string
+
+	ErrorCodeOverride *string
+
+	Error_            *string
+	Error_description *string
+
+	noSmithyDocumentSerde
+}
+
+func (e *UnauthorizedClientException) Error() string {
+	return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage())
+}
+func (e *UnauthorizedClientException) ErrorMessage() string {
+	if e.Message == nil {
+		return ""
+	}
+	return *e.Message
+}
+func (e *UnauthorizedClientException) ErrorCode() string {
+	if e == nil || e.ErrorCodeOverride == nil {
+		return "UnauthorizedClientException"
+	}
+	return *e.ErrorCodeOverride
+}
+func (e *UnauthorizedClientException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
+
+// Indicates that the grant type in the request is not supported by the service.
+type UnsupportedGrantTypeException struct {
+	Message *string
+
+	ErrorCodeOverride *string
+
+	Error_            *string
+	Error_description *string
+
+	noSmithyDocumentSerde
+}
+
+func (e *UnsupportedGrantTypeException) Error() string {
+	return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage())
+}
+func (e *UnsupportedGrantTypeException) ErrorMessage() string {
+	if e.Message == nil {
+		return ""
+	}
+	return *e.Message
+}
+func (e *UnsupportedGrantTypeException) ErrorCode() string {
+	if e == nil || e.ErrorCodeOverride == nil {
+		return "UnsupportedGrantTypeException"
+	}
+	return *e.ErrorCodeOverride
+}
+func (e *UnsupportedGrantTypeException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/types/types.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/types/types.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/types/types.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/types/types.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,9 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package types
+
+import (
+	smithydocument "github.com/aws/smithy-go/document"
+)
+
+type noSmithyDocumentSerde = smithydocument.NoSerde
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/validators.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/validators.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/validators.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/validators.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,184 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package ssooidc
+
+import (
+	"context"
+	"fmt"
+	smithy "github.com/aws/smithy-go"
+	"github.com/aws/smithy-go/middleware"
+)
+
+type validateOpCreateToken struct {
+}
+
+func (*validateOpCreateToken) ID() string {
+	return "OperationInputValidation"
+}
+
+func (m *validateOpCreateToken) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+	out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+	input, ok := in.Parameters.(*CreateTokenInput)
+	if !ok {
+		return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+	}
+	if err := validateOpCreateTokenInput(input); err != nil {
+		return out, metadata, err
+	}
+	return next.HandleInitialize(ctx, in)
+}
+
+type validateOpCreateTokenWithIAM struct {
+}
+
+func (*validateOpCreateTokenWithIAM) ID() string {
+	return "OperationInputValidation"
+}
+
+func (m *validateOpCreateTokenWithIAM) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+	out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+	input, ok := in.Parameters.(*CreateTokenWithIAMInput)
+	if !ok {
+		return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+	}
+	if err := validateOpCreateTokenWithIAMInput(input); err != nil {
+		return out, metadata, err
+	}
+	return next.HandleInitialize(ctx, in)
+}
+
+type validateOpRegisterClient struct {
+}
+
+func (*validateOpRegisterClient) ID() string {
+	return "OperationInputValidation"
+}
+
+func (m *validateOpRegisterClient) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+	out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+	input, ok := in.Parameters.(*RegisterClientInput)
+	if !ok {
+		return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+	}
+	if err := validateOpRegisterClientInput(input); err != nil {
+		return out, metadata, err
+	}
+	return next.HandleInitialize(ctx, in)
+}
+
+type validateOpStartDeviceAuthorization struct {
+}
+
+func (*validateOpStartDeviceAuthorization) ID() string {
+	return "OperationInputValidation"
+}
+
+func (m *validateOpStartDeviceAuthorization) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+	out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+	input, ok := in.Parameters.(*StartDeviceAuthorizationInput)
+	if !ok {
+		return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+	}
+	if err := validateOpStartDeviceAuthorizationInput(input); err != nil {
+		return out, metadata, err
+	}
+	return next.HandleInitialize(ctx, in)
+}
+
+func addOpCreateTokenValidationMiddleware(stack *middleware.Stack) error {
+	return stack.Initialize.Add(&validateOpCreateToken{}, middleware.After)
+}
+
+func addOpCreateTokenWithIAMValidationMiddleware(stack *middleware.Stack) error {
+	return stack.Initialize.Add(&validateOpCreateTokenWithIAM{}, middleware.After)
+}
+
+func addOpRegisterClientValidationMiddleware(stack *middleware.Stack) error {
+	return stack.Initialize.Add(&validateOpRegisterClient{}, middleware.After)
+}
+
+func addOpStartDeviceAuthorizationValidationMiddleware(stack *middleware.Stack) error {
+	return stack.Initialize.Add(&validateOpStartDeviceAuthorization{}, middleware.After)
+}
+
+func validateOpCreateTokenInput(v *CreateTokenInput) error {
+	if v == nil {
+		return nil
+	}
+	invalidParams := smithy.InvalidParamsError{Context: "CreateTokenInput"}
+	if v.ClientId == nil {
+		invalidParams.Add(smithy.NewErrParamRequired("ClientId"))
+	}
+	if v.ClientSecret == nil {
+		invalidParams.Add(smithy.NewErrParamRequired("ClientSecret"))
+	}
+	if v.GrantType == nil {
+		invalidParams.Add(smithy.NewErrParamRequired("GrantType"))
+	}
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	} else {
+		return nil
+	}
+}
+
+func validateOpCreateTokenWithIAMInput(v *CreateTokenWithIAMInput) error {
+	if v == nil {
+		return nil
+	}
+	invalidParams := smithy.InvalidParamsError{Context: "CreateTokenWithIAMInput"}
+	if v.ClientId == nil {
+		invalidParams.Add(smithy.NewErrParamRequired("ClientId"))
+	}
+	if v.GrantType == nil {
+		invalidParams.Add(smithy.NewErrParamRequired("GrantType"))
+	}
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	} else {
+		return nil
+	}
+}
+
+func validateOpRegisterClientInput(v *RegisterClientInput) error {
+	if v == nil {
+		return nil
+	}
+	invalidParams := smithy.InvalidParamsError{Context: "RegisterClientInput"}
+	if v.ClientName == nil {
+		invalidParams.Add(smithy.NewErrParamRequired("ClientName"))
+	}
+	if v.ClientType == nil {
+		invalidParams.Add(smithy.NewErrParamRequired("ClientType"))
+	}
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	} else {
+		return nil
+	}
+}
+
+func validateOpStartDeviceAuthorizationInput(v *StartDeviceAuthorizationInput) error {
+	if v == nil {
+		return nil
+	}
+	invalidParams := smithy.InvalidParamsError{Context: "StartDeviceAuthorizationInput"}
+	if v.ClientId == nil {
+		invalidParams.Add(smithy.NewErrParamRequired("ClientId"))
+	}
+	if v.ClientSecret == nil {
+		invalidParams.Add(smithy.NewErrParamRequired("ClientSecret"))
+	}
+	if v.StartUrl == nil {
+		invalidParams.Add(smithy.NewErrParamRequired("StartUrl"))
+	}
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	} else {
+		return nil
+	}
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/service/sts/CHANGELOG.md 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/service/sts/CHANGELOG.md
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/service/sts/CHANGELOG.md	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/service/sts/CHANGELOG.md	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,493 @@
+# v1.30.3 (2024-07-10.2)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.30.2 (2024-07-10)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.30.1 (2024-06-28)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.30.0 (2024-06-26)
+
+* **Feature**: Support list-of-string endpoint parameter.
+
+# v1.29.1 (2024-06-19)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.29.0 (2024-06-18)
+
+* **Feature**: Track usage of various AWS SDK features in user-agent string.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.28.13 (2024-06-17)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.28.12 (2024-06-07)
+
+* **Bug Fix**: Add clock skew correction on all service clients
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.28.11 (2024-06-03)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.28.10 (2024-05-23)
+
+* No change notes available for this release.
+
+# v1.28.9 (2024-05-16)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.28.8 (2024-05-15)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.28.7 (2024-05-08)
+
+* **Bug Fix**: GoDoc improvement
+
+# v1.28.6 (2024-03-29)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.28.5 (2024-03-18)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.28.4 (2024-03-07)
+
+* **Bug Fix**: Remove dependency on go-cmp.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.28.3 (2024-03-05)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.28.2 (2024-03-04)
+
+* **Bug Fix**: Update internal/presigned-url dependency for corrected API name.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.28.1 (2024-02-23)
+
+* **Bug Fix**: Move all common, SDK-side middleware stack ops into the service client module to prevent cross-module compatibility issues in the future.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.28.0 (2024-02-22)
+
+* **Feature**: Add middleware stack snapshot tests.
+
+# v1.27.2 (2024-02-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.27.1 (2024-02-20)
+
+* **Bug Fix**: When sourcing values for a service's `EndpointParameters`, the lack of a configured region (i.e. `options.Region == ""`) will now translate to a `nil` value for `EndpointParameters.Region` instead of a pointer to the empty string `""`. This will result in a much more explicit error when calling an operation instead of an obscure hostname lookup failure.
+
+# v1.27.0 (2024-02-13)
+
+* **Feature**: Bump minimum Go version to 1.20 per our language support policy.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.26.7 (2024-01-04)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.26.6 (2023-12-20)
+
+* No change notes available for this release.
+
+# v1.26.5 (2023-12-08)
+
+* **Bug Fix**: Reinstate presence of default Retryer in functional options, but still respect max attempts set therein.
+
+# v1.26.4 (2023-12-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.26.3 (2023-12-06)
+
+* **Bug Fix**: Restore pre-refactor auth behavior where all operations could technically be performed anonymously.
+* **Bug Fix**: STS `AssumeRoleWithSAML` and `AssumeRoleWithWebIdentity` would incorrectly attempt to use SigV4 authentication.
+
+# v1.26.2 (2023-12-01)
+
+* **Bug Fix**: Correct wrapping of errors in authentication workflow.
+* **Bug Fix**: Correctly recognize cache-wrapped instances of AnonymousCredentials at client construction.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.26.1 (2023-11-30)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.26.0 (2023-11-29)
+
+* **Feature**: Expose Options() accessor on service clients.
+* **Documentation**: Documentation updates for AWS Security Token Service.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.25.6 (2023-11-28.2)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.25.5 (2023-11-28)
+
+* **Bug Fix**: Respect setting RetryMaxAttempts in functional options at client construction.
+
+# v1.25.4 (2023-11-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.25.3 (2023-11-17)
+
+* **Documentation**: API updates for the AWS Security Token Service
+
+# v1.25.2 (2023-11-15)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.25.1 (2023-11-09)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.25.0 (2023-11-01)
+
+* **Feature**: Adds support for configured endpoints via environment variables and the AWS shared configuration file.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.24.0 (2023-10-31)
+
+* **Feature**: **BREAKING CHANGE**: Bump minimum go version to 1.19 per the revised [go version support policy](https://aws.amazon.com/blogs/developer/aws-sdk-for-go-aligns-with-go-release-policy-on-supported-runtimes/).
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.23.2 (2023-10-12)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.23.1 (2023-10-06)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.23.0 (2023-10-02)
+
+* **Feature**: STS API updates for assumeRole
+
+# v1.22.0 (2023-09-18)
+
+* **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service.
+* **Feature**: Adds several endpoint ruleset changes across all models: smaller rulesets, removed non-unique regional endpoints, fixes FIPS and DualStack endpoints, and make region not required in SDK::Endpoint. Additional breakfix to cognito-sync field.
+
+# v1.21.5 (2023-08-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.21.4 (2023-08-18)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.21.3 (2023-08-17)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.21.2 (2023-08-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.21.1 (2023-08-01)
+
+* No change notes available for this release.
+
+# v1.21.0 (2023-07-31)
+
+* **Feature**: Adds support for smithy-modeled endpoint resolution. A new rules-based endpoint resolution will be added to the SDK which will supercede and deprecate existing endpoint resolution. Specifically, EndpointResolver will be deprecated while BaseEndpoint and EndpointResolverV2 will take its place. For more information, please see the Endpoints section in our Developer Guide.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.20.1 (2023-07-28)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.20.0 (2023-07-25)
+
+* **Feature**: API updates for the AWS Security Token Service
+
+# v1.19.3 (2023-07-13)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.19.2 (2023-06-15)
+
+* No change notes available for this release.
+
+# v1.19.1 (2023-06-13)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.19.0 (2023-05-08)
+
+* **Feature**: Documentation updates for AWS Security Token Service.
+
+# v1.18.11 (2023-05-04)
+
+* No change notes available for this release.
+
+# v1.18.10 (2023-04-24)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.9 (2023-04-10)
+
+* No change notes available for this release.
+
+# v1.18.8 (2023-04-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.7 (2023-03-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.6 (2023-03-10)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.5 (2023-02-22)
+
+* **Bug Fix**: Prevent nil pointer dereference when retrieving error codes.
+
+# v1.18.4 (2023-02-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.3 (2023-02-03)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+* **Dependency Update**: Upgrade smithy to 1.27.2 and correct empty query list serialization.
+
+# v1.18.2 (2023-01-25)
+
+* **Documentation**: Doc only change to update wording in a key topic
+
+# v1.18.1 (2023-01-23)
+
+* No change notes available for this release.
+
+# v1.18.0 (2023-01-05)
+
+* **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401).
+
+# v1.17.7 (2022-12-15)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.6 (2022-12-02)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.5 (2022-11-22)
+
+* No change notes available for this release.
+
+# v1.17.4 (2022-11-17)
+
+* **Documentation**: Documentation updates for AWS Security Token Service.
+
+# v1.17.3 (2022-11-16)
+
+* No change notes available for this release.
+
+# v1.17.2 (2022-11-10)
+
+* No change notes available for this release.
+
+# v1.17.1 (2022-10-24)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.0 (2022-10-21)
+
+* **Feature**: Add presign functionality for sts:AssumeRole operation
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.19 (2022-09-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.18 (2022-09-14)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.17 (2022-09-02)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.16 (2022-08-31)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.15 (2022-08-30)
+
+* No change notes available for this release.
+
+# v1.16.14 (2022-08-29)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.13 (2022-08-11)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.12 (2022-08-09)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.11 (2022-08-08)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.10 (2022-08-01)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.9 (2022-07-05)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.8 (2022-06-29)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.7 (2022-06-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.6 (2022-05-17)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.5 (2022-05-16)
+
+* **Documentation**: Documentation updates for AWS Security Token Service.
+
+# v1.16.4 (2022-04-25)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.3 (2022-03-30)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.2 (2022-03-24)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.1 (2022-03-23)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.0 (2022-03-08)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Documentation**: Updated service client model to latest release.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.0 (2022-02-24)
+
+* **Feature**: API client updated
+* **Feature**: Adds RetryMaxAttempts and RetryMod to API client Options. This allows the API clients' default Retryer to be configured from the shared configuration files or environment variables. Adding a new Retry mode of `Adaptive`. `Adaptive` retry mode is an experimental mode, adding client rate limiting when throttles reponses are received from an API. See [retry.AdaptiveMode](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/aws/retry#AdaptiveMode) for more details, and configuration options.
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.14.0 (2022-01-14)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.0 (2022-01-07)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.0 (2021-12-21)
+
+* **Feature**: Updated to latest service endpoints
+
+# v1.11.1 (2021-12-02)
+
+* **Bug Fix**: Fixes a bug that prevented aws.EndpointResolverWithOptions from being used by the service client. ([#1514](https://github.com/aws/aws-sdk-go-v2/pull/1514))
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.0 (2021-11-30)
+
+* **Feature**: API client updated
+
+# v1.10.1 (2021-11-19)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.10.0 (2021-11-12)
+
+* **Feature**: Service clients now support custom endpoints that have an initial URI path defined.
+
+# v1.9.0 (2021-11-06)
+
+* **Feature**: The SDK now supports configuration of FIPS and DualStack endpoints using environment variables, shared configuration, or programmatically.
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.8.0 (2021-10-21)
+
+* **Feature**: API client updated
+* **Feature**: Updated  to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.7.2 (2021-10-11)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.7.1 (2021-09-17)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.7.0 (2021-08-27)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.6.2 (2021-08-19)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.6.1 (2021-08-04)
+
+* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.6.0 (2021-07-15)
+
+* **Feature**: The ErrorCode method on generated service error types has been corrected to match the API model.
+* **Documentation**: Updated service model to latest revision.
+* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.5.0 (2021-06-25)
+
+* **Feature**: API client updated
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.4.1 (2021-05-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.4.0 (2021-05-14)
+
+* **Feature**: Constant has been added to modules to enable runtime version inspection for reporting.
+* **Dependency Update**: Updated to the latest SDK module versions
+
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/service/sts/LICENSE.txt 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/service/sts/LICENSE.txt
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/service/sts/LICENSE.txt	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/service/sts/LICENSE.txt	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_client.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_client.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_client.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_client.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,779 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package sts
+
+import (
+	"context"
+	"fmt"
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/aws/aws-sdk-go-v2/aws/defaults"
+	awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+	"github.com/aws/aws-sdk-go-v2/aws/protocol/query"
+	"github.com/aws/aws-sdk-go-v2/aws/retry"
+	"github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+	awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http"
+	internalauth "github.com/aws/aws-sdk-go-v2/internal/auth"
+	internalauthsmithy "github.com/aws/aws-sdk-go-v2/internal/auth/smithy"
+	internalConfig "github.com/aws/aws-sdk-go-v2/internal/configsources"
+	internalmiddleware "github.com/aws/aws-sdk-go-v2/internal/middleware"
+	acceptencodingcust "github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding"
+	presignedurlcust "github.com/aws/aws-sdk-go-v2/service/internal/presigned-url"
+	smithy "github.com/aws/smithy-go"
+	smithyauth "github.com/aws/smithy-go/auth"
+	smithydocument "github.com/aws/smithy-go/document"
+	"github.com/aws/smithy-go/logging"
+	"github.com/aws/smithy-go/middleware"
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+	"net"
+	"net/http"
+	"sync/atomic"
+	"time"
+)
+
+const ServiceID = "STS"
+const ServiceAPIVersion = "2011-06-15"
+
+// Client provides the API client to make operations call for AWS Security Token
+// Service.
+type Client struct {
+	options Options
+
+	// Difference between the time reported by the server and the client
+	timeOffset *atomic.Int64
+}
+
+// New returns an initialized Client based on the functional options. Provide
+// additional functional options to further configure the behavior of the client,
+// such as changing the client's endpoint or adding custom middleware behavior.
+func New(options Options, optFns ...func(*Options)) *Client {
+	options = options.Copy()
+
+	resolveDefaultLogger(&options)
+
+	setResolvedDefaultsMode(&options)
+
+	resolveRetryer(&options)
+
+	resolveHTTPClient(&options)
+
+	resolveHTTPSignerV4(&options)
+
+	resolveEndpointResolverV2(&options)
+
+	resolveAuthSchemeResolver(&options)
+
+	for _, fn := range optFns {
+		fn(&options)
+	}
+
+	finalizeRetryMaxAttempts(&options)
+
+	ignoreAnonymousAuth(&options)
+
+	wrapWithAnonymousAuth(&options)
+
+	resolveAuthSchemes(&options)
+
+	client := &Client{
+		options: options,
+	}
+
+	initializeTimeOffsetResolver(client)
+
+	return client
+}
+
+// Options returns a copy of the client configuration.
+//
+// Callers SHOULD NOT perform mutations on any inner structures within client
+// config. Config overrides should instead be made on a per-operation basis through
+// functional options.
+func (c *Client) Options() Options {
+	return c.options.Copy()
+}
+
+func (c *Client) invokeOperation(ctx context.Context, opID string, params interface{}, optFns []func(*Options), stackFns ...func(*middleware.Stack, Options) error) (result interface{}, metadata middleware.Metadata, err error) {
+	ctx = middleware.ClearStackValues(ctx)
+	stack := middleware.NewStack(opID, smithyhttp.NewStackRequest)
+	options := c.options.Copy()
+
+	for _, fn := range optFns {
+		fn(&options)
+	}
+
+	finalizeOperationRetryMaxAttempts(&options, *c)
+
+	finalizeClientEndpointResolverOptions(&options)
+
+	for _, fn := range stackFns {
+		if err := fn(stack, options); err != nil {
+			return nil, metadata, err
+		}
+	}
+
+	for _, fn := range options.APIOptions {
+		if err := fn(stack); err != nil {
+			return nil, metadata, err
+		}
+	}
+
+	handler := middleware.DecorateHandler(smithyhttp.NewClientHandler(options.HTTPClient), stack)
+	result, metadata, err = handler.Handle(ctx, params)
+	if err != nil {
+		err = &smithy.OperationError{
+			ServiceID:     ServiceID,
+			OperationName: opID,
+			Err:           err,
+		}
+	}
+	return result, metadata, err
+}
+
+type operationInputKey struct{}
+
+func setOperationInput(ctx context.Context, input interface{}) context.Context {
+	return middleware.WithStackValue(ctx, operationInputKey{}, input)
+}
+
+func getOperationInput(ctx context.Context) interface{} {
+	return middleware.GetStackValue(ctx, operationInputKey{})
+}
+
+type setOperationInputMiddleware struct {
+}
+
+func (*setOperationInputMiddleware) ID() string {
+	return "setOperationInput"
+}
+
+func (m *setOperationInputMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	ctx = setOperationInput(ctx, in.Parameters)
+	return next.HandleSerialize(ctx, in)
+}
+
+func addProtocolFinalizerMiddlewares(stack *middleware.Stack, options Options, operation string) error {
+	if err := stack.Finalize.Add(&resolveAuthSchemeMiddleware{operation: operation, options: options}, middleware.Before); err != nil {
+		return fmt.Errorf("add ResolveAuthScheme: %w", err)
+	}
+	if err := stack.Finalize.Insert(&getIdentityMiddleware{options: options}, "ResolveAuthScheme", middleware.After); err != nil {
+		return fmt.Errorf("add GetIdentity: %v", err)
+	}
+	if err := stack.Finalize.Insert(&resolveEndpointV2Middleware{options: options}, "GetIdentity", middleware.After); err != nil {
+		return fmt.Errorf("add ResolveEndpointV2: %v", err)
+	}
+	if err := stack.Finalize.Insert(&signRequestMiddleware{}, "ResolveEndpointV2", middleware.After); err != nil {
+		return fmt.Errorf("add Signing: %w", err)
+	}
+	return nil
+}
+func resolveAuthSchemeResolver(options *Options) {
+	if options.AuthSchemeResolver == nil {
+		options.AuthSchemeResolver = &defaultAuthSchemeResolver{}
+	}
+}
+
+func resolveAuthSchemes(options *Options) {
+	if options.AuthSchemes == nil {
+		options.AuthSchemes = []smithyhttp.AuthScheme{
+			internalauth.NewHTTPAuthScheme("aws.auth#sigv4", &internalauthsmithy.V4SignerAdapter{
+				Signer:     options.HTTPSignerV4,
+				Logger:     options.Logger,
+				LogSigning: options.ClientLogMode.IsSigning(),
+			}),
+		}
+	}
+}
+
+type noSmithyDocumentSerde = smithydocument.NoSerde
+
+type legacyEndpointContextSetter struct {
+	LegacyResolver EndpointResolver
+}
+
+func (*legacyEndpointContextSetter) ID() string {
+	return "legacyEndpointContextSetter"
+}
+
+func (m *legacyEndpointContextSetter) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+	out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+	if m.LegacyResolver != nil {
+		ctx = awsmiddleware.SetRequiresLegacyEndpoints(ctx, true)
+	}
+
+	return next.HandleInitialize(ctx, in)
+
+}
+func addlegacyEndpointContextSetter(stack *middleware.Stack, o Options) error {
+	return stack.Initialize.Add(&legacyEndpointContextSetter{
+		LegacyResolver: o.EndpointResolver,
+	}, middleware.Before)
+}
+
+func resolveDefaultLogger(o *Options) {
+	if o.Logger != nil {
+		return
+	}
+	o.Logger = logging.Nop{}
+}
+
+func addSetLoggerMiddleware(stack *middleware.Stack, o Options) error {
+	return middleware.AddSetLoggerMiddleware(stack, o.Logger)
+}
+
+func setResolvedDefaultsMode(o *Options) {
+	if len(o.resolvedDefaultsMode) > 0 {
+		return
+	}
+
+	var mode aws.DefaultsMode
+	mode.SetFromString(string(o.DefaultsMode))
+
+	if mode == aws.DefaultsModeAuto {
+		mode = defaults.ResolveDefaultsModeAuto(o.Region, o.RuntimeEnvironment)
+	}
+
+	o.resolvedDefaultsMode = mode
+}
+
+// NewFromConfig returns a new client from the provided config.
+func NewFromConfig(cfg aws.Config, optFns ...func(*Options)) *Client {
+	opts := Options{
+		Region:                cfg.Region,
+		DefaultsMode:          cfg.DefaultsMode,
+		RuntimeEnvironment:    cfg.RuntimeEnvironment,
+		HTTPClient:            cfg.HTTPClient,
+		Credentials:           cfg.Credentials,
+		APIOptions:            cfg.APIOptions,
+		Logger:                cfg.Logger,
+		ClientLogMode:         cfg.ClientLogMode,
+		AppID:                 cfg.AppID,
+		AccountIDEndpointMode: cfg.AccountIDEndpointMode,
+	}
+	resolveAWSRetryerProvider(cfg, &opts)
+	resolveAWSRetryMaxAttempts(cfg, &opts)
+	resolveAWSRetryMode(cfg, &opts)
+	resolveAWSEndpointResolver(cfg, &opts)
+	resolveUseDualStackEndpoint(cfg, &opts)
+	resolveUseFIPSEndpoint(cfg, &opts)
+	resolveBaseEndpoint(cfg, &opts)
+	return New(opts, optFns...)
+}
+
+func resolveHTTPClient(o *Options) {
+	var buildable *awshttp.BuildableClient
+
+	if o.HTTPClient != nil {
+		var ok bool
+		buildable, ok = o.HTTPClient.(*awshttp.BuildableClient)
+		if !ok {
+			return
+		}
+	} else {
+		buildable = awshttp.NewBuildableClient()
+	}
+
+	modeConfig, err := defaults.GetModeConfiguration(o.resolvedDefaultsMode)
+	if err == nil {
+		buildable = buildable.WithDialerOptions(func(dialer *net.Dialer) {
+			if dialerTimeout, ok := modeConfig.GetConnectTimeout(); ok {
+				dialer.Timeout = dialerTimeout
+			}
+		})
+
+		buildable = buildable.WithTransportOptions(func(transport *http.Transport) {
+			if tlsHandshakeTimeout, ok := modeConfig.GetTLSNegotiationTimeout(); ok {
+				transport.TLSHandshakeTimeout = tlsHandshakeTimeout
+			}
+		})
+	}
+
+	o.HTTPClient = buildable
+}
+
+func resolveRetryer(o *Options) {
+	if o.Retryer != nil {
+		return
+	}
+
+	if len(o.RetryMode) == 0 {
+		modeConfig, err := defaults.GetModeConfiguration(o.resolvedDefaultsMode)
+		if err == nil {
+			o.RetryMode = modeConfig.RetryMode
+		}
+	}
+	if len(o.RetryMode) == 0 {
+		o.RetryMode = aws.RetryModeStandard
+	}
+
+	var standardOptions []func(*retry.StandardOptions)
+	if v := o.RetryMaxAttempts; v != 0 {
+		standardOptions = append(standardOptions, func(so *retry.StandardOptions) {
+			so.MaxAttempts = v
+		})
+	}
+
+	switch o.RetryMode {
+	case aws.RetryModeAdaptive:
+		var adaptiveOptions []func(*retry.AdaptiveModeOptions)
+		if len(standardOptions) != 0 {
+			adaptiveOptions = append(adaptiveOptions, func(ao *retry.AdaptiveModeOptions) {
+				ao.StandardOptions = append(ao.StandardOptions, standardOptions...)
+			})
+		}
+		o.Retryer = retry.NewAdaptiveMode(adaptiveOptions...)
+
+	default:
+		o.Retryer = retry.NewStandard(standardOptions...)
+	}
+}
+
+func resolveAWSRetryerProvider(cfg aws.Config, o *Options) {
+	if cfg.Retryer == nil {
+		return
+	}
+	o.Retryer = cfg.Retryer()
+}
+
+func resolveAWSRetryMode(cfg aws.Config, o *Options) {
+	if len(cfg.RetryMode) == 0 {
+		return
+	}
+	o.RetryMode = cfg.RetryMode
+}
+func resolveAWSRetryMaxAttempts(cfg aws.Config, o *Options) {
+	if cfg.RetryMaxAttempts == 0 {
+		return
+	}
+	o.RetryMaxAttempts = cfg.RetryMaxAttempts
+}
+
+func finalizeRetryMaxAttempts(o *Options) {
+	if o.RetryMaxAttempts == 0 {
+		return
+	}
+
+	o.Retryer = retry.AddWithMaxAttempts(o.Retryer, o.RetryMaxAttempts)
+}
+
+func finalizeOperationRetryMaxAttempts(o *Options, client Client) {
+	if v := o.RetryMaxAttempts; v == 0 || v == client.options.RetryMaxAttempts {
+		return
+	}
+
+	o.Retryer = retry.AddWithMaxAttempts(o.Retryer, o.RetryMaxAttempts)
+}
+
+func resolveAWSEndpointResolver(cfg aws.Config, o *Options) {
+	if cfg.EndpointResolver == nil && cfg.EndpointResolverWithOptions == nil {
+		return
+	}
+	o.EndpointResolver = withEndpointResolver(cfg.EndpointResolver, cfg.EndpointResolverWithOptions)
+}
+
+func addClientUserAgent(stack *middleware.Stack, options Options) error {
+	ua, err := getOrAddRequestUserAgent(stack)
+	if err != nil {
+		return err
+	}
+
+	ua.AddSDKAgentKeyValue(awsmiddleware.APIMetadata, "sts", goModuleVersion)
+	if len(options.AppID) > 0 {
+		ua.AddSDKAgentKey(awsmiddleware.ApplicationIdentifier, options.AppID)
+	}
+
+	return nil
+}
+
+func getOrAddRequestUserAgent(stack *middleware.Stack) (*awsmiddleware.RequestUserAgent, error) {
+	id := (*awsmiddleware.RequestUserAgent)(nil).ID()
+	mw, ok := stack.Build.Get(id)
+	if !ok {
+		mw = awsmiddleware.NewRequestUserAgent()
+		if err := stack.Build.Add(mw, middleware.After); err != nil {
+			return nil, err
+		}
+	}
+
+	ua, ok := mw.(*awsmiddleware.RequestUserAgent)
+	if !ok {
+		return nil, fmt.Errorf("%T for %s middleware did not match expected type", mw, id)
+	}
+
+	return ua, nil
+}
+
+type HTTPSignerV4 interface {
+	SignHTTP(ctx context.Context, credentials aws.Credentials, r *http.Request, payloadHash string, service string, region string, signingTime time.Time, optFns ...func(*v4.SignerOptions)) error
+}
+
+func resolveHTTPSignerV4(o *Options) {
+	if o.HTTPSignerV4 != nil {
+		return
+	}
+	o.HTTPSignerV4 = newDefaultV4Signer(*o)
+}
+
+func newDefaultV4Signer(o Options) *v4.Signer {
+	return v4.NewSigner(func(so *v4.SignerOptions) {
+		so.Logger = o.Logger
+		so.LogSigning = o.ClientLogMode.IsSigning()
+	})
+}
+
+func addClientRequestID(stack *middleware.Stack) error {
+	return stack.Build.Add(&awsmiddleware.ClientRequestID{}, middleware.After)
+}
+
+func addComputeContentLength(stack *middleware.Stack) error {
+	return stack.Build.Add(&smithyhttp.ComputeContentLength{}, middleware.After)
+}
+
+func addRawResponseToMetadata(stack *middleware.Stack) error {
+	return stack.Deserialize.Add(&awsmiddleware.AddRawResponse{}, middleware.Before)
+}
+
+func addRecordResponseTiming(stack *middleware.Stack) error {
+	return stack.Deserialize.Add(&awsmiddleware.RecordResponseTiming{}, middleware.After)
+}
+func addStreamingEventsPayload(stack *middleware.Stack) error {
+	return stack.Finalize.Add(&v4.StreamingEventsPayload{}, middleware.Before)
+}
+
+func addUnsignedPayload(stack *middleware.Stack) error {
+	return stack.Finalize.Insert(&v4.UnsignedPayload{}, "ResolveEndpointV2", middleware.After)
+}
+
+func addComputePayloadSHA256(stack *middleware.Stack) error {
+	return stack.Finalize.Insert(&v4.ComputePayloadSHA256{}, "ResolveEndpointV2", middleware.After)
+}
+
+func addContentSHA256Header(stack *middleware.Stack) error {
+	return stack.Finalize.Insert(&v4.ContentSHA256Header{}, (*v4.ComputePayloadSHA256)(nil).ID(), middleware.After)
+}
+
+func addIsWaiterUserAgent(o *Options) {
+	o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error {
+		ua, err := getOrAddRequestUserAgent(stack)
+		if err != nil {
+			return err
+		}
+
+		ua.AddUserAgentFeature(awsmiddleware.UserAgentFeatureWaiter)
+		return nil
+	})
+}
+
+func addIsPaginatorUserAgent(o *Options) {
+	o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error {
+		ua, err := getOrAddRequestUserAgent(stack)
+		if err != nil {
+			return err
+		}
+
+		ua.AddUserAgentFeature(awsmiddleware.UserAgentFeaturePaginator)
+		return nil
+	})
+}
+
+func addRetry(stack *middleware.Stack, o Options) error {
+	attempt := retry.NewAttemptMiddleware(o.Retryer, smithyhttp.RequestCloner, func(m *retry.Attempt) {
+		m.LogAttempts = o.ClientLogMode.IsRetries()
+	})
+	if err := stack.Finalize.Insert(attempt, "Signing", middleware.Before); err != nil {
+		return err
+	}
+	if err := stack.Finalize.Insert(&retry.MetricsHeader{}, attempt.ID(), middleware.After); err != nil {
+		return err
+	}
+	return nil
+}
+
+// resolves dual-stack endpoint configuration
+func resolveUseDualStackEndpoint(cfg aws.Config, o *Options) error {
+	if len(cfg.ConfigSources) == 0 {
+		return nil
+	}
+	value, found, err := internalConfig.ResolveUseDualStackEndpoint(context.Background(), cfg.ConfigSources)
+	if err != nil {
+		return err
+	}
+	if found {
+		o.EndpointOptions.UseDualStackEndpoint = value
+	}
+	return nil
+}
+
+// resolves FIPS endpoint configuration
+func resolveUseFIPSEndpoint(cfg aws.Config, o *Options) error {
+	if len(cfg.ConfigSources) == 0 {
+		return nil
+	}
+	value, found, err := internalConfig.ResolveUseFIPSEndpoint(context.Background(), cfg.ConfigSources)
+	if err != nil {
+		return err
+	}
+	if found {
+		o.EndpointOptions.UseFIPSEndpoint = value
+	}
+	return nil
+}
+
+func resolveAccountID(identity smithyauth.Identity, mode aws.AccountIDEndpointMode) *string {
+	if mode == aws.AccountIDEndpointModeDisabled {
+		return nil
+	}
+
+	if ca, ok := identity.(*internalauthsmithy.CredentialsAdapter); ok && ca.Credentials.AccountID != "" {
+		return aws.String(ca.Credentials.AccountID)
+	}
+
+	return nil
+}
+
+func addTimeOffsetBuild(stack *middleware.Stack, c *Client) error {
+	mw := internalmiddleware.AddTimeOffsetMiddleware{Offset: c.timeOffset}
+	if err := stack.Build.Add(&mw, middleware.After); err != nil {
+		return err
+	}
+	return stack.Deserialize.Insert(&mw, "RecordResponseTiming", middleware.Before)
+}
+func initializeTimeOffsetResolver(c *Client) {
+	c.timeOffset = new(atomic.Int64)
+}
+
+func checkAccountID(identity smithyauth.Identity, mode aws.AccountIDEndpointMode) error {
+	switch mode {
+	case aws.AccountIDEndpointModeUnset:
+	case aws.AccountIDEndpointModePreferred:
+	case aws.AccountIDEndpointModeDisabled:
+	case aws.AccountIDEndpointModeRequired:
+		if ca, ok := identity.(*internalauthsmithy.CredentialsAdapter); !ok {
+			return fmt.Errorf("accountID is required but not set")
+		} else if ca.Credentials.AccountID == "" {
+			return fmt.Errorf("accountID is required but not set")
+		}
+	// default check in case invalid mode is configured through request config
+	default:
+		return fmt.Errorf("invalid accountID endpoint mode %s, must be preferred/required/disabled", mode)
+	}
+
+	return nil
+}
+
+func addUserAgentRetryMode(stack *middleware.Stack, options Options) error {
+	ua, err := getOrAddRequestUserAgent(stack)
+	if err != nil {
+		return err
+	}
+
+	switch options.Retryer.(type) {
+	case *retry.Standard:
+		ua.AddUserAgentFeature(awsmiddleware.UserAgentFeatureRetryModeStandard)
+	case *retry.AdaptiveMode:
+		ua.AddUserAgentFeature(awsmiddleware.UserAgentFeatureRetryModeAdaptive)
+	}
+	return nil
+}
+
+func addRecursionDetection(stack *middleware.Stack) error {
+	return stack.Build.Add(&awsmiddleware.RecursionDetection{}, middleware.After)
+}
+
+func addRequestIDRetrieverMiddleware(stack *middleware.Stack) error {
+	return stack.Deserialize.Insert(&awsmiddleware.RequestIDRetriever{}, "OperationDeserializer", middleware.Before)
+
+}
+
+func addResponseErrorMiddleware(stack *middleware.Stack) error {
+	return stack.Deserialize.Insert(&awshttp.ResponseErrorWrapper{}, "RequestIDRetriever", middleware.Before)
+
+}
+
+// HTTPPresignerV4 represents presigner interface used by presign url client
+type HTTPPresignerV4 interface {
+	PresignHTTP(
+		ctx context.Context, credentials aws.Credentials, r *http.Request,
+		payloadHash string, service string, region string, signingTime time.Time,
+		optFns ...func(*v4.SignerOptions),
+	) (url string, signedHeader http.Header, err error)
+}
+
+// PresignOptions represents the presign client options
+type PresignOptions struct {
+
+	// ClientOptions are list of functional options to mutate client options used by
+	// the presign client.
+	ClientOptions []func(*Options)
+
+	// Presigner is the presigner used by the presign url client
+	Presigner HTTPPresignerV4
+}
+
+func (o PresignOptions) copy() PresignOptions {
+	clientOptions := make([]func(*Options), len(o.ClientOptions))
+	copy(clientOptions, o.ClientOptions)
+	o.ClientOptions = clientOptions
+	return o
+}
+
+// WithPresignClientFromClientOptions is a helper utility to retrieve a function
+// that takes PresignOption as input
+func WithPresignClientFromClientOptions(optFns ...func(*Options)) func(*PresignOptions) {
+	return withPresignClientFromClientOptions(optFns).options
+}
+
+type withPresignClientFromClientOptions []func(*Options)
+
+func (w withPresignClientFromClientOptions) options(o *PresignOptions) {
+	o.ClientOptions = append(o.ClientOptions, w...)
+}
+
+// PresignClient represents the presign url client
+type PresignClient struct {
+	client  *Client
+	options PresignOptions
+}
+
+// NewPresignClient generates a presign client using provided API Client and
+// presign options
+func NewPresignClient(c *Client, optFns ...func(*PresignOptions)) *PresignClient {
+	var options PresignOptions
+	for _, fn := range optFns {
+		fn(&options)
+	}
+	if len(options.ClientOptions) != 0 {
+		c = New(c.options, options.ClientOptions...)
+	}
+
+	if options.Presigner == nil {
+		options.Presigner = newDefaultV4Signer(c.options)
+	}
+
+	return &PresignClient{
+		client:  c,
+		options: options,
+	}
+}
+
+func withNopHTTPClientAPIOption(o *Options) {
+	o.HTTPClient = smithyhttp.NopClient{}
+}
+
+type presignContextPolyfillMiddleware struct {
+}
+
+func (*presignContextPolyfillMiddleware) ID() string {
+	return "presignContextPolyfill"
+}
+
+func (m *presignContextPolyfillMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (
+	out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
+) {
+	rscheme := getResolvedAuthScheme(ctx)
+	if rscheme == nil {
+		return out, metadata, fmt.Errorf("no resolved auth scheme")
+	}
+
+	schemeID := rscheme.Scheme.SchemeID()
+
+	if schemeID == "aws.auth#sigv4" || schemeID == "com.amazonaws.s3#sigv4express" {
+		if sn, ok := smithyhttp.GetSigV4SigningName(&rscheme.SignerProperties); ok {
+			ctx = awsmiddleware.SetSigningName(ctx, sn)
+		}
+		if sr, ok := smithyhttp.GetSigV4SigningRegion(&rscheme.SignerProperties); ok {
+			ctx = awsmiddleware.SetSigningRegion(ctx, sr)
+		}
+	} else if schemeID == "aws.auth#sigv4a" {
+		if sn, ok := smithyhttp.GetSigV4ASigningName(&rscheme.SignerProperties); ok {
+			ctx = awsmiddleware.SetSigningName(ctx, sn)
+		}
+		if sr, ok := smithyhttp.GetSigV4ASigningRegions(&rscheme.SignerProperties); ok {
+			ctx = awsmiddleware.SetSigningRegion(ctx, sr[0])
+		}
+	}
+
+	return next.HandleFinalize(ctx, in)
+}
+
+type presignConverter PresignOptions
+
+func (c presignConverter) convertToPresignMiddleware(stack *middleware.Stack, options Options) (err error) {
+	if _, ok := stack.Finalize.Get((*acceptencodingcust.DisableGzip)(nil).ID()); ok {
+		stack.Finalize.Remove((*acceptencodingcust.DisableGzip)(nil).ID())
+	}
+	if _, ok := stack.Finalize.Get((*retry.Attempt)(nil).ID()); ok {
+		stack.Finalize.Remove((*retry.Attempt)(nil).ID())
+	}
+	if _, ok := stack.Finalize.Get((*retry.MetricsHeader)(nil).ID()); ok {
+		stack.Finalize.Remove((*retry.MetricsHeader)(nil).ID())
+	}
+	stack.Deserialize.Clear()
+	stack.Build.Remove((*awsmiddleware.ClientRequestID)(nil).ID())
+	stack.Build.Remove("UserAgent")
+	if err := stack.Finalize.Insert(&presignContextPolyfillMiddleware{}, "Signing", middleware.Before); err != nil {
+		return err
+	}
+
+	pmw := v4.NewPresignHTTPRequestMiddleware(v4.PresignHTTPRequestMiddlewareOptions{
+		CredentialsProvider: options.Credentials,
+		Presigner:           c.Presigner,
+		LogSigning:          options.ClientLogMode.IsSigning(),
+	})
+	if _, err := stack.Finalize.Swap("Signing", pmw); err != nil {
+		return err
+	}
+	if err = smithyhttp.AddNoPayloadDefaultContentTypeRemover(stack); err != nil {
+		return err
+	}
+	// convert request to a GET request
+	err = query.AddAsGetRequestMiddleware(stack)
+	if err != nil {
+		return err
+	}
+	err = presignedurlcust.AddAsIsPresigningMiddleware(stack)
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+func addRequestResponseLogging(stack *middleware.Stack, o Options) error {
+	return stack.Deserialize.Add(&smithyhttp.RequestResponseLogger{
+		LogRequest:          o.ClientLogMode.IsRequest(),
+		LogRequestWithBody:  o.ClientLogMode.IsRequestWithBody(),
+		LogResponse:         o.ClientLogMode.IsResponse(),
+		LogResponseWithBody: o.ClientLogMode.IsResponseWithBody(),
+	}, middleware.After)
+}
+
+type disableHTTPSMiddleware struct {
+	DisableHTTPS bool
+}
+
+func (*disableHTTPSMiddleware) ID() string {
+	return "disableHTTPS"
+}
+
+func (m *disableHTTPSMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (
+	out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
+) {
+	req, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, fmt.Errorf("unknown transport type %T", in.Request)
+	}
+
+	if m.DisableHTTPS && !smithyhttp.GetHostnameImmutable(ctx) {
+		req.URL.Scheme = "http"
+	}
+
+	return next.HandleFinalize(ctx, in)
+}
+
+func addDisableHTTPSMiddleware(stack *middleware.Stack, o Options) error {
+	return stack.Finalize.Insert(&disableHTTPSMiddleware{
+		DisableHTTPS: o.EndpointOptions.DisableHTTPS,
+	}, "ResolveEndpointV2", middleware.After)
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRole.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRole.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRole.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRole.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,520 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package sts
+
+import (
+	"context"
+	"fmt"
+	awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+	"github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+	"github.com/aws/aws-sdk-go-v2/service/sts/types"
+	"github.com/aws/smithy-go/middleware"
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Returns a set of temporary security credentials that you can use to access
+// Amazon Web Services resources. These temporary credentials consist of an access
+// key ID, a secret access key, and a security token. Typically, you use AssumeRole
+// within your account or for cross-account access. For a comparison of AssumeRole
+// with other API operations that produce temporary credentials, see [Requesting Temporary Security Credentials]and [Comparing the Amazon Web Services STS API operations] in the
+// IAM User Guide.
+//
+// # Permissions
+//
+// The temporary security credentials created by AssumeRole can be used to make
+// API calls to any Amazon Web Services service with the following exception: You
+// cannot call the Amazon Web Services STS GetFederationToken or GetSessionToken
+// API operations.
+//
+// (Optional) You can pass inline or managed [session policies] to this operation. You can pass a
+// single JSON policy document to use as an inline session policy. You can also
+// specify up to 10 managed policy Amazon Resource Names (ARNs) to use as managed
+// session policies. The plaintext that you use for both inline and managed session
+// policies can't exceed 2,048 characters. Passing policies to this operation
+// returns new temporary credentials. The resulting session's permissions are the
+// intersection of the role's identity-based policy and the session policies. You
+// can use the role's temporary credentials in subsequent Amazon Web Services API
+// calls to access resources in the account that owns the role. You cannot use
+// session policies to grant more permissions than those allowed by the
+// identity-based policy of the role that is being assumed. For more information,
+// see [Session Policies]in the IAM User Guide.
+//
+// When you create a role, you create two policies: a role trust policy that
+// specifies who can assume the role, and a permissions policy that specifies what
+// can be done with the role. You specify the trusted principal that is allowed to
+// assume the role in the role trust policy.
+//
+// To assume a role from a different account, your Amazon Web Services account
+// must be trusted by the role. The trust relationship is defined in the role's
+// trust policy when the role is created. That trust policy states which accounts
+// are allowed to delegate that access to users in the account.
+//
+// A user who wants to access a role in a different account must also have
+// permissions that are delegated from the account administrator. The administrator
+// must attach a policy that allows the user to call AssumeRole for the ARN of the
+// role in the other account.
+//
+// To allow a user to assume a role in the same account, you can do either of the
+// following:
+//
+//   - Attach a policy to the user that allows the user to call AssumeRole (as long
+//     as the role's trust policy trusts the account).
+//
+//   - Add the user as a principal directly in the role's trust policy.
+//
+// You can do either because the role’s trust policy acts as an IAM resource-based
+// policy. When a resource-based policy grants access to a principal in the same
+// account, no additional identity-based policy is required. For more information
+// about trust policies and resource-based policies, see [IAM Policies]in the IAM User Guide.
+//
+// # Tags
+//
+// (Optional) You can pass tag key-value pairs to your session. These tags are
+// called session tags. For more information about session tags, see [Passing Session Tags in STS]in the IAM
+// User Guide.
+//
+// An administrator must grant you the permissions necessary to pass session tags.
+// The administrator can also create granular permissions to allow you to pass only
+// specific session tags. For more information, see [Tutorial: Using Tags for Attribute-Based Access Control]in the IAM User Guide.
+//
+// You can set the session tags as transitive. Transitive tags persist during role
+// chaining. For more information, see [Chaining Roles with Session Tags]in the IAM User Guide.
+//
+// # Using MFA with AssumeRole
+//
+// (Optional) You can include multi-factor authentication (MFA) information when
+// you call AssumeRole . This is useful for cross-account scenarios to ensure that
+// the user that assumes the role has been authenticated with an Amazon Web
+// Services MFA device. In that scenario, the trust policy of the role being
+// assumed includes a condition that tests for MFA authentication. If the caller
+// does not include valid MFA information, the request to assume the role is
+// denied. The condition in a trust policy that tests for MFA authentication might
+// look like the following example.
+//
+//	"Condition": {"Bool": {"aws:MultiFactorAuthPresent": true}}
+//
+// For more information, see [Configuring MFA-Protected API Access] in the IAM User Guide guide.
+//
+// To use MFA with AssumeRole , you pass values for the SerialNumber and TokenCode
+// parameters. The SerialNumber value identifies the user's hardware or virtual
+// MFA device. The TokenCode is the time-based one-time password (TOTP) that the
+// MFA device produces.
+//
+// [Configuring MFA-Protected API Access]: https://docs.aws.amazon.com/IAM/latest/UserGuide/MFAProtectedAPI.html
+// [Session Policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session
+// [Passing Session Tags in STS]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html
+// [Chaining Roles with Session Tags]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining
+// [Comparing the Amazon Web Services STS API operations]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison
+// [session policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session
+// [IAM Policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html
+// [Requesting Temporary Security Credentials]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html
+// [Tutorial: Using Tags for Attribute-Based Access Control]: https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html
+func (c *Client) AssumeRole(ctx context.Context, params *AssumeRoleInput, optFns ...func(*Options)) (*AssumeRoleOutput, error) {
+	if params == nil {
+		params = &AssumeRoleInput{}
+	}
+
+	result, metadata, err := c.invokeOperation(ctx, "AssumeRole", params, optFns, c.addOperationAssumeRoleMiddlewares)
+	if err != nil {
+		return nil, err
+	}
+
+	out := result.(*AssumeRoleOutput)
+	out.ResultMetadata = metadata
+	return out, nil
+}
+
+type AssumeRoleInput struct {
+
+	// The Amazon Resource Name (ARN) of the role to assume.
+	//
+	// This member is required.
+	RoleArn *string
+
+	// An identifier for the assumed role session.
+	//
+	// Use the role session name to uniquely identify a session when the same role is
+	// assumed by different principals or for different reasons. In cross-account
+	// scenarios, the role session name is visible to, and can be logged by the account
+	// that owns the role. The role session name is also used in the ARN of the assumed
+	// role principal. This means that subsequent cross-account API requests that use
+	// the temporary security credentials will expose the role session name to the
+	// external account in their CloudTrail logs.
+	//
+	// The regex used to validate this parameter is a string of characters consisting
+	// of upper- and lower-case alphanumeric characters with no spaces. You can also
+	// include underscores or any of the following characters: =,.@-
+	//
+	// This member is required.
+	RoleSessionName *string
+
+	// The duration, in seconds, of the role session. The value specified can range
+	// from 900 seconds (15 minutes) up to the maximum session duration set for the
+	// role. The maximum session duration setting can have a value from 1 hour to 12
+	// hours. If you specify a value higher than this setting or the administrator
+	// setting (whichever is lower), the operation fails. For example, if you specify a
+	// session duration of 12 hours, but your administrator set the maximum session
+	// duration to 6 hours, your operation fails.
+	//
+	// Role chaining limits your Amazon Web Services CLI or Amazon Web Services API
+	// role session to a maximum of one hour. When you use the AssumeRole API
+	// operation to assume a role, you can specify the duration of your role session
+	// with the DurationSeconds parameter. You can specify a parameter value of up to
+	// 43200 seconds (12 hours), depending on the maximum session duration setting for
+	// your role. However, if you assume a role using role chaining and provide a
+	// DurationSeconds parameter value greater than one hour, the operation fails. To
+	// learn how to view the maximum value for your role, see [View the Maximum Session Duration Setting for a Role]in the IAM User Guide.
+	//
+	// By default, the value is set to 3600 seconds.
+	//
+	// The DurationSeconds parameter is separate from the duration of a console
+	// session that you might request using the returned credentials. The request to
+	// the federation endpoint for a console sign-in token takes a SessionDuration
+	// parameter that specifies the maximum length of the console session. For more
+	// information, see [Creating a URL that Enables Federated Users to Access the Amazon Web Services Management Console]in the IAM User Guide.
+	//
+	// [View the Maximum Session Duration Setting for a Role]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session
+	// [Creating a URL that Enables Federated Users to Access the Amazon Web Services Management Console]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html
+	DurationSeconds *int32
+
+	// A unique identifier that might be required when you assume a role in another
+	// account. If the administrator of the account to which the role belongs provided
+	// you with an external ID, then provide that value in the ExternalId parameter.
+	// This value can be any string, such as a passphrase or account number. A
+	// cross-account role is usually set up to trust everyone in an account. Therefore,
+	// the administrator of the trusting account might send an external ID to the
+	// administrator of the trusted account. That way, only someone with the ID can
+	// assume the role, rather than everyone in the account. For more information about
+	// the external ID, see [How to Use an External ID When Granting Access to Your Amazon Web Services Resources to a Third Party]in the IAM User Guide.
+	//
+	// The regex used to validate this parameter is a string of characters consisting
+	// of upper- and lower-case alphanumeric characters with no spaces. You can also
+	// include underscores or any of the following characters: =,.@:/-
+	//
+	// [How to Use an External ID When Granting Access to Your Amazon Web Services Resources to a Third Party]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-user_externalid.html
+	ExternalId *string
+
+	// An IAM policy in JSON format that you want to use as an inline session policy.
+	//
+	// This parameter is optional. Passing policies to this operation returns new
+	// temporary credentials. The resulting session's permissions are the intersection
+	// of the role's identity-based policy and the session policies. You can use the
+	// role's temporary credentials in subsequent Amazon Web Services API calls to
+	// access resources in the account that owns the role. You cannot use session
+	// policies to grant more permissions than those allowed by the identity-based
+	// policy of the role that is being assumed. For more information, see [Session Policies]in the IAM
+	// User Guide.
+	//
+	// The plaintext that you use for both inline and managed session policies can't
+	// exceed 2,048 characters. The JSON policy characters can be any ASCII character
+	// from the space character to the end of the valid character list (\u0020 through
+	// \u00FF). It can also include the tab (\u0009), linefeed (\u000A), and carriage
+	// return (\u000D) characters.
+	//
+	// An Amazon Web Services conversion compresses the passed inline session policy,
+	// managed policy ARNs, and session tags into a packed binary format that has a
+	// separate limit. Your request can fail for this limit even if your plaintext
+	// meets the other requirements. The PackedPolicySize response element indicates
+	// by percentage how close the policies and tags for your request are to the upper
+	// size limit.
+	//
+	// [Session Policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session
+	Policy *string
+
+	// The Amazon Resource Names (ARNs) of the IAM managed policies that you want to
+	// use as managed session policies. The policies must exist in the same account as
+	// the role.
+	//
+	// This parameter is optional. You can provide up to 10 managed policy ARNs.
+	// However, the plaintext that you use for both inline and managed session policies
+	// can't exceed 2,048 characters. For more information about ARNs, see [Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces]in the
+	// Amazon Web Services General Reference.
+	//
+	// An Amazon Web Services conversion compresses the passed inline session policy,
+	// managed policy ARNs, and session tags into a packed binary format that has a
+	// separate limit. Your request can fail for this limit even if your plaintext
+	// meets the other requirements. The PackedPolicySize response element indicates
+	// by percentage how close the policies and tags for your request are to the upper
+	// size limit.
+	//
+	// Passing policies to this operation returns new temporary credentials. The
+	// resulting session's permissions are the intersection of the role's
+	// identity-based policy and the session policies. You can use the role's temporary
+	// credentials in subsequent Amazon Web Services API calls to access resources in
+	// the account that owns the role. You cannot use session policies to grant more
+	// permissions than those allowed by the identity-based policy of the role that is
+	// being assumed. For more information, see [Session Policies]in the IAM User Guide.
+	//
+	// [Session Policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session
+	// [Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces]: https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html
+	PolicyArns []types.PolicyDescriptorType
+
+	// A list of previously acquired trusted context assertions in the format of a
+	// JSON array. The trusted context assertion is signed and encrypted by Amazon Web
+	// Services STS.
+	//
+	// The following is an example of a ProvidedContext value that includes a single
+	// trusted context assertion and the ARN of the context provider from which the
+	// trusted context assertion was generated.
+	//
+	//     [{"ProviderArn":"arn:aws:iam::aws:contextProvider/IdentityCenter","ContextAssertion":"trusted-context-assertion"}]
+	ProvidedContexts []types.ProvidedContext
+
+	// The identification number of the MFA device that is associated with the user
+	// who is making the AssumeRole call. Specify this value if the trust policy of
+	// the role being assumed includes a condition that requires MFA authentication.
+	// The value is either the serial number for a hardware device (such as
+	// GAHT12345678 ) or an Amazon Resource Name (ARN) for a virtual device (such as
+	// arn:aws:iam::123456789012:mfa/user ).
+	//
+	// The regex used to validate this parameter is a string of characters consisting
+	// of upper- and lower-case alphanumeric characters with no spaces. You can also
+	// include underscores or any of the following characters: =,.@-
+	SerialNumber *string
+
+	// The source identity specified by the principal that is calling the AssumeRole
+	// operation.
+	//
+	// You can require users to specify a source identity when they assume a role. You
+	// do this by using the sts:SourceIdentity condition key in a role trust policy.
+	// You can use source identity information in CloudTrail logs to determine who took
+	// actions with a role. You can use the aws:SourceIdentity condition key to
+	// further control access to Amazon Web Services resources based on the value of
+	// source identity. For more information about using source identity, see [Monitor and control actions taken with assumed roles]in the
+	// IAM User Guide.
+	//
+	// The regex used to validate this parameter is a string of characters consisting
+	// of upper- and lower-case alphanumeric characters with no spaces. You can also
+	// include underscores or any of the following characters: =,.@-. You cannot use a
+	// value that begins with the text aws: . This prefix is reserved for Amazon Web
+	// Services internal use.
+	//
+	// [Monitor and control actions taken with assumed roles]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html
+	SourceIdentity *string
+
+	// A list of session tags that you want to pass. Each session tag consists of a
+	// key name and an associated value. For more information about session tags, see [Tagging Amazon Web Services STS Sessions]
+	// in the IAM User Guide.
+	//
+	// This parameter is optional. You can pass up to 50 session tags. The plaintext
+	// session tag keys can’t exceed 128 characters, and the values can’t exceed 256
+	// characters. For these and additional limits, see [IAM and STS Character Limits]in the IAM User Guide.
+	//
+	// An Amazon Web Services conversion compresses the passed inline session policy,
+	// managed policy ARNs, and session tags into a packed binary format that has a
+	// separate limit. Your request can fail for this limit even if your plaintext
+	// meets the other requirements. The PackedPolicySize response element indicates
+	// by percentage how close the policies and tags for your request are to the upper
+	// size limit.
+	//
+	// You can pass a session tag with the same key as a tag that is already attached
+	// to the role. When you do, session tags override a role tag with the same key.
+	//
+	// Tag key–value pairs are not case sensitive, but case is preserved. This means
+	// that you cannot have separate Department and department tag keys. Assume that
+	// the role has the Department = Marketing tag and you pass the department =
+	// engineering session tag. Department and department are not saved as separate
+	// tags, and the session tag passed in the request takes precedence over the role
+	// tag.
+	//
+	// Additionally, if you used temporary credentials to perform this operation, the
+	// new session inherits any transitive session tags from the calling session. If
+	// you pass a session tag with the same key as an inherited tag, the operation
+	// fails. To view the inherited tags for a session, see the CloudTrail logs. For
+	// more information, see [Viewing Session Tags in CloudTrail]in the IAM User Guide.
+	//
+	// [Tagging Amazon Web Services STS Sessions]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html
+	// [IAM and STS Character Limits]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length
+	// [Viewing Session Tags in CloudTrail]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_ctlogs
+	Tags []types.Tag
+
+	// The value provided by the MFA device, if the trust policy of the role being
+	// assumed requires MFA. (In other words, if the policy includes a condition that
+	// tests for MFA). If the role being assumed requires MFA and if the TokenCode
+	// value is missing or expired, the AssumeRole call returns an "access denied"
+	// error.
+	//
+	// The format for this parameter, as described by its regex pattern, is a sequence
+	// of six numeric digits.
+	TokenCode *string
+
+	// A list of keys for session tags that you want to set as transitive. If you set
+	// a tag key as transitive, the corresponding key and value passes to subsequent
+	// sessions in a role chain. For more information, see [Chaining Roles with Session Tags]in the IAM User Guide.
+	//
+	// This parameter is optional. When you set session tags as transitive, the
+	// session policy and session tags packed binary limit is not affected.
+	//
+	// If you choose not to specify a transitive tag key, then no tags are passed from
+	// this session to any subsequent sessions.
+	//
+	// [Chaining Roles with Session Tags]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining
+	TransitiveTagKeys []string
+
+	noSmithyDocumentSerde
+}
+
+// Contains the response to a successful AssumeRole request, including temporary Amazon Web
+// Services credentials that can be used to make Amazon Web Services requests.
+type AssumeRoleOutput struct {
+
+	// The Amazon Resource Name (ARN) and the assumed role ID, which are identifiers
+	// that you can use to refer to the resulting temporary security credentials. For
+	// example, you can reference these credentials as a principal in a resource-based
+	// policy by using the ARN or assumed role ID. The ARN and ID include the
+	// RoleSessionName that you specified when you called AssumeRole .
+	AssumedRoleUser *types.AssumedRoleUser
+
+	// The temporary security credentials, which include an access key ID, a secret
+	// access key, and a security (or session) token.
+	//
+	// The size of the security token that STS API operations return is not fixed. We
+	// strongly recommend that you make no assumptions about the maximum size.
+	Credentials *types.Credentials
+
+	// A percentage value that indicates the packed size of the session policies and
+	// session tags combined passed in the request. The request fails if the packed
+	// size is greater than 100 percent, which means the policies and tags exceeded the
+	// allowed space.
+	PackedPolicySize *int32
+
+	// The source identity specified by the principal that is calling the AssumeRole
+	// operation.
+	//
+	// You can require users to specify a source identity when they assume a role. You
+	// do this by using the sts:SourceIdentity condition key in a role trust policy.
+	// You can use source identity information in CloudTrail logs to determine who took
+	// actions with a role. You can use the aws:SourceIdentity condition key to
+	// further control access to Amazon Web Services resources based on the value of
+	// source identity. For more information about using source identity, see [Monitor and control actions taken with assumed roles]in the
+	// IAM User Guide.
+	//
+	// The regex used to validate this parameter is a string of characters consisting
+	// of upper- and lower-case alphanumeric characters with no spaces. You can also
+	// include underscores or any of the following characters: =,.@-
+	//
+	// [Monitor and control actions taken with assumed roles]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html
+	SourceIdentity *string
+
+	// Metadata pertaining to the operation's result.
+	ResultMetadata middleware.Metadata
+
+	noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationAssumeRoleMiddlewares(stack *middleware.Stack, options Options) (err error) {
+	if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+		return err
+	}
+	err = stack.Serialize.Add(&awsAwsquery_serializeOpAssumeRole{}, middleware.After)
+	if err != nil {
+		return err
+	}
+	err = stack.Deserialize.Add(&awsAwsquery_deserializeOpAssumeRole{}, middleware.After)
+	if err != nil {
+		return err
+	}
+	if err := addProtocolFinalizerMiddlewares(stack, options, "AssumeRole"); err != nil {
+		return fmt.Errorf("add protocol finalizers: %v", err)
+	}
+
+	if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+		return err
+	}
+	if err = addSetLoggerMiddleware(stack, options); err != nil {
+		return err
+	}
+	if err = addClientRequestID(stack); err != nil {
+		return err
+	}
+	if err = addComputeContentLength(stack); err != nil {
+		return err
+	}
+	if err = addResolveEndpointMiddleware(stack, options); err != nil {
+		return err
+	}
+	if err = addComputePayloadSHA256(stack); err != nil {
+		return err
+	}
+	if err = addRetry(stack, options); err != nil {
+		return err
+	}
+	if err = addRawResponseToMetadata(stack); err != nil {
+		return err
+	}
+	if err = addRecordResponseTiming(stack); err != nil {
+		return err
+	}
+	if err = addClientUserAgent(stack, options); err != nil {
+		return err
+	}
+	if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+		return err
+	}
+	if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+		return err
+	}
+	if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+		return err
+	}
+	if err = addTimeOffsetBuild(stack, c); err != nil {
+		return err
+	}
+	if err = addUserAgentRetryMode(stack, options); err != nil {
+		return err
+	}
+	if err = addOpAssumeRoleValidationMiddleware(stack); err != nil {
+		return err
+	}
+	if err = stack.Initialize.Add(newServiceMetadataMiddleware_opAssumeRole(options.Region), middleware.Before); err != nil {
+		return err
+	}
+	if err = addRecursionDetection(stack); err != nil {
+		return err
+	}
+	if err = addRequestIDRetrieverMiddleware(stack); err != nil {
+		return err
+	}
+	if err = addResponseErrorMiddleware(stack); err != nil {
+		return err
+	}
+	if err = addRequestResponseLogging(stack, options); err != nil {
+		return err
+	}
+	if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+		return err
+	}
+	return nil
+}
+
+func newServiceMetadataMiddleware_opAssumeRole(region string) *awsmiddleware.RegisterServiceMetadata {
+	return &awsmiddleware.RegisterServiceMetadata{
+		Region:        region,
+		ServiceID:     ServiceID,
+		OperationName: "AssumeRole",
+	}
+}
+
+// PresignAssumeRole is used to generate a presigned HTTP Request which contains
+// presigned URL, signed headers and HTTP method used.
+func (c *PresignClient) PresignAssumeRole(ctx context.Context, params *AssumeRoleInput, optFns ...func(*PresignOptions)) (*v4.PresignedHTTPRequest, error) {
+	if params == nil {
+		params = &AssumeRoleInput{}
+	}
+	options := c.options.copy()
+	for _, fn := range optFns {
+		fn(&options)
+	}
+	clientOptFns := append(options.ClientOptions, withNopHTTPClientAPIOption)
+
+	result, _, err := c.client.invokeOperation(ctx, "AssumeRole", params, clientOptFns,
+		c.client.addOperationAssumeRoleMiddlewares,
+		presignConverter(options).convertToPresignMiddleware,
+	)
+	if err != nil {
+		return nil, err
+	}
+
+	out := result.(*v4.PresignedHTTPRequest)
+	return out, nil
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithSAML.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithSAML.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithSAML.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithSAML.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,436 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package sts
+
+import (
+	"context"
+	"fmt"
+	awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+	"github.com/aws/aws-sdk-go-v2/service/sts/types"
+	"github.com/aws/smithy-go/middleware"
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Returns a set of temporary security credentials for users who have been
+// authenticated via a SAML authentication response. This operation provides a
+// mechanism for tying an enterprise identity store or directory to role-based
+// Amazon Web Services access without user-specific credentials or configuration.
+// For a comparison of AssumeRoleWithSAML with the other API operations that
+// produce temporary credentials, see [Requesting Temporary Security Credentials]and [Comparing the Amazon Web Services STS API operations] in the IAM User Guide.
+//
+// The temporary security credentials returned by this operation consist of an
+// access key ID, a secret access key, and a security token. Applications can use
+// these temporary security credentials to sign calls to Amazon Web Services
+// services.
+//
+// # Session Duration
+//
+// By default, the temporary security credentials created by AssumeRoleWithSAML
+// last for one hour. However, you can use the optional DurationSeconds parameter
+// to specify the duration of your session. Your role session lasts for the
+// duration that you specify, or until the time specified in the SAML
+// authentication response's SessionNotOnOrAfter value, whichever is shorter. You
+// can provide a DurationSeconds value from 900 seconds (15 minutes) up to the
+// maximum session duration setting for the role. This setting can have a value
+// from 1 hour to 12 hours. To learn how to view the maximum value for your role,
+// see [View the Maximum Session Duration Setting for a Role]in the IAM User Guide. The maximum session duration limit applies when you
+// use the AssumeRole* API operations or the assume-role* CLI commands. However
+// the limit does not apply when you use those operations to create a console URL.
+// For more information, see [Using IAM Roles]in the IAM User Guide.
+//
+// [Role chaining]limits your CLI or Amazon Web Services API role session to a maximum of one
+// hour. When you use the AssumeRole API operation to assume a role, you can
+// specify the duration of your role session with the DurationSeconds parameter.
+// You can specify a parameter value of up to 43200 seconds (12 hours), depending
+// on the maximum session duration setting for your role. However, if you assume a
+// role using role chaining and provide a DurationSeconds parameter value greater
+// than one hour, the operation fails.
+//
+// # Permissions
+//
+// The temporary security credentials created by AssumeRoleWithSAML can be used to
+// make API calls to any Amazon Web Services service with the following exception:
+// you cannot call the STS GetFederationToken or GetSessionToken API operations.
+//
+// (Optional) You can pass inline or managed [session policies] to this operation. You can pass a
+// single JSON policy document to use as an inline session policy. You can also
+// specify up to 10 managed policy Amazon Resource Names (ARNs) to use as managed
+// session policies. The plaintext that you use for both inline and managed session
+// policies can't exceed 2,048 characters. Passing policies to this operation
+// returns new temporary credentials. The resulting session's permissions are the
+// intersection of the role's identity-based policy and the session policies. You
+// can use the role's temporary credentials in subsequent Amazon Web Services API
+// calls to access resources in the account that owns the role. You cannot use
+// session policies to grant more permissions than those allowed by the
+// identity-based policy of the role that is being assumed. For more information,
+// see [Session Policies]in the IAM User Guide.
+//
+// Calling AssumeRoleWithSAML does not require the use of Amazon Web Services
+// security credentials. The identity of the caller is validated by using keys in
+// the metadata document that is uploaded for the SAML provider entity for your
+// identity provider.
+//
+// Calling AssumeRoleWithSAML can result in an entry in your CloudTrail logs. The
+// entry includes the value in the NameID element of the SAML assertion. We
+// recommend that you use a NameIDType that is not associated with any personally
+// identifiable information (PII). For example, you could instead use the
+// persistent identifier ( urn:oasis:names:tc:SAML:2.0:nameid-format:persistent ).
+//
+// # Tags
+//
+// (Optional) You can configure your IdP to pass attributes into your SAML
+// assertion as session tags. Each session tag consists of a key name and an
+// associated value. For more information about session tags, see [Passing Session Tags in STS]in the IAM User
+// Guide.
+//
+// You can pass up to 50 session tags. The plaintext session tag keys can’t exceed
+// 128 characters and the values can’t exceed 256 characters. For these and
+// additional limits, see [IAM and STS Character Limits]in the IAM User Guide.
+//
+// An Amazon Web Services conversion compresses the passed inline session policy,
+// managed policy ARNs, and session tags into a packed binary format that has a
+// separate limit. Your request can fail for this limit even if your plaintext
+// meets the other requirements. The PackedPolicySize response element indicates
+// by percentage how close the policies and tags for your request are to the upper
+// size limit.
+//
+// You can pass a session tag with the same key as a tag that is attached to the
+// role. When you do, session tags override the role's tags with the same key.
+//
+// An administrator must grant you the permissions necessary to pass session tags.
+// The administrator can also create granular permissions to allow you to pass only
+// specific session tags. For more information, see [Tutorial: Using Tags for Attribute-Based Access Control]in the IAM User Guide.
+//
+// You can set the session tags as transitive. Transitive tags persist during role
+// chaining. For more information, see [Chaining Roles with Session Tags]in the IAM User Guide.
+//
+// # SAML Configuration
+//
+// Before your application can call AssumeRoleWithSAML , you must configure your
+// SAML identity provider (IdP) to issue the claims required by Amazon Web
+// Services. Additionally, you must use Identity and Access Management (IAM) to
+// create a SAML provider entity in your Amazon Web Services account that
+// represents your identity provider. You must also create an IAM role that
+// specifies this SAML provider in its trust policy.
+//
+// For more information, see the following resources:
+//
+// [About SAML 2.0-based Federation]
+//   - in the IAM User Guide.
+//
+// [Creating SAML Identity Providers]
+//   - in the IAM User Guide.
+//
+// [Configuring a Relying Party and Claims]
+//   - in the IAM User Guide.
+//
+// [Creating a Role for SAML 2.0 Federation]
+//   - in the IAM User Guide.
+//
+// [View the Maximum Session Duration Setting for a Role]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session
+// [Creating a Role for SAML 2.0 Federation]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-idp_saml.html
+// [IAM and STS Character Limits]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length
+// [Comparing the Amazon Web Services STS API operations]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison
+// [Creating SAML Identity Providers]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml.html
+// [session policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session
+// [Requesting Temporary Security Credentials]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html
+// [Tutorial: Using Tags for Attribute-Based Access Control]: https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html
+// [Configuring a Relying Party and Claims]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml_relying-party.html
+// [Role chaining]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_terms-and-concepts.html#iam-term-role-chaining
+// [Using IAM Roles]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html
+// [Session Policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session
+// [Passing Session Tags in STS]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html
+// [About SAML 2.0-based Federation]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_saml.html
+// [Chaining Roles with Session Tags]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining
+func (c *Client) AssumeRoleWithSAML(ctx context.Context, params *AssumeRoleWithSAMLInput, optFns ...func(*Options)) (*AssumeRoleWithSAMLOutput, error) {
+	if params == nil {
+		params = &AssumeRoleWithSAMLInput{}
+	}
+
+	result, metadata, err := c.invokeOperation(ctx, "AssumeRoleWithSAML", params, optFns, c.addOperationAssumeRoleWithSAMLMiddlewares)
+	if err != nil {
+		return nil, err
+	}
+
+	out := result.(*AssumeRoleWithSAMLOutput)
+	out.ResultMetadata = metadata
+	return out, nil
+}
+
+type AssumeRoleWithSAMLInput struct {
+
+	// The Amazon Resource Name (ARN) of the SAML provider in IAM that describes the
+	// IdP.
+	//
+	// This member is required.
+	PrincipalArn *string
+
+	// The Amazon Resource Name (ARN) of the role that the caller is assuming.
+	//
+	// This member is required.
+	RoleArn *string
+
+	// The base64 encoded SAML authentication response provided by the IdP.
+	//
+	// For more information, see [Configuring a Relying Party and Adding Claims] in the IAM User Guide.
+	//
+	// [Configuring a Relying Party and Adding Claims]: https://docs.aws.amazon.com/IAM/latest/UserGuide/create-role-saml-IdP-tasks.html
+	//
+	// This member is required.
+	SAMLAssertion *string
+
+	// The duration, in seconds, of the role session. Your role session lasts for the
+	// duration that you specify for the DurationSeconds parameter, or until the time
+	// specified in the SAML authentication response's SessionNotOnOrAfter value,
+	// whichever is shorter. You can provide a DurationSeconds value from 900 seconds
+	// (15 minutes) up to the maximum session duration setting for the role. This
+	// setting can have a value from 1 hour to 12 hours. If you specify a value higher
+	// than this setting, the operation fails. For example, if you specify a session
+	// duration of 12 hours, but your administrator set the maximum session duration to
+	// 6 hours, your operation fails. To learn how to view the maximum value for your
+	// role, see [View the Maximum Session Duration Setting for a Role]in the IAM User Guide.
+	//
+	// By default, the value is set to 3600 seconds.
+	//
+	// The DurationSeconds parameter is separate from the duration of a console
+	// session that you might request using the returned credentials. The request to
+	// the federation endpoint for a console sign-in token takes a SessionDuration
+	// parameter that specifies the maximum length of the console session. For more
+	// information, see [Creating a URL that Enables Federated Users to Access the Amazon Web Services Management Console]in the IAM User Guide.
+	//
+	// [View the Maximum Session Duration Setting for a Role]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session
+	// [Creating a URL that Enables Federated Users to Access the Amazon Web Services Management Console]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html
+	DurationSeconds *int32
+
+	// An IAM policy in JSON format that you want to use as an inline session policy.
+	//
+	// This parameter is optional. Passing policies to this operation returns new
+	// temporary credentials. The resulting session's permissions are the intersection
+	// of the role's identity-based policy and the session policies. You can use the
+	// role's temporary credentials in subsequent Amazon Web Services API calls to
+	// access resources in the account that owns the role. You cannot use session
+	// policies to grant more permissions than those allowed by the identity-based
+	// policy of the role that is being assumed. For more information, see [Session Policies]in the IAM
+	// User Guide.
+	//
+	// The plaintext that you use for both inline and managed session policies can't
+	// exceed 2,048 characters. The JSON policy characters can be any ASCII character
+	// from the space character to the end of the valid character list (\u0020 through
+	// \u00FF). It can also include the tab (\u0009), linefeed (\u000A), and carriage
+	// return (\u000D) characters.
+	//
+	// An Amazon Web Services conversion compresses the passed inline session policy,
+	// managed policy ARNs, and session tags into a packed binary format that has a
+	// separate limit. Your request can fail for this limit even if your plaintext
+	// meets the other requirements. The PackedPolicySize response element indicates
+	// by percentage how close the policies and tags for your request are to the upper
+	// size limit.
+	//
+	// [Session Policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session
+	Policy *string
+
+	// The Amazon Resource Names (ARNs) of the IAM managed policies that you want to
+	// use as managed session policies. The policies must exist in the same account as
+	// the role.
+	//
+	// This parameter is optional. You can provide up to 10 managed policy ARNs.
+	// However, the plaintext that you use for both inline and managed session policies
+	// can't exceed 2,048 characters. For more information about ARNs, see [Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces]in the
+	// Amazon Web Services General Reference.
+	//
+	// An Amazon Web Services conversion compresses the passed inline session policy,
+	// managed policy ARNs, and session tags into a packed binary format that has a
+	// separate limit. Your request can fail for this limit even if your plaintext
+	// meets the other requirements. The PackedPolicySize response element indicates
+	// by percentage how close the policies and tags for your request are to the upper
+	// size limit.
+	//
+	// Passing policies to this operation returns new temporary credentials. The
+	// resulting session's permissions are the intersection of the role's
+	// identity-based policy and the session policies. You can use the role's temporary
+	// credentials in subsequent Amazon Web Services API calls to access resources in
+	// the account that owns the role. You cannot use session policies to grant more
+	// permissions than those allowed by the identity-based policy of the role that is
+	// being assumed. For more information, see [Session Policies]in the IAM User Guide.
+	//
+	// [Session Policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session
+	// [Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces]: https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html
+	PolicyArns []types.PolicyDescriptorType
+
+	noSmithyDocumentSerde
+}
+
+// Contains the response to a successful AssumeRoleWithSAML request, including temporary Amazon Web
+// Services credentials that can be used to make Amazon Web Services requests.
+type AssumeRoleWithSAMLOutput struct {
+
+	// The identifiers for the temporary security credentials that the operation
+	// returns.
+	AssumedRoleUser *types.AssumedRoleUser
+
+	//  The value of the Recipient attribute of the SubjectConfirmationData element of
+	// the SAML assertion.
+	Audience *string
+
+	// The temporary security credentials, which include an access key ID, a secret
+	// access key, and a security (or session) token.
+	//
+	// The size of the security token that STS API operations return is not fixed. We
+	// strongly recommend that you make no assumptions about the maximum size.
+	Credentials *types.Credentials
+
+	// The value of the Issuer element of the SAML assertion.
+	Issuer *string
+
+	// A hash value based on the concatenation of the following:
+	//
+	//   - The Issuer response value.
+	//
+	//   - The Amazon Web Services account ID.
+	//
+	//   - The friendly name (the last part of the ARN) of the SAML provider in IAM.
+	//
+	// The combination of NameQualifier and Subject can be used to uniquely identify a
+	// user.
+	//
+	// The following pseudocode shows how the hash value is calculated:
+	//
+	//     BASE64 ( SHA1 ( "https://example.com/saml" + "123456789012" + "/MySAMLIdP" ) )
+	NameQualifier *string
+
+	// A percentage value that indicates the packed size of the session policies and
+	// session tags combined passed in the request. The request fails if the packed
+	// size is greater than 100 percent, which means the policies and tags exceeded the
+	// allowed space.
+	PackedPolicySize *int32
+
+	// The value in the SourceIdentity attribute in the SAML assertion.
+	//
+	// You can require users to set a source identity value when they assume a role.
+	// You do this by using the sts:SourceIdentity condition key in a role trust
+	// policy. That way, actions that are taken with the role are associated with that
+	// user. After the source identity is set, the value cannot be changed. It is
+	// present in the request for all actions that are taken by the role and persists
+	// across [chained role]sessions. You can configure your SAML identity provider to use an
+	// attribute associated with your users, like user name or email, as the source
+	// identity when calling AssumeRoleWithSAML . You do this by adding an attribute to
+	// the SAML assertion. For more information about using source identity, see [Monitor and control actions taken with assumed roles]in
+	// the IAM User Guide.
+	//
+	// The regex used to validate this parameter is a string of characters consisting
+	// of upper- and lower-case alphanumeric characters with no spaces. You can also
+	// include underscores or any of the following characters: =,.@-
+	//
+	// [chained role]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_terms-and-concepts#iam-term-role-chaining
+	// [Monitor and control actions taken with assumed roles]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html
+	SourceIdentity *string
+
+	// The value of the NameID element in the Subject element of the SAML assertion.
+	Subject *string
+
+	//  The format of the name ID, as defined by the Format attribute in the NameID
+	// element of the SAML assertion. Typical examples of the format are transient or
+	// persistent .
+	//
+	// If the format includes the prefix urn:oasis:names:tc:SAML:2.0:nameid-format ,
+	// that prefix is removed. For example,
+	// urn:oasis:names:tc:SAML:2.0:nameid-format:transient is returned as transient .
+	// If the format includes any other prefix, the format is returned with no
+	// modifications.
+	SubjectType *string
+
+	// Metadata pertaining to the operation's result.
+	ResultMetadata middleware.Metadata
+
+	noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationAssumeRoleWithSAMLMiddlewares(stack *middleware.Stack, options Options) (err error) {
+	if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+		return err
+	}
+	err = stack.Serialize.Add(&awsAwsquery_serializeOpAssumeRoleWithSAML{}, middleware.After)
+	if err != nil {
+		return err
+	}
+	err = stack.Deserialize.Add(&awsAwsquery_deserializeOpAssumeRoleWithSAML{}, middleware.After)
+	if err != nil {
+		return err
+	}
+	if err := addProtocolFinalizerMiddlewares(stack, options, "AssumeRoleWithSAML"); err != nil {
+		return fmt.Errorf("add protocol finalizers: %v", err)
+	}
+
+	if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+		return err
+	}
+	if err = addSetLoggerMiddleware(stack, options); err != nil {
+		return err
+	}
+	if err = addClientRequestID(stack); err != nil {
+		return err
+	}
+	if err = addComputeContentLength(stack); err != nil {
+		return err
+	}
+	if err = addResolveEndpointMiddleware(stack, options); err != nil {
+		return err
+	}
+	if err = addRetry(stack, options); err != nil {
+		return err
+	}
+	if err = addRawResponseToMetadata(stack); err != nil {
+		return err
+	}
+	if err = addRecordResponseTiming(stack); err != nil {
+		return err
+	}
+	if err = addClientUserAgent(stack, options); err != nil {
+		return err
+	}
+	if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+		return err
+	}
+	if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+		return err
+	}
+	if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+		return err
+	}
+	if err = addTimeOffsetBuild(stack, c); err != nil {
+		return err
+	}
+	if err = addUserAgentRetryMode(stack, options); err != nil {
+		return err
+	}
+	if err = addOpAssumeRoleWithSAMLValidationMiddleware(stack); err != nil {
+		return err
+	}
+	if err = stack.Initialize.Add(newServiceMetadataMiddleware_opAssumeRoleWithSAML(options.Region), middleware.Before); err != nil {
+		return err
+	}
+	if err = addRecursionDetection(stack); err != nil {
+		return err
+	}
+	if err = addRequestIDRetrieverMiddleware(stack); err != nil {
+		return err
+	}
+	if err = addResponseErrorMiddleware(stack); err != nil {
+		return err
+	}
+	if err = addRequestResponseLogging(stack, options); err != nil {
+		return err
+	}
+	if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+		return err
+	}
+	return nil
+}
+
+func newServiceMetadataMiddleware_opAssumeRoleWithSAML(region string) *awsmiddleware.RegisterServiceMetadata {
+	return &awsmiddleware.RegisterServiceMetadata{
+		Region:        region,
+		ServiceID:     ServiceID,
+		OperationName: "AssumeRoleWithSAML",
+	}
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithWebIdentity.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithWebIdentity.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithWebIdentity.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithWebIdentity.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,447 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package sts
+
+import (
+	"context"
+	"fmt"
+	awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+	"github.com/aws/aws-sdk-go-v2/service/sts/types"
+	"github.com/aws/smithy-go/middleware"
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Returns a set of temporary security credentials for users who have been
+// authenticated in a mobile or web application with a web identity provider.
+// Example providers include the OAuth 2.0 providers Login with Amazon and
+// Facebook, or any OpenID Connect-compatible identity provider such as Google or [Amazon Cognito federated identities].
+//
+// For mobile applications, we recommend that you use Amazon Cognito. You can use
+// Amazon Cognito with the [Amazon Web Services SDK for iOS Developer Guide]and the [Amazon Web Services SDK for Android Developer Guide] to uniquely identify a user. You can also
+// supply the user with a consistent identity throughout the lifetime of an
+// application.
+//
+// To learn more about Amazon Cognito, see [Amazon Cognito identity pools] in Amazon Cognito Developer Guide.
+//
+// Calling AssumeRoleWithWebIdentity does not require the use of Amazon Web
+// Services security credentials. Therefore, you can distribute an application (for
+// example, on mobile devices) that requests temporary security credentials without
+// including long-term Amazon Web Services credentials in the application. You also
+// don't need to deploy server-based proxy services that use long-term Amazon Web
+// Services credentials. Instead, the identity of the caller is validated by using
+// a token from the web identity provider. For a comparison of
+// AssumeRoleWithWebIdentity with the other API operations that produce temporary
+// credentials, see [Requesting Temporary Security Credentials]and [Comparing the Amazon Web Services STS API operations] in the IAM User Guide.
+//
+// The temporary security credentials returned by this API consist of an access
+// key ID, a secret access key, and a security token. Applications can use these
+// temporary security credentials to sign calls to Amazon Web Services service API
+// operations.
+//
+// # Session Duration
+//
+// By default, the temporary security credentials created by
+// AssumeRoleWithWebIdentity last for one hour. However, you can use the optional
+// DurationSeconds parameter to specify the duration of your session. You can
+// provide a value from 900 seconds (15 minutes) up to the maximum session duration
+// setting for the role. This setting can have a value from 1 hour to 12 hours. To
+// learn how to view the maximum value for your role, see [View the Maximum Session Duration Setting for a Role]in the IAM User Guide.
+// The maximum session duration limit applies when you use the AssumeRole* API
+// operations or the assume-role* CLI commands. However the limit does not apply
+// when you use those operations to create a console URL. For more information, see
+// [Using IAM Roles]in the IAM User Guide.
+//
+// # Permissions
+//
+// The temporary security credentials created by AssumeRoleWithWebIdentity can be
+// used to make API calls to any Amazon Web Services service with the following
+// exception: you cannot call the STS GetFederationToken or GetSessionToken API
+// operations.
+//
+// (Optional) You can pass inline or managed [session policies] to this operation. You can pass a
+// single JSON policy document to use as an inline session policy. You can also
+// specify up to 10 managed policy Amazon Resource Names (ARNs) to use as managed
+// session policies. The plaintext that you use for both inline and managed session
+// policies can't exceed 2,048 characters. Passing policies to this operation
+// returns new temporary credentials. The resulting session's permissions are the
+// intersection of the role's identity-based policy and the session policies. You
+// can use the role's temporary credentials in subsequent Amazon Web Services API
+// calls to access resources in the account that owns the role. You cannot use
+// session policies to grant more permissions than those allowed by the
+// identity-based policy of the role that is being assumed. For more information,
+// see [Session Policies]in the IAM User Guide.
+//
+// # Tags
+//
+// (Optional) You can configure your IdP to pass attributes into your web identity
+// token as session tags. Each session tag consists of a key name and an associated
+// value. For more information about session tags, see [Passing Session Tags in STS]in the IAM User Guide.
+//
+// You can pass up to 50 session tags. The plaintext session tag keys can’t exceed
+// 128 characters and the values can’t exceed 256 characters. For these and
+// additional limits, see [IAM and STS Character Limits]in the IAM User Guide.
+//
+// An Amazon Web Services conversion compresses the passed inline session policy,
+// managed policy ARNs, and session tags into a packed binary format that has a
+// separate limit. Your request can fail for this limit even if your plaintext
+// meets the other requirements. The PackedPolicySize response element indicates
+// by percentage how close the policies and tags for your request are to the upper
+// size limit.
+//
+// You can pass a session tag with the same key as a tag that is attached to the
+// role. When you do, the session tag overrides the role tag with the same key.
+//
+// An administrator must grant you the permissions necessary to pass session tags.
+// The administrator can also create granular permissions to allow you to pass only
+// specific session tags. For more information, see [Tutorial: Using Tags for Attribute-Based Access Control]in the IAM User Guide.
+//
+// You can set the session tags as transitive. Transitive tags persist during role
+// chaining. For more information, see [Chaining Roles with Session Tags]in the IAM User Guide.
+//
+// # Identities
+//
+// Before your application can call AssumeRoleWithWebIdentity , you must have an
+// identity token from a supported identity provider and create a role that the
+// application can assume. The role that your application assumes must trust the
+// identity provider that is associated with the identity token. In other words,
+// the identity provider must be specified in the role's trust policy.
+//
+// Calling AssumeRoleWithWebIdentity can result in an entry in your CloudTrail
+// logs. The entry includes the [Subject]of the provided web identity token. We recommend
+// that you avoid using any personally identifiable information (PII) in this
+// field. For example, you could instead use a GUID or a pairwise identifier, as [suggested in the OIDC specification].
+//
+// For more information about how to use web identity federation and the
+// AssumeRoleWithWebIdentity API, see the following resources:
+//
+// [Using Web Identity Federation API Operations for Mobile Apps]
+//   - and [Federation Through a Web-based Identity Provider].
+//
+// [Web Identity Federation Playground]
+//   - . Walk through the process of authenticating through Login with Amazon,
+//     Facebook, or Google, getting temporary security credentials, and then using
+//     those credentials to make a request to Amazon Web Services.
+//
+// [Amazon Web Services SDK for iOS Developer Guide]
+//   - and [Amazon Web Services SDK for Android Developer Guide]. These toolkits contain sample apps that show how to invoke the
+//     identity providers. The toolkits then show how to use the information from these
+//     providers to get and use temporary security credentials.
+//
+// [Web Identity Federation with Mobile Applications]
+//   - . This article discusses web identity federation and shows an example of
+//     how to use web identity federation to get access to content in Amazon S3.
+//
+// [Amazon Web Services SDK for iOS Developer Guide]: http://aws.amazon.com/sdkforios/
+// [View the Maximum Session Duration Setting for a Role]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session
+// [Web Identity Federation Playground]: https://aws.amazon.com/blogs/aws/the-aws-web-identity-federation-playground/
+// [Amazon Web Services SDK for Android Developer Guide]: http://aws.amazon.com/sdkforandroid/
+// [IAM and STS Character Limits]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length
+// [Comparing the Amazon Web Services STS API operations]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison
+// [session policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session
+// [Requesting Temporary Security Credentials]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html
+// [Subject]: http://openid.net/specs/openid-connect-core-1_0.html#Claims
+// [Tutorial: Using Tags for Attribute-Based Access Control]: https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html
+// [Amazon Cognito identity pools]: https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-identity.html
+// [Federation Through a Web-based Identity Provider]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity
+// [Using IAM Roles]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html
+// [Session Policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session
+// [Amazon Cognito federated identities]: https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-identity.html
+// [Passing Session Tags in STS]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html
+// [Chaining Roles with Session Tags]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining
+// [Web Identity Federation with Mobile Applications]: http://aws.amazon.com/articles/web-identity-federation-with-mobile-applications
+// [Using Web Identity Federation API Operations for Mobile Apps]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_oidc_manual.html
+// [suggested in the OIDC specification]: http://openid.net/specs/openid-connect-core-1_0.html#SubjectIDTypes
+func (c *Client) AssumeRoleWithWebIdentity(ctx context.Context, params *AssumeRoleWithWebIdentityInput, optFns ...func(*Options)) (*AssumeRoleWithWebIdentityOutput, error) {
+	if params == nil {
+		params = &AssumeRoleWithWebIdentityInput{}
+	}
+
+	result, metadata, err := c.invokeOperation(ctx, "AssumeRoleWithWebIdentity", params, optFns, c.addOperationAssumeRoleWithWebIdentityMiddlewares)
+	if err != nil {
+		return nil, err
+	}
+
+	out := result.(*AssumeRoleWithWebIdentityOutput)
+	out.ResultMetadata = metadata
+	return out, nil
+}
+
+type AssumeRoleWithWebIdentityInput struct {
+
+	// The Amazon Resource Name (ARN) of the role that the caller is assuming.
+	//
+	// This member is required.
+	RoleArn *string
+
+	// An identifier for the assumed role session. Typically, you pass the name or
+	// identifier that is associated with the user who is using your application. That
+	// way, the temporary security credentials that your application will use are
+	// associated with that user. This session name is included as part of the ARN and
+	// assumed role ID in the AssumedRoleUser response element.
+	//
+	// The regex used to validate this parameter is a string of characters consisting
+	// of upper- and lower-case alphanumeric characters with no spaces. You can also
+	// include underscores or any of the following characters: =,.@-
+	//
+	// This member is required.
+	RoleSessionName *string
+
+	// The OAuth 2.0 access token or OpenID Connect ID token that is provided by the
+	// identity provider. Your application must get this token by authenticating the
+	// user who is using your application with a web identity provider before the
+	// application makes an AssumeRoleWithWebIdentity call. Only tokens with RSA
+	// algorithms (RS256) are supported.
+	//
+	// This member is required.
+	WebIdentityToken *string
+
+	// The duration, in seconds, of the role session. The value can range from 900
+	// seconds (15 minutes) up to the maximum session duration setting for the role.
+	// This setting can have a value from 1 hour to 12 hours. If you specify a value
+	// higher than this setting, the operation fails. For example, if you specify a
+	// session duration of 12 hours, but your administrator set the maximum session
+	// duration to 6 hours, your operation fails. To learn how to view the maximum
+	// value for your role, see [View the Maximum Session Duration Setting for a Role]in the IAM User Guide.
+	//
+	// By default, the value is set to 3600 seconds.
+	//
+	// The DurationSeconds parameter is separate from the duration of a console
+	// session that you might request using the returned credentials. The request to
+	// the federation endpoint for a console sign-in token takes a SessionDuration
+	// parameter that specifies the maximum length of the console session. For more
+	// information, see [Creating a URL that Enables Federated Users to Access the Amazon Web Services Management Console]in the IAM User Guide.
+	//
+	// [View the Maximum Session Duration Setting for a Role]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session
+	// [Creating a URL that Enables Federated Users to Access the Amazon Web Services Management Console]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html
+	DurationSeconds *int32
+
+	// An IAM policy in JSON format that you want to use as an inline session policy.
+	//
+	// This parameter is optional. Passing policies to this operation returns new
+	// temporary credentials. The resulting session's permissions are the intersection
+	// of the role's identity-based policy and the session policies. You can use the
+	// role's temporary credentials in subsequent Amazon Web Services API calls to
+	// access resources in the account that owns the role. You cannot use session
+	// policies to grant more permissions than those allowed by the identity-based
+	// policy of the role that is being assumed. For more information, see [Session Policies]in the IAM
+	// User Guide.
+	//
+	// The plaintext that you use for both inline and managed session policies can't
+	// exceed 2,048 characters. The JSON policy characters can be any ASCII character
+	// from the space character to the end of the valid character list (\u0020 through
+	// \u00FF). It can also include the tab (\u0009), linefeed (\u000A), and carriage
+	// return (\u000D) characters.
+	//
+	// An Amazon Web Services conversion compresses the passed inline session policy,
+	// managed policy ARNs, and session tags into a packed binary format that has a
+	// separate limit. Your request can fail for this limit even if your plaintext
+	// meets the other requirements. The PackedPolicySize response element indicates
+	// by percentage how close the policies and tags for your request are to the upper
+	// size limit.
+	//
+	// [Session Policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session
+	Policy *string
+
+	// The Amazon Resource Names (ARNs) of the IAM managed policies that you want to
+	// use as managed session policies. The policies must exist in the same account as
+	// the role.
+	//
+	// This parameter is optional. You can provide up to 10 managed policy ARNs.
+	// However, the plaintext that you use for both inline and managed session policies
+	// can't exceed 2,048 characters. For more information about ARNs, see [Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces]in the
+	// Amazon Web Services General Reference.
+	//
+	// An Amazon Web Services conversion compresses the passed inline session policy,
+	// managed policy ARNs, and session tags into a packed binary format that has a
+	// separate limit. Your request can fail for this limit even if your plaintext
+	// meets the other requirements. The PackedPolicySize response element indicates
+	// by percentage how close the policies and tags for your request are to the upper
+	// size limit.
+	//
+	// Passing policies to this operation returns new temporary credentials. The
+	// resulting session's permissions are the intersection of the role's
+	// identity-based policy and the session policies. You can use the role's temporary
+	// credentials in subsequent Amazon Web Services API calls to access resources in
+	// the account that owns the role. You cannot use session policies to grant more
+	// permissions than those allowed by the identity-based policy of the role that is
+	// being assumed. For more information, see [Session Policies]in the IAM User Guide.
+	//
+	// [Session Policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session
+	// [Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces]: https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html
+	PolicyArns []types.PolicyDescriptorType
+
+	// The fully qualified host component of the domain name of the OAuth 2.0 identity
+	// provider. Do not specify this value for an OpenID Connect identity provider.
+	//
+	// Currently www.amazon.com and graph.facebook.com are the only supported identity
+	// providers for OAuth 2.0 access tokens. Do not include URL schemes and port
+	// numbers.
+	//
+	// Do not specify this value for OpenID Connect ID tokens.
+	ProviderId *string
+
+	noSmithyDocumentSerde
+}
+
+// Contains the response to a successful AssumeRoleWithWebIdentity request, including temporary Amazon Web
+// Services credentials that can be used to make Amazon Web Services requests.
+type AssumeRoleWithWebIdentityOutput struct {
+
+	// The Amazon Resource Name (ARN) and the assumed role ID, which are identifiers
+	// that you can use to refer to the resulting temporary security credentials. For
+	// example, you can reference these credentials as a principal in a resource-based
+	// policy by using the ARN or assumed role ID. The ARN and ID include the
+	// RoleSessionName that you specified when you called AssumeRole .
+	AssumedRoleUser *types.AssumedRoleUser
+
+	// The intended audience (also known as client ID) of the web identity token. This
+	// is traditionally the client identifier issued to the application that requested
+	// the web identity token.
+	Audience *string
+
+	// The temporary security credentials, which include an access key ID, a secret
+	// access key, and a security token.
+	//
+	// The size of the security token that STS API operations return is not fixed. We
+	// strongly recommend that you make no assumptions about the maximum size.
+	Credentials *types.Credentials
+
+	// A percentage value that indicates the packed size of the session policies and
+	// session tags combined passed in the request. The request fails if the packed
+	// size is greater than 100 percent, which means the policies and tags exceeded the
+	// allowed space.
+	PackedPolicySize *int32
+
+	//  The issuing authority of the web identity token presented. For OpenID Connect
+	// ID tokens, this contains the value of the iss field. For OAuth 2.0 access
+	// tokens, this contains the value of the ProviderId parameter that was passed in
+	// the AssumeRoleWithWebIdentity request.
+	Provider *string
+
+	// The value of the source identity that is returned in the JSON web token (JWT)
+	// from the identity provider.
+	//
+	// You can require users to set a source identity value when they assume a role.
+	// You do this by using the sts:SourceIdentity condition key in a role trust
+	// policy. That way, actions that are taken with the role are associated with that
+	// user. After the source identity is set, the value cannot be changed. It is
+	// present in the request for all actions that are taken by the role and persists
+	// across [chained role]sessions. You can configure your identity provider to use an attribute
+	// associated with your users, like user name or email, as the source identity when
+	// calling AssumeRoleWithWebIdentity . You do this by adding a claim to the JSON
+	// web token. To learn more about OIDC tokens and claims, see [Using Tokens with User Pools]in the Amazon
+	// Cognito Developer Guide. For more information about using source identity, see [Monitor and control actions taken with assumed roles]
+	// in the IAM User Guide.
+	//
+	// The regex used to validate this parameter is a string of characters consisting
+	// of upper- and lower-case alphanumeric characters with no spaces. You can also
+	// include underscores or any of the following characters: =,.@-
+	//
+	// [chained role]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_terms-and-concepts#iam-term-role-chaining
+	// [Monitor and control actions taken with assumed roles]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html
+	// [Using Tokens with User Pools]: https://docs.aws.amazon.com/cognito/latest/developerguide/amazon-cognito-user-pools-using-tokens-with-identity-providers.html
+	SourceIdentity *string
+
+	// The unique user identifier that is returned by the identity provider. This
+	// identifier is associated with the WebIdentityToken that was submitted with the
+	// AssumeRoleWithWebIdentity call. The identifier is typically unique to the user
+	// and the application that acquired the WebIdentityToken (pairwise identifier).
+	// For OpenID Connect ID tokens, this field contains the value returned by the
+	// identity provider as the token's sub (Subject) claim.
+	SubjectFromWebIdentityToken *string
+
+	// Metadata pertaining to the operation's result.
+	ResultMetadata middleware.Metadata
+
+	noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationAssumeRoleWithWebIdentityMiddlewares(stack *middleware.Stack, options Options) (err error) {
+	if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+		return err
+	}
+	err = stack.Serialize.Add(&awsAwsquery_serializeOpAssumeRoleWithWebIdentity{}, middleware.After)
+	if err != nil {
+		return err
+	}
+	err = stack.Deserialize.Add(&awsAwsquery_deserializeOpAssumeRoleWithWebIdentity{}, middleware.After)
+	if err != nil {
+		return err
+	}
+	if err := addProtocolFinalizerMiddlewares(stack, options, "AssumeRoleWithWebIdentity"); err != nil {
+		return fmt.Errorf("add protocol finalizers: %v", err)
+	}
+
+	if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+		return err
+	}
+	if err = addSetLoggerMiddleware(stack, options); err != nil {
+		return err
+	}
+	if err = addClientRequestID(stack); err != nil {
+		return err
+	}
+	if err = addComputeContentLength(stack); err != nil {
+		return err
+	}
+	if err = addResolveEndpointMiddleware(stack, options); err != nil {
+		return err
+	}
+	if err = addRetry(stack, options); err != nil {
+		return err
+	}
+	if err = addRawResponseToMetadata(stack); err != nil {
+		return err
+	}
+	if err = addRecordResponseTiming(stack); err != nil {
+		return err
+	}
+	if err = addClientUserAgent(stack, options); err != nil {
+		return err
+	}
+	if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+		return err
+	}
+	if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+		return err
+	}
+	if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+		return err
+	}
+	if err = addTimeOffsetBuild(stack, c); err != nil {
+		return err
+	}
+	if err = addUserAgentRetryMode(stack, options); err != nil {
+		return err
+	}
+	if err = addOpAssumeRoleWithWebIdentityValidationMiddleware(stack); err != nil {
+		return err
+	}
+	if err = stack.Initialize.Add(newServiceMetadataMiddleware_opAssumeRoleWithWebIdentity(options.Region), middleware.Before); err != nil {
+		return err
+	}
+	if err = addRecursionDetection(stack); err != nil {
+		return err
+	}
+	if err = addRequestIDRetrieverMiddleware(stack); err != nil {
+		return err
+	}
+	if err = addResponseErrorMiddleware(stack); err != nil {
+		return err
+	}
+	if err = addRequestResponseLogging(stack, options); err != nil {
+		return err
+	}
+	if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+		return err
+	}
+	return nil
+}
+
+func newServiceMetadataMiddleware_opAssumeRoleWithWebIdentity(region string) *awsmiddleware.RegisterServiceMetadata {
+	return &awsmiddleware.RegisterServiceMetadata{
+		Region:        region,
+		ServiceID:     ServiceID,
+		OperationName: "AssumeRoleWithWebIdentity",
+	}
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_DecodeAuthorizationMessage.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_DecodeAuthorizationMessage.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_DecodeAuthorizationMessage.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_DecodeAuthorizationMessage.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,177 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package sts
+
+import (
+	"context"
+	"fmt"
+	awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+	"github.com/aws/smithy-go/middleware"
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Decodes additional information about the authorization status of a request from
+// an encoded message returned in response to an Amazon Web Services request.
+//
+// For example, if a user is not authorized to perform an operation that he or she
+// has requested, the request returns a Client.UnauthorizedOperation response (an
+// HTTP 403 response). Some Amazon Web Services operations additionally return an
+// encoded message that can provide details about this authorization failure.
+//
+// Only certain Amazon Web Services operations return an encoded authorization
+// message. The documentation for an individual operation indicates whether that
+// operation returns an encoded message in addition to returning an HTTP code.
+//
+// The message is encoded because the details of the authorization status can
+// contain privileged information that the user who requested the operation should
+// not see. To decode an authorization status message, a user must be granted
+// permissions through an IAM [policy]to request the DecodeAuthorizationMessage (
+// sts:DecodeAuthorizationMessage ) action.
+//
+// The decoded message includes the following type of information:
+//
+//   - Whether the request was denied due to an explicit deny or due to the
+//     absence of an explicit allow. For more information, see [Determining Whether a Request is Allowed or Denied]in the IAM User
+//     Guide.
+//
+//   - The principal who made the request.
+//
+//   - The requested action.
+//
+//   - The requested resource.
+//
+//   - The values of condition keys in the context of the user's request.
+//
+// [Determining Whether a Request is Allowed or Denied]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_evaluation-logic.html#policy-eval-denyallow
+// [policy]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html
+func (c *Client) DecodeAuthorizationMessage(ctx context.Context, params *DecodeAuthorizationMessageInput, optFns ...func(*Options)) (*DecodeAuthorizationMessageOutput, error) {
+	if params == nil {
+		params = &DecodeAuthorizationMessageInput{}
+	}
+
+	result, metadata, err := c.invokeOperation(ctx, "DecodeAuthorizationMessage", params, optFns, c.addOperationDecodeAuthorizationMessageMiddlewares)
+	if err != nil {
+		return nil, err
+	}
+
+	out := result.(*DecodeAuthorizationMessageOutput)
+	out.ResultMetadata = metadata
+	return out, nil
+}
+
+type DecodeAuthorizationMessageInput struct {
+
+	// The encoded message that was returned with the response.
+	//
+	// This member is required.
+	EncodedMessage *string
+
+	noSmithyDocumentSerde
+}
+
+// A document that contains additional information about the authorization status
+// of a request from an encoded message that is returned in response to an Amazon
+// Web Services request.
+type DecodeAuthorizationMessageOutput struct {
+
+	// The API returns a response with the decoded message.
+	DecodedMessage *string
+
+	// Metadata pertaining to the operation's result.
+	ResultMetadata middleware.Metadata
+
+	noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationDecodeAuthorizationMessageMiddlewares(stack *middleware.Stack, options Options) (err error) {
+	if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+		return err
+	}
+	err = stack.Serialize.Add(&awsAwsquery_serializeOpDecodeAuthorizationMessage{}, middleware.After)
+	if err != nil {
+		return err
+	}
+	err = stack.Deserialize.Add(&awsAwsquery_deserializeOpDecodeAuthorizationMessage{}, middleware.After)
+	if err != nil {
+		return err
+	}
+	if err := addProtocolFinalizerMiddlewares(stack, options, "DecodeAuthorizationMessage"); err != nil {
+		return fmt.Errorf("add protocol finalizers: %v", err)
+	}
+
+	if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+		return err
+	}
+	if err = addSetLoggerMiddleware(stack, options); err != nil {
+		return err
+	}
+	if err = addClientRequestID(stack); err != nil {
+		return err
+	}
+	if err = addComputeContentLength(stack); err != nil {
+		return err
+	}
+	if err = addResolveEndpointMiddleware(stack, options); err != nil {
+		return err
+	}
+	if err = addComputePayloadSHA256(stack); err != nil {
+		return err
+	}
+	if err = addRetry(stack, options); err != nil {
+		return err
+	}
+	if err = addRawResponseToMetadata(stack); err != nil {
+		return err
+	}
+	if err = addRecordResponseTiming(stack); err != nil {
+		return err
+	}
+	if err = addClientUserAgent(stack, options); err != nil {
+		return err
+	}
+	if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+		return err
+	}
+	if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+		return err
+	}
+	if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+		return err
+	}
+	if err = addTimeOffsetBuild(stack, c); err != nil {
+		return err
+	}
+	if err = addUserAgentRetryMode(stack, options); err != nil {
+		return err
+	}
+	if err = addOpDecodeAuthorizationMessageValidationMiddleware(stack); err != nil {
+		return err
+	}
+	if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDecodeAuthorizationMessage(options.Region), middleware.Before); err != nil {
+		return err
+	}
+	if err = addRecursionDetection(stack); err != nil {
+		return err
+	}
+	if err = addRequestIDRetrieverMiddleware(stack); err != nil {
+		return err
+	}
+	if err = addResponseErrorMiddleware(stack); err != nil {
+		return err
+	}
+	if err = addRequestResponseLogging(stack, options); err != nil {
+		return err
+	}
+	if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+		return err
+	}
+	return nil
+}
+
+func newServiceMetadataMiddleware_opDecodeAuthorizationMessage(region string) *awsmiddleware.RegisterServiceMetadata {
+	return &awsmiddleware.RegisterServiceMetadata{
+		Region:        region,
+		ServiceID:     ServiceID,
+		OperationName: "DecodeAuthorizationMessage",
+	}
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetAccessKeyInfo.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetAccessKeyInfo.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetAccessKeyInfo.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetAccessKeyInfo.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,168 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package sts
+
+import (
+	"context"
+	"fmt"
+	awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+	"github.com/aws/smithy-go/middleware"
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Returns the account identifier for the specified access key ID.
+//
+// Access keys consist of two parts: an access key ID (for example,
+// AKIAIOSFODNN7EXAMPLE ) and a secret access key (for example,
+// wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY ). For more information about access
+// keys, see [Managing Access Keys for IAM Users]in the IAM User Guide.
+//
+// When you pass an access key ID to this operation, it returns the ID of the
+// Amazon Web Services account to which the keys belong. Access key IDs beginning
+// with AKIA are long-term credentials for an IAM user or the Amazon Web Services
+// account root user. Access key IDs beginning with ASIA are temporary credentials
+// that are created using STS operations. If the account in the response belongs to
+// you, you can sign in as the root user and review your root user access keys.
+// Then, you can pull a [credentials report]to learn which IAM user owns the keys. To learn who
+// requested the temporary credentials for an ASIA access key, view the STS events
+// in your [CloudTrail logs]in the IAM User Guide.
+//
+// This operation does not indicate the state of the access key. The key might be
+// active, inactive, or deleted. Active keys might not have permissions to perform
+// an operation. Providing a deleted access key might return an error that the key
+// doesn't exist.
+//
+// [credentials report]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_getting-report.html
+// [CloudTrail logs]: https://docs.aws.amazon.com/IAM/latest/UserGuide/cloudtrail-integration.html
+// [Managing Access Keys for IAM Users]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html
+func (c *Client) GetAccessKeyInfo(ctx context.Context, params *GetAccessKeyInfoInput, optFns ...func(*Options)) (*GetAccessKeyInfoOutput, error) {
+	if params == nil {
+		params = &GetAccessKeyInfoInput{}
+	}
+
+	result, metadata, err := c.invokeOperation(ctx, "GetAccessKeyInfo", params, optFns, c.addOperationGetAccessKeyInfoMiddlewares)
+	if err != nil {
+		return nil, err
+	}
+
+	out := result.(*GetAccessKeyInfoOutput)
+	out.ResultMetadata = metadata
+	return out, nil
+}
+
+type GetAccessKeyInfoInput struct {
+
+	// The identifier of an access key.
+	//
+	// This parameter allows (through its regex pattern) a string of characters that
+	// can consist of any upper- or lowercase letter or digit.
+	//
+	// This member is required.
+	AccessKeyId *string
+
+	noSmithyDocumentSerde
+}
+
+type GetAccessKeyInfoOutput struct {
+
+	// The number used to identify the Amazon Web Services account.
+	Account *string
+
+	// Metadata pertaining to the operation's result.
+	ResultMetadata middleware.Metadata
+
+	noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationGetAccessKeyInfoMiddlewares(stack *middleware.Stack, options Options) (err error) {
+	if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+		return err
+	}
+	err = stack.Serialize.Add(&awsAwsquery_serializeOpGetAccessKeyInfo{}, middleware.After)
+	if err != nil {
+		return err
+	}
+	err = stack.Deserialize.Add(&awsAwsquery_deserializeOpGetAccessKeyInfo{}, middleware.After)
+	if err != nil {
+		return err
+	}
+	if err := addProtocolFinalizerMiddlewares(stack, options, "GetAccessKeyInfo"); err != nil {
+		return fmt.Errorf("add protocol finalizers: %v", err)
+	}
+
+	if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+		return err
+	}
+	if err = addSetLoggerMiddleware(stack, options); err != nil {
+		return err
+	}
+	if err = addClientRequestID(stack); err != nil {
+		return err
+	}
+	if err = addComputeContentLength(stack); err != nil {
+		return err
+	}
+	if err = addResolveEndpointMiddleware(stack, options); err != nil {
+		return err
+	}
+	if err = addComputePayloadSHA256(stack); err != nil {
+		return err
+	}
+	if err = addRetry(stack, options); err != nil {
+		return err
+	}
+	if err = addRawResponseToMetadata(stack); err != nil {
+		return err
+	}
+	if err = addRecordResponseTiming(stack); err != nil {
+		return err
+	}
+	if err = addClientUserAgent(stack, options); err != nil {
+		return err
+	}
+	if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+		return err
+	}
+	if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+		return err
+	}
+	if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+		return err
+	}
+	if err = addTimeOffsetBuild(stack, c); err != nil {
+		return err
+	}
+	if err = addUserAgentRetryMode(stack, options); err != nil {
+		return err
+	}
+	if err = addOpGetAccessKeyInfoValidationMiddleware(stack); err != nil {
+		return err
+	}
+	if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetAccessKeyInfo(options.Region), middleware.Before); err != nil {
+		return err
+	}
+	if err = addRecursionDetection(stack); err != nil {
+		return err
+	}
+	if err = addRequestIDRetrieverMiddleware(stack); err != nil {
+		return err
+	}
+	if err = addResponseErrorMiddleware(stack); err != nil {
+		return err
+	}
+	if err = addRequestResponseLogging(stack, options); err != nil {
+		return err
+	}
+	if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+		return err
+	}
+	return nil
+}
+
+func newServiceMetadataMiddleware_opGetAccessKeyInfo(region string) *awsmiddleware.RegisterServiceMetadata {
+	return &awsmiddleware.RegisterServiceMetadata{
+		Region:        region,
+		ServiceID:     ServiceID,
+		OperationName: "GetAccessKeyInfo",
+	}
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetCallerIdentity.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetCallerIdentity.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetCallerIdentity.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetCallerIdentity.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,180 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package sts
+
+import (
+	"context"
+	"fmt"
+	awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+	"github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+	"github.com/aws/smithy-go/middleware"
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Returns details about the IAM user or role whose credentials are used to call
+// the operation.
+//
+// No permissions are required to perform this operation. If an administrator
+// attaches a policy to your identity that explicitly denies access to the
+// sts:GetCallerIdentity action, you can still perform this operation. Permissions
+// are not required because the same information is returned when access is denied.
+// To view an example response, see [I Am Not Authorized to Perform: iam:DeleteVirtualMFADevice]in the IAM User Guide.
+//
+// [I Am Not Authorized to Perform: iam:DeleteVirtualMFADevice]: https://docs.aws.amazon.com/IAM/latest/UserGuide/troubleshoot_general.html#troubleshoot_general_access-denied-delete-mfa
+func (c *Client) GetCallerIdentity(ctx context.Context, params *GetCallerIdentityInput, optFns ...func(*Options)) (*GetCallerIdentityOutput, error) {
+	if params == nil {
+		params = &GetCallerIdentityInput{}
+	}
+
+	result, metadata, err := c.invokeOperation(ctx, "GetCallerIdentity", params, optFns, c.addOperationGetCallerIdentityMiddlewares)
+	if err != nil {
+		return nil, err
+	}
+
+	out := result.(*GetCallerIdentityOutput)
+	out.ResultMetadata = metadata
+	return out, nil
+}
+
+type GetCallerIdentityInput struct {
+	noSmithyDocumentSerde
+}
+
+// Contains the response to a successful GetCallerIdentity request, including information about the
+// entity making the request.
+type GetCallerIdentityOutput struct {
+
+	// The Amazon Web Services account ID number of the account that owns or contains
+	// the calling entity.
+	Account *string
+
+	// The Amazon Web Services ARN associated with the calling entity.
+	Arn *string
+
+	// The unique identifier of the calling entity. The exact value depends on the
+	// type of entity that is making the call. The values returned are those listed in
+	// the aws:userid column in the [Principal table]found on the Policy Variables reference page in
+	// the IAM User Guide.
+	//
+	// [Principal table]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_variables.html#principaltable
+	UserId *string
+
+	// Metadata pertaining to the operation's result.
+	ResultMetadata middleware.Metadata
+
+	noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationGetCallerIdentityMiddlewares(stack *middleware.Stack, options Options) (err error) {
+	if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+		return err
+	}
+	err = stack.Serialize.Add(&awsAwsquery_serializeOpGetCallerIdentity{}, middleware.After)
+	if err != nil {
+		return err
+	}
+	err = stack.Deserialize.Add(&awsAwsquery_deserializeOpGetCallerIdentity{}, middleware.After)
+	if err != nil {
+		return err
+	}
+	if err := addProtocolFinalizerMiddlewares(stack, options, "GetCallerIdentity"); err != nil {
+		return fmt.Errorf("add protocol finalizers: %v", err)
+	}
+
+	if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+		return err
+	}
+	if err = addSetLoggerMiddleware(stack, options); err != nil {
+		return err
+	}
+	if err = addClientRequestID(stack); err != nil {
+		return err
+	}
+	if err = addComputeContentLength(stack); err != nil {
+		return err
+	}
+	if err = addResolveEndpointMiddleware(stack, options); err != nil {
+		return err
+	}
+	if err = addComputePayloadSHA256(stack); err != nil {
+		return err
+	}
+	if err = addRetry(stack, options); err != nil {
+		return err
+	}
+	if err = addRawResponseToMetadata(stack); err != nil {
+		return err
+	}
+	if err = addRecordResponseTiming(stack); err != nil {
+		return err
+	}
+	if err = addClientUserAgent(stack, options); err != nil {
+		return err
+	}
+	if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+		return err
+	}
+	if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+		return err
+	}
+	if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+		return err
+	}
+	if err = addTimeOffsetBuild(stack, c); err != nil {
+		return err
+	}
+	if err = addUserAgentRetryMode(stack, options); err != nil {
+		return err
+	}
+	if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetCallerIdentity(options.Region), middleware.Before); err != nil {
+		return err
+	}
+	if err = addRecursionDetection(stack); err != nil {
+		return err
+	}
+	if err = addRequestIDRetrieverMiddleware(stack); err != nil {
+		return err
+	}
+	if err = addResponseErrorMiddleware(stack); err != nil {
+		return err
+	}
+	if err = addRequestResponseLogging(stack, options); err != nil {
+		return err
+	}
+	if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+		return err
+	}
+	return nil
+}
+
+func newServiceMetadataMiddleware_opGetCallerIdentity(region string) *awsmiddleware.RegisterServiceMetadata {
+	return &awsmiddleware.RegisterServiceMetadata{
+		Region:        region,
+		ServiceID:     ServiceID,
+		OperationName: "GetCallerIdentity",
+	}
+}
+
+// PresignGetCallerIdentity is used to generate a presigned HTTP Request which
+// contains presigned URL, signed headers and HTTP method used.
+func (c *PresignClient) PresignGetCallerIdentity(ctx context.Context, params *GetCallerIdentityInput, optFns ...func(*PresignOptions)) (*v4.PresignedHTTPRequest, error) {
+	if params == nil {
+		params = &GetCallerIdentityInput{}
+	}
+	options := c.options.copy()
+	for _, fn := range optFns {
+		fn(&options)
+	}
+	clientOptFns := append(options.ClientOptions, withNopHTTPClientAPIOption)
+
+	result, _, err := c.client.invokeOperation(ctx, "GetCallerIdentity", params, clientOptFns,
+		c.client.addOperationGetCallerIdentityMiddlewares,
+		presignConverter(options).convertToPresignMiddleware,
+	)
+	if err != nil {
+		return nil, err
+	}
+
+	out := result.(*v4.PresignedHTTPRequest)
+	return out, nil
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetFederationToken.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetFederationToken.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetFederationToken.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetFederationToken.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,381 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package sts
+
+import (
+	"context"
+	"fmt"
+	awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+	"github.com/aws/aws-sdk-go-v2/service/sts/types"
+	"github.com/aws/smithy-go/middleware"
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Returns a set of temporary security credentials (consisting of an access key
+// ID, a secret access key, and a security token) for a user. A typical use is in a
+// proxy application that gets temporary security credentials on behalf of
+// distributed applications inside a corporate network.
+//
+// You must call the GetFederationToken operation using the long-term security
+// credentials of an IAM user. As a result, this call is appropriate in contexts
+// where those credentials can be safeguarded, usually in a server-based
+// application. For a comparison of GetFederationToken with the other API
+// operations that produce temporary credentials, see [Requesting Temporary Security Credentials]and [Comparing the Amazon Web Services STS API operations] in the IAM User Guide.
+//
+// Although it is possible to call GetFederationToken using the security
+// credentials of an Amazon Web Services account root user rather than an IAM user
+// that you create for the purpose of a proxy application, we do not recommend it.
+// For more information, see [Safeguard your root user credentials and don't use them for everyday tasks]in the IAM User Guide.
+//
+// You can create a mobile-based or browser-based app that can authenticate users
+// using a web identity provider like Login with Amazon, Facebook, Google, or an
+// OpenID Connect-compatible identity provider. In this case, we recommend that you
+// use [Amazon Cognito]or AssumeRoleWithWebIdentity . For more information, see [Federation Through a Web-based Identity Provider] in the IAM User
+// Guide.
+//
+// # Session duration
+//
+// The temporary credentials are valid for the specified duration, from 900
+// seconds (15 minutes) up to a maximum of 129,600 seconds (36 hours). The default
+// session duration is 43,200 seconds (12 hours). Temporary credentials obtained by
+// using the root user credentials have a maximum duration of 3,600 seconds (1
+// hour).
+//
+// # Permissions
+//
+// You can use the temporary credentials created by GetFederationToken in any
+// Amazon Web Services service with the following exceptions:
+//
+//   - You cannot call any IAM operations using the CLI or the Amazon Web Services
+//     API. This limitation does not apply to console sessions.
+//
+//   - You cannot call any STS operations except GetCallerIdentity .
+//
+// You can use temporary credentials for single sign-on (SSO) to the console.
+//
+// You must pass an inline or managed [session policy] to this operation. You can pass a single
+// JSON policy document to use as an inline session policy. You can also specify up
+// to 10 managed policy Amazon Resource Names (ARNs) to use as managed session
+// policies. The plaintext that you use for both inline and managed session
+// policies can't exceed 2,048 characters.
+//
+// Though the session policy parameters are optional, if you do not pass a policy,
+// then the resulting federated user session has no permissions. When you pass
+// session policies, the session permissions are the intersection of the IAM user
+// policies and the session policies that you pass. This gives you a way to further
+// restrict the permissions for a federated user. You cannot use session policies
+// to grant more permissions than those that are defined in the permissions policy
+// of the IAM user. For more information, see [Session Policies]in the IAM User Guide. For
+// information about using GetFederationToken to create temporary security
+// credentials, see [GetFederationToken—Federation Through a Custom Identity Broker].
+//
+// You can use the credentials to access a resource that has a resource-based
+// policy. If that policy specifically references the federated user session in the
+// Principal element of the policy, the session has the permissions allowed by the
+// policy. These permissions are granted in addition to the permissions granted by
+// the session policies.
+//
+// # Tags
+//
+// (Optional) You can pass tag key-value pairs to your session. These are called
+// session tags. For more information about session tags, see [Passing Session Tags in STS]in the IAM User
+// Guide.
+//
+// You can create a mobile-based or browser-based app that can authenticate users
+// using a web identity provider like Login with Amazon, Facebook, Google, or an
+// OpenID Connect-compatible identity provider. In this case, we recommend that you
+// use [Amazon Cognito]or AssumeRoleWithWebIdentity . For more information, see [Federation Through a Web-based Identity Provider] in the IAM User
+// Guide.
+//
+// An administrator must grant you the permissions necessary to pass session tags.
+// The administrator can also create granular permissions to allow you to pass only
+// specific session tags. For more information, see [Tutorial: Using Tags for Attribute-Based Access Control]in the IAM User Guide.
+//
+// Tag key–value pairs are not case sensitive, but case is preserved. This means
+// that you cannot have separate Department and department tag keys. Assume that
+// the user that you are federating has the Department = Marketing tag and you
+// pass the department = engineering session tag. Department and department are
+// not saved as separate tags, and the session tag passed in the request takes
+// precedence over the user tag.
+//
+// [Federation Through a Web-based Identity Provider]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity
+// [session policy]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session
+// [Amazon Cognito]: http://aws.amazon.com/cognito/
+// [Session Policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session
+// [Passing Session Tags in STS]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html
+// [GetFederationToken—Federation Through a Custom Identity Broker]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getfederationtoken
+// [Comparing the Amazon Web Services STS API operations]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison
+// [Safeguard your root user credentials and don't use them for everyday tasks]: https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#lock-away-credentials
+// [Requesting Temporary Security Credentials]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html
+// [Tutorial: Using Tags for Attribute-Based Access Control]: https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html
+func (c *Client) GetFederationToken(ctx context.Context, params *GetFederationTokenInput, optFns ...func(*Options)) (*GetFederationTokenOutput, error) {
+	if params == nil {
+		params = &GetFederationTokenInput{}
+	}
+
+	result, metadata, err := c.invokeOperation(ctx, "GetFederationToken", params, optFns, c.addOperationGetFederationTokenMiddlewares)
+	if err != nil {
+		return nil, err
+	}
+
+	out := result.(*GetFederationTokenOutput)
+	out.ResultMetadata = metadata
+	return out, nil
+}
+
+type GetFederationTokenInput struct {
+
+	// The name of the federated user. The name is used as an identifier for the
+	// temporary security credentials (such as Bob ). For example, you can reference
+	// the federated user name in a resource-based policy, such as in an Amazon S3
+	// bucket policy.
+	//
+	// The regex used to validate this parameter is a string of characters consisting
+	// of upper- and lower-case alphanumeric characters with no spaces. You can also
+	// include underscores or any of the following characters: =,.@-
+	//
+	// This member is required.
+	Name *string
+
+	// The duration, in seconds, that the session should last. Acceptable durations
+	// for federation sessions range from 900 seconds (15 minutes) to 129,600 seconds
+	// (36 hours), with 43,200 seconds (12 hours) as the default. Sessions obtained
+	// using root user credentials are restricted to a maximum of 3,600 seconds (one
+	// hour). If the specified duration is longer than one hour, the session obtained
+	// by using root user credentials defaults to one hour.
+	DurationSeconds *int32
+
+	// An IAM policy in JSON format that you want to use as an inline session policy.
+	//
+	// You must pass an inline or managed [session policy] to this operation. You can pass a single
+	// JSON policy document to use as an inline session policy. You can also specify up
+	// to 10 managed policy Amazon Resource Names (ARNs) to use as managed session
+	// policies.
+	//
+	// This parameter is optional. However, if you do not pass any session policies,
+	// then the resulting federated user session has no permissions.
+	//
+	// When you pass session policies, the session permissions are the intersection of
+	// the IAM user policies and the session policies that you pass. This gives you a
+	// way to further restrict the permissions for a federated user. You cannot use
+	// session policies to grant more permissions than those that are defined in the
+	// permissions policy of the IAM user. For more information, see [Session Policies]in the IAM User
+	// Guide.
+	//
+	// The resulting credentials can be used to access a resource that has a
+	// resource-based policy. If that policy specifically references the federated user
+	// session in the Principal element of the policy, the session has the permissions
+	// allowed by the policy. These permissions are granted in addition to the
+	// permissions that are granted by the session policies.
+	//
+	// The plaintext that you use for both inline and managed session policies can't
+	// exceed 2,048 characters. The JSON policy characters can be any ASCII character
+	// from the space character to the end of the valid character list (\u0020 through
+	// \u00FF). It can also include the tab (\u0009), linefeed (\u000A), and carriage
+	// return (\u000D) characters.
+	//
+	// An Amazon Web Services conversion compresses the passed inline session policy,
+	// managed policy ARNs, and session tags into a packed binary format that has a
+	// separate limit. Your request can fail for this limit even if your plaintext
+	// meets the other requirements. The PackedPolicySize response element indicates
+	// by percentage how close the policies and tags for your request are to the upper
+	// size limit.
+	//
+	// [session policy]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session
+	// [Session Policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session
+	Policy *string
+
+	// The Amazon Resource Names (ARNs) of the IAM managed policies that you want to
+	// use as a managed session policy. The policies must exist in the same account as
+	// the IAM user that is requesting federated access.
+	//
+	// You must pass an inline or managed [session policy] to this operation. You can pass a single
+	// JSON policy document to use as an inline session policy. You can also specify up
+	// to 10 managed policy Amazon Resource Names (ARNs) to use as managed session
+	// policies. The plaintext that you use for both inline and managed session
+	// policies can't exceed 2,048 characters. You can provide up to 10 managed policy
+	// ARNs. For more information about ARNs, see [Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces]in the Amazon Web Services General
+	// Reference.
+	//
+	// This parameter is optional. However, if you do not pass any session policies,
+	// then the resulting federated user session has no permissions.
+	//
+	// When you pass session policies, the session permissions are the intersection of
+	// the IAM user policies and the session policies that you pass. This gives you a
+	// way to further restrict the permissions for a federated user. You cannot use
+	// session policies to grant more permissions than those that are defined in the
+	// permissions policy of the IAM user. For more information, see [Session Policies]in the IAM User
+	// Guide.
+	//
+	// The resulting credentials can be used to access a resource that has a
+	// resource-based policy. If that policy specifically references the federated user
+	// session in the Principal element of the policy, the session has the permissions
+	// allowed by the policy. These permissions are granted in addition to the
+	// permissions that are granted by the session policies.
+	//
+	// An Amazon Web Services conversion compresses the passed inline session policy,
+	// managed policy ARNs, and session tags into a packed binary format that has a
+	// separate limit. Your request can fail for this limit even if your plaintext
+	// meets the other requirements. The PackedPolicySize response element indicates
+	// by percentage how close the policies and tags for your request are to the upper
+	// size limit.
+	//
+	// [session policy]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session
+	// [Session Policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session
+	// [Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces]: https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html
+	PolicyArns []types.PolicyDescriptorType
+
+	// A list of session tags. Each session tag consists of a key name and an
+	// associated value. For more information about session tags, see [Passing Session Tags in STS]in the IAM User
+	// Guide.
+	//
+	// This parameter is optional. You can pass up to 50 session tags. The plaintext
+	// session tag keys can’t exceed 128 characters and the values can’t exceed 256
+	// characters. For these and additional limits, see [IAM and STS Character Limits]in the IAM User Guide.
+	//
+	// An Amazon Web Services conversion compresses the passed inline session policy,
+	// managed policy ARNs, and session tags into a packed binary format that has a
+	// separate limit. Your request can fail for this limit even if your plaintext
+	// meets the other requirements. The PackedPolicySize response element indicates
+	// by percentage how close the policies and tags for your request are to the upper
+	// size limit.
+	//
+	// You can pass a session tag with the same key as a tag that is already attached
+	// to the user you are federating. When you do, session tags override a user tag
+	// with the same key.
+	//
+	// Tag key–value pairs are not case sensitive, but case is preserved. This means
+	// that you cannot have separate Department and department tag keys. Assume that
+	// the role has the Department = Marketing tag and you pass the department =
+	// engineering session tag. Department and department are not saved as separate
+	// tags, and the session tag passed in the request takes precedence over the role
+	// tag.
+	//
+	// [Passing Session Tags in STS]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html
+	// [IAM and STS Character Limits]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length
+	Tags []types.Tag
+
+	noSmithyDocumentSerde
+}
+
+// Contains the response to a successful GetFederationToken request, including temporary Amazon Web
+// Services credentials that can be used to make Amazon Web Services requests.
+type GetFederationTokenOutput struct {
+
+	// The temporary security credentials, which include an access key ID, a secret
+	// access key, and a security (or session) token.
+	//
+	// The size of the security token that STS API operations return is not fixed. We
+	// strongly recommend that you make no assumptions about the maximum size.
+	Credentials *types.Credentials
+
+	// Identifiers for the federated user associated with the credentials (such as
+	// arn:aws:sts::123456789012:federated-user/Bob or 123456789012:Bob ). You can use
+	// the federated user's ARN in your resource-based policies, such as an Amazon S3
+	// bucket policy.
+	FederatedUser *types.FederatedUser
+
+	// A percentage value that indicates the packed size of the session policies and
+	// session tags combined passed in the request. The request fails if the packed
+	// size is greater than 100 percent, which means the policies and tags exceeded the
+	// allowed space.
+	PackedPolicySize *int32
+
+	// Metadata pertaining to the operation's result.
+	ResultMetadata middleware.Metadata
+
+	noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationGetFederationTokenMiddlewares(stack *middleware.Stack, options Options) (err error) {
+	if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+		return err
+	}
+	err = stack.Serialize.Add(&awsAwsquery_serializeOpGetFederationToken{}, middleware.After)
+	if err != nil {
+		return err
+	}
+	err = stack.Deserialize.Add(&awsAwsquery_deserializeOpGetFederationToken{}, middleware.After)
+	if err != nil {
+		return err
+	}
+	if err := addProtocolFinalizerMiddlewares(stack, options, "GetFederationToken"); err != nil {
+		return fmt.Errorf("add protocol finalizers: %v", err)
+	}
+
+	if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+		return err
+	}
+	if err = addSetLoggerMiddleware(stack, options); err != nil {
+		return err
+	}
+	if err = addClientRequestID(stack); err != nil {
+		return err
+	}
+	if err = addComputeContentLength(stack); err != nil {
+		return err
+	}
+	if err = addResolveEndpointMiddleware(stack, options); err != nil {
+		return err
+	}
+	if err = addComputePayloadSHA256(stack); err != nil {
+		return err
+	}
+	if err = addRetry(stack, options); err != nil {
+		return err
+	}
+	if err = addRawResponseToMetadata(stack); err != nil {
+		return err
+	}
+	if err = addRecordResponseTiming(stack); err != nil {
+		return err
+	}
+	if err = addClientUserAgent(stack, options); err != nil {
+		return err
+	}
+	if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+		return err
+	}
+	if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+		return err
+	}
+	if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+		return err
+	}
+	if err = addTimeOffsetBuild(stack, c); err != nil {
+		return err
+	}
+	if err = addUserAgentRetryMode(stack, options); err != nil {
+		return err
+	}
+	if err = addOpGetFederationTokenValidationMiddleware(stack); err != nil {
+		return err
+	}
+	if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetFederationToken(options.Region), middleware.Before); err != nil {
+		return err
+	}
+	if err = addRecursionDetection(stack); err != nil {
+		return err
+	}
+	if err = addRequestIDRetrieverMiddleware(stack); err != nil {
+		return err
+	}
+	if err = addResponseErrorMiddleware(stack); err != nil {
+		return err
+	}
+	if err = addRequestResponseLogging(stack, options); err != nil {
+		return err
+	}
+	if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+		return err
+	}
+	return nil
+}
+
+func newServiceMetadataMiddleware_opGetFederationToken(region string) *awsmiddleware.RegisterServiceMetadata {
+	return &awsmiddleware.RegisterServiceMetadata{
+		Region:        region,
+		ServiceID:     ServiceID,
+		OperationName: "GetFederationToken",
+	}
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetSessionToken.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetSessionToken.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetSessionToken.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetSessionToken.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,227 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package sts
+
+import (
+	"context"
+	"fmt"
+	awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+	"github.com/aws/aws-sdk-go-v2/service/sts/types"
+	"github.com/aws/smithy-go/middleware"
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Returns a set of temporary credentials for an Amazon Web Services account or
+// IAM user. The credentials consist of an access key ID, a secret access key, and
+// a security token. Typically, you use GetSessionToken if you want to use MFA to
+// protect programmatic calls to specific Amazon Web Services API operations like
+// Amazon EC2 StopInstances .
+//
+// MFA-enabled IAM users must call GetSessionToken and submit an MFA code that is
+// associated with their MFA device. Using the temporary security credentials that
+// the call returns, IAM users can then make programmatic calls to API operations
+// that require MFA authentication. An incorrect MFA code causes the API to return
+// an access denied error. For a comparison of GetSessionToken with the other API
+// operations that produce temporary credentials, see [Requesting Temporary Security Credentials]and [Comparing the Amazon Web Services STS API operations] in the IAM User Guide.
+//
+// No permissions are required for users to perform this operation. The purpose of
+// the sts:GetSessionToken operation is to authenticate the user using MFA. You
+// cannot use policies to control authentication operations. For more information,
+// see [Permissions for GetSessionToken]in the IAM User Guide.
+//
+// # Session Duration
+//
+// The GetSessionToken operation must be called by using the long-term Amazon Web
+// Services security credentials of an IAM user. Credentials that are created by
+// IAM users are valid for the duration that you specify. This duration can range
+// from 900 seconds (15 minutes) up to a maximum of 129,600 seconds (36 hours),
+// with a default of 43,200 seconds (12 hours). Credentials based on account
+// credentials can range from 900 seconds (15 minutes) up to 3,600 seconds (1
+// hour), with a default of 1 hour.
+//
+// # Permissions
+//
+// The temporary security credentials created by GetSessionToken can be used to
+// make API calls to any Amazon Web Services service with the following exceptions:
+//
+//   - You cannot call any IAM API operations unless MFA authentication
+//     information is included in the request.
+//
+//   - You cannot call any STS API except AssumeRole or GetCallerIdentity .
+//
+// The credentials that GetSessionToken returns are based on permissions
+// associated with the IAM user whose credentials were used to call the operation.
+// The temporary credentials have the same permissions as the IAM user.
+//
+// Although it is possible to call GetSessionToken using the security credentials
+// of an Amazon Web Services account root user rather than an IAM user, we do not
+// recommend it. If GetSessionToken is called using root user credentials, the
+// temporary credentials have root user permissions. For more information, see [Safeguard your root user credentials and don't use them for everyday tasks]in
+// the IAM User Guide
+//
+// For more information about using GetSessionToken to create temporary
+// credentials, see [Temporary Credentials for Users in Untrusted Environments]in the IAM User Guide.
+//
+// [Permissions for GetSessionToken]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_getsessiontoken.html
+// [Comparing the Amazon Web Services STS API operations]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison
+// [Temporary Credentials for Users in Untrusted Environments]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getsessiontoken
+// [Safeguard your root user credentials and don't use them for everyday tasks]: https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#lock-away-credentials
+// [Requesting Temporary Security Credentials]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html
+func (c *Client) GetSessionToken(ctx context.Context, params *GetSessionTokenInput, optFns ...func(*Options)) (*GetSessionTokenOutput, error) {
+	if params == nil {
+		params = &GetSessionTokenInput{}
+	}
+
+	result, metadata, err := c.invokeOperation(ctx, "GetSessionToken", params, optFns, c.addOperationGetSessionTokenMiddlewares)
+	if err != nil {
+		return nil, err
+	}
+
+	out := result.(*GetSessionTokenOutput)
+	out.ResultMetadata = metadata
+	return out, nil
+}
+
+type GetSessionTokenInput struct {
+
+	// The duration, in seconds, that the credentials should remain valid. Acceptable
+	// durations for IAM user sessions range from 900 seconds (15 minutes) to 129,600
+	// seconds (36 hours), with 43,200 seconds (12 hours) as the default. Sessions for
+	// Amazon Web Services account owners are restricted to a maximum of 3,600 seconds
+	// (one hour). If the duration is longer than one hour, the session for Amazon Web
+	// Services account owners defaults to one hour.
+	DurationSeconds *int32
+
+	// The identification number of the MFA device that is associated with the IAM
+	// user who is making the GetSessionToken call. Specify this value if the IAM user
+	// has a policy that requires MFA authentication. The value is either the serial
+	// number for a hardware device (such as GAHT12345678 ) or an Amazon Resource Name
+	// (ARN) for a virtual device (such as arn:aws:iam::123456789012:mfa/user ). You
+	// can find the device for an IAM user by going to the Amazon Web Services
+	// Management Console and viewing the user's security credentials.
+	//
+	// The regex used to validate this parameter is a string of characters consisting
+	// of upper- and lower-case alphanumeric characters with no spaces. You can also
+	// include underscores or any of the following characters: =,.@:/-
+	SerialNumber *string
+
+	// The value provided by the MFA device, if MFA is required. If any policy
+	// requires the IAM user to submit an MFA code, specify this value. If MFA
+	// authentication is required, the user must provide a code when requesting a set
+	// of temporary security credentials. A user who fails to provide the code receives
+	// an "access denied" response when requesting resources that require MFA
+	// authentication.
+	//
+	// The format for this parameter, as described by its regex pattern, is a sequence
+	// of six numeric digits.
+	TokenCode *string
+
+	noSmithyDocumentSerde
+}
+
+// Contains the response to a successful GetSessionToken request, including temporary Amazon Web
+// Services credentials that can be used to make Amazon Web Services requests.
+type GetSessionTokenOutput struct {
+
+	// The temporary security credentials, which include an access key ID, a secret
+	// access key, and a security (or session) token.
+	//
+	// The size of the security token that STS API operations return is not fixed. We
+	// strongly recommend that you make no assumptions about the maximum size.
+	Credentials *types.Credentials
+
+	// Metadata pertaining to the operation's result.
+	ResultMetadata middleware.Metadata
+
+	noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationGetSessionTokenMiddlewares(stack *middleware.Stack, options Options) (err error) {
+	if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+		return err
+	}
+	err = stack.Serialize.Add(&awsAwsquery_serializeOpGetSessionToken{}, middleware.After)
+	if err != nil {
+		return err
+	}
+	err = stack.Deserialize.Add(&awsAwsquery_deserializeOpGetSessionToken{}, middleware.After)
+	if err != nil {
+		return err
+	}
+	if err := addProtocolFinalizerMiddlewares(stack, options, "GetSessionToken"); err != nil {
+		return fmt.Errorf("add protocol finalizers: %v", err)
+	}
+
+	if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+		return err
+	}
+	if err = addSetLoggerMiddleware(stack, options); err != nil {
+		return err
+	}
+	if err = addClientRequestID(stack); err != nil {
+		return err
+	}
+	if err = addComputeContentLength(stack); err != nil {
+		return err
+	}
+	if err = addResolveEndpointMiddleware(stack, options); err != nil {
+		return err
+	}
+	if err = addComputePayloadSHA256(stack); err != nil {
+		return err
+	}
+	if err = addRetry(stack, options); err != nil {
+		return err
+	}
+	if err = addRawResponseToMetadata(stack); err != nil {
+		return err
+	}
+	if err = addRecordResponseTiming(stack); err != nil {
+		return err
+	}
+	if err = addClientUserAgent(stack, options); err != nil {
+		return err
+	}
+	if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+		return err
+	}
+	if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+		return err
+	}
+	if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+		return err
+	}
+	if err = addTimeOffsetBuild(stack, c); err != nil {
+		return err
+	}
+	if err = addUserAgentRetryMode(stack, options); err != nil {
+		return err
+	}
+	if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetSessionToken(options.Region), middleware.Before); err != nil {
+		return err
+	}
+	if err = addRecursionDetection(stack); err != nil {
+		return err
+	}
+	if err = addRequestIDRetrieverMiddleware(stack); err != nil {
+		return err
+	}
+	if err = addResponseErrorMiddleware(stack); err != nil {
+		return err
+	}
+	if err = addRequestResponseLogging(stack, options); err != nil {
+		return err
+	}
+	if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+		return err
+	}
+	return nil
+}
+
+func newServiceMetadataMiddleware_opGetSessionToken(region string) *awsmiddleware.RegisterServiceMetadata {
+	return &awsmiddleware.RegisterServiceMetadata{
+		Region:        region,
+		ServiceID:     ServiceID,
+		OperationName: "GetSessionToken",
+	}
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/service/sts/auth.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/service/sts/auth.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/service/sts/auth.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/service/sts/auth.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,296 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package sts
+
+import (
+	"context"
+	"fmt"
+	awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+	smithy "github.com/aws/smithy-go"
+	smithyauth "github.com/aws/smithy-go/auth"
+	"github.com/aws/smithy-go/middleware"
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+func bindAuthParamsRegion(_ interface{}, params *AuthResolverParameters, _ interface{}, options Options) {
+	params.Region = options.Region
+}
+
+type setLegacyContextSigningOptionsMiddleware struct {
+}
+
+func (*setLegacyContextSigningOptionsMiddleware) ID() string {
+	return "setLegacyContextSigningOptions"
+}
+
+func (m *setLegacyContextSigningOptionsMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (
+	out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
+) {
+	rscheme := getResolvedAuthScheme(ctx)
+	schemeID := rscheme.Scheme.SchemeID()
+
+	if sn := awsmiddleware.GetSigningName(ctx); sn != "" {
+		if schemeID == "aws.auth#sigv4" {
+			smithyhttp.SetSigV4SigningName(&rscheme.SignerProperties, sn)
+		} else if schemeID == "aws.auth#sigv4a" {
+			smithyhttp.SetSigV4ASigningName(&rscheme.SignerProperties, sn)
+		}
+	}
+
+	if sr := awsmiddleware.GetSigningRegion(ctx); sr != "" {
+		if schemeID == "aws.auth#sigv4" {
+			smithyhttp.SetSigV4SigningRegion(&rscheme.SignerProperties, sr)
+		} else if schemeID == "aws.auth#sigv4a" {
+			smithyhttp.SetSigV4ASigningRegions(&rscheme.SignerProperties, []string{sr})
+		}
+	}
+
+	return next.HandleFinalize(ctx, in)
+}
+
+func addSetLegacyContextSigningOptionsMiddleware(stack *middleware.Stack) error {
+	return stack.Finalize.Insert(&setLegacyContextSigningOptionsMiddleware{}, "Signing", middleware.Before)
+}
+
+type withAnonymous struct {
+	resolver AuthSchemeResolver
+}
+
+var _ AuthSchemeResolver = (*withAnonymous)(nil)
+
+func (v *withAnonymous) ResolveAuthSchemes(ctx context.Context, params *AuthResolverParameters) ([]*smithyauth.Option, error) {
+	opts, err := v.resolver.ResolveAuthSchemes(ctx, params)
+	if err != nil {
+		return nil, err
+	}
+
+	opts = append(opts, &smithyauth.Option{
+		SchemeID: smithyauth.SchemeIDAnonymous,
+	})
+	return opts, nil
+}
+
+func wrapWithAnonymousAuth(options *Options) {
+	if _, ok := options.AuthSchemeResolver.(*defaultAuthSchemeResolver); !ok {
+		return
+	}
+
+	options.AuthSchemeResolver = &withAnonymous{
+		resolver: options.AuthSchemeResolver,
+	}
+}
+
+// AuthResolverParameters contains the set of inputs necessary for auth scheme
+// resolution.
+type AuthResolverParameters struct {
+	// The name of the operation being invoked.
+	Operation string
+
+	// The region in which the operation is being invoked.
+	Region string
+}
+
+func bindAuthResolverParams(ctx context.Context, operation string, input interface{}, options Options) *AuthResolverParameters {
+	params := &AuthResolverParameters{
+		Operation: operation,
+	}
+
+	bindAuthParamsRegion(ctx, params, input, options)
+
+	return params
+}
+
+// AuthSchemeResolver returns a set of possible authentication options for an
+// operation.
+type AuthSchemeResolver interface {
+	ResolveAuthSchemes(context.Context, *AuthResolverParameters) ([]*smithyauth.Option, error)
+}
+
+type defaultAuthSchemeResolver struct{}
+
+var _ AuthSchemeResolver = (*defaultAuthSchemeResolver)(nil)
+
+func (*defaultAuthSchemeResolver) ResolveAuthSchemes(ctx context.Context, params *AuthResolverParameters) ([]*smithyauth.Option, error) {
+	if overrides, ok := operationAuthOptions[params.Operation]; ok {
+		return overrides(params), nil
+	}
+	return serviceAuthOptions(params), nil
+}
+
+var operationAuthOptions = map[string]func(*AuthResolverParameters) []*smithyauth.Option{
+	"AssumeRoleWithSAML": func(params *AuthResolverParameters) []*smithyauth.Option {
+		return []*smithyauth.Option{
+			{SchemeID: smithyauth.SchemeIDAnonymous},
+		}
+	},
+
+	"AssumeRoleWithWebIdentity": func(params *AuthResolverParameters) []*smithyauth.Option {
+		return []*smithyauth.Option{
+			{SchemeID: smithyauth.SchemeIDAnonymous},
+		}
+	},
+}
+
+func serviceAuthOptions(params *AuthResolverParameters) []*smithyauth.Option {
+	return []*smithyauth.Option{
+		{
+			SchemeID: smithyauth.SchemeIDSigV4,
+			SignerProperties: func() smithy.Properties {
+				var props smithy.Properties
+				smithyhttp.SetSigV4SigningName(&props, "sts")
+				smithyhttp.SetSigV4SigningRegion(&props, params.Region)
+				return props
+			}(),
+		},
+	}
+}
+
+type resolveAuthSchemeMiddleware struct {
+	operation string
+	options   Options
+}
+
+func (*resolveAuthSchemeMiddleware) ID() string {
+	return "ResolveAuthScheme"
+}
+
+func (m *resolveAuthSchemeMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (
+	out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
+) {
+	params := bindAuthResolverParams(ctx, m.operation, getOperationInput(ctx), m.options)
+	options, err := m.options.AuthSchemeResolver.ResolveAuthSchemes(ctx, params)
+	if err != nil {
+		return out, metadata, fmt.Errorf("resolve auth scheme: %w", err)
+	}
+
+	scheme, ok := m.selectScheme(options)
+	if !ok {
+		return out, metadata, fmt.Errorf("could not select an auth scheme")
+	}
+
+	ctx = setResolvedAuthScheme(ctx, scheme)
+	return next.HandleFinalize(ctx, in)
+}
+
+func (m *resolveAuthSchemeMiddleware) selectScheme(options []*smithyauth.Option) (*resolvedAuthScheme, bool) {
+	for _, option := range options {
+		if option.SchemeID == smithyauth.SchemeIDAnonymous {
+			return newResolvedAuthScheme(smithyhttp.NewAnonymousScheme(), option), true
+		}
+
+		for _, scheme := range m.options.AuthSchemes {
+			if scheme.SchemeID() != option.SchemeID {
+				continue
+			}
+
+			if scheme.IdentityResolver(m.options) != nil {
+				return newResolvedAuthScheme(scheme, option), true
+			}
+		}
+	}
+
+	return nil, false
+}
+
+type resolvedAuthSchemeKey struct{}
+
+type resolvedAuthScheme struct {
+	Scheme             smithyhttp.AuthScheme
+	IdentityProperties smithy.Properties
+	SignerProperties   smithy.Properties
+}
+
+func newResolvedAuthScheme(scheme smithyhttp.AuthScheme, option *smithyauth.Option) *resolvedAuthScheme {
+	return &resolvedAuthScheme{
+		Scheme:             scheme,
+		IdentityProperties: option.IdentityProperties,
+		SignerProperties:   option.SignerProperties,
+	}
+}
+
+func setResolvedAuthScheme(ctx context.Context, scheme *resolvedAuthScheme) context.Context {
+	return middleware.WithStackValue(ctx, resolvedAuthSchemeKey{}, scheme)
+}
+
+func getResolvedAuthScheme(ctx context.Context) *resolvedAuthScheme {
+	v, _ := middleware.GetStackValue(ctx, resolvedAuthSchemeKey{}).(*resolvedAuthScheme)
+	return v
+}
+
+type getIdentityMiddleware struct {
+	options Options
+}
+
+func (*getIdentityMiddleware) ID() string {
+	return "GetIdentity"
+}
+
+func (m *getIdentityMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (
+	out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
+) {
+	rscheme := getResolvedAuthScheme(ctx)
+	if rscheme == nil {
+		return out, metadata, fmt.Errorf("no resolved auth scheme")
+	}
+
+	resolver := rscheme.Scheme.IdentityResolver(m.options)
+	if resolver == nil {
+		return out, metadata, fmt.Errorf("no identity resolver")
+	}
+
+	identity, err := resolver.GetIdentity(ctx, rscheme.IdentityProperties)
+	if err != nil {
+		return out, metadata, fmt.Errorf("get identity: %w", err)
+	}
+
+	ctx = setIdentity(ctx, identity)
+	return next.HandleFinalize(ctx, in)
+}
+
+type identityKey struct{}
+
+func setIdentity(ctx context.Context, identity smithyauth.Identity) context.Context {
+	return middleware.WithStackValue(ctx, identityKey{}, identity)
+}
+
+func getIdentity(ctx context.Context) smithyauth.Identity {
+	v, _ := middleware.GetStackValue(ctx, identityKey{}).(smithyauth.Identity)
+	return v
+}
+
+type signRequestMiddleware struct {
+}
+
+func (*signRequestMiddleware) ID() string {
+	return "Signing"
+}
+
+func (m *signRequestMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (
+	out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
+) {
+	req, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, fmt.Errorf("unexpected transport type %T", in.Request)
+	}
+
+	rscheme := getResolvedAuthScheme(ctx)
+	if rscheme == nil {
+		return out, metadata, fmt.Errorf("no resolved auth scheme")
+	}
+
+	identity := getIdentity(ctx)
+	if identity == nil {
+		return out, metadata, fmt.Errorf("no identity")
+	}
+
+	signer := rscheme.Scheme.Signer()
+	if signer == nil {
+		return out, metadata, fmt.Errorf("no signer")
+	}
+
+	if err := signer.SignRequest(ctx, req, identity, rscheme.SignerProperties); err != nil {
+		return out, metadata, fmt.Errorf("sign request: %w", err)
+	}
+
+	return next.HandleFinalize(ctx, in)
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/service/sts/deserializers.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/service/sts/deserializers.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/service/sts/deserializers.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/service/sts/deserializers.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,2516 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package sts
+
+import (
+	"bytes"
+	"context"
+	"encoding/xml"
+	"fmt"
+	awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+	awsxml "github.com/aws/aws-sdk-go-v2/aws/protocol/xml"
+	"github.com/aws/aws-sdk-go-v2/service/sts/types"
+	smithy "github.com/aws/smithy-go"
+	smithyxml "github.com/aws/smithy-go/encoding/xml"
+	smithyio "github.com/aws/smithy-go/io"
+	"github.com/aws/smithy-go/middleware"
+	"github.com/aws/smithy-go/ptr"
+	smithytime "github.com/aws/smithy-go/time"
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+	"io"
+	"strconv"
+	"strings"
+	"time"
+)
+
+func deserializeS3Expires(v string) (*time.Time, error) {
+	t, err := smithytime.ParseHTTPDate(v)
+	if err != nil {
+		return nil, nil
+	}
+	return &t, nil
+}
+
+type awsAwsquery_deserializeOpAssumeRole struct {
+}
+
+func (*awsAwsquery_deserializeOpAssumeRole) ID() string {
+	return "OperationDeserializer"
+}
+
+func (m *awsAwsquery_deserializeOpAssumeRole) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+	out, metadata, err = next.HandleDeserialize(ctx, in)
+	if err != nil {
+		return out, metadata, err
+	}
+
+	response, ok := out.RawResponse.(*smithyhttp.Response)
+	if !ok {
+		return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+	}
+
+	if response.StatusCode < 200 || response.StatusCode >= 300 {
+		return out, metadata, awsAwsquery_deserializeOpErrorAssumeRole(response, &metadata)
+	}
+	output := &AssumeRoleOutput{}
+	out.Result = output
+
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+	body := io.TeeReader(response.Body, ringBuffer)
+	rootDecoder := xml.NewDecoder(body)
+	t, err := smithyxml.FetchRootElement(rootDecoder)
+	if err == io.EOF {
+		return out, metadata, nil
+	}
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		return out, metadata, &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+	}
+
+	decoder := smithyxml.WrapNodeDecoder(rootDecoder, t)
+	t, err = decoder.GetElement("AssumeRoleResult")
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return out, metadata, err
+	}
+
+	decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+	err = awsAwsquery_deserializeOpDocumentAssumeRoleOutput(&output, decoder)
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return out, metadata, err
+	}
+
+	return out, metadata, err
+}
+
+func awsAwsquery_deserializeOpErrorAssumeRole(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+	var errorBuffer bytes.Buffer
+	if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+		return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+	}
+	errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+	errorCode := "UnknownError"
+	errorMessage := errorCode
+
+	errorComponents, err := awsxml.GetErrorResponseComponents(errorBody, false)
+	if err != nil {
+		return err
+	}
+	if reqID := errorComponents.RequestID; len(reqID) != 0 {
+		awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+	}
+	if len(errorComponents.Code) != 0 {
+		errorCode = errorComponents.Code
+	}
+	if len(errorComponents.Message) != 0 {
+		errorMessage = errorComponents.Message
+	}
+	errorBody.Seek(0, io.SeekStart)
+	switch {
+	case strings.EqualFold("ExpiredTokenException", errorCode):
+		return awsAwsquery_deserializeErrorExpiredTokenException(response, errorBody)
+
+	case strings.EqualFold("MalformedPolicyDocument", errorCode):
+		return awsAwsquery_deserializeErrorMalformedPolicyDocumentException(response, errorBody)
+
+	case strings.EqualFold("PackedPolicyTooLarge", errorCode):
+		return awsAwsquery_deserializeErrorPackedPolicyTooLargeException(response, errorBody)
+
+	case strings.EqualFold("RegionDisabledException", errorCode):
+		return awsAwsquery_deserializeErrorRegionDisabledException(response, errorBody)
+
+	default:
+		genericError := &smithy.GenericAPIError{
+			Code:    errorCode,
+			Message: errorMessage,
+		}
+		return genericError
+
+	}
+}
+
+type awsAwsquery_deserializeOpAssumeRoleWithSAML struct {
+}
+
+func (*awsAwsquery_deserializeOpAssumeRoleWithSAML) ID() string {
+	return "OperationDeserializer"
+}
+
+func (m *awsAwsquery_deserializeOpAssumeRoleWithSAML) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+	out, metadata, err = next.HandleDeserialize(ctx, in)
+	if err != nil {
+		return out, metadata, err
+	}
+
+	response, ok := out.RawResponse.(*smithyhttp.Response)
+	if !ok {
+		return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+	}
+
+	if response.StatusCode < 200 || response.StatusCode >= 300 {
+		return out, metadata, awsAwsquery_deserializeOpErrorAssumeRoleWithSAML(response, &metadata)
+	}
+	output := &AssumeRoleWithSAMLOutput{}
+	out.Result = output
+
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+	body := io.TeeReader(response.Body, ringBuffer)
+	rootDecoder := xml.NewDecoder(body)
+	t, err := smithyxml.FetchRootElement(rootDecoder)
+	if err == io.EOF {
+		return out, metadata, nil
+	}
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		return out, metadata, &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+	}
+
+	decoder := smithyxml.WrapNodeDecoder(rootDecoder, t)
+	t, err = decoder.GetElement("AssumeRoleWithSAMLResult")
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return out, metadata, err
+	}
+
+	decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+	err = awsAwsquery_deserializeOpDocumentAssumeRoleWithSAMLOutput(&output, decoder)
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return out, metadata, err
+	}
+
+	return out, metadata, err
+}
+
+func awsAwsquery_deserializeOpErrorAssumeRoleWithSAML(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+	var errorBuffer bytes.Buffer
+	if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+		return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+	}
+	errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+	errorCode := "UnknownError"
+	errorMessage := errorCode
+
+	errorComponents, err := awsxml.GetErrorResponseComponents(errorBody, false)
+	if err != nil {
+		return err
+	}
+	if reqID := errorComponents.RequestID; len(reqID) != 0 {
+		awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+	}
+	if len(errorComponents.Code) != 0 {
+		errorCode = errorComponents.Code
+	}
+	if len(errorComponents.Message) != 0 {
+		errorMessage = errorComponents.Message
+	}
+	errorBody.Seek(0, io.SeekStart)
+	switch {
+	case strings.EqualFold("ExpiredTokenException", errorCode):
+		return awsAwsquery_deserializeErrorExpiredTokenException(response, errorBody)
+
+	case strings.EqualFold("IDPRejectedClaim", errorCode):
+		return awsAwsquery_deserializeErrorIDPRejectedClaimException(response, errorBody)
+
+	case strings.EqualFold("InvalidIdentityToken", errorCode):
+		return awsAwsquery_deserializeErrorInvalidIdentityTokenException(response, errorBody)
+
+	case strings.EqualFold("MalformedPolicyDocument", errorCode):
+		return awsAwsquery_deserializeErrorMalformedPolicyDocumentException(response, errorBody)
+
+	case strings.EqualFold("PackedPolicyTooLarge", errorCode):
+		return awsAwsquery_deserializeErrorPackedPolicyTooLargeException(response, errorBody)
+
+	case strings.EqualFold("RegionDisabledException", errorCode):
+		return awsAwsquery_deserializeErrorRegionDisabledException(response, errorBody)
+
+	default:
+		genericError := &smithy.GenericAPIError{
+			Code:    errorCode,
+			Message: errorMessage,
+		}
+		return genericError
+
+	}
+}
+
+type awsAwsquery_deserializeOpAssumeRoleWithWebIdentity struct {
+}
+
+func (*awsAwsquery_deserializeOpAssumeRoleWithWebIdentity) ID() string {
+	return "OperationDeserializer"
+}
+
+func (m *awsAwsquery_deserializeOpAssumeRoleWithWebIdentity) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+	out, metadata, err = next.HandleDeserialize(ctx, in)
+	if err != nil {
+		return out, metadata, err
+	}
+
+	response, ok := out.RawResponse.(*smithyhttp.Response)
+	if !ok {
+		return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+	}
+
+	if response.StatusCode < 200 || response.StatusCode >= 300 {
+		return out, metadata, awsAwsquery_deserializeOpErrorAssumeRoleWithWebIdentity(response, &metadata)
+	}
+	output := &AssumeRoleWithWebIdentityOutput{}
+	out.Result = output
+
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+	body := io.TeeReader(response.Body, ringBuffer)
+	rootDecoder := xml.NewDecoder(body)
+	t, err := smithyxml.FetchRootElement(rootDecoder)
+	if err == io.EOF {
+		return out, metadata, nil
+	}
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		return out, metadata, &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+	}
+
+	decoder := smithyxml.WrapNodeDecoder(rootDecoder, t)
+	t, err = decoder.GetElement("AssumeRoleWithWebIdentityResult")
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return out, metadata, err
+	}
+
+	decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+	err = awsAwsquery_deserializeOpDocumentAssumeRoleWithWebIdentityOutput(&output, decoder)
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return out, metadata, err
+	}
+
+	return out, metadata, err
+}
+
+// awsAwsquery_deserializeOpErrorAssumeRoleWithWebIdentity maps a non-2xx
+// awsquery error response for AssumeRoleWithWebIdentity to a modeled error
+// type. The body is buffered so it can be read twice: once to extract the
+// error code/message/request-id components, then again (after Seek to start)
+// by the matched per-error deserializer. Unrecognized codes fall through to a
+// generic smithy.GenericAPIError.
+// NOTE: generated code (smithy codegen); manual edits will be lost on regeneration.
+func awsAwsquery_deserializeOpErrorAssumeRoleWithWebIdentity(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+	var errorBuffer bytes.Buffer
+	if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+		return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+	}
+	errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+	errorCode := "UnknownError"
+	errorMessage := errorCode
+
+	errorComponents, err := awsxml.GetErrorResponseComponents(errorBody, false)
+	if err != nil {
+		return err
+	}
+	if reqID := errorComponents.RequestID; len(reqID) != 0 {
+		awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+	}
+	if len(errorComponents.Code) != 0 {
+		errorCode = errorComponents.Code
+	}
+	if len(errorComponents.Message) != 0 {
+		errorMessage = errorComponents.Message
+	}
+	// Rewind so the matched error deserializer re-reads the full body.
+	errorBody.Seek(0, io.SeekStart)
+	switch {
+	case strings.EqualFold("ExpiredTokenException", errorCode):
+		return awsAwsquery_deserializeErrorExpiredTokenException(response, errorBody)
+
+	case strings.EqualFold("IDPCommunicationError", errorCode):
+		return awsAwsquery_deserializeErrorIDPCommunicationErrorException(response, errorBody)
+
+	case strings.EqualFold("IDPRejectedClaim", errorCode):
+		return awsAwsquery_deserializeErrorIDPRejectedClaimException(response, errorBody)
+
+	case strings.EqualFold("InvalidIdentityToken", errorCode):
+		return awsAwsquery_deserializeErrorInvalidIdentityTokenException(response, errorBody)
+
+	case strings.EqualFold("MalformedPolicyDocument", errorCode):
+		return awsAwsquery_deserializeErrorMalformedPolicyDocumentException(response, errorBody)
+
+	case strings.EqualFold("PackedPolicyTooLarge", errorCode):
+		return awsAwsquery_deserializeErrorPackedPolicyTooLargeException(response, errorBody)
+
+	case strings.EqualFold("RegionDisabledException", errorCode):
+		return awsAwsquery_deserializeErrorRegionDisabledException(response, errorBody)
+
+	default:
+		genericError := &smithy.GenericAPIError{
+			Code:    errorCode,
+			Message: errorMessage,
+		}
+		return genericError
+
+	}
+}
+
+// awsAwsquery_deserializeOpDecodeAuthorizationMessage is the deserialize
+// middleware for the DecodeAuthorizationMessage operation.
+type awsAwsquery_deserializeOpDecodeAuthorizationMessage struct {
+}
+
+// ID returns the middleware identifier used in the operation's middleware stack.
+func (*awsAwsquery_deserializeOpDecodeAuthorizationMessage) ID() string {
+	return "OperationDeserializer"
+}
+
+// HandleDeserialize decodes the XML response body of DecodeAuthorizationMessage
+// into a DecodeAuthorizationMessageOutput. Non-2xx status codes are routed to
+// the operation error deserializer. A 1KB ring buffer tees the body so a
+// snapshot can accompany any DeserializationError.
+func (m *awsAwsquery_deserializeOpDecodeAuthorizationMessage) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+	out, metadata, err = next.HandleDeserialize(ctx, in)
+	if err != nil {
+		return out, metadata, err
+	}
+
+	response, ok := out.RawResponse.(*smithyhttp.Response)
+	if !ok {
+		return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+	}
+
+	if response.StatusCode < 200 || response.StatusCode >= 300 {
+		return out, metadata, awsAwsquery_deserializeOpErrorDecodeAuthorizationMessage(response, &metadata)
+	}
+	output := &DecodeAuthorizationMessageOutput{}
+	out.Result = output
+
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+	body := io.TeeReader(response.Body, ringBuffer)
+	rootDecoder := xml.NewDecoder(body)
+	t, err := smithyxml.FetchRootElement(rootDecoder)
+	// An empty body (io.EOF before any root element) yields the empty output.
+	if err == io.EOF {
+		return out, metadata, nil
+	}
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		return out, metadata, &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+	}
+
+	// Descend from the response root into the <DecodeAuthorizationMessageResult>
+	// element before decoding the output document.
+	decoder := smithyxml.WrapNodeDecoder(rootDecoder, t)
+	t, err = decoder.GetElement("DecodeAuthorizationMessageResult")
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return out, metadata, err
+	}
+
+	decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+	err = awsAwsquery_deserializeOpDocumentDecodeAuthorizationMessageOutput(&output, decoder)
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return out, metadata, err
+	}
+
+	return out, metadata, err
+}
+
+// awsAwsquery_deserializeOpErrorDecodeAuthorizationMessage maps a non-2xx
+// awsquery error response for DecodeAuthorizationMessage to a modeled error;
+// only InvalidAuthorizationMessageException is modeled, anything else becomes
+// a generic smithy.GenericAPIError.
+func awsAwsquery_deserializeOpErrorDecodeAuthorizationMessage(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+	var errorBuffer bytes.Buffer
+	if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+		return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+	}
+	errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+	errorCode := "UnknownError"
+	errorMessage := errorCode
+
+	errorComponents, err := awsxml.GetErrorResponseComponents(errorBody, false)
+	if err != nil {
+		return err
+	}
+	if reqID := errorComponents.RequestID; len(reqID) != 0 {
+		awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+	}
+	if len(errorComponents.Code) != 0 {
+		errorCode = errorComponents.Code
+	}
+	if len(errorComponents.Message) != 0 {
+		errorMessage = errorComponents.Message
+	}
+	// Rewind so the matched error deserializer re-reads the full body.
+	errorBody.Seek(0, io.SeekStart)
+	switch {
+	case strings.EqualFold("InvalidAuthorizationMessageException", errorCode):
+		return awsAwsquery_deserializeErrorInvalidAuthorizationMessageException(response, errorBody)
+
+	default:
+		genericError := &smithy.GenericAPIError{
+			Code:    errorCode,
+			Message: errorMessage,
+		}
+		return genericError
+
+	}
+}
+
+// awsAwsquery_deserializeOpGetAccessKeyInfo is the deserialize middleware for
+// the GetAccessKeyInfo operation.
+type awsAwsquery_deserializeOpGetAccessKeyInfo struct {
+}
+
+// ID returns the middleware identifier used in the operation's middleware stack.
+func (*awsAwsquery_deserializeOpGetAccessKeyInfo) ID() string {
+	return "OperationDeserializer"
+}
+
+// HandleDeserialize decodes the XML response body of GetAccessKeyInfo into a
+// GetAccessKeyInfoOutput. Non-2xx status codes are routed to the operation
+// error deserializer; an empty body yields the empty output. A 1KB ring
+// buffer tees the body so a snapshot can accompany any DeserializationError.
+func (m *awsAwsquery_deserializeOpGetAccessKeyInfo) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+	out, metadata, err = next.HandleDeserialize(ctx, in)
+	if err != nil {
+		return out, metadata, err
+	}
+
+	response, ok := out.RawResponse.(*smithyhttp.Response)
+	if !ok {
+		return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+	}
+
+	if response.StatusCode < 200 || response.StatusCode >= 300 {
+		return out, metadata, awsAwsquery_deserializeOpErrorGetAccessKeyInfo(response, &metadata)
+	}
+	output := &GetAccessKeyInfoOutput{}
+	out.Result = output
+
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+	body := io.TeeReader(response.Body, ringBuffer)
+	rootDecoder := xml.NewDecoder(body)
+	t, err := smithyxml.FetchRootElement(rootDecoder)
+	if err == io.EOF {
+		return out, metadata, nil
+	}
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		return out, metadata, &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+	}
+
+	// Descend into the <GetAccessKeyInfoResult> element before decoding.
+	decoder := smithyxml.WrapNodeDecoder(rootDecoder, t)
+	t, err = decoder.GetElement("GetAccessKeyInfoResult")
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return out, metadata, err
+	}
+
+	decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+	err = awsAwsquery_deserializeOpDocumentGetAccessKeyInfoOutput(&output, decoder)
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return out, metadata, err
+	}
+
+	return out, metadata, err
+}
+
+// awsAwsquery_deserializeOpErrorGetAccessKeyInfo handles a non-2xx awsquery
+// error response for GetAccessKeyInfo. The operation models no specific error
+// shapes, so every error becomes a smithy.GenericAPIError carrying the parsed
+// code and message.
+func awsAwsquery_deserializeOpErrorGetAccessKeyInfo(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+	var errorBuffer bytes.Buffer
+	if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+		return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+	}
+	errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+	errorCode := "UnknownError"
+	errorMessage := errorCode
+
+	errorComponents, err := awsxml.GetErrorResponseComponents(errorBody, false)
+	if err != nil {
+		return err
+	}
+	if reqID := errorComponents.RequestID; len(reqID) != 0 {
+		awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+	}
+	if len(errorComponents.Code) != 0 {
+		errorCode = errorComponents.Code
+	}
+	if len(errorComponents.Message) != 0 {
+		errorMessage = errorComponents.Message
+	}
+	errorBody.Seek(0, io.SeekStart)
+	switch {
+	default:
+		genericError := &smithy.GenericAPIError{
+			Code:    errorCode,
+			Message: errorMessage,
+		}
+		return genericError
+
+	}
+}
+
+// awsAwsquery_deserializeOpGetCallerIdentity is the deserialize middleware for
+// the GetCallerIdentity operation.
+type awsAwsquery_deserializeOpGetCallerIdentity struct {
+}
+
+// ID returns the middleware identifier used in the operation's middleware stack.
+func (*awsAwsquery_deserializeOpGetCallerIdentity) ID() string {
+	return "OperationDeserializer"
+}
+
+// HandleDeserialize decodes the XML response body of GetCallerIdentity into a
+// GetCallerIdentityOutput. Non-2xx status codes are routed to the operation
+// error deserializer; an empty body yields the empty output. A 1KB ring
+// buffer tees the body so a snapshot can accompany any DeserializationError.
+func (m *awsAwsquery_deserializeOpGetCallerIdentity) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+	out, metadata, err = next.HandleDeserialize(ctx, in)
+	if err != nil {
+		return out, metadata, err
+	}
+
+	response, ok := out.RawResponse.(*smithyhttp.Response)
+	if !ok {
+		return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+	}
+
+	if response.StatusCode < 200 || response.StatusCode >= 300 {
+		return out, metadata, awsAwsquery_deserializeOpErrorGetCallerIdentity(response, &metadata)
+	}
+	output := &GetCallerIdentityOutput{}
+	out.Result = output
+
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+	body := io.TeeReader(response.Body, ringBuffer)
+	rootDecoder := xml.NewDecoder(body)
+	t, err := smithyxml.FetchRootElement(rootDecoder)
+	if err == io.EOF {
+		return out, metadata, nil
+	}
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		return out, metadata, &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+	}
+
+	// Descend into the <GetCallerIdentityResult> element before decoding.
+	decoder := smithyxml.WrapNodeDecoder(rootDecoder, t)
+	t, err = decoder.GetElement("GetCallerIdentityResult")
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return out, metadata, err
+	}
+
+	decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+	err = awsAwsquery_deserializeOpDocumentGetCallerIdentityOutput(&output, decoder)
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return out, metadata, err
+	}
+
+	return out, metadata, err
+}
+
+// awsAwsquery_deserializeOpErrorGetCallerIdentity handles a non-2xx awsquery
+// error response for GetCallerIdentity. The operation models no specific error
+// shapes, so every error becomes a smithy.GenericAPIError carrying the parsed
+// code and message.
+func awsAwsquery_deserializeOpErrorGetCallerIdentity(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+	var errorBuffer bytes.Buffer
+	if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+		return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+	}
+	errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+	errorCode := "UnknownError"
+	errorMessage := errorCode
+
+	errorComponents, err := awsxml.GetErrorResponseComponents(errorBody, false)
+	if err != nil {
+		return err
+	}
+	if reqID := errorComponents.RequestID; len(reqID) != 0 {
+		awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+	}
+	if len(errorComponents.Code) != 0 {
+		errorCode = errorComponents.Code
+	}
+	if len(errorComponents.Message) != 0 {
+		errorMessage = errorComponents.Message
+	}
+	errorBody.Seek(0, io.SeekStart)
+	switch {
+	default:
+		genericError := &smithy.GenericAPIError{
+			Code:    errorCode,
+			Message: errorMessage,
+		}
+		return genericError
+
+	}
+}
+
+// awsAwsquery_deserializeOpGetFederationToken is the deserialize middleware
+// for the GetFederationToken operation.
+type awsAwsquery_deserializeOpGetFederationToken struct {
+}
+
+// ID returns the middleware identifier used in the operation's middleware stack.
+func (*awsAwsquery_deserializeOpGetFederationToken) ID() string {
+	return "OperationDeserializer"
+}
+
+// HandleDeserialize decodes the XML response body of GetFederationToken into a
+// GetFederationTokenOutput. Non-2xx status codes are routed to the operation
+// error deserializer; an empty body yields the empty output. A 1KB ring
+// buffer tees the body so a snapshot can accompany any DeserializationError.
+func (m *awsAwsquery_deserializeOpGetFederationToken) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+	out, metadata, err = next.HandleDeserialize(ctx, in)
+	if err != nil {
+		return out, metadata, err
+	}
+
+	response, ok := out.RawResponse.(*smithyhttp.Response)
+	if !ok {
+		return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+	}
+
+	if response.StatusCode < 200 || response.StatusCode >= 300 {
+		return out, metadata, awsAwsquery_deserializeOpErrorGetFederationToken(response, &metadata)
+	}
+	output := &GetFederationTokenOutput{}
+	out.Result = output
+
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+	body := io.TeeReader(response.Body, ringBuffer)
+	rootDecoder := xml.NewDecoder(body)
+	t, err := smithyxml.FetchRootElement(rootDecoder)
+	if err == io.EOF {
+		return out, metadata, nil
+	}
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		return out, metadata, &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+	}
+
+	// Descend into the <GetFederationTokenResult> element before decoding.
+	decoder := smithyxml.WrapNodeDecoder(rootDecoder, t)
+	t, err = decoder.GetElement("GetFederationTokenResult")
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return out, metadata, err
+	}
+
+	decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+	err = awsAwsquery_deserializeOpDocumentGetFederationTokenOutput(&output, decoder)
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return out, metadata, err
+	}
+
+	return out, metadata, err
+}
+
+// awsAwsquery_deserializeOpErrorGetFederationToken maps a non-2xx awsquery
+// error response for GetFederationToken to one of its modeled error types
+// (MalformedPolicyDocument, PackedPolicyTooLarge, RegionDisabledException);
+// unrecognized codes fall through to a generic smithy.GenericAPIError.
+func awsAwsquery_deserializeOpErrorGetFederationToken(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+	var errorBuffer bytes.Buffer
+	if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+		return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+	}
+	errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+	errorCode := "UnknownError"
+	errorMessage := errorCode
+
+	errorComponents, err := awsxml.GetErrorResponseComponents(errorBody, false)
+	if err != nil {
+		return err
+	}
+	if reqID := errorComponents.RequestID; len(reqID) != 0 {
+		awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+	}
+	if len(errorComponents.Code) != 0 {
+		errorCode = errorComponents.Code
+	}
+	if len(errorComponents.Message) != 0 {
+		errorMessage = errorComponents.Message
+	}
+	// Rewind so the matched error deserializer re-reads the full body.
+	errorBody.Seek(0, io.SeekStart)
+	switch {
+	case strings.EqualFold("MalformedPolicyDocument", errorCode):
+		return awsAwsquery_deserializeErrorMalformedPolicyDocumentException(response, errorBody)
+
+	case strings.EqualFold("PackedPolicyTooLarge", errorCode):
+		return awsAwsquery_deserializeErrorPackedPolicyTooLargeException(response, errorBody)
+
+	case strings.EqualFold("RegionDisabledException", errorCode):
+		return awsAwsquery_deserializeErrorRegionDisabledException(response, errorBody)
+
+	default:
+		genericError := &smithy.GenericAPIError{
+			Code:    errorCode,
+			Message: errorMessage,
+		}
+		return genericError
+
+	}
+}
+
+// awsAwsquery_deserializeOpGetSessionToken is the deserialize middleware for
+// the GetSessionToken operation.
+type awsAwsquery_deserializeOpGetSessionToken struct {
+}
+
+// ID returns the middleware identifier used in the operation's middleware stack.
+func (*awsAwsquery_deserializeOpGetSessionToken) ID() string {
+	return "OperationDeserializer"
+}
+
+// HandleDeserialize decodes the XML response body of GetSessionToken into a
+// GetSessionTokenOutput. Non-2xx status codes are routed to the operation
+// error deserializer; an empty body yields the empty output. A 1KB ring
+// buffer tees the body so a snapshot can accompany any DeserializationError.
+func (m *awsAwsquery_deserializeOpGetSessionToken) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+	out, metadata, err = next.HandleDeserialize(ctx, in)
+	if err != nil {
+		return out, metadata, err
+	}
+
+	response, ok := out.RawResponse.(*smithyhttp.Response)
+	if !ok {
+		return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+	}
+
+	if response.StatusCode < 200 || response.StatusCode >= 300 {
+		return out, metadata, awsAwsquery_deserializeOpErrorGetSessionToken(response, &metadata)
+	}
+	output := &GetSessionTokenOutput{}
+	out.Result = output
+
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+	body := io.TeeReader(response.Body, ringBuffer)
+	rootDecoder := xml.NewDecoder(body)
+	t, err := smithyxml.FetchRootElement(rootDecoder)
+	if err == io.EOF {
+		return out, metadata, nil
+	}
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		return out, metadata, &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+	}
+
+	// Descend into the <GetSessionTokenResult> element before decoding.
+	decoder := smithyxml.WrapNodeDecoder(rootDecoder, t)
+	t, err = decoder.GetElement("GetSessionTokenResult")
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return out, metadata, err
+	}
+
+	decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+	err = awsAwsquery_deserializeOpDocumentGetSessionTokenOutput(&output, decoder)
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return out, metadata, err
+	}
+
+	return out, metadata, err
+}
+
+// awsAwsquery_deserializeOpErrorGetSessionToken maps a non-2xx awsquery error
+// response for GetSessionToken to its single modeled error type
+// (RegionDisabledException); unrecognized codes fall through to a generic
+// smithy.GenericAPIError.
+func awsAwsquery_deserializeOpErrorGetSessionToken(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+	var errorBuffer bytes.Buffer
+	if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+		return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+	}
+	errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+	errorCode := "UnknownError"
+	errorMessage := errorCode
+
+	errorComponents, err := awsxml.GetErrorResponseComponents(errorBody, false)
+	if err != nil {
+		return err
+	}
+	if reqID := errorComponents.RequestID; len(reqID) != 0 {
+		awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+	}
+	if len(errorComponents.Code) != 0 {
+		errorCode = errorComponents.Code
+	}
+	if len(errorComponents.Message) != 0 {
+		errorMessage = errorComponents.Message
+	}
+	// Rewind so the matched error deserializer re-reads the full body.
+	errorBody.Seek(0, io.SeekStart)
+	switch {
+	case strings.EqualFold("RegionDisabledException", errorCode):
+		return awsAwsquery_deserializeErrorRegionDisabledException(response, errorBody)
+
+	default:
+		genericError := &smithy.GenericAPIError{
+			Code:    errorCode,
+			Message: errorMessage,
+		}
+		return genericError
+
+	}
+}
+
+// awsAwsquery_deserializeErrorExpiredTokenException decodes the <Error>
+// element of an awsquery error body into *types.ExpiredTokenException.
+// An empty body (io.EOF at the root) returns the zero-value error.
+func awsAwsquery_deserializeErrorExpiredTokenException(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+	output := &types.ExpiredTokenException{}
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+	body := io.TeeReader(errorBody, ringBuffer)
+	rootDecoder := xml.NewDecoder(body)
+	t, err := smithyxml.FetchRootElement(rootDecoder)
+	if err == io.EOF {
+		return output
+	}
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		return &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+	}
+
+	// Descend into the <Error> element before decoding the exception document.
+	decoder := smithyxml.WrapNodeDecoder(rootDecoder, t)
+	t, err = decoder.GetElement("Error")
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		return &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+	}
+
+	decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+	err = awsAwsquery_deserializeDocumentExpiredTokenException(&output, decoder)
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		return &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+	}
+
+	return output
+}
+
+// awsAwsquery_deserializeErrorIDPCommunicationErrorException decodes the
+// <Error> element of an awsquery error body into
+// *types.IDPCommunicationErrorException. An empty body returns the zero-value
+// error.
+func awsAwsquery_deserializeErrorIDPCommunicationErrorException(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+	output := &types.IDPCommunicationErrorException{}
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+	body := io.TeeReader(errorBody, ringBuffer)
+	rootDecoder := xml.NewDecoder(body)
+	t, err := smithyxml.FetchRootElement(rootDecoder)
+	if err == io.EOF {
+		return output
+	}
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		return &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+	}
+
+	decoder := smithyxml.WrapNodeDecoder(rootDecoder, t)
+	t, err = decoder.GetElement("Error")
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		return &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+	}
+
+	decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+	err = awsAwsquery_deserializeDocumentIDPCommunicationErrorException(&output, decoder)
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		return &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+	}
+
+	return output
+}
+
+// awsAwsquery_deserializeErrorIDPRejectedClaimException decodes the <Error>
+// element of an awsquery error body into *types.IDPRejectedClaimException.
+// An empty body returns the zero-value error.
+func awsAwsquery_deserializeErrorIDPRejectedClaimException(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+	output := &types.IDPRejectedClaimException{}
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+	body := io.TeeReader(errorBody, ringBuffer)
+	rootDecoder := xml.NewDecoder(body)
+	t, err := smithyxml.FetchRootElement(rootDecoder)
+	if err == io.EOF {
+		return output
+	}
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		return &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+	}
+
+	decoder := smithyxml.WrapNodeDecoder(rootDecoder, t)
+	t, err = decoder.GetElement("Error")
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		return &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+	}
+
+	decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+	err = awsAwsquery_deserializeDocumentIDPRejectedClaimException(&output, decoder)
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		return &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+	}
+
+	return output
+}
+
+// awsAwsquery_deserializeErrorInvalidAuthorizationMessageException decodes
+// the <Error> element of an awsquery error body into
+// *types.InvalidAuthorizationMessageException. An empty body returns the
+// zero-value error.
+func awsAwsquery_deserializeErrorInvalidAuthorizationMessageException(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+	output := &types.InvalidAuthorizationMessageException{}
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+	body := io.TeeReader(errorBody, ringBuffer)
+	rootDecoder := xml.NewDecoder(body)
+	t, err := smithyxml.FetchRootElement(rootDecoder)
+	if err == io.EOF {
+		return output
+	}
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		return &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+	}
+
+	decoder := smithyxml.WrapNodeDecoder(rootDecoder, t)
+	t, err = decoder.GetElement("Error")
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		return &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+	}
+
+	decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+	err = awsAwsquery_deserializeDocumentInvalidAuthorizationMessageException(&output, decoder)
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		return &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+	}
+
+	return output
+}
+
+// awsAwsquery_deserializeErrorInvalidIdentityTokenException decodes the
+// <Error> element of an awsquery error body into
+// *types.InvalidIdentityTokenException. An empty body returns the zero-value
+// error.
+func awsAwsquery_deserializeErrorInvalidIdentityTokenException(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+	output := &types.InvalidIdentityTokenException{}
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+	body := io.TeeReader(errorBody, ringBuffer)
+	rootDecoder := xml.NewDecoder(body)
+	t, err := smithyxml.FetchRootElement(rootDecoder)
+	if err == io.EOF {
+		return output
+	}
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		return &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+	}
+
+	decoder := smithyxml.WrapNodeDecoder(rootDecoder, t)
+	t, err = decoder.GetElement("Error")
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		return &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+	}
+
+	decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+	err = awsAwsquery_deserializeDocumentInvalidIdentityTokenException(&output, decoder)
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		return &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+	}
+
+	return output
+}
+
+// awsAwsquery_deserializeErrorMalformedPolicyDocumentException decodes the
+// <Error> element of an awsquery error body into
+// *types.MalformedPolicyDocumentException. An empty body returns the
+// zero-value error.
+func awsAwsquery_deserializeErrorMalformedPolicyDocumentException(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+	output := &types.MalformedPolicyDocumentException{}
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+	body := io.TeeReader(errorBody, ringBuffer)
+	rootDecoder := xml.NewDecoder(body)
+	t, err := smithyxml.FetchRootElement(rootDecoder)
+	if err == io.EOF {
+		return output
+	}
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		return &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+	}
+
+	decoder := smithyxml.WrapNodeDecoder(rootDecoder, t)
+	t, err = decoder.GetElement("Error")
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		return &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+	}
+
+	decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+	err = awsAwsquery_deserializeDocumentMalformedPolicyDocumentException(&output, decoder)
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		return &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+	}
+
+	return output
+}
+
+// awsAwsquery_deserializeErrorPackedPolicyTooLargeException decodes the
+// <Error> element of an awsquery error body into
+// *types.PackedPolicyTooLargeException. An empty body returns the zero-value
+// error.
+func awsAwsquery_deserializeErrorPackedPolicyTooLargeException(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+	output := &types.PackedPolicyTooLargeException{}
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+	body := io.TeeReader(errorBody, ringBuffer)
+	rootDecoder := xml.NewDecoder(body)
+	t, err := smithyxml.FetchRootElement(rootDecoder)
+	if err == io.EOF {
+		return output
+	}
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		return &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+	}
+
+	decoder := smithyxml.WrapNodeDecoder(rootDecoder, t)
+	t, err = decoder.GetElement("Error")
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		return &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+	}
+
+	decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+	err = awsAwsquery_deserializeDocumentPackedPolicyTooLargeException(&output, decoder)
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		return &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+	}
+
+	return output
+}
+
+// awsAwsquery_deserializeErrorRegionDisabledException decodes the <Error>
+// element of an awsquery error body into *types.RegionDisabledException.
+// An empty body returns the zero-value error.
+func awsAwsquery_deserializeErrorRegionDisabledException(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+	output := &types.RegionDisabledException{}
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+	body := io.TeeReader(errorBody, ringBuffer)
+	rootDecoder := xml.NewDecoder(body)
+	t, err := smithyxml.FetchRootElement(rootDecoder)
+	if err == io.EOF {
+		return output
+	}
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		return &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+	}
+
+	decoder := smithyxml.WrapNodeDecoder(rootDecoder, t)
+	t, err = decoder.GetElement("Error")
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		return &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+	}
+
+	decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+	err = awsAwsquery_deserializeDocumentRegionDisabledException(&output, decoder)
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		return &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+	}
+
+	return output
+}
+
+// awsAwsquery_deserializeDocumentAssumedRoleUser decodes an AssumedRoleUser
+// XML node into *types.AssumedRoleUser, allocating a fresh value when *v is
+// nil and otherwise populating the existing one. Recognized child elements
+// are Arn and AssumedRoleId; unrecognized elements are skipped.
+func awsAwsquery_deserializeDocumentAssumedRoleUser(v **types.AssumedRoleUser, decoder smithyxml.NodeDecoder) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	var sv *types.AssumedRoleUser
+	if *v == nil {
+		sv = &types.AssumedRoleUser{}
+	} else {
+		sv = *v
+	}
+
+	// Iterate child elements until the enclosing node is closed.
+	for {
+		t, done, err := decoder.Token()
+		if err != nil {
+			return err
+		}
+		if done {
+			break
+		}
+		// Wrap a child-scoped decoder for this element; restored after the case.
+		originalDecoder := decoder
+		decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+		switch {
+		case strings.EqualFold("Arn", t.Name.Local):
+			val, err := decoder.Value()
+			if err != nil {
+				return err
+			}
+			if val == nil {
+				break
+			}
+			{
+				xtv := string(val)
+				sv.Arn = ptr.String(xtv)
+			}
+
+		case strings.EqualFold("AssumedRoleId", t.Name.Local):
+			val, err := decoder.Value()
+			if err != nil {
+				return err
+			}
+			if val == nil {
+				break
+			}
+			{
+				xtv := string(val)
+				sv.AssumedRoleId = ptr.String(xtv)
+			}
+
+		default:
+			// Do nothing and ignore the unexpected tag element
+			err = decoder.Decoder.Skip()
+			if err != nil {
+				return err
+			}
+
+		}
+		decoder = originalDecoder
+	}
+	*v = sv
+	return nil
+}
+
+// awsAwsquery_deserializeDocumentCredentials decodes a Credentials XML node
+// into *types.Credentials, allocating a fresh value when *v is nil.
+// Recognized child elements are AccessKeyId, Expiration (parsed as a smithy
+// date-time), SecretAccessKey, and SessionToken; unrecognized elements are
+// skipped.
+func awsAwsquery_deserializeDocumentCredentials(v **types.Credentials, decoder smithyxml.NodeDecoder) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	var sv *types.Credentials
+	if *v == nil {
+		sv = &types.Credentials{}
+	} else {
+		sv = *v
+	}
+
+	// Iterate child elements until the enclosing node is closed.
+	for {
+		t, done, err := decoder.Token()
+		if err != nil {
+			return err
+		}
+		if done {
+			break
+		}
+		originalDecoder := decoder
+		decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+		switch {
+		case strings.EqualFold("AccessKeyId", t.Name.Local):
+			val, err := decoder.Value()
+			if err != nil {
+				return err
+			}
+			if val == nil {
+				break
+			}
+			{
+				xtv := string(val)
+				sv.AccessKeyId = ptr.String(xtv)
+			}
+
+		case strings.EqualFold("Expiration", t.Name.Local):
+			val, err := decoder.Value()
+			if err != nil {
+				return err
+			}
+			if val == nil {
+				break
+			}
+			{
+				xtv := string(val)
+				// Expiration is transmitted as a date-time string.
+				t, err := smithytime.ParseDateTime(xtv)
+				if err != nil {
+					return err
+				}
+				sv.Expiration = ptr.Time(t)
+			}
+
+		case strings.EqualFold("SecretAccessKey", t.Name.Local):
+			val, err := decoder.Value()
+			if err != nil {
+				return err
+			}
+			if val == nil {
+				break
+			}
+			{
+				xtv := string(val)
+				sv.SecretAccessKey = ptr.String(xtv)
+			}
+
+		case strings.EqualFold("SessionToken", t.Name.Local):
+			val, err := decoder.Value()
+			if err != nil {
+				return err
+			}
+			if val == nil {
+				break
+			}
+			{
+				xtv := string(val)
+				sv.SessionToken = ptr.String(xtv)
+			}
+
+		default:
+			// Do nothing and ignore the unexpected tag element
+			err = decoder.Decoder.Skip()
+			if err != nil {
+				return err
+			}
+
+		}
+		decoder = originalDecoder
+	}
+	*v = sv
+	return nil
+}
+
+func awsAwsquery_deserializeDocumentExpiredTokenException(v **types.ExpiredTokenException, decoder smithyxml.NodeDecoder) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	var sv *types.ExpiredTokenException
+	if *v == nil {
+		sv = &types.ExpiredTokenException{}
+	} else {
+		sv = *v
+	}
+
+	for {
+		t, done, err := decoder.Token()
+		if err != nil {
+			return err
+		}
+		if done {
+			break
+		}
+		originalDecoder := decoder
+		decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+		switch {
+		case strings.EqualFold("message", t.Name.Local):
+			val, err := decoder.Value()
+			if err != nil {
+				return err
+			}
+			if val == nil {
+				break
+			}
+			{
+				xtv := string(val)
+				sv.Message = ptr.String(xtv)
+			}
+
+		default:
+			// Do nothing and ignore the unexpected tag element
+			err = decoder.Decoder.Skip()
+			if err != nil {
+				return err
+			}
+
+		}
+		decoder = originalDecoder
+	}
+	*v = sv
+	return nil
+}
+
+func awsAwsquery_deserializeDocumentFederatedUser(v **types.FederatedUser, decoder smithyxml.NodeDecoder) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	var sv *types.FederatedUser
+	if *v == nil {
+		sv = &types.FederatedUser{}
+	} else {
+		sv = *v
+	}
+
+	for {
+		t, done, err := decoder.Token()
+		if err != nil {
+			return err
+		}
+		if done {
+			break
+		}
+		originalDecoder := decoder
+		decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+		switch {
+		case strings.EqualFold("Arn", t.Name.Local):
+			val, err := decoder.Value()
+			if err != nil {
+				return err
+			}
+			if val == nil {
+				break
+			}
+			{
+				xtv := string(val)
+				sv.Arn = ptr.String(xtv)
+			}
+
+		case strings.EqualFold("FederatedUserId", t.Name.Local):
+			val, err := decoder.Value()
+			if err != nil {
+				return err
+			}
+			if val == nil {
+				break
+			}
+			{
+				xtv := string(val)
+				sv.FederatedUserId = ptr.String(xtv)
+			}
+
+		default:
+			// Do nothing and ignore the unexpected tag element
+			err = decoder.Decoder.Skip()
+			if err != nil {
+				return err
+			}
+
+		}
+		decoder = originalDecoder
+	}
+	*v = sv
+	return nil
+}
+
+func awsAwsquery_deserializeDocumentIDPCommunicationErrorException(v **types.IDPCommunicationErrorException, decoder smithyxml.NodeDecoder) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	var sv *types.IDPCommunicationErrorException
+	if *v == nil {
+		sv = &types.IDPCommunicationErrorException{}
+	} else {
+		sv = *v
+	}
+
+	for {
+		t, done, err := decoder.Token()
+		if err != nil {
+			return err
+		}
+		if done {
+			break
+		}
+		originalDecoder := decoder
+		decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+		switch {
+		case strings.EqualFold("message", t.Name.Local):
+			val, err := decoder.Value()
+			if err != nil {
+				return err
+			}
+			if val == nil {
+				break
+			}
+			{
+				xtv := string(val)
+				sv.Message = ptr.String(xtv)
+			}
+
+		default:
+			// Do nothing and ignore the unexpected tag element
+			err = decoder.Decoder.Skip()
+			if err != nil {
+				return err
+			}
+
+		}
+		decoder = originalDecoder
+	}
+	*v = sv
+	return nil
+}
+
+func awsAwsquery_deserializeDocumentIDPRejectedClaimException(v **types.IDPRejectedClaimException, decoder smithyxml.NodeDecoder) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	var sv *types.IDPRejectedClaimException
+	if *v == nil {
+		sv = &types.IDPRejectedClaimException{}
+	} else {
+		sv = *v
+	}
+
+	for {
+		t, done, err := decoder.Token()
+		if err != nil {
+			return err
+		}
+		if done {
+			break
+		}
+		originalDecoder := decoder
+		decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+		switch {
+		case strings.EqualFold("message", t.Name.Local):
+			val, err := decoder.Value()
+			if err != nil {
+				return err
+			}
+			if val == nil {
+				break
+			}
+			{
+				xtv := string(val)
+				sv.Message = ptr.String(xtv)
+			}
+
+		default:
+			// Do nothing and ignore the unexpected tag element
+			err = decoder.Decoder.Skip()
+			if err != nil {
+				return err
+			}
+
+		}
+		decoder = originalDecoder
+	}
+	*v = sv
+	return nil
+}
+
+func awsAwsquery_deserializeDocumentInvalidAuthorizationMessageException(v **types.InvalidAuthorizationMessageException, decoder smithyxml.NodeDecoder) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	var sv *types.InvalidAuthorizationMessageException
+	if *v == nil {
+		sv = &types.InvalidAuthorizationMessageException{}
+	} else {
+		sv = *v
+	}
+
+	for {
+		t, done, err := decoder.Token()
+		if err != nil {
+			return err
+		}
+		if done {
+			break
+		}
+		originalDecoder := decoder
+		decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+		switch {
+		case strings.EqualFold("message", t.Name.Local):
+			val, err := decoder.Value()
+			if err != nil {
+				return err
+			}
+			if val == nil {
+				break
+			}
+			{
+				xtv := string(val)
+				sv.Message = ptr.String(xtv)
+			}
+
+		default:
+			// Do nothing and ignore the unexpected tag element
+			err = decoder.Decoder.Skip()
+			if err != nil {
+				return err
+			}
+
+		}
+		decoder = originalDecoder
+	}
+	*v = sv
+	return nil
+}
+
+func awsAwsquery_deserializeDocumentInvalidIdentityTokenException(v **types.InvalidIdentityTokenException, decoder smithyxml.NodeDecoder) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	var sv *types.InvalidIdentityTokenException
+	if *v == nil {
+		sv = &types.InvalidIdentityTokenException{}
+	} else {
+		sv = *v
+	}
+
+	for {
+		t, done, err := decoder.Token()
+		if err != nil {
+			return err
+		}
+		if done {
+			break
+		}
+		originalDecoder := decoder
+		decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+		switch {
+		case strings.EqualFold("message", t.Name.Local):
+			val, err := decoder.Value()
+			if err != nil {
+				return err
+			}
+			if val == nil {
+				break
+			}
+			{
+				xtv := string(val)
+				sv.Message = ptr.String(xtv)
+			}
+
+		default:
+			// Do nothing and ignore the unexpected tag element
+			err = decoder.Decoder.Skip()
+			if err != nil {
+				return err
+			}
+
+		}
+		decoder = originalDecoder
+	}
+	*v = sv
+	return nil
+}
+
+func awsAwsquery_deserializeDocumentMalformedPolicyDocumentException(v **types.MalformedPolicyDocumentException, decoder smithyxml.NodeDecoder) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	var sv *types.MalformedPolicyDocumentException
+	if *v == nil {
+		sv = &types.MalformedPolicyDocumentException{}
+	} else {
+		sv = *v
+	}
+
+	for {
+		t, done, err := decoder.Token()
+		if err != nil {
+			return err
+		}
+		if done {
+			break
+		}
+		originalDecoder := decoder
+		decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+		switch {
+		case strings.EqualFold("message", t.Name.Local):
+			val, err := decoder.Value()
+			if err != nil {
+				return err
+			}
+			if val == nil {
+				break
+			}
+			{
+				xtv := string(val)
+				sv.Message = ptr.String(xtv)
+			}
+
+		default:
+			// Do nothing and ignore the unexpected tag element
+			err = decoder.Decoder.Skip()
+			if err != nil {
+				return err
+			}
+
+		}
+		decoder = originalDecoder
+	}
+	*v = sv
+	return nil
+}
+
+func awsAwsquery_deserializeDocumentPackedPolicyTooLargeException(v **types.PackedPolicyTooLargeException, decoder smithyxml.NodeDecoder) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	var sv *types.PackedPolicyTooLargeException
+	if *v == nil {
+		sv = &types.PackedPolicyTooLargeException{}
+	} else {
+		sv = *v
+	}
+
+	for {
+		t, done, err := decoder.Token()
+		if err != nil {
+			return err
+		}
+		if done {
+			break
+		}
+		originalDecoder := decoder
+		decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+		switch {
+		case strings.EqualFold("message", t.Name.Local):
+			val, err := decoder.Value()
+			if err != nil {
+				return err
+			}
+			if val == nil {
+				break
+			}
+			{
+				xtv := string(val)
+				sv.Message = ptr.String(xtv)
+			}
+
+		default:
+			// Do nothing and ignore the unexpected tag element
+			err = decoder.Decoder.Skip()
+			if err != nil {
+				return err
+			}
+
+		}
+		decoder = originalDecoder
+	}
+	*v = sv
+	return nil
+}
+
+func awsAwsquery_deserializeDocumentRegionDisabledException(v **types.RegionDisabledException, decoder smithyxml.NodeDecoder) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	var sv *types.RegionDisabledException
+	if *v == nil {
+		sv = &types.RegionDisabledException{}
+	} else {
+		sv = *v
+	}
+
+	for {
+		t, done, err := decoder.Token()
+		if err != nil {
+			return err
+		}
+		if done {
+			break
+		}
+		originalDecoder := decoder
+		decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+		switch {
+		case strings.EqualFold("message", t.Name.Local):
+			val, err := decoder.Value()
+			if err != nil {
+				return err
+			}
+			if val == nil {
+				break
+			}
+			{
+				xtv := string(val)
+				sv.Message = ptr.String(xtv)
+			}
+
+		default:
+			// Do nothing and ignore the unexpected tag element
+			err = decoder.Decoder.Skip()
+			if err != nil {
+				return err
+			}
+
+		}
+		decoder = originalDecoder
+	}
+	*v = sv
+	return nil
+}
+
+func awsAwsquery_deserializeOpDocumentAssumeRoleOutput(v **AssumeRoleOutput, decoder smithyxml.NodeDecoder) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	var sv *AssumeRoleOutput
+	if *v == nil {
+		sv = &AssumeRoleOutput{}
+	} else {
+		sv = *v
+	}
+
+	for {
+		t, done, err := decoder.Token()
+		if err != nil {
+			return err
+		}
+		if done {
+			break
+		}
+		originalDecoder := decoder
+		decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+		switch {
+		case strings.EqualFold("AssumedRoleUser", t.Name.Local):
+			nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+			if err := awsAwsquery_deserializeDocumentAssumedRoleUser(&sv.AssumedRoleUser, nodeDecoder); err != nil {
+				return err
+			}
+
+		case strings.EqualFold("Credentials", t.Name.Local):
+			nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+			if err := awsAwsquery_deserializeDocumentCredentials(&sv.Credentials, nodeDecoder); err != nil {
+				return err
+			}
+
+		case strings.EqualFold("PackedPolicySize", t.Name.Local):
+			val, err := decoder.Value()
+			if err != nil {
+				return err
+			}
+			if val == nil {
+				break
+			}
+			{
+				xtv := string(val)
+				i64, err := strconv.ParseInt(xtv, 10, 64)
+				if err != nil {
+					return err
+				}
+				sv.PackedPolicySize = ptr.Int32(int32(i64))
+			}
+
+		case strings.EqualFold("SourceIdentity", t.Name.Local):
+			val, err := decoder.Value()
+			if err != nil {
+				return err
+			}
+			if val == nil {
+				break
+			}
+			{
+				xtv := string(val)
+				sv.SourceIdentity = ptr.String(xtv)
+			}
+
+		default:
+			// Do nothing and ignore the unexpected tag element
+			err = decoder.Decoder.Skip()
+			if err != nil {
+				return err
+			}
+
+		}
+		decoder = originalDecoder
+	}
+	*v = sv
+	return nil
+}
+
+func awsAwsquery_deserializeOpDocumentAssumeRoleWithSAMLOutput(v **AssumeRoleWithSAMLOutput, decoder smithyxml.NodeDecoder) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	var sv *AssumeRoleWithSAMLOutput
+	if *v == nil {
+		sv = &AssumeRoleWithSAMLOutput{}
+	} else {
+		sv = *v
+	}
+
+	for {
+		t, done, err := decoder.Token()
+		if err != nil {
+			return err
+		}
+		if done {
+			break
+		}
+		originalDecoder := decoder
+		decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+		switch {
+		case strings.EqualFold("AssumedRoleUser", t.Name.Local):
+			nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+			if err := awsAwsquery_deserializeDocumentAssumedRoleUser(&sv.AssumedRoleUser, nodeDecoder); err != nil {
+				return err
+			}
+
+		case strings.EqualFold("Audience", t.Name.Local):
+			val, err := decoder.Value()
+			if err != nil {
+				return err
+			}
+			if val == nil {
+				break
+			}
+			{
+				xtv := string(val)
+				sv.Audience = ptr.String(xtv)
+			}
+
+		case strings.EqualFold("Credentials", t.Name.Local):
+			nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+			if err := awsAwsquery_deserializeDocumentCredentials(&sv.Credentials, nodeDecoder); err != nil {
+				return err
+			}
+
+		case strings.EqualFold("Issuer", t.Name.Local):
+			val, err := decoder.Value()
+			if err != nil {
+				return err
+			}
+			if val == nil {
+				break
+			}
+			{
+				xtv := string(val)
+				sv.Issuer = ptr.String(xtv)
+			}
+
+		case strings.EqualFold("NameQualifier", t.Name.Local):
+			val, err := decoder.Value()
+			if err != nil {
+				return err
+			}
+			if val == nil {
+				break
+			}
+			{
+				xtv := string(val)
+				sv.NameQualifier = ptr.String(xtv)
+			}
+
+		case strings.EqualFold("PackedPolicySize", t.Name.Local):
+			val, err := decoder.Value()
+			if err != nil {
+				return err
+			}
+			if val == nil {
+				break
+			}
+			{
+				xtv := string(val)
+				i64, err := strconv.ParseInt(xtv, 10, 64)
+				if err != nil {
+					return err
+				}
+				sv.PackedPolicySize = ptr.Int32(int32(i64))
+			}
+
+		case strings.EqualFold("SourceIdentity", t.Name.Local):
+			val, err := decoder.Value()
+			if err != nil {
+				return err
+			}
+			if val == nil {
+				break
+			}
+			{
+				xtv := string(val)
+				sv.SourceIdentity = ptr.String(xtv)
+			}
+
+		case strings.EqualFold("Subject", t.Name.Local):
+			val, err := decoder.Value()
+			if err != nil {
+				return err
+			}
+			if val == nil {
+				break
+			}
+			{
+				xtv := string(val)
+				sv.Subject = ptr.String(xtv)
+			}
+
+		case strings.EqualFold("SubjectType", t.Name.Local):
+			val, err := decoder.Value()
+			if err != nil {
+				return err
+			}
+			if val == nil {
+				break
+			}
+			{
+				xtv := string(val)
+				sv.SubjectType = ptr.String(xtv)
+			}
+
+		default:
+			// Do nothing and ignore the unexpected tag element
+			err = decoder.Decoder.Skip()
+			if err != nil {
+				return err
+			}
+
+		}
+		decoder = originalDecoder
+	}
+	*v = sv
+	return nil
+}
+
+func awsAwsquery_deserializeOpDocumentAssumeRoleWithWebIdentityOutput(v **AssumeRoleWithWebIdentityOutput, decoder smithyxml.NodeDecoder) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	var sv *AssumeRoleWithWebIdentityOutput
+	if *v == nil {
+		sv = &AssumeRoleWithWebIdentityOutput{}
+	} else {
+		sv = *v
+	}
+
+	for {
+		t, done, err := decoder.Token()
+		if err != nil {
+			return err
+		}
+		if done {
+			break
+		}
+		originalDecoder := decoder
+		decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+		switch {
+		case strings.EqualFold("AssumedRoleUser", t.Name.Local):
+			nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+			if err := awsAwsquery_deserializeDocumentAssumedRoleUser(&sv.AssumedRoleUser, nodeDecoder); err != nil {
+				return err
+			}
+
+		case strings.EqualFold("Audience", t.Name.Local):
+			val, err := decoder.Value()
+			if err != nil {
+				return err
+			}
+			if val == nil {
+				break
+			}
+			{
+				xtv := string(val)
+				sv.Audience = ptr.String(xtv)
+			}
+
+		case strings.EqualFold("Credentials", t.Name.Local):
+			nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+			if err := awsAwsquery_deserializeDocumentCredentials(&sv.Credentials, nodeDecoder); err != nil {
+				return err
+			}
+
+		case strings.EqualFold("PackedPolicySize", t.Name.Local):
+			val, err := decoder.Value()
+			if err != nil {
+				return err
+			}
+			if val == nil {
+				break
+			}
+			{
+				xtv := string(val)
+				i64, err := strconv.ParseInt(xtv, 10, 64)
+				if err != nil {
+					return err
+				}
+				sv.PackedPolicySize = ptr.Int32(int32(i64))
+			}
+
+		case strings.EqualFold("Provider", t.Name.Local):
+			val, err := decoder.Value()
+			if err != nil {
+				return err
+			}
+			if val == nil {
+				break
+			}
+			{
+				xtv := string(val)
+				sv.Provider = ptr.String(xtv)
+			}
+
+		case strings.EqualFold("SourceIdentity", t.Name.Local):
+			val, err := decoder.Value()
+			if err != nil {
+				return err
+			}
+			if val == nil {
+				break
+			}
+			{
+				xtv := string(val)
+				sv.SourceIdentity = ptr.String(xtv)
+			}
+
+		case strings.EqualFold("SubjectFromWebIdentityToken", t.Name.Local):
+			val, err := decoder.Value()
+			if err != nil {
+				return err
+			}
+			if val == nil {
+				break
+			}
+			{
+				xtv := string(val)
+				sv.SubjectFromWebIdentityToken = ptr.String(xtv)
+			}
+
+		default:
+			// Do nothing and ignore the unexpected tag element
+			err = decoder.Decoder.Skip()
+			if err != nil {
+				return err
+			}
+
+		}
+		decoder = originalDecoder
+	}
+	*v = sv
+	return nil
+}
+
+func awsAwsquery_deserializeOpDocumentDecodeAuthorizationMessageOutput(v **DecodeAuthorizationMessageOutput, decoder smithyxml.NodeDecoder) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	var sv *DecodeAuthorizationMessageOutput
+	if *v == nil {
+		sv = &DecodeAuthorizationMessageOutput{}
+	} else {
+		sv = *v
+	}
+
+	for {
+		t, done, err := decoder.Token()
+		if err != nil {
+			return err
+		}
+		if done {
+			break
+		}
+		originalDecoder := decoder
+		decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+		switch {
+		case strings.EqualFold("DecodedMessage", t.Name.Local):
+			val, err := decoder.Value()
+			if err != nil {
+				return err
+			}
+			if val == nil {
+				break
+			}
+			{
+				xtv := string(val)
+				sv.DecodedMessage = ptr.String(xtv)
+			}
+
+		default:
+			// Do nothing and ignore the unexpected tag element
+			err = decoder.Decoder.Skip()
+			if err != nil {
+				return err
+			}
+
+		}
+		decoder = originalDecoder
+	}
+	*v = sv
+	return nil
+}
+
+func awsAwsquery_deserializeOpDocumentGetAccessKeyInfoOutput(v **GetAccessKeyInfoOutput, decoder smithyxml.NodeDecoder) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	var sv *GetAccessKeyInfoOutput
+	if *v == nil {
+		sv = &GetAccessKeyInfoOutput{}
+	} else {
+		sv = *v
+	}
+
+	for {
+		t, done, err := decoder.Token()
+		if err != nil {
+			return err
+		}
+		if done {
+			break
+		}
+		originalDecoder := decoder
+		decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+		switch {
+		case strings.EqualFold("Account", t.Name.Local):
+			val, err := decoder.Value()
+			if err != nil {
+				return err
+			}
+			if val == nil {
+				break
+			}
+			{
+				xtv := string(val)
+				sv.Account = ptr.String(xtv)
+			}
+
+		default:
+			// Do nothing and ignore the unexpected tag element
+			err = decoder.Decoder.Skip()
+			if err != nil {
+				return err
+			}
+
+		}
+		decoder = originalDecoder
+	}
+	*v = sv
+	return nil
+}
+
+func awsAwsquery_deserializeOpDocumentGetCallerIdentityOutput(v **GetCallerIdentityOutput, decoder smithyxml.NodeDecoder) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	var sv *GetCallerIdentityOutput
+	if *v == nil {
+		sv = &GetCallerIdentityOutput{}
+	} else {
+		sv = *v
+	}
+
+	for {
+		t, done, err := decoder.Token()
+		if err != nil {
+			return err
+		}
+		if done {
+			break
+		}
+		originalDecoder := decoder
+		decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+		switch {
+		case strings.EqualFold("Account", t.Name.Local):
+			val, err := decoder.Value()
+			if err != nil {
+				return err
+			}
+			if val == nil {
+				break
+			}
+			{
+				xtv := string(val)
+				sv.Account = ptr.String(xtv)
+			}
+
+		case strings.EqualFold("Arn", t.Name.Local):
+			val, err := decoder.Value()
+			if err != nil {
+				return err
+			}
+			if val == nil {
+				break
+			}
+			{
+				xtv := string(val)
+				sv.Arn = ptr.String(xtv)
+			}
+
+		case strings.EqualFold("UserId", t.Name.Local):
+			val, err := decoder.Value()
+			if err != nil {
+				return err
+			}
+			if val == nil {
+				break
+			}
+			{
+				xtv := string(val)
+				sv.UserId = ptr.String(xtv)
+			}
+
+		default:
+			// Do nothing and ignore the unexpected tag element
+			err = decoder.Decoder.Skip()
+			if err != nil {
+				return err
+			}
+
+		}
+		decoder = originalDecoder
+	}
+	*v = sv
+	return nil
+}
+
+func awsAwsquery_deserializeOpDocumentGetFederationTokenOutput(v **GetFederationTokenOutput, decoder smithyxml.NodeDecoder) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	var sv *GetFederationTokenOutput
+	if *v == nil {
+		sv = &GetFederationTokenOutput{}
+	} else {
+		sv = *v
+	}
+
+	for {
+		t, done, err := decoder.Token()
+		if err != nil {
+			return err
+		}
+		if done {
+			break
+		}
+		originalDecoder := decoder
+		decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+		switch {
+		case strings.EqualFold("Credentials", t.Name.Local):
+			nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+			if err := awsAwsquery_deserializeDocumentCredentials(&sv.Credentials, nodeDecoder); err != nil {
+				return err
+			}
+
+		case strings.EqualFold("FederatedUser", t.Name.Local):
+			nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+			if err := awsAwsquery_deserializeDocumentFederatedUser(&sv.FederatedUser, nodeDecoder); err != nil {
+				return err
+			}
+
+		case strings.EqualFold("PackedPolicySize", t.Name.Local):
+			val, err := decoder.Value()
+			if err != nil {
+				return err
+			}
+			if val == nil {
+				break
+			}
+			{
+				xtv := string(val)
+				i64, err := strconv.ParseInt(xtv, 10, 64)
+				if err != nil {
+					return err
+				}
+				sv.PackedPolicySize = ptr.Int32(int32(i64))
+			}
+
+		default:
+			// Do nothing and ignore the unexpected tag element
+			err = decoder.Decoder.Skip()
+			if err != nil {
+				return err
+			}
+
+		}
+		decoder = originalDecoder
+	}
+	*v = sv
+	return nil
+}
+
+func awsAwsquery_deserializeOpDocumentGetSessionTokenOutput(v **GetSessionTokenOutput, decoder smithyxml.NodeDecoder) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	var sv *GetSessionTokenOutput
+	if *v == nil {
+		sv = &GetSessionTokenOutput{}
+	} else {
+		sv = *v
+	}
+
+	for {
+		t, done, err := decoder.Token()
+		if err != nil {
+			return err
+		}
+		if done {
+			break
+		}
+		originalDecoder := decoder
+		decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+		switch {
+		case strings.EqualFold("Credentials", t.Name.Local):
+			nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+			if err := awsAwsquery_deserializeDocumentCredentials(&sv.Credentials, nodeDecoder); err != nil {
+				return err
+			}
+
+		default:
+			// Do nothing and ignore the unexpected tag element
+			err = decoder.Decoder.Skip()
+			if err != nil {
+				return err
+			}
+
+		}
+		decoder = originalDecoder
+	}
+	*v = sv
+	return nil
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/service/sts/doc.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/service/sts/doc.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/service/sts/doc.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/service/sts/doc.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,13 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+// Package sts provides the API client, operations, and parameter types for AWS
+// Security Token Service.
+//
+// # Security Token Service
+//
+// Security Token Service (STS) enables you to request temporary,
+// limited-privilege credentials for users. This guide provides descriptions of the
+// STS API. For more information about using this service, see [Temporary Security Credentials].
+//
+// [Temporary Security Credentials]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html
+package sts
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/service/sts/endpoints.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/service/sts/endpoints.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/service/sts/endpoints.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/service/sts/endpoints.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,1130 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package sts
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"github.com/aws/aws-sdk-go-v2/aws"
+	awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+	internalConfig "github.com/aws/aws-sdk-go-v2/internal/configsources"
+	"github.com/aws/aws-sdk-go-v2/internal/endpoints"
+	"github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn"
+	internalendpoints "github.com/aws/aws-sdk-go-v2/service/sts/internal/endpoints"
+	smithy "github.com/aws/smithy-go"
+	smithyauth "github.com/aws/smithy-go/auth"
+	smithyendpoints "github.com/aws/smithy-go/endpoints"
+	"github.com/aws/smithy-go/middleware"
+	"github.com/aws/smithy-go/ptr"
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+	"net/http"
+	"net/url"
+	"os"
+	"strings"
+)
+
+// EndpointResolverOptions is the service endpoint resolver options
+type EndpointResolverOptions = internalendpoints.Options
+
+// EndpointResolver interface for resolving service endpoints.
+type EndpointResolver interface {
+	ResolveEndpoint(region string, options EndpointResolverOptions) (aws.Endpoint, error)
+}
+
+var _ EndpointResolver = &internalendpoints.Resolver{}
+
+// NewDefaultEndpointResolver constructs a new service endpoint resolver
+func NewDefaultEndpointResolver() *internalendpoints.Resolver {
+	return internalendpoints.New()
+}
+
+// EndpointResolverFunc is a helper utility that wraps a function so it satisfies
+// the EndpointResolver interface. This is useful when you want to add additional
+// endpoint resolving logic, or stub out specific endpoints with custom values.
+type EndpointResolverFunc func(region string, options EndpointResolverOptions) (aws.Endpoint, error)
+
+func (fn EndpointResolverFunc) ResolveEndpoint(region string, options EndpointResolverOptions) (endpoint aws.Endpoint, err error) {
+	return fn(region, options)
+}
+
+// EndpointResolverFromURL returns an EndpointResolver configured using the
+// provided endpoint url. By default, the resolved endpoint resolver uses the
+// client region as signing region, and the endpoint source is set to
+// EndpointSourceCustom.You can provide functional options to configure endpoint
+// values for the resolved endpoint.
+func EndpointResolverFromURL(url string, optFns ...func(*aws.Endpoint)) EndpointResolver {
+	e := aws.Endpoint{URL: url, Source: aws.EndpointSourceCustom}
+	for _, fn := range optFns {
+		fn(&e)
+	}
+
+	return EndpointResolverFunc(
+		func(region string, options EndpointResolverOptions) (aws.Endpoint, error) {
+			if len(e.SigningRegion) == 0 {
+				e.SigningRegion = region
+			}
+			return e, nil
+		},
+	)
+}
+
+type ResolveEndpoint struct {
+	Resolver EndpointResolver
+	Options  EndpointResolverOptions
+}
+
+func (*ResolveEndpoint) ID() string {
+	return "ResolveEndpoint"
+}
+
+func (m *ResolveEndpoint) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	if !awsmiddleware.GetRequiresLegacyEndpoints(ctx) {
+		return next.HandleSerialize(ctx, in)
+	}
+
+	req, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, fmt.Errorf("unknown transport type %T", in.Request)
+	}
+
+	if m.Resolver == nil {
+		return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil")
+	}
+
+	eo := m.Options
+	eo.Logger = middleware.GetLogger(ctx)
+
+	var endpoint aws.Endpoint
+	endpoint, err = m.Resolver.ResolveEndpoint(awsmiddleware.GetRegion(ctx), eo)
+	if err != nil {
+		nf := (&aws.EndpointNotFoundError{})
+		if errors.As(err, &nf) {
+			ctx = awsmiddleware.SetRequiresLegacyEndpoints(ctx, false)
+			return next.HandleSerialize(ctx, in)
+		}
+		return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err)
+	}
+
+	req.URL, err = url.Parse(endpoint.URL)
+	if err != nil {
+		return out, metadata, fmt.Errorf("failed to parse endpoint URL: %w", err)
+	}
+
+	if len(awsmiddleware.GetSigningName(ctx)) == 0 {
+		signingName := endpoint.SigningName
+		if len(signingName) == 0 {
+			signingName = "sts"
+		}
+		ctx = awsmiddleware.SetSigningName(ctx, signingName)
+	}
+	ctx = awsmiddleware.SetEndpointSource(ctx, endpoint.Source)
+	ctx = smithyhttp.SetHostnameImmutable(ctx, endpoint.HostnameImmutable)
+	ctx = awsmiddleware.SetSigningRegion(ctx, endpoint.SigningRegion)
+	ctx = awsmiddleware.SetPartitionID(ctx, endpoint.PartitionID)
+	return next.HandleSerialize(ctx, in)
+}
+func addResolveEndpointMiddleware(stack *middleware.Stack, o Options) error {
+	return stack.Serialize.Insert(&ResolveEndpoint{
+		Resolver: o.EndpointResolver,
+		Options:  o.EndpointOptions,
+	}, "OperationSerializer", middleware.Before)
+}
+
+func removeResolveEndpointMiddleware(stack *middleware.Stack) error {
+	_, err := stack.Serialize.Remove((&ResolveEndpoint{}).ID())
+	return err
+}
+
+type wrappedEndpointResolver struct {
+	awsResolver aws.EndpointResolverWithOptions
+}
+
+func (w *wrappedEndpointResolver) ResolveEndpoint(region string, options EndpointResolverOptions) (endpoint aws.Endpoint, err error) {
+	return w.awsResolver.ResolveEndpoint(ServiceID, region, options)
+}
+
+type awsEndpointResolverAdaptor func(service, region string) (aws.Endpoint, error)
+
+func (a awsEndpointResolverAdaptor) ResolveEndpoint(service, region string, options ...interface{}) (aws.Endpoint, error) {
+	return a(service, region)
+}
+
+var _ aws.EndpointResolverWithOptions = awsEndpointResolverAdaptor(nil)
+
+// withEndpointResolver returns an aws.EndpointResolverWithOptions that first delegates endpoint resolution to the awsResolver.
+// If awsResolver returns aws.EndpointNotFoundError error, the v1 resolver middleware will swallow the error,
+// and set an appropriate context flag such that fallback will occur when EndpointResolverV2 is invoked
+// via its middleware.
+//
+// If another error (besides aws.EndpointNotFoundError) is returned, then that error will be propagated.
+func withEndpointResolver(awsResolver aws.EndpointResolver, awsResolverWithOptions aws.EndpointResolverWithOptions) EndpointResolver {
+	var resolver aws.EndpointResolverWithOptions
+
+	if awsResolverWithOptions != nil {
+		resolver = awsResolverWithOptions
+	} else if awsResolver != nil {
+		resolver = awsEndpointResolverAdaptor(awsResolver.ResolveEndpoint)
+	}
+
+	return &wrappedEndpointResolver{
+		awsResolver: resolver,
+	}
+}
+
+func finalizeClientEndpointResolverOptions(options *Options) {
+	options.EndpointOptions.LogDeprecated = options.ClientLogMode.IsDeprecatedUsage()
+
+	if len(options.EndpointOptions.ResolvedRegion) == 0 {
+		const fipsInfix = "-fips-"
+		const fipsPrefix = "fips-"
+		const fipsSuffix = "-fips"
+
+		if strings.Contains(options.Region, fipsInfix) ||
+			strings.Contains(options.Region, fipsPrefix) ||
+			strings.Contains(options.Region, fipsSuffix) {
+			options.EndpointOptions.ResolvedRegion = strings.ReplaceAll(strings.ReplaceAll(strings.ReplaceAll(
+				options.Region, fipsInfix, "-"), fipsPrefix, ""), fipsSuffix, "")
+			options.EndpointOptions.UseFIPSEndpoint = aws.FIPSEndpointStateEnabled
+		}
+	}
+
+}
+
+func resolveEndpointResolverV2(options *Options) {
+	if options.EndpointResolverV2 == nil {
+		options.EndpointResolverV2 = NewDefaultEndpointResolverV2()
+	}
+}
+
+func resolveBaseEndpoint(cfg aws.Config, o *Options) {
+	if cfg.BaseEndpoint != nil {
+		o.BaseEndpoint = cfg.BaseEndpoint
+	}
+
+	_, g := os.LookupEnv("AWS_ENDPOINT_URL")
+	_, s := os.LookupEnv("AWS_ENDPOINT_URL_STS")
+
+	if g && !s {
+		return
+	}
+
+	value, found, err := internalConfig.ResolveServiceBaseEndpoint(context.Background(), "STS", cfg.ConfigSources)
+	if found && err == nil {
+		o.BaseEndpoint = &value
+	}
+}
+
+func bindRegion(region string) *string {
+	if region == "" {
+		return nil
+	}
+	return aws.String(endpoints.MapFIPSRegion(region))
+}
+
+// EndpointParameters provides the parameters that influence how endpoints are
+// resolved.
+type EndpointParameters struct {
+	// The AWS region used to dispatch the request.
+	//
+	// Parameter is
+	// required.
+	//
+	// AWS::Region
+	Region *string
+
+	// When true, use the dual-stack endpoint. If the configured endpoint does not
+	// support dual-stack, dispatching the request MAY return an error.
+	//
+	// Defaults to
+	// false if no value is provided.
+	//
+	// AWS::UseDualStack
+	UseDualStack *bool
+
+	// When true, send this request to the FIPS-compliant regional endpoint. If the
+	// configured endpoint does not have a FIPS compliant endpoint, dispatching the
+	// request will return an error.
+	//
+	// Defaults to false if no value is
+	// provided.
+	//
+	// AWS::UseFIPS
+	UseFIPS *bool
+
+	// Override the endpoint used to send this request
+	//
+	// Parameter is
+	// required.
+	//
+	// SDK::Endpoint
+	Endpoint *string
+
+	// Whether the global endpoint should be used, rather than the regional endpoint
+	// for us-east-1.
+	//
+	// Defaults to false if no value is
+	// provided.
+	//
+	// AWS::STS::UseGlobalEndpoint
+	UseGlobalEndpoint *bool
+}
+
+// ValidateRequired validates required parameters are set.
+func (p EndpointParameters) ValidateRequired() error {
+	if p.UseDualStack == nil {
+		return fmt.Errorf("parameter UseDualStack is required")
+	}
+
+	if p.UseFIPS == nil {
+		return fmt.Errorf("parameter UseFIPS is required")
+	}
+
+	if p.UseGlobalEndpoint == nil {
+		return fmt.Errorf("parameter UseGlobalEndpoint is required")
+	}
+
+	return nil
+}
+
+// WithDefaults returns a shallow copy of EndpointParameters with default values
+// applied to members where applicable.
+func (p EndpointParameters) WithDefaults() EndpointParameters {
+	if p.UseDualStack == nil {
+		p.UseDualStack = ptr.Bool(false)
+	}
+
+	if p.UseFIPS == nil {
+		p.UseFIPS = ptr.Bool(false)
+	}
+
+	if p.UseGlobalEndpoint == nil {
+		p.UseGlobalEndpoint = ptr.Bool(false)
+	}
+	return p
+}
+
+type stringSlice []string
+
+func (s stringSlice) Get(i int) *string {
+	if i < 0 || i >= len(s) {
+		return nil
+	}
+
+	v := s[i]
+	return &v
+}
+
+// EndpointResolverV2 provides the interface for resolving service endpoints.
+type EndpointResolverV2 interface {
+	// ResolveEndpoint attempts to resolve the endpoint with the provided options,
+	// returning the endpoint if found. Otherwise an error is returned.
+	ResolveEndpoint(ctx context.Context, params EndpointParameters) (
+		smithyendpoints.Endpoint, error,
+	)
+}
+
+// resolver provides the implementation for resolving endpoints.
+type resolver struct{}
+
+func NewDefaultEndpointResolverV2() EndpointResolverV2 {
+	return &resolver{}
+}
+
+// ResolveEndpoint attempts to resolve the endpoint with the provided options,
+// returning the endpoint if found. Otherwise an error is returned.
+func (r *resolver) ResolveEndpoint(
+	ctx context.Context, params EndpointParameters,
+) (
+	endpoint smithyendpoints.Endpoint, err error,
+) {
+	params = params.WithDefaults()
+	if err = params.ValidateRequired(); err != nil {
+		return endpoint, fmt.Errorf("endpoint parameters are not valid, %w", err)
+	}
+	_UseDualStack := *params.UseDualStack
+	_UseFIPS := *params.UseFIPS
+	_UseGlobalEndpoint := *params.UseGlobalEndpoint
+
+	if _UseGlobalEndpoint == true {
+		if !(params.Endpoint != nil) {
+			if exprVal := params.Region; exprVal != nil {
+				_Region := *exprVal
+				_ = _Region
+				if exprVal := awsrulesfn.GetPartition(_Region); exprVal != nil {
+					_PartitionResult := *exprVal
+					_ = _PartitionResult
+					if _UseFIPS == false {
+						if _UseDualStack == false {
+							if _Region == "ap-northeast-1" {
+								uriString := "https://sts.amazonaws.com"
+
+								uri, err := url.Parse(uriString)
+								if err != nil {
+									return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+								}
+
+								return smithyendpoints.Endpoint{
+									URI:     *uri,
+									Headers: http.Header{},
+									Properties: func() smithy.Properties {
+										var out smithy.Properties
+										smithyauth.SetAuthOptions(&out, []*smithyauth.Option{
+											{
+												SchemeID: "aws.auth#sigv4",
+												SignerProperties: func() smithy.Properties {
+													var sp smithy.Properties
+													smithyhttp.SetSigV4SigningName(&sp, "sts")
+													smithyhttp.SetSigV4ASigningName(&sp, "sts")
+
+													smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1")
+													return sp
+												}(),
+											},
+										})
+										return out
+									}(),
+								}, nil
+							}
+							if _Region == "ap-south-1" {
+								uriString := "https://sts.amazonaws.com"
+
+								uri, err := url.Parse(uriString)
+								if err != nil {
+									return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+								}
+
+								return smithyendpoints.Endpoint{
+									URI:     *uri,
+									Headers: http.Header{},
+									Properties: func() smithy.Properties {
+										var out smithy.Properties
+										smithyauth.SetAuthOptions(&out, []*smithyauth.Option{
+											{
+												SchemeID: "aws.auth#sigv4",
+												SignerProperties: func() smithy.Properties {
+													var sp smithy.Properties
+													smithyhttp.SetSigV4SigningName(&sp, "sts")
+													smithyhttp.SetSigV4ASigningName(&sp, "sts")
+
+													smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1")
+													return sp
+												}(),
+											},
+										})
+										return out
+									}(),
+								}, nil
+							}
+							if _Region == "ap-southeast-1" {
+								uriString := "https://sts.amazonaws.com"
+
+								uri, err := url.Parse(uriString)
+								if err != nil {
+									return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+								}
+
+								return smithyendpoints.Endpoint{
+									URI:     *uri,
+									Headers: http.Header{},
+									Properties: func() smithy.Properties {
+										var out smithy.Properties
+										smithyauth.SetAuthOptions(&out, []*smithyauth.Option{
+											{
+												SchemeID: "aws.auth#sigv4",
+												SignerProperties: func() smithy.Properties {
+													var sp smithy.Properties
+													smithyhttp.SetSigV4SigningName(&sp, "sts")
+													smithyhttp.SetSigV4ASigningName(&sp, "sts")
+
+													smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1")
+													return sp
+												}(),
+											},
+										})
+										return out
+									}(),
+								}, nil
+							}
+							if _Region == "ap-southeast-2" {
+								uriString := "https://sts.amazonaws.com"
+
+								uri, err := url.Parse(uriString)
+								if err != nil {
+									return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+								}
+
+								return smithyendpoints.Endpoint{
+									URI:     *uri,
+									Headers: http.Header{},
+									Properties: func() smithy.Properties {
+										var out smithy.Properties
+										smithyauth.SetAuthOptions(&out, []*smithyauth.Option{
+											{
+												SchemeID: "aws.auth#sigv4",
+												SignerProperties: func() smithy.Properties {
+													var sp smithy.Properties
+													smithyhttp.SetSigV4SigningName(&sp, "sts")
+													smithyhttp.SetSigV4ASigningName(&sp, "sts")
+
+													smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1")
+													return sp
+												}(),
+											},
+										})
+										return out
+									}(),
+								}, nil
+							}
+							if _Region == "aws-global" {
+								uriString := "https://sts.amazonaws.com"
+
+								uri, err := url.Parse(uriString)
+								if err != nil {
+									return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+								}
+
+								return smithyendpoints.Endpoint{
+									URI:     *uri,
+									Headers: http.Header{},
+									Properties: func() smithy.Properties {
+										var out smithy.Properties
+										smithyauth.SetAuthOptions(&out, []*smithyauth.Option{
+											{
+												SchemeID: "aws.auth#sigv4",
+												SignerProperties: func() smithy.Properties {
+													var sp smithy.Properties
+													smithyhttp.SetSigV4SigningName(&sp, "sts")
+													smithyhttp.SetSigV4ASigningName(&sp, "sts")
+
+													smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1")
+													return sp
+												}(),
+											},
+										})
+										return out
+									}(),
+								}, nil
+							}
+							if _Region == "ca-central-1" {
+								uriString := "https://sts.amazonaws.com"
+
+								uri, err := url.Parse(uriString)
+								if err != nil {
+									return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+								}
+
+								return smithyendpoints.Endpoint{
+									URI:     *uri,
+									Headers: http.Header{},
+									Properties: func() smithy.Properties {
+										var out smithy.Properties
+										smithyauth.SetAuthOptions(&out, []*smithyauth.Option{
+											{
+												SchemeID: "aws.auth#sigv4",
+												SignerProperties: func() smithy.Properties {
+													var sp smithy.Properties
+													smithyhttp.SetSigV4SigningName(&sp, "sts")
+													smithyhttp.SetSigV4ASigningName(&sp, "sts")
+
+													smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1")
+													return sp
+												}(),
+											},
+										})
+										return out
+									}(),
+								}, nil
+							}
+							if _Region == "eu-central-1" {
+								uriString := "https://sts.amazonaws.com"
+
+								uri, err := url.Parse(uriString)
+								if err != nil {
+									return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+								}
+
+								return smithyendpoints.Endpoint{
+									URI:     *uri,
+									Headers: http.Header{},
+									Properties: func() smithy.Properties {
+										var out smithy.Properties
+										smithyauth.SetAuthOptions(&out, []*smithyauth.Option{
+											{
+												SchemeID: "aws.auth#sigv4",
+												SignerProperties: func() smithy.Properties {
+													var sp smithy.Properties
+													smithyhttp.SetSigV4SigningName(&sp, "sts")
+													smithyhttp.SetSigV4ASigningName(&sp, "sts")
+
+													smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1")
+													return sp
+												}(),
+											},
+										})
+										return out
+									}(),
+								}, nil
+							}
+							if _Region == "eu-north-1" {
+								uriString := "https://sts.amazonaws.com"
+
+								uri, err := url.Parse(uriString)
+								if err != nil {
+									return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+								}
+
+								return smithyendpoints.Endpoint{
+									URI:     *uri,
+									Headers: http.Header{},
+									Properties: func() smithy.Properties {
+										var out smithy.Properties
+										smithyauth.SetAuthOptions(&out, []*smithyauth.Option{
+											{
+												SchemeID: "aws.auth#sigv4",
+												SignerProperties: func() smithy.Properties {
+													var sp smithy.Properties
+													smithyhttp.SetSigV4SigningName(&sp, "sts")
+													smithyhttp.SetSigV4ASigningName(&sp, "sts")
+
+													smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1")
+													return sp
+												}(),
+											},
+										})
+										return out
+									}(),
+								}, nil
+							}
+							if _Region == "eu-west-1" {
+								uriString := "https://sts.amazonaws.com"
+
+								uri, err := url.Parse(uriString)
+								if err != nil {
+									return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+								}
+
+								return smithyendpoints.Endpoint{
+									URI:     *uri,
+									Headers: http.Header{},
+									Properties: func() smithy.Properties {
+										var out smithy.Properties
+										smithyauth.SetAuthOptions(&out, []*smithyauth.Option{
+											{
+												SchemeID: "aws.auth#sigv4",
+												SignerProperties: func() smithy.Properties {
+													var sp smithy.Properties
+													smithyhttp.SetSigV4SigningName(&sp, "sts")
+													smithyhttp.SetSigV4ASigningName(&sp, "sts")
+
+													smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1")
+													return sp
+												}(),
+											},
+										})
+										return out
+									}(),
+								}, nil
+							}
+							if _Region == "eu-west-2" {
+								uriString := "https://sts.amazonaws.com"
+
+								uri, err := url.Parse(uriString)
+								if err != nil {
+									return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+								}
+
+								return smithyendpoints.Endpoint{
+									URI:     *uri,
+									Headers: http.Header{},
+									Properties: func() smithy.Properties {
+										var out smithy.Properties
+										smithyauth.SetAuthOptions(&out, []*smithyauth.Option{
+											{
+												SchemeID: "aws.auth#sigv4",
+												SignerProperties: func() smithy.Properties {
+													var sp smithy.Properties
+													smithyhttp.SetSigV4SigningName(&sp, "sts")
+													smithyhttp.SetSigV4ASigningName(&sp, "sts")
+
+													smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1")
+													return sp
+												}(),
+											},
+										})
+										return out
+									}(),
+								}, nil
+							}
+							if _Region == "eu-west-3" {
+								uriString := "https://sts.amazonaws.com"
+
+								uri, err := url.Parse(uriString)
+								if err != nil {
+									return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+								}
+
+								return smithyendpoints.Endpoint{
+									URI:     *uri,
+									Headers: http.Header{},
+									Properties: func() smithy.Properties {
+										var out smithy.Properties
+										smithyauth.SetAuthOptions(&out, []*smithyauth.Option{
+											{
+												SchemeID: "aws.auth#sigv4",
+												SignerProperties: func() smithy.Properties {
+													var sp smithy.Properties
+													smithyhttp.SetSigV4SigningName(&sp, "sts")
+													smithyhttp.SetSigV4ASigningName(&sp, "sts")
+
+													smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1")
+													return sp
+												}(),
+											},
+										})
+										return out
+									}(),
+								}, nil
+							}
+							if _Region == "sa-east-1" {
+								uriString := "https://sts.amazonaws.com"
+
+								uri, err := url.Parse(uriString)
+								if err != nil {
+									return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+								}
+
+								return smithyendpoints.Endpoint{
+									URI:     *uri,
+									Headers: http.Header{},
+									Properties: func() smithy.Properties {
+										var out smithy.Properties
+										smithyauth.SetAuthOptions(&out, []*smithyauth.Option{
+											{
+												SchemeID: "aws.auth#sigv4",
+												SignerProperties: func() smithy.Properties {
+													var sp smithy.Properties
+													smithyhttp.SetSigV4SigningName(&sp, "sts")
+													smithyhttp.SetSigV4ASigningName(&sp, "sts")
+
+													smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1")
+													return sp
+												}(),
+											},
+										})
+										return out
+									}(),
+								}, nil
+							}
+							if _Region == "us-east-1" {
+								uriString := "https://sts.amazonaws.com"
+
+								uri, err := url.Parse(uriString)
+								if err != nil {
+									return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+								}
+
+								return smithyendpoints.Endpoint{
+									URI:     *uri,
+									Headers: http.Header{},
+									Properties: func() smithy.Properties {
+										var out smithy.Properties
+										smithyauth.SetAuthOptions(&out, []*smithyauth.Option{
+											{
+												SchemeID: "aws.auth#sigv4",
+												SignerProperties: func() smithy.Properties {
+													var sp smithy.Properties
+													smithyhttp.SetSigV4SigningName(&sp, "sts")
+													smithyhttp.SetSigV4ASigningName(&sp, "sts")
+
+													smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1")
+													return sp
+												}(),
+											},
+										})
+										return out
+									}(),
+								}, nil
+							}
+							if _Region == "us-east-2" {
+								uriString := "https://sts.amazonaws.com"
+
+								uri, err := url.Parse(uriString)
+								if err != nil {
+									return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+								}
+
+								return smithyendpoints.Endpoint{
+									URI:     *uri,
+									Headers: http.Header{},
+									Properties: func() smithy.Properties {
+										var out smithy.Properties
+										smithyauth.SetAuthOptions(&out, []*smithyauth.Option{
+											{
+												SchemeID: "aws.auth#sigv4",
+												SignerProperties: func() smithy.Properties {
+													var sp smithy.Properties
+													smithyhttp.SetSigV4SigningName(&sp, "sts")
+													smithyhttp.SetSigV4ASigningName(&sp, "sts")
+
+													smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1")
+													return sp
+												}(),
+											},
+										})
+										return out
+									}(),
+								}, nil
+							}
+							if _Region == "us-west-1" {
+								uriString := "https://sts.amazonaws.com"
+
+								uri, err := url.Parse(uriString)
+								if err != nil {
+									return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+								}
+
+								return smithyendpoints.Endpoint{
+									URI:     *uri,
+									Headers: http.Header{},
+									Properties: func() smithy.Properties {
+										var out smithy.Properties
+										smithyauth.SetAuthOptions(&out, []*smithyauth.Option{
+											{
+												SchemeID: "aws.auth#sigv4",
+												SignerProperties: func() smithy.Properties {
+													var sp smithy.Properties
+													smithyhttp.SetSigV4SigningName(&sp, "sts")
+													smithyhttp.SetSigV4ASigningName(&sp, "sts")
+
+													smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1")
+													return sp
+												}(),
+											},
+										})
+										return out
+									}(),
+								}, nil
+							}
+							if _Region == "us-west-2" {
+								uriString := "https://sts.amazonaws.com"
+
+								uri, err := url.Parse(uriString)
+								if err != nil {
+									return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+								}
+
+								return smithyendpoints.Endpoint{
+									URI:     *uri,
+									Headers: http.Header{},
+									Properties: func() smithy.Properties {
+										var out smithy.Properties
+										smithyauth.SetAuthOptions(&out, []*smithyauth.Option{
+											{
+												SchemeID: "aws.auth#sigv4",
+												SignerProperties: func() smithy.Properties {
+													var sp smithy.Properties
+													smithyhttp.SetSigV4SigningName(&sp, "sts")
+													smithyhttp.SetSigV4ASigningName(&sp, "sts")
+
+													smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1")
+													return sp
+												}(),
+											},
+										})
+										return out
+									}(),
+								}, nil
+							}
+							uriString := func() string {
+								var out strings.Builder
+								out.WriteString("https://sts.")
+								out.WriteString(_Region)
+								out.WriteString(".")
+								out.WriteString(_PartitionResult.DnsSuffix)
+								return out.String()
+							}()
+
+							uri, err := url.Parse(uriString)
+							if err != nil {
+								return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+							}
+
+							return smithyendpoints.Endpoint{
+								URI:     *uri,
+								Headers: http.Header{},
+								Properties: func() smithy.Properties {
+									var out smithy.Properties
+									smithyauth.SetAuthOptions(&out, []*smithyauth.Option{
+										{
+											SchemeID: "aws.auth#sigv4",
+											SignerProperties: func() smithy.Properties {
+												var sp smithy.Properties
+												smithyhttp.SetSigV4SigningName(&sp, "sts")
+												smithyhttp.SetSigV4ASigningName(&sp, "sts")
+
+												smithyhttp.SetSigV4SigningRegion(&sp, _Region)
+												return sp
+											}(),
+										},
+									})
+									return out
+								}(),
+							}, nil
+						}
+					}
+				}
+			}
+		}
+	}
+	if exprVal := params.Endpoint; exprVal != nil {
+		_Endpoint := *exprVal
+		_ = _Endpoint
+		if _UseFIPS == true {
+			return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid Configuration: FIPS and custom endpoint are not supported")
+		}
+		if _UseDualStack == true {
+			return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid Configuration: Dualstack and custom endpoint are not supported")
+		}
+		uriString := _Endpoint
+
+		uri, err := url.Parse(uriString)
+		if err != nil {
+			return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+		}
+
+		return smithyendpoints.Endpoint{
+			URI:     *uri,
+			Headers: http.Header{},
+		}, nil
+	}
+	if exprVal := params.Region; exprVal != nil {
+		_Region := *exprVal
+		_ = _Region
+		if exprVal := awsrulesfn.GetPartition(_Region); exprVal != nil {
+			_PartitionResult := *exprVal
+			_ = _PartitionResult
+			if _UseFIPS == true {
+				if _UseDualStack == true {
+					if true == _PartitionResult.SupportsFIPS {
+						if true == _PartitionResult.SupportsDualStack {
+							uriString := func() string {
+								var out strings.Builder
+								out.WriteString("https://sts-fips.")
+								out.WriteString(_Region)
+								out.WriteString(".")
+								out.WriteString(_PartitionResult.DualStackDnsSuffix)
+								return out.String()
+							}()
+
+							uri, err := url.Parse(uriString)
+							if err != nil {
+								return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+							}
+
+							return smithyendpoints.Endpoint{
+								URI:     *uri,
+								Headers: http.Header{},
+							}, nil
+						}
+					}
+					return endpoint, fmt.Errorf("endpoint rule error, %s", "FIPS and DualStack are enabled, but this partition does not support one or both")
+				}
+			}
+			if _UseFIPS == true {
+				if _PartitionResult.SupportsFIPS == true {
+					if _PartitionResult.Name == "aws-us-gov" {
+						uriString := func() string {
+							var out strings.Builder
+							out.WriteString("https://sts.")
+							out.WriteString(_Region)
+							out.WriteString(".amazonaws.com")
+							return out.String()
+						}()
+
+						uri, err := url.Parse(uriString)
+						if err != nil {
+							return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+						}
+
+						return smithyendpoints.Endpoint{
+							URI:     *uri,
+							Headers: http.Header{},
+						}, nil
+					}
+					uriString := func() string {
+						var out strings.Builder
+						out.WriteString("https://sts-fips.")
+						out.WriteString(_Region)
+						out.WriteString(".")
+						out.WriteString(_PartitionResult.DnsSuffix)
+						return out.String()
+					}()
+
+					uri, err := url.Parse(uriString)
+					if err != nil {
+						return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+					}
+
+					return smithyendpoints.Endpoint{
+						URI:     *uri,
+						Headers: http.Header{},
+					}, nil
+				}
+				return endpoint, fmt.Errorf("endpoint rule error, %s", "FIPS is enabled but this partition does not support FIPS")
+			}
+			if _UseDualStack == true {
+				if true == _PartitionResult.SupportsDualStack {
+					uriString := func() string {
+						var out strings.Builder
+						out.WriteString("https://sts.")
+						out.WriteString(_Region)
+						out.WriteString(".")
+						out.WriteString(_PartitionResult.DualStackDnsSuffix)
+						return out.String()
+					}()
+
+					uri, err := url.Parse(uriString)
+					if err != nil {
+						return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+					}
+
+					return smithyendpoints.Endpoint{
+						URI:     *uri,
+						Headers: http.Header{},
+					}, nil
+				}
+				return endpoint, fmt.Errorf("endpoint rule error, %s", "DualStack is enabled but this partition does not support DualStack")
+			}
+			if _Region == "aws-global" {
+				uriString := "https://sts.amazonaws.com"
+
+				uri, err := url.Parse(uriString)
+				if err != nil {
+					return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+				}
+
+				return smithyendpoints.Endpoint{
+					URI:     *uri,
+					Headers: http.Header{},
+					Properties: func() smithy.Properties {
+						var out smithy.Properties
+						smithyauth.SetAuthOptions(&out, []*smithyauth.Option{
+							{
+								SchemeID: "aws.auth#sigv4",
+								SignerProperties: func() smithy.Properties {
+									var sp smithy.Properties
+									smithyhttp.SetSigV4SigningName(&sp, "sts")
+									smithyhttp.SetSigV4ASigningName(&sp, "sts")
+
+									smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1")
+									return sp
+								}(),
+							},
+						})
+						return out
+					}(),
+				}, nil
+			}
+			uriString := func() string {
+				var out strings.Builder
+				out.WriteString("https://sts.")
+				out.WriteString(_Region)
+				out.WriteString(".")
+				out.WriteString(_PartitionResult.DnsSuffix)
+				return out.String()
+			}()
+
+			uri, err := url.Parse(uriString)
+			if err != nil {
+				return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+			}
+
+			return smithyendpoints.Endpoint{
+				URI:     *uri,
+				Headers: http.Header{},
+			}, nil
+		}
+		return endpoint, fmt.Errorf("Endpoint resolution failed. Invalid operation or environment input.")
+	}
+	return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid Configuration: Missing Region")
+}
+
+type endpointParamsBinder interface {
+	bindEndpointParams(*EndpointParameters)
+}
+
+func bindEndpointParams(ctx context.Context, input interface{}, options Options) *EndpointParameters {
+	params := &EndpointParameters{}
+
+	params.Region = bindRegion(options.Region)
+	params.UseDualStack = aws.Bool(options.EndpointOptions.UseDualStackEndpoint == aws.DualStackEndpointStateEnabled)
+	params.UseFIPS = aws.Bool(options.EndpointOptions.UseFIPSEndpoint == aws.FIPSEndpointStateEnabled)
+	params.Endpoint = options.BaseEndpoint
+
+	if b, ok := input.(endpointParamsBinder); ok {
+		b.bindEndpointParams(params)
+	}
+
+	return params
+}
+
+type resolveEndpointV2Middleware struct {
+	options Options
+}
+
+func (*resolveEndpointV2Middleware) ID() string {
+	return "ResolveEndpointV2"
+}
+
+func (m *resolveEndpointV2Middleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (
+	out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
+) {
+	if awsmiddleware.GetRequiresLegacyEndpoints(ctx) {
+		return next.HandleFinalize(ctx, in)
+	}
+
+	if err := checkAccountID(getIdentity(ctx), m.options.AccountIDEndpointMode); err != nil {
+		return out, metadata, fmt.Errorf("invalid accountID set: %w", err)
+	}
+
+	req, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, fmt.Errorf("unknown transport type %T", in.Request)
+	}
+
+	if m.options.EndpointResolverV2 == nil {
+		return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil")
+	}
+
+	params := bindEndpointParams(ctx, getOperationInput(ctx), m.options)
+	endpt, err := m.options.EndpointResolverV2.ResolveEndpoint(ctx, *params)
+	if err != nil {
+		return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err)
+	}
+
+	if endpt.URI.RawPath == "" && req.URL.RawPath != "" {
+		endpt.URI.RawPath = endpt.URI.Path
+	}
+	req.URL.Scheme = endpt.URI.Scheme
+	req.URL.Host = endpt.URI.Host
+	req.URL.Path = smithyhttp.JoinPath(endpt.URI.Path, req.URL.Path)
+	req.URL.RawPath = smithyhttp.JoinPath(endpt.URI.RawPath, req.URL.RawPath)
+	for k := range endpt.Headers {
+		req.Header.Set(k, endpt.Headers.Get(k))
+	}
+
+	rscheme := getResolvedAuthScheme(ctx)
+	if rscheme == nil {
+		return out, metadata, fmt.Errorf("no resolved auth scheme")
+	}
+
+	opts, _ := smithyauth.GetAuthOptions(&endpt.Properties)
+	for _, o := range opts {
+		rscheme.SignerProperties.SetAll(&o.SignerProperties)
+	}
+
+	return next.HandleFinalize(ctx, in)
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/service/sts/generated.json 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/service/sts/generated.json
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/service/sts/generated.json	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/service/sts/generated.json	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,41 @@
+{
+    "dependencies": {
+        "github.com/aws/aws-sdk-go-v2": "v1.4.0",
+        "github.com/aws/aws-sdk-go-v2/internal/configsources": "v0.0.0-00010101000000-000000000000",
+        "github.com/aws/aws-sdk-go-v2/internal/endpoints/v2": "v2.0.0-00010101000000-000000000000",
+        "github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding": "v1.0.5",
+        "github.com/aws/aws-sdk-go-v2/service/internal/presigned-url": "v1.0.7",
+        "github.com/aws/smithy-go": "v1.4.0"
+    },
+    "files": [
+        "api_client.go",
+        "api_client_test.go",
+        "api_op_AssumeRole.go",
+        "api_op_AssumeRoleWithSAML.go",
+        "api_op_AssumeRoleWithWebIdentity.go",
+        "api_op_DecodeAuthorizationMessage.go",
+        "api_op_GetAccessKeyInfo.go",
+        "api_op_GetCallerIdentity.go",
+        "api_op_GetFederationToken.go",
+        "api_op_GetSessionToken.go",
+        "auth.go",
+        "deserializers.go",
+        "doc.go",
+        "endpoints.go",
+        "endpoints_config_test.go",
+        "endpoints_test.go",
+        "generated.json",
+        "internal/endpoints/endpoints.go",
+        "internal/endpoints/endpoints_test.go",
+        "options.go",
+        "protocol_test.go",
+        "serializers.go",
+        "snapshot_test.go",
+        "types/errors.go",
+        "types/types.go",
+        "validators.go"
+    ],
+    "go": "1.15",
+    "module": "github.com/aws/aws-sdk-go-v2/service/sts",
+    "unstable": false
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/service/sts/go_module_metadata.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/service/sts/go_module_metadata.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/service/sts/go_module_metadata.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/service/sts/go_module_metadata.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,6 @@
+// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT.
+
+package sts
+
+// goModuleVersion is the tagged release for this module
+const goModuleVersion = "1.30.3"
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/service/sts/internal/endpoints/endpoints.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/service/sts/internal/endpoints/endpoints.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/service/sts/internal/endpoints/endpoints.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/service/sts/internal/endpoints/endpoints.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,512 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package endpoints
+
+import (
+	"github.com/aws/aws-sdk-go-v2/aws"
+	endpoints "github.com/aws/aws-sdk-go-v2/internal/endpoints/v2"
+	"github.com/aws/smithy-go/logging"
+	"regexp"
+)
+
+// Options is the endpoint resolver configuration options
+type Options struct {
+	// Logger is a logging implementation that log events should be sent to.
+	Logger logging.Logger
+
+	// LogDeprecated indicates that deprecated endpoints should be logged to the
+	// provided logger.
+	LogDeprecated bool
+
+	// ResolvedRegion is used to override the region to be resolved, rather then the
+	// using the value passed to the ResolveEndpoint method. This value is used by the
+	// SDK to translate regions like fips-us-east-1 or us-east-1-fips to an alternative
+	// name. You must not set this value directly in your application.
+	ResolvedRegion string
+
+	// DisableHTTPS informs the resolver to return an endpoint that does not use the
+	// HTTPS scheme.
+	DisableHTTPS bool
+
+	// UseDualStackEndpoint specifies the resolver must resolve a dual-stack endpoint.
+	UseDualStackEndpoint aws.DualStackEndpointState
+
+	// UseFIPSEndpoint specifies the resolver must resolve a FIPS endpoint.
+	UseFIPSEndpoint aws.FIPSEndpointState
+}
+
+func (o Options) GetResolvedRegion() string {
+	return o.ResolvedRegion
+}
+
+func (o Options) GetDisableHTTPS() bool {
+	return o.DisableHTTPS
+}
+
+func (o Options) GetUseDualStackEndpoint() aws.DualStackEndpointState {
+	return o.UseDualStackEndpoint
+}
+
+func (o Options) GetUseFIPSEndpoint() aws.FIPSEndpointState {
+	return o.UseFIPSEndpoint
+}
+
+func transformToSharedOptions(options Options) endpoints.Options {
+	return endpoints.Options{
+		Logger:               options.Logger,
+		LogDeprecated:        options.LogDeprecated,
+		ResolvedRegion:       options.ResolvedRegion,
+		DisableHTTPS:         options.DisableHTTPS,
+		UseDualStackEndpoint: options.UseDualStackEndpoint,
+		UseFIPSEndpoint:      options.UseFIPSEndpoint,
+	}
+}
+
+// Resolver STS endpoint resolver
+type Resolver struct {
+	partitions endpoints.Partitions
+}
+
+// ResolveEndpoint resolves the service endpoint for the given region and options
+func (r *Resolver) ResolveEndpoint(region string, options Options) (endpoint aws.Endpoint, err error) {
+	if len(region) == 0 {
+		return endpoint, &aws.MissingRegionError{}
+	}
+
+	opt := transformToSharedOptions(options)
+	return r.partitions.ResolveEndpoint(region, opt)
+}
+
+// New returns a new Resolver
+func New() *Resolver {
+	return &Resolver{
+		partitions: defaultPartitions,
+	}
+}
+
+var partitionRegexp = struct {
+	Aws      *regexp.Regexp
+	AwsCn    *regexp.Regexp
+	AwsIso   *regexp.Regexp
+	AwsIsoB  *regexp.Regexp
+	AwsIsoE  *regexp.Regexp
+	AwsIsoF  *regexp.Regexp
+	AwsUsGov *regexp.Regexp
+}{
+
+	Aws:      regexp.MustCompile("^(us|eu|ap|sa|ca|me|af|il)\\-\\w+\\-\\d+$"),
+	AwsCn:    regexp.MustCompile("^cn\\-\\w+\\-\\d+$"),
+	AwsIso:   regexp.MustCompile("^us\\-iso\\-\\w+\\-\\d+$"),
+	AwsIsoB:  regexp.MustCompile("^us\\-isob\\-\\w+\\-\\d+$"),
+	AwsIsoE:  regexp.MustCompile("^eu\\-isoe\\-\\w+\\-\\d+$"),
+	AwsIsoF:  regexp.MustCompile("^us\\-isof\\-\\w+\\-\\d+$"),
+	AwsUsGov: regexp.MustCompile("^us\\-gov\\-\\w+\\-\\d+$"),
+}
+
+var defaultPartitions = endpoints.Partitions{
+	{
+		ID: "aws",
+		Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{
+			{
+				Variant: endpoints.DualStackVariant,
+			}: {
+				Hostname:          "sts.{region}.api.aws",
+				Protocols:         []string{"https"},
+				SignatureVersions: []string{"v4"},
+			},
+			{
+				Variant: endpoints.FIPSVariant,
+			}: {
+				Hostname:          "sts-fips.{region}.amazonaws.com",
+				Protocols:         []string{"https"},
+				SignatureVersions: []string{"v4"},
+			},
+			{
+				Variant: endpoints.FIPSVariant | endpoints.DualStackVariant,
+			}: {
+				Hostname:          "sts-fips.{region}.api.aws",
+				Protocols:         []string{"https"},
+				SignatureVersions: []string{"v4"},
+			},
+			{
+				Variant: 0,
+			}: {
+				Hostname:          "sts.{region}.amazonaws.com",
+				Protocols:         []string{"https"},
+				SignatureVersions: []string{"v4"},
+			},
+		},
+		RegionRegex:    partitionRegexp.Aws,
+		IsRegionalized: true,
+		Endpoints: endpoints.Endpoints{
+			endpoints.EndpointKey{
+				Region: "af-south-1",
+			}: endpoints.Endpoint{},
+			endpoints.EndpointKey{
+				Region: "ap-east-1",
+			}: endpoints.Endpoint{},
+			endpoints.EndpointKey{
+				Region: "ap-northeast-1",
+			}: endpoints.Endpoint{},
+			endpoints.EndpointKey{
+				Region: "ap-northeast-2",
+			}: endpoints.Endpoint{},
+			endpoints.EndpointKey{
+				Region: "ap-northeast-3",
+			}: endpoints.Endpoint{},
+			endpoints.EndpointKey{
+				Region: "ap-south-1",
+			}: endpoints.Endpoint{},
+			endpoints.EndpointKey{
+				Region: "ap-south-2",
+			}: endpoints.Endpoint{},
+			endpoints.EndpointKey{
+				Region: "ap-southeast-1",
+			}: endpoints.Endpoint{},
+			endpoints.EndpointKey{
+				Region: "ap-southeast-2",
+			}: endpoints.Endpoint{},
+			endpoints.EndpointKey{
+				Region: "ap-southeast-3",
+			}: endpoints.Endpoint{},
+			endpoints.EndpointKey{
+				Region: "ap-southeast-4",
+			}: endpoints.Endpoint{},
+			endpoints.EndpointKey{
+				Region: "aws-global",
+			}: endpoints.Endpoint{
+				Hostname: "sts.amazonaws.com",
+				CredentialScope: endpoints.CredentialScope{
+					Region: "us-east-1",
+				},
+			},
+			endpoints.EndpointKey{
+				Region: "ca-central-1",
+			}: endpoints.Endpoint{},
+			endpoints.EndpointKey{
+				Region: "ca-west-1",
+			}: endpoints.Endpoint{},
+			endpoints.EndpointKey{
+				Region: "eu-central-1",
+			}: endpoints.Endpoint{},
+			endpoints.EndpointKey{
+				Region: "eu-central-2",
+			}: endpoints.Endpoint{},
+			endpoints.EndpointKey{
+				Region: "eu-north-1",
+			}: endpoints.Endpoint{},
+			endpoints.EndpointKey{
+				Region: "eu-south-1",
+			}: endpoints.Endpoint{},
+			endpoints.EndpointKey{
+				Region: "eu-south-2",
+			}: endpoints.Endpoint{},
+			endpoints.EndpointKey{
+				Region: "eu-west-1",
+			}: endpoints.Endpoint{},
+			endpoints.EndpointKey{
+				Region: "eu-west-2",
+			}: endpoints.Endpoint{},
+			endpoints.EndpointKey{
+				Region: "eu-west-3",
+			}: endpoints.Endpoint{},
+			endpoints.EndpointKey{
+				Region: "il-central-1",
+			}: endpoints.Endpoint{},
+			endpoints.EndpointKey{
+				Region: "me-central-1",
+			}: endpoints.Endpoint{},
+			endpoints.EndpointKey{
+				Region: "me-south-1",
+			}: endpoints.Endpoint{},
+			endpoints.EndpointKey{
+				Region: "sa-east-1",
+			}: endpoints.Endpoint{},
+			endpoints.EndpointKey{
+				Region: "us-east-1",
+			}: endpoints.Endpoint{},
+			endpoints.EndpointKey{
+				Region:  "us-east-1",
+				Variant: endpoints.FIPSVariant,
+			}: {
+				Hostname: "sts-fips.us-east-1.amazonaws.com",
+			},
+			endpoints.EndpointKey{
+				Region: "us-east-1-fips",
+			}: endpoints.Endpoint{
+				Hostname: "sts-fips.us-east-1.amazonaws.com",
+				CredentialScope: endpoints.CredentialScope{
+					Region: "us-east-1",
+				},
+				Deprecated: aws.TrueTernary,
+			},
+			endpoints.EndpointKey{
+				Region: "us-east-2",
+			}: endpoints.Endpoint{},
+			endpoints.EndpointKey{
+				Region:  "us-east-2",
+				Variant: endpoints.FIPSVariant,
+			}: {
+				Hostname: "sts-fips.us-east-2.amazonaws.com",
+			},
+			endpoints.EndpointKey{
+				Region: "us-east-2-fips",
+			}: endpoints.Endpoint{
+				Hostname: "sts-fips.us-east-2.amazonaws.com",
+				CredentialScope: endpoints.CredentialScope{
+					Region: "us-east-2",
+				},
+				Deprecated: aws.TrueTernary,
+			},
+			endpoints.EndpointKey{
+				Region: "us-west-1",
+			}: endpoints.Endpoint{},
+			endpoints.EndpointKey{
+				Region:  "us-west-1",
+				Variant: endpoints.FIPSVariant,
+			}: {
+				Hostname: "sts-fips.us-west-1.amazonaws.com",
+			},
+			endpoints.EndpointKey{
+				Region: "us-west-1-fips",
+			}: endpoints.Endpoint{
+				Hostname: "sts-fips.us-west-1.amazonaws.com",
+				CredentialScope: endpoints.CredentialScope{
+					Region: "us-west-1",
+				},
+				Deprecated: aws.TrueTernary,
+			},
+			endpoints.EndpointKey{
+				Region: "us-west-2",
+			}: endpoints.Endpoint{},
+			endpoints.EndpointKey{
+				Region:  "us-west-2",
+				Variant: endpoints.FIPSVariant,
+			}: {
+				Hostname: "sts-fips.us-west-2.amazonaws.com",
+			},
+			endpoints.EndpointKey{
+				Region: "us-west-2-fips",
+			}: endpoints.Endpoint{
+				Hostname: "sts-fips.us-west-2.amazonaws.com",
+				CredentialScope: endpoints.CredentialScope{
+					Region: "us-west-2",
+				},
+				Deprecated: aws.TrueTernary,
+			},
+		},
+	},
+	{
+		ID: "aws-cn",
+		Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{
+			{
+				Variant: endpoints.DualStackVariant,
+			}: {
+				Hostname:          "sts.{region}.api.amazonwebservices.com.cn",
+				Protocols:         []string{"https"},
+				SignatureVersions: []string{"v4"},
+			},
+			{
+				Variant: endpoints.FIPSVariant,
+			}: {
+				Hostname:          "sts-fips.{region}.amazonaws.com.cn",
+				Protocols:         []string{"https"},
+				SignatureVersions: []string{"v4"},
+			},
+			{
+				Variant: endpoints.FIPSVariant | endpoints.DualStackVariant,
+			}: {
+				Hostname:          "sts-fips.{region}.api.amazonwebservices.com.cn",
+				Protocols:         []string{"https"},
+				SignatureVersions: []string{"v4"},
+			},
+			{
+				Variant: 0,
+			}: {
+				Hostname:          "sts.{region}.amazonaws.com.cn",
+				Protocols:         []string{"https"},
+				SignatureVersions: []string{"v4"},
+			},
+		},
+		RegionRegex:    partitionRegexp.AwsCn,
+		IsRegionalized: true,
+		Endpoints: endpoints.Endpoints{
+			endpoints.EndpointKey{
+				Region: "cn-north-1",
+			}: endpoints.Endpoint{},
+			endpoints.EndpointKey{
+				Region: "cn-northwest-1",
+			}: endpoints.Endpoint{},
+		},
+	},
+	{
+		ID: "aws-iso",
+		Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{
+			{
+				Variant: endpoints.FIPSVariant,
+			}: {
+				Hostname:          "sts-fips.{region}.c2s.ic.gov",
+				Protocols:         []string{"https"},
+				SignatureVersions: []string{"v4"},
+			},
+			{
+				Variant: 0,
+			}: {
+				Hostname:          "sts.{region}.c2s.ic.gov",
+				Protocols:         []string{"https"},
+				SignatureVersions: []string{"v4"},
+			},
+		},
+		RegionRegex:    partitionRegexp.AwsIso,
+		IsRegionalized: true,
+		Endpoints: endpoints.Endpoints{
+			endpoints.EndpointKey{
+				Region: "us-iso-east-1",
+			}: endpoints.Endpoint{},
+			endpoints.EndpointKey{
+				Region: "us-iso-west-1",
+			}: endpoints.Endpoint{},
+		},
+	},
+	{
+		ID: "aws-iso-b",
+		Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{
+			{
+				Variant: endpoints.FIPSVariant,
+			}: {
+				Hostname:          "sts-fips.{region}.sc2s.sgov.gov",
+				Protocols:         []string{"https"},
+				SignatureVersions: []string{"v4"},
+			},
+			{
+				Variant: 0,
+			}: {
+				Hostname:          "sts.{region}.sc2s.sgov.gov",
+				Protocols:         []string{"https"},
+				SignatureVersions: []string{"v4"},
+			},
+		},
+		RegionRegex:    partitionRegexp.AwsIsoB,
+		IsRegionalized: true,
+		Endpoints: endpoints.Endpoints{
+			endpoints.EndpointKey{
+				Region: "us-isob-east-1",
+			}: endpoints.Endpoint{},
+		},
+	},
+	{
+		ID: "aws-iso-e",
+		Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{
+			{
+				Variant: endpoints.FIPSVariant,
+			}: {
+				Hostname:          "sts-fips.{region}.cloud.adc-e.uk",
+				Protocols:         []string{"https"},
+				SignatureVersions: []string{"v4"},
+			},
+			{
+				Variant: 0,
+			}: {
+				Hostname:          "sts.{region}.cloud.adc-e.uk",
+				Protocols:         []string{"https"},
+				SignatureVersions: []string{"v4"},
+			},
+		},
+		RegionRegex:    partitionRegexp.AwsIsoE,
+		IsRegionalized: true,
+	},
+	{
+		ID: "aws-iso-f",
+		Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{
+			{
+				Variant: endpoints.FIPSVariant,
+			}: {
+				Hostname:          "sts-fips.{region}.csp.hci.ic.gov",
+				Protocols:         []string{"https"},
+				SignatureVersions: []string{"v4"},
+			},
+			{
+				Variant: 0,
+			}: {
+				Hostname:          "sts.{region}.csp.hci.ic.gov",
+				Protocols:         []string{"https"},
+				SignatureVersions: []string{"v4"},
+			},
+		},
+		RegionRegex:    partitionRegexp.AwsIsoF,
+		IsRegionalized: true,
+	},
+	{
+		ID: "aws-us-gov",
+		Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{
+			{
+				Variant: endpoints.DualStackVariant,
+			}: {
+				Hostname:          "sts.{region}.api.aws",
+				Protocols:         []string{"https"},
+				SignatureVersions: []string{"v4"},
+			},
+			{
+				Variant: endpoints.FIPSVariant,
+			}: {
+				Hostname:          "sts.{region}.amazonaws.com",
+				Protocols:         []string{"https"},
+				SignatureVersions: []string{"v4"},
+			},
+			{
+				Variant: endpoints.FIPSVariant | endpoints.DualStackVariant,
+			}: {
+				Hostname:          "sts-fips.{region}.api.aws",
+				Protocols:         []string{"https"},
+				SignatureVersions: []string{"v4"},
+			},
+			{
+				Variant: 0,
+			}: {
+				Hostname:          "sts.{region}.amazonaws.com",
+				Protocols:         []string{"https"},
+				SignatureVersions: []string{"v4"},
+			},
+		},
+		RegionRegex:    partitionRegexp.AwsUsGov,
+		IsRegionalized: true,
+		Endpoints: endpoints.Endpoints{
+			endpoints.EndpointKey{
+				Region: "us-gov-east-1",
+			}: endpoints.Endpoint{},
+			endpoints.EndpointKey{
+				Region:  "us-gov-east-1",
+				Variant: endpoints.FIPSVariant,
+			}: {
+				Hostname: "sts.us-gov-east-1.amazonaws.com",
+			},
+			endpoints.EndpointKey{
+				Region: "us-gov-east-1-fips",
+			}: endpoints.Endpoint{
+				Hostname: "sts.us-gov-east-1.amazonaws.com",
+				CredentialScope: endpoints.CredentialScope{
+					Region: "us-gov-east-1",
+				},
+				Deprecated: aws.TrueTernary,
+			},
+			endpoints.EndpointKey{
+				Region: "us-gov-west-1",
+			}: endpoints.Endpoint{},
+			endpoints.EndpointKey{
+				Region:  "us-gov-west-1",
+				Variant: endpoints.FIPSVariant,
+			}: {
+				Hostname: "sts.us-gov-west-1.amazonaws.com",
+			},
+			endpoints.EndpointKey{
+				Region: "us-gov-west-1-fips",
+			}: endpoints.Endpoint{
+				Hostname: "sts.us-gov-west-1.amazonaws.com",
+				CredentialScope: endpoints.CredentialScope{
+					Region: "us-gov-west-1",
+				},
+				Deprecated: aws.TrueTernary,
+			},
+		},
+	},
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/service/sts/options.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/service/sts/options.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/service/sts/options.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/service/sts/options.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,227 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package sts
+
+import (
+	"context"
+	"github.com/aws/aws-sdk-go-v2/aws"
+	awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+	internalauthsmithy "github.com/aws/aws-sdk-go-v2/internal/auth/smithy"
+	smithyauth "github.com/aws/smithy-go/auth"
+	"github.com/aws/smithy-go/logging"
+	"github.com/aws/smithy-go/middleware"
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+	"net/http"
+)
+
+type HTTPClient interface {
+	Do(*http.Request) (*http.Response, error)
+}
+
+type Options struct {
+	// Set of options to modify how an operation is invoked. These apply to all
+	// operations invoked for this client. Use functional options on operation call to
+	// modify this list for per operation behavior.
+	APIOptions []func(*middleware.Stack) error
+
+	// Indicates how aws account ID is applied in endpoint2.0 routing
+	AccountIDEndpointMode aws.AccountIDEndpointMode
+
+	// The optional application specific identifier appended to the User-Agent header.
+	AppID string
+
+	// This endpoint will be given as input to an EndpointResolverV2. It is used for
+	// providing a custom base endpoint that is subject to modifications by the
+	// processing EndpointResolverV2.
+	BaseEndpoint *string
+
+	// Configures the events that will be sent to the configured logger.
+	ClientLogMode aws.ClientLogMode
+
+	// The credentials object to use when signing requests.
+	Credentials aws.CredentialsProvider
+
+	// The configuration DefaultsMode that the SDK should use when constructing the
+	// clients initial default settings.
+	DefaultsMode aws.DefaultsMode
+
+	// The endpoint options to be used when attempting to resolve an endpoint.
+	EndpointOptions EndpointResolverOptions
+
+	// The service endpoint resolver.
+	//
+	// Deprecated: Deprecated: EndpointResolver and WithEndpointResolver. Providing a
+	// value for this field will likely prevent you from using any endpoint-related
+	// service features released after the introduction of EndpointResolverV2 and
+	// BaseEndpoint.
+	//
+	// To migrate an EndpointResolver implementation that uses a custom endpoint, set
+	// the client option BaseEndpoint instead.
+	EndpointResolver EndpointResolver
+
+	// Resolves the endpoint used for a particular service operation. This should be
+	// used over the deprecated EndpointResolver.
+	EndpointResolverV2 EndpointResolverV2
+
+	// Signature Version 4 (SigV4) Signer
+	HTTPSignerV4 HTTPSignerV4
+
+	// The logger writer interface to write logging messages to.
+	Logger logging.Logger
+
+	// The region to send requests to. (Required)
+	Region string
+
+	// RetryMaxAttempts specifies the maximum number attempts an API client will call
+	// an operation that fails with a retryable error. A value of 0 is ignored, and
+	// will not be used to configure the API client created default retryer, or modify
+	// per operation call's retry max attempts.
+	//
+	// If specified in an operation call's functional options with a value that is
+	// different than the constructed client's Options, the Client's Retryer will be
+	// wrapped to use the operation's specific RetryMaxAttempts value.
+	RetryMaxAttempts int
+
+	// RetryMode specifies the retry mode the API client will be created with, if
+	// Retryer option is not also specified.
+	//
+	// When creating a new API Clients this member will only be used if the Retryer
+	// Options member is nil. This value will be ignored if Retryer is not nil.
+	//
+	// Currently does not support per operation call overrides, may in the future.
+	RetryMode aws.RetryMode
+
+	// Retryer guides how HTTP requests should be retried in case of recoverable
+	// failures. When nil the API client will use a default retryer. The kind of
+	// default retry created by the API client can be changed with the RetryMode
+	// option.
+	Retryer aws.Retryer
+
+	// The RuntimeEnvironment configuration, only populated if the DefaultsMode is set
+	// to DefaultsModeAuto and is initialized using config.LoadDefaultConfig . You
+	// should not populate this structure programmatically, or rely on the values here
+	// within your applications.
+	RuntimeEnvironment aws.RuntimeEnvironment
+
+	// The initial DefaultsMode used when the client options were constructed. If the
+	// DefaultsMode was set to aws.DefaultsModeAuto this will store what the resolved
+	// value was at that point in time.
+	//
+	// Currently does not support per operation call overrides, may in the future.
+	resolvedDefaultsMode aws.DefaultsMode
+
+	// The HTTP client to invoke API calls with. Defaults to client's default HTTP
+	// implementation if nil.
+	HTTPClient HTTPClient
+
+	// The auth scheme resolver which determines how to authenticate for each
+	// operation.
+	AuthSchemeResolver AuthSchemeResolver
+
+	// The list of auth schemes supported by the client.
+	AuthSchemes []smithyhttp.AuthScheme
+}
+
+// Copy creates a clone where the APIOptions list is deep copied.
+func (o Options) Copy() Options {
+	to := o
+	to.APIOptions = make([]func(*middleware.Stack) error, len(o.APIOptions))
+	copy(to.APIOptions, o.APIOptions)
+
+	return to
+}
+
+func (o Options) GetIdentityResolver(schemeID string) smithyauth.IdentityResolver {
+	if schemeID == "aws.auth#sigv4" {
+		return getSigV4IdentityResolver(o)
+	}
+	if schemeID == "smithy.api#noAuth" {
+		return &smithyauth.AnonymousIdentityResolver{}
+	}
+	return nil
+}
+
+// WithAPIOptions returns a functional option for setting the Client's APIOptions
+// option.
+func WithAPIOptions(optFns ...func(*middleware.Stack) error) func(*Options) {
+	return func(o *Options) {
+		o.APIOptions = append(o.APIOptions, optFns...)
+	}
+}
+
+// Deprecated: EndpointResolver and WithEndpointResolver. Providing a value for
+// this field will likely prevent you from using any endpoint-related service
+// features released after the introduction of EndpointResolverV2 and BaseEndpoint.
+//
+// To migrate an EndpointResolver implementation that uses a custom endpoint, set
+// the client option BaseEndpoint instead.
+func WithEndpointResolver(v EndpointResolver) func(*Options) {
+	return func(o *Options) {
+		o.EndpointResolver = v
+	}
+}
+
+// WithEndpointResolverV2 returns a functional option for setting the Client's
+// EndpointResolverV2 option.
+func WithEndpointResolverV2(v EndpointResolverV2) func(*Options) {
+	return func(o *Options) {
+		o.EndpointResolverV2 = v
+	}
+}
+
+func getSigV4IdentityResolver(o Options) smithyauth.IdentityResolver {
+	if o.Credentials != nil {
+		return &internalauthsmithy.CredentialsProviderAdapter{Provider: o.Credentials}
+	}
+	return nil
+}
+
+// WithSigV4SigningName applies an override to the authentication workflow to
+// use the given signing name for SigV4-authenticated operations.
+//
+// This is an advanced setting. The value here is FINAL, taking precedence over
+// the resolved signing name from both auth scheme resolution and endpoint
+// resolution.
+func WithSigV4SigningName(name string) func(*Options) {
+	fn := func(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+		out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+	) {
+		return next.HandleInitialize(awsmiddleware.SetSigningName(ctx, name), in)
+	}
+	return func(o *Options) {
+		o.APIOptions = append(o.APIOptions, func(s *middleware.Stack) error {
+			return s.Initialize.Add(
+				middleware.InitializeMiddlewareFunc("withSigV4SigningName", fn),
+				middleware.Before,
+			)
+		})
+	}
+}
+
+// WithSigV4SigningRegion applies an override to the authentication workflow to
+// use the given signing region for SigV4-authenticated operations.
+//
+// This is an advanced setting. The value here is FINAL, taking precedence over
+// the resolved signing region from both auth scheme resolution and endpoint
+// resolution.
+func WithSigV4SigningRegion(region string) func(*Options) {
+	fn := func(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+		out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+	) {
+		return next.HandleInitialize(awsmiddleware.SetSigningRegion(ctx, region), in)
+	}
+	return func(o *Options) {
+		o.APIOptions = append(o.APIOptions, func(s *middleware.Stack) error {
+			return s.Initialize.Add(
+				middleware.InitializeMiddlewareFunc("withSigV4SigningRegion", fn),
+				middleware.Before,
+			)
+		})
+	}
+}
+
+func ignoreAnonymousAuth(options *Options) {
+	if aws.IsCredentialsProvider(options.Credentials, (*aws.AnonymousCredentials)(nil)) {
+		options.Credentials = nil
+	}
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/service/sts/serializers.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/service/sts/serializers.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/service/sts/serializers.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/service/sts/serializers.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,862 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package sts
+
+import (
+	"bytes"
+	"context"
+	"fmt"
+	"github.com/aws/aws-sdk-go-v2/aws/protocol/query"
+	"github.com/aws/aws-sdk-go-v2/service/sts/types"
+	smithy "github.com/aws/smithy-go"
+	"github.com/aws/smithy-go/encoding/httpbinding"
+	"github.com/aws/smithy-go/middleware"
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+	"path"
+)
+
+type awsAwsquery_serializeOpAssumeRole struct {
+}
+
+func (*awsAwsquery_serializeOpAssumeRole) ID() string {
+	return "OperationSerializer"
+}
+
+func (m *awsAwsquery_serializeOpAssumeRole) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	request, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+	}
+
+	input, ok := in.Parameters.(*AssumeRoleInput)
+	_ = input
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+	}
+
+	operationPath := "/"
+	if len(request.Request.URL.Path) == 0 {
+		request.Request.URL.Path = operationPath
+	} else {
+		request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath)
+		if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' {
+			request.Request.URL.Path += "/"
+		}
+	}
+	request.Request.Method = "POST"
+	httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	if err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	httpBindingEncoder.SetHeader("Content-Type").String("application/x-www-form-urlencoded")
+
+	bodyWriter := bytes.NewBuffer(nil)
+	bodyEncoder := query.NewEncoder(bodyWriter)
+	body := bodyEncoder.Object()
+	body.Key("Action").String("AssumeRole")
+	body.Key("Version").String("2011-06-15")
+
+	if err := awsAwsquery_serializeOpDocumentAssumeRoleInput(input, bodyEncoder.Value); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	err = bodyEncoder.Encode()
+	if err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request, err = request.SetStream(bytes.NewReader(bodyWriter.Bytes())); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	in.Request = request
+
+	return next.HandleSerialize(ctx, in)
+}
+
+type awsAwsquery_serializeOpAssumeRoleWithSAML struct {
+}
+
+func (*awsAwsquery_serializeOpAssumeRoleWithSAML) ID() string {
+	return "OperationSerializer"
+}
+
+func (m *awsAwsquery_serializeOpAssumeRoleWithSAML) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	request, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+	}
+
+	input, ok := in.Parameters.(*AssumeRoleWithSAMLInput)
+	_ = input
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+	}
+
+	operationPath := "/"
+	if len(request.Request.URL.Path) == 0 {
+		request.Request.URL.Path = operationPath
+	} else {
+		request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath)
+		if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' {
+			request.Request.URL.Path += "/"
+		}
+	}
+	request.Request.Method = "POST"
+	httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	if err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	httpBindingEncoder.SetHeader("Content-Type").String("application/x-www-form-urlencoded")
+
+	bodyWriter := bytes.NewBuffer(nil)
+	bodyEncoder := query.NewEncoder(bodyWriter)
+	body := bodyEncoder.Object()
+	body.Key("Action").String("AssumeRoleWithSAML")
+	body.Key("Version").String("2011-06-15")
+
+	if err := awsAwsquery_serializeOpDocumentAssumeRoleWithSAMLInput(input, bodyEncoder.Value); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	err = bodyEncoder.Encode()
+	if err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request, err = request.SetStream(bytes.NewReader(bodyWriter.Bytes())); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	in.Request = request
+
+	return next.HandleSerialize(ctx, in)
+}
+
+type awsAwsquery_serializeOpAssumeRoleWithWebIdentity struct {
+}
+
+func (*awsAwsquery_serializeOpAssumeRoleWithWebIdentity) ID() string {
+	return "OperationSerializer"
+}
+
+func (m *awsAwsquery_serializeOpAssumeRoleWithWebIdentity) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	request, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+	}
+
+	input, ok := in.Parameters.(*AssumeRoleWithWebIdentityInput)
+	_ = input
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+	}
+
+	operationPath := "/"
+	if len(request.Request.URL.Path) == 0 {
+		request.Request.URL.Path = operationPath
+	} else {
+		request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath)
+		if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' {
+			request.Request.URL.Path += "/"
+		}
+	}
+	request.Request.Method = "POST"
+	httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	if err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	httpBindingEncoder.SetHeader("Content-Type").String("application/x-www-form-urlencoded")
+
+	bodyWriter := bytes.NewBuffer(nil)
+	bodyEncoder := query.NewEncoder(bodyWriter)
+	body := bodyEncoder.Object()
+	body.Key("Action").String("AssumeRoleWithWebIdentity")
+	body.Key("Version").String("2011-06-15")
+
+	if err := awsAwsquery_serializeOpDocumentAssumeRoleWithWebIdentityInput(input, bodyEncoder.Value); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	err = bodyEncoder.Encode()
+	if err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request, err = request.SetStream(bytes.NewReader(bodyWriter.Bytes())); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	in.Request = request
+
+	return next.HandleSerialize(ctx, in)
+}
+
+type awsAwsquery_serializeOpDecodeAuthorizationMessage struct {
+}
+
+func (*awsAwsquery_serializeOpDecodeAuthorizationMessage) ID() string {
+	return "OperationSerializer"
+}
+
+func (m *awsAwsquery_serializeOpDecodeAuthorizationMessage) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	request, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+	}
+
+	input, ok := in.Parameters.(*DecodeAuthorizationMessageInput)
+	_ = input
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+	}
+
+	operationPath := "/"
+	if len(request.Request.URL.Path) == 0 {
+		request.Request.URL.Path = operationPath
+	} else {
+		request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath)
+		if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' {
+			request.Request.URL.Path += "/"
+		}
+	}
+	request.Request.Method = "POST"
+	httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	if err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	httpBindingEncoder.SetHeader("Content-Type").String("application/x-www-form-urlencoded")
+
+	bodyWriter := bytes.NewBuffer(nil)
+	bodyEncoder := query.NewEncoder(bodyWriter)
+	body := bodyEncoder.Object()
+	body.Key("Action").String("DecodeAuthorizationMessage")
+	body.Key("Version").String("2011-06-15")
+
+	if err := awsAwsquery_serializeOpDocumentDecodeAuthorizationMessageInput(input, bodyEncoder.Value); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	err = bodyEncoder.Encode()
+	if err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request, err = request.SetStream(bytes.NewReader(bodyWriter.Bytes())); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	in.Request = request
+
+	return next.HandleSerialize(ctx, in)
+}
+
+type awsAwsquery_serializeOpGetAccessKeyInfo struct {
+}
+
+func (*awsAwsquery_serializeOpGetAccessKeyInfo) ID() string {
+	return "OperationSerializer"
+}
+
+func (m *awsAwsquery_serializeOpGetAccessKeyInfo) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	request, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+	}
+
+	input, ok := in.Parameters.(*GetAccessKeyInfoInput)
+	_ = input
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+	}
+
+	operationPath := "/"
+	if len(request.Request.URL.Path) == 0 {
+		request.Request.URL.Path = operationPath
+	} else {
+		request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath)
+		if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' {
+			request.Request.URL.Path += "/"
+		}
+	}
+	request.Request.Method = "POST"
+	httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	if err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	httpBindingEncoder.SetHeader("Content-Type").String("application/x-www-form-urlencoded")
+
+	bodyWriter := bytes.NewBuffer(nil)
+	bodyEncoder := query.NewEncoder(bodyWriter)
+	body := bodyEncoder.Object()
+	body.Key("Action").String("GetAccessKeyInfo")
+	body.Key("Version").String("2011-06-15")
+
+	if err := awsAwsquery_serializeOpDocumentGetAccessKeyInfoInput(input, bodyEncoder.Value); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	err = bodyEncoder.Encode()
+	if err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request, err = request.SetStream(bytes.NewReader(bodyWriter.Bytes())); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	in.Request = request
+
+	return next.HandleSerialize(ctx, in)
+}
+
+type awsAwsquery_serializeOpGetCallerIdentity struct {
+}
+
+func (*awsAwsquery_serializeOpGetCallerIdentity) ID() string {
+	return "OperationSerializer"
+}
+
+func (m *awsAwsquery_serializeOpGetCallerIdentity) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	request, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+	}
+
+	input, ok := in.Parameters.(*GetCallerIdentityInput)
+	_ = input
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+	}
+
+	operationPath := "/"
+	if len(request.Request.URL.Path) == 0 {
+		request.Request.URL.Path = operationPath
+	} else {
+		request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath)
+		if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' {
+			request.Request.URL.Path += "/"
+		}
+	}
+	request.Request.Method = "POST"
+	httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	if err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	httpBindingEncoder.SetHeader("Content-Type").String("application/x-www-form-urlencoded")
+
+	bodyWriter := bytes.NewBuffer(nil)
+	bodyEncoder := query.NewEncoder(bodyWriter)
+	body := bodyEncoder.Object()
+	body.Key("Action").String("GetCallerIdentity")
+	body.Key("Version").String("2011-06-15")
+
+	err = bodyEncoder.Encode()
+	if err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request, err = request.SetStream(bytes.NewReader(bodyWriter.Bytes())); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	in.Request = request
+
+	return next.HandleSerialize(ctx, in)
+}
+
+type awsAwsquery_serializeOpGetFederationToken struct {
+}
+
+func (*awsAwsquery_serializeOpGetFederationToken) ID() string {
+	return "OperationSerializer"
+}
+
+func (m *awsAwsquery_serializeOpGetFederationToken) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	request, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+	}
+
+	input, ok := in.Parameters.(*GetFederationTokenInput)
+	_ = input
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+	}
+
+	operationPath := "/"
+	if len(request.Request.URL.Path) == 0 {
+		request.Request.URL.Path = operationPath
+	} else {
+		request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath)
+		if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' {
+			request.Request.URL.Path += "/"
+		}
+	}
+	request.Request.Method = "POST"
+	httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	if err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	httpBindingEncoder.SetHeader("Content-Type").String("application/x-www-form-urlencoded")
+
+	bodyWriter := bytes.NewBuffer(nil)
+	bodyEncoder := query.NewEncoder(bodyWriter)
+	body := bodyEncoder.Object()
+	body.Key("Action").String("GetFederationToken")
+	body.Key("Version").String("2011-06-15")
+
+	if err := awsAwsquery_serializeOpDocumentGetFederationTokenInput(input, bodyEncoder.Value); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	err = bodyEncoder.Encode()
+	if err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request, err = request.SetStream(bytes.NewReader(bodyWriter.Bytes())); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	in.Request = request
+
+	return next.HandleSerialize(ctx, in)
+}
+
+type awsAwsquery_serializeOpGetSessionToken struct {
+}
+
+func (*awsAwsquery_serializeOpGetSessionToken) ID() string {
+	return "OperationSerializer"
+}
+
+func (m *awsAwsquery_serializeOpGetSessionToken) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	request, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+	}
+
+	input, ok := in.Parameters.(*GetSessionTokenInput)
+	_ = input
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+	}
+
+	operationPath := "/"
+	if len(request.Request.URL.Path) == 0 {
+		request.Request.URL.Path = operationPath
+	} else {
+		request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath)
+		if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' {
+			request.Request.URL.Path += "/"
+		}
+	}
+	request.Request.Method = "POST"
+	httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	if err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	httpBindingEncoder.SetHeader("Content-Type").String("application/x-www-form-urlencoded")
+
+	bodyWriter := bytes.NewBuffer(nil)
+	bodyEncoder := query.NewEncoder(bodyWriter)
+	body := bodyEncoder.Object()
+	body.Key("Action").String("GetSessionToken")
+	body.Key("Version").String("2011-06-15")
+
+	if err := awsAwsquery_serializeOpDocumentGetSessionTokenInput(input, bodyEncoder.Value); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	err = bodyEncoder.Encode()
+	if err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request, err = request.SetStream(bytes.NewReader(bodyWriter.Bytes())); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	in.Request = request
+
+	return next.HandleSerialize(ctx, in)
+}
+func awsAwsquery_serializeDocumentPolicyDescriptorListType(v []types.PolicyDescriptorType, value query.Value) error {
+	array := value.Array("member")
+
+	for i := range v {
+		av := array.Value()
+		if err := awsAwsquery_serializeDocumentPolicyDescriptorType(&v[i], av); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func awsAwsquery_serializeDocumentPolicyDescriptorType(v *types.PolicyDescriptorType, value query.Value) error {
+	object := value.Object()
+	_ = object
+
+	if v.Arn != nil {
+		objectKey := object.Key("arn")
+		objectKey.String(*v.Arn)
+	}
+
+	return nil
+}
+
+func awsAwsquery_serializeDocumentProvidedContext(v *types.ProvidedContext, value query.Value) error {
+	object := value.Object()
+	_ = object
+
+	if v.ContextAssertion != nil {
+		objectKey := object.Key("ContextAssertion")
+		objectKey.String(*v.ContextAssertion)
+	}
+
+	if v.ProviderArn != nil {
+		objectKey := object.Key("ProviderArn")
+		objectKey.String(*v.ProviderArn)
+	}
+
+	return nil
+}
+
+func awsAwsquery_serializeDocumentProvidedContextsListType(v []types.ProvidedContext, value query.Value) error {
+	array := value.Array("member")
+
+	for i := range v {
+		av := array.Value()
+		if err := awsAwsquery_serializeDocumentProvidedContext(&v[i], av); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func awsAwsquery_serializeDocumentTag(v *types.Tag, value query.Value) error {
+	object := value.Object()
+	_ = object
+
+	if v.Key != nil {
+		objectKey := object.Key("Key")
+		objectKey.String(*v.Key)
+	}
+
+	if v.Value != nil {
+		objectKey := object.Key("Value")
+		objectKey.String(*v.Value)
+	}
+
+	return nil
+}
+
+func awsAwsquery_serializeDocumentTagKeyListType(v []string, value query.Value) error {
+	array := value.Array("member")
+
+	for i := range v {
+		av := array.Value()
+		av.String(v[i])
+	}
+	return nil
+}
+
+func awsAwsquery_serializeDocumentTagListType(v []types.Tag, value query.Value) error {
+	array := value.Array("member")
+
+	for i := range v {
+		av := array.Value()
+		if err := awsAwsquery_serializeDocumentTag(&v[i], av); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func awsAwsquery_serializeOpDocumentAssumeRoleInput(v *AssumeRoleInput, value query.Value) error {
+	object := value.Object()
+	_ = object
+
+	if v.DurationSeconds != nil {
+		objectKey := object.Key("DurationSeconds")
+		objectKey.Integer(*v.DurationSeconds)
+	}
+
+	if v.ExternalId != nil {
+		objectKey := object.Key("ExternalId")
+		objectKey.String(*v.ExternalId)
+	}
+
+	if v.Policy != nil {
+		objectKey := object.Key("Policy")
+		objectKey.String(*v.Policy)
+	}
+
+	if v.PolicyArns != nil {
+		objectKey := object.Key("PolicyArns")
+		if err := awsAwsquery_serializeDocumentPolicyDescriptorListType(v.PolicyArns, objectKey); err != nil {
+			return err
+		}
+	}
+
+	if v.ProvidedContexts != nil {
+		objectKey := object.Key("ProvidedContexts")
+		if err := awsAwsquery_serializeDocumentProvidedContextsListType(v.ProvidedContexts, objectKey); err != nil {
+			return err
+		}
+	}
+
+	if v.RoleArn != nil {
+		objectKey := object.Key("RoleArn")
+		objectKey.String(*v.RoleArn)
+	}
+
+	if v.RoleSessionName != nil {
+		objectKey := object.Key("RoleSessionName")
+		objectKey.String(*v.RoleSessionName)
+	}
+
+	if v.SerialNumber != nil {
+		objectKey := object.Key("SerialNumber")
+		objectKey.String(*v.SerialNumber)
+	}
+
+	if v.SourceIdentity != nil {
+		objectKey := object.Key("SourceIdentity")
+		objectKey.String(*v.SourceIdentity)
+	}
+
+	if v.Tags != nil {
+		objectKey := object.Key("Tags")
+		if err := awsAwsquery_serializeDocumentTagListType(v.Tags, objectKey); err != nil {
+			return err
+		}
+	}
+
+	if v.TokenCode != nil {
+		objectKey := object.Key("TokenCode")
+		objectKey.String(*v.TokenCode)
+	}
+
+	if v.TransitiveTagKeys != nil {
+		objectKey := object.Key("TransitiveTagKeys")
+		if err := awsAwsquery_serializeDocumentTagKeyListType(v.TransitiveTagKeys, objectKey); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func awsAwsquery_serializeOpDocumentAssumeRoleWithSAMLInput(v *AssumeRoleWithSAMLInput, value query.Value) error {
+	object := value.Object()
+	_ = object
+
+	if v.DurationSeconds != nil {
+		objectKey := object.Key("DurationSeconds")
+		objectKey.Integer(*v.DurationSeconds)
+	}
+
+	if v.Policy != nil {
+		objectKey := object.Key("Policy")
+		objectKey.String(*v.Policy)
+	}
+
+	if v.PolicyArns != nil {
+		objectKey := object.Key("PolicyArns")
+		if err := awsAwsquery_serializeDocumentPolicyDescriptorListType(v.PolicyArns, objectKey); err != nil {
+			return err
+		}
+	}
+
+	if v.PrincipalArn != nil {
+		objectKey := object.Key("PrincipalArn")
+		objectKey.String(*v.PrincipalArn)
+	}
+
+	if v.RoleArn != nil {
+		objectKey := object.Key("RoleArn")
+		objectKey.String(*v.RoleArn)
+	}
+
+	if v.SAMLAssertion != nil {
+		objectKey := object.Key("SAMLAssertion")
+		objectKey.String(*v.SAMLAssertion)
+	}
+
+	return nil
+}
+
+func awsAwsquery_serializeOpDocumentAssumeRoleWithWebIdentityInput(v *AssumeRoleWithWebIdentityInput, value query.Value) error {
+	object := value.Object()
+	_ = object
+
+	if v.DurationSeconds != nil {
+		objectKey := object.Key("DurationSeconds")
+		objectKey.Integer(*v.DurationSeconds)
+	}
+
+	if v.Policy != nil {
+		objectKey := object.Key("Policy")
+		objectKey.String(*v.Policy)
+	}
+
+	if v.PolicyArns != nil {
+		objectKey := object.Key("PolicyArns")
+		if err := awsAwsquery_serializeDocumentPolicyDescriptorListType(v.PolicyArns, objectKey); err != nil {
+			return err
+		}
+	}
+
+	if v.ProviderId != nil {
+		objectKey := object.Key("ProviderId")
+		objectKey.String(*v.ProviderId)
+	}
+
+	if v.RoleArn != nil {
+		objectKey := object.Key("RoleArn")
+		objectKey.String(*v.RoleArn)
+	}
+
+	if v.RoleSessionName != nil {
+		objectKey := object.Key("RoleSessionName")
+		objectKey.String(*v.RoleSessionName)
+	}
+
+	if v.WebIdentityToken != nil {
+		objectKey := object.Key("WebIdentityToken")
+		objectKey.String(*v.WebIdentityToken)
+	}
+
+	return nil
+}
+
+func awsAwsquery_serializeOpDocumentDecodeAuthorizationMessageInput(v *DecodeAuthorizationMessageInput, value query.Value) error {
+	object := value.Object()
+	_ = object
+
+	if v.EncodedMessage != nil {
+		objectKey := object.Key("EncodedMessage")
+		objectKey.String(*v.EncodedMessage)
+	}
+
+	return nil
+}
+
+func awsAwsquery_serializeOpDocumentGetAccessKeyInfoInput(v *GetAccessKeyInfoInput, value query.Value) error {
+	object := value.Object()
+	_ = object
+
+	if v.AccessKeyId != nil {
+		objectKey := object.Key("AccessKeyId")
+		objectKey.String(*v.AccessKeyId)
+	}
+
+	return nil
+}
+
+func awsAwsquery_serializeOpDocumentGetCallerIdentityInput(v *GetCallerIdentityInput, value query.Value) error {
+	object := value.Object()
+	_ = object
+
+	return nil
+}
+
+func awsAwsquery_serializeOpDocumentGetFederationTokenInput(v *GetFederationTokenInput, value query.Value) error {
+	object := value.Object()
+	_ = object
+
+	if v.DurationSeconds != nil {
+		objectKey := object.Key("DurationSeconds")
+		objectKey.Integer(*v.DurationSeconds)
+	}
+
+	if v.Name != nil {
+		objectKey := object.Key("Name")
+		objectKey.String(*v.Name)
+	}
+
+	if v.Policy != nil {
+		objectKey := object.Key("Policy")
+		objectKey.String(*v.Policy)
+	}
+
+	if v.PolicyArns != nil {
+		objectKey := object.Key("PolicyArns")
+		if err := awsAwsquery_serializeDocumentPolicyDescriptorListType(v.PolicyArns, objectKey); err != nil {
+			return err
+		}
+	}
+
+	if v.Tags != nil {
+		objectKey := object.Key("Tags")
+		if err := awsAwsquery_serializeDocumentTagListType(v.Tags, objectKey); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func awsAwsquery_serializeOpDocumentGetSessionTokenInput(v *GetSessionTokenInput, value query.Value) error {
+	object := value.Object()
+	_ = object
+
+	if v.DurationSeconds != nil {
+		objectKey := object.Key("DurationSeconds")
+		objectKey.Integer(*v.DurationSeconds)
+	}
+
+	if v.SerialNumber != nil {
+		objectKey := object.Key("SerialNumber")
+		objectKey.String(*v.SerialNumber)
+	}
+
+	if v.TokenCode != nil {
+		objectKey := object.Key("TokenCode")
+		objectKey.String(*v.TokenCode)
+	}
+
+	return nil
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/service/sts/types/errors.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/service/sts/types/errors.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/service/sts/types/errors.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/service/sts/types/errors.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,248 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package types
+
+import (
+	"fmt"
+	smithy "github.com/aws/smithy-go"
+)
+
+// The web identity token that was passed is expired or is not valid. Get a new
+// identity token from the identity provider and then retry the request.
+type ExpiredTokenException struct {
+	Message *string
+
+	ErrorCodeOverride *string
+
+	noSmithyDocumentSerde
+}
+
+func (e *ExpiredTokenException) Error() string {
+	return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage())
+}
+func (e *ExpiredTokenException) ErrorMessage() string {
+	if e.Message == nil {
+		return ""
+	}
+	return *e.Message
+}
+func (e *ExpiredTokenException) ErrorCode() string {
+	if e == nil || e.ErrorCodeOverride == nil {
+		return "ExpiredTokenException"
+	}
+	return *e.ErrorCodeOverride
+}
+func (e *ExpiredTokenException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
+
+// The request could not be fulfilled because the identity provider (IDP) that was
+// asked to verify the incoming identity token could not be reached. This is often
+// a transient error caused by network conditions. Retry the request a limited
+// number of times so that you don't exceed the request rate. If the error
+// persists, the identity provider might be down or not responding.
+type IDPCommunicationErrorException struct {
+	Message *string
+
+	ErrorCodeOverride *string
+
+	noSmithyDocumentSerde
+}
+
+func (e *IDPCommunicationErrorException) Error() string {
+	return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage())
+}
+func (e *IDPCommunicationErrorException) ErrorMessage() string {
+	if e.Message == nil {
+		return ""
+	}
+	return *e.Message
+}
+func (e *IDPCommunicationErrorException) ErrorCode() string {
+	if e == nil || e.ErrorCodeOverride == nil {
+		return "IDPCommunicationError"
+	}
+	return *e.ErrorCodeOverride
+}
+func (e *IDPCommunicationErrorException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
+
+// The identity provider (IdP) reported that authentication failed. This might be
+// because the claim is invalid.
+//
+// If this error is returned for the AssumeRoleWithWebIdentity operation, it can
+// also mean that the claim has expired or has been explicitly revoked.
+type IDPRejectedClaimException struct {
+	Message *string
+
+	ErrorCodeOverride *string
+
+	noSmithyDocumentSerde
+}
+
+func (e *IDPRejectedClaimException) Error() string {
+	return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage())
+}
+func (e *IDPRejectedClaimException) ErrorMessage() string {
+	if e.Message == nil {
+		return ""
+	}
+	return *e.Message
+}
+func (e *IDPRejectedClaimException) ErrorCode() string {
+	if e == nil || e.ErrorCodeOverride == nil {
+		return "IDPRejectedClaim"
+	}
+	return *e.ErrorCodeOverride
+}
+func (e *IDPRejectedClaimException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
+
+// The error returned if the message passed to DecodeAuthorizationMessage was
+// invalid. This can happen if the token contains invalid characters, such as
+// linebreaks.
+type InvalidAuthorizationMessageException struct {
+	Message *string
+
+	ErrorCodeOverride *string
+
+	noSmithyDocumentSerde
+}
+
+func (e *InvalidAuthorizationMessageException) Error() string {
+	return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage())
+}
+func (e *InvalidAuthorizationMessageException) ErrorMessage() string {
+	if e.Message == nil {
+		return ""
+	}
+	return *e.Message
+}
+func (e *InvalidAuthorizationMessageException) ErrorCode() string {
+	if e == nil || e.ErrorCodeOverride == nil {
+		return "InvalidAuthorizationMessageException"
+	}
+	return *e.ErrorCodeOverride
+}
+func (e *InvalidAuthorizationMessageException) ErrorFault() smithy.ErrorFault {
+	return smithy.FaultClient
+}
+
+// The web identity token that was passed could not be validated by Amazon Web
+// Services. Get a new identity token from the identity provider and then retry the
+// request.
+type InvalidIdentityTokenException struct {
+	Message *string
+
+	ErrorCodeOverride *string
+
+	noSmithyDocumentSerde
+}
+
+func (e *InvalidIdentityTokenException) Error() string {
+	return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage())
+}
+func (e *InvalidIdentityTokenException) ErrorMessage() string {
+	if e.Message == nil {
+		return ""
+	}
+	return *e.Message
+}
+func (e *InvalidIdentityTokenException) ErrorCode() string {
+	if e == nil || e.ErrorCodeOverride == nil {
+		return "InvalidIdentityToken"
+	}
+	return *e.ErrorCodeOverride
+}
+func (e *InvalidIdentityTokenException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
+
+// The request was rejected because the policy document was malformed. The error
+// message describes the specific error.
+type MalformedPolicyDocumentException struct {
+	Message *string
+
+	ErrorCodeOverride *string
+
+	noSmithyDocumentSerde
+}
+
+func (e *MalformedPolicyDocumentException) Error() string {
+	return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage())
+}
+func (e *MalformedPolicyDocumentException) ErrorMessage() string {
+	if e.Message == nil {
+		return ""
+	}
+	return *e.Message
+}
+func (e *MalformedPolicyDocumentException) ErrorCode() string {
+	if e == nil || e.ErrorCodeOverride == nil {
+		return "MalformedPolicyDocument"
+	}
+	return *e.ErrorCodeOverride
+}
+func (e *MalformedPolicyDocumentException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
+
+// The request was rejected because the total packed size of the session policies
+// and session tags combined was too large. An Amazon Web Services conversion
+// compresses the session policy document, session policy ARNs, and session tags
+// into a packed binary format that has a separate limit. The error message
+// indicates by percentage how close the policies and tags are to the upper size
+// limit. For more information, see [Passing Session Tags in STS]in the IAM User Guide.
+//
+// You could receive this error even though you meet other defined session policy
+// and session tag limits. For more information, see [IAM and STS Entity Character Limits]in the IAM User Guide.
+//
+// [Passing Session Tags in STS]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html
+// [IAM and STS Entity Character Limits]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-quotas.html#reference_iam-limits-entity-length
+type PackedPolicyTooLargeException struct {
+	Message *string
+
+	ErrorCodeOverride *string
+
+	noSmithyDocumentSerde
+}
+
+func (e *PackedPolicyTooLargeException) Error() string {
+	return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage())
+}
+func (e *PackedPolicyTooLargeException) ErrorMessage() string {
+	if e.Message == nil {
+		return ""
+	}
+	return *e.Message
+}
+func (e *PackedPolicyTooLargeException) ErrorCode() string {
+	if e == nil || e.ErrorCodeOverride == nil {
+		return "PackedPolicyTooLarge"
+	}
+	return *e.ErrorCodeOverride
+}
+func (e *PackedPolicyTooLargeException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
+
+// STS is not activated in the requested region for the account that is being
+// asked to generate credentials. The account administrator must use the IAM
+// console to activate STS in that region. For more information, see [Activating and Deactivating Amazon Web Services STS in an Amazon Web Services Region]in the IAM
+// User Guide.
+//
+// [Activating and Deactivating Amazon Web Services STS in an Amazon Web Services Region]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html
+type RegionDisabledException struct {
+	Message *string
+
+	ErrorCodeOverride *string
+
+	noSmithyDocumentSerde
+}
+
+func (e *RegionDisabledException) Error() string {
+	return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage())
+}
+func (e *RegionDisabledException) ErrorMessage() string {
+	if e.Message == nil {
+		return ""
+	}
+	return *e.Message
+}
+func (e *RegionDisabledException) ErrorCode() string {
+	if e == nil || e.ErrorCodeOverride == nil {
+		return "RegionDisabledException"
+	}
+	return *e.ErrorCodeOverride
+}
+func (e *RegionDisabledException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/service/sts/types/types.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/service/sts/types/types.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/service/sts/types/types.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/service/sts/types/types.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,144 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package types
+
+import (
+	smithydocument "github.com/aws/smithy-go/document"
+	"time"
+)
+
+// The identifiers for the temporary security credentials that the operation
+// returns.
+type AssumedRoleUser struct {
+
+	// The ARN of the temporary security credentials that are returned from the AssumeRole
+	// action. For more information about ARNs and how to use them in policies, see [IAM Identifiers]in
+	// the IAM User Guide.
+	//
+	// [IAM Identifiers]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html
+	//
+	// This member is required.
+	Arn *string
+
+	// A unique identifier that contains the role ID and the role session name of the
+	// role that is being assumed. The role ID is generated by Amazon Web Services when
+	// the role is created.
+	//
+	// This member is required.
+	AssumedRoleId *string
+
+	noSmithyDocumentSerde
+}
+
+// Amazon Web Services credentials for API authentication.
+type Credentials struct {
+
+	// The access key ID that identifies the temporary security credentials.
+	//
+	// This member is required.
+	AccessKeyId *string
+
+	// The date on which the current credentials expire.
+	//
+	// This member is required.
+	Expiration *time.Time
+
+	// The secret access key that can be used to sign requests.
+	//
+	// This member is required.
+	SecretAccessKey *string
+
+	// The token that users must pass to the service API to use the temporary
+	// credentials.
+	//
+	// This member is required.
+	SessionToken *string
+
+	noSmithyDocumentSerde
+}
+
+// Identifiers for the federated user that is associated with the credentials.
+type FederatedUser struct {
+
+	// The ARN that specifies the federated user that is associated with the
+	// credentials. For more information about ARNs and how to use them in policies,
+	// see [IAM Identifiers]in the IAM User Guide.
+	//
+	// [IAM Identifiers]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html
+	//
+	// This member is required.
+	Arn *string
+
+	// The string that identifies the federated user associated with the credentials,
+	// similar to the unique ID of an IAM user.
+	//
+	// This member is required.
+	FederatedUserId *string
+
+	noSmithyDocumentSerde
+}
+
+// A reference to the IAM managed policy that is passed as a session policy for a
+// role session or a federated user session.
+type PolicyDescriptorType struct {
+
+	// The Amazon Resource Name (ARN) of the IAM managed policy to use as a session
+	// policy for the role. For more information about ARNs, see [Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces]in the Amazon Web
+	// Services General Reference.
+	//
+	// [Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces]: https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html
+	Arn *string
+
+	noSmithyDocumentSerde
+}
+
+// Contains information about the provided context. This includes the signed and
+// encrypted trusted context assertion and the context provider ARN from which the
+// trusted context assertion was generated.
+type ProvidedContext struct {
+
+	// The signed and encrypted trusted context assertion generated by the context
+	// provider. The trusted context assertion is signed and encrypted by Amazon Web
+	// Services STS.
+	ContextAssertion *string
+
+	// The context provider ARN from which the trusted context assertion was generated.
+	ProviderArn *string
+
+	noSmithyDocumentSerde
+}
+
+// You can pass custom key-value pair attributes when you assume a role or
+// federate a user. These are called session tags. You can then use the session
+// tags to control access to resources. For more information, see [Tagging Amazon Web Services STS Sessions]in the IAM User
+// Guide.
+//
+// [Tagging Amazon Web Services STS Sessions]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html
+type Tag struct {
+
+	// The key for a session tag.
+	//
+	// You can pass up to 50 session tags. The plain text session tag keys can’t
+	// exceed 128 characters. For these and additional limits, see [IAM and STS Character Limits]in the IAM User
+	// Guide.
+	//
+	// [IAM and STS Character Limits]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length
+	//
+	// This member is required.
+	Key *string
+
+	// The value for a session tag.
+	//
+	// You can pass up to 50 session tags. The plain text session tag values can’t
+	// exceed 256 characters. For these and additional limits, see [IAM and STS Character Limits]in the IAM User
+	// Guide.
+	//
+	// [IAM and STS Character Limits]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length
+	//
+	// This member is required.
+	Value *string
+
+	noSmithyDocumentSerde
+}
+
+type noSmithyDocumentSerde = smithydocument.NoSerde
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/service/sts/validators.go 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/service/sts/validators.go
--- 0.19.3+ds1-4/vendor/github.com/aws/aws-sdk-go-v2/service/sts/validators.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/aws-sdk-go-v2/service/sts/validators.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,305 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package sts
+
+import (
+	"context"
+	"fmt"
+	"github.com/aws/aws-sdk-go-v2/service/sts/types"
+	smithy "github.com/aws/smithy-go"
+	"github.com/aws/smithy-go/middleware"
+)
+
+type validateOpAssumeRole struct {
+}
+
+func (*validateOpAssumeRole) ID() string {
+	return "OperationInputValidation"
+}
+
+func (m *validateOpAssumeRole) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+	out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+	input, ok := in.Parameters.(*AssumeRoleInput)
+	if !ok {
+		return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+	}
+	if err := validateOpAssumeRoleInput(input); err != nil {
+		return out, metadata, err
+	}
+	return next.HandleInitialize(ctx, in)
+}
+
+type validateOpAssumeRoleWithSAML struct {
+}
+
+func (*validateOpAssumeRoleWithSAML) ID() string {
+	return "OperationInputValidation"
+}
+
+func (m *validateOpAssumeRoleWithSAML) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+	out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+	input, ok := in.Parameters.(*AssumeRoleWithSAMLInput)
+	if !ok {
+		return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+	}
+	if err := validateOpAssumeRoleWithSAMLInput(input); err != nil {
+		return out, metadata, err
+	}
+	return next.HandleInitialize(ctx, in)
+}
+
+type validateOpAssumeRoleWithWebIdentity struct {
+}
+
+func (*validateOpAssumeRoleWithWebIdentity) ID() string {
+	return "OperationInputValidation"
+}
+
+func (m *validateOpAssumeRoleWithWebIdentity) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+	out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+	input, ok := in.Parameters.(*AssumeRoleWithWebIdentityInput)
+	if !ok {
+		return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+	}
+	if err := validateOpAssumeRoleWithWebIdentityInput(input); err != nil {
+		return out, metadata, err
+	}
+	return next.HandleInitialize(ctx, in)
+}
+
+type validateOpDecodeAuthorizationMessage struct {
+}
+
+func (*validateOpDecodeAuthorizationMessage) ID() string {
+	return "OperationInputValidation"
+}
+
+func (m *validateOpDecodeAuthorizationMessage) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+	out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+	input, ok := in.Parameters.(*DecodeAuthorizationMessageInput)
+	if !ok {
+		return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+	}
+	if err := validateOpDecodeAuthorizationMessageInput(input); err != nil {
+		return out, metadata, err
+	}
+	return next.HandleInitialize(ctx, in)
+}
+
+type validateOpGetAccessKeyInfo struct {
+}
+
+func (*validateOpGetAccessKeyInfo) ID() string {
+	return "OperationInputValidation"
+}
+
+func (m *validateOpGetAccessKeyInfo) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+	out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+	input, ok := in.Parameters.(*GetAccessKeyInfoInput)
+	if !ok {
+		return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+	}
+	if err := validateOpGetAccessKeyInfoInput(input); err != nil {
+		return out, metadata, err
+	}
+	return next.HandleInitialize(ctx, in)
+}
+
+type validateOpGetFederationToken struct {
+}
+
+func (*validateOpGetFederationToken) ID() string {
+	return "OperationInputValidation"
+}
+
+func (m *validateOpGetFederationToken) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+	out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+	input, ok := in.Parameters.(*GetFederationTokenInput)
+	if !ok {
+		return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+	}
+	if err := validateOpGetFederationTokenInput(input); err != nil {
+		return out, metadata, err
+	}
+	return next.HandleInitialize(ctx, in)
+}
+
+func addOpAssumeRoleValidationMiddleware(stack *middleware.Stack) error {
+	return stack.Initialize.Add(&validateOpAssumeRole{}, middleware.After)
+}
+
+func addOpAssumeRoleWithSAMLValidationMiddleware(stack *middleware.Stack) error {
+	return stack.Initialize.Add(&validateOpAssumeRoleWithSAML{}, middleware.After)
+}
+
+func addOpAssumeRoleWithWebIdentityValidationMiddleware(stack *middleware.Stack) error {
+	return stack.Initialize.Add(&validateOpAssumeRoleWithWebIdentity{}, middleware.After)
+}
+
+func addOpDecodeAuthorizationMessageValidationMiddleware(stack *middleware.Stack) error {
+	return stack.Initialize.Add(&validateOpDecodeAuthorizationMessage{}, middleware.After)
+}
+
+func addOpGetAccessKeyInfoValidationMiddleware(stack *middleware.Stack) error {
+	return stack.Initialize.Add(&validateOpGetAccessKeyInfo{}, middleware.After)
+}
+
+func addOpGetFederationTokenValidationMiddleware(stack *middleware.Stack) error {
+	return stack.Initialize.Add(&validateOpGetFederationToken{}, middleware.After)
+}
+
+func validateTag(v *types.Tag) error {
+	if v == nil {
+		return nil
+	}
+	invalidParams := smithy.InvalidParamsError{Context: "Tag"}
+	if v.Key == nil {
+		invalidParams.Add(smithy.NewErrParamRequired("Key"))
+	}
+	if v.Value == nil {
+		invalidParams.Add(smithy.NewErrParamRequired("Value"))
+	}
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	} else {
+		return nil
+	}
+}
+
+func validateTagListType(v []types.Tag) error {
+	if v == nil {
+		return nil
+	}
+	invalidParams := smithy.InvalidParamsError{Context: "TagListType"}
+	for i := range v {
+		if err := validateTag(&v[i]); err != nil {
+			invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError))
+		}
+	}
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	} else {
+		return nil
+	}
+}
+
+func validateOpAssumeRoleInput(v *AssumeRoleInput) error {
+	if v == nil {
+		return nil
+	}
+	invalidParams := smithy.InvalidParamsError{Context: "AssumeRoleInput"}
+	if v.RoleArn == nil {
+		invalidParams.Add(smithy.NewErrParamRequired("RoleArn"))
+	}
+	if v.RoleSessionName == nil {
+		invalidParams.Add(smithy.NewErrParamRequired("RoleSessionName"))
+	}
+	if v.Tags != nil {
+		if err := validateTagListType(v.Tags); err != nil {
+			invalidParams.AddNested("Tags", err.(smithy.InvalidParamsError))
+		}
+	}
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	} else {
+		return nil
+	}
+}
+
+func validateOpAssumeRoleWithSAMLInput(v *AssumeRoleWithSAMLInput) error {
+	if v == nil {
+		return nil
+	}
+	invalidParams := smithy.InvalidParamsError{Context: "AssumeRoleWithSAMLInput"}
+	if v.RoleArn == nil {
+		invalidParams.Add(smithy.NewErrParamRequired("RoleArn"))
+	}
+	if v.PrincipalArn == nil {
+		invalidParams.Add(smithy.NewErrParamRequired("PrincipalArn"))
+	}
+	if v.SAMLAssertion == nil {
+		invalidParams.Add(smithy.NewErrParamRequired("SAMLAssertion"))
+	}
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	} else {
+		return nil
+	}
+}
+
+func validateOpAssumeRoleWithWebIdentityInput(v *AssumeRoleWithWebIdentityInput) error {
+	if v == nil {
+		return nil
+	}
+	invalidParams := smithy.InvalidParamsError{Context: "AssumeRoleWithWebIdentityInput"}
+	if v.RoleArn == nil {
+		invalidParams.Add(smithy.NewErrParamRequired("RoleArn"))
+	}
+	if v.RoleSessionName == nil {
+		invalidParams.Add(smithy.NewErrParamRequired("RoleSessionName"))
+	}
+	if v.WebIdentityToken == nil {
+		invalidParams.Add(smithy.NewErrParamRequired("WebIdentityToken"))
+	}
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	} else {
+		return nil
+	}
+}
+
+func validateOpDecodeAuthorizationMessageInput(v *DecodeAuthorizationMessageInput) error {
+	if v == nil {
+		return nil
+	}
+	invalidParams := smithy.InvalidParamsError{Context: "DecodeAuthorizationMessageInput"}
+	if v.EncodedMessage == nil {
+		invalidParams.Add(smithy.NewErrParamRequired("EncodedMessage"))
+	}
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	} else {
+		return nil
+	}
+}
+
+func validateOpGetAccessKeyInfoInput(v *GetAccessKeyInfoInput) error {
+	if v == nil {
+		return nil
+	}
+	invalidParams := smithy.InvalidParamsError{Context: "GetAccessKeyInfoInput"}
+	if v.AccessKeyId == nil {
+		invalidParams.Add(smithy.NewErrParamRequired("AccessKeyId"))
+	}
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	} else {
+		return nil
+	}
+}
+
+func validateOpGetFederationTokenInput(v *GetFederationTokenInput) error {
+	if v == nil {
+		return nil
+	}
+	invalidParams := smithy.InvalidParamsError{Context: "GetFederationTokenInput"}
+	if v.Name == nil {
+		invalidParams.Add(smithy.NewErrParamRequired("Name"))
+	}
+	if v.Tags != nil {
+		if err := validateTagListType(v.Tags); err != nil {
+			invalidParams.AddNested("Tags", err.(smithy.InvalidParamsError))
+		}
+	}
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	} else {
+		return nil
+	}
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/.gitignore 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/.gitignore
--- 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/.gitignore	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/.gitignore	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,29 @@
+# Eclipse
+.classpath
+.project
+.settings/
+
+# Intellij
+.idea/
+*.iml
+*.iws
+
+# Mac
+.DS_Store
+
+# Maven
+target/
+**/dependency-reduced-pom.xml
+
+# Gradle
+/.gradle
+build/
+*/out/
+*/*/out/
+
+# VS Code
+bin/
+.vscode/
+
+# make
+c.out
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/.travis.yml 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/.travis.yml
--- 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/.travis.yml	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/.travis.yml	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,28 @@
+language: go
+sudo: true
+dist: bionic
+
+branches:
+  only:
+    - main
+
+os:
+  - linux
+  - osx
+  # Travis doesn't work with windows and Go tip
+  #- windows
+
+go:
+  - tip
+
+matrix:
+  allow_failures:
+    - go: tip
+
+before_install:
+  - if [ "$TRAVIS_OS_NAME" = "windows" ]; then choco install make; fi
+  - (cd /tmp/; go get golang.org/x/lint/golint)
+
+script:
+  - make go test -v ./...;
+
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/CHANGELOG.md 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/CHANGELOG.md
--- 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/CHANGELOG.md	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/CHANGELOG.md	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,239 @@
+# Release (2024-06-27)
+
+## Module Highlights
+* `github.com/aws/smithy-go`: v1.20.3
+  * **Bug Fix**: Fix encoding/cbor test overflow on x86.
+
+# Release (2024-03-29)
+
+* No change notes available for this release.
+
+# Release (2024-02-21)
+
+## Module Highlights
+* `github.com/aws/smithy-go`: v1.20.1
+  * **Bug Fix**: Remove runtime dependency on go-cmp.
+
+# Release (2024-02-13)
+
+## Module Highlights
+* `github.com/aws/smithy-go`: v1.20.0
+  * **Feature**: Add codegen definition for sigv4a trait.
+  * **Feature**: Bump minimum Go version to 1.20 per our language support policy.
+
+# Release (2023-12-07)
+
+## Module Highlights
+* `github.com/aws/smithy-go`: v1.19.0
+  * **Feature**: Support modeled request compression.
+
+# Release (2023-11-30)
+
+* No change notes available for this release.
+
+# Release (2023-11-29)
+
+## Module Highlights
+* `github.com/aws/smithy-go`: v1.18.0
+  * **Feature**: Expose Options() method on generated service clients.
+
+# Release (2023-11-15)
+
+## Module Highlights
+* `github.com/aws/smithy-go`: v1.17.0
+  * **Feature**: Support identity/auth components of client reference architecture.
+
+# Release (2023-10-31)
+
+## Module Highlights
+* `github.com/aws/smithy-go`: v1.16.0
+  * **Feature**: **LANG**: Bump minimum go version to 1.19.
+
+# Release (2023-10-06)
+
+## Module Highlights
+* `github.com/aws/smithy-go`: v1.15.0
+  * **Feature**: Add `http.WithHeaderComment` middleware.
+
+# Release (2023-08-18)
+
+* No change notes available for this release.
+
+# Release (2023-08-07)
+
+## Module Highlights
+* `github.com/aws/smithy-go`: v1.14.1
+  * **Bug Fix**: Prevent duplicated error returns in EndpointResolverV2 default implementation.
+
+# Release (2023-07-31)
+
+## General Highlights
+* **Feature**: Adds support for smithy-modeled endpoint resolution.
+
+# Release (2022-12-02)
+
+* No change notes available for this release.
+
+# Release (2022-10-24)
+
+## Module Highlights
+* `github.com/aws/smithy-go`: v1.13.4
+  * **Bug Fix**: fixed document type checking for encoding nested types
+
+# Release (2022-09-14)
+
+* No change notes available for this release.
+
+# Release (v1.13.2)
+
+* No change notes available for this release.
+
+# Release (v1.13.1)
+
+* No change notes available for this release.
+
+# Release (v1.13.0)
+
+## Module Highlights
+* `github.com/aws/smithy-go`: v1.13.0
+  * **Feature**: Adds support for the Smithy httpBearerAuth authentication trait to smithy-go. This allows the SDK to support the bearer authentication flow for API operations decorated with httpBearerAuth. An API client will need to be provided with its own bearer.TokenProvider implementation or use the bearer.StaticTokenProvider implementation.
+
+# Release (v1.12.1)
+
+## Module Highlights
+* `github.com/aws/smithy-go`: v1.12.1
+  * **Bug Fix**: Fixes a bug where JSON object keys were not escaped.
+
+# Release (v1.12.0)
+
+## Module Highlights
+* `github.com/aws/smithy-go`: v1.12.0
+  * **Feature**: `transport/http`: Add utility for setting context metadata when operation serializer automatically assigns content-type default value.
+
+# Release (v1.11.3)
+
+## Module Highlights
+* `github.com/aws/smithy-go`: v1.11.3
+  * **Dependency Update**: Updates smithy-go unit test dependency go-cmp to 0.5.8.
+
+# Release (v1.11.2)
+
+* No change notes available for this release.
+
+# Release (v1.11.1)
+
+## Module Highlights
+* `github.com/aws/smithy-go`: v1.11.1
+  * **Bug Fix**: Updates the smithy-go HTTP Request to correctly handle building the request to an http.Request. Related to [aws/aws-sdk-go-v2#1583](https://github.com/aws/aws-sdk-go-v2/issues/1583)
+
+# Release (v1.11.0)
+
+## Module Highlights
+* `github.com/aws/smithy-go`: v1.11.0
+  * **Feature**: Updates deserialization of header list to supported quoted strings
+
+# Release (v1.10.0)
+
+## Module Highlights
+* `github.com/aws/smithy-go`: v1.10.0
+  * **Feature**: Add `ptr.Duration`, `ptr.ToDuration`, `ptr.DurationSlice`, `ptr.ToDurationSlice`, `ptr.DurationMap`, and `ptr.ToDurationMap` functions for the `time.Duration` type.
+
+# Release (v1.9.1)
+
+## Module Highlights
+* `github.com/aws/smithy-go`: v1.9.1
+  * **Documentation**: Fixes various typos in Go package documentation.
+
+# Release (v1.9.0)
+
+## Module Highlights
+* `github.com/aws/smithy-go`: v1.9.0
+  * **Feature**: sync: OnceErr, can be used to concurrently record a signal when an error has occurred.
+  * **Bug Fix**: `transport/http`: CloseResponseBody and ErrorCloseResponseBody middleware have been updated to ensure that the body is fully drained before closing.
+
+# Release v1.8.1
+
+### Smithy Go Module
+* **Bug Fix**: Fixed an issue that would cause the HTTP Content-Length to be set to 0 if the stream body was not set.
+  * Fixes [aws/aws-sdk-go-v2#1418](https://github.com/aws/aws-sdk-go-v2/issues/1418)
+
+# Release v1.8.0
+
+### Smithy Go Module
+
+* `time`: Add support for parsing additional DateTime timestamp format ([#324](https://github.com/aws/smithy-go/pull/324))
+  * Adds support for parsing DateTime timestamp formatted time similar to RFC 3339, but without the `Z` character, nor UTC offset.
+  * Fixes [#1387](https://github.com/aws/aws-sdk-go-v2/issues/1387)
+
+# Release v1.7.0
+
+### Smithy Go Module
+* `ptr`:  Handle error for deferred file close call ([#314](https://github.com/aws/smithy-go/pull/314))
+  * Handle error for defer close call
+* `middleware`: Add Clone to Metadata ([#318](https://github.com/aws/smithy-go/pull/318))
+  * Adds a new Clone method to the middleware Metadata type. This provides a shallow clone of the entries in the Metadata.
+* `document`: Add new package for document shape serialization support ([#310](https://github.com/aws/smithy-go/pull/310))
+
+### Codegen
+* Add Smithy Document Shape Support ([#310](https://github.com/aws/smithy-go/pull/310))
+  * Adds support for Smithy Document shapes and supporting types for protocols to implement support
+
+# Release v1.6.0 (2021-07-15)
+
+### Smithy Go Module
+* `encoding/httpbinding`: Support has been added for encoding `float32` and `float64` values that are `NaN`, `Infinity`, or `-Infinity`. ([#316](https://github.com/aws/smithy-go/pull/316))
+
+### Codegen
+* Adds support for handling `float32` and `float64` `NaN` values in HTTP Protocol Unit Tests. ([#316](https://github.com/aws/smithy-go/pull/316))
+* Adds support protocol generator implementations to override the error code string returned by `ErrorCode` methods on generated error types. ([#315](https://github.com/aws/smithy-go/pull/315))
+
+# Release v1.5.0 (2021-06-25)
+
+### Smithy Go module
+* `time`: Update time parsing to not be as strict for HTTPDate and DateTime ([#307](https://github.com/aws/smithy-go/pull/307))
+  * Fixes [#302](https://github.com/aws/smithy-go/issues/302) by changing time to UTC before formatting so no local offset time is lost.
+
+### Codegen
+* Adds support for integrating client members via plugins ([#301](https://github.com/aws/smithy-go/pull/301))
+* Fix serialization of enum types marked with payload trait ([#296](https://github.com/aws/smithy-go/pull/296))
+* Update generation of API client modules to include a manifest of files generated ([#283](https://github.com/aws/smithy-go/pull/283))
+* Update Group Java group ID for smithy-go generator ([#298](https://github.com/aws/smithy-go/pull/298))
+* Support the delegation of determining the errors that can occur for an operation ([#304](https://github.com/aws/smithy-go/pull/304))
+* Support for marking and documenting deprecated client config fields. ([#303](https://github.com/aws/smithy-go/pull/303))
+
+# Release v1.4.0 (2021-05-06)
+
+### Smithy Go module
+* `encoding/xml`: Fix escaping of Next Line and Line Start in XML Encoder ([#267](https://github.com/aws/smithy-go/pull/267))
+
+### Codegen
+* Add support for Smithy 1.7 ([#289](https://github.com/aws/smithy-go/pull/289))
+* Add support for httpQueryParams location
+* Add support for model renaming conflict resolution with service closure
+
+# Release v1.3.1 (2021-04-08)
+
+### Smithy Go module
+* `transport/http`: Loosen endpoint hostname validation to allow specifying port numbers. ([#279](https://github.com/aws/smithy-go/pull/279))
+* `io`: Fix RingBuffer panics due to out of bounds index. ([#282](https://github.com/aws/smithy-go/pull/282))
+
+# Release v1.3.0 (2021-04-01)
+
+### Smithy Go module
+* `transport/http`: Add utility to safely join string to url path, and url raw query.
+
+### Codegen
+* Update HttpBindingProtocolGenerator to use http/transport JoinPath and JoinQuery utility.
+
+# Release v1.2.0 (2021-03-12)
+
+### Smithy Go module
+* Fix support for parsing shortened year format in HTTP Date header.
+* Fix GitHub APIDiff action workflow to get gorelease tool correctly.
+* Fix codegen artifact unit test for Go 1.16
+
+### Codegen
+* Fix generating paginator nil parameter handling before usage.
+* Fix Serialize unboxed members decorated as required.
+* Add ability to define resolvers at both client construction and operation invocation.
+* Support for extending paginators with custom runtime trait
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/CODE_OF_CONDUCT.md 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/CODE_OF_CONDUCT.md
--- 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/CODE_OF_CONDUCT.md	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/CODE_OF_CONDUCT.md	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,4 @@
+## Code of Conduct
+This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct).
+For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact
+opensource-codeofconduct@amazon.com with any additional questions or comments.
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/CONTRIBUTING.md 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/CONTRIBUTING.md
--- 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/CONTRIBUTING.md	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/CONTRIBUTING.md	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,59 @@
+# Contributing Guidelines
+
+Thank you for your interest in contributing to our project. Whether it's a bug report, new feature, correction, or additional
+documentation, we greatly value feedback and contributions from our community.
+
+Please read through this document before submitting any issues or pull requests to ensure we have all the necessary
+information to effectively respond to your bug report or contribution.
+
+
+## Reporting Bugs/Feature Requests
+
+We welcome you to use the GitHub issue tracker to report bugs or suggest features.
+
+When filing an issue, please check existing open, or recently closed, issues to make sure somebody else hasn't already
+reported the issue. Please try to include as much information as you can. Details like these are incredibly useful:
+
+* A reproducible test case or series of steps
+* The version of our code being used
+* Any modifications you've made relevant to the bug
+* Anything unusual about your environment or deployment
+
+
+## Contributing via Pull Requests
+Contributions via pull requests are much appreciated. Before sending us a pull request, please ensure that:
+
+1. You are working against the latest source on the *main* branch.
+2. You check existing open, and recently merged, pull requests to make sure someone else hasn't addressed the problem already.
+3. You open an issue to discuss any significant work - we would hate for your time to be wasted.
+
+To send us a pull request, please:
+
+1. Fork the repository.
+2. Modify the source; please focus on the specific change you are contributing. If you also reformat all the code, it will be hard for us to focus on your change.
+3. Ensure local tests pass.
+4. Commit to your fork using clear commit messages.
+5. Send us a pull request, answering any default questions in the pull request interface.
+6. Pay attention to any automated CI failures reported in the pull request, and stay involved in the conversation.
+
+GitHub provides additional document on [forking a repository](https://help.github.com/articles/fork-a-repo/) and
+[creating a pull request](https://help.github.com/articles/creating-a-pull-request/).
+
+
+## Finding contributions to work on
+Looking at the existing issues is a great way to find something to contribute on. As our projects, by default, use the default GitHub issue labels (enhancement/bug/duplicate/help wanted/invalid/question/wontfix), looking at any 'help wanted' issues is a great place to start.
+
+
+## Code of Conduct
+This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct).
+For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact
+opensource-codeofconduct@amazon.com with any additional questions or comments.
+
+
+## Security issue notifications
+If you discover a potential security issue in this project we ask that you notify AWS/Amazon Security via our [vulnerability reporting page](http://aws.amazon.com/security/vulnerability-reporting/). Please do **not** create a public github issue.
+
+
+## Licensing
+
+See the [LICENSE](LICENSE) file for our project's licensing. We will ask you to confirm the licensing of your contribution.
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/LICENSE 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/LICENSE
--- 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/LICENSE	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/LICENSE	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,175 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/Makefile 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/Makefile
--- 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/Makefile	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/Makefile	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,102 @@
+PRE_RELEASE_VERSION ?=
+
+RELEASE_MANIFEST_FILE ?=
+RELEASE_CHGLOG_DESC_FILE ?=
+
+REPOTOOLS_VERSION ?= latest
+REPOTOOLS_MODULE = github.com/awslabs/aws-go-multi-module-repository-tools
+REPOTOOLS_CMD_CALCULATE_RELEASE = ${REPOTOOLS_MODULE}/cmd/calculaterelease@${REPOTOOLS_VERSION}
+REPOTOOLS_CMD_CALCULATE_RELEASE_ADDITIONAL_ARGS ?=
+REPOTOOLS_CMD_UPDATE_REQUIRES = ${REPOTOOLS_MODULE}/cmd/updaterequires@${REPOTOOLS_VERSION}
+REPOTOOLS_CMD_UPDATE_MODULE_METADATA = ${REPOTOOLS_MODULE}/cmd/updatemodulemeta@${REPOTOOLS_VERSION}
+REPOTOOLS_CMD_GENERATE_CHANGELOG = ${REPOTOOLS_MODULE}/cmd/generatechangelog@${REPOTOOLS_VERSION}
+REPOTOOLS_CMD_CHANGELOG = ${REPOTOOLS_MODULE}/cmd/changelog@${REPOTOOLS_VERSION}
+REPOTOOLS_CMD_TAG_RELEASE = ${REPOTOOLS_MODULE}/cmd/tagrelease@${REPOTOOLS_VERSION}
+REPOTOOLS_CMD_MODULE_VERSION = ${REPOTOOLS_MODULE}/cmd/moduleversion@${REPOTOOLS_VERSION}
+
+UNIT_TEST_TAGS=
+BUILD_TAGS=
+
+ifneq ($(PRE_RELEASE_VERSION),)
+	REPOTOOLS_CMD_CALCULATE_RELEASE_ADDITIONAL_ARGS += -preview=${PRE_RELEASE_VERSION}
+endif
+
+smithy-publish-local:
+	cd codegen && ./gradlew publishToMavenLocal
+
+smithy-build:
+	cd codegen && ./gradlew build
+
+smithy-clean:
+	cd codegen && ./gradlew clean
+
+##################
+# Linting/Verify #
+##################
+.PHONY: verify vet cover
+
+verify: vet
+
+vet:
+	go vet ${BUILD_TAGS} --all ./...
+
+cover:
+	go test ${BUILD_TAGS} -coverprofile c.out ./...
+	@cover=`go tool cover -func c.out | grep '^total:' | awk '{ print $$3+0 }'`; \
+		echo "total (statements): $$cover%";
+
+################
+# Unit Testing #
+################
+.PHONY: unit unit-race unit-test unit-race-test
+
+unit: verify
+	go vet ${BUILD_TAGS} --all ./... && \
+	go test ${BUILD_TAGS} ${RUN_NONE} ./... && \
+	go test -timeout=1m ${UNIT_TEST_TAGS} ./...
+
+unit-race: verify
+	go vet ${BUILD_TAGS} --all ./... && \
+	go test ${BUILD_TAGS} ${RUN_NONE} ./... && \
+	go test -timeout=1m ${UNIT_TEST_TAGS} -race -cpu=4 ./...
+
+unit-test: verify
+	go test -timeout=1m ${UNIT_TEST_TAGS} ./...
+
+unit-race-test: verify
+	go test -timeout=1m ${UNIT_TEST_TAGS} -race -cpu=4 ./...
+
+#####################
+#  Release Process  #
+#####################
+.PHONY: preview-release pre-release-validation release
+
+preview-release:
+	go run ${REPOTOOLS_CMD_CALCULATE_RELEASE} ${REPOTOOLS_CMD_CALCULATE_RELEASE_ADDITIONAL_ARGS}
+
+pre-release-validation:
+	@if [[ -z "${RELEASE_MANIFEST_FILE}" ]]; then \
+		echo "RELEASE_MANIFEST_FILE is required to specify the file to write the release manifest" && false; \
+	fi
+	@if [[ -z "${RELEASE_CHGLOG_DESC_FILE}" ]]; then \
+		echo "RELEASE_CHGLOG_DESC_FILE is required to specify the file to write the release notes" && false; \
+	fi
+
+release: pre-release-validation
+	go run ${REPOTOOLS_CMD_CALCULATE_RELEASE} -o ${RELEASE_MANIFEST_FILE} ${REPOTOOLS_CMD_CALCULATE_RELEASE_ADDITIONAL_ARGS}
+	go run ${REPOTOOLS_CMD_UPDATE_REQUIRES} -release ${RELEASE_MANIFEST_FILE}
+	go run ${REPOTOOLS_CMD_UPDATE_MODULE_METADATA} -release ${RELEASE_MANIFEST_FILE}
+	go run ${REPOTOOLS_CMD_GENERATE_CHANGELOG} -release ${RELEASE_MANIFEST_FILE} -o ${RELEASE_CHGLOG_DESC_FILE}
+	go run ${REPOTOOLS_CMD_CHANGELOG} rm -all
+	go run ${REPOTOOLS_CMD_TAG_RELEASE} -release ${RELEASE_MANIFEST_FILE}
+
+module-version:
+	@go run ${REPOTOOLS_CMD_MODULE_VERSION} .
+
+##############
+# Repo Tools #
+##############
+.PHONY: install-changelog
+
+install-changelog:
+	go install ${REPOTOOLS_MODULE}/cmd/changelog@${REPOTOOLS_VERSION}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/NOTICE 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/NOTICE
--- 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/NOTICE	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/NOTICE	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1 @@
+Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/README.md 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/README.md
--- 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/README.md	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/README.md	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,27 @@
+## Smithy Go
+
+[![Go Build Status](https://github.com/aws/smithy-go/actions/workflows/go.yml/badge.svg?branch=main)](https://github.com/aws/smithy-go/actions/workflows/go.yml)[![Codegen Build Status](https://github.com/aws/smithy-go/actions/workflows/codegen.yml/badge.svg?branch=main)](https://github.com/aws/smithy-go/actions/workflows/codegen.yml)
+
+[Smithy](https://smithy.io/) code generators for Go.
+
+**WARNING: All interfaces are subject to change.**
+
+## Can I use this?
+
+In order to generate a usable smithy client you must provide a [protocol definition](https://github.com/aws/smithy-go/blob/main/codegen/smithy-go-codegen/src/main/java/software/amazon/smithy/go/codegen/integration/ProtocolGenerator.java),
+such as [AWS restJson1](https://smithy.io/2.0/aws/protocols/aws-restjson1-protocol.html),
+in order to generate transport mechanisms and serialization/deserialization
+code ("serde") accordingly.
+
+The code generator does not currently support any protocols out of the box,
+therefore the useability of this project on its own is currently limited.
+Support for all [AWS protocols](https://smithy.io/2.0/aws/protocols/index.html)
+exists in [aws-sdk-go-v2](https://github.com/aws/aws-sdk-go-v2). We are
+tracking the movement of those out of the SDK into smithy-go in
+[#458](https://github.com/aws/smithy-go/issues/458), but there's currently no
+timeline for doing so.
+
+## License
+
+This project is licensed under the Apache-2.0 License.
+
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/auth/auth.go 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/auth/auth.go
--- 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/auth/auth.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/auth/auth.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,3 @@
+// Package auth defines protocol-agnostic authentication types for smithy
+// clients.
+package auth
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/auth/bearer/docs.go 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/auth/bearer/docs.go
--- 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/auth/bearer/docs.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/auth/bearer/docs.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,3 @@
+// Package bearer provides middleware and utilities for authenticating API
+// operation calls with a Bearer Token.
+package bearer
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/auth/bearer/middleware.go 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/auth/bearer/middleware.go
--- 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/auth/bearer/middleware.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/auth/bearer/middleware.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,104 @@
+package bearer
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/aws/smithy-go/middleware"
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Message is the middleware stack's request transport message value.
+type Message interface{}
+
+// Signer provides an interface for implementations to decorate a request
+// message with a bearer token. The signer is responsible for validating the
+// message type is compatible with the signer.
+type Signer interface {
+	SignWithBearerToken(context.Context, Token, Message) (Message, error)
+}
+
+// AuthenticationMiddleware provides the Finalize middleware step for signing
+// an request message with a bearer token.
+type AuthenticationMiddleware struct {
+	signer        Signer
+	tokenProvider TokenProvider
+}
+
+// AddAuthenticationMiddleware helper adds the AuthenticationMiddleware to the
+// middleware Stack in the Finalize step with the options provided.
+func AddAuthenticationMiddleware(s *middleware.Stack, signer Signer, tokenProvider TokenProvider) error {
+	return s.Finalize.Add(
+		NewAuthenticationMiddleware(signer, tokenProvider),
+		middleware.After,
+	)
+}
+
+// NewAuthenticationMiddleware returns an initialized AuthenticationMiddleware.
+func NewAuthenticationMiddleware(signer Signer, tokenProvider TokenProvider) *AuthenticationMiddleware {
+	return &AuthenticationMiddleware{
+		signer:        signer,
+		tokenProvider: tokenProvider,
+	}
+}
+
+const authenticationMiddlewareID = "BearerTokenAuthentication"
+
+// ID returns the resolver identifier
+func (m *AuthenticationMiddleware) ID() string {
+	return authenticationMiddlewareID
+}
+
+// HandleFinalize implements the FinalizeMiddleware interface in order to
+// update the request with bearer token authentication.
+func (m *AuthenticationMiddleware) HandleFinalize(
+	ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler,
+) (
+	out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
+) {
+	token, err := m.tokenProvider.RetrieveBearerToken(ctx)
+	if err != nil {
+		return out, metadata, fmt.Errorf("failed AuthenticationMiddleware wrap message, %w", err)
+	}
+
+	signedMessage, err := m.signer.SignWithBearerToken(ctx, token, in.Request)
+	if err != nil {
+		return out, metadata, fmt.Errorf("failed AuthenticationMiddleware sign message, %w", err)
+	}
+
+	in.Request = signedMessage
+	return next.HandleFinalize(ctx, in)
+}
+
+// SignHTTPSMessage provides a bearer token authentication implementation that
+// will sign the message with the provided bearer token.
+//
+// Will fail if the message is not a smithy-go HTTP request or the request is
+// not HTTPS.
+type SignHTTPSMessage struct{}
+
+// NewSignHTTPSMessage returns an initialized signer for HTTP messages.
+func NewSignHTTPSMessage() *SignHTTPSMessage {
+	return &SignHTTPSMessage{}
+}
+
+// SignWithBearerToken returns a copy of the HTTP request with the bearer token
+// added via the "Authorization" header, per RFC 6750, https://datatracker.ietf.org/doc/html/rfc6750.
+//
+// Returns an error if the request's URL scheme is not HTTPS, or the request
+// message is not an smithy-go HTTP Request pointer type.
+func (SignHTTPSMessage) SignWithBearerToken(ctx context.Context, token Token, message Message) (Message, error) {
+	req, ok := message.(*smithyhttp.Request)
+	if !ok {
+		return nil, fmt.Errorf("expect smithy-go HTTP Request, got %T", message)
+	}
+
+	if !req.IsHTTPS() {
+		return nil, fmt.Errorf("bearer token with HTTP request requires HTTPS")
+	}
+
+	reqClone := req.Clone()
+	reqClone.Header.Set("Authorization", "Bearer "+token.Value)
+
+	return reqClone, nil
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/auth/bearer/token.go 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/auth/bearer/token.go
--- 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/auth/bearer/token.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/auth/bearer/token.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,50 @@
+package bearer
+
+import (
+	"context"
+	"time"
+)
+
+// Token provides a type wrapping a bearer token and expiration metadata.
+type Token struct {
+	Value string
+
+	CanExpire bool
+	Expires   time.Time
+}
+
+// Expired returns if the token's Expires time is before or equal to the time
+// provided. If CanExpires is false, Expired will always return false.
+func (t Token) Expired(now time.Time) bool {
+	if !t.CanExpire {
+		return false
+	}
+	now = now.Round(0)
+	return now.Equal(t.Expires) || now.After(t.Expires)
+}
+
+// TokenProvider provides interface for retrieving bearer tokens.
+type TokenProvider interface {
+	RetrieveBearerToken(context.Context) (Token, error)
+}
+
+// TokenProviderFunc provides a helper utility to wrap a function as a type
+// that implements the TokenProvider interface.
+type TokenProviderFunc func(context.Context) (Token, error)
+
+// RetrieveBearerToken calls the wrapped function, returning the Token or
+// error.
+func (fn TokenProviderFunc) RetrieveBearerToken(ctx context.Context) (Token, error) {
+	return fn(ctx)
+}
+
+// StaticTokenProvider provides a utility for wrapping a static bearer token
+// value within an implementation of a token provider.
+type StaticTokenProvider struct {
+	Token Token
+}
+
+// RetrieveBearerToken returns the static token specified.
+func (s StaticTokenProvider) RetrieveBearerToken(context.Context) (Token, error) {
+	return s.Token, nil
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/auth/bearer/token_cache.go 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/auth/bearer/token_cache.go
--- 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/auth/bearer/token_cache.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/auth/bearer/token_cache.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,208 @@
+package bearer
+
+import (
+	"context"
+	"fmt"
+	"sync/atomic"
+	"time"
+
+	smithycontext "github.com/aws/smithy-go/context"
+	"github.com/aws/smithy-go/internal/sync/singleflight"
+)
+
+// package variable that can be override in unit tests.
+var timeNow = time.Now
+
+// TokenCacheOptions provides a set of optional configuration options for the
+// TokenCache TokenProvider.
+type TokenCacheOptions struct {
+	// The duration before the token will expire when the credentials will be
+	// refreshed. If DisableAsyncRefresh is true, the RetrieveBearerToken calls
+	// will be blocking.
+	//
+	// Asynchronous refreshes are deduplicated, and only one will be in-flight
+	// at a time. If the token expires while an asynchronous refresh is in
+	// flight, the next call to RetrieveBearerToken will block on that refresh
+	// to return.
+	RefreshBeforeExpires time.Duration
+
+	// The timeout the underlying TokenProvider's RetrieveBearerToken call must
+	// return within, or will be canceled. Defaults to 0, no timeout.
+	//
+	// If 0 timeout, its possible for the underlying tokenProvider's
+	// RetrieveBearerToken call to block forever. Preventing subsequent
+	// TokenCache attempts to refresh the token.
+	//
+	// If this timeout is reached all pending deduplicated calls to
+	// TokenCache RetrieveBearerToken will fail with an error.
+	RetrieveBearerTokenTimeout time.Duration
+
+	// The minimum duration between asynchronous refresh attempts. If the next
+	// asynchronous recent refresh attempt was within the minimum delay
+	// duration, the call to retrieve will return the current cached token, if
+	// not expired.
+	//
+	// The asynchronous retrieve is deduplicated across multiple calls when
+	// RetrieveBearerToken is called. The asynchronous retrieve is not a
+	// periodic task. It is only performed when the token has not yet expired,
+	// and the current item is within the RefreshBeforeExpires window, and the
+	// TokenCache's RetrieveBearerToken method is called.
+	//
+	// If 0, (default) there will be no minimum delay between asynchronous
+	// refresh attempts.
+	//
+	// If DisableAsyncRefresh is true, this option is ignored.
+	AsyncRefreshMinimumDelay time.Duration
+
+	// Sets if the TokenCache will attempt to refresh the token in the
+	// background asynchronously instead of blocking for credentials to be
+	// refreshed. If disabled token refresh will be blocking.
+	//
+	// The first call to RetrieveBearerToken will always be blocking, because
+	// there is no cached token.
+	DisableAsyncRefresh bool
+}
+
+// TokenCache provides an utility to cache Bearer Authentication tokens from a
+// wrapped TokenProvider. The TokenCache can be has options to configure the
+// cache's early and asynchronous refresh of the token.
+type TokenCache struct {
+	options  TokenCacheOptions
+	provider TokenProvider
+
+	cachedToken            atomic.Value
+	lastRefreshAttemptTime atomic.Value
+	sfGroup                singleflight.Group
+}
+
+// NewTokenCache returns a initialized TokenCache that implements the
+// TokenProvider interface. Wrapping the provider passed in. Also taking a set
+// of optional functional option parameters to configure the token cache.
+func NewTokenCache(provider TokenProvider, optFns ...func(*TokenCacheOptions)) *TokenCache {
+	var options TokenCacheOptions
+	for _, fn := range optFns {
+		fn(&options)
+	}
+
+	return &TokenCache{
+		options:  options,
+		provider: provider,
+	}
+}
+
+// RetrieveBearerToken returns the token if it could be obtained, or error if a
+// valid token could not be retrieved.
+//
+// The passed in Context's cancel/deadline/timeout will impacting only this
+// individual retrieve call and not any other already queued up calls. This
+// means underlying provider's RetrieveBearerToken calls could block for ever,
+// and not be canceled with the Context. Set RetrieveBearerTokenTimeout to
+// provide a timeout, preventing the underlying TokenProvider blocking forever.
+//
+// By default, if the passed in Context is canceled, all of its values will be
+// considered expired. The wrapped TokenProvider will not be able to lookup the
+// values from the Context once it is expired. This is done to protect against
+// expired values no longer being valid. To disable this behavior, use
+// smithy-go's context.WithPreserveExpiredValues to add a value to the Context
+// before calling RetrieveBearerToken to enable support for expired values.
+//
+// Without RetrieveBearerTokenTimeout there is the potential for a underlying
+// Provider's RetrieveBearerToken call to sit forever. Blocking in subsequent
+// attempts at refreshing the token.
+func (p *TokenCache) RetrieveBearerToken(ctx context.Context) (Token, error) {
+	cachedToken, ok := p.getCachedToken()
+	if !ok || cachedToken.Expired(timeNow()) {
+		return p.refreshBearerToken(ctx)
+	}
+
+	// Check if the token should be refreshed before it expires.
+	refreshToken := cachedToken.Expired(timeNow().Add(p.options.RefreshBeforeExpires))
+	if !refreshToken {
+		return cachedToken, nil
+	}
+
+	if p.options.DisableAsyncRefresh {
+		return p.refreshBearerToken(ctx)
+	}
+
+	p.tryAsyncRefresh(ctx)
+
+	return cachedToken, nil
+}
+
+// tryAsyncRefresh attempts to asynchronously refresh the token returning the
+// already cached token. If it AsyncRefreshMinimumDelay option is not zero, and
+// the duration since the last refresh is less than that value, nothing will be
+// done.
+func (p *TokenCache) tryAsyncRefresh(ctx context.Context) {
+	if p.options.AsyncRefreshMinimumDelay != 0 {
+		var lastRefreshAttempt time.Time
+		if v := p.lastRefreshAttemptTime.Load(); v != nil {
+			lastRefreshAttempt = v.(time.Time)
+		}
+
+		if timeNow().Before(lastRefreshAttempt.Add(p.options.AsyncRefreshMinimumDelay)) {
+			return
+		}
+	}
+
+	// Ignore the returned channel so this won't be blocking, and limit the
+	// number of additional goroutines created.
+	p.sfGroup.DoChan("async-refresh", func() (interface{}, error) {
+		res, err := p.refreshBearerToken(ctx)
+		if p.options.AsyncRefreshMinimumDelay != 0 {
+			var refreshAttempt time.Time
+			if err != nil {
+				refreshAttempt = timeNow()
+			}
+			p.lastRefreshAttemptTime.Store(refreshAttempt)
+		}
+
+		return res, err
+	})
+}
+
+func (p *TokenCache) refreshBearerToken(ctx context.Context) (Token, error) {
+	resCh := p.sfGroup.DoChan("refresh-token", func() (interface{}, error) {
+		ctx := smithycontext.WithSuppressCancel(ctx)
+		if v := p.options.RetrieveBearerTokenTimeout; v != 0 {
+			var cancel func()
+			ctx, cancel = context.WithTimeout(ctx, v)
+			defer cancel()
+		}
+		return p.singleRetrieve(ctx)
+	})
+
+	select {
+	case res := <-resCh:
+		return res.Val.(Token), res.Err
+	case <-ctx.Done():
+		return Token{}, fmt.Errorf("retrieve bearer token canceled, %w", ctx.Err())
+	}
+}
+
+func (p *TokenCache) singleRetrieve(ctx context.Context) (interface{}, error) {
+	token, err := p.provider.RetrieveBearerToken(ctx)
+	if err != nil {
+		return Token{}, fmt.Errorf("failed to retrieve bearer token, %w", err)
+	}
+
+	p.cachedToken.Store(&token)
+	return token, nil
+}
+
+// getCachedToken returns the currently cached token and true if found. Returns
+// false if no token is cached.
+func (p *TokenCache) getCachedToken() (Token, bool) {
+	v := p.cachedToken.Load()
+	if v == nil {
+		return Token{}, false
+	}
+
+	t := v.(*Token)
+	if t == nil || t.Value == "" {
+		return Token{}, false
+	}
+
+	return *t, true
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/auth/identity.go 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/auth/identity.go
--- 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/auth/identity.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/auth/identity.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,47 @@
+package auth
+
+import (
+	"context"
+	"time"
+
+	"github.com/aws/smithy-go"
+)
+
+// Identity contains information that identifies who the user making the
+// request is.
+type Identity interface {
+	Expiration() time.Time
+}
+
+// IdentityResolver defines the interface through which an Identity is
+// retrieved.
+type IdentityResolver interface {
+	GetIdentity(context.Context, smithy.Properties) (Identity, error)
+}
+
+// IdentityResolverOptions defines the interface through which an entity can be
+// queried to retrieve an IdentityResolver for a given auth scheme.
+type IdentityResolverOptions interface {
+	GetIdentityResolver(schemeID string) IdentityResolver
+}
+
+// AnonymousIdentity is a sentinel to indicate no identity.
+type AnonymousIdentity struct{}
+
+var _ Identity = (*AnonymousIdentity)(nil)
+
+// Expiration returns the zero value for time, as anonymous identity never
+// expires.
+func (*AnonymousIdentity) Expiration() time.Time {
+	return time.Time{}
+}
+
+// AnonymousIdentityResolver returns AnonymousIdentity.
+type AnonymousIdentityResolver struct{}
+
+var _ IdentityResolver = (*AnonymousIdentityResolver)(nil)
+
+// GetIdentity returns AnonymousIdentity.
+func (*AnonymousIdentityResolver) GetIdentity(_ context.Context, _ smithy.Properties) (Identity, error) {
+	return &AnonymousIdentity{}, nil
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/auth/option.go 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/auth/option.go
--- 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/auth/option.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/auth/option.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,25 @@
+package auth
+
+import "github.com/aws/smithy-go"
+
+type (
+	authOptionsKey struct{}
+)
+
+// Option represents a possible authentication method for an operation.
+type Option struct {
+	SchemeID           string
+	IdentityProperties smithy.Properties
+	SignerProperties   smithy.Properties
+}
+
+// GetAuthOptions gets auth Options from Properties.
+func GetAuthOptions(p *smithy.Properties) ([]*Option, bool) {
+	v, ok := p.Get(authOptionsKey{}).([]*Option)
+	return v, ok
+}
+
+// SetAuthOptions sets auth Options on Properties.
+func SetAuthOptions(p *smithy.Properties, options []*Option) {
+	p.Set(authOptionsKey{}, options)
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/auth/scheme_id.go 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/auth/scheme_id.go
--- 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/auth/scheme_id.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/auth/scheme_id.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,20 @@
+package auth
+
+// Anonymous
+const (
+	SchemeIDAnonymous = "smithy.api#noAuth"
+)
+
+// HTTP auth schemes
+const (
+	SchemeIDHTTPBasic  = "smithy.api#httpBasicAuth"
+	SchemeIDHTTPDigest = "smithy.api#httpDigestAuth"
+	SchemeIDHTTPBearer = "smithy.api#httpBearerAuth"
+	SchemeIDHTTPAPIKey = "smithy.api#httpApiKeyAuth"
+)
+
+// AWS auth schemes
+const (
+	SchemeIDSigV4  = "aws.auth#sigv4"
+	SchemeIDSigV4A = "aws.auth#sigv4a"
+)
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/context/suppress_expired.go 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/context/suppress_expired.go
--- 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/context/suppress_expired.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/context/suppress_expired.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,81 @@
+package context
+
+import "context"
+
+// valueOnlyContext provides a utility to preserve only the values of a
+// Context. Suppressing any cancellation or deadline on that context being
+// propagated downstream of this value.
+//
+// If preserveExpiredValues is false (default), and the valueCtx is canceled,
+// calls to lookup values with the Values method, will always return nil. Setting
+// preserveExpiredValues to true, will allow the valueOnlyContext to lookup
+// values in valueCtx even if valueCtx is canceled.
+//
+// Based on the Go standard libraries net/lookup.go onlyValuesCtx utility.
+// https://github.com/golang/go/blob/da2773fe3e2f6106634673a38dc3a6eb875fe7d8/src/net/lookup.go
+type valueOnlyContext struct {
+	context.Context
+
+	preserveExpiredValues bool
+	valuesCtx             context.Context
+}
+
+var _ context.Context = (*valueOnlyContext)(nil)
+
+// Value looks up the key, returning its value. If configured to not preserve
+// values of expired context, and the wrapping context is canceled, nil will be
+// returned.
+func (v *valueOnlyContext) Value(key interface{}) interface{} {
+	if !v.preserveExpiredValues {
+		select {
+		case <-v.valuesCtx.Done():
+			return nil
+		default:
+		}
+	}
+
+	return v.valuesCtx.Value(key)
+}
+
+// WithSuppressCancel wraps the Context value, suppressing its deadline and
+// cancellation events being propagated downstream to consumer of the returned
+// context.
+//
+// By default the wrapped Context's Values are available downstream until the
+// wrapped Context is canceled. Once the wrapped Context is canceled, Values
+// method called on the context return will no longer lookup any key. As they
+// are now considered expired.
+//
+// To override this behavior, use WithPreserveExpiredValues on the Context
+// before it is wrapped by WithSuppressCancel. This will make the Context
+// returned by WithSuppressCancel allow lookup of expired values.
+func WithSuppressCancel(ctx context.Context) context.Context {
+	return &valueOnlyContext{
+		Context:   context.Background(),
+		valuesCtx: ctx,
+
+		preserveExpiredValues: GetPreserveExpiredValues(ctx),
+	}
+}
+
+type preserveExpiredValuesKey struct{}
+
+// WithPreserveExpiredValues adds a Value to the Context if expired values
+// should be preserved, and looked up by a Context wrapped by
+// WithSuppressCancel.
+//
+// WithPreserveExpiredValues must be added as a value to a Context, before that
+// Context is wrapped by WithSuppressCancel
+func WithPreserveExpiredValues(ctx context.Context, enable bool) context.Context {
+	return context.WithValue(ctx, preserveExpiredValuesKey{}, enable)
+}
+
+// GetPreserveExpiredValues looks up, and returns the PreserveExpressValues
+// value in the context. Returning true if enabled, false otherwise.
+func GetPreserveExpiredValues(ctx context.Context) bool {
+	v := ctx.Value(preserveExpiredValuesKey{})
+	if v != nil {
+		return v.(bool)
+	}
+	return false
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/doc.go 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/doc.go
--- 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/doc.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/doc.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,2 @@
+// Package smithy provides the core components for a Smithy SDK.
+package smithy
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/document/doc.go 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/document/doc.go
--- 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/document/doc.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/document/doc.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,12 @@
+// Package document provides interface definitions and error types for document types.
+//
+// A document is a protocol-agnostic type which supports a JSON-like data-model. You can use this type to send
+// UTF-8 strings, arbitrary precision numbers, booleans, nulls, a list of these values, and a map of UTF-8
+// strings to these values.
+//
+// API Clients expose document constructors in their respective client document packages which must be used to
+// Marshal and Unmarshal Go types to and from their respective protocol representations.
+//
+// See the Marshaler and Unmarshaler type documentation for more details on how to Go types can be converted to and from
+// document types.
+package document
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/document/document.go 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/document/document.go
--- 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/document/document.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/document/document.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,153 @@
+package document
+
+import (
+	"fmt"
+	"math/big"
+	"strconv"
+)
+
+// Marshaler is an interface for a type that marshals a document to its protocol-specific byte representation and
+// returns the resulting bytes. A non-nil error will be returned if an error is encountered during marshaling.
+//
+// Marshal supports basic scalars (int,uint,float,bool,string), big.Int, and big.Float, maps, slices, and structs.
+// Anonymous nested types are flattened based on Go anonymous type visibility.
+//
+// When defining struct types, the `document` struct tag can be used to control how the value will be
+// marshaled into the resulting protocol document.
+//
+//		// Field is ignored
+//		Field int `document:"-"`
+//
+//		// Field object of key "myName"
+//		Field int `document:"myName"`
+//
+//		// Field object key of key "myName", and
+//		// Field is omitted if the field is a zero value for the type.
+//		Field int `document:"myName,omitempty"`
+//
+//		// Field object key of "Field", and
+//		// Field is omitted if the field is a zero value for the type.
+//		Field int `document:",omitempty"`
+//
+// All struct fields, including anonymous fields, are marshaled unless
+// any of the following conditions are met.
+//
+//		- the field is not exported
+//		- document field tag is "-"
+//		- document field tag specifies "omitempty", and is a zero value.
+//
+// Pointer and interface values are encoded as the value pointed to or
+// contained in the interface. A nil value encodes as a null
+// value unless `omitempty` struct tag is provided.
+//
+// Channel, complex, and function values are not encoded and will be skipped
+// when walking the value to be marshaled.
+//
+// time.Time is not supported and will cause the Marshaler to return an error. These values should be represented
+// by your application as a string or numerical representation.
+//
+// Errors that occur when marshaling will stop the marshaler, and return the error.
+//
+// Marshal cannot represent cyclic data structures and will not handle them.
+// Passing cyclic structures to Marshal will result in an infinite recursion.
+type Marshaler interface {
+	MarshalSmithyDocument() ([]byte, error)
+}
+
+// Unmarshaler is an interface for a type that unmarshals a document from its protocol-specific representation, and
+// stores the result into the value pointed by v. If v is nil or not a pointer then InvalidUnmarshalError will be
+// returned.
+//
+// Unmarshaler supports the same encodings produced by a document Marshaler. This includes support for the `document`
+// struct field tag for controlling how struct fields are unmarshaled.
+//
+// Both generic interface{} and concrete types are valid unmarshal destination types. When unmarshaling a document
+// into an empty interface the Unmarshaler will store one of these values:
+//   bool,                   for boolean values
+//   document.Number,        for arbitrary-precision numbers (int64, float64, big.Int, big.Float)
+//   string,                 for string values
+//   []interface{},          for array values
+//   map[string]interface{}, for objects
+//   nil,                    for null values
+//
+// When unmarshaling, any error that occurs will halt the unmarshal and return the error.
+type Unmarshaler interface {
+	UnmarshalSmithyDocument(v interface{}) error
+}
+
+type noSerde interface {
+	noSmithyDocumentSerde()
+}
+
+// NoSerde is a sentinel value to indicate that a given type should not be marshaled or unmarshaled
+// into a protocol document.
+type NoSerde struct{}
+
+func (n NoSerde) noSmithyDocumentSerde() {}
+
+var _ noSerde = (*NoSerde)(nil)
+
+// IsNoSerde returns whether the given type implements the no smithy document serde interface.
+func IsNoSerde(x interface{}) bool {
+	_, ok := x.(noSerde)
+	return ok
+}
+
+// Number is an arbitrary precision numerical value
+type Number string
+
+// String returns the number as a string.
+func (n Number) String() string {
+	return string(n)
+}
+
+// Int64 returns the number as an int64.
+func (n Number) Int64() (int64, error) {
+	return n.intOfBitSize(64)
+}
+
+func (n Number) intOfBitSize(bitSize int) (int64, error) {
+	return strconv.ParseInt(string(n), 10, bitSize)
+}
+
+// Uint64 returns the number as a uint64.
+func (n Number) Uint64() (uint64, error) {
+	return n.uintOfBitSize(64)
+}
+
+func (n Number) uintOfBitSize(bitSize int) (uint64, error) {
+	return strconv.ParseUint(string(n), 10, bitSize)
+}
+
+// Float32 returns the number parsed as a 32-bit float, returns a float64.
+func (n Number) Float32() (float64, error) {
+	return n.floatOfBitSize(32)
+}
+
+// Float64 returns the number as a float64.
+func (n Number) Float64() (float64, error) {
+	return n.floatOfBitSize(64)
+}
+
+// floatOfBitSize returns the number parsed as a float of the given bit size.
+func (n Number) floatOfBitSize(bitSize int) (float64, error) {
+	return strconv.ParseFloat(string(n), bitSize)
+}
+
+// BigFloat attempts to convert the number to a big.Float, returns an error if the operation fails.
+func (n Number) BigFloat() (*big.Float, error) {
+	f, ok := (&big.Float{}).SetString(string(n))
+	if !ok {
+		return nil, fmt.Errorf("failed to convert to big.Float")
+	}
+	return f, nil
+}
+
+// BigInt attempts to convert the number to a big.Int, returns an error if the operation fails.
+func (n Number) BigInt() (*big.Int, error) {
+	f, ok := (&big.Int{}).SetString(string(n), 10)
+	if !ok {
+		return nil, fmt.Errorf("failed to convert to big.Float")
+	}
+	return f, nil
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/document/errors.go 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/document/errors.go
--- 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/document/errors.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/document/errors.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,75 @@
+package document
+
+import (
+	"fmt"
+	"reflect"
+)
+
+// UnmarshalTypeError is an error type representing an error
+// unmarshaling a Smithy document to a Go value type. This is different
+// from UnmarshalError in that it does not wrap an underlying error type.
+type UnmarshalTypeError struct {
+	Value string
+	Type  reflect.Type
+}
+
+// Error returns the string representation of the error.
+// Satisfying the error interface.
+func (e *UnmarshalTypeError) Error() string {
+	return fmt.Sprintf("unmarshal failed, cannot unmarshal %s into Go value type %s",
+		e.Value, e.Type.String())
+}
+
+// An InvalidUnmarshalError is an error type representing an invalid type
+// encountered while unmarshaling a Smithy document to a Go value type.
+type InvalidUnmarshalError struct {
+	Type reflect.Type
+}
+
+// Error returns the string representation of the error.
+// Satisfying the error interface.
+func (e *InvalidUnmarshalError) Error() string {
+	var msg string
+	if e.Type == nil {
+		msg = "cannot unmarshal to nil value"
+	} else if e.Type.Kind() != reflect.Ptr {
+		msg = fmt.Sprintf("cannot unmarshal to non-pointer value, got %s", e.Type.String())
+	} else {
+		msg = fmt.Sprintf("cannot unmarshal to nil value, %s", e.Type.String())
+	}
+
+	return fmt.Sprintf("unmarshal failed, %s", msg)
+}
+
+// An UnmarshalError wraps an error that occurred while unmarshaling a
+// Smithy document into a Go type. This is different from
+// UnmarshalTypeError in that it wraps the underlying error that occurred.
+type UnmarshalError struct {
+	Err   error
+	Value string
+	Type  reflect.Type
+}
+
+// Unwrap returns the underlying unmarshaling error
+func (e *UnmarshalError) Unwrap() error {
+	return e.Err
+}
+
+// Error returns the string representation of the error.
+// Satisfying the error interface.
+func (e *UnmarshalError) Error() string {
+	return fmt.Sprintf("unmarshal failed, cannot unmarshal %q into %s, %v",
+		e.Value, e.Type.String(), e.Err)
+}
+
+// An InvalidMarshalError is an error type representing an error
+// occurring when marshaling a Go value type.
+type InvalidMarshalError struct {
+	Message string
+}
+
+// Error returns the string representation of the error.
+// Satisfying the error interface.
+func (e *InvalidMarshalError) Error() string {
+	return fmt.Sprintf("marshal failed, %s", e.Message)
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/document.go 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/document.go
--- 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/document.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/document.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,10 @@
+package smithy
+
+// Document provides access to loosely structured data in a document-like
+// format.
+//
+// Deprecated: See the github.com/aws/smithy-go/document package.
+type Document interface {
+	UnmarshalDocument(interface{}) error
+	GetValue() (interface{}, error)
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/encoding/doc.go 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/encoding/doc.go
--- 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/encoding/doc.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/encoding/doc.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,4 @@
+// Package encoding provides utilities for encoding values for specific
+// document encodings.
+
+package encoding
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/encoding/encoding.go 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/encoding/encoding.go
--- 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/encoding/encoding.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/encoding/encoding.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,40 @@
+package encoding
+
+import (
+	"fmt"
+	"math"
+	"strconv"
+)
+
+// EncodeFloat encodes a float value as per the stdlib encoder for json and xml protocol
+// This encodes a float value into dst while attempting to conform to ES6 ToString for Numbers
+//
+// Based on encoding/json floatEncoder from the Go Standard Library
+// https://golang.org/src/encoding/json/encode.go
+func EncodeFloat(dst []byte, v float64, bits int) []byte {
+	if math.IsInf(v, 0) || math.IsNaN(v) {
+		panic(fmt.Sprintf("invalid float value: %s", strconv.FormatFloat(v, 'g', -1, bits)))
+	}
+
+	abs := math.Abs(v)
+	fmt := byte('f')
+
+	if abs != 0 {
+		if bits == 64 && (abs < 1e-6 || abs >= 1e21) || bits == 32 && (float32(abs) < 1e-6 || float32(abs) >= 1e21) {
+			fmt = 'e'
+		}
+	}
+
+	dst = strconv.AppendFloat(dst, v, fmt, -1, bits)
+
+	if fmt == 'e' {
+		// clean up e-09 to e-9
+		n := len(dst)
+		if n >= 4 && dst[n-4] == 'e' && dst[n-3] == '-' && dst[n-2] == '0' {
+			dst[n-2] = dst[n-1]
+			dst = dst[:n-1]
+		}
+	}
+
+	return dst
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/encoding/httpbinding/encode.go 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/encoding/httpbinding/encode.go
--- 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/encoding/httpbinding/encode.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/encoding/httpbinding/encode.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,123 @@
+package httpbinding
+
+import (
+	"fmt"
+	"net/http"
+	"net/url"
+	"strconv"
+	"strings"
+)
+
+const (
+	contentLengthHeader = "Content-Length"
+	floatNaN            = "NaN"
+	floatInfinity       = "Infinity"
+	floatNegInfinity    = "-Infinity"
+)
+
+// An Encoder provides encoding of REST URI path, query, and header components
+// of an HTTP request. Can also encode a stream as the payload.
+//
+// Does not support SetFields.
+type Encoder struct {
+	path, rawPath, pathBuffer []byte
+
+	query  url.Values
+	header http.Header
+}
+
+// NewEncoder creates a new encoder from the passed in request. It assumes that
+// raw path contains no valuable information at this point, so it passes in path
+// as path and raw path for subsequent transformations.
+func NewEncoder(path, query string, headers http.Header) (*Encoder, error) {
+	return NewEncoderWithRawPath(path, path, query, headers)
+}
+
+// NewEncoderWithRawPath creates a new encoder from the passed in request. All query and
+// header values will be added on top of the request's existing values. Overwriting
+// duplicate values.
+func NewEncoderWithRawPath(path, rawPath, query string, headers http.Header) (*Encoder, error) {
+	parseQuery, err := url.ParseQuery(query)
+	if err != nil {
+		return nil, fmt.Errorf("failed to parse query string: %w", err)
+	}
+
+	e := &Encoder{
+		path:    []byte(path),
+		rawPath: []byte(rawPath),
+		query:   parseQuery,
+		header:  headers.Clone(),
+	}
+
+	return e, nil
+}
+
+// Encode returns a REST protocol encoder for encoding HTTP bindings.
+//
+// Due to net/http requiring `Content-Length` to be specified on the http.Request#ContentLength directly, Encode
+// will look for whether the header is present, and if so will remove it and set the respective value on http.Request.
+//
+// Returns any error occurring during encoding.
+func (e *Encoder) Encode(req *http.Request) (*http.Request, error) {
+	req.URL.Path, req.URL.RawPath = string(e.path), string(e.rawPath)
+	req.URL.RawQuery = e.query.Encode()
+
+	// net/http ignores Content-Length header and requires it to be set on http.Request
+	if v := e.header.Get(contentLengthHeader); len(v) > 0 {
+		iv, err := strconv.ParseInt(v, 10, 64)
+		if err != nil {
+			return nil, err
+		}
+		req.ContentLength = iv
+		e.header.Del(contentLengthHeader)
+	}
+
+	req.Header = e.header
+
+	return req, nil
+}
+
+// AddHeader returns a HeaderValue for appending to the given header name
+func (e *Encoder) AddHeader(key string) HeaderValue {
+	return newHeaderValue(e.header, key, true)
+}
+
+// SetHeader returns a HeaderValue for setting the given header name
+func (e *Encoder) SetHeader(key string) HeaderValue {
+	return newHeaderValue(e.header, key, false)
+}
+
+// Headers returns a Header used for encoding headers with the given prefix
+func (e *Encoder) Headers(prefix string) Headers {
+	return Headers{
+		header: e.header,
+		prefix: strings.TrimSpace(prefix),
+	}
+}
+
+// HasHeader returns if a header with the key specified exists with one or
+// more value.
+func (e Encoder) HasHeader(key string) bool {
+	return len(e.header[key]) != 0
+}
+
+// SetURI returns a URIValue used for setting the given path key
+func (e *Encoder) SetURI(key string) URIValue {
+	return newURIValue(&e.path, &e.rawPath, &e.pathBuffer, key)
+}
+
+// SetQuery returns a QueryValue used for setting the given query key
+func (e *Encoder) SetQuery(key string) QueryValue {
+	return NewQueryValue(e.query, key, false)
+}
+
+// AddQuery returns a QueryValue used for appending the given query key
+func (e *Encoder) AddQuery(key string) QueryValue {
+	return NewQueryValue(e.query, key, true)
+}
+
+// HasQuery returns if a query with the key specified exists with one or
+// more values.
+func (e *Encoder) HasQuery(key string) bool {
+	return len(e.query.Get(key)) != 0
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/encoding/httpbinding/header.go 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/encoding/httpbinding/header.go
--- 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/encoding/httpbinding/header.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/encoding/httpbinding/header.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,122 @@
+package httpbinding
+
+import (
+	"encoding/base64"
+	"math"
+	"math/big"
+	"net/http"
+	"strconv"
+	"strings"
+)
+
+// Headers is used to encode header keys using a provided prefix
+type Headers struct {
+	header http.Header
+	prefix string
+}
+
+// AddHeader returns a HeaderValue used to append values to prefix+key
+func (h Headers) AddHeader(key string) HeaderValue {
+	return h.newHeaderValue(key, true)
+}
+
+// SetHeader returns a HeaderValue used to set the value of prefix+key
+func (h Headers) SetHeader(key string) HeaderValue {
+	return h.newHeaderValue(key, false)
+}
+
+func (h Headers) newHeaderValue(key string, append bool) HeaderValue {
+	return newHeaderValue(h.header, h.prefix+strings.TrimSpace(key), append)
+}
+
+// HeaderValue is used to encode values to an HTTP header
+type HeaderValue struct {
+	header http.Header
+	key    string
+	append bool
+}
+
+func newHeaderValue(header http.Header, key string, append bool) HeaderValue {
+	return HeaderValue{header: header, key: strings.TrimSpace(key), append: append}
+}
+
+func (h HeaderValue) modifyHeader(value string) {
+	if h.append {
+		h.header[h.key] = append(h.header[h.key], value)
+	} else {
+		h.header[h.key] = append(h.header[h.key][:0], value)
+	}
+}
+
+// String encodes the value v as the header string value
+func (h HeaderValue) String(v string) {
+	h.modifyHeader(v)
+}
+
+// Byte encodes the value v as a header string value
+func (h HeaderValue) Byte(v int8) {
+	h.Long(int64(v))
+}
+
+// Short encodes the value v as a header string value
+func (h HeaderValue) Short(v int16) {
+	h.Long(int64(v))
+}
+
+// Integer encodes the value v as the header string value
+func (h HeaderValue) Integer(v int32) {
+	h.Long(int64(v))
+}
+
+// Long encodes the value v as the header string value
+func (h HeaderValue) Long(v int64) {
+	h.modifyHeader(strconv.FormatInt(v, 10))
+}
+
+// Boolean encodes the value v as a header string value
+func (h HeaderValue) Boolean(v bool) {
+	h.modifyHeader(strconv.FormatBool(v))
+}
+
+// Float encodes the value v as a header string value
+func (h HeaderValue) Float(v float32) {
+	h.float(float64(v), 32)
+}
+
+// Double encodes the value v as a header string value
+func (h HeaderValue) Double(v float64) {
+	h.float(v, 64)
+}
+
+func (h HeaderValue) float(v float64, bitSize int) {
+	switch {
+	case math.IsNaN(v):
+		h.String(floatNaN)
+	case math.IsInf(v, 1):
+		h.String(floatInfinity)
+	case math.IsInf(v, -1):
+		h.String(floatNegInfinity)
+	default:
+		h.modifyHeader(strconv.FormatFloat(v, 'f', -1, bitSize))
+	}
+}
+
+// BigInteger encodes the value v as a header string value
+func (h HeaderValue) BigInteger(v *big.Int) {
+	h.modifyHeader(v.String())
+}
+
+// BigDecimal encodes the value v as a header string value
+func (h HeaderValue) BigDecimal(v *big.Float) {
+	if i, accuracy := v.Int64(); accuracy == big.Exact {
+		h.Long(i)
+		return
+	}
+	h.modifyHeader(v.Text('e', -1))
+}
+
+// Blob encodes the value v as a base64 header string value
+func (h HeaderValue) Blob(v []byte) {
+	encodeToString := base64.StdEncoding.EncodeToString(v)
+	h.modifyHeader(encodeToString)
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/encoding/httpbinding/path_replace.go 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/encoding/httpbinding/path_replace.go
--- 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/encoding/httpbinding/path_replace.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/encoding/httpbinding/path_replace.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,108 @@
+package httpbinding
+
+import (
+	"bytes"
+	"fmt"
+)
+
+const (
+	uriTokenStart = '{'
+	uriTokenStop  = '}'
+	uriTokenSkip  = '+'
+)
+
+func bufCap(b []byte, n int) []byte {
+	if cap(b) < n {
+		return make([]byte, 0, n)
+	}
+
+	return b[0:0]
+}
+
+// replacePathElement replaces a single element in the path []byte.
+// Escape is used to control whether the value will be escaped using Amazon path escape style.
+func replacePathElement(path, fieldBuf []byte, key, val string, escape bool) ([]byte, []byte, error) {
+	fieldBuf = bufCap(fieldBuf, len(key)+3) // { <key> [+] }
+	fieldBuf = append(fieldBuf, uriTokenStart)
+	fieldBuf = append(fieldBuf, key...)
+
+	start := bytes.Index(path, fieldBuf)
+	end := start + len(fieldBuf)
+	if start < 0 || len(path[end:]) == 0 {
+		// TODO what to do about error?
+		return path, fieldBuf, fmt.Errorf("invalid path index, start=%d,end=%d. %s", start, end, path)
+	}
+
+	encodeSep := true
+	if path[end] == uriTokenSkip {
+		// '+' token means do not escape slashes
+		encodeSep = false
+		end++
+	}
+
+	if escape {
+		val = EscapePath(val, encodeSep)
+	}
+
+	if path[end] != uriTokenStop {
+		return path, fieldBuf, fmt.Errorf("invalid path element, does not contain token stop, %s", path)
+	}
+	end++
+
+	fieldBuf = bufCap(fieldBuf, len(val))
+	fieldBuf = append(fieldBuf, val...)
+
+	keyLen := end - start
+	valLen := len(fieldBuf)
+
+	if keyLen == valLen {
+		copy(path[start:], fieldBuf)
+		return path, fieldBuf, nil
+	}
+
+	newLen := len(path) + (valLen - keyLen)
+	if len(path) < newLen {
+		path = path[:cap(path)]
+	}
+	if cap(path) < newLen {
+		newURI := make([]byte, newLen)
+		copy(newURI, path)
+		path = newURI
+	}
+
+	// shift
+	copy(path[start+valLen:], path[end:])
+	path = path[:newLen]
+	copy(path[start:], fieldBuf)
+
+	return path, fieldBuf, nil
+}
+
+// EscapePath escapes part of a URL path in Amazon style.
+func EscapePath(path string, encodeSep bool) string {
+	var buf bytes.Buffer
+	for i := 0; i < len(path); i++ {
+		c := path[i]
+		if noEscape[c] || (c == '/' && !encodeSep) {
+			buf.WriteByte(c)
+		} else {
+			fmt.Fprintf(&buf, "%%%02X", c)
+		}
+	}
+	return buf.String()
+}
+
+var noEscape [256]bool
+
+func init() {
+	for i := 0; i < len(noEscape); i++ {
+		// AWS expects every character except these to be escaped
+		noEscape[i] = (i >= 'A' && i <= 'Z') ||
+			(i >= 'a' && i <= 'z') ||
+			(i >= '0' && i <= '9') ||
+			i == '-' ||
+			i == '.' ||
+			i == '_' ||
+			i == '~'
+	}
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/encoding/httpbinding/query.go 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/encoding/httpbinding/query.go
--- 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/encoding/httpbinding/query.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/encoding/httpbinding/query.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,107 @@
+package httpbinding
+
+import (
+	"encoding/base64"
+	"math"
+	"math/big"
+	"net/url"
+	"strconv"
+)
+
+// QueryValue is used to encode query key values
+type QueryValue struct {
+	query  url.Values
+	key    string
+	append bool
+}
+
+// NewQueryValue creates a new QueryValue which enables encoding
+// a query value into the given url.Values.
+func NewQueryValue(query url.Values, key string, append bool) QueryValue {
+	return QueryValue{
+		query:  query,
+		key:    key,
+		append: append,
+	}
+}
+
+func (qv QueryValue) updateKey(value string) {
+	if qv.append {
+		qv.query.Add(qv.key, value)
+	} else {
+		qv.query.Set(qv.key, value)
+	}
+}
+
+// Blob encodes v as a base64 query string value
+func (qv QueryValue) Blob(v []byte) {
+	encodeToString := base64.StdEncoding.EncodeToString(v)
+	qv.updateKey(encodeToString)
+}
+
+// Boolean encodes v as a query string value
+func (qv QueryValue) Boolean(v bool) {
+	qv.updateKey(strconv.FormatBool(v))
+}
+
+// String encodes v as a query string value
+func (qv QueryValue) String(v string) {
+	qv.updateKey(v)
+}
+
+// Byte encodes v as a query string value
+func (qv QueryValue) Byte(v int8) {
+	qv.Long(int64(v))
+}
+
+// Short encodes v as a query string value
+func (qv QueryValue) Short(v int16) {
+	qv.Long(int64(v))
+}
+
+// Integer encodes v as a query string value
+func (qv QueryValue) Integer(v int32) {
+	qv.Long(int64(v))
+}
+
+// Long encodes v as a query string value
+func (qv QueryValue) Long(v int64) {
+	qv.updateKey(strconv.FormatInt(v, 10))
+}
+
+// Float encodes v as a query string value
+func (qv QueryValue) Float(v float32) {
+	qv.float(float64(v), 32)
+}
+
+// Double encodes v as a query string value
+func (qv QueryValue) Double(v float64) {
+	qv.float(v, 64)
+}
+
+func (qv QueryValue) float(v float64, bitSize int) {
+	switch {
+	case math.IsNaN(v):
+		qv.String(floatNaN)
+	case math.IsInf(v, 1):
+		qv.String(floatInfinity)
+	case math.IsInf(v, -1):
+		qv.String(floatNegInfinity)
+	default:
+		qv.updateKey(strconv.FormatFloat(v, 'f', -1, bitSize))
+	}
+}
+
+// BigInteger encodes v as a query string value
+func (qv QueryValue) BigInteger(v *big.Int) {
+	qv.updateKey(v.String())
+}
+
+// BigDecimal encodes v as a query string value
+func (qv QueryValue) BigDecimal(v *big.Float) {
+	if i, accuracy := v.Int64(); accuracy == big.Exact {
+		qv.Long(i)
+		return
+	}
+	qv.updateKey(v.Text('e', -1))
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/encoding/httpbinding/uri.go 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/encoding/httpbinding/uri.go
--- 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/encoding/httpbinding/uri.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/encoding/httpbinding/uri.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,111 @@
+package httpbinding
+
+import (
+	"math"
+	"math/big"
+	"strconv"
+	"strings"
+)
+
+// URIValue is used to encode named URI parameters
+type URIValue struct {
+	path, rawPath, buffer *[]byte
+
+	key string
+}
+
+func newURIValue(path *[]byte, rawPath *[]byte, buffer *[]byte, key string) URIValue {
+	return URIValue{path: path, rawPath: rawPath, buffer: buffer, key: key}
+}
+
+func (u URIValue) modifyURI(value string) (err error) {
+	*u.path, *u.buffer, err = replacePathElement(*u.path, *u.buffer, u.key, value, false)
+	if err != nil {
+		return err
+	}
+	*u.rawPath, *u.buffer, err = replacePathElement(*u.rawPath, *u.buffer, u.key, value, true)
+	return err
+}
+
+// Boolean encodes v as a URI string value
+func (u URIValue) Boolean(v bool) error {
+	return u.modifyURI(strconv.FormatBool(v))
+}
+
+// String encodes v as a URI string value
+func (u URIValue) String(v string) error {
+	return u.modifyURI(v)
+}
+
+// Byte encodes v as a URI string value
+func (u URIValue) Byte(v int8) error {
+	return u.Long(int64(v))
+}
+
+// Short encodes v as a URI string value
+func (u URIValue) Short(v int16) error {
+	return u.Long(int64(v))
+}
+
+// Integer encodes v as a URI string value
+func (u URIValue) Integer(v int32) error {
+	return u.Long(int64(v))
+}
+
+// Long encodes v as a URI string value
+func (u URIValue) Long(v int64) error {
+	return u.modifyURI(strconv.FormatInt(v, 10))
+}
+
+// Float encodes v as a URI string value
+func (u URIValue) Float(v float32) error {
+	return u.float(float64(v), 32)
+}
+
+// Double encodes v as a URI string value
+func (u URIValue) Double(v float64) error {
+	return u.float(v, 64)
+}
+
+func (u URIValue) float(v float64, bitSize int) error {
+	switch {
+	case math.IsNaN(v):
+		return u.String(floatNaN)
+	case math.IsInf(v, 1):
+		return u.String(floatInfinity)
+	case math.IsInf(v, -1):
+		return u.String(floatNegInfinity)
+	default:
+		return u.modifyURI(strconv.FormatFloat(v, 'f', -1, bitSize))
+	}
+}
+
+// BigInteger encodes v as a URI string value
+func (u URIValue) BigInteger(v *big.Int) error {
+	return u.modifyURI(v.String())
+}
+
+// BigDecimal encodes v as a URI string value
+func (u URIValue) BigDecimal(v *big.Float) error {
+	if i, accuracy := v.Int64(); accuracy == big.Exact {
+		return u.Long(i)
+	}
+	return u.modifyURI(v.Text('e', -1))
+}
+
+// SplitURI parses a Smithy HTTP binding trait URI
+func SplitURI(uri string) (path, query string) {
+	queryStart := strings.IndexRune(uri, '?')
+	if queryStart == -1 {
+		path = uri
+		return path, query
+	}
+
+	path = uri[:queryStart]
+	if queryStart+1 >= len(uri) {
+		return path, query
+	}
+	query = uri[queryStart+1:]
+
+	return path, query
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/encoding/json/array.go 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/encoding/json/array.go
--- 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/encoding/json/array.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/encoding/json/array.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,35 @@
+package json
+
+import (
+	"bytes"
+)
+
+// Array represents the encoding of a JSON Array
+type Array struct {
+	w          *bytes.Buffer
+	writeComma bool
+	scratch    *[]byte
+}
+
+func newArray(w *bytes.Buffer, scratch *[]byte) *Array {
+	w.WriteRune(leftBracket)
+	return &Array{w: w, scratch: scratch}
+}
+
+// Value adds a new element to the JSON Array.
+// Returns a Value type that is used to encode
+// the array element.
+func (a *Array) Value() Value {
+	if a.writeComma {
+		a.w.WriteRune(comma)
+	} else {
+		a.writeComma = true
+	}
+
+	return newValue(a.w, a.scratch)
+}
+
+// Close encodes the end of the JSON Array
+func (a *Array) Close() {
+	a.w.WriteRune(rightBracket)
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/encoding/json/constants.go 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/encoding/json/constants.go
--- 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/encoding/json/constants.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/encoding/json/constants.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,15 @@
+package json
+
+const (
+	leftBrace  = '{'
+	rightBrace = '}'
+
+	leftBracket  = '['
+	rightBracket = ']'
+
+	comma = ','
+	quote = '"'
+	colon = ':'
+
+	null = "null"
+)
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/encoding/json/decoder_util.go 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/encoding/json/decoder_util.go
--- 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/encoding/json/decoder_util.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/encoding/json/decoder_util.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,139 @@
+package json
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"io"
+)
+
+// DiscardUnknownField discards unknown fields from a decoder body.
+// This function is useful while deserializing a JSON body with additional
+// unknown information that should be discarded.
+func DiscardUnknownField(decoder *json.Decoder) error {
+	// This deliberately does not share logic with CollectUnknownField, even
+	// though it could, because if we were to delegate to that then we'd incur
+	// extra allocations and general memory usage.
+	v, err := decoder.Token()
+	if err == io.EOF {
+		return nil
+	}
+	if err != nil {
+		return err
+	}
+
+	if _, ok := v.(json.Delim); ok {
+		for decoder.More() {
+			err = DiscardUnknownField(decoder)
+		}
+		endToken, err := decoder.Token()
+		if err != nil {
+			return err
+		}
+		if _, ok := endToken.(json.Delim); !ok {
+			return fmt.Errorf("invalid JSON : expected json delimiter, found %T %v",
+				endToken, endToken)
+		}
+	}
+
+	return nil
+}
+
+// CollectUnknownField grabs the contents of unknown fields from the decoder body
+// and returns them as a byte slice. This is useful for skipping unknown fields without
+// completely discarding them.
+func CollectUnknownField(decoder *json.Decoder) ([]byte, error) {
+	result, err := collectUnknownField(decoder)
+	if err != nil {
+		return nil, err
+	}
+
+	buff := bytes.NewBuffer(nil)
+	encoder := json.NewEncoder(buff)
+
+	if err := encoder.Encode(result); err != nil {
+		return nil, err
+	}
+
+	return buff.Bytes(), nil
+}
+
+func collectUnknownField(decoder *json.Decoder) (interface{}, error) {
+	// Grab the initial value. This could either be a concrete value like a string or a
+	// delimiter.
+	token, err := decoder.Token()
+	if err == io.EOF {
+		return nil, nil
+	}
+	if err != nil {
+		return nil, err
+	}
+
+	// If it's an array or object, we'll need to recurse.
+	delim, ok := token.(json.Delim)
+	if ok {
+		var result interface{}
+		if delim == '{' {
+			result, err = collectUnknownObject(decoder)
+			if err != nil {
+				return nil, err
+			}
+		} else {
+			result, err = collectUnknownArray(decoder)
+			if err != nil {
+				return nil, err
+			}
+		}
+
+		// Discard the closing token. decoder.Token handles checking for matching delimiters
+		if _, err := decoder.Token(); err != nil {
+			return nil, err
+		}
+		return result, nil
+	}
+
+	return token, nil
+}
+
+func collectUnknownArray(decoder *json.Decoder) ([]interface{}, error) {
+	// We need to create an empty array here instead of a nil array, since by getting
+	// into this function at all we necessarily have seen a non-nil list.
+	array := []interface{}{}
+
+	for decoder.More() {
+		value, err := collectUnknownField(decoder)
+		if err != nil {
+			return nil, err
+		}
+		array = append(array, value)
+	}
+
+	return array, nil
+}
+
+func collectUnknownObject(decoder *json.Decoder) (map[string]interface{}, error) {
+	object := make(map[string]interface{})
+
+	for decoder.More() {
+		key, err := collectUnknownField(decoder)
+		if err != nil {
+			return nil, err
+		}
+
+		// Keys have to be strings, which is particularly important as the encoder
+		// won't accept a map with interface{} keys
+		stringKey, ok := key.(string)
+		if !ok {
+			return nil, fmt.Errorf("expected string key, found %T", key)
+		}
+
+		value, err := collectUnknownField(decoder)
+		if err != nil {
+			return nil, err
+		}
+
+		object[stringKey] = value
+	}
+
+	return object, nil
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/encoding/json/encoder.go 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/encoding/json/encoder.go
--- 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/encoding/json/encoder.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/encoding/json/encoder.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,30 @@
+package json
+
+import (
+	"bytes"
+)
+
+// Encoder is JSON encoder that supports construction of JSON values
+// using methods.
+type Encoder struct {
+	w *bytes.Buffer
+	Value
+}
+
+// NewEncoder returns a new JSON encoder
+func NewEncoder() *Encoder {
+	writer := bytes.NewBuffer(nil)
+	scratch := make([]byte, 64)
+
+	return &Encoder{w: writer, Value: newValue(writer, &scratch)}
+}
+
+// String returns the String output of the JSON encoder
+func (e Encoder) String() string {
+	return e.w.String()
+}
+
+// Bytes returns the []byte slice of the JSON encoder
+func (e Encoder) Bytes() []byte {
+	return e.w.Bytes()
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/encoding/json/escape.go 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/encoding/json/escape.go
--- 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/encoding/json/escape.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/encoding/json/escape.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,198 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Copied and modified from Go 1.8 stdlib's encoding/json/#safeSet
+
+package json
+
+import (
+	"bytes"
+	"unicode/utf8"
+)
+
+// safeSet holds the value true if the ASCII character with the given array
+// position can be represented inside a JSON string without any further
+// escaping.
+//
+// All values are true except for the ASCII control characters (0-31), the
+// double quote ("), and the backslash character ("\").
+var safeSet = [utf8.RuneSelf]bool{
+	' ':      true,
+	'!':      true,
+	'"':      false,
+	'#':      true,
+	'$':      true,
+	'%':      true,
+	'&':      true,
+	'\'':     true,
+	'(':      true,
+	')':      true,
+	'*':      true,
+	'+':      true,
+	',':      true,
+	'-':      true,
+	'.':      true,
+	'/':      true,
+	'0':      true,
+	'1':      true,
+	'2':      true,
+	'3':      true,
+	'4':      true,
+	'5':      true,
+	'6':      true,
+	'7':      true,
+	'8':      true,
+	'9':      true,
+	':':      true,
+	';':      true,
+	'<':      true,
+	'=':      true,
+	'>':      true,
+	'?':      true,
+	'@':      true,
+	'A':      true,
+	'B':      true,
+	'C':      true,
+	'D':      true,
+	'E':      true,
+	'F':      true,
+	'G':      true,
+	'H':      true,
+	'I':      true,
+	'J':      true,
+	'K':      true,
+	'L':      true,
+	'M':      true,
+	'N':      true,
+	'O':      true,
+	'P':      true,
+	'Q':      true,
+	'R':      true,
+	'S':      true,
+	'T':      true,
+	'U':      true,
+	'V':      true,
+	'W':      true,
+	'X':      true,
+	'Y':      true,
+	'Z':      true,
+	'[':      true,
+	'\\':     false,
+	']':      true,
+	'^':      true,
+	'_':      true,
+	'`':      true,
+	'a':      true,
+	'b':      true,
+	'c':      true,
+	'd':      true,
+	'e':      true,
+	'f':      true,
+	'g':      true,
+	'h':      true,
+	'i':      true,
+	'j':      true,
+	'k':      true,
+	'l':      true,
+	'm':      true,
+	'n':      true,
+	'o':      true,
+	'p':      true,
+	'q':      true,
+	'r':      true,
+	's':      true,
+	't':      true,
+	'u':      true,
+	'v':      true,
+	'w':      true,
+	'x':      true,
+	'y':      true,
+	'z':      true,
+	'{':      true,
+	'|':      true,
+	'}':      true,
+	'~':      true,
+	'\u007f': true,
+}
+
+// copied from Go 1.8 stdlib's encoding/json/#hex
+var hex = "0123456789abcdef"
+
+// escapeStringBytes escapes and writes the passed in string bytes to the dst
+// buffer
+//
+// Copied and modified from Go 1.8 stdlib's encoding/json/#encodeState.stringBytes
+func escapeStringBytes(e *bytes.Buffer, s []byte) {
+	e.WriteByte('"')
+	start := 0
+	for i := 0; i < len(s); {
+		if b := s[i]; b < utf8.RuneSelf {
+			if safeSet[b] {
+				i++
+				continue
+			}
+			if start < i {
+				e.Write(s[start:i])
+			}
+			switch b {
+			case '\\', '"':
+				e.WriteByte('\\')
+				e.WriteByte(b)
+			case '\n':
+				e.WriteByte('\\')
+				e.WriteByte('n')
+			case '\r':
+				e.WriteByte('\\')
+				e.WriteByte('r')
+			case '\t':
+				e.WriteByte('\\')
+				e.WriteByte('t')
+			default:
+				// This encodes bytes < 0x20 except for \t, \n and \r.
+				// If escapeHTML is set, it also escapes <, >, and &
+				// because they can lead to security holes when
+				// user-controlled strings are rendered into JSON
+				// and served to some browsers.
+				e.WriteString(`\u00`)
+				e.WriteByte(hex[b>>4])
+				e.WriteByte(hex[b&0xF])
+			}
+			i++
+			start = i
+			continue
+		}
+		c, size := utf8.DecodeRune(s[i:])
+		if c == utf8.RuneError && size == 1 {
+			if start < i {
+				e.Write(s[start:i])
+			}
+			e.WriteString(`\ufffd`)
+			i += size
+			start = i
+			continue
+		}
+		// U+2028 is LINE SEPARATOR.
+		// U+2029 is PARAGRAPH SEPARATOR.
+		// They are both technically valid characters in JSON strings,
+		// but don't work in JSONP, which has to be evaluated as JavaScript,
+		// and can lead to security holes there. It is valid JSON to
+		// escape them, so we do so unconditionally.
+		// See http://timelessrepo.com/json-isnt-a-javascript-subset for discussion.
+		if c == '\u2028' || c == '\u2029' {
+			if start < i {
+				e.Write(s[start:i])
+			}
+			e.WriteString(`\u202`)
+			e.WriteByte(hex[c&0xF])
+			i += size
+			start = i
+			continue
+		}
+		i += size
+	}
+	if start < len(s) {
+		e.Write(s[start:])
+	}
+	e.WriteByte('"')
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/encoding/json/object.go 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/encoding/json/object.go
--- 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/encoding/json/object.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/encoding/json/object.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,40 @@
+package json
+
+import (
+	"bytes"
+)
+
+// Object represents the encoding of a JSON Object type
+type Object struct {
+	w          *bytes.Buffer
+	writeComma bool
+	scratch    *[]byte
+}
+
+func newObject(w *bytes.Buffer, scratch *[]byte) *Object {
+	w.WriteRune(leftBrace)
+	return &Object{w: w, scratch: scratch}
+}
+
+func (o *Object) writeKey(key string) {
+	escapeStringBytes(o.w, []byte(key))
+	o.w.WriteRune(colon)
+}
+
+// Key adds the given named key to the JSON object.
+// Returns a Value encoder that should be used to encode
+// a JSON value type.
+func (o *Object) Key(name string) Value {
+	if o.writeComma {
+		o.w.WriteRune(comma)
+	} else {
+		o.writeComma = true
+	}
+	o.writeKey(name)
+	return newValue(o.w, o.scratch)
+}
+
+// Close encodes the end of the JSON Object
+func (o *Object) Close() {
+	o.w.WriteRune(rightBrace)
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/encoding/json/value.go 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/encoding/json/value.go
--- 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/encoding/json/value.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/encoding/json/value.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,149 @@
+package json
+
+import (
+	"bytes"
+	"encoding/base64"
+	"math/big"
+	"strconv"
+
+	"github.com/aws/smithy-go/encoding"
+)
+
+// Value represents a JSON Value type
+// JSON Value types: Object, Array, String, Number, Boolean, and Null
+type Value struct {
+	w       *bytes.Buffer
+	scratch *[]byte
+}
+
+// newValue returns a new Value encoder
+func newValue(w *bytes.Buffer, scratch *[]byte) Value {
+	return Value{w: w, scratch: scratch}
+}
+
+// String encodes v as a JSON string
+func (jv Value) String(v string) {
+	escapeStringBytes(jv.w, []byte(v))
+}
+
+// Byte encodes v as a JSON number
+func (jv Value) Byte(v int8) {
+	jv.Long(int64(v))
+}
+
+// Short encodes v as a JSON number
+func (jv Value) Short(v int16) {
+	jv.Long(int64(v))
+}
+
+// Integer encodes v as a JSON number
+func (jv Value) Integer(v int32) {
+	jv.Long(int64(v))
+}
+
+// Long encodes v as a JSON number
+func (jv Value) Long(v int64) {
+	*jv.scratch = strconv.AppendInt((*jv.scratch)[:0], v, 10)
+	jv.w.Write(*jv.scratch)
+}
+
+// ULong encodes v as a JSON number
+func (jv Value) ULong(v uint64) {
+	*jv.scratch = strconv.AppendUint((*jv.scratch)[:0], v, 10)
+	jv.w.Write(*jv.scratch)
+}
+
+// Float encodes v as a JSON number
+func (jv Value) Float(v float32) {
+	jv.float(float64(v), 32)
+}
+
+// Double encodes v as a JSON number
+func (jv Value) Double(v float64) {
+	jv.float(v, 64)
+}
+
+func (jv Value) float(v float64, bits int) {
+	*jv.scratch = encoding.EncodeFloat((*jv.scratch)[:0], v, bits)
+	jv.w.Write(*jv.scratch)
+}
+
+// Boolean encodes v as a JSON boolean
+func (jv Value) Boolean(v bool) {
+	*jv.scratch = strconv.AppendBool((*jv.scratch)[:0], v)
+	jv.w.Write(*jv.scratch)
+}
+
+// Base64EncodeBytes writes v as a base64 value in JSON string
+func (jv Value) Base64EncodeBytes(v []byte) {
+	encodeByteSlice(jv.w, (*jv.scratch)[:0], v)
+}
+
+// Write writes v directly to the JSON document
+func (jv Value) Write(v []byte) {
+	jv.w.Write(v)
+}
+
+// Array returns a new Array encoder
+func (jv Value) Array() *Array {
+	return newArray(jv.w, jv.scratch)
+}
+
+// Object returns a new Object encoder
+func (jv Value) Object() *Object {
+	return newObject(jv.w, jv.scratch)
+}
+
+// Null encodes a null JSON value
+func (jv Value) Null() {
+	jv.w.WriteString(null)
+}
+
+// BigInteger encodes v as JSON value
+func (jv Value) BigInteger(v *big.Int) {
+	jv.w.Write([]byte(v.Text(10)))
+}
+
+// BigDecimal encodes v as JSON value
+func (jv Value) BigDecimal(v *big.Float) {
+	if i, accuracy := v.Int64(); accuracy == big.Exact {
+		jv.Long(i)
+		return
+	}
+	// TODO: Should this try to match ES6 ToString similar to stdlib JSON?
+	jv.w.Write([]byte(v.Text('e', -1)))
+}
+
+// Based on encoding/json encodeByteSlice from the Go Standard Library
+// https://golang.org/src/encoding/json/encode.go
+func encodeByteSlice(w *bytes.Buffer, scratch []byte, v []byte) {
+	if v == nil {
+		w.WriteString(null)
+		return
+	}
+
+	w.WriteRune(quote)
+
+	encodedLen := base64.StdEncoding.EncodedLen(len(v))
+	if encodedLen <= len(scratch) {
+		// If the encoded bytes fit in e.scratch, avoid an extra
+		// allocation and use the cheaper Encoding.Encode.
+		dst := scratch[:encodedLen]
+		base64.StdEncoding.Encode(dst, v)
+		w.Write(dst)
+	} else if encodedLen <= 1024 {
+		// The encoded bytes are short enough to allocate for, and
+		// Encoding.Encode is still cheaper.
+		dst := make([]byte, encodedLen)
+		base64.StdEncoding.Encode(dst, v)
+		w.Write(dst)
+	} else {
+		// The encoded bytes are too long to cheaply allocate, and
+		// Encoding.Encode is no longer noticeably cheaper.
+		enc := base64.NewEncoder(base64.StdEncoding, w)
+		enc.Write(v)
+		enc.Close()
+	}
+
+	w.WriteRune(quote)
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/encoding/xml/array.go 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/encoding/xml/array.go
--- 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/encoding/xml/array.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/encoding/xml/array.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,49 @@
+package xml
+
+// arrayMemberWrapper is the default member wrapper tag name for XML Array type
+var arrayMemberWrapper = StartElement{
+	Name: Name{Local: "member"},
+}
+
+// Array represents the encoding of a XML array type
+type Array struct {
+	w       writer
+	scratch *[]byte
+
+	// member start element is the array member wrapper start element
+	memberStartElement StartElement
+
+	// isFlattened indicates if the array is a flattened array.
+	isFlattened bool
+}
+
+// newArray returns an array encoder.
+// It also takes in the member start element, array start element.
+// It takes in an isFlattened bool, indicating that an array is a flattened array.
+//
+// A wrapped array ["value1", "value2"] is represented as
+// `<List><member>value1</member><member>value2</member></List>`.
+//
+// A flattened array `someList: ["value1", "value2"]` is represented as
+// `<someList>value1</someList><someList>value2</someList>`.
+func newArray(w writer, scratch *[]byte, memberStartElement StartElement, arrayStartElement StartElement, isFlattened bool) *Array {
+	var memberWrapper = memberStartElement
+	if isFlattened {
+		memberWrapper = arrayStartElement
+	}
+
+	return &Array{
+		w:                  w,
+		scratch:            scratch,
+		memberStartElement: memberWrapper,
+		isFlattened:        isFlattened,
+	}
+}
+
+// Member adds a new member to the XML array.
+// It returns a Value encoder.
+func (a *Array) Member() Value {
+	v := newValue(a.w, a.scratch, a.memberStartElement)
+	v.isFlattened = a.isFlattened
+	return v
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/encoding/xml/constants.go 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/encoding/xml/constants.go
--- 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/encoding/xml/constants.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/encoding/xml/constants.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,10 @@
+package xml
+
+const (
+	leftAngleBracket  = '<'
+	rightAngleBracket = '>'
+	forwardSlash      = '/'
+	colon             = ':'
+	equals            = '='
+	quote             = '"'
+)
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/encoding/xml/doc.go 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/encoding/xml/doc.go
--- 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/encoding/xml/doc.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/encoding/xml/doc.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,49 @@
+/*
+Package xml holds the XML encoder utility. This utility is written in accordance with our design to delegate to
+shape serializer function in which a xml.Value will be passed around.
+
+Resources followed: https://smithy.io/2.0/spec/protocol-traits.html#xml-bindings
+
+Member Element
+
+Member element should be used to encode xml shapes into xml elements except for flattened xml shapes. Member element
+write their own element start tag. These elements should always be closed.
+
+Flattened Element
+
+Flattened element should be used to encode shapes marked with flattened trait into xml elements. Flattened element
+do not write a start tag, and thus should not be closed.
+
+Simple types encoding
+
+All simple type methods on value such as String(), Long() etc; auto close the associated member element.
+
+Array
+
+Array returns the collection encoder. It has two modes, wrapped and flattened encoding.
+
+Wrapped arrays have two methods Array() and ArrayWithCustomName() which facilitate array member wrapping.
+By default, a wrapped array's members are wrapped with `member` named start element.
+
+	<wrappedArray><member>apple</member><member>tree</member></wrappedArray>
+
+Flattened arrays rely on Value being marked as flattened.
+If a shape is marked as flattened, Array() will use the shape element name as wrapper for array elements.
+
+	<flattenedArray>apple</flattenedArray><flattenedArray>tree</flattenedArray>
+
+Map
+
+Map is the map encoder. It has two modes, wrapped and flattened encoding.
+
+Wrapped map has Map() method, which facilitates map member wrapping.
+By default, a wrapped map's members are wrapped with `entry` named start element.
+
+	<wrappedMap><entry><Key>apple</Key><Value>tree</Value></entry><entry><Key>snow</Key><Value>ice</Value></entry></wrappedMap>
+
+Flattened map rely on Value being marked as flattened.
+If a shape is marked as flattened, Map() will use the shape element name as wrapper for map entry elements.
+
+	<flattenedMap><Key>apple</Key><Value>tree</Value></flattenedMap><flattenedMap><Key>snow</Key><Value>ice</Value></flattenedMap>
+*/
+package xml
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/encoding/xml/element.go 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/encoding/xml/element.go
--- 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/encoding/xml/element.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/encoding/xml/element.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,91 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Copied and modified from Go 1.14 stdlib's encoding/xml
+
+package xml
+
+// A Name represents an XML name (Local) annotated
+// with a name space identifier (Space).
+// In tokens returned by Decoder.Token, the Space identifier
+// is given as a canonical URL, not the short prefix used
+// in the document being parsed.
+type Name struct {
+	Space, Local string
+}
+
+// An Attr represents an attribute in an XML element (Name=Value).
+type Attr struct {
+	Name  Name
+	Value string
+}
+
+/*
+NewAttribute returns a pointer to an attribute.
+It takes in a local name aka attribute name, and value
+representing the attribute value.
+*/
+func NewAttribute(local, value string) Attr {
+	return Attr{
+		Name: Name{
+			Local: local,
+		},
+		Value: value,
+	}
+}
+
+/*
+NewNamespaceAttribute returns a pointer to an attribute.
+It takes in a local name aka attribute name, and value
+representing the attribute value.
+
+NewNamespaceAttribute appends `xmlns:` in front of namespace
+prefix.
+
+For creating a name space attribute representing
+`xmlns:prefix="http://example.com`, the breakdown would be:
+local = "prefix"
+value = "http://example.com"
+*/
+func NewNamespaceAttribute(local, value string) Attr {
+	attr := NewAttribute(local, value)
+
+	// default name space identifier
+	attr.Name.Space = "xmlns"
+	return attr
+}
+
+// A StartElement represents an XML start element.
+type StartElement struct {
+	Name Name
+	Attr []Attr
+}
+
+// Copy creates a new copy of StartElement.
+func (e StartElement) Copy() StartElement {
+	attrs := make([]Attr, len(e.Attr))
+	copy(attrs, e.Attr)
+	e.Attr = attrs
+	return e
+}
+
+// End returns the corresponding XML end element.
+func (e StartElement) End() EndElement {
+	return EndElement{e.Name}
+}
+
+// returns true if start element local name is empty
+func (e StartElement) isZero() bool {
+	return len(e.Name.Local) == 0
+}
+
+// An EndElement represents an XML end element.
+type EndElement struct {
+	Name Name
+}
+
+// returns true if end element local name is empty
+func (e EndElement) isZero() bool {
+	return len(e.Name.Local) == 0
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/encoding/xml/encoder.go 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/encoding/xml/encoder.go
--- 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/encoding/xml/encoder.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/encoding/xml/encoder.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,51 @@
+package xml
+
+// writer interface used by the xml encoder to write an encoded xml
+// document in a writer.
+type writer interface {
+
+	// Write takes in a byte slice and returns number of bytes written and error
+	Write(p []byte) (n int, err error)
+
+	// WriteRune takes in a rune and returns number of bytes written and error
+	WriteRune(r rune) (n int, err error)
+
+	// WriteString takes in a string and returns number of bytes written and error
+	WriteString(s string) (n int, err error)
+
+	// String method returns a string
+	String() string
+
+	// Bytes return a byte slice.
+	Bytes() []byte
+}
+
+// Encoder is an XML encoder that supports construction of XML values
+// using methods. The encoder takes in a writer and maintains a scratch buffer.
+type Encoder struct {
+	w       writer
+	scratch *[]byte
+}
+
+// NewEncoder returns an XML encoder
+func NewEncoder(w writer) *Encoder {
+	scratch := make([]byte, 64)
+
+	return &Encoder{w: w, scratch: &scratch}
+}
+
+// String returns the string output of the XML encoder
+func (e Encoder) String() string {
+	return e.w.String()
+}
+
+// Bytes returns the []byte slice of the XML encoder
+func (e Encoder) Bytes() []byte {
+	return e.w.Bytes()
+}
+
+// RootElement builds a root element encoding
+// It writes its start element tag. The value should be closed.
+func (e Encoder) RootElement(element StartElement) Value {
+	return newValue(e.w, e.scratch, element)
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/encoding/xml/error_utils.go 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/encoding/xml/error_utils.go
--- 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/encoding/xml/error_utils.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/encoding/xml/error_utils.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,51 @@
+package xml
+
+import (
+	"encoding/xml"
+	"fmt"
+	"io"
+)
+
+// ErrorComponents represents the error response fields
+// that will be deserialized from an xml error response body
+type ErrorComponents struct {
+	Code    string
+	Message string
+}
+
+// GetErrorResponseComponents returns the error fields from an xml error response body
+func GetErrorResponseComponents(r io.Reader, noErrorWrapping bool) (ErrorComponents, error) {
+	if noErrorWrapping {
+		var errResponse noWrappedErrorResponse
+		if err := xml.NewDecoder(r).Decode(&errResponse); err != nil && err != io.EOF {
+			return ErrorComponents{}, fmt.Errorf("error while deserializing xml error response: %w", err)
+		}
+		return ErrorComponents{
+			Code:    errResponse.Code,
+			Message: errResponse.Message,
+		}, nil
+	}
+
+	var errResponse wrappedErrorResponse
+	if err := xml.NewDecoder(r).Decode(&errResponse); err != nil && err != io.EOF {
+		return ErrorComponents{}, fmt.Errorf("error while deserializing xml error response: %w", err)
+	}
+	return ErrorComponents{
+		Code:    errResponse.Code,
+		Message: errResponse.Message,
+	}, nil
+}
+
+// noWrappedErrorResponse represents the error response body with
+// no internal <Error></Error> wrapping
+type noWrappedErrorResponse struct {
+	Code    string `xml:"Code"`
+	Message string `xml:"Message"`
+}
+
+// wrappedErrorResponse represents the error response body
+// wrapped within <Error>...</Error>
+type wrappedErrorResponse struct {
+	Code    string `xml:"Error>Code"`
+	Message string `xml:"Error>Message"`
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/encoding/xml/escape.go 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/encoding/xml/escape.go
--- 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/encoding/xml/escape.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/encoding/xml/escape.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,137 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Copied and modified from Go 1.14 stdlib's encoding/xml
+
+package xml
+
+import (
+	"unicode/utf8"
+)
+
+// Copied from Go 1.14 stdlib's encoding/xml
+var (
+	escQuot = []byte("&#34;") // shorter than "&quot;"
+	escApos = []byte("&#39;") // shorter than "&apos;"
+	escAmp  = []byte("&amp;")
+	escLT   = []byte("&lt;")
+	escGT   = []byte("&gt;")
+	escTab  = []byte("&#x9;")
+	escNL   = []byte("&#xA;")
+	escCR   = []byte("&#xD;")
+	escFFFD = []byte("\uFFFD") // Unicode replacement character
+
+	// Additional Escapes
+	escNextLine = []byte("&#x85;")
+	escLS       = []byte("&#x2028;")
+)
+
+// Decide whether the given rune is in the XML Character Range, per
+// the Char production of https://www.xml.com/axml/testaxml.htm,
+// Section 2.2 Characters.
+func isInCharacterRange(r rune) (inrange bool) {
+	return r == 0x09 ||
+		r == 0x0A ||
+		r == 0x0D ||
+		r >= 0x20 && r <= 0xD7FF ||
+		r >= 0xE000 && r <= 0xFFFD ||
+		r >= 0x10000 && r <= 0x10FFFF
+}
+
+// TODO: When do we need to escape the string?
+// Based on encoding/xml escapeString from the Go Standard Library.
+// https://golang.org/src/encoding/xml/xml.go
+func escapeString(e writer, s string) {
+	var esc []byte
+	last := 0
+	for i := 0; i < len(s); {
+		r, width := utf8.DecodeRuneInString(s[i:])
+		i += width
+		switch r {
+		case '"':
+			esc = escQuot
+		case '\'':
+			esc = escApos
+		case '&':
+			esc = escAmp
+		case '<':
+			esc = escLT
+		case '>':
+			esc = escGT
+		case '\t':
+			esc = escTab
+		case '\n':
+			esc = escNL
+		case '\r':
+			esc = escCR
+		case '\u0085':
+			// Not escaped by stdlib
+			esc = escNextLine
+		case '\u2028':
+			// Not escaped by stdlib
+			esc = escLS
+		default:
+			if !isInCharacterRange(r) || (r == 0xFFFD && width == 1) {
+				esc = escFFFD
+				break
+			}
+			continue
+		}
+		e.WriteString(s[last : i-width])
+		e.Write(esc)
+		last = i
+	}
+	e.WriteString(s[last:])
+}
+
+// escapeText writes to w the properly escaped XML equivalent
+// of the plain text data s. If escapeNewline is true, newline
+// characters will be escaped.
+//
+// Based on encoding/xml escapeText from the Go Standard Library.
+// https://golang.org/src/encoding/xml/xml.go
+func escapeText(e writer, s []byte) {
+	var esc []byte
+	last := 0
+	for i := 0; i < len(s); {
+		r, width := utf8.DecodeRune(s[i:])
+		i += width
+		switch r {
+		case '"':
+			esc = escQuot
+		case '\'':
+			esc = escApos
+		case '&':
+			esc = escAmp
+		case '<':
+			esc = escLT
+		case '>':
+			esc = escGT
+		case '\t':
+			esc = escTab
+		case '\n':
+			// This always escapes newline, which is different than stdlib's optional
+			// escape of new line.
+			esc = escNL
+		case '\r':
+			esc = escCR
+		case '\u0085':
+			// Not escaped by stdlib
+			esc = escNextLine
+		case '\u2028':
+			// Not escaped by stdlib
+			esc = escLS
+		default:
+			if !isInCharacterRange(r) || (r == 0xFFFD && width == 1) {
+				esc = escFFFD
+				break
+			}
+			continue
+		}
+		e.Write(s[last : i-width])
+		e.Write(esc)
+		last = i
+	}
+	e.Write(s[last:])
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/encoding/xml/map.go 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/encoding/xml/map.go
--- 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/encoding/xml/map.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/encoding/xml/map.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,53 @@
+package xml
+
+// mapEntryWrapper is the default member wrapper start element for XML Map entry
+var mapEntryWrapper = StartElement{
+	Name: Name{Local: "entry"},
+}
+
+// Map represents the encoding of a XML map type
+type Map struct {
+	w       writer
+	scratch *[]byte
+
+	// member start element is the map entry wrapper start element
+	memberStartElement StartElement
+
+	// isFlattened returns true if the map is a flattened map
+	isFlattened bool
+}
+
+// newMap returns a map encoder which sets the default map
+// entry wrapper to `entry`.
+//
+// A map `someMap : {{key:"abc", value:"123"}}` is represented as
+// `<someMap><entry><key>abc<key><value>123</value></entry></someMap>`.
+func newMap(w writer, scratch *[]byte) *Map {
+	return &Map{
+		w:                  w,
+		scratch:            scratch,
+		memberStartElement: mapEntryWrapper,
+	}
+}
+
+// newFlattenedMap returns a map encoder which sets the map
+// entry wrapper to the passed in memberWrapper`.
+//
+// A flattened map `someMap : {{key:"abc", value:"123"}}` is represented as
+// `<someMap><key>abc<key><value>123</value></someMap>`.
+func newFlattenedMap(w writer, scratch *[]byte, memberWrapper StartElement) *Map {
+	return &Map{
+		w:                  w,
+		scratch:            scratch,
+		memberStartElement: memberWrapper,
+		isFlattened:        true,
+	}
+}
+
+// Entry returns a Value encoder with map's element.
+// It writes the member wrapper start tag for each entry.
+func (m *Map) Entry() Value {
+	v := newValue(m.w, m.scratch, m.memberStartElement)
+	v.isFlattened = m.isFlattened
+	return v
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/encoding/xml/value.go 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/encoding/xml/value.go
--- 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/encoding/xml/value.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/encoding/xml/value.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,302 @@
+package xml
+
+import (
+	"encoding/base64"
+	"fmt"
+	"math/big"
+	"strconv"
+
+	"github.com/aws/smithy-go/encoding"
+)
+
+// Value represents an XML Value type
+// XML Value types: Object, Array, Map, String, Number, Boolean.
+type Value struct {
+	w       writer
+	scratch *[]byte
+
+	// xml start element is the associated start element for the Value
+	startElement StartElement
+
+	// indicates if the Value represents a flattened shape
+	isFlattened bool
+}
+
+// newFlattenedValue returns a Value encoder. newFlattenedValue does NOT write the start element tag
+func newFlattenedValue(w writer, scratch *[]byte, startElement StartElement) Value {
+	return Value{
+		w:            w,
+		scratch:      scratch,
+		startElement: startElement,
+	}
+}
+
+// newValue writes the start element xml tag and returns a Value
+func newValue(w writer, scratch *[]byte, startElement StartElement) Value {
+	writeStartElement(w, startElement)
+	return Value{w: w, scratch: scratch, startElement: startElement}
+}
+
+// writeStartElement takes in a start element and writes it.
+// It handles namespace, attributes in start element.
+func writeStartElement(w writer, el StartElement) error {
+	if el.isZero() {
+		return fmt.Errorf("xml start element cannot be nil")
+	}
+
+	w.WriteRune(leftAngleBracket)
+
+	if len(el.Name.Space) != 0 {
+		escapeString(w, el.Name.Space)
+		w.WriteRune(colon)
+	}
+	escapeString(w, el.Name.Local)
+	for _, attr := range el.Attr {
+		w.WriteRune(' ')
+		writeAttribute(w, &attr)
+	}
+
+	w.WriteRune(rightAngleBracket)
+	return nil
+}
+
+// writeAttribute writes an attribute from a provided Attribute
+// For a namespace attribute, the attr.Name.Space must be defined as "xmlns".
+// https://www.w3.org/TR/REC-xml-names/#NT-DefaultAttName
+func writeAttribute(w writer, attr *Attr) {
+	// if local, space both are not empty
+	if len(attr.Name.Space) != 0 && len(attr.Name.Local) != 0 {
+		escapeString(w, attr.Name.Space)
+		w.WriteRune(colon)
+	}
+
+	// if prefix is empty, the default `xmlns` space should be used as prefix.
+	if len(attr.Name.Local) == 0 {
+		attr.Name.Local = attr.Name.Space
+	}
+
+	escapeString(w, attr.Name.Local)
+	w.WriteRune(equals)
+	w.WriteRune(quote)
+	escapeString(w, attr.Value)
+	w.WriteRune(quote)
+}
+
+// writeEndElement takes in a end element and writes it.
+func writeEndElement(w writer, el EndElement) error {
+	if el.isZero() {
+		return fmt.Errorf("xml end element cannot be nil")
+	}
+
+	w.WriteRune(leftAngleBracket)
+	w.WriteRune(forwardSlash)
+
+	if len(el.Name.Space) != 0 {
+		escapeString(w, el.Name.Space)
+		w.WriteRune(colon)
+	}
+	escapeString(w, el.Name.Local)
+	w.WriteRune(rightAngleBracket)
+
+	return nil
+}
+
+// String encodes v as a XML string.
+// It will auto close the parent xml element tag.
+func (xv Value) String(v string) {
+	escapeString(xv.w, v)
+	xv.Close()
+}
+
+// Byte encodes v as a XML number.
+// It will auto close the parent xml element tag.
+func (xv Value) Byte(v int8) {
+	xv.Long(int64(v))
+}
+
+// Short encodes v as a XML number.
+// It will auto close the parent xml element tag.
+func (xv Value) Short(v int16) {
+	xv.Long(int64(v))
+}
+
+// Integer encodes v as a XML number.
+// It will auto close the parent xml element tag.
+func (xv Value) Integer(v int32) {
+	xv.Long(int64(v))
+}
+
+// Long encodes v as a XML number.
+// It will auto close the parent xml element tag.
+func (xv Value) Long(v int64) {
+	*xv.scratch = strconv.AppendInt((*xv.scratch)[:0], v, 10)
+	xv.w.Write(*xv.scratch)
+
+	xv.Close()
+}
+
+// Float encodes v as a XML number.
+// It will auto close the parent xml element tag.
+func (xv Value) Float(v float32) {
+	xv.float(float64(v), 32)
+	xv.Close()
+}
+
+// Double encodes v as a XML number.
+// It will auto close the parent xml element tag.
+func (xv Value) Double(v float64) {
+	xv.float(v, 64)
+	xv.Close()
+}
+
+func (xv Value) float(v float64, bits int) {
+	*xv.scratch = encoding.EncodeFloat((*xv.scratch)[:0], v, bits)
+	xv.w.Write(*xv.scratch)
+}
+
+// Boolean encodes v as a XML boolean.
+// It will auto close the parent xml element tag.
+func (xv Value) Boolean(v bool) {
+	*xv.scratch = strconv.AppendBool((*xv.scratch)[:0], v)
+	xv.w.Write(*xv.scratch)
+
+	xv.Close()
+}
+
+// Base64EncodeBytes writes v as a base64 value in XML string.
+// It will auto close the parent xml element tag.
+func (xv Value) Base64EncodeBytes(v []byte) {
+	encodeByteSlice(xv.w, (*xv.scratch)[:0], v)
+	xv.Close()
+}
+
+// BigInteger encodes v big.Int as XML value.
+// It will auto close the parent xml element tag.
+func (xv Value) BigInteger(v *big.Int) {
+	xv.w.Write([]byte(v.Text(10)))
+	xv.Close()
+}
+
+// BigDecimal encodes v big.Float as XML value.
+// It will auto close the parent xml element tag.
+func (xv Value) BigDecimal(v *big.Float) {
+	if i, accuracy := v.Int64(); accuracy == big.Exact {
+		xv.Long(i)
+		return
+	}
+
+	xv.w.Write([]byte(v.Text('e', -1)))
+	xv.Close()
+}
+
+// Write writes v directly to the xml document
+// if escapeXMLText is set to true, write will escape text.
+// It will auto close the parent xml element tag.
+func (xv Value) Write(v []byte, escapeXMLText bool) {
+	// escape and write xml text
+	if escapeXMLText {
+		escapeText(xv.w, v)
+	} else {
+		// write xml directly
+		xv.w.Write(v)
+	}
+
+	xv.Close()
+}
+
+// MemberElement does member element encoding. It returns a Value.
+// Member Element method should be used for all shapes except flattened shapes.
+//
+// A call to MemberElement will write nested element tags directly using the
+// provided start element. The value returned by MemberElement should be closed.
+func (xv Value) MemberElement(element StartElement) Value {
+	return newValue(xv.w, xv.scratch, element)
+}
+
+// FlattenedElement returns flattened element encoding. It returns a Value.
+// This method should be used for flattened shapes.
+//
+// Unlike MemberElement, flattened element will NOT write element tags
+// directly for the associated start element.
+//
+// The value returned by the FlattenedElement does not need to be closed.
+func (xv Value) FlattenedElement(element StartElement) Value {
+	v := newFlattenedValue(xv.w, xv.scratch, element)
+	v.isFlattened = true
+	return v
+}
+
+// Array returns an array encoder. By default, the members of array are
+// wrapped with `<member>` element tag.
+// If value is marked as flattened, the start element is used to wrap the members instead of
+// the `<member>` element.
+func (xv Value) Array() *Array {
+	return newArray(xv.w, xv.scratch, arrayMemberWrapper, xv.startElement, xv.isFlattened)
+}
+
+/*
+ArrayWithCustomName returns an array encoder.
+
+It takes a named start element as an argument; the named start element will be used to wrap xml array entries.
+for eg, `<someList><customName>entry1</customName></someList>`
+Here `customName` named start element will be wrapped on each array member.
+*/
+func (xv Value) ArrayWithCustomName(element StartElement) *Array {
+	return newArray(xv.w, xv.scratch, element, xv.startElement, xv.isFlattened)
+}
+
+/*
+Map returns a map encoder. By default, the map entries are
+wrapped with `<entry>` element tag.
+
+If value is marked as flattened, the start element is used to wrap the entry instead of
+the `<entry>` element.
+*/
+func (xv Value) Map() *Map {
+	// flattened map
+	if xv.isFlattened {
+		return newFlattenedMap(xv.w, xv.scratch, xv.startElement)
+	}
+
+	// un-flattened map
+	return newMap(xv.w, xv.scratch)
+}
+
+// encodeByteSlice is modified copy of json encoder's encodeByteSlice.
+// It is used to base64 encode a byte slice.
+func encodeByteSlice(w writer, scratch []byte, v []byte) {
+	if v == nil {
+		return
+	}
+
+	encodedLen := base64.StdEncoding.EncodedLen(len(v))
+	if encodedLen <= len(scratch) {
+		// If the encoded bytes fit in e.scratch, avoid an extra
+		// allocation and use the cheaper Encoding.Encode.
+		dst := scratch[:encodedLen]
+		base64.StdEncoding.Encode(dst, v)
+		w.Write(dst)
+	} else if encodedLen <= 1024 {
+		// The encoded bytes are short enough to allocate for, and
+		// Encoding.Encode is still cheaper.
+		dst := make([]byte, encodedLen)
+		base64.StdEncoding.Encode(dst, v)
+		w.Write(dst)
+	} else {
+		// The encoded bytes are too long to cheaply allocate, and
+		// Encoding.Encode is no longer noticeably cheaper.
+		enc := base64.NewEncoder(base64.StdEncoding, w)
+		enc.Write(v)
+		enc.Close()
+	}
+}
+
+// IsFlattened returns true if value is for flattened shape.
+func (xv Value) IsFlattened() bool {
+	return xv.isFlattened
+}
+
+// Close closes the value.
+func (xv Value) Close() {
+	writeEndElement(xv.w, xv.startElement.End())
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/encoding/xml/xml_decoder.go 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/encoding/xml/xml_decoder.go
--- 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/encoding/xml/xml_decoder.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/encoding/xml/xml_decoder.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,154 @@
+package xml
+
+import (
+	"encoding/xml"
+	"fmt"
+	"strings"
+)
+
+// NodeDecoder is a XML decoder wrapper that is responsible to decoding
+// a single XML Node element and it's nested member elements. This wrapper decoder
+// takes in the start element of the top level node being decoded.
+type NodeDecoder struct {
+	Decoder *xml.Decoder
+	StartEl xml.StartElement
+}
+
+// WrapNodeDecoder returns an initialized XMLNodeDecoder
+func WrapNodeDecoder(decoder *xml.Decoder, startEl xml.StartElement) NodeDecoder {
+	return NodeDecoder{
+		Decoder: decoder,
+		StartEl: startEl,
+	}
+}
+
+// Token on a Node Decoder returns a xml StartElement. It returns a boolean that indicates the
+// a token is the node decoder's end node token; and an error which indicates any error
+// that occurred while retrieving the start element
+func (d NodeDecoder) Token() (t xml.StartElement, done bool, err error) {
+	for {
+		token, e := d.Decoder.Token()
+		if e != nil {
+			return t, done, e
+		}
+
+		// check if we reach end of the node being decoded
+		if el, ok := token.(xml.EndElement); ok {
+			return t, el == d.StartEl.End(), err
+		}
+
+		if t, ok := token.(xml.StartElement); ok {
+			return restoreAttrNamespaces(t), false, err
+		}
+
+		// skip token if it is a comment or preamble or empty space value due to indentation
+		// or if it's a value and is not expected
+	}
+}
+
+// restoreAttrNamespaces update XML attributes to restore the short namespaces found within
+// the raw XML document.
+func restoreAttrNamespaces(node xml.StartElement) xml.StartElement {
+	if len(node.Attr) == 0 {
+		return node
+	}
+
+	// Generate a mapping of XML namespace values to their short names.
+	ns := map[string]string{}
+	for _, a := range node.Attr {
+		if a.Name.Space == "xmlns" {
+			ns[a.Value] = a.Name.Local
+			break
+		}
+	}
+
+	for i, a := range node.Attr {
+		if a.Name.Space == "xmlns" {
+			continue
+		}
+		// By default, xml.Decoder will fully resolve these namespaces. So if you had <foo xmlns:bar=baz bar:bin=hi/>
+		// then by default the second attribute would have the `Name.Space` resolved to `baz`. But we need it to
+		// continue to resolve as `bar` so we can easily identify it later on.
+		if v, ok := ns[node.Attr[i].Name.Space]; ok {
+			node.Attr[i].Name.Space = v
+		}
+	}
+	return node
+}
+
+// GetElement looks for the given tag name at the current level, and returns the element if found, and
+// skipping over non-matching elements. Returns an error if the node is not found, or if an error occurs while walking
+// the document.
+func (d NodeDecoder) GetElement(name string) (t xml.StartElement, err error) {
+	for {
+		token, done, err := d.Token()
+		if err != nil {
+			return t, err
+		}
+		if done {
+			return t, fmt.Errorf("%s node not found", name)
+		}
+		switch {
+		case strings.EqualFold(name, token.Name.Local):
+			return token, nil
+		default:
+			err = d.Decoder.Skip()
+			if err != nil {
+				return t, err
+			}
+		}
+	}
+}
+
+// Value provides an abstraction to retrieve char data value within an xml element.
+// The method will return an error if it encounters a nested xml element instead of char data.
+// This method should only be used to retrieve simple type or blob shape values as []byte.
+func (d NodeDecoder) Value() (c []byte, err error) {
+	t, e := d.Decoder.Token()
+	if e != nil {
+		return c, e
+	}
+
+	endElement := d.StartEl.End()
+
+	switch ev := t.(type) {
+	case xml.CharData:
+		c = ev.Copy()
+	case xml.EndElement: // end tag or self-closing
+		if ev == endElement {
+			return []byte{}, err
+		}
+		return c, fmt.Errorf("expected value for %v element, got %T type %v instead", d.StartEl.Name.Local, t, t)
+	default:
+		return c, fmt.Errorf("expected value for %v element, got %T type %v instead", d.StartEl.Name.Local, t, t)
+	}
+
+	t, e = d.Decoder.Token()
+	if e != nil {
+		return c, e
+	}
+
+	if ev, ok := t.(xml.EndElement); ok {
+		if ev == endElement {
+			return c, err
+		}
+	}
+
+	return c, fmt.Errorf("expected end element %v, got %T type %v instead", endElement, t, t)
+}
+
+// FetchRootElement takes in a decoder and returns the first start element within the xml body.
+// This function is useful in fetching the start element of an XML response and ignore the
+// comments and preamble
+func FetchRootElement(decoder *xml.Decoder) (startElement xml.StartElement, err error) {
+	for {
+		t, e := decoder.Token()
+		if e != nil {
+			return startElement, e
+		}
+
+		if startElement, ok := t.(xml.StartElement); ok {
+			return startElement, err
+		}
+	}
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/endpoints/endpoint.go 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/endpoints/endpoint.go
--- 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/endpoints/endpoint.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/endpoints/endpoint.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,23 @@
+package transport
+
+import (
+	"net/http"
+	"net/url"
+
+	"github.com/aws/smithy-go"
+)
+
+// Endpoint is the endpoint object returned by Endpoint resolution V2
+type Endpoint struct {
+	// The complete URL, minimally specifying the scheme and host.
+	// May optionally specify the port and base path component.
+	URI url.URL
+
+	// An optional set of headers to be sent using transport layer headers.
+	Headers http.Header
+
+	// A grab-bag property map of endpoint attributes. The
+	// values present here are subject to change, or being add/removed at any
+	// time.
+	Properties smithy.Properties
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/errors.go 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/errors.go
--- 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/errors.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/errors.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,137 @@
+package smithy
+
+import "fmt"
+
+// APIError provides the generic API and protocol agnostic error type all SDK
+// generated exception types will implement.
+type APIError interface {
+	error
+
+	// ErrorCode returns the error code for the API exception.
+	ErrorCode() string
+	// ErrorMessage returns the error message for the API exception.
+	ErrorMessage() string
+	// ErrorFault returns the fault for the API exception.
+	ErrorFault() ErrorFault
+}
+
+// GenericAPIError provides a generic concrete API error type that SDKs can use
+// to deserialize error responses into. Should be used for unmodeled or untyped
+// errors.
+type GenericAPIError struct {
+	Code    string
+	Message string
+	Fault   ErrorFault
+}
+
+// ErrorCode returns the error code for the API exception.
+func (e *GenericAPIError) ErrorCode() string { return e.Code }
+
+// ErrorMessage returns the error message for the API exception.
+func (e *GenericAPIError) ErrorMessage() string { return e.Message }
+
+// ErrorFault returns the fault for the API exception.
+func (e *GenericAPIError) ErrorFault() ErrorFault { return e.Fault }
+
+func (e *GenericAPIError) Error() string {
+	return fmt.Sprintf("api error %s: %s", e.Code, e.Message)
+}
+
+var _ APIError = (*GenericAPIError)(nil)
+
+// OperationError decorates an underlying error which occurred while invoking
+// an operation with names of the operation and API.
+type OperationError struct {
+	ServiceID     string
+	OperationName string
+	Err           error
+}
+
+// Service returns the name of the API service the error occurred with.
+func (e *OperationError) Service() string { return e.ServiceID }
+
+// Operation returns the name of the API operation the error occurred with.
+func (e *OperationError) Operation() string { return e.OperationName }
+
+// Unwrap returns the nested error if any, or nil.
+func (e *OperationError) Unwrap() error { return e.Err }
+
+func (e *OperationError) Error() string {
+	return fmt.Sprintf("operation error %s: %s, %v", e.ServiceID, e.OperationName, e.Err)
+}
+
+// DeserializationError provides a wrapper for an error that occurs during
+// deserialization.
+type DeserializationError struct {
+	Err      error //  original error
+	Snapshot []byte
+}
+
+// Error returns a formatted error for DeserializationError
+func (e *DeserializationError) Error() string {
+	const msg = "deserialization failed"
+	if e.Err == nil {
+		return msg
+	}
+	return fmt.Sprintf("%s, %v", msg, e.Err)
+}
+
+// Unwrap returns the underlying Error in DeserializationError
+func (e *DeserializationError) Unwrap() error { return e.Err }
+
+// ErrorFault provides the type for a Smithy API error fault.
+type ErrorFault int
+
+// ErrorFault enumeration values
+const (
+	FaultUnknown ErrorFault = iota
+	FaultServer
+	FaultClient
+)
+
+func (f ErrorFault) String() string {
+	switch f {
+	case FaultServer:
+		return "server"
+	case FaultClient:
+		return "client"
+	default:
+		return "unknown"
+	}
+}
+
+// SerializationError represents an error that occurred while attempting to serialize a request
+type SerializationError struct {
+	Err error // original error
+}
+
+// Error returns a formatted error for SerializationError
+func (e *SerializationError) Error() string {
+	const msg = "serialization failed"
+	if e.Err == nil {
+		return msg
+	}
+	return fmt.Sprintf("%s: %v", msg, e.Err)
+}
+
+// Unwrap returns the underlying Error in SerializationError
+func (e *SerializationError) Unwrap() error { return e.Err }
+
+// CanceledError is the error that will be returned by an API request that was
+// canceled. API operations given a Context may return this error when
+// canceled.
+type CanceledError struct {
+	Err error
+}
+
+// CanceledError returns true to satisfy interfaces checking for canceled errors.
+func (*CanceledError) CanceledError() bool { return true }
+
+// Unwrap returns the underlying error, if there was one.
+func (e *CanceledError) Unwrap() error {
+	return e.Err
+}
+
+func (e *CanceledError) Error() string {
+	return fmt.Sprintf("canceled, %v", e.Err)
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/go_module_metadata.go 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/go_module_metadata.go
--- 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/go_module_metadata.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/go_module_metadata.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,6 @@
+// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT.
+
+package smithy
+
+// goModuleVersion is the tagged release for this module
+const goModuleVersion = "1.20.3"
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/internal/sync/singleflight/LICENSE 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/internal/sync/singleflight/LICENSE
--- 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/internal/sync/singleflight/LICENSE	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/internal/sync/singleflight/LICENSE	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,28 @@
+Copyright (c) 2009 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+   * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+   * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/internal/sync/singleflight/docs.go 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/internal/sync/singleflight/docs.go
--- 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/internal/sync/singleflight/docs.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/internal/sync/singleflight/docs.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,8 @@
+// Package singleflight provides a duplicate function call suppression
+// mechanism. This package is a fork of the Go golang.org/x/sync/singleflight
+// package. The package is forked, because the package is a part of the
+// unstable and unversioned golang.org/x/sync module.
+//
+// https://github.com/golang/sync/tree/67f06af15bc961c363a7260195bcd53487529a21/singleflight
+
+package singleflight
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/internal/sync/singleflight/singleflight.go 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/internal/sync/singleflight/singleflight.go
--- 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/internal/sync/singleflight/singleflight.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/internal/sync/singleflight/singleflight.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,210 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package singleflight
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"runtime"
+	"runtime/debug"
+	"sync"
+)
+
+// errGoexit indicates the runtime.Goexit was called in
+// the user given function.
+var errGoexit = errors.New("runtime.Goexit was called")
+
+// A panicError is an arbitrary value recovered from a panic
+// with the stack trace during the execution of given function.
+type panicError struct {
+	value interface{}
+	stack []byte
+}
+
+// Error implements error interface.
+func (p *panicError) Error() string {
+	return fmt.Sprintf("%v\n\n%s", p.value, p.stack)
+}
+
+func newPanicError(v interface{}) error {
+	stack := debug.Stack()
+
+	// The first line of the stack trace is of the form "goroutine N [status]:"
+	// but by the time the panic reaches Do the goroutine may no longer exist
+	// and its status will have changed. Trim out the misleading line.
+	if line := bytes.IndexByte(stack[:], '\n'); line >= 0 {
+		stack = stack[line+1:]
+	}
+	return &panicError{value: v, stack: stack}
+}
+
+// call is an in-flight or completed singleflight.Do call
+type call struct {
+	wg sync.WaitGroup
+
+	// These fields are written once before the WaitGroup is done
+	// and are only read after the WaitGroup is done.
+	val interface{}
+	err error
+
+	// forgotten indicates whether Forget was called with this call's key
+	// while the call was still in flight.
+	forgotten bool
+
+	// These fields are read and written with the singleflight
+	// mutex held before the WaitGroup is done, and are read but
+	// not written after the WaitGroup is done.
+	dups  int
+	chans []chan<- Result
+}
+
+// Group represents a class of work and forms a namespace in
+// which units of work can be executed with duplicate suppression.
+type Group struct {
+	mu sync.Mutex       // protects m
+	m  map[string]*call // lazily initialized
+}
+
+// Result holds the results of Do, so they can be passed
+// on a channel.
+type Result struct {
+	Val    interface{}
+	Err    error
+	Shared bool
+}
+
+// Do executes and returns the results of the given function, making
+// sure that only one execution is in-flight for a given key at a
+// time. If a duplicate comes in, the duplicate caller waits for the
+// original to complete and receives the same results.
+// The return value shared indicates whether v was given to multiple callers.
+func (g *Group) Do(key string, fn func() (interface{}, error)) (v interface{}, err error, shared bool) {
+	g.mu.Lock()
+	if g.m == nil {
+		g.m = make(map[string]*call)
+	}
+	if c, ok := g.m[key]; ok {
+		c.dups++
+		g.mu.Unlock()
+		c.wg.Wait()
+
+		if e, ok := c.err.(*panicError); ok {
+			panic(e)
+		} else if c.err == errGoexit {
+			runtime.Goexit()
+		}
+		return c.val, c.err, true
+	}
+	c := new(call)
+	c.wg.Add(1)
+	g.m[key] = c
+	g.mu.Unlock()
+
+	g.doCall(c, key, fn)
+	return c.val, c.err, c.dups > 0
+}
+
+// DoChan is like Do but returns a channel that will receive the
+// results when they are ready.
+//
+// The returned channel will not be closed.
+func (g *Group) DoChan(key string, fn func() (interface{}, error)) <-chan Result {
+	ch := make(chan Result, 1)
+	g.mu.Lock()
+	if g.m == nil {
+		g.m = make(map[string]*call)
+	}
+	if c, ok := g.m[key]; ok {
+		c.dups++
+		c.chans = append(c.chans, ch)
+		g.mu.Unlock()
+		return ch
+	}
+	c := &call{chans: []chan<- Result{ch}}
+	c.wg.Add(1)
+	g.m[key] = c
+	g.mu.Unlock()
+
+	go g.doCall(c, key, fn)
+
+	return ch
+}
+
+// doCall handles the single call for a key.
+func (g *Group) doCall(c *call, key string, fn func() (interface{}, error)) {
+	normalReturn := false
+	recovered := false
+
+	// use double-defer to distinguish panic from runtime.Goexit,
+	// more details see https://golang.org/cl/134395
+	defer func() {
+		// the given function invoked runtime.Goexit
+		if !normalReturn && !recovered {
+			c.err = errGoexit
+		}
+
+		c.wg.Done()
+		g.mu.Lock()
+		defer g.mu.Unlock()
+		if !c.forgotten {
+			delete(g.m, key)
+		}
+
+		if e, ok := c.err.(*panicError); ok {
+			// In order to prevent the waiting channels from being blocked forever,
+			// needs to ensure that this panic cannot be recovered.
+			if len(c.chans) > 0 {
+				go panic(e)
+				select {} // Keep this goroutine around so that it will appear in the crash dump.
+			} else {
+				panic(e)
+			}
+		} else if c.err == errGoexit {
+			// Already in the process of goexit, no need to call again
+		} else {
+			// Normal return
+			for _, ch := range c.chans {
+				ch <- Result{c.val, c.err, c.dups > 0}
+			}
+		}
+	}()
+
+	func() {
+		defer func() {
+			if !normalReturn {
+				// Ideally, we would wait to take a stack trace until we've determined
+				// whether this is a panic or a runtime.Goexit.
+				//
+				// Unfortunately, the only way we can distinguish the two is to see
+				// whether the recover stopped the goroutine from terminating, and by
+				// the time we know that, the part of the stack trace relevant to the
+				// panic has been discarded.
+				if r := recover(); r != nil {
+					c.err = newPanicError(r)
+				}
+			}
+		}()
+
+		c.val, c.err = fn()
+		normalReturn = true
+	}()
+
+	if !normalReturn {
+		recovered = true
+	}
+}
+
+// Forget tells the singleflight to forget about a key.  Future calls
+// to Do for this key will call the function rather than waiting for
+// an earlier call to complete.
+func (g *Group) Forget(key string) {
+	g.mu.Lock()
+	if c, ok := g.m[key]; ok {
+		c.forgotten = true
+	}
+	delete(g.m, key)
+	g.mu.Unlock()
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/io/byte.go 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/io/byte.go
--- 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/io/byte.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/io/byte.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,12 @@
+package io
+
+const (
+	// Byte is 8 bits
+	Byte int64 = 1
+	// KibiByte (KiB) is 1024 Bytes
+	KibiByte = Byte * 1024
+	// MebiByte (MiB) is 1024 KiB
+	MebiByte = KibiByte * 1024
+	// GibiByte (GiB) is 1024 MiB
+	GibiByte = MebiByte * 1024
+)
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/io/doc.go 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/io/doc.go
--- 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/io/doc.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/io/doc.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,2 @@
+// Package io provides utilities for Smithy generated API clients.
+package io
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/io/reader.go 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/io/reader.go
--- 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/io/reader.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/io/reader.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,16 @@
+package io
+
+import (
+	"io"
+)
+
+// ReadSeekNopCloser wraps an io.ReadSeeker with an additional Close method
+// that does nothing.
+type ReadSeekNopCloser struct {
+	io.ReadSeeker
+}
+
+// Close does nothing.
+func (ReadSeekNopCloser) Close() error {
+	return nil
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/io/ringbuffer.go 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/io/ringbuffer.go
--- 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/io/ringbuffer.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/io/ringbuffer.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,94 @@
+package io
+
+import (
+	"bytes"
+	"io"
+)
+
+// RingBuffer struct satisfies io.ReadWrite interface.
+//
+// ReadBuffer is a revolving buffer data structure, which can be used to store snapshots of data in a
+// revolving window.
+type RingBuffer struct {
+	slice []byte
+	start int
+	end   int
+	size  int
+}
+
+// NewRingBuffer method takes in a byte slice as an input and returns a RingBuffer.
+func NewRingBuffer(slice []byte) *RingBuffer {
+	ringBuf := RingBuffer{
+		slice: slice,
+	}
+	return &ringBuf
+}
+
+// Write method inserts the elements in a byte slice, and returns the number of bytes written along with any error.
+func (r *RingBuffer) Write(p []byte) (int, error) {
+	for _, b := range p {
+		// check if end points to invalid index, we need to circle back
+		if r.end == len(r.slice) {
+			r.end = 0
+		}
+		// check if start points to invalid index, we need to circle back
+		if r.start == len(r.slice) {
+			r.start = 0
+		}
+		// if ring buffer is filled, increment the start index
+		if r.size == len(r.slice) {
+			r.size--
+			r.start++
+		}
+
+		r.slice[r.end] = b
+		r.end++
+		r.size++
+	}
+	return len(p), nil
+}
+
+// Read copies the data on the ring buffer into the byte slice provided to the method.
+// Returns the read count along with any error encountered while reading.
+func (r *RingBuffer) Read(p []byte) (int, error) {
+	// readCount keeps track of the number of bytes read
+	var readCount int
+	for j := 0; j < len(p); j++ {
+		// if ring buffer is empty or completely read
+		// return EOF error.
+		if r.size == 0 {
+			return readCount, io.EOF
+		}
+
+		if r.start == len(r.slice) {
+			r.start = 0
+		}
+
+		p[j] = r.slice[r.start]
+		readCount++
+		// increment the start pointer for ring buffer
+		r.start++
+		// decrement the size of ring buffer
+		r.size--
+	}
+	return readCount, nil
+}
+
+// Len returns the number of unread bytes in the buffer.
+func (r *RingBuffer) Len() int {
+	return r.size
+}
+
+// Bytes returns a copy of the RingBuffer's bytes.
+func (r RingBuffer) Bytes() []byte {
+	var b bytes.Buffer
+	io.Copy(&b, &r)
+	return b.Bytes()
+}
+
+// Reset resets the ring buffer.
+func (r *RingBuffer) Reset() {
+	*r = RingBuffer{
+		slice: r.slice,
+	}
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/local-mod-replace.sh 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/local-mod-replace.sh
--- 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/local-mod-replace.sh	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/local-mod-replace.sh	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,39 @@
+#!/usr/bin/env bash
+
+PROJECT_DIR=""
+SMITHY_SOURCE_DIR=$(cd "$(dirname "$0")" && pwd)
+
+usage() {
+  echo "Usage: $0 [-s SMITHY_SOURCE_DIR] [-d PROJECT_DIR]" 1>&2
+  exit 1
+}
+
+while getopts "hs:d:" options; do
+  case "${options}" in
+  s)
+    SMITHY_SOURCE_DIR=${OPTARG}
+    if [ "$SMITHY_SOURCE_DIR" == "" ]; then
+      echo "path to smithy-go source directory is required" 1>&2
+      usage
+    fi
+    ;;
+  d)
+    PROJECT_DIR=${OPTARG}
+    ;;
+  h)
+    usage
+    ;;
+  *)
+    usage
+    ;;
+  esac
+done
+
+if [ "$PROJECT_DIR" != "" ]; then
+  cd "$PROJECT_DIR" || exit
+fi
+
+go mod graph | awk '{print $1}' | cut -d '@' -f 1 | sort | uniq | grep "github.com/aws/smithy-go" | while read x; do
+  repPath=${x/github.com\/aws\/smithy-go/${SMITHY_SOURCE_DIR}}
+  echo -replace $x=$repPath
+done | xargs go mod edit
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/logging/logger.go 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/logging/logger.go
--- 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/logging/logger.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/logging/logger.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,82 @@
+package logging
+
+import (
+	"context"
+	"io"
+	"log"
+)
+
+// Classification is the type of the log entry's classification name.
+type Classification string
+
+// Set of standard classifications that can be used by clients and middleware
+const (
+	Warn  Classification = "WARN"
+	Debug Classification = "DEBUG"
+)
+
+// Logger is an interface for logging entries at certain classifications.
+type Logger interface {
+	// Logf is expected to support the standard fmt package "verbs".
+	Logf(classification Classification, format string, v ...interface{})
+}
+
+// LoggerFunc is a wrapper around a function to satisfy the Logger interface.
+type LoggerFunc func(classification Classification, format string, v ...interface{})
+
+// Logf delegates the logging request to the wrapped function.
+func (f LoggerFunc) Logf(classification Classification, format string, v ...interface{}) {
+	f(classification, format, v...)
+}
+
+// ContextLogger is an optional interface a Logger implementation may expose that provides
+// the ability to create context aware log entries.
+type ContextLogger interface {
+	WithContext(context.Context) Logger
+}
+
+// WithContext will pass the provided context to logger if it implements the ContextLogger interface and return the resulting
+// logger. Otherwise the logger will be returned as is. As a special case if a nil logger is provided, a Nop logger will
+// be returned to the caller.
+func WithContext(ctx context.Context, logger Logger) Logger {
+	if logger == nil {
+		return Nop{}
+	}
+
+	cl, ok := logger.(ContextLogger)
+	if !ok {
+		return logger
+	}
+
+	return cl.WithContext(ctx)
+}
+
+// Nop is a Logger implementation that simply does not perform any logging.
+type Nop struct{}
+
+// Logf simply returns without performing any action
+func (n Nop) Logf(Classification, string, ...interface{}) {
+	return
+}
+
+// StandardLogger is a Logger implementation that wraps the standard library logger, and delegates logging to its
+// Printf method.
+type StandardLogger struct {
+	Logger *log.Logger
+}
+
+// Logf logs the given classification and message to the underlying logger.
+func (s StandardLogger) Logf(classification Classification, format string, v ...interface{}) {
+	if len(classification) != 0 {
+		format = string(classification) + " " + format
+	}
+
+	s.Logger.Printf(format, v...)
+}
+
+// NewStandardLogger returns a new StandardLogger
+func NewStandardLogger(writer io.Writer) *StandardLogger {
+	return &StandardLogger{
+		Logger: log.New(writer, "SDK ", log.LstdFlags),
+	}
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/middleware/doc.go 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/middleware/doc.go
--- 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/middleware/doc.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/middleware/doc.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,67 @@
+// Package middleware provides transport agnostic middleware for decorating SDK
+// handlers.
+//
+// The Smithy middleware stack provides ordered behavior to be invoked on an
+// underlying handler. The stack is separated into steps that are invoked in a
+// static order. A step is a collection of middleware that are injected into a
+// ordered list defined by the user. The user may add, insert, swap, and remove a
+// step's middleware. When the stack is invoked the step middleware become static,
+// and their order cannot be modified.
+//
+// A stack and its step middleware are **not** safe to modify concurrently.
+//
+// A stack will use the ordered list of middleware to decorate a underlying
+// handler. A handler could be something like an HTTP Client that round trips an
+// API operation over HTTP.
+//
+// Smithy Middleware Stack
+//
+// A Stack is a collection of middleware that wrap a handler. The stack can be
+// broken down into discrete steps. Each step may contain zero or more middleware
+// specific to that stack's step.
+//
+// A Stack Step is a predefined set of middleware that are invoked in a static
+// order by the Stack. These steps represent fixed points in the middleware stack
+// for organizing specific behavior, such as serialize and build. A Stack Step is
+// composed of zero or more middleware that are specific to that step. A step may
+// define its own set of input/output parameters the generic input/output
+// parameters are cast from. A step calls its middleware recursively, before
+// calling the next step in the stack returning the result or error of the step
+// middleware decorating the underlying handler.
+//
+// * Initialize: Prepares the input, and sets any default parameters as needed,
+// (e.g. idempotency token, and presigned URLs).
+//
+// * Serialize: Serializes the prepared input into a data structure that can be
+// consumed by the target transport's message, (e.g. REST-JSON serialization).
+//
+// * Build: Adds additional metadata to the serialized transport message, (e.g.
+// HTTP's Content-Length header, or body checksum). Decorations and
+// modifications to the message should be copied to all message attempts.
+//
+// * Finalize: Performs final preparations needed before sending the message. The
+// message should already be complete by this stage, and is only alternated to
+// meet the expectations of the recipient, (e.g. Retry and AWS SigV4 request
+// signing).
+//
+// * Deserialize: Reacts to the handler's response returned by the recipient of
+// the request message. Deserializes the response into a structured type or
+// error above stacks can react to.
+//
+// Adding Middleware to a Stack Step
+//
+// Middleware can be added to a step front or back, or relative, by name, to an
+// existing middleware in that stack. If a middleware does not have a name, a
+// unique name will be generated at the middleware and be added to the step.
+//
+//     // Create middleware stack
+//     stack := middleware.NewStack()
+//
+//     // Add middleware to stack steps
+//     stack.Initialize.Add(paramValidationMiddleware, middleware.After)
+//     stack.Serialize.Add(marshalOperationFoo, middleware.After)
+//     stack.Deserialize.Add(unmarshalOperationFoo, middleware.After)
+//
+//     // Invoke middleware on handler.
+//     resp, err := stack.HandleMiddleware(ctx, req.Input, clientHandler)
+package middleware
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/middleware/logging.go 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/middleware/logging.go
--- 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/middleware/logging.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/middleware/logging.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,46 @@
+package middleware
+
+import (
+	"context"
+
+	"github.com/aws/smithy-go/logging"
+)
+
+// loggerKey is the context value key for which the logger is associated with.
+type loggerKey struct{}
+
+// GetLogger takes a context to retrieve a Logger from. If no logger is present on the context a logging.Nop logger
+// is returned. If the logger retrieved from context supports the ContextLogger interface, the context will be passed
+// to the WithContext method and the resulting logger will be returned. Otherwise the stored logger is returned as is.
+func GetLogger(ctx context.Context) logging.Logger {
+	logger, ok := ctx.Value(loggerKey{}).(logging.Logger)
+	if !ok || logger == nil {
+		return logging.Nop{}
+	}
+
+	return logging.WithContext(ctx, logger)
+}
+
+// SetLogger sets the provided logger value on the provided ctx.
+func SetLogger(ctx context.Context, logger logging.Logger) context.Context {
+	return context.WithValue(ctx, loggerKey{}, logger)
+}
+
+type setLogger struct {
+	Logger logging.Logger
+}
+
+// AddSetLoggerMiddleware adds a middleware that will add the provided logger to the middleware context.
+func AddSetLoggerMiddleware(stack *Stack, logger logging.Logger) error {
+	return stack.Initialize.Add(&setLogger{Logger: logger}, After)
+}
+
+func (a *setLogger) ID() string {
+	return "SetLogger"
+}
+
+func (a *setLogger) HandleInitialize(ctx context.Context, in InitializeInput, next InitializeHandler) (
+	out InitializeOutput, metadata Metadata, err error,
+) {
+	return next.HandleInitialize(SetLogger(ctx, a.Logger), in)
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/middleware/metadata.go 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/middleware/metadata.go
--- 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/middleware/metadata.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/middleware/metadata.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,65 @@
+package middleware
+
+// MetadataReader provides an interface for reading metadata from the
+// underlying metadata container.
+type MetadataReader interface {
+	Get(key interface{}) interface{}
+}
+
+// Metadata provides storing and reading metadata values. Keys may be any
+// comparable value type. Get and set will panic if key is not a comparable
+// value type.
+//
+// Metadata uses lazy initialization, and Set method must be called as an
+// addressable value, or pointer. Not doing so may cause key/value pair to not
+// be set.
+type Metadata struct {
+	values map[interface{}]interface{}
+}
+
+// Get attempts to retrieve the value the key points to. Returns nil if the
+// key was not found.
+//
+// Panics if key type is not comparable.
+func (m Metadata) Get(key interface{}) interface{} {
+	return m.values[key]
+}
+
+// Clone creates a shallow copy of Metadata entries, returning a new Metadata
+// value with the original entries copied into it.
+func (m Metadata) Clone() Metadata {
+	vs := make(map[interface{}]interface{}, len(m.values))
+	for k, v := range m.values {
+		vs[k] = v
+	}
+
+	return Metadata{
+		values: vs,
+	}
+}
+
+// Set stores the value pointed to by the key. If a value already exists at
+// that key it will be replaced with the new value.
+//
+// Set method must be called as an addressable value, or pointer. If Set is not
+// called as an addressable value or pointer, the key value pair being set may
+// be lost.
+//
+// Panics if the key type is not comparable.
+func (m *Metadata) Set(key, value interface{}) {
+	if m.values == nil {
+		m.values = map[interface{}]interface{}{}
+	}
+	m.values[key] = value
+}
+
+// Has returns whether the key exists in the metadata.
+//
+// Panics if the key type is not comparable.
+func (m Metadata) Has(key interface{}) bool {
+	if m.values == nil {
+		return false
+	}
+	_, ok := m.values[key]
+	return ok
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/middleware/middleware.go 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/middleware/middleware.go
--- 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/middleware/middleware.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/middleware/middleware.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,71 @@
+package middleware
+
+import (
+	"context"
+)
+
+// Handler provides the interface for performing the logic to obtain an output,
+// or error for the given input.
+type Handler interface {
+	// Handle performs logic to obtain an output for the given input. Handler
+	// should be decorated with middleware to perform input specific behavior.
+	Handle(ctx context.Context, input interface{}) (
+		output interface{}, metadata Metadata, err error,
+	)
+}
+
+// HandlerFunc provides a wrapper around a function pointer to be used as a
+// middleware handler.
+type HandlerFunc func(ctx context.Context, input interface{}) (
+	output interface{}, metadata Metadata, err error,
+)
+
+// Handle invokes the underlying function, returning the result.
+func (fn HandlerFunc) Handle(ctx context.Context, input interface{}) (
+	output interface{}, metadata Metadata, err error,
+) {
+	return fn(ctx, input)
+}
+
+// Middleware provides the interface to call handlers in a chain.
+type Middleware interface {
+	// ID provides a unique identifier for the middleware.
+	ID() string
+
+	// Performs the middleware's handling of the input, returning the output,
+	// or error. The middleware can invoke the next Handler if handling should
+	// continue.
+	HandleMiddleware(ctx context.Context, input interface{}, next Handler) (
+		output interface{}, metadata Metadata, err error,
+	)
+}
+
+// decoratedHandler wraps a middleware in order to call the next handler in
+// the chain.
+type decoratedHandler struct {
+	// The next handler to be called.
+	Next Handler
+
+	// The current middleware decorating the handler.
+	With Middleware
+}
+
+// Handle implements the Handler interface to handle an operation invocation.
+func (m decoratedHandler) Handle(ctx context.Context, input interface{}) (
+	output interface{}, metadata Metadata, err error,
+) {
+	return m.With.HandleMiddleware(ctx, input, m.Next)
+}
+
+// DecorateHandler decorates a handler with a middleware. Wrapping the handler
+// with the middleware.
+func DecorateHandler(h Handler, with ...Middleware) Handler {
+	for i := len(with) - 1; i >= 0; i-- {
+		h = decoratedHandler{
+			Next: h,
+			With: with[i],
+		}
+	}
+
+	return h
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/middleware/ordered_group.go 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/middleware/ordered_group.go
--- 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/middleware/ordered_group.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/middleware/ordered_group.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,268 @@
+package middleware
+
+import "fmt"
+
+// RelativePosition provides specifying the relative position of a middleware
+// in an ordered group.
+type RelativePosition int
+
+// Relative position for middleware in steps.
+const (
+	After RelativePosition = iota
+	Before
+)
+
+type ider interface {
+	ID() string
+}
+
+// orderedIDs provides an ordered collection of items with relative ordering
+// by name.
+type orderedIDs struct {
+	order *relativeOrder
+	items map[string]ider
+}
+
+const baseOrderedItems = 5
+
+func newOrderedIDs() *orderedIDs {
+	return &orderedIDs{
+		order: newRelativeOrder(),
+		items: make(map[string]ider, baseOrderedItems),
+	}
+}
+
+// Add injects the item to the relative position of the item group. Returns an
+// error if the item already exists.
+func (g *orderedIDs) Add(m ider, pos RelativePosition) error {
+	id := m.ID()
+	if len(id) == 0 {
+		return fmt.Errorf("empty ID, ID must not be empty")
+	}
+
+	if err := g.order.Add(pos, id); err != nil {
+		return err
+	}
+
+	g.items[id] = m
+	return nil
+}
+
+// Insert injects the item relative to an existing item id. Returns an error if
+// the original item does not exist, or the item being added already exists.
+func (g *orderedIDs) Insert(m ider, relativeTo string, pos RelativePosition) error {
+	if len(m.ID()) == 0 {
+		return fmt.Errorf("insert ID must not be empty")
+	}
+	if len(relativeTo) == 0 {
+		return fmt.Errorf("relative to ID must not be empty")
+	}
+
+	if err := g.order.Insert(relativeTo, pos, m.ID()); err != nil {
+		return err
+	}
+
+	g.items[m.ID()] = m
+	return nil
+}
+
+// Get returns the ider identified by id. If ider is not present, returns false.
+func (g *orderedIDs) Get(id string) (ider, bool) {
+	v, ok := g.items[id]
+	return v, ok
+}
+
+// Swap removes the item by id, replacing it with the new item. Returns an error
+// if the original item doesn't exist.
+func (g *orderedIDs) Swap(id string, m ider) (ider, error) {
+	if len(id) == 0 {
+		return nil, fmt.Errorf("swap from ID must not be empty")
+	}
+
+	iderID := m.ID()
+	if len(iderID) == 0 {
+		return nil, fmt.Errorf("swap to ID must not be empty")
+	}
+
+	if err := g.order.Swap(id, iderID); err != nil {
+		return nil, err
+	}
+
+	removed := g.items[id]
+
+	delete(g.items, id)
+	g.items[iderID] = m
+
+	return removed, nil
+}
+
+// Remove removes the item by id. Returns an error if the item
+// doesn't exist.
+func (g *orderedIDs) Remove(id string) (ider, error) {
+	if len(id) == 0 {
+		return nil, fmt.Errorf("remove ID must not be empty")
+	}
+
+	if err := g.order.Remove(id); err != nil {
+		return nil, err
+	}
+
+	removed := g.items[id]
+	delete(g.items, id)
+	return removed, nil
+}
+
+func (g *orderedIDs) List() []string {
+	items := g.order.List()
+	order := make([]string, len(items))
+	copy(order, items)
+	return order
+}
+
+// Clear removes all entries and slots.
+func (g *orderedIDs) Clear() {
+	g.order.Clear()
+	g.items = map[string]ider{}
+}
+
+// GetOrder returns the item in the order it should be invoked in.
+func (g *orderedIDs) GetOrder() []interface{} {
+	order := g.order.List()
+	ordered := make([]interface{}, len(order))
+	for i := 0; i < len(order); i++ {
+		ordered[i] = g.items[order[i]]
+	}
+
+	return ordered
+}
+
+// relativeOrder provides ordering of item
+type relativeOrder struct {
+	order []string
+}
+
+func newRelativeOrder() *relativeOrder {
+	return &relativeOrder{
+		order: make([]string, 0, baseOrderedItems),
+	}
+}
+
+// Add inserts an item into the order relative to the position provided.
+func (s *relativeOrder) Add(pos RelativePosition, ids ...string) error {
+	if len(ids) == 0 {
+		return nil
+	}
+
+	for _, id := range ids {
+		if _, ok := s.has(id); ok {
+			return fmt.Errorf("already exists, %v", id)
+		}
+	}
+
+	switch pos {
+	case Before:
+		return s.insert(0, Before, ids...)
+
+	case After:
+		s.order = append(s.order, ids...)
+
+	default:
+		return fmt.Errorf("invalid position, %v", int(pos))
+	}
+
+	return nil
+}
+
+// Insert injects an item before or after the relative item. Returns
+// an error if the relative item does not exist.
+func (s *relativeOrder) Insert(relativeTo string, pos RelativePosition, ids ...string) error {
+	if len(ids) == 0 {
+		return nil
+	}
+
+	for _, id := range ids {
+		if _, ok := s.has(id); ok {
+			return fmt.Errorf("already exists, %v", id)
+		}
+	}
+
+	i, ok := s.has(relativeTo)
+	if !ok {
+		return fmt.Errorf("not found, %v", relativeTo)
+	}
+
+	return s.insert(i, pos, ids...)
+}
+
+// Swap will replace the item id with the to item. Returns an
+// error if the original item id does not exist. Allows swapping out an
+// item for another item with the same id.
+func (s *relativeOrder) Swap(id, to string) error {
+	i, ok := s.has(id)
+	if !ok {
+		return fmt.Errorf("not found, %v", id)
+	}
+
+	if _, ok = s.has(to); ok && id != to {
+		return fmt.Errorf("already exists, %v", to)
+	}
+
+	s.order[i] = to
+	return nil
+}
+
+func (s *relativeOrder) Remove(id string) error {
+	i, ok := s.has(id)
+	if !ok {
+		return fmt.Errorf("not found, %v", id)
+	}
+
+	s.order = append(s.order[:i], s.order[i+1:]...)
+	return nil
+}
+
+func (s *relativeOrder) List() []string {
+	return s.order
+}
+
+func (s *relativeOrder) Clear() {
+	s.order = s.order[0:0]
+}
+
+func (s *relativeOrder) insert(i int, pos RelativePosition, ids ...string) error {
+	switch pos {
+	case Before:
+		n := len(ids)
+		var src []string
+		if n <= cap(s.order)-len(s.order) {
+			s.order = s.order[:len(s.order)+n]
+			src = s.order
+		} else {
+			src = s.order
+			s.order = make([]string, len(s.order)+n)
+			copy(s.order[:i], src[:i]) // only when allocating a new slice do we need to copy the front half
+		}
+		copy(s.order[i+n:], src[i:])
+		copy(s.order[i:], ids)
+	case After:
+		if i == len(s.order)-1 || len(s.order) == 0 {
+			s.order = append(s.order, ids...)
+		} else {
+			s.order = append(s.order[:i+1], append(ids, s.order[i+1:]...)...)
+		}
+
+	default:
+		return fmt.Errorf("invalid position, %v", int(pos))
+	}
+
+	return nil
+}
+
+func (s *relativeOrder) has(id string) (i int, found bool) {
+	for i := 0; i < len(s.order); i++ {
+		if s.order[i] == id {
+			return i, true
+		}
+	}
+	return 0, false
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/middleware/stack.go 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/middleware/stack.go
--- 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/middleware/stack.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/middleware/stack.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,209 @@
+package middleware
+
+import (
+	"context"
+	"io"
+	"strings"
+)
+
+// Stack provides protocol and transport agnostic set of middleware split into
+// distinct steps. Steps have specific transitions between them, that are
+// managed by the individual step.
+//
+// Steps are composed as middleware around the underlying handler in the
+// following order:
+//
+//   Initialize -> Serialize -> Build -> Finalize -> Deserialize -> Handler
+//
+// Any middleware within the chain may choose to stop and return an error or
+// response. Since the middleware decorate the handler like a call stack, each
+// middleware will receive the result of the next middleware in the chain.
+// Middleware that does not need to react to an input, or result must forward
+// along the input down the chain, or return the result back up the chain.
+//
+//   Initialize <- Serialize -> Build -> Finalize <- Deserialize <- Handler
+type Stack struct {
+	// Initialize prepares the input, and sets any default parameters as
+	// needed, (e.g. idempotency token, and presigned URLs).
+	//
+	// Takes Input Parameters, and returns result or error.
+	//
+	// Receives result or error from Serialize step.
+	Initialize *InitializeStep
+
+	// Serialize serializes the prepared input into a data structure that can be consumed
+	// by the target transport's message, (e.g. REST-JSON serialization)
+	//
+	// Converts Input Parameters into a Request, and returns the result or error.
+	//
+	// Receives result or error from Build step.
+	Serialize *SerializeStep
+
+	// Build adds additional metadata to the serialized transport message
+	// (e.g. HTTP's Content-Length header, or body checksum). Decorations and
+	// modifications to the message should be copied to all message attempts.
+	//
+	// Takes Request, and returns result or error.
+	//
+	// Receives result or error from Finalize step.
+	Build *BuildStep
+
+	// Finalize performs final preparations needed before sending the message. The
+	// message should already be complete by this stage, and is only alternated
+	// to meet the expectations of the recipient (e.g. Retry and AWS SigV4
+	// request signing)
+	//
+	// Takes Request, and returns result or error.
+	//
+	// Receives result or error from Deserialize step.
+	Finalize *FinalizeStep
+
+	// Deserialize reacts to the handler's response returned by the recipient of the request
+	// message. Deserializes the response into a structured type or error above
+	// stacks can react to.
+	//
+	// Should only forward Request to underlying handler.
+	//
+	// Takes Request, and returns result or error.
+	//
+	// Receives raw response, or error from underlying handler.
+	Deserialize *DeserializeStep
+
+	id string
+}
+
+// NewStack returns an initialize empty stack.
+func NewStack(id string, newRequestFn func() interface{}) *Stack {
+	return &Stack{
+		id:          id,
+		Initialize:  NewInitializeStep(),
+		Serialize:   NewSerializeStep(newRequestFn),
+		Build:       NewBuildStep(),
+		Finalize:    NewFinalizeStep(),
+		Deserialize: NewDeserializeStep(),
+	}
+}
+
+// ID returns the unique ID for the stack as a middleware.
+func (s *Stack) ID() string { return s.id }
+
+// HandleMiddleware invokes the middleware stack decorating the next handler.
+// Each step of stack will be invoked in order before calling the next step.
+// With the next handler call last.
+//
+// The input value must be the input parameters of the operation being
+// performed.
+//
+// Will return the result of the operation, or error.
+func (s *Stack) HandleMiddleware(ctx context.Context, input interface{}, next Handler) (
+	output interface{}, metadata Metadata, err error,
+) {
+	h := DecorateHandler(next,
+		s.Initialize,
+		s.Serialize,
+		s.Build,
+		s.Finalize,
+		s.Deserialize,
+	)
+
+	return h.Handle(ctx, input)
+}
+
+// List returns a list of all middleware in the stack by step.
+func (s *Stack) List() []string {
+	var l []string
+	l = append(l, s.id)
+
+	l = append(l, s.Initialize.ID())
+	l = append(l, s.Initialize.List()...)
+
+	l = append(l, s.Serialize.ID())
+	l = append(l, s.Serialize.List()...)
+
+	l = append(l, s.Build.ID())
+	l = append(l, s.Build.List()...)
+
+	l = append(l, s.Finalize.ID())
+	l = append(l, s.Finalize.List()...)
+
+	l = append(l, s.Deserialize.ID())
+	l = append(l, s.Deserialize.List()...)
+
+	return l
+}
+
+func (s *Stack) String() string {
+	var b strings.Builder
+
+	w := &indentWriter{w: &b}
+
+	w.WriteLine(s.id)
+	w.Push()
+
+	writeStepItems(w, s.Initialize)
+	writeStepItems(w, s.Serialize)
+	writeStepItems(w, s.Build)
+	writeStepItems(w, s.Finalize)
+	writeStepItems(w, s.Deserialize)
+
+	return b.String()
+}
+
+type stackStepper interface {
+	ID() string
+	List() []string
+}
+
+func writeStepItems(w *indentWriter, s stackStepper) {
+	type lister interface {
+		List() []string
+	}
+
+	w.WriteLine(s.ID())
+	w.Push()
+
+	defer w.Pop()
+
+	// ignore stack to prevent circular iterations
+	if _, ok := s.(*Stack); ok {
+		return
+	}
+
+	for _, id := range s.List() {
+		w.WriteLine(id)
+	}
+}
+
+type stringWriter interface {
+	io.Writer
+	WriteString(string) (int, error)
+	WriteRune(rune) (int, error)
+}
+
+type indentWriter struct {
+	w     stringWriter
+	depth int
+}
+
+const indentDepth = "\t\t\t\t\t\t\t\t\t\t"
+
+func (w *indentWriter) Push() {
+	w.depth++
+}
+
+func (w *indentWriter) Pop() {
+	w.depth--
+	if w.depth < 0 {
+		w.depth = 0
+	}
+}
+
+func (w *indentWriter) WriteLine(v string) {
+	w.w.WriteString(indentDepth[:w.depth])
+
+	v = strings.ReplaceAll(v, "\n", "\\n")
+	v = strings.ReplaceAll(v, "\r", "\\r")
+
+	w.w.WriteString(v)
+	w.w.WriteRune('\n')
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/middleware/stack_values.go 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/middleware/stack_values.go
--- 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/middleware/stack_values.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/middleware/stack_values.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,100 @@
+package middleware
+
+import (
+	"context"
+	"reflect"
+	"strings"
+)
+
+// WithStackValue adds a key value pair to the context that is intended to be
+// scoped to a stack. Use ClearStackValues to get a new context with all stack
+// values cleared.
+func WithStackValue(ctx context.Context, key, value interface{}) context.Context {
+	md, _ := ctx.Value(stackValuesKey{}).(*stackValues)
+
+	md = withStackValue(md, key, value)
+	return context.WithValue(ctx, stackValuesKey{}, md)
+}
+
+// ClearStackValues returns a context without any stack values.
+func ClearStackValues(ctx context.Context) context.Context {
+	return context.WithValue(ctx, stackValuesKey{}, nil)
+}
+
+// GetStackValue returns the value pointed to by the key within the stack
+// values, if it is present.
+func GetStackValue(ctx context.Context, key interface{}) interface{} {
+	md, _ := ctx.Value(stackValuesKey{}).(*stackValues)
+	if md == nil {
+		return nil
+	}
+
+	return md.Value(key)
+}
+
+type stackValuesKey struct{}
+
+type stackValues struct {
+	key    interface{}
+	value  interface{}
+	parent *stackValues
+}
+
+func withStackValue(parent *stackValues, key, value interface{}) *stackValues {
+	if key == nil {
+		panic("nil key")
+	}
+	if !reflect.TypeOf(key).Comparable() {
+		panic("key is not comparable")
+	}
+	return &stackValues{key: key, value: value, parent: parent}
+}
+
+func (m *stackValues) Value(key interface{}) interface{} {
+	if key == m.key {
+		return m.value
+	}
+
+	if m.parent == nil {
+		return nil
+	}
+
+	return m.parent.Value(key)
+}
+
+// String renders the chain of stack values for debugging. Walks from this
+// node up through parents, emitting "(keyType: value)" per entry.
+func (c *stackValues) String() string {
+	var str strings.Builder
+
+	cc := c
+	// Walk while a node exists (original had `cc == nil`, which never
+	// iterated and would have read c.key instead of cc.key when it did).
+	for cc != nil {
+		str.WriteString("(" +
+			reflect.TypeOf(cc.key).String() +
+			": " +
+			stringify(cc.value) +
+			")")
+		if cc.parent != nil {
+			str.WriteString(" -> ")
+		}
+		cc = cc.parent
+	}
+	return str.String()
+}
+
+type stringer interface {
+	String() string
+}
+
+// stringify tries a bit to stringify v, without using fmt, since we don't
+// want context depending on the unicode tables. This is only used by
+// *valueCtx.String().
+func stringify(v interface{}) string {
+	switch s := v.(type) {
+	case stringer:
+		return s.String()
+	case string:
+		return s
+	}
+	return "<not Stringer>"
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/middleware/step_build.go 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/middleware/step_build.go
--- 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/middleware/step_build.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/middleware/step_build.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,211 @@
+package middleware
+
+import (
+	"context"
+)
+
+// BuildInput provides the input parameters for the BuildMiddleware to consume.
+// BuildMiddleware may modify the Request value before forwarding the input
+// along to the next BuildHandler.
+type BuildInput struct {
+	Request interface{}
+}
+
+// BuildOutput provides the result returned by the next BuildHandler.
+type BuildOutput struct {
+	Result interface{}
+}
+
+// BuildHandler provides the interface for the next handler the
+// BuildMiddleware will call in the middleware chain.
+type BuildHandler interface {
+	HandleBuild(ctx context.Context, in BuildInput) (
+		out BuildOutput, metadata Metadata, err error,
+	)
+}
+
+// BuildMiddleware provides the interface for middleware specific to the
+// serialize step. Delegates to the next BuildHandler for further
+// processing.
+type BuildMiddleware interface {
+	// Unique ID for the middleware in the BuildStep. The step does not allow
+	// duplicate IDs.
+	ID() string
+
+	// Invokes the middleware behavior which must delegate to the next handler
+	// for the middleware chain to continue. The method must return a result or
+	// error to its caller.
+	HandleBuild(ctx context.Context, in BuildInput, next BuildHandler) (
+		out BuildOutput, metadata Metadata, err error,
+	)
+}
+
+// BuildMiddlewareFunc returns a BuildMiddleware with the unique ID provided,
+// and the func to be invoked.
+func BuildMiddlewareFunc(id string, fn func(context.Context, BuildInput, BuildHandler) (BuildOutput, Metadata, error)) BuildMiddleware {
+	return buildMiddlewareFunc{
+		id: id,
+		fn: fn,
+	}
+}
+
+type buildMiddlewareFunc struct {
+	// Unique ID for the middleware.
+	id string
+
+	// Middleware function to be called.
+	fn func(context.Context, BuildInput, BuildHandler) (BuildOutput, Metadata, error)
+}
+
+// ID returns the unique ID for the middleware.
+func (s buildMiddlewareFunc) ID() string { return s.id }
+
+// HandleBuild invokes the middleware Fn.
+func (s buildMiddlewareFunc) HandleBuild(ctx context.Context, in BuildInput, next BuildHandler) (
+	out BuildOutput, metadata Metadata, err error,
+) {
+	return s.fn(ctx, in, next)
+}
+
+var _ BuildMiddleware = (buildMiddlewareFunc{})
+
+// BuildStep provides the ordered grouping of BuildMiddleware to be invoked on
+// a handler.
+type BuildStep struct {
+	ids *orderedIDs
+}
+
+// NewBuildStep returns a BuildStep ready to have middleware for
+// initialization added to it.
+func NewBuildStep() *BuildStep {
+	return &BuildStep{
+		ids: newOrderedIDs(),
+	}
+}
+
+var _ Middleware = (*BuildStep)(nil)
+
+// ID returns the unique name of the step as a middleware.
+func (s *BuildStep) ID() string {
+	return "Build stack step"
+}
+
+// HandleMiddleware invokes the middleware by decorating the next handler
+// provided. Returns the result of the middleware and handler being invoked.
+//
+// Implements Middleware interface.
+func (s *BuildStep) HandleMiddleware(ctx context.Context, in interface{}, next Handler) (
+	out interface{}, metadata Metadata, err error,
+) {
+	order := s.ids.GetOrder()
+
+	var h BuildHandler = buildWrapHandler{Next: next}
+	for i := len(order) - 1; i >= 0; i-- {
+		h = decoratedBuildHandler{
+			Next: h,
+			With: order[i].(BuildMiddleware),
+		}
+	}
+
+	sIn := BuildInput{
+		Request: in,
+	}
+
+	res, metadata, err := h.HandleBuild(ctx, sIn)
+	return res.Result, metadata, err
+}
+
+// Get retrieves the middleware identified by id. If the middleware is not present, returns false.
+func (s *BuildStep) Get(id string) (BuildMiddleware, bool) {
+	get, ok := s.ids.Get(id)
+	if !ok {
+		return nil, false
+	}
+	return get.(BuildMiddleware), ok
+}
+
+// Add injects the middleware to the relative position of the middleware group.
+// Returns an error if the middleware already exists.
+func (s *BuildStep) Add(m BuildMiddleware, pos RelativePosition) error {
+	return s.ids.Add(m, pos)
+}
+
+// Insert injects the middleware relative to an existing middleware id.
+// Returns an error if the original middleware does not exist, or the middleware
+// being added already exists.
+func (s *BuildStep) Insert(m BuildMiddleware, relativeTo string, pos RelativePosition) error {
+	return s.ids.Insert(m, relativeTo, pos)
+}
+
+// Swap removes the middleware by id, replacing it with the new middleware.
+// Returns the middleware removed, or an error if the middleware to be removed
+// doesn't exist.
+func (s *BuildStep) Swap(id string, m BuildMiddleware) (BuildMiddleware, error) {
+	removed, err := s.ids.Swap(id, m)
+	if err != nil {
+		return nil, err
+	}
+
+	return removed.(BuildMiddleware), nil
+}
+
+// Remove removes the middleware by id. Returns error if the middleware
+// doesn't exist.
+func (s *BuildStep) Remove(id string) (BuildMiddleware, error) {
+	removed, err := s.ids.Remove(id)
+	if err != nil {
+		return nil, err
+	}
+
+	return removed.(BuildMiddleware), nil
+}
+
+// List returns a list of the middleware in the step.
+func (s *BuildStep) List() []string {
+	return s.ids.List()
+}
+
+// Clear removes all middleware in the step.
+func (s *BuildStep) Clear() {
+	s.ids.Clear()
+}
+
+type buildWrapHandler struct {
+	Next Handler
+}
+
+var _ BuildHandler = (*buildWrapHandler)(nil)
+
+// Implements BuildHandler, converts types and delegates to underlying
+// generic handler.
+func (w buildWrapHandler) HandleBuild(ctx context.Context, in BuildInput) (
+	out BuildOutput, metadata Metadata, err error,
+) {
+	res, metadata, err := w.Next.Handle(ctx, in.Request)
+	return BuildOutput{
+		Result: res,
+	}, metadata, err
+}
+
+type decoratedBuildHandler struct {
+	Next BuildHandler
+	With BuildMiddleware
+}
+
+var _ BuildHandler = (*decoratedBuildHandler)(nil)
+
+func (h decoratedBuildHandler) HandleBuild(ctx context.Context, in BuildInput) (
+	out BuildOutput, metadata Metadata, err error,
+) {
+	return h.With.HandleBuild(ctx, in, h.Next)
+}
+
+// BuildHandlerFunc provides a wrapper around a function to be used as a build middleware handler.
+type BuildHandlerFunc func(context.Context, BuildInput) (BuildOutput, Metadata, error)
+
+// HandleBuild invokes the wrapped function with the provided arguments.
+func (b BuildHandlerFunc) HandleBuild(ctx context.Context, in BuildInput) (BuildOutput, Metadata, error) {
+	return b(ctx, in)
+}
+
+var _ BuildHandler = BuildHandlerFunc(nil)
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/middleware/step_deserialize.go 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/middleware/step_deserialize.go
--- 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/middleware/step_deserialize.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/middleware/step_deserialize.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,217 @@
+package middleware
+
+import (
+	"context"
+)
+
+// DeserializeInput provides the input parameters for the DeserializeInput to
+// consume. DeserializeMiddleware should not modify the Request, and instead
+// forward it along to the next DeserializeHandler.
+type DeserializeInput struct {
+	Request interface{}
+}
+
+// DeserializeOutput provides the result returned by the next
+// DeserializeHandler. The DeserializeMiddleware should deserialize the
+// RawResponse into a Result that can be consumed by middleware higher up in
+// the stack.
+type DeserializeOutput struct {
+	RawResponse interface{}
+	Result      interface{}
+}
+
+// DeserializeHandler provides the interface for the next handler the
+// DeserializeMiddleware will call in the middleware chain.
+type DeserializeHandler interface {
+	HandleDeserialize(ctx context.Context, in DeserializeInput) (
+		out DeserializeOutput, metadata Metadata, err error,
+	)
+}
+
+// DeserializeMiddleware provides the interface for middleware specific to the
+// serialize step. Delegates to the next DeserializeHandler for further
+// processing.
+type DeserializeMiddleware interface {
+	// ID returns a unique ID for the middleware in the DeserializeStep. The step does not
+	// allow duplicate IDs.
+	ID() string
+
+	// HandleDeserialize invokes the middleware behavior which must delegate to the next handler
+	// for the middleware chain to continue. The method must return a result or
+	// error to its caller.
+	HandleDeserialize(ctx context.Context, in DeserializeInput, next DeserializeHandler) (
+		out DeserializeOutput, metadata Metadata, err error,
+	)
+}
+
+// DeserializeMiddlewareFunc returns a DeserializeMiddleware with the unique ID
+// provided, and the func to be invoked.
+func DeserializeMiddlewareFunc(id string, fn func(context.Context, DeserializeInput, DeserializeHandler) (DeserializeOutput, Metadata, error)) DeserializeMiddleware {
+	return deserializeMiddlewareFunc{
+		id: id,
+		fn: fn,
+	}
+}
+
+type deserializeMiddlewareFunc struct {
+	// Unique ID for the middleware.
+	id string
+
+	// Middleware function to be called.
+	fn func(context.Context, DeserializeInput, DeserializeHandler) (
+		DeserializeOutput, Metadata, error,
+	)
+}
+
+// ID returns the unique ID for the middleware.
+func (s deserializeMiddlewareFunc) ID() string { return s.id }
+
+// HandleDeserialize invokes the middleware Fn.
+func (s deserializeMiddlewareFunc) HandleDeserialize(ctx context.Context, in DeserializeInput, next DeserializeHandler) (
+	out DeserializeOutput, metadata Metadata, err error,
+) {
+	return s.fn(ctx, in, next)
+}
+
+var _ DeserializeMiddleware = (deserializeMiddlewareFunc{})
+
+// DeserializeStep provides the ordered grouping of DeserializeMiddleware to be
+// invoked on a handler.
+type DeserializeStep struct {
+	ids *orderedIDs
+}
+
+// NewDeserializeStep returns a DeserializeStep ready to have middleware for
+// initialization added to it.
+func NewDeserializeStep() *DeserializeStep {
+	return &DeserializeStep{
+		ids: newOrderedIDs(),
+	}
+}
+
+var _ Middleware = (*DeserializeStep)(nil)
+
+// ID returns the unique ID of the step as a middleware.
+func (s *DeserializeStep) ID() string {
+	return "Deserialize stack step"
+}
+
+// HandleMiddleware invokes the middleware by decorating the next handler
+// provided. Returns the result of the middleware and handler being invoked.
+//
+// Implements Middleware interface.
+func (s *DeserializeStep) HandleMiddleware(ctx context.Context, in interface{}, next Handler) (
+	out interface{}, metadata Metadata, err error,
+) {
+	order := s.ids.GetOrder()
+
+	var h DeserializeHandler = deserializeWrapHandler{Next: next}
+	for i := len(order) - 1; i >= 0; i-- {
+		h = decoratedDeserializeHandler{
+			Next: h,
+			With: order[i].(DeserializeMiddleware),
+		}
+	}
+
+	sIn := DeserializeInput{
+		Request: in,
+	}
+
+	res, metadata, err := h.HandleDeserialize(ctx, sIn)
+	return res.Result, metadata, err
+}
+
+// Get retrieves the middleware identified by id. If the middleware is not present, returns false.
+func (s *DeserializeStep) Get(id string) (DeserializeMiddleware, bool) {
+	get, ok := s.ids.Get(id)
+	if !ok {
+		return nil, false
+	}
+	return get.(DeserializeMiddleware), ok
+}
+
+// Add injects the middleware to the relative position of the middleware group.
+// Returns an error if the middleware already exists.
+func (s *DeserializeStep) Add(m DeserializeMiddleware, pos RelativePosition) error {
+	return s.ids.Add(m, pos)
+}
+
+// Insert injects the middleware relative to an existing middleware ID.
+// Returns error if the original middleware does not exist, or the middleware
+// being added already exists.
+func (s *DeserializeStep) Insert(m DeserializeMiddleware, relativeTo string, pos RelativePosition) error {
+	return s.ids.Insert(m, relativeTo, pos)
+}
+
+// Swap removes the middleware by id, replacing it with the new middleware.
+// Returns the middleware removed, or error if the middleware to be removed
+// doesn't exist.
+func (s *DeserializeStep) Swap(id string, m DeserializeMiddleware) (DeserializeMiddleware, error) {
+	removed, err := s.ids.Swap(id, m)
+	if err != nil {
+		return nil, err
+	}
+
+	return removed.(DeserializeMiddleware), nil
+}
+
+// Remove removes the middleware by id. Returns error if the middleware
+// doesn't exist.
+func (s *DeserializeStep) Remove(id string) (DeserializeMiddleware, error) {
+	removed, err := s.ids.Remove(id)
+	if err != nil {
+		return nil, err
+	}
+
+	return removed.(DeserializeMiddleware), nil
+}
+
+// List returns a list of the middleware in the step.
+func (s *DeserializeStep) List() []string {
+	return s.ids.List()
+}
+
+// Clear removes all middleware in the step.
+func (s *DeserializeStep) Clear() {
+	s.ids.Clear()
+}
+
+type deserializeWrapHandler struct {
+	Next Handler
+}
+
+var _ DeserializeHandler = (*deserializeWrapHandler)(nil)
+
+// HandleDeserialize implements DeserializeHandler, converts types and delegates to underlying
+// generic handler.
+func (w deserializeWrapHandler) HandleDeserialize(ctx context.Context, in DeserializeInput) (
+	out DeserializeOutput, metadata Metadata, err error,
+) {
+	resp, metadata, err := w.Next.Handle(ctx, in.Request)
+	return DeserializeOutput{
+		RawResponse: resp,
+	}, metadata, err
+}
+
+type decoratedDeserializeHandler struct {
+	Next DeserializeHandler
+	With DeserializeMiddleware
+}
+
+var _ DeserializeHandler = (*decoratedDeserializeHandler)(nil)
+
+func (h decoratedDeserializeHandler) HandleDeserialize(ctx context.Context, in DeserializeInput) (
+	out DeserializeOutput, metadata Metadata, err error,
+) {
+	return h.With.HandleDeserialize(ctx, in, h.Next)
+}
+
+// DeserializeHandlerFunc provides a wrapper around a function to be used as a deserialize middleware handler.
+type DeserializeHandlerFunc func(context.Context, DeserializeInput) (DeserializeOutput, Metadata, error)
+
+// HandleDeserialize invokes the wrapped function with the given arguments.
+func (d DeserializeHandlerFunc) HandleDeserialize(ctx context.Context, in DeserializeInput) (DeserializeOutput, Metadata, error) {
+	return d(ctx, in)
+}
+
+var _ DeserializeHandler = DeserializeHandlerFunc(nil)
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/middleware/step_finalize.go 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/middleware/step_finalize.go
--- 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/middleware/step_finalize.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/middleware/step_finalize.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,211 @@
+package middleware
+
+import "context"
+
+// FinalizeInput provides the input parameters for the FinalizeMiddleware to
+// consume. FinalizeMiddleware may modify the Request value before forwarding
+// the FinalizeInput along to the next FinalizeHandler.
+type FinalizeInput struct {
+	Request interface{}
+}
+
+// FinalizeOutput provides the result returned by the next FinalizeHandler.
+type FinalizeOutput struct {
+	Result interface{}
+}
+
+// FinalizeHandler provides the interface for the next handler the
+// FinalizeMiddleware will call in the middleware chain.
+type FinalizeHandler interface {
+	HandleFinalize(ctx context.Context, in FinalizeInput) (
+		out FinalizeOutput, metadata Metadata, err error,
+	)
+}
+
+// FinalizeMiddleware provides the interface for middleware specific to the
+// serialize step. Delegates to the next FinalizeHandler for further
+// processing.
+type FinalizeMiddleware interface {
+	// ID returns a unique ID for the middleware in the FinalizeStep. The step does not
+	// allow duplicate IDs.
+	ID() string
+
+	// HandleFinalize invokes the middleware behavior which must delegate to the next handler
+	// for the middleware chain to continue. The method must return a result or
+	// error to its caller.
+	HandleFinalize(ctx context.Context, in FinalizeInput, next FinalizeHandler) (
+		out FinalizeOutput, metadata Metadata, err error,
+	)
+}
+
+// FinalizeMiddlewareFunc returns a FinalizeMiddleware with the unique ID
+// provided, and the func to be invoked.
+func FinalizeMiddlewareFunc(id string, fn func(context.Context, FinalizeInput, FinalizeHandler) (FinalizeOutput, Metadata, error)) FinalizeMiddleware {
+	return finalizeMiddlewareFunc{
+		id: id,
+		fn: fn,
+	}
+}
+
+type finalizeMiddlewareFunc struct {
+	// Unique ID for the middleware.
+	id string
+
+	// Middleware function to be called.
+	fn func(context.Context, FinalizeInput, FinalizeHandler) (
+		FinalizeOutput, Metadata, error,
+	)
+}
+
+// ID returns the unique ID for the middleware.
+func (s finalizeMiddlewareFunc) ID() string { return s.id }
+
+// HandleFinalize invokes the middleware Fn.
+func (s finalizeMiddlewareFunc) HandleFinalize(ctx context.Context, in FinalizeInput, next FinalizeHandler) (
+	out FinalizeOutput, metadata Metadata, err error,
+) {
+	return s.fn(ctx, in, next)
+}
+
+var _ FinalizeMiddleware = (finalizeMiddlewareFunc{})
+
+// FinalizeStep provides the ordered grouping of FinalizeMiddleware to be
+// invoked on a handler.
+type FinalizeStep struct {
+	ids *orderedIDs
+}
+
+// NewFinalizeStep returns a FinalizeStep ready to have middleware for
+// initialization added to it.
+func NewFinalizeStep() *FinalizeStep {
+	return &FinalizeStep{
+		ids: newOrderedIDs(),
+	}
+}
+
+var _ Middleware = (*FinalizeStep)(nil)
+
+// ID returns the unique id of the step as a middleware.
+func (s *FinalizeStep) ID() string {
+	return "Finalize stack step"
+}
+
+// HandleMiddleware invokes the middleware by decorating the next handler
+// provided. Returns the result of the middleware and handler being invoked.
+//
+// Implements Middleware interface.
+func (s *FinalizeStep) HandleMiddleware(ctx context.Context, in interface{}, next Handler) (
+	out interface{}, metadata Metadata, err error,
+) {
+	order := s.ids.GetOrder()
+
+	var h FinalizeHandler = finalizeWrapHandler{Next: next}
+	for i := len(order) - 1; i >= 0; i-- {
+		h = decoratedFinalizeHandler{
+			Next: h,
+			With: order[i].(FinalizeMiddleware),
+		}
+	}
+
+	sIn := FinalizeInput{
+		Request: in,
+	}
+
+	res, metadata, err := h.HandleFinalize(ctx, sIn)
+	return res.Result, metadata, err
+}
+
+// Get retrieves the middleware identified by id. If the middleware is not present, returns false.
+func (s *FinalizeStep) Get(id string) (FinalizeMiddleware, bool) {
+	get, ok := s.ids.Get(id)
+	if !ok {
+		return nil, false
+	}
+	return get.(FinalizeMiddleware), ok
+}
+
+// Add injects the middleware to the relative position of the middleware group.
+// Returns an error if the middleware already exists.
+func (s *FinalizeStep) Add(m FinalizeMiddleware, pos RelativePosition) error {
+	return s.ids.Add(m, pos)
+}
+
+// Insert injects the middleware relative to an existing middleware ID.
+// Returns error if the original middleware does not exist, or the middleware
+// being added already exists.
+func (s *FinalizeStep) Insert(m FinalizeMiddleware, relativeTo string, pos RelativePosition) error {
+	return s.ids.Insert(m, relativeTo, pos)
+}
+
+// Swap removes the middleware by id, replacing it with the new middleware.
+// Returns the middleware removed, or error if the middleware to be removed
+// doesn't exist.
+func (s *FinalizeStep) Swap(id string, m FinalizeMiddleware) (FinalizeMiddleware, error) {
+	removed, err := s.ids.Swap(id, m)
+	if err != nil {
+		return nil, err
+	}
+
+	return removed.(FinalizeMiddleware), nil
+}
+
+// Remove removes the middleware by id. Returns error if the middleware
+// doesn't exist.
+func (s *FinalizeStep) Remove(id string) (FinalizeMiddleware, error) {
+	removed, err := s.ids.Remove(id)
+	if err != nil {
+		return nil, err
+	}
+
+	return removed.(FinalizeMiddleware), nil
+}
+
+// List returns a list of the middleware in the step.
+func (s *FinalizeStep) List() []string {
+	return s.ids.List()
+}
+
+// Clear removes all middleware in the step.
+func (s *FinalizeStep) Clear() {
+	s.ids.Clear()
+}
+
+type finalizeWrapHandler struct {
+	Next Handler
+}
+
+var _ FinalizeHandler = (*finalizeWrapHandler)(nil)
+
+// HandleFinalize implements FinalizeHandler, converts types and delegates to underlying
+// generic handler.
+func (w finalizeWrapHandler) HandleFinalize(ctx context.Context, in FinalizeInput) (
+	out FinalizeOutput, metadata Metadata, err error,
+) {
+	res, metadata, err := w.Next.Handle(ctx, in.Request)
+	return FinalizeOutput{
+		Result: res,
+	}, metadata, err
+}
+
+type decoratedFinalizeHandler struct {
+	Next FinalizeHandler
+	With FinalizeMiddleware
+}
+
+var _ FinalizeHandler = (*decoratedFinalizeHandler)(nil)
+
+func (h decoratedFinalizeHandler) HandleFinalize(ctx context.Context, in FinalizeInput) (
+	out FinalizeOutput, metadata Metadata, err error,
+) {
+	return h.With.HandleFinalize(ctx, in, h.Next)
+}
+
+// FinalizeHandlerFunc provides a wrapper around a function to be used as a finalize middleware handler.
+type FinalizeHandlerFunc func(context.Context, FinalizeInput) (FinalizeOutput, Metadata, error)
+
+// HandleFinalize invokes the wrapped function with the given arguments.
+func (f FinalizeHandlerFunc) HandleFinalize(ctx context.Context, in FinalizeInput) (FinalizeOutput, Metadata, error) {
+	return f(ctx, in)
+}
+
+var _ FinalizeHandler = FinalizeHandlerFunc(nil)
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/middleware/step_initialize.go 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/middleware/step_initialize.go
--- 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/middleware/step_initialize.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/middleware/step_initialize.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,211 @@
+package middleware
+
+import "context"
+
+// InitializeInput wraps the input parameters for the InitializeMiddlewares to
+// consume. InitializeMiddleware may modify the parameter value before
+// forwarding it along to the next InitializeHandler.
+type InitializeInput struct {
+	Parameters interface{}
+}
+
+// InitializeOutput provides the result returned by the next InitializeHandler.
+type InitializeOutput struct {
+	Result interface{}
+}
+
+// InitializeHandler provides the interface for the next handler the
+// InitializeMiddleware will call in the middleware chain.
+type InitializeHandler interface {
+	HandleInitialize(ctx context.Context, in InitializeInput) (
+		out InitializeOutput, metadata Metadata, err error,
+	)
+}
+
+// InitializeMiddleware provides the interface for middleware specific to the
+// initialize step. Delegates to the next InitializeHandler for further
+// processing.
+type InitializeMiddleware interface {
+	// ID returns a unique ID for the middleware in the InitializeStep. The step does not
+	// allow duplicate IDs.
+	ID() string
+
+	// HandleInitialize invokes the middleware behavior which must delegate to the next handler
+	// for the middleware chain to continue. The method must return a result or
+	// error to its caller.
+	HandleInitialize(ctx context.Context, in InitializeInput, next InitializeHandler) (
+		out InitializeOutput, metadata Metadata, err error,
+	)
+}
+
+// InitializeMiddlewareFunc returns a InitializeMiddleware with the unique ID provided,
+// and the func to be invoked.
+func InitializeMiddlewareFunc(id string, fn func(context.Context, InitializeInput, InitializeHandler) (InitializeOutput, Metadata, error)) InitializeMiddleware {
+	return initializeMiddlewareFunc{
+		id: id,
+		fn: fn,
+	}
+}
+
+type initializeMiddlewareFunc struct {
+	// Unique ID for the middleware.
+	id string
+
+	// Middleware function to be called.
+	fn func(context.Context, InitializeInput, InitializeHandler) (
+		InitializeOutput, Metadata, error,
+	)
+}
+
+// ID returns the unique ID for the middleware.
+func (s initializeMiddlewareFunc) ID() string { return s.id }
+
+// HandleInitialize invokes the middleware Fn.
+func (s initializeMiddlewareFunc) HandleInitialize(ctx context.Context, in InitializeInput, next InitializeHandler) (
+	out InitializeOutput, metadata Metadata, err error,
+) {
+	return s.fn(ctx, in, next)
+}
+
+var _ InitializeMiddleware = (initializeMiddlewareFunc{})
+
+// InitializeStep provides the ordered grouping of InitializeMiddleware to be
+// invoked on a handler.
+type InitializeStep struct {
+	ids *orderedIDs
+}
+
+// NewInitializeStep returns an InitializeStep ready to have middleware for
+// initialization added to it.
+func NewInitializeStep() *InitializeStep {
+	return &InitializeStep{
+		ids: newOrderedIDs(),
+	}
+}
+
+var _ Middleware = (*InitializeStep)(nil)
+
+// ID returns the unique ID of the step as a middleware.
+func (s *InitializeStep) ID() string {
+	return "Initialize stack step"
+}
+
+// HandleMiddleware invokes the middleware by decorating the next handler
+// provided. Returns the result of the middleware and handler being invoked.
+//
+// Implements Middleware interface.
+func (s *InitializeStep) HandleMiddleware(ctx context.Context, in interface{}, next Handler) (
+	out interface{}, metadata Metadata, err error,
+) {
+	order := s.ids.GetOrder()
+
+	var h InitializeHandler = initializeWrapHandler{Next: next}
+	for i := len(order) - 1; i >= 0; i-- {
+		h = decoratedInitializeHandler{
+			Next: h,
+			With: order[i].(InitializeMiddleware),
+		}
+	}
+
+	sIn := InitializeInput{
+		Parameters: in,
+	}
+
+	res, metadata, err := h.HandleInitialize(ctx, sIn)
+	return res.Result, metadata, err
+}
+
+// Get retrieves the middleware identified by id. If the middleware is not present, returns false.
+func (s *InitializeStep) Get(id string) (InitializeMiddleware, bool) {
+	get, ok := s.ids.Get(id)
+	if !ok {
+		return nil, false
+	}
+	return get.(InitializeMiddleware), ok
+}
+
+// Add injects the middleware to the relative position of the middleware group.
+// Returns an error if the middleware already exists.
+func (s *InitializeStep) Add(m InitializeMiddleware, pos RelativePosition) error {
+	return s.ids.Add(m, pos)
+}
+
+// Insert injects the middleware relative to an existing middleware ID.
+// Returns error if the original middleware does not exist, or the middleware
+// being added already exists.
+func (s *InitializeStep) Insert(m InitializeMiddleware, relativeTo string, pos RelativePosition) error {
+	return s.ids.Insert(m, relativeTo, pos)
+}
+
+// Swap removes the middleware by id, replacing it with the new middleware.
+// Returns the middleware removed, or error if the middleware to be removed
+// doesn't exist.
+func (s *InitializeStep) Swap(id string, m InitializeMiddleware) (InitializeMiddleware, error) {
+	removed, err := s.ids.Swap(id, m)
+	if err != nil {
+		return nil, err
+	}
+
+	return removed.(InitializeMiddleware), nil
+}
+
+// Remove removes the middleware by id. Returns error if the middleware
+// doesn't exist.
+func (s *InitializeStep) Remove(id string) (InitializeMiddleware, error) {
+	removed, err := s.ids.Remove(id)
+	if err != nil {
+		return nil, err
+	}
+
+	return removed.(InitializeMiddleware), nil
+}
+
+// List returns a list of the middleware in the step.
+func (s *InitializeStep) List() []string {
+	return s.ids.List()
+}
+
+// Clear removes all middleware in the step.
+func (s *InitializeStep) Clear() {
+	s.ids.Clear()
+}
+
+type initializeWrapHandler struct {
+	Next Handler
+}
+
+var _ InitializeHandler = (*initializeWrapHandler)(nil)
+
+// HandleInitialize implements InitializeHandler, converts types and delegates to underlying
+// generic handler.
+func (w initializeWrapHandler) HandleInitialize(ctx context.Context, in InitializeInput) (
+	out InitializeOutput, metadata Metadata, err error,
+) {
+	res, metadata, err := w.Next.Handle(ctx, in.Parameters)
+	return InitializeOutput{
+		Result: res,
+	}, metadata, err
+}
+
+type decoratedInitializeHandler struct {
+	Next InitializeHandler
+	With InitializeMiddleware
+}
+
+var _ InitializeHandler = (*decoratedInitializeHandler)(nil)
+
+func (h decoratedInitializeHandler) HandleInitialize(ctx context.Context, in InitializeInput) (
+	out InitializeOutput, metadata Metadata, err error,
+) {
+	return h.With.HandleInitialize(ctx, in, h.Next)
+}
+
+// InitializeHandlerFunc provides a wrapper around a function to be used as an initialize middleware handler.
+type InitializeHandlerFunc func(context.Context, InitializeInput) (InitializeOutput, Metadata, error)
+
+// HandleInitialize calls the wrapped function with the provided arguments.
+func (i InitializeHandlerFunc) HandleInitialize(ctx context.Context, in InitializeInput) (InitializeOutput, Metadata, error) {
+	return i(ctx, in)
+}
+
+var _ InitializeHandler = InitializeHandlerFunc(nil)
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/middleware/step_serialize.go 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/middleware/step_serialize.go
--- 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/middleware/step_serialize.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/middleware/step_serialize.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,219 @@
+package middleware
+
+import "context"
+
+// SerializeInput provides the input parameters for the SerializeMiddleware to
+// consume. SerializeMiddleware may modify the Request value before forwarding
+// SerializeInput along to the next SerializeHandler. The Parameters member
+// should not be modified by SerializeMiddleware, InitializeMiddleware should
+// be responsible for modifying the provided Parameter value.
+type SerializeInput struct {
+	Parameters interface{}
+	Request    interface{}
+}
+
+// SerializeOutput provides the result returned by the next SerializeHandler.
+type SerializeOutput struct {
+	Result interface{}
+}
+
+// SerializeHandler provides the interface for the next handler the
+// SerializeMiddleware will call in the middleware chain.
+type SerializeHandler interface {
+	HandleSerialize(ctx context.Context, in SerializeInput) (
+		out SerializeOutput, metadata Metadata, err error,
+	)
+}
+
+// SerializeMiddleware provides the interface for middleware specific to the
+// serialize step. Delegates to the next SerializeHandler for further
+// processing.
+type SerializeMiddleware interface {
+	// ID returns a unique ID for the middleware in the SerializeStep. The step does not
+	// allow duplicate IDs.
+	ID() string
+
+	// HandleSerialize invokes the middleware behavior which must delegate to the next handler
+	// for the middleware chain to continue. The method must return a result or
+	// error to its caller.
+	HandleSerialize(ctx context.Context, in SerializeInput, next SerializeHandler) (
+		out SerializeOutput, metadata Metadata, err error,
+	)
+}
+
+// SerializeMiddlewareFunc returns a SerializeMiddleware with the unique ID
+// provided, and the func to be invoked.
+func SerializeMiddlewareFunc(id string, fn func(context.Context, SerializeInput, SerializeHandler) (SerializeOutput, Metadata, error)) SerializeMiddleware {
+	return serializeMiddlewareFunc{
+		id: id,
+		fn: fn,
+	}
+}
+
+type serializeMiddlewareFunc struct {
+	// Unique ID for the middleware.
+	id string
+
+	// Middleware function to be called.
+	fn func(context.Context, SerializeInput, SerializeHandler) (
+		SerializeOutput, Metadata, error,
+	)
+}
+
+// ID returns the unique ID for the middleware.
+func (s serializeMiddlewareFunc) ID() string { return s.id }
+
+// HandleSerialize invokes the middleware Fn.
+func (s serializeMiddlewareFunc) HandleSerialize(ctx context.Context, in SerializeInput, next SerializeHandler) (
+	out SerializeOutput, metadata Metadata, err error,
+) {
+	return s.fn(ctx, in, next)
+}
+
+var _ SerializeMiddleware = (serializeMiddlewareFunc{})
+
+// SerializeStep provides the ordered grouping of SerializeMiddleware to be
+// invoked on a handler.
+type SerializeStep struct {
+	newRequest func() interface{}
+	ids        *orderedIDs
+}
+
+// NewSerializeStep returns a SerializeStep ready to have middleware for
+// initialization added to it. The newRequest func parameter is used to
+// initialize the transport specific request for the stack SerializeStep to
+// serialize the input parameters into.
+func NewSerializeStep(newRequest func() interface{}) *SerializeStep {
+	return &SerializeStep{
+		ids:        newOrderedIDs(),
+		newRequest: newRequest,
+	}
+}
+
+var _ Middleware = (*SerializeStep)(nil)
+
+// ID returns the unique ID of the step as a middleware.
+func (s *SerializeStep) ID() string {
+	return "Serialize stack step"
+}
+
+// HandleMiddleware invokes the middleware by decorating the next handler
+// provided. Returns the result of the middleware and handler being invoked.
+//
+// Implements Middleware interface.
+func (s *SerializeStep) HandleMiddleware(ctx context.Context, in interface{}, next Handler) (
+	out interface{}, metadata Metadata, err error,
+) {
+	order := s.ids.GetOrder()
+
+	var h SerializeHandler = serializeWrapHandler{Next: next}
+	for i := len(order) - 1; i >= 0; i-- {
+		h = decoratedSerializeHandler{
+			Next: h,
+			With: order[i].(SerializeMiddleware),
+		}
+	}
+
+	sIn := SerializeInput{
+		Parameters: in,
+		Request:    s.newRequest(),
+	}
+
+	res, metadata, err := h.HandleSerialize(ctx, sIn)
+	return res.Result, metadata, err
+}
+
+// Get retrieves the middleware identified by id. If the middleware is not present, returns false.
+func (s *SerializeStep) Get(id string) (SerializeMiddleware, bool) {
+	get, ok := s.ids.Get(id)
+	if !ok {
+		return nil, false
+	}
+	return get.(SerializeMiddleware), ok
+}
+
+// Add injects the middleware to the relative position of the middleware group.
+// Returns an error if the middleware already exists.
+func (s *SerializeStep) Add(m SerializeMiddleware, pos RelativePosition) error {
+	return s.ids.Add(m, pos)
+}
+
+// Insert injects the middleware relative to an existing middleware ID.
+// Returns error if the original middleware does not exist, or the middleware
+// being added already exists.
+func (s *SerializeStep) Insert(m SerializeMiddleware, relativeTo string, pos RelativePosition) error {
+	return s.ids.Insert(m, relativeTo, pos)
+}
+
+// Swap removes the middleware by id, replacing it with the new middleware.
+// Returns the middleware removed, or error if the middleware to be removed
+// doesn't exist.
+func (s *SerializeStep) Swap(id string, m SerializeMiddleware) (SerializeMiddleware, error) {
+	removed, err := s.ids.Swap(id, m)
+	if err != nil {
+		return nil, err
+	}
+
+	return removed.(SerializeMiddleware), nil
+}
+
+// Remove removes the middleware by id. Returns error if the middleware
+// doesn't exist.
+func (s *SerializeStep) Remove(id string) (SerializeMiddleware, error) {
+	removed, err := s.ids.Remove(id)
+	if err != nil {
+		return nil, err
+	}
+
+	return removed.(SerializeMiddleware), nil
+}
+
+// List returns a list of the middleware in the step.
+func (s *SerializeStep) List() []string {
+	return s.ids.List()
+}
+
+// Clear removes all middleware in the step.
+func (s *SerializeStep) Clear() {
+	s.ids.Clear()
+}
+
+type serializeWrapHandler struct {
+	Next Handler
+}
+
+var _ SerializeHandler = (*serializeWrapHandler)(nil)
+
+// Implements SerializeHandler, converts types and delegates to underlying
+// generic handler.
+func (w serializeWrapHandler) HandleSerialize(ctx context.Context, in SerializeInput) (
+	out SerializeOutput, metadata Metadata, err error,
+) {
+	res, metadata, err := w.Next.Handle(ctx, in.Request)
+	return SerializeOutput{
+		Result: res,
+	}, metadata, err
+}
+
+type decoratedSerializeHandler struct {
+	Next SerializeHandler
+	With SerializeMiddleware
+}
+
+var _ SerializeHandler = (*decoratedSerializeHandler)(nil)
+
+func (h decoratedSerializeHandler) HandleSerialize(ctx context.Context, in SerializeInput) (
+	out SerializeOutput, metadata Metadata, err error,
+) {
+	return h.With.HandleSerialize(ctx, in, h.Next)
+}
+
+// SerializeHandlerFunc provides a wrapper around a function to be used as a serialize middleware handler.
+type SerializeHandlerFunc func(context.Context, SerializeInput) (SerializeOutput, Metadata, error)
+
+// HandleSerialize calls the wrapped function with the provided arguments.
+func (s SerializeHandlerFunc) HandleSerialize(ctx context.Context, in SerializeInput) (SerializeOutput, Metadata, error) {
+	return s(ctx, in)
+}
+
+var _ SerializeHandler = SerializeHandlerFunc(nil)
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/modman.toml 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/modman.toml
--- 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/modman.toml	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/modman.toml	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,10 @@
+[dependencies]
+  "github.com/jmespath/go-jmespath" = "v0.4.0"
+
+[modules]
+
+  [modules.codegen]
+    no_tag = true
+
+  [modules."codegen/smithy-go-codegen/build/test-generated/go/internal/testmodule"]
+    no_tag = true
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/private/requestcompression/gzip.go 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/private/requestcompression/gzip.go
--- 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/private/requestcompression/gzip.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/private/requestcompression/gzip.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,30 @@
+package requestcompression
+
+import (
+	"bytes"
+	"compress/gzip"
+	"fmt"
+	"io"
+)
+
+func gzipCompress(input io.Reader) ([]byte, error) {
+	var b bytes.Buffer
+	w, err := gzip.NewWriterLevel(&b, gzip.DefaultCompression)
+	if err != nil {
+		return nil, fmt.Errorf("failed to create gzip writer, %v", err)
+	}
+
+	inBytes, err := io.ReadAll(input)
+	if err != nil {
+		return nil, fmt.Errorf("failed read payload to compress, %v", err)
+	}
+
+	if _, err = w.Write(inBytes); err != nil {
+		return nil, fmt.Errorf("failed to write payload to be compressed, %v", err)
+	}
+	if err = w.Close(); err != nil {
+		return nil, fmt.Errorf("failed to flush payload being compressed, %v", err)
+	}
+
+	return b.Bytes(), nil
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/private/requestcompression/middleware_capture_request_compression.go 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/private/requestcompression/middleware_capture_request_compression.go
--- 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/private/requestcompression/middleware_capture_request_compression.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/private/requestcompression/middleware_capture_request_compression.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,52 @@
+package requestcompression
+
+import (
+	"bytes"
+	"context"
+	"fmt"
+	"github.com/aws/smithy-go/middleware"
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+	"io"
+	"net/http"
+)
+
+const captureUncompressedRequestID = "CaptureUncompressedRequest"
+
+// AddCaptureUncompressedRequestMiddleware captures http request before compress encoding for check
+func AddCaptureUncompressedRequestMiddleware(stack *middleware.Stack, buf *bytes.Buffer) error {
+	return stack.Serialize.Insert(&captureUncompressedRequestMiddleware{
+		buf: buf,
+	}, "RequestCompression", middleware.Before)
+}
+
+type captureUncompressedRequestMiddleware struct {
+	req   *http.Request
+	buf   *bytes.Buffer
+	bytes []byte
+}
+
+// ID returns id of the captureUncompressedRequestMiddleware
+func (*captureUncompressedRequestMiddleware) ID() string {
+	return captureUncompressedRequestID
+}
+
+// HandleSerialize captures request payload before it is compressed by request compression middleware
+func (m *captureUncompressedRequestMiddleware) HandleSerialize(ctx context.Context, input middleware.SerializeInput, next middleware.SerializeHandler,
+) (
+	output middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	request, ok := input.Request.(*smithyhttp.Request)
+	if !ok {
+		return output, metadata, fmt.Errorf("error when retrieving http request")
+	}
+
+	_, err = io.Copy(m.buf, request.GetStream())
+	if err != nil {
+		return output, metadata, fmt.Errorf("error when copying http request stream: %q", err)
+	}
+	if err = request.RewindStream(); err != nil {
+		return output, metadata, fmt.Errorf("error when rewinding request stream: %q", err)
+	}
+
+	return next.HandleSerialize(ctx, input)
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/private/requestcompression/request_compression.go 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/private/requestcompression/request_compression.go
--- 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/private/requestcompression/request_compression.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/private/requestcompression/request_compression.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,103 @@
+// Package requestcompression implements runtime support for smithy-modeled
+// request compression.
+//
+// This package is designated as private and is intended for use only by the
+// smithy client runtime. The exported API therein is not considered stable and
+// is subject to breaking changes without notice.
+package requestcompression
+
+import (
+	"bytes"
+	"context"
+	"fmt"
+	"github.com/aws/smithy-go/middleware"
+	"github.com/aws/smithy-go/transport/http"
+	"io"
+)
+
+const MaxRequestMinCompressSizeBytes = 10485760
+
+// Enumeration values for supported compress Algorithms.
+const (
+	GZIP = "gzip"
+)
+
+type compressFunc func(io.Reader) ([]byte, error)
+
+var allowedAlgorithms = map[string]compressFunc{
+	GZIP: gzipCompress,
+}
+
+// AddRequestCompression add requestCompression middleware to op stack
+func AddRequestCompression(stack *middleware.Stack, disabled bool, minBytes int64, algorithms []string) error {
+	return stack.Serialize.Add(&requestCompression{
+		disableRequestCompression:   disabled,
+		requestMinCompressSizeBytes: minBytes,
+		compressAlgorithms:          algorithms,
+	}, middleware.After)
+}
+
+type requestCompression struct {
+	disableRequestCompression   bool
+	requestMinCompressSizeBytes int64
+	compressAlgorithms          []string
+}
+
+// ID returns the ID of the middleware
+func (m requestCompression) ID() string {
+	return "RequestCompression"
+}
+
+// HandleSerialize gzip compress the request's stream/body if enabled by config fields
+func (m requestCompression) HandleSerialize(
+	ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler,
+) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	if m.disableRequestCompression {
+		return next.HandleSerialize(ctx, in)
+	}
+	// still need to check requestMinCompressSizeBytes in case it is out of range after service client config
+	if m.requestMinCompressSizeBytes < 0 || m.requestMinCompressSizeBytes > MaxRequestMinCompressSizeBytes {
+		return out, metadata, fmt.Errorf("invalid range for min request compression size bytes %d, must be within 0 and 10485760 inclusively", m.requestMinCompressSizeBytes)
+	}
+
+	req, ok := in.Request.(*http.Request)
+	if !ok {
+		return out, metadata, fmt.Errorf("unknown request type %T", req)
+	}
+
+	for _, algorithm := range m.compressAlgorithms {
+		compressFunc := allowedAlgorithms[algorithm]
+		if compressFunc != nil {
+			if stream := req.GetStream(); stream != nil {
+				size, found, err := req.StreamLength()
+				if err != nil {
+					return out, metadata, fmt.Errorf("error while finding request stream length, %v", err)
+				} else if !found || size < m.requestMinCompressSizeBytes {
+					return next.HandleSerialize(ctx, in)
+				}
+
+				compressedBytes, err := compressFunc(stream)
+				if err != nil {
+					return out, metadata, fmt.Errorf("failed to compress request stream, %v", err)
+				}
+
+				var newReq *http.Request
+				if newReq, err = req.SetStream(bytes.NewReader(compressedBytes)); err != nil {
+					return out, metadata, fmt.Errorf("failed to set request stream, %v", err)
+				}
+				*req = *newReq
+
+				if val := req.Header.Get("Content-Encoding"); val != "" {
+					req.Header.Set("Content-Encoding", fmt.Sprintf("%s, %s", val, algorithm))
+				} else {
+					req.Header.Set("Content-Encoding", algorithm)
+				}
+			}
+			break
+		}
+	}
+
+	return next.HandleSerialize(ctx, in)
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/properties.go 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/properties.go
--- 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/properties.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/properties.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,62 @@
+package smithy
+
+// PropertiesReader provides an interface for reading metadata from the
+// underlying metadata container.
+type PropertiesReader interface {
+	Get(key interface{}) interface{}
+}
+
+// Properties provides storing and reading metadata values. Keys may be any
+// comparable value type. Get and Set will panic if a key is not comparable.
+//
+// The zero value for a Properties instance is ready for reads/writes without
+// any additional initialization.
+type Properties struct {
+	values map[interface{}]interface{}
+}
+
+// Get attempts to retrieve the value the key points to. Returns nil if the
+// key was not found.
+//
+// Panics if key type is not comparable.
+func (m *Properties) Get(key interface{}) interface{} {
+	m.lazyInit()
+	return m.values[key]
+}
+
+// Set stores the value pointed to by the key. If a value already exists at
+// that key it will be replaced with the new value.
+//
+// Panics if the key type is not comparable.
+func (m *Properties) Set(key, value interface{}) {
+	m.lazyInit()
+	m.values[key] = value
+}
+
+// Has returns whether the key exists in the metadata.
+//
+// Panics if the key type is not comparable.
+func (m *Properties) Has(key interface{}) bool {
+	m.lazyInit()
+	_, ok := m.values[key]
+	return ok
+}
+
+// SetAll accepts all of the given Properties into the receiver, overwriting
+// any existing keys in the case of conflicts.
+func (m *Properties) SetAll(other *Properties) {
+	if other.values == nil {
+		return
+	}
+
+	m.lazyInit()
+	for k, v := range other.values {
+		m.values[k] = v
+	}
+}
+
+func (m *Properties) lazyInit() {
+	if m.values == nil {
+		m.values = map[interface{}]interface{}{}
+	}
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/ptr/doc.go 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/ptr/doc.go
--- 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/ptr/doc.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/ptr/doc.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,5 @@
+// Package ptr provides utilities for converting scalar literal type values to and from pointers inline.
+package ptr
+
+//go:generate go run -tags codegen generate.go
+//go:generate gofmt -w -s .
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/ptr/from_ptr.go 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/ptr/from_ptr.go
--- 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/ptr/from_ptr.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/ptr/from_ptr.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,601 @@
+// Code generated by smithy-go/ptr/generate.go DO NOT EDIT.
+package ptr
+
+import (
+	"time"
+)
+
+// ToBool returns bool value dereferenced if the passed
+// in pointer was not nil. Returns a bool zero value if the
+// pointer was nil.
+func ToBool(p *bool) (v bool) {
+	if p == nil {
+		return v
+	}
+
+	return *p
+}
+
+// ToBoolSlice returns a slice of bool values, that are
+// dereferenced if the passed in pointer was not nil. Returns a bool
+// zero value if the pointer was nil.
+func ToBoolSlice(vs []*bool) []bool {
+	ps := make([]bool, len(vs))
+	for i, v := range vs {
+		ps[i] = ToBool(v)
+	}
+
+	return ps
+}
+
+// ToBoolMap returns a map of bool values, that are
+// dereferenced if the passed in pointer was not nil. The bool
+// zero value is used if the pointer was nil.
+func ToBoolMap(vs map[string]*bool) map[string]bool {
+	ps := make(map[string]bool, len(vs))
+	for k, v := range vs {
+		ps[k] = ToBool(v)
+	}
+
+	return ps
+}
+
+// ToByte returns byte value dereferenced if the passed
+// in pointer was not nil. Returns a byte zero value if the
+// pointer was nil.
+func ToByte(p *byte) (v byte) {
+	if p == nil {
+		return v
+	}
+
+	return *p
+}
+
+// ToByteSlice returns a slice of byte values, that are
+// dereferenced if the passed in pointer was not nil. Returns a byte
+// zero value if the pointer was nil.
+func ToByteSlice(vs []*byte) []byte {
+	ps := make([]byte, len(vs))
+	for i, v := range vs {
+		ps[i] = ToByte(v)
+	}
+
+	return ps
+}
+
+// ToByteMap returns a map of byte values, that are
+// dereferenced if the passed in pointer was not nil. The byte
+// zero value is used if the pointer was nil.
+func ToByteMap(vs map[string]*byte) map[string]byte {
+	ps := make(map[string]byte, len(vs))
+	for k, v := range vs {
+		ps[k] = ToByte(v)
+	}
+
+	return ps
+}
+
+// ToString returns string value dereferenced if the passed
+// in pointer was not nil. Returns a string zero value if the
+// pointer was nil.
+func ToString(p *string) (v string) {
+	if p == nil {
+		return v
+	}
+
+	return *p
+}
+
+// ToStringSlice returns a slice of string values, that are
+// dereferenced if the passed in pointer was not nil. Returns a string
+// zero value if the pointer was nil.
+func ToStringSlice(vs []*string) []string {
+	ps := make([]string, len(vs))
+	for i, v := range vs {
+		ps[i] = ToString(v)
+	}
+
+	return ps
+}
+
+// ToStringMap returns a map of string values, that are
+// dereferenced if the passed in pointer was not nil. The string
+// zero value is used if the pointer was nil.
+func ToStringMap(vs map[string]*string) map[string]string {
+	ps := make(map[string]string, len(vs))
+	for k, v := range vs {
+		ps[k] = ToString(v)
+	}
+
+	return ps
+}
+
+// ToInt returns int value dereferenced if the passed
+// in pointer was not nil. Returns a int zero value if the
+// pointer was nil.
+func ToInt(p *int) (v int) {
+	if p == nil {
+		return v
+	}
+
+	return *p
+}
+
+// ToIntSlice returns a slice of int values, that are
+// dereferenced if the passed in pointer was not nil. Returns a int
+// zero value if the pointer was nil.
+func ToIntSlice(vs []*int) []int {
+	ps := make([]int, len(vs))
+	for i, v := range vs {
+		ps[i] = ToInt(v)
+	}
+
+	return ps
+}
+
+// ToIntMap returns a map of int values, that are
+// dereferenced if the passed in pointer was not nil. The int
+// zero value is used if the pointer was nil.
+func ToIntMap(vs map[string]*int) map[string]int {
+	ps := make(map[string]int, len(vs))
+	for k, v := range vs {
+		ps[k] = ToInt(v)
+	}
+
+	return ps
+}
+
+// ToInt8 returns int8 value dereferenced if the passed
+// in pointer was not nil. Returns a int8 zero value if the
+// pointer was nil.
+func ToInt8(p *int8) (v int8) {
+	if p == nil {
+		return v
+	}
+
+	return *p
+}
+
+// ToInt8Slice returns a slice of int8 values, that are
+// dereferenced if the passed in pointer was not nil. Returns a int8
+// zero value if the pointer was nil.
+func ToInt8Slice(vs []*int8) []int8 {
+	ps := make([]int8, len(vs))
+	for i, v := range vs {
+		ps[i] = ToInt8(v)
+	}
+
+	return ps
+}
+
+// ToInt8Map returns a map of int8 values, that are
+// dereferenced if the passed in pointer was not nil. The int8
+// zero value is used if the pointer was nil.
+func ToInt8Map(vs map[string]*int8) map[string]int8 {
+	ps := make(map[string]int8, len(vs))
+	for k, v := range vs {
+		ps[k] = ToInt8(v)
+	}
+
+	return ps
+}
+
+// ToInt16 returns int16 value dereferenced if the passed
+// in pointer was not nil. Returns a int16 zero value if the
+// pointer was nil.
+func ToInt16(p *int16) (v int16) {
+	if p == nil {
+		return v
+	}
+
+	return *p
+}
+
+// ToInt16Slice returns a slice of int16 values, that are
+// dereferenced if the passed in pointer was not nil. Returns a int16
+// zero value if the pointer was nil.
+func ToInt16Slice(vs []*int16) []int16 {
+	ps := make([]int16, len(vs))
+	for i, v := range vs {
+		ps[i] = ToInt16(v)
+	}
+
+	return ps
+}
+
+// ToInt16Map returns a map of int16 values, that are
+// dereferenced if the passed in pointer was not nil. The int16
+// zero value is used if the pointer was nil.
+func ToInt16Map(vs map[string]*int16) map[string]int16 {
+	ps := make(map[string]int16, len(vs))
+	for k, v := range vs {
+		ps[k] = ToInt16(v)
+	}
+
+	return ps
+}
+
+// ToInt32 returns int32 value dereferenced if the passed
+// in pointer was not nil. Returns a int32 zero value if the
+// pointer was nil.
+func ToInt32(p *int32) (v int32) {
+	if p == nil {
+		return v
+	}
+
+	return *p
+}
+
+// ToInt32Slice returns a slice of int32 values, that are
+// dereferenced if the passed in pointer was not nil. Returns a int32
+// zero value if the pointer was nil.
+func ToInt32Slice(vs []*int32) []int32 {
+	ps := make([]int32, len(vs))
+	for i, v := range vs {
+		ps[i] = ToInt32(v)
+	}
+
+	return ps
+}
+
+// ToInt32Map returns a map of int32 values, that are
+// dereferenced if the passed in pointer was not nil. The int32
+// zero value is used if the pointer was nil.
+func ToInt32Map(vs map[string]*int32) map[string]int32 {
+	ps := make(map[string]int32, len(vs))
+	for k, v := range vs {
+		ps[k] = ToInt32(v)
+	}
+
+	return ps
+}
+
+// ToInt64 returns int64 value dereferenced if the passed
+// in pointer was not nil. Returns a int64 zero value if the
+// pointer was nil.
+func ToInt64(p *int64) (v int64) {
+	if p == nil {
+		return v
+	}
+
+	return *p
+}
+
+// ToInt64Slice returns a slice of int64 values, that are
+// dereferenced if the passed in pointer was not nil. Returns a int64
+// zero value if the pointer was nil.
+func ToInt64Slice(vs []*int64) []int64 {
+	ps := make([]int64, len(vs))
+	for i, v := range vs {
+		ps[i] = ToInt64(v)
+	}
+
+	return ps
+}
+
+// ToInt64Map returns a map of int64 values, that are
+// dereferenced if the passed in pointer was not nil. The int64
+// zero value is used if the pointer was nil.
+func ToInt64Map(vs map[string]*int64) map[string]int64 {
+	ps := make(map[string]int64, len(vs))
+	for k, v := range vs {
+		ps[k] = ToInt64(v)
+	}
+
+	return ps
+}
+
+// ToUint returns uint value dereferenced if the passed
+// in pointer was not nil. Returns a uint zero value if the
+// pointer was nil.
+func ToUint(p *uint) (v uint) {
+	if p == nil {
+		return v
+	}
+
+	return *p
+}
+
+// ToUintSlice returns a slice of uint values, that are
+// dereferenced if the passed in pointer was not nil. Returns a uint
+// zero value if the pointer was nil.
+func ToUintSlice(vs []*uint) []uint {
+	ps := make([]uint, len(vs))
+	for i, v := range vs {
+		ps[i] = ToUint(v)
+	}
+
+	return ps
+}
+
+// ToUintMap returns a map of uint values, that are
+// dereferenced if the passed in pointer was not nil. The uint
+// zero value is used if the pointer was nil.
+func ToUintMap(vs map[string]*uint) map[string]uint {
+	ps := make(map[string]uint, len(vs))
+	for k, v := range vs {
+		ps[k] = ToUint(v)
+	}
+
+	return ps
+}
+
+// ToUint8 returns uint8 value dereferenced if the passed
+// in pointer was not nil. Returns a uint8 zero value if the
+// pointer was nil.
+func ToUint8(p *uint8) (v uint8) {
+	if p == nil {
+		return v
+	}
+
+	return *p
+}
+
+// ToUint8Slice returns a slice of uint8 values, that are
+// dereferenced if the passed in pointer was not nil. Returns a uint8
+// zero value if the pointer was nil.
+func ToUint8Slice(vs []*uint8) []uint8 {
+	ps := make([]uint8, len(vs))
+	for i, v := range vs {
+		ps[i] = ToUint8(v)
+	}
+
+	return ps
+}
+
+// ToUint8Map returns a map of uint8 values, that are
+// dereferenced if the passed in pointer was not nil. The uint8
+// zero value is used if the pointer was nil.
+func ToUint8Map(vs map[string]*uint8) map[string]uint8 {
+	ps := make(map[string]uint8, len(vs))
+	for k, v := range vs {
+		ps[k] = ToUint8(v)
+	}
+
+	return ps
+}
+
+// ToUint16 returns uint16 value dereferenced if the passed
+// in pointer was not nil. Returns a uint16 zero value if the
+// pointer was nil.
+func ToUint16(p *uint16) (v uint16) {
+	if p == nil {
+		return v
+	}
+
+	return *p
+}
+
+// ToUint16Slice returns a slice of uint16 values, that are
+// dereferenced if the passed in pointer was not nil. Returns a uint16
+// zero value if the pointer was nil.
+func ToUint16Slice(vs []*uint16) []uint16 {
+	ps := make([]uint16, len(vs))
+	for i, v := range vs {
+		ps[i] = ToUint16(v)
+	}
+
+	return ps
+}
+
+// ToUint16Map returns a map of uint16 values, that are
+// dereferenced if the passed in pointer was not nil. The uint16
+// zero value is used if the pointer was nil.
+func ToUint16Map(vs map[string]*uint16) map[string]uint16 {
+	ps := make(map[string]uint16, len(vs))
+	for k, v := range vs {
+		ps[k] = ToUint16(v)
+	}
+
+	return ps
+}
+
+// ToUint32 returns uint32 value dereferenced if the passed
+// in pointer was not nil. Returns a uint32 zero value if the
+// pointer was nil.
+func ToUint32(p *uint32) (v uint32) {
+	if p == nil {
+		return v
+	}
+
+	return *p
+}
+
+// ToUint32Slice returns a slice of uint32 values, that are
+// dereferenced if the passed in pointer was not nil. Returns a uint32
+// zero value if the pointer was nil.
+func ToUint32Slice(vs []*uint32) []uint32 {
+	ps := make([]uint32, len(vs))
+	for i, v := range vs {
+		ps[i] = ToUint32(v)
+	}
+
+	return ps
+}
+
+// ToUint32Map returns a map of uint32 values, that are
+// dereferenced if the passed in pointer was not nil. The uint32
+// zero value is used if the pointer was nil.
+func ToUint32Map(vs map[string]*uint32) map[string]uint32 {
+	ps := make(map[string]uint32, len(vs))
+	for k, v := range vs {
+		ps[k] = ToUint32(v)
+	}
+
+	return ps
+}
+
+// ToUint64 returns uint64 value dereferenced if the passed
+// in pointer was not nil. Returns a uint64 zero value if the
+// pointer was nil.
+func ToUint64(p *uint64) (v uint64) {
+	if p == nil {
+		return v
+	}
+
+	return *p
+}
+
+// ToUint64Slice returns a slice of uint64 values, that are
+// dereferenced if the passed in pointer was not nil. Returns a uint64
+// zero value if the pointer was nil.
+func ToUint64Slice(vs []*uint64) []uint64 {
+	ps := make([]uint64, len(vs))
+	for i, v := range vs {
+		ps[i] = ToUint64(v)
+	}
+
+	return ps
+}
+
+// ToUint64Map returns a map of uint64 values, that are
+// dereferenced if the passed in pointer was not nil. The uint64
+// zero value is used if the pointer was nil.
+func ToUint64Map(vs map[string]*uint64) map[string]uint64 {
+	ps := make(map[string]uint64, len(vs))
+	for k, v := range vs {
+		ps[k] = ToUint64(v)
+	}
+
+	return ps
+}
+
+// ToFloat32 returns float32 value dereferenced if the passed
+// in pointer was not nil. Returns a float32 zero value if the
+// pointer was nil.
+func ToFloat32(p *float32) (v float32) {
+	if p == nil {
+		return v
+	}
+
+	return *p
+}
+
+// ToFloat32Slice returns a slice of float32 values, that are
+// dereferenced if the passed in pointer was not nil. Returns a float32
+// zero value if the pointer was nil.
+func ToFloat32Slice(vs []*float32) []float32 {
+	ps := make([]float32, len(vs))
+	for i, v := range vs {
+		ps[i] = ToFloat32(v)
+	}
+
+	return ps
+}
+
+// ToFloat32Map returns a map of float32 values, that are
+// dereferenced if the passed in pointer was not nil. The float32
+// zero value is used if the pointer was nil.
+func ToFloat32Map(vs map[string]*float32) map[string]float32 {
+	ps := make(map[string]float32, len(vs))
+	for k, v := range vs {
+		ps[k] = ToFloat32(v)
+	}
+
+	return ps
+}
+
+// ToFloat64 returns float64 value dereferenced if the passed
+// in pointer was not nil. Returns a float64 zero value if the
+// pointer was nil.
+func ToFloat64(p *float64) (v float64) {
+	if p == nil {
+		return v
+	}
+
+	return *p
+}
+
+// ToFloat64Slice returns a slice of float64 values, that are
+// dereferenced if the passed in pointer was not nil. Returns a float64
+// zero value if the pointer was nil.
+func ToFloat64Slice(vs []*float64) []float64 {
+	ps := make([]float64, len(vs))
+	for i, v := range vs {
+		ps[i] = ToFloat64(v)
+	}
+
+	return ps
+}
+
+// ToFloat64Map returns a map of float64 values, that are
+// dereferenced if the passed in pointer was not nil. The float64
+// zero value is used if the pointer was nil.
+func ToFloat64Map(vs map[string]*float64) map[string]float64 {
+	ps := make(map[string]float64, len(vs))
+	for k, v := range vs {
+		ps[k] = ToFloat64(v)
+	}
+
+	return ps
+}
+
+// ToTime returns time.Time value dereferenced if the passed
+// in pointer was not nil. Returns a time.Time zero value if the
+// pointer was nil.
+func ToTime(p *time.Time) (v time.Time) {
+	if p == nil {
+		return v
+	}
+
+	return *p
+}
+
+// ToTimeSlice returns a slice of time.Time values, that are
+// dereferenced if the passed in pointer was not nil. Returns a time.Time
+// zero value if the pointer was nil.
+func ToTimeSlice(vs []*time.Time) []time.Time {
+	ps := make([]time.Time, len(vs))
+	for i, v := range vs {
+		ps[i] = ToTime(v)
+	}
+
+	return ps
+}
+
+// ToTimeMap returns a map of time.Time values, that are
+// dereferenced if the passed in pointer was not nil. The time.Time
+// zero value is used if the pointer was nil.
+func ToTimeMap(vs map[string]*time.Time) map[string]time.Time {
+	ps := make(map[string]time.Time, len(vs))
+	for k, v := range vs {
+		ps[k] = ToTime(v)
+	}
+
+	return ps
+}
+
+// ToDuration returns time.Duration value dereferenced if the passed
+// in pointer was not nil. Returns a time.Duration zero value if the
+// pointer was nil.
+func ToDuration(p *time.Duration) (v time.Duration) {
+	if p == nil {
+		return v
+	}
+
+	return *p
+}
+
+// ToDurationSlice returns a slice of time.Duration values, that are
+// dereferenced if the passed in pointer was not nil. Returns a time.Duration
+// zero value if the pointer was nil.
+func ToDurationSlice(vs []*time.Duration) []time.Duration {
+	ps := make([]time.Duration, len(vs))
+	for i, v := range vs {
+		ps[i] = ToDuration(v)
+	}
+
+	return ps
+}
+
+// ToDurationMap returns a map of time.Duration values, that are
+// dereferenced if the passed in pointer was not nil. The time.Duration
+// zero value is used if the pointer was nil.
+func ToDurationMap(vs map[string]*time.Duration) map[string]time.Duration {
+	ps := make(map[string]time.Duration, len(vs))
+	for k, v := range vs {
+		ps[k] = ToDuration(v)
+	}
+
+	return ps
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/ptr/gen_scalars.go 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/ptr/gen_scalars.go
--- 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/ptr/gen_scalars.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/ptr/gen_scalars.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,83 @@
+//go:build codegen
+// +build codegen
+
+package ptr
+
+import "strings"
+
+func GetScalars() Scalars {
+	return Scalars{
+		{Type: "bool"},
+		{Type: "byte"},
+		{Type: "string"},
+		{Type: "int"},
+		{Type: "int8"},
+		{Type: "int16"},
+		{Type: "int32"},
+		{Type: "int64"},
+		{Type: "uint"},
+		{Type: "uint8"},
+		{Type: "uint16"},
+		{Type: "uint32"},
+		{Type: "uint64"},
+		{Type: "float32"},
+		{Type: "float64"},
+		{Type: "Time", Import: &Import{Path: "time"}},
+		{Type: "Duration", Import: &Import{Path: "time"}},
+	}
+}
+
+// Import provides the import path and optional alias
+type Import struct {
+	Path  string
+	Alias string
+}
+
+// Package returns the Go package name for the import. Returns alias if set.
+func (i Import) Package() string {
+	if v := i.Alias; len(v) != 0 {
+		return v
+	}
+
+	if v := i.Path; len(v) != 0 {
+		parts := strings.Split(v, "/")
+		pkg := parts[len(parts)-1]
+		return pkg
+	}
+
+	return ""
+}
+
+// Scalar provides the definition of a type to generate pointer utilities for.
+type Scalar struct {
+	Type   string
+	Import *Import
+}
+
+// Name returns the exported function name for the type.
+func (t Scalar) Name() string {
+	return strings.Title(t.Type)
+}
+
+// Symbol returns the scalar's Go symbol with path if needed.
+func (t Scalar) Symbol() string {
+	if t.Import != nil {
+		return t.Import.Package() + "." + t.Type
+	}
+	return t.Type
+}
+
+// Scalars is a list of scalars.
+type Scalars []Scalar
+
+// Imports returns all imports for the scalars.
+func (ts Scalars) Imports() []*Import {
+	imports := []*Import{}
+	for _, t := range ts {
+		if v := t.Import; v != nil {
+			imports = append(imports, v)
+		}
+	}
+
+	return imports
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/ptr/to_ptr.go 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/ptr/to_ptr.go
--- 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/ptr/to_ptr.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/ptr/to_ptr.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,499 @@
+// Code generated by smithy-go/ptr/generate.go DO NOT EDIT.
+package ptr
+
+import (
+	"time"
+)
+
+// Bool returns a pointer value for the bool value passed in.
+func Bool(v bool) *bool {
+	return &v
+}
+
+// BoolSlice returns a slice of bool pointers from the values
+// passed in.
+func BoolSlice(vs []bool) []*bool {
+	ps := make([]*bool, len(vs))
+	for i, v := range vs {
+		vv := v
+		ps[i] = &vv
+	}
+
+	return ps
+}
+
+// BoolMap returns a map of bool pointers from the values
+// passed in.
+func BoolMap(vs map[string]bool) map[string]*bool {
+	ps := make(map[string]*bool, len(vs))
+	for k, v := range vs {
+		vv := v
+		ps[k] = &vv
+	}
+
+	return ps
+}
+
+// Byte returns a pointer value for the byte value passed in.
+func Byte(v byte) *byte {
+	return &v
+}
+
+// ByteSlice returns a slice of byte pointers from the values
+// passed in.
+func ByteSlice(vs []byte) []*byte {
+	ps := make([]*byte, len(vs))
+	for i, v := range vs {
+		vv := v
+		ps[i] = &vv
+	}
+
+	return ps
+}
+
+// ByteMap returns a map of byte pointers from the values
+// passed in.
+func ByteMap(vs map[string]byte) map[string]*byte {
+	ps := make(map[string]*byte, len(vs))
+	for k, v := range vs {
+		vv := v
+		ps[k] = &vv
+	}
+
+	return ps
+}
+
+// String returns a pointer value for the string value passed in.
+func String(v string) *string {
+	return &v
+}
+
+// StringSlice returns a slice of string pointers from the values
+// passed in.
+func StringSlice(vs []string) []*string {
+	ps := make([]*string, len(vs))
+	for i, v := range vs {
+		vv := v
+		ps[i] = &vv
+	}
+
+	return ps
+}
+
+// StringMap returns a map of string pointers from the values
+// passed in.
+func StringMap(vs map[string]string) map[string]*string {
+	ps := make(map[string]*string, len(vs))
+	for k, v := range vs {
+		vv := v
+		ps[k] = &vv
+	}
+
+	return ps
+}
+
+// Int returns a pointer value for the int value passed in.
+func Int(v int) *int {
+	return &v
+}
+
+// IntSlice returns a slice of int pointers from the values
+// passed in.
+func IntSlice(vs []int) []*int {
+	ps := make([]*int, len(vs))
+	for i, v := range vs {
+		vv := v
+		ps[i] = &vv
+	}
+
+	return ps
+}
+
+// IntMap returns a map of int pointers from the values
+// passed in.
+func IntMap(vs map[string]int) map[string]*int {
+	ps := make(map[string]*int, len(vs))
+	for k, v := range vs {
+		vv := v
+		ps[k] = &vv
+	}
+
+	return ps
+}
+
+// Int8 returns a pointer value for the int8 value passed in.
+func Int8(v int8) *int8 {
+	return &v
+}
+
+// Int8Slice returns a slice of int8 pointers from the values
+// passed in.
+func Int8Slice(vs []int8) []*int8 {
+	ps := make([]*int8, len(vs))
+	for i, v := range vs {
+		vv := v
+		ps[i] = &vv
+	}
+
+	return ps
+}
+
+// Int8Map returns a map of int8 pointers from the values
+// passed in.
+func Int8Map(vs map[string]int8) map[string]*int8 {
+	ps := make(map[string]*int8, len(vs))
+	for k, v := range vs {
+		vv := v
+		ps[k] = &vv
+	}
+
+	return ps
+}
+
+// Int16 returns a pointer value for the int16 value passed in.
+func Int16(v int16) *int16 {
+	return &v
+}
+
+// Int16Slice returns a slice of int16 pointers from the values
+// passed in.
+func Int16Slice(vs []int16) []*int16 {
+	ps := make([]*int16, len(vs))
+	for i, v := range vs {
+		vv := v
+		ps[i] = &vv
+	}
+
+	return ps
+}
+
+// Int16Map returns a map of int16 pointers from the values
+// passed in.
+func Int16Map(vs map[string]int16) map[string]*int16 {
+	ps := make(map[string]*int16, len(vs))
+	for k, v := range vs {
+		vv := v
+		ps[k] = &vv
+	}
+
+	return ps
+}
+
+// Int32 returns a pointer value for the int32 value passed in.
+func Int32(v int32) *int32 {
+	return &v
+}
+
+// Int32Slice returns a slice of int32 pointers from the values
+// passed in.
+func Int32Slice(vs []int32) []*int32 {
+	ps := make([]*int32, len(vs))
+	for i, v := range vs {
+		vv := v
+		ps[i] = &vv
+	}
+
+	return ps
+}
+
+// Int32Map returns a map of int32 pointers from the values
+// passed in.
+func Int32Map(vs map[string]int32) map[string]*int32 {
+	ps := make(map[string]*int32, len(vs))
+	for k, v := range vs {
+		vv := v
+		ps[k] = &vv
+	}
+
+	return ps
+}
+
+// Int64 returns a pointer value for the int64 value passed in.
+func Int64(v int64) *int64 {
+	return &v
+}
+
+// Int64Slice returns a slice of int64 pointers from the values
+// passed in.
+func Int64Slice(vs []int64) []*int64 {
+	ps := make([]*int64, len(vs))
+	for i, v := range vs {
+		vv := v
+		ps[i] = &vv
+	}
+
+	return ps
+}
+
+// Int64Map returns a map of int64 pointers from the values
+// passed in.
+func Int64Map(vs map[string]int64) map[string]*int64 {
+	ps := make(map[string]*int64, len(vs))
+	for k, v := range vs {
+		vv := v
+		ps[k] = &vv
+	}
+
+	return ps
+}
+
+// Uint returns a pointer value for the uint value passed in.
+func Uint(v uint) *uint {
+	return &v
+}
+
+// UintSlice returns a slice of uint pointers from the values
+// passed in.
+func UintSlice(vs []uint) []*uint {
+	ps := make([]*uint, len(vs))
+	for i, v := range vs {
+		vv := v
+		ps[i] = &vv
+	}
+
+	return ps
+}
+
+// UintMap returns a map of uint pointers from the values
+// passed in.
+func UintMap(vs map[string]uint) map[string]*uint {
+	ps := make(map[string]*uint, len(vs))
+	for k, v := range vs {
+		vv := v
+		ps[k] = &vv
+	}
+
+	return ps
+}
+
+// Uint8 returns a pointer value for the uint8 value passed in.
+func Uint8(v uint8) *uint8 {
+	return &v
+}
+
+// Uint8Slice returns a slice of uint8 pointers from the values
+// passed in.
+func Uint8Slice(vs []uint8) []*uint8 {
+	ps := make([]*uint8, len(vs))
+	for i, v := range vs {
+		vv := v
+		ps[i] = &vv
+	}
+
+	return ps
+}
+
+// Uint8Map returns a map of uint8 pointers from the values
+// passed in.
+func Uint8Map(vs map[string]uint8) map[string]*uint8 {
+	ps := make(map[string]*uint8, len(vs))
+	for k, v := range vs {
+		vv := v
+		ps[k] = &vv
+	}
+
+	return ps
+}
+
+// Uint16 returns a pointer value for the uint16 value passed in.
+func Uint16(v uint16) *uint16 {
+	return &v
+}
+
+// Uint16Slice returns a slice of uint16 pointers from the values
+// passed in.
+func Uint16Slice(vs []uint16) []*uint16 {
+	ps := make([]*uint16, len(vs))
+	for i, v := range vs {
+		vv := v
+		ps[i] = &vv
+	}
+
+	return ps
+}
+
+// Uint16Map returns a map of uint16 pointers from the values
+// passed in.
+func Uint16Map(vs map[string]uint16) map[string]*uint16 {
+	ps := make(map[string]*uint16, len(vs))
+	for k, v := range vs {
+		vv := v
+		ps[k] = &vv
+	}
+
+	return ps
+}
+
+// Uint32 returns a pointer value for the uint32 value passed in.
+func Uint32(v uint32) *uint32 {
+	return &v
+}
+
+// Uint32Slice returns a slice of uint32 pointers from the values
+// passed in.
+func Uint32Slice(vs []uint32) []*uint32 {
+	ps := make([]*uint32, len(vs))
+	for i, v := range vs {
+		vv := v
+		ps[i] = &vv
+	}
+
+	return ps
+}
+
+// Uint32Map returns a map of uint32 pointers from the values
+// passed in.
+func Uint32Map(vs map[string]uint32) map[string]*uint32 {
+	ps := make(map[string]*uint32, len(vs))
+	for k, v := range vs {
+		vv := v
+		ps[k] = &vv
+	}
+
+	return ps
+}
+
+// Uint64 returns a pointer value for the uint64 value passed in.
+func Uint64(v uint64) *uint64 {
+	return &v
+}
+
+// Uint64Slice returns a slice of uint64 pointers from the values
+// passed in.
+func Uint64Slice(vs []uint64) []*uint64 {
+	ps := make([]*uint64, len(vs))
+	for i, v := range vs {
+		vv := v
+		ps[i] = &vv
+	}
+
+	return ps
+}
+
+// Uint64Map returns a map of uint64 pointers from the values
+// passed in.
+func Uint64Map(vs map[string]uint64) map[string]*uint64 {
+	ps := make(map[string]*uint64, len(vs))
+	for k, v := range vs {
+		vv := v
+		ps[k] = &vv
+	}
+
+	return ps
+}
+
+// Float32 returns a pointer value for the float32 value passed in.
+func Float32(v float32) *float32 {
+	return &v
+}
+
+// Float32Slice returns a slice of float32 pointers from the values
+// passed in.
+func Float32Slice(vs []float32) []*float32 {
+	ps := make([]*float32, len(vs))
+	for i, v := range vs {
+		vv := v
+		ps[i] = &vv
+	}
+
+	return ps
+}
+
+// Float32Map returns a map of float32 pointers from the values
+// passed in.
+func Float32Map(vs map[string]float32) map[string]*float32 {
+	ps := make(map[string]*float32, len(vs))
+	for k, v := range vs {
+		vv := v
+		ps[k] = &vv
+	}
+
+	return ps
+}
+
+// Float64 returns a pointer value for the float64 value passed in.
+func Float64(v float64) *float64 {
+	return &v
+}
+
+// Float64Slice returns a slice of float64 pointers from the values
+// passed in.
+func Float64Slice(vs []float64) []*float64 {
+	ps := make([]*float64, len(vs))
+	for i, v := range vs {
+		vv := v
+		ps[i] = &vv
+	}
+
+	return ps
+}
+
+// Float64Map returns a map of float64 pointers from the values
+// passed in.
+func Float64Map(vs map[string]float64) map[string]*float64 {
+	ps := make(map[string]*float64, len(vs))
+	for k, v := range vs {
+		vv := v
+		ps[k] = &vv
+	}
+
+	return ps
+}
+
+// Time returns a pointer value for the time.Time value passed in.
+func Time(v time.Time) *time.Time {
+	return &v
+}
+
+// TimeSlice returns a slice of time.Time pointers from the values
+// passed in.
+func TimeSlice(vs []time.Time) []*time.Time {
+	ps := make([]*time.Time, len(vs))
+	for i, v := range vs {
+		vv := v
+		ps[i] = &vv
+	}
+
+	return ps
+}
+
+// TimeMap returns a map of time.Time pointers from the values
+// passed in.
+func TimeMap(vs map[string]time.Time) map[string]*time.Time {
+	ps := make(map[string]*time.Time, len(vs))
+	for k, v := range vs {
+		vv := v
+		ps[k] = &vv
+	}
+
+	return ps
+}
+
+// Duration returns a pointer value for the time.Duration value passed in.
+func Duration(v time.Duration) *time.Duration {
+	return &v
+}
+
+// DurationSlice returns a slice of time.Duration pointers from the values
+// passed in.
+func DurationSlice(vs []time.Duration) []*time.Duration {
+	ps := make([]*time.Duration, len(vs))
+	for i, v := range vs {
+		vv := v
+		ps[i] = &vv
+	}
+
+	return ps
+}
+
+// DurationMap returns a map of time.Duration pointers from the values
+// passed in.
+func DurationMap(vs map[string]time.Duration) map[string]*time.Duration {
+	ps := make(map[string]*time.Duration, len(vs))
+	for k, v := range vs {
+		vv := v
+		ps[k] = &vv
+	}
+
+	return ps
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/rand/doc.go 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/rand/doc.go
--- 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/rand/doc.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/rand/doc.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,3 @@
+// Package rand provides utilities for creating and working with random value
+// generators.
+package rand
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/rand/rand.go 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/rand/rand.go
--- 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/rand/rand.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/rand/rand.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,31 @@
+package rand
+
+import (
+	"crypto/rand"
+	"fmt"
+	"io"
+	"math/big"
+)
+
+func init() {
+	Reader = rand.Reader
+}
+
+// Reader provides a random reader that can reset during testing.
+var Reader io.Reader
+
+// Int63n returns an int64 between zero and the value of max, read from an io.Reader source.
+func Int63n(reader io.Reader, max int64) (int64, error) {
+	bi, err := rand.Int(reader, big.NewInt(max))
+	if err != nil {
+		return 0, fmt.Errorf("failed to read random value, %w", err)
+	}
+
+	return bi.Int64(), nil
+}
+
+// CryptoRandInt63n returns a random int64 between zero and value of max
+// obtained from the crypto rand source.
+func CryptoRandInt63n(max int64) (int64, error) {
+	return Int63n(Reader, max)
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/rand/uuid.go 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/rand/uuid.go
--- 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/rand/uuid.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/rand/uuid.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,87 @@
+package rand
+
+import (
+	"encoding/hex"
+	"io"
+)
+
+const dash byte = '-'
+
+// UUIDIdempotencyToken provides a utility to get idempotency tokens in the
+// UUID format.
+type UUIDIdempotencyToken struct {
+	uuid *UUID
+}
+
+// NewUUIDIdempotencyToken returns an idempotency token provider returning
+// tokens in the UUID random format using the reader provided.
+func NewUUIDIdempotencyToken(r io.Reader) *UUIDIdempotencyToken {
+	return &UUIDIdempotencyToken{uuid: NewUUID(r)}
+}
+
+// GetIdempotencyToken returns a random UUID value for Idempotency token.
+func (u UUIDIdempotencyToken) GetIdempotencyToken() (string, error) {
+	return u.uuid.GetUUID()
+}
+
+// UUID provides computing random UUID version 4 values from a random source
+// reader.
+type UUID struct {
+	randSrc io.Reader
+}
+
+// NewUUID returns an initialized UUID value that can be used to retrieve
+// random UUID version 4 values.
+func NewUUID(r io.Reader) *UUID {
+	return &UUID{randSrc: r}
+}
+
+// GetUUID returns a random UUID version 4 string representation sourced from the random reader the
+// UUID was created with. Returns an error if unable to compute the UUID.
+func (r *UUID) GetUUID() (string, error) {
+	var b [16]byte
+	if _, err := io.ReadFull(r.randSrc, b[:]); err != nil {
+		return "", err
+	}
+	r.makeUUIDv4(b[:])
+	return format(b), nil
+}
+
+// GetBytes returns a byte slice containing a random UUID version 4 sourced from the random reader the
+// UUID was created with. Returns an error if unable to compute the UUID.
+func (r *UUID) GetBytes() (u []byte, err error) {
+	u = make([]byte, 16)
+	if _, err = io.ReadFull(r.randSrc, u); err != nil {
+		return u, err
+	}
+	r.makeUUIDv4(u)
+	return u, nil
+}
+
+func (r *UUID) makeUUIDv4(u []byte) {
+	// 13th character is "4"
+	u[6] = (u[6] & 0x0f) | 0x40 // Version 4
+	// 17th character is "8", "9", "a", or "b"
+	u[8] = (u[8] & 0x3f) | 0x80 // Variant most significant bits are 10x where x can be either 1 or 0
+}
+
+// Format returns the canonical text representation of a UUID.
+// This implementation is optimized to not use fmt.
+// Example: 82e42f16-b6cc-4d5b-95f5-d403c4befd3d
+func format(u [16]byte) string {
+	// https://en.wikipedia.org/wiki/Universally_unique_identifier#Version_4_.28random.29
+
+	var scratch [36]byte
+
+	hex.Encode(scratch[:8], u[0:4])
+	scratch[8] = dash
+	hex.Encode(scratch[9:13], u[4:6])
+	scratch[13] = dash
+	hex.Encode(scratch[14:18], u[6:8])
+	scratch[18] = dash
+	hex.Encode(scratch[19:23], u[8:10])
+	scratch[23] = dash
+	hex.Encode(scratch[24:], u[10:])
+
+	return string(scratch[:])
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/time/time.go 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/time/time.go
--- 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/time/time.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/time/time.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,134 @@
+package time
+
+import (
+	"context"
+	"fmt"
+	"math/big"
+	"strings"
+	"time"
+)
+
+const (
+	// dateTimeFormat is an IMF-fixdate formatted RFC3339 section 5.6
+	dateTimeFormatInput    = "2006-01-02T15:04:05.999999999Z"
+	dateTimeFormatInputNoZ = "2006-01-02T15:04:05.999999999"
+	dateTimeFormatOutput   = "2006-01-02T15:04:05.999Z"
+
+	// httpDateFormat is a date time defined by RFC 7231#section-7.1.1.1
+	// IMF-fixdate with no UTC offset.
+	httpDateFormat = "Mon, 02 Jan 2006 15:04:05 GMT"
+	// Additional formats needed for compatibility.
+	httpDateFormatSingleDigitDay             = "Mon, _2 Jan 2006 15:04:05 GMT"
+	httpDateFormatSingleDigitDayTwoDigitYear = "Mon, _2 Jan 06 15:04:05 GMT"
+)
+
+var millisecondFloat = big.NewFloat(1e3)
+
+// FormatDateTime formats value as a date-time, (RFC3339 section 5.6)
+//
+// Example: 1985-04-12T23:20:50.52Z
+func FormatDateTime(value time.Time) string {
+	return value.UTC().Format(dateTimeFormatOutput)
+}
+
+// ParseDateTime parses a string as a date-time, (RFC3339 section 5.6)
+//
+// Example: 1985-04-12T23:20:50.52Z
+func ParseDateTime(value string) (time.Time, error) {
+	return tryParse(value,
+		dateTimeFormatInput,
+		dateTimeFormatInputNoZ,
+		time.RFC3339Nano,
+		time.RFC3339,
+	)
+}
+
+// FormatHTTPDate formats value as a http-date, (RFC 7231#section-7.1.1.1 IMF-fixdate)
+//
+// Example: Tue, 29 Apr 2014 18:30:38 GMT
+func FormatHTTPDate(value time.Time) string {
+	return value.UTC().Format(httpDateFormat)
+}
+
+// ParseHTTPDate parses a string as a http-date, (RFC 7231#section-7.1.1.1 IMF-fixdate)
+//
+// Example: Tue, 29 Apr 2014 18:30:38 GMT
+func ParseHTTPDate(value string) (time.Time, error) {
+	return tryParse(value,
+		httpDateFormat,
+		httpDateFormatSingleDigitDay,
+		httpDateFormatSingleDigitDayTwoDigitYear,
+		time.RFC850,
+		time.ANSIC,
+	)
+}
+
+// FormatEpochSeconds returns value as a Unix time in seconds with decimal precision
+//
+// Example: 1515531081.123
+func FormatEpochSeconds(value time.Time) float64 {
+	ms := value.UnixNano() / int64(time.Millisecond)
+	return float64(ms) / 1e3
+}
+
+// ParseEpochSeconds returns value as a Unix time in seconds with decimal precision
+//
+// Example: 1515531081.123
+func ParseEpochSeconds(value float64) time.Time {
+	f := big.NewFloat(value)
+	f = f.Mul(f, millisecondFloat)
+	i, _ := f.Int64()
+	// Offset to `UTC` because time.Unix returns the time value based on system
+	// local setting.
+	return time.Unix(0, i*1e6).UTC()
+}
+
+func tryParse(v string, formats ...string) (time.Time, error) {
+	var errs parseErrors
+	for _, f := range formats {
+		t, err := time.Parse(f, v)
+		if err != nil {
+			errs = append(errs, parseError{
+				Format: f,
+				Err:    err,
+			})
+			continue
+		}
+		return t, nil
+	}
+
+	return time.Time{}, fmt.Errorf("unable to parse time string, %w", errs)
+}
+
+type parseErrors []parseError
+
+func (es parseErrors) Error() string {
+	var s strings.Builder
+	for _, e := range es {
+		fmt.Fprintf(&s, "\n * %q: %v", e.Format, e.Err)
+	}
+
+	return "parse errors:" + s.String()
+}
+
+type parseError struct {
+	Format string
+	Err    error
+}
+
+// SleepWithContext will wait for the timer duration to expire, or until the context
+// is canceled. Whichever happens first. If the context is canceled the
+// Context's error will be returned.
+func SleepWithContext(ctx context.Context, dur time.Duration) error {
+	t := time.NewTimer(dur)
+	defer t.Stop()
+
+	select {
+	case <-t.C:
+		break
+	case <-ctx.Done():
+		return ctx.Err()
+	}
+
+	return nil
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/transport/http/auth.go 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/transport/http/auth.go
--- 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/transport/http/auth.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/transport/http/auth.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,21 @@
+package http
+
+import (
+	"context"
+
+	smithy "github.com/aws/smithy-go"
+	"github.com/aws/smithy-go/auth"
+)
+
+// AuthScheme defines an HTTP authentication scheme.
+type AuthScheme interface {
+	SchemeID() string
+	IdentityResolver(auth.IdentityResolverOptions) auth.IdentityResolver
+	Signer() Signer
+}
+
+// Signer defines the interface through which HTTP requests are supplemented
+// with an Identity.
+type Signer interface {
+	SignRequest(context.Context, *Request, auth.Identity, smithy.Properties) error
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/transport/http/auth_schemes.go 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/transport/http/auth_schemes.go
--- 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/transport/http/auth_schemes.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/transport/http/auth_schemes.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,45 @@
+package http
+
+import (
+	"context"
+
+	smithy "github.com/aws/smithy-go"
+	"github.com/aws/smithy-go/auth"
+)
+
+// NewAnonymousScheme returns the anonymous HTTP auth scheme.
+func NewAnonymousScheme() AuthScheme {
+	return &authScheme{
+		schemeID: auth.SchemeIDAnonymous,
+		signer:   &nopSigner{},
+	}
+}
+
+// authScheme is parameterized to generically implement the exported AuthScheme
+// interface
+type authScheme struct {
+	schemeID string
+	signer   Signer
+}
+
+var _ AuthScheme = (*authScheme)(nil)
+
+func (s *authScheme) SchemeID() string {
+	return s.schemeID
+}
+
+func (s *authScheme) IdentityResolver(o auth.IdentityResolverOptions) auth.IdentityResolver {
+	return o.GetIdentityResolver(s.schemeID)
+}
+
+func (s *authScheme) Signer() Signer {
+	return s.signer
+}
+
+type nopSigner struct{}
+
+var _ Signer = (*nopSigner)(nil)
+
+func (*nopSigner) SignRequest(context.Context, *Request, auth.Identity, smithy.Properties) error {
+	return nil
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/transport/http/checksum_middleware.go 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/transport/http/checksum_middleware.go
--- 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/transport/http/checksum_middleware.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/transport/http/checksum_middleware.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,70 @@
+package http
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/aws/smithy-go/middleware"
+)
+
+const contentMD5Header = "Content-Md5"
+
+// contentMD5Checksum provides a middleware to compute and set
+// content-md5 checksum for a http request
+type contentMD5Checksum struct {
+}
+
+// AddContentChecksumMiddleware adds checksum middleware to middleware's
+// build step.
+func AddContentChecksumMiddleware(stack *middleware.Stack) error {
+	// This middleware must be executed before request body is set.
+	return stack.Build.Add(&contentMD5Checksum{}, middleware.Before)
+}
+
+// ID returns the identifier for the checksum middleware
+func (m *contentMD5Checksum) ID() string { return "ContentChecksum" }
+
+// HandleBuild adds behavior to compute md5 checksum and add content-md5 header
+// on http request
+func (m *contentMD5Checksum) HandleBuild(
+	ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler,
+) (
+	out middleware.BuildOutput, metadata middleware.Metadata, err error,
+) {
+	req, ok := in.Request.(*Request)
+	if !ok {
+		return out, metadata, fmt.Errorf("unknown request type %T", req)
+	}
+
+	// if Content-MD5 header is already present, return
+	if v := req.Header.Get(contentMD5Header); len(v) != 0 {
+		return next.HandleBuild(ctx, in)
+	}
+
+	// fetch the request stream.
+	stream := req.GetStream()
+	// compute checksum if payload is explicit
+	if stream != nil {
+		if !req.IsStreamSeekable() {
+			return out, metadata, fmt.Errorf(
+				"unseekable stream is not supported for computing md5 checksum")
+		}
+
+		v, err := computeMD5Checksum(stream)
+		if err != nil {
+			return out, metadata, fmt.Errorf("error computing md5 checksum, %w", err)
+		}
+
+		// reset the request stream
+		if err := req.RewindStream(); err != nil {
+			return out, metadata, fmt.Errorf(
+				"error rewinding request stream after computing md5 checksum, %w", err)
+		}
+
+		// set the 'Content-MD5' header
+		req.Header.Set(contentMD5Header, string(v))
+	}
+
+	// set md5 header value
+	return next.HandleBuild(ctx, in)
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/transport/http/client.go 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/transport/http/client.go
--- 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/transport/http/client.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/transport/http/client.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,120 @@
+package http
+
+import (
+	"context"
+	"fmt"
+	"net/http"
+
+	smithy "github.com/aws/smithy-go"
+	"github.com/aws/smithy-go/middleware"
+)
+
+// ClientDo provides the interface for custom HTTP client implementations.
+type ClientDo interface {
+	Do(*http.Request) (*http.Response, error)
+}
+
+// ClientDoFunc provides a helper to wrap a function as an HTTP client for
+// round tripping requests.
+type ClientDoFunc func(*http.Request) (*http.Response, error)
+
+// Do will invoke the underlying func, returning the result.
+func (fn ClientDoFunc) Do(r *http.Request) (*http.Response, error) {
+	return fn(r)
+}
+
+// ClientHandler wraps a client that implements the HTTP Do method. Standard
+// implementation is http.Client.
+type ClientHandler struct {
+	client ClientDo
+}
+
+// NewClientHandler returns an initialized middleware handler for the client.
+func NewClientHandler(client ClientDo) ClientHandler {
+	return ClientHandler{
+		client: client,
+	}
+}
+
+// Handle implements the middleware Handler interface, that will invoke the
+// underlying HTTP client. Requires the input to be a Smithy *Request. Returns
+// a smithy *Response, or error if the request failed.
+func (c ClientHandler) Handle(ctx context.Context, input interface{}) (
+	out interface{}, metadata middleware.Metadata, err error,
+) {
+	req, ok := input.(*Request)
+	if !ok {
+		return nil, metadata, fmt.Errorf("expect Smithy http.Request value as input, got unsupported type %T", input)
+	}
+
+	builtRequest := req.Build(ctx)
+	if err := ValidateEndpointHost(builtRequest.Host); err != nil {
+		return nil, metadata, err
+	}
+
+	resp, err := c.client.Do(builtRequest)
+	if resp == nil {
+		// Ensure a http response value is always present to prevent unexpected
+		// panics.
+		resp = &http.Response{
+			Header: http.Header{},
+			Body:   http.NoBody,
+		}
+	}
+	if err != nil {
+		err = &RequestSendError{Err: err}
+
+		// Override the error with a context canceled error, if that was canceled.
+		select {
+		case <-ctx.Done():
+			err = &smithy.CanceledError{Err: ctx.Err()}
+		default:
+		}
+	}
+
+	// HTTP RoundTripper *should* close the request body. But this may not happen in a timely manner.
+	// So instead Smithy *Request Build wraps the body to be sent in a safe closer that will clear the
+	// stream reference so that it can be safely reused.
+	if builtRequest.Body != nil {
+		_ = builtRequest.Body.Close()
+	}
+
+	return &Response{Response: resp}, metadata, err
+}
+
+// RequestSendError provides a generic request transport error. This error
+// should wrap errors making HTTP client requests.
+//
+// The ClientHandler will wrap the HTTP client's error if the client request
+// fails, and did not fail because of context canceled.
+type RequestSendError struct {
+	Err error
+}
+
+// ConnectionError returns that the error is related to not being able to send
+// the request, or receive a response from the service.
+func (e *RequestSendError) ConnectionError() bool {
+	return true
+}
+
+// Unwrap returns the underlying error, if there was one.
+func (e *RequestSendError) Unwrap() error {
+	return e.Err
+}
+
+func (e *RequestSendError) Error() string {
+	return fmt.Sprintf("request send failed, %v", e.Err)
+}
+
+// NopClient provides a client that ignores the request, and returns an empty
+// successful HTTP response value.
+type NopClient struct{}
+
+// Do ignores the request and returns a 200 status empty response.
+func (NopClient) Do(r *http.Request) (*http.Response, error) {
+	return &http.Response{
+		StatusCode: 200,
+		Header:     http.Header{},
+		Body:       http.NoBody,
+	}, nil
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/transport/http/doc.go 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/transport/http/doc.go
--- 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/transport/http/doc.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/transport/http/doc.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,5 @@
+/*
+Package http provides the HTTP transport client and request/response types
+needed to round trip API operation calls with an service.
+*/
+package http
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/transport/http/headerlist.go 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/transport/http/headerlist.go
--- 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/transport/http/headerlist.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/transport/http/headerlist.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,163 @@
+package http
+
+import (
+	"fmt"
+	"strconv"
+	"strings"
+	"unicode"
+)
+
+func splitHeaderListValues(vs []string, splitFn func(string) ([]string, error)) ([]string, error) {
+	values := make([]string, 0, len(vs))
+
+	for i := 0; i < len(vs); i++ {
+		parts, err := splitFn(vs[i])
+		if err != nil {
+			return nil, err
+		}
+		values = append(values, parts...)
+	}
+
+	return values, nil
+}
+
+// SplitHeaderListValues attempts to split the elements of the slice by commas,
+// and return a list of all values separated. Returns error if unable to
+// separate the values.
+func SplitHeaderListValues(vs []string) ([]string, error) {
+	return splitHeaderListValues(vs, quotedCommaSplit)
+}
+
+func quotedCommaSplit(v string) (parts []string, err error) {
+	v = strings.TrimSpace(v)
+
+	expectMore := true
+	for i := 0; i < len(v); i++ {
+		if unicode.IsSpace(rune(v[i])) {
+			continue
+		}
+		expectMore = false
+
+		// leading  space in part is ignored.
+		// Start of value must be non-space, or quote.
+		//
+		// - If quote, enter quoted mode, find next non-escaped quote to
+		//   terminate the value.
+		// - Otherwise, find next comma to terminate value.
+
+		remaining := v[i:]
+
+		var value string
+		var valueLen int
+		if remaining[0] == '"' {
+			//------------------------------
+			// Quoted value
+			//------------------------------
+			var j int
+			var skipQuote bool
+			for j += 1; j < len(remaining); j++ {
+				if remaining[j] == '\\' || (remaining[j] != '\\' && skipQuote) {
+					skipQuote = !skipQuote
+					continue
+				}
+				if remaining[j] == '"' {
+					break
+				}
+			}
+			if j == len(remaining) || j == 1 {
+				return nil, fmt.Errorf("value %v missing closing double quote",
+					remaining)
+			}
+			valueLen = j + 1
+
+			tail := remaining[valueLen:]
+			var k int
+			for ; k < len(tail); k++ {
+				if !unicode.IsSpace(rune(tail[k])) && tail[k] != ',' {
+					return nil, fmt.Errorf("value %v has non-space trailing characters",
+						remaining)
+				}
+				if tail[k] == ',' {
+					expectMore = true
+					break
+				}
+			}
+			value = remaining[:valueLen]
+			value, err = strconv.Unquote(value)
+			if err != nil {
+				return nil, fmt.Errorf("failed to unquote value %v, %w", value, err)
+			}
+
+			// Pad valueLen to include trailing space(s) so `i` is updated correctly.
+			valueLen += k
+
+		} else {
+			//------------------------------
+			// Unquoted value
+			//------------------------------
+
+			// Index of the next comma is the length of the value, or end of string.
+			valueLen = strings.Index(remaining, ",")
+			if valueLen != -1 {
+				expectMore = true
+			} else {
+				valueLen = len(remaining)
+			}
+			value = strings.TrimSpace(remaining[:valueLen])
+		}
+
+		i += valueLen
+		parts = append(parts, value)
+
+	}
+
+	if expectMore {
+		parts = append(parts, "")
+	}
+
+	return parts, nil
+}
+
+// SplitHTTPDateTimestampHeaderListValues attempts to split the HTTP-Date
+// timestamp values in the slice by commas, and return a list of all values
+// separated. The split is aware of the HTTP-Date timestamp format, and will skip
+// comma within the timestamp value. Returns an error if unable to split the
+// timestamp values.
+func SplitHTTPDateTimestampHeaderListValues(vs []string) ([]string, error) {
+	return splitHeaderListValues(vs, splitHTTPDateHeaderValue)
+}
+
+func splitHTTPDateHeaderValue(v string) ([]string, error) {
+	if n := strings.Count(v, ","); n <= 1 {
+		// Nothing to do if only contains a no, or single HTTPDate value
+		return []string{v}, nil
+	} else if n%2 == 0 {
+		return nil, fmt.Errorf("invalid timestamp HTTPDate header comma separations, %q", v)
+	}
+
+	var parts []string
+	var i, j int
+
+	var doSplit bool
+	for ; i < len(v); i++ {
+		if v[i] == ',' {
+			if doSplit {
+				doSplit = false
+				parts = append(parts, strings.TrimSpace(v[j:i]))
+				j = i + 1
+			} else {
+				// Skip the first comma in the timestamp value since that
+				// separates the day from the rest of the timestamp.
+				//
+				// Tue, 17 Dec 2019 23:48:18 GMT
+				doSplit = true
+			}
+		}
+	}
+	// Add final part
+	if j < len(v) {
+		parts = append(parts, strings.TrimSpace(v[j:]))
+	}
+
+	return parts, nil
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/transport/http/host.go 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/transport/http/host.go
--- 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/transport/http/host.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/transport/http/host.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,89 @@
+package http
+
+import (
+	"fmt"
+	"net"
+	"strconv"
+	"strings"
+)
+
+// ValidateEndpointHost validates that the host string passed in is a valid RFC
+// 3986 host. Returns error if the host is not valid.
+func ValidateEndpointHost(host string) error {
+	var errors strings.Builder
+	var hostname string
+	var port string
+	var err error
+
+	if strings.Contains(host, ":") {
+		hostname, port, err = net.SplitHostPort(host)
+		if err != nil {
+			errors.WriteString(fmt.Sprintf("\n endpoint %v, failed to parse, got ", host))
+			errors.WriteString(err.Error())
+		}
+
+		if !ValidPortNumber(port) {
+			errors.WriteString(fmt.Sprintf("port number should be in range [0-65535], got %v", port))
+		}
+	} else {
+		hostname = host
+	}
+
+	labels := strings.Split(hostname, ".")
+	for i, label := range labels {
+		if i == len(labels)-1 && len(label) == 0 {
+			// Allow trailing dot for FQDN hosts.
+			continue
+		}
+
+		if !ValidHostLabel(label) {
+			errors.WriteString("\nendpoint host domain labels must match \"[a-zA-Z0-9-]{1,63}\", but found: ")
+			errors.WriteString(label)
+		}
+	}
+
+	if len(hostname) == 0 && len(port) != 0 {
+		errors.WriteString("\nendpoint host with port must not be empty")
+	}
+
+	if len(hostname) > 255 {
+		errors.WriteString(fmt.Sprintf("\nendpoint host must be less than 255 characters, but was %d", len(hostname)))
+	}
+
+	if len(errors.String()) > 0 {
+		return fmt.Errorf("invalid endpoint host%s", errors.String())
+	}
+	return nil
+}
+
+// ValidPortNumber returns whether the port is valid RFC 3986 port.
+func ValidPortNumber(port string) bool {
+	i, err := strconv.Atoi(port)
+	if err != nil {
+		return false
+	}
+
+	if i < 0 || i > 65535 {
+		return false
+	}
+	return true
+}
+
+// ValidHostLabel returns whether the label is a valid RFC 3986 host abel.
+func ValidHostLabel(label string) bool {
+	if l := len(label); l == 0 || l > 63 {
+		return false
+	}
+	for _, r := range label {
+		switch {
+		case r >= '0' && r <= '9':
+		case r >= 'A' && r <= 'Z':
+		case r >= 'a' && r <= 'z':
+		case r == '-':
+		default:
+			return false
+		}
+	}
+
+	return true
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/transport/http/internal/io/safe.go 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/transport/http/internal/io/safe.go
--- 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/transport/http/internal/io/safe.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/transport/http/internal/io/safe.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,75 @@
+package io
+
+import (
+	"io"
+	"sync"
+)
+
+// NewSafeReadCloser returns a new safeReadCloser that wraps readCloser.
+func NewSafeReadCloser(readCloser io.ReadCloser) io.ReadCloser {
+	sr := &safeReadCloser{
+		readCloser: readCloser,
+	}
+
+	if _, ok := readCloser.(io.WriterTo); ok {
+		return &safeWriteToReadCloser{safeReadCloser: sr}
+	}
+
+	return sr
+}
+
+// safeWriteToReadCloser wraps a safeReadCloser but exposes a WriteTo interface implementation. This will panic
+// if the underlying io.ReadClose does not support WriteTo. Use NewSafeReadCloser to ensure the proper handling of this
+// type.
+type safeWriteToReadCloser struct {
+	*safeReadCloser
+}
+
+// WriteTo implements the io.WriteTo interface.
+func (r *safeWriteToReadCloser) WriteTo(w io.Writer) (int64, error) {
+	r.safeReadCloser.mtx.Lock()
+	defer r.safeReadCloser.mtx.Unlock()
+
+	if r.safeReadCloser.closed {
+		return 0, io.EOF
+	}
+
+	return r.safeReadCloser.readCloser.(io.WriterTo).WriteTo(w)
+}
+
+// safeReadCloser wraps a io.ReadCloser and presents an io.ReadCloser interface. When Close is called on safeReadCloser
+// the underlying Close method will be executed, and then the reference to the reader will be dropped. This type
+// is meant to be used with the net/http library which will retain a reference to the request body for the lifetime
+// of a goroutine connection. Wrapping in this manner will ensure that no data race conditions are falsely reported.
+// This type is thread-safe.
+type safeReadCloser struct {
+	readCloser io.ReadCloser
+	closed     bool
+	mtx        sync.Mutex
+}
+
+// Read reads up to len(p) bytes into p from the underlying read. If the reader is closed io.EOF will be returned.
+func (r *safeReadCloser) Read(p []byte) (n int, err error) {
+	r.mtx.Lock()
+	defer r.mtx.Unlock()
+	if r.closed {
+		return 0, io.EOF
+	}
+
+	return r.readCloser.Read(p)
+}
+
+// Close calls the underlying io.ReadCloser's Close method, removes the reference to the reader, and returns any error
+// reported from Close. Subsequent calls to Close will always return a nil error.
+func (r *safeReadCloser) Close() error {
+	r.mtx.Lock()
+	defer r.mtx.Unlock()
+	if r.closed {
+		return nil
+	}
+
+	r.closed = true
+	rc := r.readCloser
+	r.readCloser = nil
+	return rc.Close()
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/transport/http/md5_checksum.go 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/transport/http/md5_checksum.go
--- 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/transport/http/md5_checksum.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/transport/http/md5_checksum.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,25 @@
+package http
+
+import (
+	"crypto/md5"
+	"encoding/base64"
+	"fmt"
+	"io"
+)
+
+// computeMD5Checksum computes base64 md5 checksum of an io.Reader's contents.
+// Returns the byte slice of md5 checksum and an error.
+func computeMD5Checksum(r io.Reader) ([]byte, error) {
+	h := md5.New()
+	// copy errors may be assumed to be from the body.
+	_, err := io.Copy(h, r)
+	if err != nil {
+		return nil, fmt.Errorf("failed to read body: %w", err)
+	}
+
+	// encode the md5 checksum in base64.
+	sum := h.Sum(nil)
+	sum64 := make([]byte, base64.StdEncoding.EncodedLen(len(sum)))
+	base64.StdEncoding.Encode(sum64, sum)
+	return sum64, nil
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/transport/http/middleware_close_response_body.go 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/transport/http/middleware_close_response_body.go
--- 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/transport/http/middleware_close_response_body.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/transport/http/middleware_close_response_body.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,79 @@
+package http
+
+import (
+	"context"
+	"github.com/aws/smithy-go/logging"
+	"github.com/aws/smithy-go/middleware"
+	"io"
+	"io/ioutil"
+)
+
+// AddErrorCloseResponseBodyMiddleware adds the middleware to automatically
+// close the response body of an operation request if the request response
+// failed.
+func AddErrorCloseResponseBodyMiddleware(stack *middleware.Stack) error {
+	return stack.Deserialize.Insert(&errorCloseResponseBodyMiddleware{}, "OperationDeserializer", middleware.Before)
+}
+
+type errorCloseResponseBodyMiddleware struct{}
+
+func (*errorCloseResponseBodyMiddleware) ID() string {
+	return "ErrorCloseResponseBody"
+}
+
+func (m *errorCloseResponseBodyMiddleware) HandleDeserialize(
+	ctx context.Context, input middleware.DeserializeInput, next middleware.DeserializeHandler,
+) (
+	output middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+	out, metadata, err := next.HandleDeserialize(ctx, input)
+	if err != nil {
+		if resp, ok := out.RawResponse.(*Response); ok && resp != nil && resp.Body != nil {
+			// Consume the full body to prevent TCP connection resets on some platforms
+			_, _ = io.Copy(ioutil.Discard, resp.Body)
+			// Do not validate that the response closes successfully.
+			resp.Body.Close()
+		}
+	}
+
+	return out, metadata, err
+}
+
+// AddCloseResponseBodyMiddleware adds the middleware to automatically close
+// the response body of an operation request, after the response had been
+// deserialized.
+func AddCloseResponseBodyMiddleware(stack *middleware.Stack) error {
+	return stack.Deserialize.Insert(&closeResponseBody{}, "OperationDeserializer", middleware.Before)
+}
+
+type closeResponseBody struct{}
+
+func (*closeResponseBody) ID() string {
+	return "CloseResponseBody"
+}
+
+func (m *closeResponseBody) HandleDeserialize(
+	ctx context.Context, input middleware.DeserializeInput, next middleware.DeserializeHandler,
+) (
+	output middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+	out, metadata, err := next.HandleDeserialize(ctx, input)
+	if err != nil {
+		return out, metadata, err
+	}
+
+	if resp, ok := out.RawResponse.(*Response); ok {
+		// Consume the full body to prevent TCP connection resets on some platforms
+		_, copyErr := io.Copy(ioutil.Discard, resp.Body)
+		if copyErr != nil {
+			middleware.GetLogger(ctx).Logf(logging.Warn, "failed to discard remaining HTTP response body, this may affect connection reuse")
+		}
+
+		closeErr := resp.Body.Close()
+		if closeErr != nil {
+			middleware.GetLogger(ctx).Logf(logging.Warn, "failed to close HTTP response body, this may affect connection reuse")
+		}
+	}
+
+	return out, metadata, err
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/transport/http/middleware_content_length.go 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/transport/http/middleware_content_length.go
--- 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/transport/http/middleware_content_length.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/transport/http/middleware_content_length.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,84 @@
+package http
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/aws/smithy-go/middleware"
+)
+
+// ComputeContentLength provides a middleware to set the content-length
+// header for the length of a serialize request body.
+type ComputeContentLength struct {
+}
+
+// AddComputeContentLengthMiddleware adds ComputeContentLength to the middleware
+// stack's Build step.
+func AddComputeContentLengthMiddleware(stack *middleware.Stack) error {
+	return stack.Build.Add(&ComputeContentLength{}, middleware.After)
+}
+
+// ID returns the identifier for the ComputeContentLength.
+func (m *ComputeContentLength) ID() string { return "ComputeContentLength" }
+
+// HandleBuild adds the length of the serialized request to the HTTP header
+// if the length can be determined.
+func (m *ComputeContentLength) HandleBuild(
+	ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler,
+) (
+	out middleware.BuildOutput, metadata middleware.Metadata, err error,
+) {
+	req, ok := in.Request.(*Request)
+	if !ok {
+		return out, metadata, fmt.Errorf("unknown request type %T", req)
+	}
+
+	// do nothing if request content-length was set to 0 or above.
+	if req.ContentLength >= 0 {
+		return next.HandleBuild(ctx, in)
+	}
+
+	// attempt to compute stream length
+	if n, ok, err := req.StreamLength(); err != nil {
+		return out, metadata, fmt.Errorf(
+			"failed getting length of request stream, %w", err)
+	} else if ok {
+		req.ContentLength = n
+	}
+
+	return next.HandleBuild(ctx, in)
+}
+
+// validateContentLength provides a middleware to validate the content-length
+// is valid (greater than zero), for the serialized request payload.
+type validateContentLength struct{}
+
+// ValidateContentLengthHeader adds middleware that validates request content-length
+// is set to value greater than zero.
+func ValidateContentLengthHeader(stack *middleware.Stack) error {
+	return stack.Build.Add(&validateContentLength{}, middleware.After)
+}
+
+// ID returns the identifier for the ComputeContentLength.
+func (m *validateContentLength) ID() string { return "ValidateContentLength" }
+
+// HandleBuild adds the length of the serialized request to the HTTP header
+// if the length can be determined.
+func (m *validateContentLength) HandleBuild(
+	ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler,
+) (
+	out middleware.BuildOutput, metadata middleware.Metadata, err error,
+) {
+	req, ok := in.Request.(*Request)
+	if !ok {
+		return out, metadata, fmt.Errorf("unknown request type %T", req)
+	}
+
+	// if request content-length was set to less than 0, return an error
+	if req.ContentLength < 0 {
+		return out, metadata, fmt.Errorf(
+			"content length for payload is required and must be at least 0")
+	}
+
+	return next.HandleBuild(ctx, in)
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/transport/http/middleware_header_comment.go 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/transport/http/middleware_header_comment.go
--- 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/transport/http/middleware_header_comment.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/transport/http/middleware_header_comment.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,81 @@
+package http
+
+import (
+	"context"
+	"fmt"
+	"net/http"
+
+	"github.com/aws/smithy-go/middleware"
+)
+
+// WithHeaderComment instruments a middleware stack to append an HTTP field
+// comment to the given header as specified in RFC 9110
+// (https://www.rfc-editor.org/rfc/rfc9110#name-comments).
+//
+// The header is case-insensitive. If the provided header exists when the
+// middleware runs, the content will be inserted as-is enclosed in parentheses.
+//
+// Note that per the HTTP specification, comments are only allowed in fields
+// containing "comment" as part of their field value definition, but this API
+// will NOT verify whether the provided header is one of them.
+//
+// WithHeaderComment MAY be applied more than once to a middleware stack and/or
+// more than once per header.
+func WithHeaderComment(header, content string) func(*middleware.Stack) error {
+	return func(s *middleware.Stack) error {
+		m, err := getOrAddHeaderComment(s)
+		if err != nil {
+			return fmt.Errorf("get or add header comment: %v", err)
+		}
+
+		m.values.Add(header, content)
+		return nil
+	}
+}
+
+type headerCommentMiddleware struct {
+	values http.Header // hijack case-insensitive access APIs
+}
+
+func (*headerCommentMiddleware) ID() string {
+	return "headerComment"
+}
+
+func (m *headerCommentMiddleware) HandleBuild(ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler) (
+	out middleware.BuildOutput, metadata middleware.Metadata, err error,
+) {
+	r, ok := in.Request.(*Request)
+	if !ok {
+		return out, metadata, fmt.Errorf("unknown transport type %T", in.Request)
+	}
+
+	for h, contents := range m.values {
+		for _, c := range contents {
+			if existing := r.Header.Get(h); existing != "" {
+				r.Header.Set(h, fmt.Sprintf("%s (%s)", existing, c))
+			}
+		}
+	}
+
+	return next.HandleBuild(ctx, in)
+}
+
+func getOrAddHeaderComment(s *middleware.Stack) (*headerCommentMiddleware, error) {
+	id := (*headerCommentMiddleware)(nil).ID()
+	m, ok := s.Build.Get(id)
+	if !ok {
+		m := &headerCommentMiddleware{values: http.Header{}}
+		if err := s.Build.Add(m, middleware.After); err != nil {
+			return nil, fmt.Errorf("add build: %v", err)
+		}
+
+		return m, nil
+	}
+
+	hc, ok := m.(*headerCommentMiddleware)
+	if !ok {
+		return nil, fmt.Errorf("existing middleware w/ id %s is not *headerCommentMiddleware", id)
+	}
+
+	return hc, nil
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/transport/http/middleware_headers.go 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/transport/http/middleware_headers.go
--- 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/transport/http/middleware_headers.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/transport/http/middleware_headers.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,167 @@
+package http
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/aws/smithy-go/middleware"
+)
+
+type isContentTypeAutoSet struct{}
+
+// SetIsContentTypeDefaultValue returns a Context specifying if the request's
+// content-type header was set to a default value.
+func SetIsContentTypeDefaultValue(ctx context.Context, isDefault bool) context.Context {
+	return context.WithValue(ctx, isContentTypeAutoSet{}, isDefault)
+}
+
+// GetIsContentTypeDefaultValue returns if the content-type HTTP header on the
+// request is a default value that was auto assigned by an operation
+// serializer. Allows middleware post serialization to know if the content-type
+// was auto set to a default value or not.
+//
+// Also returns false if the Context value was never updated to include if
+// content-type was set to a default value.
+func GetIsContentTypeDefaultValue(ctx context.Context) bool {
+	v, _ := ctx.Value(isContentTypeAutoSet{}).(bool)
+	return v
+}
+
+// AddNoPayloadDefaultContentTypeRemover Adds the DefaultContentTypeRemover
+// middleware to the stack after the operation serializer. This middleware will
+// remove the content-type header from the request if it was set as a default
+// value, and no request payload is present.
+//
+// Returns error if unable to add the middleware.
+func AddNoPayloadDefaultContentTypeRemover(stack *middleware.Stack) (err error) {
+	err = stack.Serialize.Insert(removeDefaultContentType{},
+		"OperationSerializer", middleware.After)
+	if err != nil {
+		return fmt.Errorf("failed to add %s serialize middleware, %w",
+			removeDefaultContentType{}.ID(), err)
+	}
+
+	return nil
+}
+
+// RemoveNoPayloadDefaultContentTypeRemover removes the
+// DefaultContentTypeRemover middleware from the stack. Returns an error if
+// unable to remove the middleware.
+func RemoveNoPayloadDefaultContentTypeRemover(stack *middleware.Stack) (err error) {
+	_, err = stack.Serialize.Remove(removeDefaultContentType{}.ID())
+	if err != nil {
+		return fmt.Errorf("failed to remove %s serialize middleware, %w",
+			removeDefaultContentType{}.ID(), err)
+
+	}
+	return nil
+}
+
+// removeDefaultContentType provides after serialization middleware that will
+// remove the content-type header from an HTTP request if the header was set as
+// a default value by the operation serializer, and there is no request payload.
+type removeDefaultContentType struct{}
+
+// ID returns the middleware ID
+func (removeDefaultContentType) ID() string { return "RemoveDefaultContentType" }
+
+// HandleSerialize implements the serialization middleware.
+func (removeDefaultContentType) HandleSerialize(
+	ctx context.Context, input middleware.SerializeInput, next middleware.SerializeHandler,
+) (
+	out middleware.SerializeOutput, meta middleware.Metadata, err error,
+) {
+	req, ok := input.Request.(*Request)
+	if !ok {
+		return out, meta, fmt.Errorf(
+			"unexpected request type %T for removeDefaultContentType middleware",
+			input.Request)
+	}
+
+	if GetIsContentTypeDefaultValue(ctx) && req.GetStream() == nil {
+		req.Header.Del("Content-Type")
+		input.Request = req
+	}
+
+	return next.HandleSerialize(ctx, input)
+}
+
+type headerValue struct {
+	header string
+	value  string
+	append bool
+}
+
+type headerValueHelper struct {
+	headerValues []headerValue
+}
+
+func (h *headerValueHelper) addHeaderValue(value headerValue) {
+	h.headerValues = append(h.headerValues, value)
+}
+
+func (h *headerValueHelper) ID() string {
+	return "HTTPHeaderHelper"
+}
+
+func (h *headerValueHelper) HandleBuild(ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler) (out middleware.BuildOutput, metadata middleware.Metadata, err error) {
+	req, ok := in.Request.(*Request)
+	if !ok {
+		return out, metadata, fmt.Errorf("unknown transport type %T", in.Request)
+	}
+
+	for _, value := range h.headerValues {
+		if value.append {
+			req.Header.Add(value.header, value.value)
+		} else {
+			req.Header.Set(value.header, value.value)
+		}
+	}
+
+	return next.HandleBuild(ctx, in)
+}
+
+func getOrAddHeaderValueHelper(stack *middleware.Stack) (*headerValueHelper, error) {
+	id := (*headerValueHelper)(nil).ID()
+	m, ok := stack.Build.Get(id)
+	if !ok {
+		m = &headerValueHelper{}
+		err := stack.Build.Add(m, middleware.After)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	requestUserAgent, ok := m.(*headerValueHelper)
+	if !ok {
+		return nil, fmt.Errorf("%T for %s middleware did not match expected type", m, id)
+	}
+
+	return requestUserAgent, nil
+}
+
+// AddHeaderValue returns a stack mutator that adds the header value pair to header.
+// Appends to any existing values if present.
+func AddHeaderValue(header string, value string) func(stack *middleware.Stack) error {
+	return func(stack *middleware.Stack) error {
+		helper, err := getOrAddHeaderValueHelper(stack)
+		if err != nil {
+			return err
+		}
+		helper.addHeaderValue(headerValue{header: header, value: value, append: true})
+		return nil
+	}
+}
+
+// SetHeaderValue returns a stack mutator that adds the header value pair to header.
+// Replaces any existing values if present.
+func SetHeaderValue(header string, value string) func(stack *middleware.Stack) error {
+	return func(stack *middleware.Stack) error {
+		helper, err := getOrAddHeaderValueHelper(stack)
+		if err != nil {
+			return err
+		}
+		helper.addHeaderValue(headerValue{header: header, value: value, append: false})
+		return nil
+	}
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/transport/http/middleware_http_logging.go 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/transport/http/middleware_http_logging.go
--- 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/transport/http/middleware_http_logging.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/transport/http/middleware_http_logging.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,75 @@
+package http
+
+import (
+	"context"
+	"fmt"
+	"net/http/httputil"
+
+	"github.com/aws/smithy-go/logging"
+	"github.com/aws/smithy-go/middleware"
+)
+
+// RequestResponseLogger is a deserialize middleware that will log the request and response HTTP messages and optionally
+// their respective bodies. Will not perform any logging if none of the options are set.
+type RequestResponseLogger struct {
+	LogRequest         bool
+	LogRequestWithBody bool
+
+	LogResponse         bool
+	LogResponseWithBody bool
+}
+
+// ID is the middleware identifier.
+func (r *RequestResponseLogger) ID() string {
+	return "RequestResponseLogger"
+}
+
+// HandleDeserialize will log the request and response HTTP messages if configured accordingly.
+func (r *RequestResponseLogger) HandleDeserialize(
+	ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler,
+) (
+	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+	logger := middleware.GetLogger(ctx)
+
+	if r.LogRequest || r.LogRequestWithBody {
+		smithyRequest, ok := in.Request.(*Request)
+		if !ok {
+			return out, metadata, fmt.Errorf("unknown transport type %T", in)
+		}
+
+		rc := smithyRequest.Build(ctx)
+		reqBytes, err := httputil.DumpRequestOut(rc, r.LogRequestWithBody)
+		if err != nil {
+			return out, metadata, err
+		}
+
+		logger.Logf(logging.Debug, "Request\n%v", string(reqBytes))
+
+		if r.LogRequestWithBody {
+			smithyRequest, err = smithyRequest.SetStream(rc.Body)
+			if err != nil {
+				return out, metadata, err
+			}
+			in.Request = smithyRequest
+		}
+	}
+
+	out, metadata, err = next.HandleDeserialize(ctx, in)
+
+	if (err == nil) && (r.LogResponse || r.LogResponseWithBody) {
+		smithyResponse, ok := out.RawResponse.(*Response)
+		if !ok {
+			return out, metadata, fmt.Errorf("unknown transport type %T", out.RawResponse)
+		}
+
+		respBytes, err := httputil.DumpResponse(smithyResponse.Response, r.LogResponseWithBody)
+		if err != nil {
+			return out, metadata, fmt.Errorf("failed to dump response %w", err)
+		}
+
+		logger.Logf(logging.Debug, "Response\n%v", string(respBytes))
+	}
+
+	return out, metadata, err
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/transport/http/middleware_metadata.go 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/transport/http/middleware_metadata.go
--- 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/transport/http/middleware_metadata.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/transport/http/middleware_metadata.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,51 @@
+package http
+
+import (
+	"context"
+
+	"github.com/aws/smithy-go/middleware"
+)
+
+type (
+	hostnameImmutableKey struct{}
+	hostPrefixDisableKey struct{}
+)
+
+// GetHostnameImmutable retrieves whether the endpoint hostname should be considered
+// immutable or not.
+//
+// Scoped to stack values. Use middleware#ClearStackValues to clear all stack
+// values.
+func GetHostnameImmutable(ctx context.Context) (v bool) {
+	v, _ = middleware.GetStackValue(ctx, hostnameImmutableKey{}).(bool)
+	return v
+}
+
+// SetHostnameImmutable sets or modifies whether the request's endpoint hostname
+// should be considered immutable or not.
+//
+// Scoped to stack values. Use middleware#ClearStackValues to clear all stack
+// values.
+func SetHostnameImmutable(ctx context.Context, value bool) context.Context {
+	return middleware.WithStackValue(ctx, hostnameImmutableKey{}, value)
+}
+
+// IsEndpointHostPrefixDisabled retrieves whether the hostname prefixing is
+// disabled.
+//
+// Scoped to stack values. Use middleware#ClearStackValues to clear all stack
+// values.
+func IsEndpointHostPrefixDisabled(ctx context.Context) (v bool) {
+	v, _ = middleware.GetStackValue(ctx, hostPrefixDisableKey{}).(bool)
+	return v
+}
+
+// DisableEndpointHostPrefix sets or modifies whether the request's endpoint host
+// prefixing should be disabled. If value is true, endpoint host prefixing
+// will be disabled.
+//
+// Scoped to stack values. Use middleware#ClearStackValues to clear all stack
+// values.
+func DisableEndpointHostPrefix(ctx context.Context, value bool) context.Context {
+	return middleware.WithStackValue(ctx, hostPrefixDisableKey{}, value)
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/transport/http/middleware_min_proto.go 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/transport/http/middleware_min_proto.go
--- 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/transport/http/middleware_min_proto.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/transport/http/middleware_min_proto.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,79 @@
+package http
+
+import (
+	"context"
+	"fmt"
+	"github.com/aws/smithy-go/middleware"
+	"strings"
+)
+
+// MinimumProtocolError is an error type indicating that the established connection did not meet the expected minimum
+// HTTP protocol version.
+type MinimumProtocolError struct {
+	proto              string
+	expectedProtoMajor int
+	expectedProtoMinor int
+}
+
+// Error returns the error message.
+func (m *MinimumProtocolError) Error() string {
+	return fmt.Sprintf("operation requires minimum HTTP protocol of HTTP/%d.%d, but was %s",
+		m.expectedProtoMajor, m.expectedProtoMinor, m.proto)
+}
+
+// RequireMinimumProtocol is a deserialization middleware that asserts that the established HTTP connection
+// meets the minimum major ad minor version.
+type RequireMinimumProtocol struct {
+	ProtoMajor int
+	ProtoMinor int
+}
+
+// AddRequireMinimumProtocol adds the RequireMinimumProtocol middleware to the stack using the provided minimum
+// protocol major and minor version.
+func AddRequireMinimumProtocol(stack *middleware.Stack, major, minor int) error {
+	return stack.Deserialize.Insert(&RequireMinimumProtocol{
+		ProtoMajor: major,
+		ProtoMinor: minor,
+	}, "OperationDeserializer", middleware.Before)
+}
+
+// ID returns the middleware identifier string.
+func (r *RequireMinimumProtocol) ID() string {
+	return "RequireMinimumProtocol"
+}
+
+// HandleDeserialize asserts that the established connection is a HTTP connection with the minimum major and minor
+// protocol version.
+func (r *RequireMinimumProtocol) HandleDeserialize(
+	ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler,
+) (
+	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+	out, metadata, err = next.HandleDeserialize(ctx, in)
+	if err != nil {
+		return out, metadata, err
+	}
+
+	response, ok := out.RawResponse.(*Response)
+	if !ok {
+		return out, metadata, fmt.Errorf("unknown transport type: %T", out.RawResponse)
+	}
+
+	if !strings.HasPrefix(response.Proto, "HTTP") {
+		return out, metadata, &MinimumProtocolError{
+			proto:              response.Proto,
+			expectedProtoMajor: r.ProtoMajor,
+			expectedProtoMinor: r.ProtoMinor,
+		}
+	}
+
+	if response.ProtoMajor < r.ProtoMajor || response.ProtoMinor < r.ProtoMinor {
+		return out, metadata, &MinimumProtocolError{
+			proto:              response.Proto,
+			expectedProtoMajor: r.ProtoMajor,
+			expectedProtoMinor: r.ProtoMinor,
+		}
+	}
+
+	return out, metadata, err
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/transport/http/properties.go 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/transport/http/properties.go
--- 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/transport/http/properties.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/transport/http/properties.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,80 @@
+package http
+
+import smithy "github.com/aws/smithy-go"
+
+type (
+	sigV4SigningNameKey   struct{}
+	sigV4SigningRegionKey struct{}
+
+	sigV4ASigningNameKey    struct{}
+	sigV4ASigningRegionsKey struct{}
+
+	isUnsignedPayloadKey     struct{}
+	disableDoubleEncodingKey struct{}
+)
+
+// GetSigV4SigningName gets the signing name from Properties.
+func GetSigV4SigningName(p *smithy.Properties) (string, bool) {
+	v, ok := p.Get(sigV4SigningNameKey{}).(string)
+	return v, ok
+}
+
+// SetSigV4SigningName sets the signing name on Properties.
+func SetSigV4SigningName(p *smithy.Properties, name string) {
+	p.Set(sigV4SigningNameKey{}, name)
+}
+
+// GetSigV4SigningRegion gets the signing region from Properties.
+func GetSigV4SigningRegion(p *smithy.Properties) (string, bool) {
+	v, ok := p.Get(sigV4SigningRegionKey{}).(string)
+	return v, ok
+}
+
+// SetSigV4SigningRegion sets the signing region on Properties.
+func SetSigV4SigningRegion(p *smithy.Properties, region string) {
+	p.Set(sigV4SigningRegionKey{}, region)
+}
+
+// GetSigV4ASigningName gets the v4a signing name from Properties.
+func GetSigV4ASigningName(p *smithy.Properties) (string, bool) {
+	v, ok := p.Get(sigV4ASigningNameKey{}).(string)
+	return v, ok
+}
+
+// SetSigV4ASigningName sets the signing name on Properties.
+func SetSigV4ASigningName(p *smithy.Properties, name string) {
+	p.Set(sigV4ASigningNameKey{}, name)
+}
+
+// GetSigV4ASigningRegion gets the v4a signing region set from Properties.
+func GetSigV4ASigningRegions(p *smithy.Properties) ([]string, bool) {
+	v, ok := p.Get(sigV4ASigningRegionsKey{}).([]string)
+	return v, ok
+}
+
+// SetSigV4ASigningRegions sets the v4a signing region set on Properties.
+func SetSigV4ASigningRegions(p *smithy.Properties, regions []string) {
+	p.Set(sigV4ASigningRegionsKey{}, regions)
+}
+
+// GetIsUnsignedPayload gets whether the payload is unsigned from Properties.
+func GetIsUnsignedPayload(p *smithy.Properties) (bool, bool) {
+	v, ok := p.Get(isUnsignedPayloadKey{}).(bool)
+	return v, ok
+}
+
+// SetIsUnsignedPayload sets whether the payload is unsigned on Properties.
+func SetIsUnsignedPayload(p *smithy.Properties, isUnsignedPayload bool) {
+	p.Set(isUnsignedPayloadKey{}, isUnsignedPayload)
+}
+
+// GetDisableDoubleEncoding gets whether the payload is unsigned from Properties.
+func GetDisableDoubleEncoding(p *smithy.Properties) (bool, bool) {
+	v, ok := p.Get(disableDoubleEncodingKey{}).(bool)
+	return v, ok
+}
+
+// SetDisableDoubleEncoding sets whether the payload is unsigned on Properties.
+func SetDisableDoubleEncoding(p *smithy.Properties, disableDoubleEncoding bool) {
+	p.Set(disableDoubleEncodingKey{}, disableDoubleEncoding)
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/transport/http/request.go 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/transport/http/request.go
--- 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/transport/http/request.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/transport/http/request.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,189 @@
+package http
+
+import (
+	"context"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net/http"
+	"net/url"
+	"strings"
+
+	iointernal "github.com/aws/smithy-go/transport/http/internal/io"
+)
+
+// Request provides the HTTP specific request structure for HTTP specific
+// middleware steps to use to serialize input, and send an operation's request.
+type Request struct {
+	*http.Request
+	stream           io.Reader
+	isStreamSeekable bool
+	streamStartPos   int64
+}
+
+// NewStackRequest returns an initialized request ready to be populated with the
+// HTTP request details. Returns empty interface so the function can be used as
+// a parameter to the Smithy middleware Stack constructor.
+func NewStackRequest() interface{} {
+	return &Request{
+		Request: &http.Request{
+			URL:           &url.URL{},
+			Header:        http.Header{},
+			ContentLength: -1, // default to unknown length
+		},
+	}
+}
+
+// IsHTTPS returns if the request is HTTPS. Returns false if no endpoint URL is set.
+func (r *Request) IsHTTPS() bool {
+	if r.URL == nil {
+		return false
+	}
+	return strings.EqualFold(r.URL.Scheme, "https")
+}
+
+// Clone returns a deep copy of the Request for the new context. A reference to
+// the Stream is copied, but the underlying stream is not copied.
+func (r *Request) Clone() *Request {
+	rc := *r
+	rc.Request = rc.Request.Clone(context.TODO())
+	return &rc
+}
+
+// StreamLength returns the number of bytes of the serialized stream attached
+// to the request and ok set. If the length cannot be determined, an error will
+// be returned.
+func (r *Request) StreamLength() (size int64, ok bool, err error) {
+	return streamLength(r.stream, r.isStreamSeekable, r.streamStartPos)
+}
+
+func streamLength(stream io.Reader, seekable bool, startPos int64) (size int64, ok bool, err error) {
+	if stream == nil {
+		return 0, true, nil
+	}
+
+	if l, ok := stream.(interface{ Len() int }); ok {
+		return int64(l.Len()), true, nil
+	}
+
+	if !seekable {
+		return 0, false, nil
+	}
+
+	s := stream.(io.Seeker)
+	endOffset, err := s.Seek(0, io.SeekEnd)
+	if err != nil {
+		return 0, false, err
+	}
+
+	// The reason to seek to streamStartPos instead of 0 is to ensure that the
+	// SDK only sends the stream from the starting position the user's
+	// application provided it to the SDK at. For example application opens a
+	// file, and wants to skip the first N bytes uploading the rest. The
+	// application would move the file's offset N bytes, then hand it off to
+	// the SDK to send the remaining. The SDK should respect that initial offset.
+	_, err = s.Seek(startPos, io.SeekStart)
+	if err != nil {
+		return 0, false, err
+	}
+
+	return endOffset - startPos, true, nil
+}
+
+// RewindStream will rewind the io.Reader to the relative start position if it
+// is an io.Seeker.
+func (r *Request) RewindStream() error {
+	// If there is no stream there is nothing to rewind.
+	if r.stream == nil {
+		return nil
+	}
+
+	if !r.isStreamSeekable {
+		return fmt.Errorf("request stream is not seekable")
+	}
+	_, err := r.stream.(io.Seeker).Seek(r.streamStartPos, io.SeekStart)
+	return err
+}
+
+// GetStream returns the request stream io.Reader if a stream is set. If no
+// stream is present nil will be returned.
+func (r *Request) GetStream() io.Reader {
+	return r.stream
+}
+
+// IsStreamSeekable returns whether the stream is seekable.
+func (r *Request) IsStreamSeekable() bool {
+	return r.isStreamSeekable
+}
+
+// SetStream returns a clone of the request with the stream set to the provided
+// reader. May return an error if the provided reader is seekable but returns
+// an error.
+func (r *Request) SetStream(reader io.Reader) (rc *Request, err error) {
+	rc = r.Clone()
+
+	if reader == http.NoBody {
+		reader = nil
+	}
+
+	var isStreamSeekable bool
+	var streamStartPos int64
+	switch v := reader.(type) {
+	case io.Seeker:
+		n, err := v.Seek(0, io.SeekCurrent)
+		if err != nil {
+			return r, err
+		}
+		isStreamSeekable = true
+		streamStartPos = n
+	default:
+		// If the stream length can be determined, and is determined to be empty,
+		// use a nil stream to prevent confusion between empty vs not-empty
+		// streams.
+		length, ok, err := streamLength(reader, false, 0)
+		if err != nil {
+			return nil, err
+		} else if ok && length == 0 {
+			reader = nil
+		}
+	}
+
+	rc.stream = reader
+	rc.isStreamSeekable = isStreamSeekable
+	rc.streamStartPos = streamStartPos
+
+	return rc, err
+}
+
+// Build returns a build standard HTTP request value from the Smithy request.
+// The request's stream is wrapped in a safe container that allows it to be
+// reused for subsequent attempts.
+func (r *Request) Build(ctx context.Context) *http.Request {
+	req := r.Request.Clone(ctx)
+
+	if r.stream == nil && req.ContentLength == -1 {
+		req.ContentLength = 0
+	}
+
+	switch stream := r.stream.(type) {
+	case *io.PipeReader:
+		req.Body = ioutil.NopCloser(stream)
+		req.ContentLength = -1
+	default:
+		// HTTP Client Request must only have a non-nil body if the
+		// ContentLength is explicitly unknown (-1) or non-zero. The HTTP
+		// Client will interpret a non-nil body and ContentLength 0 as
+		// "unknown". This is unwanted behavior.
+		if req.ContentLength != 0 && r.stream != nil {
+			req.Body = iointernal.NewSafeReadCloser(ioutil.NopCloser(stream))
+		}
+	}
+
+	return req
+}
+
+// RequestCloner is a function that can take an input request type and clone the request
+// for use in a subsequent retry attempt.
+func RequestCloner(v interface{}) interface{} {
+	return v.(*Request).Clone()
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/transport/http/response.go 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/transport/http/response.go
--- 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/transport/http/response.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/transport/http/response.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,34 @@
+package http
+
+import (
+	"fmt"
+	"net/http"
+)
+
+// Response provides the HTTP specific response structure for HTTP specific
+// middleware steps to use to deserialize the response from an operation call.
+type Response struct {
+	*http.Response
+}
+
+// ResponseError provides the HTTP centric error type wrapping the underlying
+// error with the HTTP response value.
+type ResponseError struct {
+	Response *Response
+	Err      error
+}
+
+// HTTPStatusCode returns the HTTP response status code received from the service.
+func (e *ResponseError) HTTPStatusCode() int { return e.Response.StatusCode }
+
+// HTTPResponse returns the HTTP response received from the service.
+func (e *ResponseError) HTTPResponse() *Response { return e.Response }
+
+// Unwrap returns the nested error if any, or nil.
+func (e *ResponseError) Unwrap() error { return e.Err }
+
+func (e *ResponseError) Error() string {
+	return fmt.Sprintf(
+		"http response error StatusCode: %d, %v",
+		e.Response.StatusCode, e.Err)
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/transport/http/time.go 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/transport/http/time.go
--- 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/transport/http/time.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/transport/http/time.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,13 @@
+package http
+
+import (
+	"time"
+
+	smithytime "github.com/aws/smithy-go/time"
+)
+
+// ParseTime parses a time string like the HTTP Date header. This uses a more
+// relaxed rule set for date parsing compared to the standard library.
+func ParseTime(text string) (t time.Time, err error) {
+	return smithytime.ParseHTTPDate(text)
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/transport/http/url.go 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/transport/http/url.go
--- 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/transport/http/url.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/transport/http/url.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,44 @@
+package http
+
+import "strings"
+
+// JoinPath returns an absolute URL path composed of the two paths provided.
+// Enforces that the returned path begins with '/'. If added path is empty the
+// returned path suffix will match the first parameter suffix.
+func JoinPath(a, b string) string {
+	if len(a) == 0 {
+		a = "/"
+	} else if a[0] != '/' {
+		a = "/" + a
+	}
+
+	if len(b) != 0 && b[0] == '/' {
+		b = b[1:]
+	}
+
+	if len(b) != 0 && len(a) > 1 && a[len(a)-1] != '/' {
+		a = a + "/"
+	}
+
+	return a + b
+}
+
+// JoinRawQuery returns an absolute raw query expression. Any duplicate '&'
+// will be collapsed to single separator between values.
+func JoinRawQuery(a, b string) string {
+	a = strings.TrimFunc(a, isAmpersand)
+	b = strings.TrimFunc(b, isAmpersand)
+
+	if len(a) == 0 {
+		return b
+	}
+	if len(b) == 0 {
+		return a
+	}
+
+	return a + "&" + b
+}
+
+func isAmpersand(v rune) bool {
+	return v == '&'
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/transport/http/user_agent.go 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/transport/http/user_agent.go
--- 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/transport/http/user_agent.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/transport/http/user_agent.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,37 @@
+package http
+
+import (
+	"strings"
+)
+
+// UserAgentBuilder is a builder for a HTTP User-Agent string.
+type UserAgentBuilder struct {
+	sb strings.Builder
+}
+
+// NewUserAgentBuilder returns a new UserAgentBuilder.
+func NewUserAgentBuilder() *UserAgentBuilder {
+	return &UserAgentBuilder{sb: strings.Builder{}}
+}
+
+// AddKey adds the named component/product to the agent string
+func (u *UserAgentBuilder) AddKey(key string) {
+	u.appendTo(key)
+}
+
+// AddKeyValue adds the named key to the agent string with the given value.
+func (u *UserAgentBuilder) AddKeyValue(key, value string) {
+	u.appendTo(key + "/" + value)
+}
+
+// Build returns the constructed User-Agent string. May be called multiple times.
+func (u *UserAgentBuilder) Build() string {
+	return u.sb.String()
+}
+
+func (u *UserAgentBuilder) appendTo(value string) {
+	if u.sb.Len() > 0 {
+		u.sb.WriteRune(' ')
+	}
+	u.sb.WriteString(value)
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/validation.go 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/validation.go
--- 0.19.3+ds1-4/vendor/github.com/aws/smithy-go/validation.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/aws/smithy-go/validation.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,140 @@
+package smithy
+
+import (
+	"bytes"
+	"fmt"
+	"strings"
+)
+
+// An InvalidParamsError provides wrapping of invalid parameter errors found when
+// validating API operation input parameters.
+type InvalidParamsError struct {
+	// Context is the base context of the invalid parameter group.
+	Context string
+	errs    []InvalidParamError
+}
+
+// Add adds a new invalid parameter error to the collection of invalid
+// parameters. The context of the invalid parameter will be updated to reflect
+// this collection.
+func (e *InvalidParamsError) Add(err InvalidParamError) {
+	err.SetContext(e.Context)
+	e.errs = append(e.errs, err)
+}
+
+// AddNested adds the invalid parameter errors from another InvalidParamsError
+// value into this collection. The nested errors will have their nested context
+// updated and base context to reflect the merging.
+//
+// Use for nested validations errors.
+func (e *InvalidParamsError) AddNested(nestedCtx string, nested InvalidParamsError) {
+	for _, err := range nested.errs {
+		err.SetContext(e.Context)
+		err.AddNestedContext(nestedCtx)
+		e.errs = append(e.errs, err)
+	}
+}
+
+// Len returns the number of invalid parameter errors
+func (e *InvalidParamsError) Len() int {
+	return len(e.errs)
+}
+
+// Error returns the string formatted form of the invalid parameters.
+func (e InvalidParamsError) Error() string {
+	w := &bytes.Buffer{}
+	fmt.Fprintf(w, "%d validation error(s) found.\n", len(e.errs))
+
+	for _, err := range e.errs {
+		fmt.Fprintf(w, "- %s\n", err.Error())
+	}
+
+	return w.String()
+}
+
+// Errs returns a slice of the invalid parameters
+func (e InvalidParamsError) Errs() []error {
+	errs := make([]error, len(e.errs))
+	for i := 0; i < len(errs); i++ {
+		errs[i] = e.errs[i]
+	}
+
+	return errs
+}
+
+// An InvalidParamError represents an invalid parameter error type.
+type InvalidParamError interface {
+	error
+
+	// Field name the error occurred on.
+	Field() string
+
+	// SetContext updates the context of the error.
+	SetContext(string)
+
+	// AddNestedContext updates the error's context to include a nested level.
+	AddNestedContext(string)
+}
+
+type invalidParamError struct {
+	context       string
+	nestedContext string
+	field         string
+	reason        string
+}
+
+// Error returns the string version of the invalid parameter error.
+func (e invalidParamError) Error() string {
+	return fmt.Sprintf("%s, %s.", e.reason, e.Field())
+}
+
+// Field Returns the field and context the error occurred.
+func (e invalidParamError) Field() string {
+	sb := &strings.Builder{}
+	sb.WriteString(e.context)
+	if sb.Len() > 0 {
+		if len(e.nestedContext) == 0 || (len(e.nestedContext) > 0 && e.nestedContext[:1] != "[") {
+			sb.WriteRune('.')
+		}
+	}
+	if len(e.nestedContext) > 0 {
+		sb.WriteString(e.nestedContext)
+		sb.WriteRune('.')
+	}
+	sb.WriteString(e.field)
+	return sb.String()
+}
+
+// SetContext updates the base context of the error.
+func (e *invalidParamError) SetContext(ctx string) {
+	e.context = ctx
+}
+
+// AddNestedContext prepends a context to the field's path.
+func (e *invalidParamError) AddNestedContext(ctx string) {
+	if len(e.nestedContext) == 0 {
+		e.nestedContext = ctx
+		return
+	}
+	// Check if our nested context is an index into a slice or map
+	if e.nestedContext[:1] != "[" {
+		e.nestedContext = fmt.Sprintf("%s.%s", ctx, e.nestedContext)
+		return
+	}
+	e.nestedContext = ctx + e.nestedContext
+}
+
+// An ParamRequiredError represents an required parameter error.
+type ParamRequiredError struct {
+	invalidParamError
+}
+
+// NewErrParamRequired creates a new required parameter error.
+func NewErrParamRequired(field string) *ParamRequiredError {
+	return &ParamRequiredError{
+		invalidParamError{
+			field:  field,
+			reason: fmt.Sprintf("missing required field"),
+		},
+	}
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/beorn7/perks/LICENSE 0.21.3-0ubuntu1/vendor/github.com/beorn7/perks/LICENSE
--- 0.19.3+ds1-4/vendor/github.com/beorn7/perks/LICENSE	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/beorn7/perks/LICENSE	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,20 @@
+Copyright (C) 2013 Blake Mizerany
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff -pruN 0.19.3+ds1-4/vendor/github.com/beorn7/perks/quantile/exampledata.txt 0.21.3-0ubuntu1/vendor/github.com/beorn7/perks/quantile/exampledata.txt
--- 0.19.3+ds1-4/vendor/github.com/beorn7/perks/quantile/exampledata.txt	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/beorn7/perks/quantile/exampledata.txt	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,2388 @@
+8
+5
+26
+12
+5
+235
+13
+6
+28
+30
+3
+3
+3
+3
+5
+2
+33
+7
+2
+4
+7
+12
+14
+5
+8
+3
+10
+4
+5
+3
+6
+6
+209
+20
+3
+10
+14
+3
+4
+6
+8
+5
+11
+7
+3
+2
+3
+3
+212
+5
+222
+4
+10
+10
+5
+6
+3
+8
+3
+10
+254
+220
+2
+3
+5
+24
+5
+4
+222
+7
+3
+3
+223
+8
+15
+12
+14
+14
+3
+2
+2
+3
+13
+3
+11
+4
+4
+6
+5
+7
+13
+5
+3
+5
+2
+5
+3
+5
+2
+7
+15
+17
+14
+3
+6
+6
+3
+17
+5
+4
+7
+6
+4
+4
+8
+6
+8
+3
+9
+3
+6
+3
+4
+5
+3
+3
+660
+4
+6
+10
+3
+6
+3
+2
+5
+13
+2
+4
+4
+10
+4
+8
+4
+3
+7
+9
+9
+3
+10
+37
+3
+13
+4
+12
+3
+6
+10
+8
+5
+21
+2
+3
+8
+3
+2
+3
+3
+4
+12
+2
+4
+8
+8
+4
+3
+2
+20
+1
+6
+32
+2
+11
+6
+18
+3
+8
+11
+3
+212
+3
+4
+2
+6
+7
+12
+11
+3
+2
+16
+10
+6
+4
+6
+3
+2
+7
+3
+2
+2
+2
+2
+5
+6
+4
+3
+10
+3
+4
+6
+5
+3
+4
+4
+5
+6
+4
+3
+4
+4
+5
+7
+5
+5
+3
+2
+7
+2
+4
+12
+4
+5
+6
+2
+4
+4
+8
+4
+15
+13
+7
+16
+5
+3
+23
+5
+5
+7
+3
+2
+9
+8
+7
+5
+8
+11
+4
+10
+76
+4
+47
+4
+3
+2
+7
+4
+2
+3
+37
+10
+4
+2
+20
+5
+4
+4
+10
+10
+4
+3
+7
+23
+240
+7
+13
+5
+5
+3
+3
+2
+5
+4
+2
+8
+7
+19
+2
+23
+8
+7
+2
+5
+3
+8
+3
+8
+13
+5
+5
+5
+2
+3
+23
+4
+9
+8
+4
+3
+3
+5
+220
+2
+3
+4
+6
+14
+3
+53
+6
+2
+5
+18
+6
+3
+219
+6
+5
+2
+5
+3
+6
+5
+15
+4
+3
+17
+3
+2
+4
+7
+2
+3
+3
+4
+4
+3
+2
+664
+6
+3
+23
+5
+5
+16
+5
+8
+2
+4
+2
+24
+12
+3
+2
+3
+5
+8
+3
+5
+4
+3
+14
+3
+5
+8
+2
+3
+7
+9
+4
+2
+3
+6
+8
+4
+3
+4
+6
+5
+3
+3
+6
+3
+19
+4
+4
+6
+3
+6
+3
+5
+22
+5
+4
+4
+3
+8
+11
+4
+9
+7
+6
+13
+4
+4
+4
+6
+17
+9
+3
+3
+3
+4
+3
+221
+5
+11
+3
+4
+2
+12
+6
+3
+5
+7
+5
+7
+4
+9
+7
+14
+37
+19
+217
+16
+3
+5
+2
+2
+7
+19
+7
+6
+7
+4
+24
+5
+11
+4
+7
+7
+9
+13
+3
+4
+3
+6
+28
+4
+4
+5
+5
+2
+5
+6
+4
+4
+6
+10
+5
+4
+3
+2
+3
+3
+6
+5
+5
+4
+3
+2
+3
+7
+4
+6
+18
+16
+8
+16
+4
+5
+8
+6
+9
+13
+1545
+6
+215
+6
+5
+6
+3
+45
+31
+5
+2
+2
+4
+3
+3
+2
+5
+4
+3
+5
+7
+7
+4
+5
+8
+5
+4
+749
+2
+31
+9
+11
+2
+11
+5
+4
+4
+7
+9
+11
+4
+5
+4
+7
+3
+4
+6
+2
+15
+3
+4
+3
+4
+3
+5
+2
+13
+5
+5
+3
+3
+23
+4
+4
+5
+7
+4
+13
+2
+4
+3
+4
+2
+6
+2
+7
+3
+5
+5
+3
+29
+5
+4
+4
+3
+10
+2
+3
+79
+16
+6
+6
+7
+7
+3
+5
+5
+7
+4
+3
+7
+9
+5
+6
+5
+9
+6
+3
+6
+4
+17
+2
+10
+9
+3
+6
+2
+3
+21
+22
+5
+11
+4
+2
+17
+2
+224
+2
+14
+3
+4
+4
+2
+4
+4
+4
+4
+5
+3
+4
+4
+10
+2
+6
+3
+3
+5
+7
+2
+7
+5
+6
+3
+218
+2
+2
+5
+2
+6
+3
+5
+222
+14
+6
+33
+3
+2
+5
+3
+3
+3
+9
+5
+3
+3
+2
+7
+4
+3
+4
+3
+5
+6
+5
+26
+4
+13
+9
+7
+3
+221
+3
+3
+4
+4
+4
+4
+2
+18
+5
+3
+7
+9
+6
+8
+3
+10
+3
+11
+9
+5
+4
+17
+5
+5
+6
+6
+3
+2
+4
+12
+17
+6
+7
+218
+4
+2
+4
+10
+3
+5
+15
+3
+9
+4
+3
+3
+6
+29
+3
+3
+4
+5
+5
+3
+8
+5
+6
+6
+7
+5
+3
+5
+3
+29
+2
+31
+5
+15
+24
+16
+5
+207
+4
+3
+3
+2
+15
+4
+4
+13
+5
+5
+4
+6
+10
+2
+7
+8
+4
+6
+20
+5
+3
+4
+3
+12
+12
+5
+17
+7
+3
+3
+3
+6
+10
+3
+5
+25
+80
+4
+9
+3
+2
+11
+3
+3
+2
+3
+8
+7
+5
+5
+19
+5
+3
+3
+12
+11
+2
+6
+5
+5
+5
+3
+3
+3
+4
+209
+14
+3
+2
+5
+19
+4
+4
+3
+4
+14
+5
+6
+4
+13
+9
+7
+4
+7
+10
+2
+9
+5
+7
+2
+8
+4
+6
+5
+5
+222
+8
+7
+12
+5
+216
+3
+4
+4
+6
+3
+14
+8
+7
+13
+4
+3
+3
+3
+3
+17
+5
+4
+3
+33
+6
+6
+33
+7
+5
+3
+8
+7
+5
+2
+9
+4
+2
+233
+24
+7
+4
+8
+10
+3
+4
+15
+2
+16
+3
+3
+13
+12
+7
+5
+4
+207
+4
+2
+4
+27
+15
+2
+5
+2
+25
+6
+5
+5
+6
+13
+6
+18
+6
+4
+12
+225
+10
+7
+5
+2
+2
+11
+4
+14
+21
+8
+10
+3
+5
+4
+232
+2
+5
+5
+3
+7
+17
+11
+6
+6
+23
+4
+6
+3
+5
+4
+2
+17
+3
+6
+5
+8
+3
+2
+2
+14
+9
+4
+4
+2
+5
+5
+3
+7
+6
+12
+6
+10
+3
+6
+2
+2
+19
+5
+4
+4
+9
+2
+4
+13
+3
+5
+6
+3
+6
+5
+4
+9
+6
+3
+5
+7
+3
+6
+6
+4
+3
+10
+6
+3
+221
+3
+5
+3
+6
+4
+8
+5
+3
+6
+4
+4
+2
+54
+5
+6
+11
+3
+3
+4
+4
+4
+3
+7
+3
+11
+11
+7
+10
+6
+13
+223
+213
+15
+231
+7
+3
+7
+228
+2
+3
+4
+4
+5
+6
+7
+4
+13
+3
+4
+5
+3
+6
+4
+6
+7
+2
+4
+3
+4
+3
+3
+6
+3
+7
+3
+5
+18
+5
+6
+8
+10
+3
+3
+3
+2
+4
+2
+4
+4
+5
+6
+6
+4
+10
+13
+3
+12
+5
+12
+16
+8
+4
+19
+11
+2
+4
+5
+6
+8
+5
+6
+4
+18
+10
+4
+2
+216
+6
+6
+6
+2
+4
+12
+8
+3
+11
+5
+6
+14
+5
+3
+13
+4
+5
+4
+5
+3
+28
+6
+3
+7
+219
+3
+9
+7
+3
+10
+6
+3
+4
+19
+5
+7
+11
+6
+15
+19
+4
+13
+11
+3
+7
+5
+10
+2
+8
+11
+2
+6
+4
+6
+24
+6
+3
+3
+3
+3
+6
+18
+4
+11
+4
+2
+5
+10
+8
+3
+9
+5
+3
+4
+5
+6
+2
+5
+7
+4
+4
+14
+6
+4
+4
+5
+5
+7
+2
+4
+3
+7
+3
+3
+6
+4
+5
+4
+4
+4
+3
+3
+3
+3
+8
+14
+2
+3
+5
+3
+2
+4
+5
+3
+7
+3
+3
+18
+3
+4
+4
+5
+7
+3
+3
+3
+13
+5
+4
+8
+211
+5
+5
+3
+5
+2
+5
+4
+2
+655
+6
+3
+5
+11
+2
+5
+3
+12
+9
+15
+11
+5
+12
+217
+2
+6
+17
+3
+3
+207
+5
+5
+4
+5
+9
+3
+2
+8
+5
+4
+3
+2
+5
+12
+4
+14
+5
+4
+2
+13
+5
+8
+4
+225
+4
+3
+4
+5
+4
+3
+3
+6
+23
+9
+2
+6
+7
+233
+4
+4
+6
+18
+3
+4
+6
+3
+4
+4
+2
+3
+7
+4
+13
+227
+4
+3
+5
+4
+2
+12
+9
+17
+3
+7
+14
+6
+4
+5
+21
+4
+8
+9
+2
+9
+25
+16
+3
+6
+4
+7
+8
+5
+2
+3
+5
+4
+3
+3
+5
+3
+3
+3
+2
+3
+19
+2
+4
+3
+4
+2
+3
+4
+4
+2
+4
+3
+3
+3
+2
+6
+3
+17
+5
+6
+4
+3
+13
+5
+3
+3
+3
+4
+9
+4
+2
+14
+12
+4
+5
+24
+4
+3
+37
+12
+11
+21
+3
+4
+3
+13
+4
+2
+3
+15
+4
+11
+4
+4
+3
+8
+3
+4
+4
+12
+8
+5
+3
+3
+4
+2
+220
+3
+5
+223
+3
+3
+3
+10
+3
+15
+4
+241
+9
+7
+3
+6
+6
+23
+4
+13
+7
+3
+4
+7
+4
+9
+3
+3
+4
+10
+5
+5
+1
+5
+24
+2
+4
+5
+5
+6
+14
+3
+8
+2
+3
+5
+13
+13
+3
+5
+2
+3
+15
+3
+4
+2
+10
+4
+4
+4
+5
+5
+3
+5
+3
+4
+7
+4
+27
+3
+6
+4
+15
+3
+5
+6
+6
+5
+4
+8
+3
+9
+2
+6
+3
+4
+3
+7
+4
+18
+3
+11
+3
+3
+8
+9
+7
+24
+3
+219
+7
+10
+4
+5
+9
+12
+2
+5
+4
+4
+4
+3
+3
+19
+5
+8
+16
+8
+6
+22
+3
+23
+3
+242
+9
+4
+3
+3
+5
+7
+3
+3
+5
+8
+3
+7
+5
+14
+8
+10
+3
+4
+3
+7
+4
+6
+7
+4
+10
+4
+3
+11
+3
+7
+10
+3
+13
+6
+8
+12
+10
+5
+7
+9
+3
+4
+7
+7
+10
+8
+30
+9
+19
+4
+3
+19
+15
+4
+13
+3
+215
+223
+4
+7
+4
+8
+17
+16
+3
+7
+6
+5
+5
+4
+12
+3
+7
+4
+4
+13
+4
+5
+2
+5
+6
+5
+6
+6
+7
+10
+18
+23
+9
+3
+3
+6
+5
+2
+4
+2
+7
+3
+3
+2
+5
+5
+14
+10
+224
+6
+3
+4
+3
+7
+5
+9
+3
+6
+4
+2
+5
+11
+4
+3
+3
+2
+8
+4
+7
+4
+10
+7
+3
+3
+18
+18
+17
+3
+3
+3
+4
+5
+3
+3
+4
+12
+7
+3
+11
+13
+5
+4
+7
+13
+5
+4
+11
+3
+12
+3
+6
+4
+4
+21
+4
+6
+9
+5
+3
+10
+8
+4
+6
+4
+4
+6
+5
+4
+8
+6
+4
+6
+4
+4
+5
+9
+6
+3
+4
+2
+9
+3
+18
+2
+4
+3
+13
+3
+6
+6
+8
+7
+9
+3
+2
+16
+3
+4
+6
+3
+2
+33
+22
+14
+4
+9
+12
+4
+5
+6
+3
+23
+9
+4
+3
+5
+5
+3
+4
+5
+3
+5
+3
+10
+4
+5
+5
+8
+4
+4
+6
+8
+5
+4
+3
+4
+6
+3
+3
+3
+5
+9
+12
+6
+5
+9
+3
+5
+3
+2
+2
+2
+18
+3
+2
+21
+2
+5
+4
+6
+4
+5
+10
+3
+9
+3
+2
+10
+7
+3
+6
+6
+4
+4
+8
+12
+7
+3
+7
+3
+3
+9
+3
+4
+5
+4
+4
+5
+5
+10
+15
+4
+4
+14
+6
+227
+3
+14
+5
+216
+22
+5
+4
+2
+2
+6
+3
+4
+2
+9
+9
+4
+3
+28
+13
+11
+4
+5
+3
+3
+2
+3
+3
+5
+3
+4
+3
+5
+23
+26
+3
+4
+5
+6
+4
+6
+3
+5
+5
+3
+4
+3
+2
+2
+2
+7
+14
+3
+6
+7
+17
+2
+2
+15
+14
+16
+4
+6
+7
+13
+6
+4
+5
+6
+16
+3
+3
+28
+3
+6
+15
+3
+9
+2
+4
+6
+3
+3
+22
+4
+12
+6
+7
+2
+5
+4
+10
+3
+16
+6
+9
+2
+5
+12
+7
+5
+5
+5
+5
+2
+11
+9
+17
+4
+3
+11
+7
+3
+5
+15
+4
+3
+4
+211
+8
+7
+5
+4
+7
+6
+7
+6
+3
+6
+5
+6
+5
+3
+4
+4
+26
+4
+6
+10
+4
+4
+3
+2
+3
+3
+4
+5
+9
+3
+9
+4
+4
+5
+5
+8
+2
+4
+2
+3
+8
+4
+11
+19
+5
+8
+6
+3
+5
+6
+12
+3
+2
+4
+16
+12
+3
+4
+4
+8
+6
+5
+6
+6
+219
+8
+222
+6
+16
+3
+13
+19
+5
+4
+3
+11
+6
+10
+4
+7
+7
+12
+5
+3
+3
+5
+6
+10
+3
+8
+2
+5
+4
+7
+2
+4
+4
+2
+12
+9
+6
+4
+2
+40
+2
+4
+10
+4
+223
+4
+2
+20
+6
+7
+24
+5
+4
+5
+2
+20
+16
+6
+5
+13
+2
+3
+3
+19
+3
+2
+4
+5
+6
+7
+11
+12
+5
+6
+7
+7
+3
+5
+3
+5
+3
+14
+3
+4
+4
+2
+11
+1
+7
+3
+9
+6
+11
+12
+5
+8
+6
+221
+4
+2
+12
+4
+3
+15
+4
+5
+226
+7
+218
+7
+5
+4
+5
+18
+4
+5
+9
+4
+4
+2
+9
+18
+18
+9
+5
+6
+6
+3
+3
+7
+3
+5
+4
+4
+4
+12
+3
+6
+31
+5
+4
+7
+3
+6
+5
+6
+5
+11
+2
+2
+11
+11
+6
+7
+5
+8
+7
+10
+5
+23
+7
+4
+3
+5
+34
+2
+5
+23
+7
+3
+6
+8
+4
+4
+4
+2
+5
+3
+8
+5
+4
+8
+25
+2
+3
+17
+8
+3
+4
+8
+7
+3
+15
+6
+5
+7
+21
+9
+5
+6
+6
+5
+3
+2
+3
+10
+3
+6
+3
+14
+7
+4
+4
+8
+7
+8
+2
+6
+12
+4
+213
+6
+5
+21
+8
+2
+5
+23
+3
+11
+2
+3
+6
+25
+2
+3
+6
+7
+6
+6
+4
+4
+6
+3
+17
+9
+7
+6
+4
+3
+10
+7
+2
+3
+3
+3
+11
+8
+3
+7
+6
+4
+14
+36
+3
+4
+3
+3
+22
+13
+21
+4
+2
+7
+4
+4
+17
+15
+3
+7
+11
+2
+4
+7
+6
+209
+6
+3
+2
+2
+24
+4
+9
+4
+3
+3
+3
+29
+2
+2
+4
+3
+3
+5
+4
+6
+3
+3
+2
+4
diff -pruN 0.19.3+ds1-4/vendor/github.com/beorn7/perks/quantile/stream.go 0.21.3-0ubuntu1/vendor/github.com/beorn7/perks/quantile/stream.go
--- 0.19.3+ds1-4/vendor/github.com/beorn7/perks/quantile/stream.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/beorn7/perks/quantile/stream.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,316 @@
+// Package quantile computes approximate quantiles over an unbounded data
+// stream within low memory and CPU bounds.
+//
+// A small amount of accuracy is traded to achieve the above properties.
+//
+// Multiple streams can be merged before calling Query to generate a single set
+// of results. This is meaningful when the streams represent the same type of
+// data. See Merge and Samples.
+//
+// For more detailed information about the algorithm used, see:
+//
+// Effective Computation of Biased Quantiles over Data Streams
+//
+// http://www.cs.rutgers.edu/~muthu/bquant.pdf
+package quantile
+
+import (
+	"math"
+	"sort"
+)
+
+// Sample holds an observed value and meta information for compression. JSON
+// tags have been added for convenience.
+type Sample struct {
+	Value float64 `json:",string"`
+	Width float64 `json:",string"`
+	Delta float64 `json:",string"`
+}
+
+// Samples represents a slice of samples. It implements sort.Interface.
+type Samples []Sample
+
+func (a Samples) Len() int           { return len(a) }
+func (a Samples) Less(i, j int) bool { return a[i].Value < a[j].Value }
+func (a Samples) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
+
+type invariant func(s *stream, r float64) float64
+
+// NewLowBiased returns an initialized Stream for low-biased quantiles
+// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but
+// error guarantees can still be given even for the lower ranks of the data
+// distribution.
+//
+// The provided epsilon is a relative error, i.e. the true quantile of a value
+// returned by a query is guaranteed to be within (1±Epsilon)*Quantile.
+//
+// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error
+// properties.
+func NewLowBiased(epsilon float64) *Stream {
+	ƒ := func(s *stream, r float64) float64 {
+		return 2 * epsilon * r
+	}
+	return newStream(ƒ)
+}
+
+// NewHighBiased returns an initialized Stream for high-biased quantiles
+// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but
+// error guarantees can still be given even for the higher ranks of the data
+// distribution.
+//
+// The provided epsilon is a relative error, i.e. the true quantile of a value
+// returned by a query is guaranteed to be within 1-(1±Epsilon)*(1-Quantile).
+//
+// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error
+// properties.
+func NewHighBiased(epsilon float64) *Stream {
+	ƒ := func(s *stream, r float64) float64 {
+		return 2 * epsilon * (s.n - r)
+	}
+	return newStream(ƒ)
+}
+
+// NewTargeted returns an initialized Stream concerned with a particular set of
+// quantile values that are supplied a priori. Knowing these a priori reduces
+// space and computation time. The targets map maps the desired quantiles to
+// their absolute errors, i.e. the true quantile of a value returned by a query
+// is guaranteed to be within (Quantile±Epsilon).
+//
+// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error properties.
+func NewTargeted(targetMap map[float64]float64) *Stream {
+	// Convert map to slice to avoid slow iterations on a map.
+	// ƒ is called on the hot path, so converting the map to a slice
+	// beforehand results in significant CPU savings.
+	targets := targetMapToSlice(targetMap)
+
+	ƒ := func(s *stream, r float64) float64 {
+		var m = math.MaxFloat64
+		var f float64
+		for _, t := range targets {
+			if t.quantile*s.n <= r {
+				f = (2 * t.epsilon * r) / t.quantile
+			} else {
+				f = (2 * t.epsilon * (s.n - r)) / (1 - t.quantile)
+			}
+			if f < m {
+				m = f
+			}
+		}
+		return m
+	}
+	return newStream(ƒ)
+}
+
+type target struct {
+	quantile float64
+	epsilon  float64
+}
+
+func targetMapToSlice(targetMap map[float64]float64) []target {
+	targets := make([]target, 0, len(targetMap))
+
+	for quantile, epsilon := range targetMap {
+		t := target{
+			quantile: quantile,
+			epsilon:  epsilon,
+		}
+		targets = append(targets, t)
+	}
+
+	return targets
+}
+
+// Stream computes quantiles for a stream of float64s. It is not thread-safe by
+// design. Take care when using across multiple goroutines.
+type Stream struct {
+	*stream
+	b      Samples
+	sorted bool
+}
+
+func newStream(ƒ invariant) *Stream {
+	x := &stream{ƒ: ƒ}
+	return &Stream{x, make(Samples, 0, 500), true}
+}
+
+// Insert inserts v into the stream.
+func (s *Stream) Insert(v float64) {
+	s.insert(Sample{Value: v, Width: 1})
+}
+
+func (s *Stream) insert(sample Sample) {
+	s.b = append(s.b, sample)
+	s.sorted = false
+	if len(s.b) == cap(s.b) {
+		s.flush()
+	}
+}
+
+// Query returns the computed qth percentile value. If s was created with
+// NewTargeted, and q is not in the set of quantiles provided a priori, Query
+// will return an unspecified result.
+func (s *Stream) Query(q float64) float64 {
+	if !s.flushed() {
+		// Fast path when there hasn't been enough data for a flush;
+		// this also yields better accuracy for small sets of data.
+		l := len(s.b)
+		if l == 0 {
+			return 0
+		}
+		i := int(math.Ceil(float64(l) * q))
+		if i > 0 {
+			i -= 1
+		}
+		s.maybeSort()
+		return s.b[i].Value
+	}
+	s.flush()
+	return s.stream.query(q)
+}
+
+// Merge merges samples into the underlying streams samples. This is handy when
+// merging multiple streams from separate threads, database shards, etc.
+//
+// ATTENTION: This method is broken and does not yield correct results. The
+// underlying algorithm is not capable of merging streams correctly.
+func (s *Stream) Merge(samples Samples) {
+	sort.Sort(samples)
+	s.stream.merge(samples)
+}
+
+// Reset reinitializes and clears the list reusing the samples buffer memory.
+func (s *Stream) Reset() {
+	s.stream.reset()
+	s.b = s.b[:0]
+}
+
+// Samples returns stream samples held by s.
+func (s *Stream) Samples() Samples {
+	if !s.flushed() {
+		return s.b
+	}
+	s.flush()
+	return s.stream.samples()
+}
+
+// Count returns the total number of samples observed in the stream
+// since initialization.
+func (s *Stream) Count() int {
+	return len(s.b) + s.stream.count()
+}
+
+func (s *Stream) flush() {
+	s.maybeSort()
+	s.stream.merge(s.b)
+	s.b = s.b[:0]
+}
+
+func (s *Stream) maybeSort() {
+	if !s.sorted {
+		s.sorted = true
+		sort.Sort(s.b)
+	}
+}
+
+func (s *Stream) flushed() bool {
+	return len(s.stream.l) > 0
+}
+
+type stream struct {
+	n float64
+	l []Sample
+	ƒ invariant
+}
+
+func (s *stream) reset() {
+	s.l = s.l[:0]
+	s.n = 0
+}
+
+func (s *stream) insert(v float64) {
+	s.merge(Samples{{v, 1, 0}})
+}
+
+func (s *stream) merge(samples Samples) {
+	// TODO(beorn7): This tries to merge not only individual samples, but
+	// whole summaries. The paper doesn't mention merging summaries at
+	// all. Unittests show that the merging is inaccurate. Find out how to
+	// do merges properly.
+	var r float64
+	i := 0
+	for _, sample := range samples {
+		for ; i < len(s.l); i++ {
+			c := s.l[i]
+			if c.Value > sample.Value {
+				// Insert at position i.
+				s.l = append(s.l, Sample{})
+				copy(s.l[i+1:], s.l[i:])
+				s.l[i] = Sample{
+					sample.Value,
+					sample.Width,
+					math.Max(sample.Delta, math.Floor(s.ƒ(s, r))-1),
+					// TODO(beorn7): How to calculate delta correctly?
+				}
+				i++
+				goto inserted
+			}
+			r += c.Width
+		}
+		s.l = append(s.l, Sample{sample.Value, sample.Width, 0})
+		i++
+	inserted:
+		s.n += sample.Width
+		r += sample.Width
+	}
+	s.compress()
+}
+
+func (s *stream) count() int {
+	return int(s.n)
+}
+
+func (s *stream) query(q float64) float64 {
+	t := math.Ceil(q * s.n)
+	t += math.Ceil(s.ƒ(s, t) / 2)
+	p := s.l[0]
+	var r float64
+	for _, c := range s.l[1:] {
+		r += p.Width
+		if r+c.Width+c.Delta > t {
+			return p.Value
+		}
+		p = c
+	}
+	return p.Value
+}
+
+func (s *stream) compress() {
+	if len(s.l) < 2 {
+		return
+	}
+	x := s.l[len(s.l)-1]
+	xi := len(s.l) - 1
+	r := s.n - 1 - x.Width
+
+	for i := len(s.l) - 2; i >= 0; i-- {
+		c := s.l[i]
+		if c.Width+x.Width+x.Delta <= s.ƒ(s, r) {
+			x.Width += c.Width
+			s.l[xi] = x
+			// Remove element at i.
+			copy(s.l[i:], s.l[i+1:])
+			s.l = s.l[:len(s.l)-1]
+			xi -= 1
+		} else {
+			x = c
+			xi = i
+		}
+		r -= c.Width
+	}
+}
+
+func (s *stream) samples() Samples {
+	samples := make(Samples, len(s.l))
+	copy(samples, s.l)
+	return samples
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/cenkalti/backoff/v4/.gitignore 0.21.3-0ubuntu1/vendor/github.com/cenkalti/backoff/v4/.gitignore
--- 0.19.3+ds1-4/vendor/github.com/cenkalti/backoff/v4/.gitignore	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/cenkalti/backoff/v4/.gitignore	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,25 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+
+# IDEs
+.idea/
diff -pruN 0.19.3+ds1-4/vendor/github.com/cenkalti/backoff/v4/LICENSE 0.21.3-0ubuntu1/vendor/github.com/cenkalti/backoff/v4/LICENSE
--- 0.19.3+ds1-4/vendor/github.com/cenkalti/backoff/v4/LICENSE	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/cenkalti/backoff/v4/LICENSE	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,20 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 Cenk Altı
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software is furnished to do so,
+subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff -pruN 0.19.3+ds1-4/vendor/github.com/cenkalti/backoff/v4/README.md 0.21.3-0ubuntu1/vendor/github.com/cenkalti/backoff/v4/README.md
--- 0.19.3+ds1-4/vendor/github.com/cenkalti/backoff/v4/README.md	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/cenkalti/backoff/v4/README.md	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,30 @@
+# Exponential Backoff [![GoDoc][godoc image]][godoc] [![Coverage Status][coveralls image]][coveralls]
+
+This is a Go port of the exponential backoff algorithm from [Google's HTTP Client Library for Java][google-http-java-client].
+
+[Exponential backoff][exponential backoff wiki]
+is an algorithm that uses feedback to multiplicatively decrease the rate of some process,
+in order to gradually find an acceptable rate.
+The retries exponentially increase and stop increasing when a certain threshold is met.
+
+## Usage
+
+Import path is `github.com/cenkalti/backoff/v4`. Please note the version part at the end.
+
+Use https://pkg.go.dev/github.com/cenkalti/backoff/v4 to view the documentation.
+
+## Contributing
+
+* I would like to keep this library as small as possible.
+* Please don't send a PR without opening an issue and discussing it first.
+* If proposed change is not a common use case, I will probably not accept it.
+
+[godoc]: https://pkg.go.dev/github.com/cenkalti/backoff/v4
+[godoc image]: https://godoc.org/github.com/cenkalti/backoff?status.png
+[coveralls]: https://coveralls.io/github/cenkalti/backoff?branch=master
+[coveralls image]: https://coveralls.io/repos/github/cenkalti/backoff/badge.svg?branch=master
+
+[google-http-java-client]: https://github.com/google/google-http-java-client/blob/da1aa993e90285ec18579f1553339b00e19b3ab5/google-http-client/src/main/java/com/google/api/client/util/ExponentialBackOff.java
+[exponential backoff wiki]: http://en.wikipedia.org/wiki/Exponential_backoff
+
+[advanced example]: https://pkg.go.dev/github.com/cenkalti/backoff/v4?tab=doc#pkg-examples
diff -pruN 0.19.3+ds1-4/vendor/github.com/cenkalti/backoff/v4/backoff.go 0.21.3-0ubuntu1/vendor/github.com/cenkalti/backoff/v4/backoff.go
--- 0.19.3+ds1-4/vendor/github.com/cenkalti/backoff/v4/backoff.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/cenkalti/backoff/v4/backoff.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,66 @@
+// Package backoff implements backoff algorithms for retrying operations.
+//
+// Use Retry function for retrying operations that may fail.
+// If Retry does not meet your needs,
+// copy/paste the function into your project and modify as you wish.
+//
+// There is also Ticker type similar to time.Ticker.
+// You can use it if you need to work with channels.
+//
+// See Examples section below for usage examples.
+package backoff
+
+import "time"
+
+// BackOff is a backoff policy for retrying an operation.
+type BackOff interface {
+	// NextBackOff returns the duration to wait before retrying the operation,
+	// or backoff.Stop to indicate that no more retries should be made.
+	//
+	// Example usage:
+	//
+	// 	duration := backoff.NextBackOff();
+	// 	if (duration == backoff.Stop) {
+	// 		// Do not retry operation.
+	// 	} else {
+	// 		// Sleep for duration and retry operation.
+	// 	}
+	//
+	NextBackOff() time.Duration
+
+	// Reset to initial state.
+	Reset()
+}
+
+// Stop indicates that no more retries should be made for use in NextBackOff().
+const Stop time.Duration = -1
+
+// ZeroBackOff is a fixed backoff policy whose backoff time is always zero,
+// meaning that the operation is retried immediately without waiting, indefinitely.
+type ZeroBackOff struct{}
+
+func (b *ZeroBackOff) Reset() {}
+
+func (b *ZeroBackOff) NextBackOff() time.Duration { return 0 }
+
+// StopBackOff is a fixed backoff policy that always returns backoff.Stop for
+// NextBackOff(), meaning that the operation should never be retried.
+type StopBackOff struct{}
+
+func (b *StopBackOff) Reset() {}
+
+func (b *StopBackOff) NextBackOff() time.Duration { return Stop }
+
+// ConstantBackOff is a backoff policy that always returns the same backoff delay.
+// This is in contrast to an exponential backoff policy,
+// which returns a delay that grows longer as you call NextBackOff() over and over again.
+type ConstantBackOff struct {
+	Interval time.Duration
+}
+
+func (b *ConstantBackOff) Reset()                     {}
+func (b *ConstantBackOff) NextBackOff() time.Duration { return b.Interval }
+
+func NewConstantBackOff(d time.Duration) *ConstantBackOff {
+	return &ConstantBackOff{Interval: d}
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/cenkalti/backoff/v4/context.go 0.21.3-0ubuntu1/vendor/github.com/cenkalti/backoff/v4/context.go
--- 0.19.3+ds1-4/vendor/github.com/cenkalti/backoff/v4/context.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/cenkalti/backoff/v4/context.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,62 @@
+package backoff
+
+import (
+	"context"
+	"time"
+)
+
+// BackOffContext is a backoff policy that stops retrying after the context
+// is canceled.
+type BackOffContext interface { // nolint: golint
+	BackOff
+	Context() context.Context
+}
+
+type backOffContext struct {
+	BackOff
+	ctx context.Context
+}
+
+// WithContext returns a BackOffContext with context ctx
+//
+// ctx must not be nil
+func WithContext(b BackOff, ctx context.Context) BackOffContext { // nolint: golint
+	if ctx == nil {
+		panic("nil context")
+	}
+
+	if b, ok := b.(*backOffContext); ok {
+		return &backOffContext{
+			BackOff: b.BackOff,
+			ctx:     ctx,
+		}
+	}
+
+	return &backOffContext{
+		BackOff: b,
+		ctx:     ctx,
+	}
+}
+
+func getContext(b BackOff) context.Context {
+	if cb, ok := b.(BackOffContext); ok {
+		return cb.Context()
+	}
+	if tb, ok := b.(*backOffTries); ok {
+		return getContext(tb.delegate)
+	}
+	return context.Background()
+}
+
+func (b *backOffContext) Context() context.Context {
+	return b.ctx
+}
+
+func (b *backOffContext) NextBackOff() time.Duration {
+	select {
+	case <-b.ctx.Done():
+		return Stop
+	default:
+		return b.BackOff.NextBackOff()
+	}
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/cenkalti/backoff/v4/exponential.go 0.21.3-0ubuntu1/vendor/github.com/cenkalti/backoff/v4/exponential.go
--- 0.19.3+ds1-4/vendor/github.com/cenkalti/backoff/v4/exponential.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/cenkalti/backoff/v4/exponential.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,216 @@
+package backoff
+
+import (
+	"math/rand"
+	"time"
+)
+
+/*
+ExponentialBackOff is a backoff implementation that increases the backoff
+period for each retry attempt using a randomization function that grows exponentially.
+
+NextBackOff() is calculated using the following formula:
+
+ randomized interval =
+     RetryInterval * (random value in range [1 - RandomizationFactor, 1 + RandomizationFactor])
+
+In other words NextBackOff() will range between the randomization factor
+percentage below and above the retry interval.
+
+For example, given the following parameters:
+
+ RetryInterval = 2
+ RandomizationFactor = 0.5
+ Multiplier = 2
+
+the actual backoff period used in the next retry attempt will range between 1 and 3 seconds,
+multiplied by the exponential, that is, between 2 and 6 seconds.
+
+Note: MaxInterval caps the RetryInterval and not the randomized interval.
+
+If the time elapsed since an ExponentialBackOff instance is created goes past the
+MaxElapsedTime, then the method NextBackOff() starts returning backoff.Stop.
+
+The elapsed time can be reset by calling Reset().
+
+Example: Given the following default arguments, for 10 tries the sequence will be,
+and assuming we go over the MaxElapsedTime on the 10th try:
+
+ Request #  RetryInterval (seconds)  Randomized Interval (seconds)
+
+  1          0.5                     [0.25,   0.75]
+  2          0.75                    [0.375,  1.125]
+  3          1.125                   [0.562,  1.687]
+  4          1.687                   [0.8435, 2.53]
+  5          2.53                    [1.265,  3.795]
+  6          3.795                   [1.897,  5.692]
+  7          5.692                   [2.846,  8.538]
+  8          8.538                   [4.269, 12.807]
+  9         12.807                   [6.403, 19.210]
+ 10         19.210                   backoff.Stop
+
+Note: Implementation is not thread-safe.
+*/
+type ExponentialBackOff struct {
+	InitialInterval     time.Duration
+	RandomizationFactor float64
+	Multiplier          float64
+	MaxInterval         time.Duration
+	// After MaxElapsedTime the ExponentialBackOff returns Stop.
+	// It never stops if MaxElapsedTime == 0.
+	MaxElapsedTime time.Duration
+	Stop           time.Duration
+	Clock          Clock
+
+	currentInterval time.Duration
+	startTime       time.Time
+}
+
+// Clock is an interface that returns current time for BackOff.
+type Clock interface {
+	Now() time.Time
+}
+
+// ExponentialBackOffOpts is a function type used to configure ExponentialBackOff options.
+type ExponentialBackOffOpts func(*ExponentialBackOff)
+
+// Default values for ExponentialBackOff.
+const (
+	DefaultInitialInterval     = 500 * time.Millisecond
+	DefaultRandomizationFactor = 0.5
+	DefaultMultiplier          = 1.5
+	DefaultMaxInterval         = 60 * time.Second
+	DefaultMaxElapsedTime      = 15 * time.Minute
+)
+
+// NewExponentialBackOff creates an instance of ExponentialBackOff using default values.
+func NewExponentialBackOff(opts ...ExponentialBackOffOpts) *ExponentialBackOff {
+	b := &ExponentialBackOff{
+		InitialInterval:     DefaultInitialInterval,
+		RandomizationFactor: DefaultRandomizationFactor,
+		Multiplier:          DefaultMultiplier,
+		MaxInterval:         DefaultMaxInterval,
+		MaxElapsedTime:      DefaultMaxElapsedTime,
+		Stop:                Stop,
+		Clock:               SystemClock,
+	}
+	for _, fn := range opts {
+		fn(b)
+	}
+	b.Reset()
+	return b
+}
+
+// WithInitialInterval sets the initial interval between retries.
+func WithInitialInterval(duration time.Duration) ExponentialBackOffOpts {
+	return func(ebo *ExponentialBackOff) {
+		ebo.InitialInterval = duration
+	}
+}
+
+// WithRandomizationFactor sets the randomization factor to add jitter to intervals.
+func WithRandomizationFactor(randomizationFactor float64) ExponentialBackOffOpts {
+	return func(ebo *ExponentialBackOff) {
+		ebo.RandomizationFactor = randomizationFactor
+	}
+}
+
+// WithMultiplier sets the multiplier for increasing the interval after each retry.
+func WithMultiplier(multiplier float64) ExponentialBackOffOpts {
+	return func(ebo *ExponentialBackOff) {
+		ebo.Multiplier = multiplier
+	}
+}
+
+// WithMaxInterval sets the maximum interval between retries.
+func WithMaxInterval(duration time.Duration) ExponentialBackOffOpts {
+	return func(ebo *ExponentialBackOff) {
+		ebo.MaxInterval = duration
+	}
+}
+
+// WithMaxElapsedTime sets the maximum total time for retries.
+func WithMaxElapsedTime(duration time.Duration) ExponentialBackOffOpts {
+	return func(ebo *ExponentialBackOff) {
+		ebo.MaxElapsedTime = duration
+	}
+}
+
+// WithRetryStopDuration sets the duration after which retries should stop.
+func WithRetryStopDuration(duration time.Duration) ExponentialBackOffOpts {
+	return func(ebo *ExponentialBackOff) {
+		ebo.Stop = duration
+	}
+}
+
+// WithClockProvider sets the clock used to measure time.
+func WithClockProvider(clock Clock) ExponentialBackOffOpts {
+	return func(ebo *ExponentialBackOff) {
+		ebo.Clock = clock
+	}
+}
+
+type systemClock struct{}
+
+func (t systemClock) Now() time.Time {
+	return time.Now()
+}
+
+// SystemClock implements Clock interface that uses time.Now().
+var SystemClock = systemClock{}
+
+// Reset the interval back to the initial retry interval and restarts the timer.
+// Reset must be called before using b.
+func (b *ExponentialBackOff) Reset() {
+	b.currentInterval = b.InitialInterval
+	b.startTime = b.Clock.Now()
+}
+
+// NextBackOff calculates the next backoff interval using the formula:
+// 	Randomized interval = RetryInterval * (1 ± RandomizationFactor)
+func (b *ExponentialBackOff) NextBackOff() time.Duration {
+	// Make sure we have not gone over the maximum elapsed time.
+	elapsed := b.GetElapsedTime()
+	next := getRandomValueFromInterval(b.RandomizationFactor, rand.Float64(), b.currentInterval)
+	b.incrementCurrentInterval()
+	if b.MaxElapsedTime != 0 && elapsed+next > b.MaxElapsedTime {
+		return b.Stop
+	}
+	return next
+}
+
+// GetElapsedTime returns the elapsed time since an ExponentialBackOff instance
+// is created and is reset when Reset() is called.
+//
+// The elapsed time is computed using time.Now().UnixNano(). It is
+// safe to call even while the backoff policy is used by a running
+// ticker.
+func (b *ExponentialBackOff) GetElapsedTime() time.Duration {
+	return b.Clock.Now().Sub(b.startTime)
+}
+
+// Increments the current interval by multiplying it with the multiplier.
+func (b *ExponentialBackOff) incrementCurrentInterval() {
+	// Check for overflow, if overflow is detected set the current interval to the max interval.
+	if float64(b.currentInterval) >= float64(b.MaxInterval)/b.Multiplier {
+		b.currentInterval = b.MaxInterval
+	} else {
+		b.currentInterval = time.Duration(float64(b.currentInterval) * b.Multiplier)
+	}
+}
+
+// Returns a random value from the following interval:
+// 	[currentInterval - randomizationFactor * currentInterval, currentInterval + randomizationFactor * currentInterval].
+func getRandomValueFromInterval(randomizationFactor, random float64, currentInterval time.Duration) time.Duration {
+	if randomizationFactor == 0 {
+		return currentInterval // make sure no randomness is used when randomizationFactor is 0.
+	}
+	var delta = randomizationFactor * float64(currentInterval)
+	var minInterval = float64(currentInterval) - delta
+	var maxInterval = float64(currentInterval) + delta
+
+	// Get a random value from the range [minInterval, maxInterval].
+	// The formula used below has a +1 because if the minInterval is 1 and the maxInterval is 3 then
+	// we want a 33% chance for selecting either 1, 2 or 3.
+	return time.Duration(minInterval + (random * (maxInterval - minInterval + 1)))
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/cenkalti/backoff/v4/retry.go 0.21.3-0ubuntu1/vendor/github.com/cenkalti/backoff/v4/retry.go
--- 0.19.3+ds1-4/vendor/github.com/cenkalti/backoff/v4/retry.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/cenkalti/backoff/v4/retry.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,146 @@
+package backoff
+
+import (
+	"errors"
+	"time"
+)
+
+// An OperationWithData is executed by RetryWithData() or RetryNotifyWithData().
+// The operation will be retried using a backoff policy if it returns an error.
+type OperationWithData[T any] func() (T, error)
+
+// An Operation is executed by Retry() or RetryNotify().
+// The operation will be retried using a backoff policy if it returns an error.
+type Operation func() error
+
+func (o Operation) withEmptyData() OperationWithData[struct{}] {
+	return func() (struct{}, error) {
+		return struct{}{}, o()
+	}
+}
+
+// Notify is a notify-on-error function. It receives an operation error and
+// backoff delay if the operation failed (with an error).
+//
+// NOTE that if the backoff policy stated to stop retrying,
+// the notify function isn't called.
+type Notify func(error, time.Duration)
+
+// Retry the operation o until it does not return error or BackOff stops.
+// o is guaranteed to be run at least once.
+//
+// If o returns a *PermanentError, the operation is not retried, and the
+// wrapped error is returned.
+//
+// Retry sleeps the goroutine for the duration returned by BackOff after a
+// failed operation returns.
+func Retry(o Operation, b BackOff) error {
+	return RetryNotify(o, b, nil)
+}
+
+// RetryWithData is like Retry but returns data in the response too.
+func RetryWithData[T any](o OperationWithData[T], b BackOff) (T, error) {
+	return RetryNotifyWithData(o, b, nil)
+}
+
+// RetryNotify calls notify function with the error and wait duration
+// for each failed attempt before sleep.
+func RetryNotify(operation Operation, b BackOff, notify Notify) error {
+	return RetryNotifyWithTimer(operation, b, notify, nil)
+}
+
+// RetryNotifyWithData is like RetryNotify but returns data in the response too.
+func RetryNotifyWithData[T any](operation OperationWithData[T], b BackOff, notify Notify) (T, error) {
+	return doRetryNotify(operation, b, notify, nil)
+}
+
+// RetryNotifyWithTimer calls notify function with the error and wait duration using the given Timer
+// for each failed attempt before sleep.
+// A default timer that uses system timer is used when nil is passed.
+func RetryNotifyWithTimer(operation Operation, b BackOff, notify Notify, t Timer) error {
+	_, err := doRetryNotify(operation.withEmptyData(), b, notify, t)
+	return err
+}
+
+// RetryNotifyWithTimerAndData is like RetryNotifyWithTimer but returns data in the response too.
+func RetryNotifyWithTimerAndData[T any](operation OperationWithData[T], b BackOff, notify Notify, t Timer) (T, error) {
+	return doRetryNotify(operation, b, notify, t)
+}
+
+func doRetryNotify[T any](operation OperationWithData[T], b BackOff, notify Notify, t Timer) (T, error) {
+	var (
+		err  error
+		next time.Duration
+		res  T
+	)
+	if t == nil {
+		t = &defaultTimer{}
+	}
+
+	defer func() {
+		t.Stop()
+	}()
+
+	ctx := getContext(b)
+
+	b.Reset()
+	for {
+		res, err = operation()
+		if err == nil {
+			return res, nil
+		}
+
+		var permanent *PermanentError
+		if errors.As(err, &permanent) {
+			return res, permanent.Err
+		}
+
+		if next = b.NextBackOff(); next == Stop {
+			if cerr := ctx.Err(); cerr != nil {
+				return res, cerr
+			}
+
+			return res, err
+		}
+
+		if notify != nil {
+			notify(err, next)
+		}
+
+		t.Start(next)
+
+		select {
+		case <-ctx.Done():
+			return res, ctx.Err()
+		case <-t.C():
+		}
+	}
+}
+
+// PermanentError signals that the operation should not be retried.
+type PermanentError struct {
+	Err error
+}
+
+func (e *PermanentError) Error() string {
+	return e.Err.Error()
+}
+
+func (e *PermanentError) Unwrap() error {
+	return e.Err
+}
+
+func (e *PermanentError) Is(target error) bool {
+	_, ok := target.(*PermanentError)
+	return ok
+}
+
+// Permanent wraps the given err in a *PermanentError.
+func Permanent(err error) error {
+	if err == nil {
+		return nil
+	}
+	return &PermanentError{
+		Err: err,
+	}
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/cenkalti/backoff/v4/ticker.go 0.21.3-0ubuntu1/vendor/github.com/cenkalti/backoff/v4/ticker.go
--- 0.19.3+ds1-4/vendor/github.com/cenkalti/backoff/v4/ticker.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/cenkalti/backoff/v4/ticker.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,97 @@
+package backoff
+
+import (
+	"context"
+	"sync"
+	"time"
+)
+
+// Ticker holds a channel that delivers `ticks' of a clock at times reported by a BackOff.
+//
+// Ticks will continue to arrive when the previous operation is still running,
+// so operations that take a while to fail could run in quick succession.
+type Ticker struct {
+	C        <-chan time.Time
+	c        chan time.Time
+	b        BackOff
+	ctx      context.Context
+	timer    Timer
+	stop     chan struct{}
+	stopOnce sync.Once
+}
+
+// NewTicker returns a new Ticker containing a channel that will send
+// the time at times specified by the BackOff argument. Ticker is
+// guaranteed to tick at least once.  The channel is closed when Stop
+// method is called or BackOff stops. It is not safe to manipulate the
+// provided backoff policy (notably calling NextBackOff or Reset)
+// while the ticker is running.
+func NewTicker(b BackOff) *Ticker {
+	return NewTickerWithTimer(b, &defaultTimer{})
+}
+
+// NewTickerWithTimer returns a new Ticker with a custom timer.
+// A default timer that uses system timer is used when nil is passed.
+func NewTickerWithTimer(b BackOff, timer Timer) *Ticker {
+	if timer == nil {
+		timer = &defaultTimer{}
+	}
+	c := make(chan time.Time)
+	t := &Ticker{
+		C:     c,
+		c:     c,
+		b:     b,
+		ctx:   getContext(b),
+		timer: timer,
+		stop:  make(chan struct{}),
+	}
+	t.b.Reset()
+	go t.run()
+	return t
+}
+
+// Stop turns off a ticker. After Stop, no more ticks will be sent.
+func (t *Ticker) Stop() {
+	t.stopOnce.Do(func() { close(t.stop) })
+}
+
+func (t *Ticker) run() {
+	c := t.c
+	defer close(c)
+
+	// Ticker is guaranteed to tick at least once.
+	afterC := t.send(time.Now())
+
+	for {
+		if afterC == nil {
+			return
+		}
+
+		select {
+		case tick := <-afterC:
+			afterC = t.send(tick)
+		case <-t.stop:
+			t.c = nil // Prevent future ticks from being sent to the channel.
+			return
+		case <-t.ctx.Done():
+			return
+		}
+	}
+}
+
+func (t *Ticker) send(tick time.Time) <-chan time.Time {
+	select {
+	case t.c <- tick:
+	case <-t.stop:
+		return nil
+	}
+
+	next := t.b.NextBackOff()
+	if next == Stop {
+		t.Stop()
+		return nil
+	}
+
+	t.timer.Start(next)
+	return t.timer.C()
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/cenkalti/backoff/v4/timer.go 0.21.3-0ubuntu1/vendor/github.com/cenkalti/backoff/v4/timer.go
--- 0.19.3+ds1-4/vendor/github.com/cenkalti/backoff/v4/timer.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/cenkalti/backoff/v4/timer.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,35 @@
+package backoff
+
+import "time"
+
+type Timer interface {
+	Start(duration time.Duration)
+	Stop()
+	C() <-chan time.Time
+}
+
+// defaultTimer implements Timer interface using time.Timer
+type defaultTimer struct {
+	timer *time.Timer
+}
+
+// C returns the timers channel which receives the current time when the timer fires.
+func (t *defaultTimer) C() <-chan time.Time {
+	return t.timer.C
+}
+
+// Start starts the timer to fire after the given duration
+func (t *defaultTimer) Start(duration time.Duration) {
+	if t.timer == nil {
+		t.timer = time.NewTimer(duration)
+	} else {
+		t.timer.Reset(duration)
+	}
+}
+
+// Stop is called when the timer is not used anymore and resources may be freed.
+func (t *defaultTimer) Stop() {
+	if t.timer != nil {
+		t.timer.Stop()
+	}
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/cenkalti/backoff/v4/tries.go 0.21.3-0ubuntu1/vendor/github.com/cenkalti/backoff/v4/tries.go
--- 0.19.3+ds1-4/vendor/github.com/cenkalti/backoff/v4/tries.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/cenkalti/backoff/v4/tries.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,38 @@
+package backoff
+
+import "time"
+
+/*
+WithMaxRetries creates a wrapper around another BackOff, which will
+return Stop if NextBackOff() has been called too many times since
+the last time Reset() was called
+
+Note: Implementation is not thread-safe.
+*/
+func WithMaxRetries(b BackOff, max uint64) BackOff {
+	return &backOffTries{delegate: b, maxTries: max}
+}
+
+type backOffTries struct {
+	delegate BackOff
+	maxTries uint64
+	numTries uint64
+}
+
+func (b *backOffTries) NextBackOff() time.Duration {
+	if b.maxTries == 0 {
+		return Stop
+	}
+	if b.maxTries > 0 {
+		if b.maxTries <= b.numTries {
+			return Stop
+		}
+		b.numTries++
+	}
+	return b.delegate.NextBackOff()
+}
+
+func (b *backOffTries) Reset() {
+	b.numTries = 0
+	b.delegate.Reset()
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/cespare/xxhash/v2/LICENSE.txt 0.21.3-0ubuntu1/vendor/github.com/cespare/xxhash/v2/LICENSE.txt
--- 0.19.3+ds1-4/vendor/github.com/cespare/xxhash/v2/LICENSE.txt	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/cespare/xxhash/v2/LICENSE.txt	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,22 @@
+Copyright (c) 2016 Caleb Spare
+
+MIT License
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff -pruN 0.19.3+ds1-4/vendor/github.com/cespare/xxhash/v2/README.md 0.21.3-0ubuntu1/vendor/github.com/cespare/xxhash/v2/README.md
--- 0.19.3+ds1-4/vendor/github.com/cespare/xxhash/v2/README.md	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/cespare/xxhash/v2/README.md	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,74 @@
+# xxhash
+
+[![Go Reference](https://pkg.go.dev/badge/github.com/cespare/xxhash/v2.svg)](https://pkg.go.dev/github.com/cespare/xxhash/v2)
+[![Test](https://github.com/cespare/xxhash/actions/workflows/test.yml/badge.svg)](https://github.com/cespare/xxhash/actions/workflows/test.yml)
+
+xxhash is a Go implementation of the 64-bit [xxHash] algorithm, XXH64. This is a
+high-quality hashing algorithm that is much faster than anything in the Go
+standard library.
+
+This package provides a straightforward API:
+
+```
+func Sum64(b []byte) uint64
+func Sum64String(s string) uint64
+type Digest struct{ ... }
+    func New() *Digest
+```
+
+The `Digest` type implements hash.Hash64. Its key methods are:
+
+```
+func (*Digest) Write([]byte) (int, error)
+func (*Digest) WriteString(string) (int, error)
+func (*Digest) Sum64() uint64
+```
+
+The package is written with optimized pure Go and also contains even faster
+assembly implementations for amd64 and arm64. If desired, the `purego` build tag
+opts into using the Go code even on those architectures.
+
+[xxHash]: http://cyan4973.github.io/xxHash/
+
+## Compatibility
+
+This package is in a module and the latest code is in version 2 of the module.
+You need a version of Go with at least "minimal module compatibility" to use
+github.com/cespare/xxhash/v2:
+
+* 1.9.7+ for Go 1.9
+* 1.10.3+ for Go 1.10
+* Go 1.11 or later
+
+I recommend using the latest release of Go.
+
+## Benchmarks
+
+Here are some quick benchmarks comparing the pure-Go and assembly
+implementations of Sum64.
+
+| input size | purego    | asm       |
+| ---------- | --------- | --------- |
+| 4 B        |  1.3 GB/s |  1.2 GB/s |
+| 16 B       |  2.9 GB/s |  3.5 GB/s |
+| 100 B      |  6.9 GB/s |  8.1 GB/s |
+| 4 KB       | 11.7 GB/s | 16.7 GB/s |
+| 10 MB      | 12.0 GB/s | 17.3 GB/s |
+
+These numbers were generated on Ubuntu 20.04 with an Intel Xeon Platinum 8252C
+CPU using the following commands under Go 1.19.2:
+
+```
+benchstat <(go test -tags purego -benchtime 500ms -count 15 -bench 'Sum64$')
+benchstat <(go test -benchtime 500ms -count 15 -bench 'Sum64$')
+```
+
+## Projects using this package
+
+- [InfluxDB](https://github.com/influxdata/influxdb)
+- [Prometheus](https://github.com/prometheus/prometheus)
+- [VictoriaMetrics](https://github.com/VictoriaMetrics/VictoriaMetrics)
+- [FreeCache](https://github.com/coocood/freecache)
+- [FastCache](https://github.com/VictoriaMetrics/fastcache)
+- [Ristretto](https://github.com/dgraph-io/ristretto)
+- [Badger](https://github.com/dgraph-io/badger)
diff -pruN 0.19.3+ds1-4/vendor/github.com/cespare/xxhash/v2/testall.sh 0.21.3-0ubuntu1/vendor/github.com/cespare/xxhash/v2/testall.sh
--- 0.19.3+ds1-4/vendor/github.com/cespare/xxhash/v2/testall.sh	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/cespare/xxhash/v2/testall.sh	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,10 @@
+#!/bin/bash
+set -eu -o pipefail
+
+# Small convenience script for running the tests with various combinations of
+# arch/tags. This assumes we're running on amd64 and have qemu available.
+
+go test ./...
+go test -tags purego ./...
+GOARCH=arm64 go test
+GOARCH=arm64 go test -tags purego
diff -pruN 0.19.3+ds1-4/vendor/github.com/cespare/xxhash/v2/xxhash.go 0.21.3-0ubuntu1/vendor/github.com/cespare/xxhash/v2/xxhash.go
--- 0.19.3+ds1-4/vendor/github.com/cespare/xxhash/v2/xxhash.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/cespare/xxhash/v2/xxhash.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,243 @@
+// Package xxhash implements the 64-bit variant of xxHash (XXH64) as described
+// at http://cyan4973.github.io/xxHash/.
+package xxhash
+
+import (
+	"encoding/binary"
+	"errors"
+	"math/bits"
+)
+
+const (
+	prime1 uint64 = 11400714785074694791
+	prime2 uint64 = 14029467366897019727
+	prime3 uint64 = 1609587929392839161
+	prime4 uint64 = 9650029242287828579
+	prime5 uint64 = 2870177450012600261
+)
+
+// Store the primes in an array as well.
+//
+// The consts are used when possible in Go code to avoid MOVs but we need a
+// contiguous array for the assembly code.
+var primes = [...]uint64{prime1, prime2, prime3, prime4, prime5}
+
+// Digest implements hash.Hash64.
+//
+// Note that a zero-valued Digest is not ready to receive writes.
+// Call Reset or create a Digest using New before calling other methods.
+type Digest struct {
+	v1    uint64
+	v2    uint64
+	v3    uint64
+	v4    uint64
+	total uint64
+	mem   [32]byte
+	n     int // how much of mem is used
+}
+
+// New creates a new Digest with a zero seed.
+func New() *Digest {
+	return NewWithSeed(0)
+}
+
+// NewWithSeed creates a new Digest with the given seed.
+func NewWithSeed(seed uint64) *Digest {
+	var d Digest
+	d.ResetWithSeed(seed)
+	return &d
+}
+
+// Reset clears the Digest's state so that it can be reused.
+// It uses a seed value of zero.
+func (d *Digest) Reset() {
+	d.ResetWithSeed(0)
+}
+
+// ResetWithSeed clears the Digest's state so that it can be reused.
+// It uses the given seed to initialize the state.
+func (d *Digest) ResetWithSeed(seed uint64) {
+	d.v1 = seed + prime1 + prime2
+	d.v2 = seed + prime2
+	d.v3 = seed
+	d.v4 = seed - prime1
+	d.total = 0
+	d.n = 0
+}
+
+// Size always returns 8 bytes.
+func (d *Digest) Size() int { return 8 }
+
+// BlockSize always returns 32 bytes.
+func (d *Digest) BlockSize() int { return 32 }
+
+// Write adds more data to d. It always returns len(b), nil.
+func (d *Digest) Write(b []byte) (n int, err error) {
+	n = len(b)
+	d.total += uint64(n)
+
+	memleft := d.mem[d.n&(len(d.mem)-1):]
+
+	if d.n+n < 32 {
+		// This new data doesn't even fill the current block.
+		copy(memleft, b)
+		d.n += n
+		return
+	}
+
+	if d.n > 0 {
+		// Finish off the partial block.
+		c := copy(memleft, b)
+		d.v1 = round(d.v1, u64(d.mem[0:8]))
+		d.v2 = round(d.v2, u64(d.mem[8:16]))
+		d.v3 = round(d.v3, u64(d.mem[16:24]))
+		d.v4 = round(d.v4, u64(d.mem[24:32]))
+		b = b[c:]
+		d.n = 0
+	}
+
+	if len(b) >= 32 {
+		// One or more full blocks left.
+		nw := writeBlocks(d, b)
+		b = b[nw:]
+	}
+
+	// Store any remaining partial block.
+	copy(d.mem[:], b)
+	d.n = len(b)
+
+	return
+}
+
+// Sum appends the current hash to b and returns the resulting slice.
+func (d *Digest) Sum(b []byte) []byte {
+	s := d.Sum64()
+	return append(
+		b,
+		byte(s>>56),
+		byte(s>>48),
+		byte(s>>40),
+		byte(s>>32),
+		byte(s>>24),
+		byte(s>>16),
+		byte(s>>8),
+		byte(s),
+	)
+}
+
+// Sum64 returns the current hash.
+func (d *Digest) Sum64() uint64 {
+	var h uint64
+
+	if d.total >= 32 {
+		v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4
+		h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4)
+		h = mergeRound(h, v1)
+		h = mergeRound(h, v2)
+		h = mergeRound(h, v3)
+		h = mergeRound(h, v4)
+	} else {
+		h = d.v3 + prime5
+	}
+
+	h += d.total
+
+	b := d.mem[:d.n&(len(d.mem)-1)]
+	for ; len(b) >= 8; b = b[8:] {
+		k1 := round(0, u64(b[:8]))
+		h ^= k1
+		h = rol27(h)*prime1 + prime4
+	}
+	if len(b) >= 4 {
+		h ^= uint64(u32(b[:4])) * prime1
+		h = rol23(h)*prime2 + prime3
+		b = b[4:]
+	}
+	for ; len(b) > 0; b = b[1:] {
+		h ^= uint64(b[0]) * prime5
+		h = rol11(h) * prime1
+	}
+
+	h ^= h >> 33
+	h *= prime2
+	h ^= h >> 29
+	h *= prime3
+	h ^= h >> 32
+
+	return h
+}
+
+const (
+	magic         = "xxh\x06"
+	marshaledSize = len(magic) + 8*5 + 32
+)
+
+// MarshalBinary implements the encoding.BinaryMarshaler interface.
+func (d *Digest) MarshalBinary() ([]byte, error) {
+	b := make([]byte, 0, marshaledSize)
+	b = append(b, magic...)
+	b = appendUint64(b, d.v1)
+	b = appendUint64(b, d.v2)
+	b = appendUint64(b, d.v3)
+	b = appendUint64(b, d.v4)
+	b = appendUint64(b, d.total)
+	b = append(b, d.mem[:d.n]...)
+	b = b[:len(b)+len(d.mem)-d.n]
+	return b, nil
+}
+
+// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface.
+func (d *Digest) UnmarshalBinary(b []byte) error {
+	if len(b) < len(magic) || string(b[:len(magic)]) != magic {
+		return errors.New("xxhash: invalid hash state identifier")
+	}
+	if len(b) != marshaledSize {
+		return errors.New("xxhash: invalid hash state size")
+	}
+	b = b[len(magic):]
+	b, d.v1 = consumeUint64(b)
+	b, d.v2 = consumeUint64(b)
+	b, d.v3 = consumeUint64(b)
+	b, d.v4 = consumeUint64(b)
+	b, d.total = consumeUint64(b)
+	copy(d.mem[:], b)
+	d.n = int(d.total % uint64(len(d.mem)))
+	return nil
+}
+
+func appendUint64(b []byte, x uint64) []byte {
+	var a [8]byte
+	binary.LittleEndian.PutUint64(a[:], x)
+	return append(b, a[:]...)
+}
+
+func consumeUint64(b []byte) ([]byte, uint64) {
+	x := u64(b)
+	return b[8:], x
+}
+
+func u64(b []byte) uint64 { return binary.LittleEndian.Uint64(b) }
+func u32(b []byte) uint32 { return binary.LittleEndian.Uint32(b) }
+
+func round(acc, input uint64) uint64 {
+	acc += input * prime2
+	acc = rol31(acc)
+	acc *= prime1
+	return acc
+}
+
+func mergeRound(acc, val uint64) uint64 {
+	val = round(0, val)
+	acc ^= val
+	acc = acc*prime1 + prime4
+	return acc
+}
+
+func rol1(x uint64) uint64  { return bits.RotateLeft64(x, 1) }
+func rol7(x uint64) uint64  { return bits.RotateLeft64(x, 7) }
+func rol11(x uint64) uint64 { return bits.RotateLeft64(x, 11) }
+func rol12(x uint64) uint64 { return bits.RotateLeft64(x, 12) }
+func rol18(x uint64) uint64 { return bits.RotateLeft64(x, 18) }
+func rol23(x uint64) uint64 { return bits.RotateLeft64(x, 23) }
+func rol27(x uint64) uint64 { return bits.RotateLeft64(x, 27) }
+func rol31(x uint64) uint64 { return bits.RotateLeft64(x, 31) }
diff -pruN 0.19.3+ds1-4/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s 0.21.3-0ubuntu1/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s
--- 0.19.3+ds1-4/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,209 @@
+//go:build !appengine && gc && !purego
+// +build !appengine
+// +build gc
+// +build !purego
+
+#include "textflag.h"
+
+// Registers:
+#define h      AX
+#define d      AX
+#define p      SI // pointer to advance through b
+#define n      DX
+#define end    BX // loop end
+#define v1     R8
+#define v2     R9
+#define v3     R10
+#define v4     R11
+#define x      R12
+#define prime1 R13
+#define prime2 R14
+#define prime4 DI
+
+#define round(acc, x) \
+	IMULQ prime2, x   \
+	ADDQ  x, acc      \
+	ROLQ  $31, acc    \
+	IMULQ prime1, acc
+
+// round0 performs the operation x = round(0, x).
+#define round0(x) \
+	IMULQ prime2, x \
+	ROLQ  $31, x    \
+	IMULQ prime1, x
+
+// mergeRound applies a merge round on the two registers acc and x.
+// It assumes that prime1, prime2, and prime4 have been loaded.
+#define mergeRound(acc, x) \
+	round0(x)         \
+	XORQ  x, acc      \
+	IMULQ prime1, acc \
+	ADDQ  prime4, acc
+
+// blockLoop processes as many 32-byte blocks as possible,
+// updating v1, v2, v3, and v4. It assumes that there is at least one block
+// to process.
+#define blockLoop() \
+loop:  \
+	MOVQ +0(p), x  \
+	round(v1, x)   \
+	MOVQ +8(p), x  \
+	round(v2, x)   \
+	MOVQ +16(p), x \
+	round(v3, x)   \
+	MOVQ +24(p), x \
+	round(v4, x)   \
+	ADDQ $32, p    \
+	CMPQ p, end    \
+	JLE  loop
+
+// func Sum64(b []byte) uint64
+TEXT ·Sum64(SB), NOSPLIT|NOFRAME, $0-32
+	// Load fixed primes.
+	MOVQ ·primes+0(SB), prime1
+	MOVQ ·primes+8(SB), prime2
+	MOVQ ·primes+24(SB), prime4
+
+	// Load slice.
+	MOVQ b_base+0(FP), p
+	MOVQ b_len+8(FP), n
+	LEAQ (p)(n*1), end
+
+	// The first loop limit will be len(b)-32.
+	SUBQ $32, end
+
+	// Check whether we have at least one block.
+	CMPQ n, $32
+	JLT  noBlocks
+
+	// Set up initial state (v1, v2, v3, v4).
+	MOVQ prime1, v1
+	ADDQ prime2, v1
+	MOVQ prime2, v2
+	XORQ v3, v3
+	XORQ v4, v4
+	SUBQ prime1, v4
+
+	blockLoop()
+
+	MOVQ v1, h
+	ROLQ $1, h
+	MOVQ v2, x
+	ROLQ $7, x
+	ADDQ x, h
+	MOVQ v3, x
+	ROLQ $12, x
+	ADDQ x, h
+	MOVQ v4, x
+	ROLQ $18, x
+	ADDQ x, h
+
+	mergeRound(h, v1)
+	mergeRound(h, v2)
+	mergeRound(h, v3)
+	mergeRound(h, v4)
+
+	JMP afterBlocks
+
+noBlocks:
+	MOVQ ·primes+32(SB), h
+
+afterBlocks:
+	ADDQ n, h
+
+	ADDQ $24, end
+	CMPQ p, end
+	JG   try4
+
+loop8:
+	MOVQ  (p), x
+	ADDQ  $8, p
+	round0(x)
+	XORQ  x, h
+	ROLQ  $27, h
+	IMULQ prime1, h
+	ADDQ  prime4, h
+
+	CMPQ p, end
+	JLE  loop8
+
+try4:
+	ADDQ $4, end
+	CMPQ p, end
+	JG   try1
+
+	MOVL  (p), x
+	ADDQ  $4, p
+	IMULQ prime1, x
+	XORQ  x, h
+
+	ROLQ  $23, h
+	IMULQ prime2, h
+	ADDQ  ·primes+16(SB), h
+
+try1:
+	ADDQ $4, end
+	CMPQ p, end
+	JGE  finalize
+
+loop1:
+	MOVBQZX (p), x
+	ADDQ    $1, p
+	IMULQ   ·primes+32(SB), x
+	XORQ    x, h
+	ROLQ    $11, h
+	IMULQ   prime1, h
+
+	CMPQ p, end
+	JL   loop1
+
+finalize:
+	MOVQ  h, x
+	SHRQ  $33, x
+	XORQ  x, h
+	IMULQ prime2, h
+	MOVQ  h, x
+	SHRQ  $29, x
+	XORQ  x, h
+	IMULQ ·primes+16(SB), h
+	MOVQ  h, x
+	SHRQ  $32, x
+	XORQ  x, h
+
+	MOVQ h, ret+24(FP)
+	RET
+
+// func writeBlocks(d *Digest, b []byte) int
+TEXT ·writeBlocks(SB), NOSPLIT|NOFRAME, $0-40
+	// Load fixed primes needed for round.
+	MOVQ ·primes+0(SB), prime1
+	MOVQ ·primes+8(SB), prime2
+
+	// Load slice.
+	MOVQ b_base+8(FP), p
+	MOVQ b_len+16(FP), n
+	LEAQ (p)(n*1), end
+	SUBQ $32, end
+
+	// Load vN from d.
+	MOVQ s+0(FP), d
+	MOVQ 0(d), v1
+	MOVQ 8(d), v2
+	MOVQ 16(d), v3
+	MOVQ 24(d), v4
+
+	// We don't need to check the loop condition here; this function is
+	// always called with at least one block of data to process.
+	blockLoop()
+
+	// Copy vN back to d.
+	MOVQ v1, 0(d)
+	MOVQ v2, 8(d)
+	MOVQ v3, 16(d)
+	MOVQ v4, 24(d)
+
+	// The number of bytes written is p minus the old base pointer.
+	SUBQ b_base+8(FP), p
+	MOVQ p, ret+32(FP)
+
+	RET
diff -pruN 0.19.3+ds1-4/vendor/github.com/cespare/xxhash/v2/xxhash_arm64.s 0.21.3-0ubuntu1/vendor/github.com/cespare/xxhash/v2/xxhash_arm64.s
--- 0.19.3+ds1-4/vendor/github.com/cespare/xxhash/v2/xxhash_arm64.s	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/cespare/xxhash/v2/xxhash_arm64.s	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,183 @@
+//go:build !appengine && gc && !purego
+// +build !appengine
+// +build gc
+// +build !purego
+
+#include "textflag.h"
+
+// Registers:
+#define digest	R1
+#define h	R2 // return value
+#define p	R3 // input pointer
+#define n	R4 // input length
+#define nblocks	R5 // n / 32
+#define prime1	R7
+#define prime2	R8
+#define prime3	R9
+#define prime4	R10
+#define prime5	R11
+#define v1	R12
+#define v2	R13
+#define v3	R14
+#define v4	R15
+#define x1	R20
+#define x2	R21
+#define x3	R22
+#define x4	R23
+
+#define round(acc, x) \
+	MADD prime2, acc, x, acc \
+	ROR  $64-31, acc         \
+	MUL  prime1, acc
+
+// round0 performs the operation x = round(0, x).
+#define round0(x) \
+	MUL prime2, x \
+	ROR $64-31, x \
+	MUL prime1, x
+
+#define mergeRound(acc, x) \
+	round0(x)                     \
+	EOR  x, acc                   \
+	MADD acc, prime4, prime1, acc
+
+// blockLoop processes as many 32-byte blocks as possible,
+// updating v1, v2, v3, and v4. It assumes that n >= 32.
+#define blockLoop() \
+	LSR     $5, n, nblocks  \
+	PCALIGN $16             \
+	loop:                   \
+	LDP.P   16(p), (x1, x2) \
+	LDP.P   16(p), (x3, x4) \
+	round(v1, x1)           \
+	round(v2, x2)           \
+	round(v3, x3)           \
+	round(v4, x4)           \
+	SUB     $1, nblocks     \
+	CBNZ    nblocks, loop
+
+// func Sum64(b []byte) uint64
+TEXT ·Sum64(SB), NOSPLIT|NOFRAME, $0-32
+	LDP b_base+0(FP), (p, n)
+
+	LDP  ·primes+0(SB), (prime1, prime2)
+	LDP  ·primes+16(SB), (prime3, prime4)
+	MOVD ·primes+32(SB), prime5
+
+	CMP  $32, n
+	CSEL LT, prime5, ZR, h // if n < 32 { h = prime5 } else { h = 0 }
+	BLT  afterLoop
+
+	ADD  prime1, prime2, v1
+	MOVD prime2, v2
+	MOVD $0, v3
+	NEG  prime1, v4
+
+	blockLoop()
+
+	ROR $64-1, v1, x1
+	ROR $64-7, v2, x2
+	ADD x1, x2
+	ROR $64-12, v3, x3
+	ROR $64-18, v4, x4
+	ADD x3, x4
+	ADD x2, x4, h
+
+	mergeRound(h, v1)
+	mergeRound(h, v2)
+	mergeRound(h, v3)
+	mergeRound(h, v4)
+
+afterLoop:
+	ADD n, h
+
+	TBZ   $4, n, try8
+	LDP.P 16(p), (x1, x2)
+
+	round0(x1)
+
+	// NOTE: here and below, sequencing the EOR after the ROR (using a
+	// rotated register) is worth a small but measurable speedup for small
+	// inputs.
+	ROR  $64-27, h
+	EOR  x1 @> 64-27, h, h
+	MADD h, prime4, prime1, h
+
+	round0(x2)
+	ROR  $64-27, h
+	EOR  x2 @> 64-27, h, h
+	MADD h, prime4, prime1, h
+
+try8:
+	TBZ    $3, n, try4
+	MOVD.P 8(p), x1
+
+	round0(x1)
+	ROR  $64-27, h
+	EOR  x1 @> 64-27, h, h
+	MADD h, prime4, prime1, h
+
+try4:
+	TBZ     $2, n, try2
+	MOVWU.P 4(p), x2
+
+	MUL  prime1, x2
+	ROR  $64-23, h
+	EOR  x2 @> 64-23, h, h
+	MADD h, prime3, prime2, h
+
+try2:
+	TBZ     $1, n, try1
+	MOVHU.P 2(p), x3
+	AND     $255, x3, x1
+	LSR     $8, x3, x2
+
+	MUL prime5, x1
+	ROR $64-11, h
+	EOR x1 @> 64-11, h, h
+	MUL prime1, h
+
+	MUL prime5, x2
+	ROR $64-11, h
+	EOR x2 @> 64-11, h, h
+	MUL prime1, h
+
+try1:
+	TBZ   $0, n, finalize
+	MOVBU (p), x4
+
+	MUL prime5, x4
+	ROR $64-11, h
+	EOR x4 @> 64-11, h, h
+	MUL prime1, h
+
+finalize:
+	EOR h >> 33, h
+	MUL prime2, h
+	EOR h >> 29, h
+	MUL prime3, h
+	EOR h >> 32, h
+
+	MOVD h, ret+24(FP)
+	RET
+
+// func writeBlocks(d *Digest, b []byte) int
+TEXT ·writeBlocks(SB), NOSPLIT|NOFRAME, $0-40
+	LDP ·primes+0(SB), (prime1, prime2)
+
+	// Load state. Assume v[1-4] are stored contiguously.
+	MOVD d+0(FP), digest
+	LDP  0(digest), (v1, v2)
+	LDP  16(digest), (v3, v4)
+
+	LDP b_base+8(FP), (p, n)
+
+	blockLoop()
+
+	// Store updated state.
+	STP (v1, v2), 0(digest)
+	STP (v3, v4), 16(digest)
+
+	BIC  $31, n
+	MOVD n, ret+32(FP)
+	RET
diff -pruN 0.19.3+ds1-4/vendor/github.com/cespare/xxhash/v2/xxhash_asm.go 0.21.3-0ubuntu1/vendor/github.com/cespare/xxhash/v2/xxhash_asm.go
--- 0.19.3+ds1-4/vendor/github.com/cespare/xxhash/v2/xxhash_asm.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/cespare/xxhash/v2/xxhash_asm.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,15 @@
+//go:build (amd64 || arm64) && !appengine && gc && !purego
+// +build amd64 arm64
+// +build !appengine
+// +build gc
+// +build !purego
+
+package xxhash
+
+// Sum64 computes the 64-bit xxHash digest of b with a zero seed.
+//
+//go:noescape
+func Sum64(b []byte) uint64
+
+//go:noescape
+func writeBlocks(d *Digest, b []byte) int
diff -pruN 0.19.3+ds1-4/vendor/github.com/cespare/xxhash/v2/xxhash_other.go 0.21.3-0ubuntu1/vendor/github.com/cespare/xxhash/v2/xxhash_other.go
--- 0.19.3+ds1-4/vendor/github.com/cespare/xxhash/v2/xxhash_other.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/cespare/xxhash/v2/xxhash_other.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,76 @@
+//go:build (!amd64 && !arm64) || appengine || !gc || purego
+// +build !amd64,!arm64 appengine !gc purego
+
+package xxhash
+
+// Sum64 computes the 64-bit xxHash digest of b with a zero seed.
+func Sum64(b []byte) uint64 {
+	// A simpler version would be
+	//   d := New()
+	//   d.Write(b)
+	//   return d.Sum64()
+	// but this is faster, particularly for small inputs.
+
+	n := len(b)
+	var h uint64
+
+	if n >= 32 {
+		v1 := primes[0] + prime2
+		v2 := prime2
+		v3 := uint64(0)
+		v4 := -primes[0]
+		for len(b) >= 32 {
+			v1 = round(v1, u64(b[0:8:len(b)]))
+			v2 = round(v2, u64(b[8:16:len(b)]))
+			v3 = round(v3, u64(b[16:24:len(b)]))
+			v4 = round(v4, u64(b[24:32:len(b)]))
+			b = b[32:len(b):len(b)]
+		}
+		h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4)
+		h = mergeRound(h, v1)
+		h = mergeRound(h, v2)
+		h = mergeRound(h, v3)
+		h = mergeRound(h, v4)
+	} else {
+		h = prime5
+	}
+
+	h += uint64(n)
+
+	for ; len(b) >= 8; b = b[8:] {
+		k1 := round(0, u64(b[:8]))
+		h ^= k1
+		h = rol27(h)*prime1 + prime4
+	}
+	if len(b) >= 4 {
+		h ^= uint64(u32(b[:4])) * prime1
+		h = rol23(h)*prime2 + prime3
+		b = b[4:]
+	}
+	for ; len(b) > 0; b = b[1:] {
+		h ^= uint64(b[0]) * prime5
+		h = rol11(h) * prime1
+	}
+
+	h ^= h >> 33
+	h *= prime2
+	h ^= h >> 29
+	h *= prime3
+	h ^= h >> 32
+
+	return h
+}
+
+func writeBlocks(d *Digest, b []byte) int {
+	v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4
+	n := len(b)
+	for len(b) >= 32 {
+		v1 = round(v1, u64(b[0:8:len(b)]))
+		v2 = round(v2, u64(b[8:16:len(b)]))
+		v3 = round(v3, u64(b[16:24:len(b)]))
+		v4 = round(v4, u64(b[24:32:len(b)]))
+		b = b[32:len(b):len(b)]
+	}
+	d.v1, d.v2, d.v3, d.v4 = v1, v2, v3, v4
+	return n - len(b)
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go 0.21.3-0ubuntu1/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go
--- 0.19.3+ds1-4/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,16 @@
+//go:build appengine
+// +build appengine
+
+// This file contains the safe implementations of otherwise unsafe-using code.
+
+package xxhash
+
+// Sum64String computes the 64-bit xxHash digest of s with a zero seed.
+func Sum64String(s string) uint64 {
+	return Sum64([]byte(s))
+}
+
+// WriteString adds more data to d. It always returns len(s), nil.
+func (d *Digest) WriteString(s string) (n int, err error) {
+	return d.Write([]byte(s))
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go 0.21.3-0ubuntu1/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go
--- 0.19.3+ds1-4/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,58 @@
+//go:build !appengine
+// +build !appengine
+
+// This file encapsulates usage of unsafe.
+// xxhash_safe.go contains the safe implementations.
+
+package xxhash
+
+import (
+	"unsafe"
+)
+
+// In the future it's possible that compiler optimizations will make these
+// XxxString functions unnecessary by realizing that calls such as
+// Sum64([]byte(s)) don't need to copy s. See https://go.dev/issue/2205.
+// If that happens, even if we keep these functions they can be replaced with
+// the trivial safe code.
+
+// NOTE: The usual way of doing an unsafe string-to-[]byte conversion is:
+//
+//   var b []byte
+//   bh := (*reflect.SliceHeader)(unsafe.Pointer(&b))
+//   bh.Data = (*reflect.StringHeader)(unsafe.Pointer(&s)).Data
+//   bh.Len = len(s)
+//   bh.Cap = len(s)
+//
+// Unfortunately, as of Go 1.15.3 the inliner's cost model assigns a high enough
+// weight to this sequence of expressions that any function that uses it will
+// not be inlined. Instead, the functions below use a different unsafe
+// conversion designed to minimize the inliner weight and allow both to be
+// inlined. There is also a test (TestInlining) which verifies that these are
+// inlined.
+//
+// See https://github.com/golang/go/issues/42739 for discussion.
+
+// Sum64String computes the 64-bit xxHash digest of s with a zero seed.
+// It may be faster than Sum64([]byte(s)) by avoiding a copy.
+func Sum64String(s string) uint64 {
+	b := *(*[]byte)(unsafe.Pointer(&sliceHeader{s, len(s)}))
+	return Sum64(b)
+}
+
+// WriteString adds more data to d. It always returns len(s), nil.
+// It may be faster than Write([]byte(s)) by avoiding a copy.
+func (d *Digest) WriteString(s string) (n int, err error) {
+	d.Write(*(*[]byte)(unsafe.Pointer(&sliceHeader{s, len(s)})))
+	// d.Write always returns len(s), nil.
+	// Ignoring the return output and returning these fixed values buys a
+	// savings of 6 in the inliner's cost model.
+	return len(s), nil
+}
+
+// sliceHeader is similar to reflect.SliceHeader, but it assumes that the layout
+// of the first two words is the same as the layout of a string.
+type sliceHeader struct {
+	s   string
+	cap int
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/LICENSE 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/LICENSE
--- 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/LICENSE	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/LICENSE	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,191 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        https://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   Copyright 2013-2017 Docker, Inc.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       https://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff -pruN 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/NOTICE 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/NOTICE
--- 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/NOTICE	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/NOTICE	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,2 @@
+The Compose Specification
+Copyright 2020 The Compose Specification Authors
diff -pruN 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/cli/options.go 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/cli/options.go
--- 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/cli/options.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/cli/options.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,571 @@
+/*
+   Copyright 2020 The Compose Specification Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package cli
+
+import (
+	"context"
+	"fmt"
+	"io"
+	"os"
+	"path/filepath"
+	"strconv"
+	"strings"
+
+	"github.com/sirupsen/logrus"
+
+	"github.com/compose-spec/compose-go/v2/consts"
+	"github.com/compose-spec/compose-go/v2/dotenv"
+	"github.com/compose-spec/compose-go/v2/errdefs"
+	"github.com/compose-spec/compose-go/v2/loader"
+	"github.com/compose-spec/compose-go/v2/types"
+	"github.com/compose-spec/compose-go/v2/utils"
+)
+
+// ProjectOptions provides common configuration for loading a project.
+type ProjectOptions struct {
+	// Name is a valid Compose project name to be used or empty.
+	//
+	// If empty, the project loader will automatically infer a reasonable
+	// project name if possible.
+	Name string
+
+	// WorkingDir is a file path to use as the project directory or empty.
+	//
+	// If empty, the project loader will automatically infer a reasonable
+	// working directory if possible.
+	WorkingDir string
+
+	// ConfigPaths are file paths to one or more Compose files.
+	//
+	// These are applied in order by the loader following the override logic
+	// as described in the spec.
+	//
+	// The first entry is required and is the primary Compose file.
+	// For convenience, WithConfigFileEnv and WithDefaultConfigPath
+	// are provided to populate this in a predictable manner.
+	ConfigPaths []string
+
+	// Environment are additional environment variables to make available
+	// for interpolation.
+	//
+	// NOTE: For security, the loader does not automatically expose any
+	// process environment variables. For convenience, WithOsEnv can be
+	// used if appropriate.
+	Environment types.Mapping
+
+	// EnvFiles are file paths to ".env" files with additional environment
+	// variable data.
+	//
+	// These are loaded in-order, so it is possible to override variables
+	// in subsequent files.
+	//
+	// This field is optional, but any file paths that are included here must
+	// exist or an error will be returned during load.
+	EnvFiles []string
+
+	loadOptions []func(*loader.Options)
+
+	// Callbacks to retrieve metadata information during parse defined before
+	// creating the project
+	Listeners []loader.Listener
+}
+
+type ProjectOptionsFn func(*ProjectOptions) error
+
+// NewProjectOptions creates ProjectOptions
+func NewProjectOptions(configs []string, opts ...ProjectOptionsFn) (*ProjectOptions, error) {
+	options := &ProjectOptions{
+		ConfigPaths: configs,
+		Environment: map[string]string{},
+		Listeners:   []loader.Listener{},
+	}
+	for _, o := range opts {
+		err := o(options)
+		if err != nil {
+			return nil, err
+		}
+	}
+	return options, nil
+}
+
+// WithName defines ProjectOptions' name
+func WithName(name string) ProjectOptionsFn {
+	return func(o *ProjectOptions) error {
+		// a project (once loaded) cannot have an empty name
+		// however, on the options object, the name is optional: if unset,
+		// a name will be inferred by the loader, so it's legal to set the
+		// name to an empty string here
+		if name != loader.NormalizeProjectName(name) {
+			return loader.InvalidProjectNameErr(name)
+		}
+		o.Name = name
+		return nil
+	}
+}
+
+// WithWorkingDirectory defines ProjectOptions' working directory
+func WithWorkingDirectory(wd string) ProjectOptionsFn {
+	return func(o *ProjectOptions) error {
+		if wd == "" {
+			return nil
+		}
+		abs, err := filepath.Abs(wd)
+		if err != nil {
+			return err
+		}
+		o.WorkingDir = abs
+		return nil
+	}
+}
+
+// WithConfigFileEnv allow to set compose config file paths by COMPOSE_FILE environment variable
+func WithConfigFileEnv(o *ProjectOptions) error {
+	if len(o.ConfigPaths) > 0 {
+		return nil
+	}
+	sep := o.Environment[consts.ComposePathSeparator]
+	if sep == "" {
+		sep = string(os.PathListSeparator)
+	}
+	f, ok := o.Environment[consts.ComposeFilePath]
+	if ok {
+		paths, err := absolutePaths(strings.Split(f, sep))
+		o.ConfigPaths = paths
+		return err
+	}
+	return nil
+}
+
+// WithDefaultConfigPath searches for default config files from working directory
+func WithDefaultConfigPath(o *ProjectOptions) error {
+	if len(o.ConfigPaths) > 0 {
+		return nil
+	}
+	pwd, err := o.GetWorkingDir()
+	if err != nil {
+		return err
+	}
+	for {
+		candidates := findFiles(DefaultFileNames, pwd)
+		if len(candidates) > 0 {
+			winner := candidates[0]
+			if len(candidates) > 1 {
+				logrus.Warnf("Found multiple config files with supported names: %s", strings.Join(candidates, ", "))
+				logrus.Warnf("Using %s", winner)
+			}
+			o.ConfigPaths = append(o.ConfigPaths, winner)
+
+			overrides := findFiles(DefaultOverrideFileNames, pwd)
+			if len(overrides) > 0 {
+				if len(overrides) > 1 {
+					logrus.Warnf("Found multiple override files with supported names: %s", strings.Join(overrides, ", "))
+					logrus.Warnf("Using %s", overrides[0])
+				}
+				o.ConfigPaths = append(o.ConfigPaths, overrides[0])
+			}
+			return nil
+		}
+		parent := filepath.Dir(pwd)
+		if parent == pwd {
+			// no config file found, but that's not a blocker if caller only needs project name
+			return nil
+		}
+		pwd = parent
+	}
+}
+
+// WithEnv defines a key=value set of variables used for compose file interpolation
+func WithEnv(env []string) ProjectOptionsFn {
+	return func(o *ProjectOptions) error {
+		for k, v := range utils.GetAsEqualsMap(env) {
+			o.Environment[k] = v
+		}
+		return nil
+	}
+}
+
+// WithDiscardEnvFile discards the `env_file` section after resolving it to
+// the `environment` section
+func WithDiscardEnvFile(o *ProjectOptions) error {
+	o.loadOptions = append(o.loadOptions, loader.WithDiscardEnvFiles)
+	return nil
+}
+
+// WithLoadOptions provides a hook to control how compose files are loaded
+func WithLoadOptions(loadOptions ...func(*loader.Options)) ProjectOptionsFn {
+	return func(o *ProjectOptions) error {
+		o.loadOptions = append(o.loadOptions, loadOptions...)
+		return nil
+	}
+}
+
+// WithDefaultProfiles uses the provided profiles (if any), and falls back to
+// profiles specified via the COMPOSE_PROFILES environment variable otherwise.
+func WithDefaultProfiles(profiles ...string) ProjectOptionsFn {
+	return func(o *ProjectOptions) error {
+		if len(profiles) == 0 {
+			for _, s := range strings.Split(o.Environment[consts.ComposeProfiles], ",") {
+				profiles = append(profiles, strings.TrimSpace(s))
+			}
+		}
+		o.loadOptions = append(o.loadOptions, loader.WithProfiles(profiles))
+		return nil
+	}
+}
+
+// WithProfiles sets profiles to be activated
+func WithProfiles(profiles []string) ProjectOptionsFn {
+	return func(o *ProjectOptions) error {
+		o.loadOptions = append(o.loadOptions, loader.WithProfiles(profiles))
+		return nil
+	}
+}
+
+// WithOsEnv imports environment variables from OS
+func WithOsEnv(o *ProjectOptions) error {
+	for k, v := range utils.GetAsEqualsMap(os.Environ()) {
+		if _, set := o.Environment[k]; set {
+			continue
+		}
+		o.Environment[k] = v
+	}
+	return nil
+}
+
+// WithEnvFile sets an alternate env file.
+//
+// Deprecated: use WithEnvFiles instead.
+func WithEnvFile(file string) ProjectOptionsFn {
+	var files []string
+	if file != "" {
+		files = []string{file}
+	}
+	return WithEnvFiles(files...)
+}
+
+// WithEnvFiles set env file(s) to be loaded to set project environment.
+// defaults to local .env file if no explicit file is selected, unless COMPOSE_DISABLE_ENV_FILE is set
+func WithEnvFiles(file ...string) ProjectOptionsFn {
+	return func(o *ProjectOptions) error {
+		if len(file) > 0 {
+			o.EnvFiles = file
+			return nil
+		}
+		if v, ok := os.LookupEnv(consts.ComposeDisableDefaultEnvFile); ok {
+			b, err := strconv.ParseBool(v)
+			if err != nil {
+				return err
+			}
+			if b {
+				return nil
+			}
+		}
+
+		wd, err := o.GetWorkingDir()
+		if err != nil {
+			return err
+		}
+		defaultDotEnv := filepath.Join(wd, ".env")
+
+		s, err := os.Stat(defaultDotEnv)
+		if os.IsNotExist(err) {
+			return nil
+		}
+		if err != nil {
+			return err
+		}
+		if !s.IsDir() {
+			o.EnvFiles = []string{defaultDotEnv}
+		}
+		return nil
+	}
+}
+
+// WithDotEnv imports environment variables from .env file
+func WithDotEnv(o *ProjectOptions) error {
+	envMap, err := dotenv.GetEnvFromFile(o.Environment, o.EnvFiles)
+	if err != nil {
+		return err
+	}
+	o.Environment.Merge(envMap)
+	return nil
+}
+
+// WithInterpolation set ProjectOptions to enable/skip interpolation
+func WithInterpolation(interpolation bool) ProjectOptionsFn {
+	return func(o *ProjectOptions) error {
+		o.loadOptions = append(o.loadOptions, func(options *loader.Options) {
+			options.SkipInterpolation = !interpolation
+		})
+		return nil
+	}
+}
+
+// WithNormalization set ProjectOptions to enable/skip normalization
+func WithNormalization(normalization bool) ProjectOptionsFn {
+	return func(o *ProjectOptions) error {
+		o.loadOptions = append(o.loadOptions, func(options *loader.Options) {
+			options.SkipNormalization = !normalization
+		})
+		return nil
+	}
+}
+
+// WithConsistency set ProjectOptions to enable/skip consistency
+func WithConsistency(consistency bool) ProjectOptionsFn {
+	return func(o *ProjectOptions) error {
+		o.loadOptions = append(o.loadOptions, func(options *loader.Options) {
+			options.SkipConsistencyCheck = !consistency
+		})
+		return nil
+	}
+}
+
+// WithResolvedPaths set ProjectOptions to enable paths resolution
+func WithResolvedPaths(resolve bool) ProjectOptionsFn {
+	return func(o *ProjectOptions) error {
+		o.loadOptions = append(o.loadOptions, func(options *loader.Options) {
+			options.ResolvePaths = resolve
+		})
+		return nil
+	}
+}
+
+// WithResourceLoader registers support for ResourceLoader to manage remote resources
+func WithResourceLoader(r loader.ResourceLoader) ProjectOptionsFn {
+	return func(o *ProjectOptions) error {
+		o.loadOptions = append(o.loadOptions, func(options *loader.Options) {
+			options.ResourceLoaders = append(options.ResourceLoaders, r)
+		})
+		return nil
+	}
+}
+
+// WithExtension registers a known extension `x-*` with the go struct type to decode into
+func WithExtension(name string, typ any) ProjectOptionsFn {
+	return func(o *ProjectOptions) error {
+		o.loadOptions = append(o.loadOptions, func(options *loader.Options) {
+			if options.KnownExtensions == nil {
+				options.KnownExtensions = map[string]any{}
+			}
+			options.KnownExtensions[name] = typ
+		})
+		return nil
+	}
+}
+
+// WithListeners appends the given loader listeners to the project options
+func (o *ProjectOptions) WithListeners(listeners ...loader.Listener) {
+	o.Listeners = append(o.Listeners, listeners...)
+}
+
+// WithoutEnvironmentResolution disable environment resolution
+func WithoutEnvironmentResolution(o *ProjectOptions) error {
+	o.loadOptions = append(o.loadOptions, func(options *loader.Options) {
+		options.SkipResolveEnvironment = true
+	})
+	return nil
+}
+
+// DefaultFileNames defines the Compose file names for auto-discovery (in order of preference)
+var DefaultFileNames = []string{"compose.yaml", "compose.yml", "docker-compose.yml", "docker-compose.yaml"}
+
+// DefaultOverrideFileNames defines the Compose override file names for auto-discovery (in order of preference)
+var DefaultOverrideFileNames = []string{"compose.override.yml", "compose.override.yaml", "docker-compose.override.yml", "docker-compose.override.yaml"}
+
+func (o *ProjectOptions) GetWorkingDir() (string, error) {
+	if o.WorkingDir != "" {
+		return filepath.Abs(o.WorkingDir)
+	}
+	for _, path := range o.ConfigPaths {
+		if path != "-" {
+			absPath, err := filepath.Abs(path)
+			if err != nil {
+				return "", err
+			}
+			return filepath.Dir(absPath), nil
+		}
+	}
+	return os.Getwd()
+}
+
+// ReadConfigFiles reads ConfigFiles and populates the content field
+func (o *ProjectOptions) ReadConfigFiles(ctx context.Context, workingDir string, options *ProjectOptions) (*types.ConfigDetails, error) {
+	config, err := loader.LoadConfigFiles(ctx, options.ConfigPaths, workingDir, options.loadOptions...)
+	if err != nil {
+		return nil, err
+	}
+	configs := make([][]byte, len(config.ConfigFiles))
+
+	for i, c := range config.ConfigFiles {
+		var err error
+		var b []byte
+		if c.IsStdin() {
+			b, err = io.ReadAll(os.Stdin)
+			if err != nil {
+				return nil, err
+			}
+		} else {
+			f, err := filepath.Abs(c.Filename)
+			if err != nil {
+				return nil, err
+			}
+			b, err = os.ReadFile(f)
+			if err != nil {
+				return nil, err
+			}
+		}
+		configs[i] = b
+	}
+	for i, c := range configs {
+		config.ConfigFiles[i].Content = c
+	}
+	return config, nil
+}
+
+// LoadProject loads compose file according to options and bind to types.Project go structs
+func (o *ProjectOptions) LoadProject(ctx context.Context) (*types.Project, error) {
+	config, err := o.prepare(ctx)
+	if err != nil {
+		return nil, err
+	}
+
+	project, err := loader.LoadWithContext(ctx, types.ConfigDetails{
+		ConfigFiles: config.ConfigFiles,
+		WorkingDir:  config.WorkingDir,
+		Environment: o.Environment,
+	}, o.loadOptions...)
+	if err != nil {
+		return nil, err
+	}
+
+	for _, config := range config.ConfigFiles {
+		project.ComposeFiles = append(project.ComposeFiles, config.Filename)
+	}
+
+	return project, nil
+}
+
+// LoadModel loads compose file according to options and returns a raw (yaml tree) model
+func (o *ProjectOptions) LoadModel(ctx context.Context) (map[string]any, error) {
+	configDetails, err := o.prepare(ctx)
+	if err != nil {
+		return nil, err
+	}
+
+	return loader.LoadModelWithContext(ctx, *configDetails, o.loadOptions...)
+}
+
+// prepare converts ProjectOptions into loader's types.ConfigDetails and configures default load options
+func (o *ProjectOptions) prepare(ctx context.Context) (*types.ConfigDetails, error) {
+	defaultDir, err := o.GetWorkingDir()
+	if err != nil {
+		return &types.ConfigDetails{}, err
+	}
+
+	configDetails, err := o.ReadConfigFiles(ctx, defaultDir, o)
+	if err != nil {
+		return configDetails, err
+	}
+
+	o.loadOptions = append(o.loadOptions,
+		withNamePrecedenceLoad(defaultDir, o),
+		withConvertWindowsPaths(o),
+		withListeners(o))
+
+	return configDetails, nil
+}
+
+// ProjectFromOptions load a compose project based on command line options
+// Deprecated: use ProjectOptions.LoadProject or ProjectOptions.LoadModel
+func ProjectFromOptions(ctx context.Context, options *ProjectOptions) (*types.Project, error) {
+	return options.LoadProject(ctx)
+}
+
+func withNamePrecedenceLoad(absWorkingDir string, options *ProjectOptions) func(*loader.Options) {
+	return func(opts *loader.Options) {
+		if options.Name != "" {
+			opts.SetProjectName(options.Name, true)
+		} else if nameFromEnv, ok := options.Environment[consts.ComposeProjectName]; ok && nameFromEnv != "" {
+			opts.SetProjectName(nameFromEnv, true)
+		} else {
+			dirname := filepath.Base(absWorkingDir)
+			symlink, err := filepath.EvalSymlinks(absWorkingDir)
+			if err == nil && filepath.Base(symlink) != dirname {
+				logrus.Warnf("project has been loaded without an explicit name from a symlink. Using name %q", dirname)
+			}
+			opts.SetProjectName(
+				loader.NormalizeProjectName(dirname),
+				false,
+			)
+		}
+	}
+}
+
+func withConvertWindowsPaths(options *ProjectOptions) func(*loader.Options) {
+	return func(o *loader.Options) {
+		if o.ResolvePaths {
+			o.ConvertWindowsPaths = utils.StringToBool(options.Environment["COMPOSE_CONVERT_WINDOWS_PATHS"])
+		}
+	}
+}
+
+// save listeners from ProjectOptions (compose) to loader.Options
+func withListeners(options *ProjectOptions) func(*loader.Options) {
+	return func(opts *loader.Options) {
+		opts.Listeners = append(opts.Listeners, options.Listeners...)
+	}
+}
+
+// getConfigPaths retrieves the config files for project based on project options
+func (o *ProjectOptions) getConfigPaths() ([]string, error) {
+	if len(o.ConfigPaths) != 0 {
+		return absolutePaths(o.ConfigPaths)
+	}
+	return nil, fmt.Errorf("no configuration file provided: %w", errdefs.ErrNotFound)
+}
+
+func findFiles(names []string, pwd string) []string {
+	candidates := []string{}
+	for _, n := range names {
+		f := filepath.Join(pwd, n)
+		if _, err := os.Stat(f); err == nil {
+			candidates = append(candidates, f)
+		}
+	}
+	return candidates
+}
+
+func absolutePaths(p []string) ([]string, error) {
+	var paths []string
+	for _, f := range p {
+		if f == "-" {
+			paths = append(paths, f)
+			continue
+		}
+		abs, err := filepath.Abs(f)
+		if err != nil {
+			return nil, err
+		}
+		f = abs
+		if _, err := os.Stat(f); err != nil {
+			return nil, err
+		}
+		paths = append(paths, f)
+	}
+	return paths, nil
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/consts/consts.go 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/consts/consts.go
--- 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/consts/consts.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/consts/consts.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,29 @@
+/*
+   Copyright 2020 The Compose Specification Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package consts
+
+const (
+	ComposeProjectName           = "COMPOSE_PROJECT_NAME"
+	ComposePathSeparator         = "COMPOSE_PATH_SEPARATOR"
+	ComposeFilePath              = "COMPOSE_FILE"
+	ComposeDisableDefaultEnvFile = "COMPOSE_DISABLE_ENV_FILE"
+	ComposeProfiles              = "COMPOSE_PROFILES"
+)
+
+const Extensions = "#extensions" // Using # prefix, we prevent risk to conflict with an actual yaml key
+
+type ComposeFileKey struct{}
diff -pruN 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/dotenv/LICENSE 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/dotenv/LICENSE
--- 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/dotenv/LICENSE	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/dotenv/LICENSE	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,22 @@
+Copyright (c) 2013 John Barton
+
+MIT License
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff -pruN 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/dotenv/env.go 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/dotenv/env.go
--- 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/dotenv/env.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/dotenv/env.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,76 @@
+/*
+   Copyright 2020 The Compose Specification Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package dotenv
+
+import (
+	"bytes"
+	"fmt"
+	"os"
+	"path/filepath"
+)
+
+func GetEnvFromFile(currentEnv map[string]string, filenames []string) (map[string]string, error) {
+	envMap := make(map[string]string)
+
+	for _, dotEnvFile := range filenames {
+		abs, err := filepath.Abs(dotEnvFile)
+		if err != nil {
+			return envMap, err
+		}
+		dotEnvFile = abs
+
+		s, err := os.Stat(dotEnvFile)
+		if os.IsNotExist(err) {
+			return envMap, fmt.Errorf("Couldn't find env file: %s", dotEnvFile)
+		}
+		if err != nil {
+			return envMap, err
+		}
+
+		if s.IsDir() {
+			if len(filenames) == 0 {
+				return envMap, nil
+			}
+			return envMap, fmt.Errorf("%s is a directory", dotEnvFile)
+		}
+
+		b, err := os.ReadFile(dotEnvFile)
+		if os.IsNotExist(err) {
+			return nil, fmt.Errorf("Couldn't read env file: %s", dotEnvFile)
+		}
+		if err != nil {
+			return envMap, err
+		}
+
+		env, err := ParseWithLookup(bytes.NewReader(b), func(k string) (string, bool) {
+			v, ok := currentEnv[k]
+			if ok {
+				return v, true
+			}
+			v, ok = envMap[k]
+			return v, ok
+		})
+		if err != nil {
+			return envMap, fmt.Errorf("failed to read %s: %w", dotEnvFile, err)
+		}
+		for k, v := range env {
+			envMap[k] = v
+		}
+	}
+
+	return envMap, nil
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/dotenv/format.go 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/dotenv/format.go
--- 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/dotenv/format.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/dotenv/format.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,38 @@
+/*
+   Copyright 2020 The Compose Specification Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package dotenv
+
+import (
+	"fmt"
+	"io"
+)
+
+var formats = map[string]Parser{}
+
+type Parser func(r io.Reader, filename string, lookup func(key string) (string, bool)) (map[string]string, error)
+
+func RegisterFormat(format string, p Parser) {
+	formats[format] = p
+}
+
+func ParseWithFormat(r io.Reader, filename string, resolve LookupFn, format string) (map[string]string, error) {
+	parser, ok := formats[format]
+	if !ok {
+		return nil, fmt.Errorf("unsupported env_file format %q", format)
+	}
+	return parser(r, filename, resolve)
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/dotenv/godotenv.go 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/dotenv/godotenv.go
--- 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/dotenv/godotenv.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/dotenv/godotenv.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,175 @@
+// Package dotenv is a go port of the ruby dotenv library (https://github.com/bkeepers/dotenv)
+//
+// Examples/readme can be found on the github page at https://github.com/joho/godotenv
+//
+// The TL;DR is that you make a .env file that looks something like
+//
+//	SOME_ENV_VAR=somevalue
+//
+// and then in your go code you can call
+//
+//	godotenv.Load()
+//
+// and all the env vars declared in .env will be available through os.Getenv("SOME_ENV_VAR")
+package dotenv
+
+import (
+	"bytes"
+	"io"
+	"os"
+	"regexp"
+	"strings"
+
+	"github.com/compose-spec/compose-go/v2/template"
+)
+
+var utf8BOM = []byte("\uFEFF")
+
+var startsWithDigitRegex = regexp.MustCompile(`^\s*\d.*`) // Keys starting with numbers are ignored
+
+// LookupFn represents a lookup function to resolve variables from
+type LookupFn func(string) (string, bool)
+
+var noLookupFn = func(s string) (string, bool) {
+	return "", false
+}
+
+// Parse reads an env file from io.Reader, returning a map of keys and values.
+func Parse(r io.Reader) (map[string]string, error) {
+	return ParseWithLookup(r, nil)
+}
+
+// ParseWithLookup reads an env file from io.Reader, returning a map of keys and values.
+func ParseWithLookup(r io.Reader, lookupFn LookupFn) (map[string]string, error) {
+	data, err := io.ReadAll(r)
+	if err != nil {
+		return nil, err
+	}
+
+	// seek past the UTF-8 BOM if it exists (particularly on Windows, some
+	// editors tend to add it, and it'll cause parsing to fail)
+	data = bytes.TrimPrefix(data, utf8BOM)
+
+	return UnmarshalBytesWithLookup(data, lookupFn)
+}
+
+// Load will read your env file(s) and load them into ENV for this process.
+//
+// Call this function as close as possible to the start of your program (ideally in main).
+//
+// If you call Load without any args it will default to loading .env in the current path.
+//
+// You can otherwise tell it which files to load (there can be more than one) like:
+//
+//	godotenv.Load("fileone", "filetwo")
+//
+// It's important to note that it WILL NOT OVERRIDE an env variable that already exists - consider the .env file to set dev vars or sensible defaults
+func Load(filenames ...string) error {
+	return load(false, filenames...)
+}
+
+func load(overload bool, filenames ...string) error {
+	filenames = filenamesOrDefault(filenames)
+	for _, filename := range filenames {
+		err := loadFile(filename, overload)
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// ReadWithLookup gets all env vars from the files and/or lookup function and returns values as
+// a map rather than automatically writing values into env
+func ReadWithLookup(lookupFn LookupFn, filenames ...string) (map[string]string, error) {
+	filenames = filenamesOrDefault(filenames)
+	envMap := make(map[string]string)
+
+	for _, filename := range filenames {
+		individualEnvMap, individualErr := ReadFile(filename, lookupFn)
+
+		if individualErr != nil {
+			return envMap, individualErr
+		}
+
+		for key, value := range individualEnvMap {
+			if startsWithDigitRegex.MatchString(key) {
+				continue
+			}
+			envMap[key] = value
+		}
+	}
+
+	return envMap, nil
+}
+
+// Read reads all env (with the same file-loading semantics as Load) but returns values as
+// a map rather than automatically writing values into env
+func Read(filenames ...string) (map[string]string, error) {
+	return ReadWithLookup(nil, filenames...)
+}
+
+// UnmarshalBytesWithLookup parses env file from byte slice of chars, returning a map of keys and values.
+func UnmarshalBytesWithLookup(src []byte, lookupFn LookupFn) (map[string]string, error) {
+	return UnmarshalWithLookup(string(src), lookupFn)
+}
+
+// UnmarshalWithLookup parses env file from string, returning a map of keys and values.
+func UnmarshalWithLookup(src string, lookupFn LookupFn) (map[string]string, error) {
+	out := make(map[string]string)
+	err := newParser().parse(src, out, lookupFn)
+	return out, err
+}
+
+func filenamesOrDefault(filenames []string) []string {
+	if len(filenames) == 0 {
+		return []string{".env"}
+	}
+	return filenames
+}
+
+func loadFile(filename string, overload bool) error {
+	envMap, err := ReadFile(filename, nil)
+	if err != nil {
+		return err
+	}
+
+	currentEnv := map[string]bool{}
+	rawEnv := os.Environ()
+	for _, rawEnvLine := range rawEnv {
+		key := strings.Split(rawEnvLine, "=")[0]
+		currentEnv[key] = true
+	}
+
+	for key, value := range envMap {
+		if !currentEnv[key] || overload {
+			_ = os.Setenv(key, value)
+		}
+	}
+
+	return nil
+}
+
+func ReadFile(filename string, lookupFn LookupFn) (map[string]string, error) {
+	file, err := os.Open(filename)
+	if err != nil {
+		return nil, err
+	}
+	defer file.Close()
+
+	return ParseWithLookup(file, lookupFn)
+}
+
+func expandVariables(value string, envMap map[string]string, lookupFn LookupFn) (string, error) {
+	retVal, err := template.Substitute(value, func(k string) (string, bool) {
+		if v, ok := lookupFn(k); ok {
+			return v, true
+		}
+		v, ok := envMap[k]
+		return v, ok
+	})
+	if err != nil {
+		return value, err
+	}
+	return retVal, nil
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/dotenv/parser.go 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/dotenv/parser.go
--- 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/dotenv/parser.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/dotenv/parser.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,286 @@
+package dotenv
+
+import (
+	"errors"
+	"fmt"
+	"regexp"
+	"strconv"
+	"strings"
+	"unicode"
+)
+
+const (
+	charComment       = '#'
+	prefixSingleQuote = '\''
+	prefixDoubleQuote = '"'
+)
+
+var (
+	escapeSeqRegex = regexp.MustCompile(`(\\(?:[abcfnrtv$"\\]|0\d{0,3}))`)
+	exportRegex    = regexp.MustCompile(`^export\s+`)
+)
+
+type parser struct {
+	line int
+}
+
+func newParser() *parser {
+	return &parser{
+		line: 1,
+	}
+}
+
+func (p *parser) parse(src string, out map[string]string, lookupFn LookupFn) error {
+	cutset := src
+	if lookupFn == nil {
+		lookupFn = noLookupFn
+	}
+	for {
+		cutset = p.getStatementStart(cutset)
+		if cutset == "" {
+			// reached end of file
+			break
+		}
+
+		key, left, inherited, err := p.locateKeyName(cutset)
+		if err != nil {
+			return err
+		}
+		if strings.Contains(key, " ") {
+			return fmt.Errorf("line %d: key cannot contain a space", p.line)
+		}
+
+		if inherited {
+			value, ok := lookupFn(key)
+			if ok {
+				out[key] = value
+			}
+			cutset = left
+			continue
+		}
+
+		value, left, err := p.extractVarValue(left, out, lookupFn)
+		if err != nil {
+			return err
+		}
+
+		out[key] = value
+		cutset = left
+	}
+
+	return nil
+}
+
+// getStatementStart returns the string beginning at the next statement.
+//
+// It skips leading whitespace and comment lines.
+func (p *parser) getStatementStart(src string) string {
+	pos := p.indexOfNonSpaceChar(src)
+	if pos == -1 {
+		return ""
+	}
+
+	src = src[pos:]
+	if src[0] != charComment {
+		return src
+	}
+
+	// skip comment section
+	pos = strings.IndexFunc(src, isCharFunc('\n'))
+	if pos == -1 {
+		return ""
+	}
+	return p.getStatementStart(src[pos:])
+}
+
+// locateKeyName locates and parses key name and returns rest of slice
+func (p *parser) locateKeyName(src string) (string, string, bool, error) {
+	var key string
+	var inherited bool
+	// trim "export" and space at beginning
+	if exportRegex.MatchString(src) {
+		// we use strings.TrimPrefix + strings.TrimLeftFunc to preserve the pointer to the same underlying memory.
+		// a regexp replace would copy the string.
+		src = strings.TrimLeftFunc(strings.TrimPrefix(src, "export"), isSpace)
+	}
+
+	// locate key name end and validate it in single loop
+	offset := 0
+loop:
+	for i, rune := range src {
+		if isSpace(rune) {
+			continue
+		}
+
+		switch rune {
+		case '=', ':', '\n':
+			// library also supports yaml-style value declaration
+			key = string(src[0:i])
+			offset = i + 1
+			inherited = rune == '\n'
+			break loop
+		case '_', '.', '-', '[', ']':
+		default:
+			// variable name should match [A-Za-z0-9_.-]
+			if unicode.IsLetter(rune) || unicode.IsNumber(rune) {
+				continue
+			}
+
+			return "", "", inherited, fmt.Errorf(
+				`line %d: unexpected character %q in variable name %q`,
+				p.line, string(rune), strings.Split(src, "\n")[0])
+		}
+	}
+
+	if src == "" {
+		return "", "", inherited, errors.New("zero length string")
+	}
+
+	if inherited && strings.IndexByte(key, ' ') == -1 {
+		p.line++
+	}
+
+	// trim whitespace
+	key = strings.TrimRightFunc(key, unicode.IsSpace)
+	cutset := strings.TrimLeftFunc(src[offset:], isSpace)
+	return key, cutset, inherited, nil
+}
+
+// extractVarValue extracts variable value and returns rest of slice
+func (p *parser) extractVarValue(src string, envMap map[string]string, lookupFn LookupFn) (string, string, error) {
+	quote, isQuoted := hasQuotePrefix(src)
+	if !isQuoted {
+		// unquoted value - read until new line
+		value, rest, _ := strings.Cut(src, "\n")
+		p.line++
+
+		// Remove inline comments on unquoted lines
+		value, _, _ = strings.Cut(value, " #")
+		value = strings.TrimRightFunc(value, unicode.IsSpace)
+		retVal, err := expandVariables(string(value), envMap, lookupFn)
+		return retVal, rest, err
+	}
+
+	previousCharIsEscape := false
+	// lookup quoted string terminator
+	var chars []byte
+	for i := 1; i < len(src); i++ {
+		char := src[i]
+		if char == '\n' {
+			p.line++
+		}
+		if char != quote {
+			if !previousCharIsEscape && char == '\\' {
+				previousCharIsEscape = true
+				continue
+			}
+			if previousCharIsEscape {
+				previousCharIsEscape = false
+				chars = append(chars, '\\')
+			}
+			chars = append(chars, char)
+			continue
+		}
+
+		// skip escaped quote symbol (\" or \', depends on quote)
+		if previousCharIsEscape {
+			previousCharIsEscape = false
+			chars = append(chars, char)
+			continue
+		}
+
+		// trim quotes
+		value := string(chars)
+		if quote == prefixDoubleQuote {
+			// expand standard shell escape sequences & then interpolate
+			// variables on the result
+			retVal, err := expandVariables(expandEscapes(value), envMap, lookupFn)
+			if err != nil {
+				return "", "", err
+			}
+			value = retVal
+		}
+
+		return value, src[i+1:], nil
+	}
+
+	// return formatted error if quoted string is not terminated
+	valEndIndex := strings.IndexFunc(src, isCharFunc('\n'))
+	if valEndIndex == -1 {
+		valEndIndex = len(src)
+	}
+
+	return "", "", fmt.Errorf("line %d: unterminated quoted value %s", p.line, src[:valEndIndex])
+}
+
+func expandEscapes(str string) string {
+	out := escapeSeqRegex.ReplaceAllStringFunc(str, func(match string) string {
+		if match == `\$` {
+			// `\$` is not a Go escape sequence, the expansion parser uses
+			// the special `$$` syntax
+			// both `FOO=\$bar` and `FOO=$$bar` are valid in an env file and
+			// will result in FOO w/ literal value of "$bar" (no interpolation)
+			return "$$"
+		}
+
+		if strings.HasPrefix(match, `\0`) {
+			// octal escape sequences in Go are not prefixed with `\0`, so
+			// rewrite the prefix, e.g. `\0123` -> `\123` -> literal value "S"
+			match = strings.Replace(match, `\0`, `\`, 1)
+		}
+
+		// use Go to unquote (unescape) the literal
+		// see https://go.dev/ref/spec#Rune_literals
+		//
+		// NOTE: Go supports ADDITIONAL escapes like `\x` & `\u` & `\U`!
+		// These are NOT supported, which is why we use a regex to find
+		// only matches we support and then use `UnquoteChar` instead of a
+		// `Unquote` on the entire value
+		v, _, _, err := strconv.UnquoteChar(match, '"')
+		if err != nil {
+			return match
+		}
+		return string(v)
+	})
+	return out
+}
+
+func (p *parser) indexOfNonSpaceChar(src string) int {
+	return strings.IndexFunc(src, func(r rune) bool {
+		if r == '\n' {
+			p.line++
+		}
+		return !unicode.IsSpace(r)
+	})
+}
+
+// hasQuotePrefix reports whether src starts with a single or double quote and returns the quote character
+func hasQuotePrefix(src string) (byte, bool) {
+	if src == "" {
+		return 0, false
+	}
+
+	switch quote := src[0]; quote {
+	case prefixDoubleQuote, prefixSingleQuote:
+		return quote, true // isQuoted
+	default:
+		return 0, false
+	}
+}
+
+func isCharFunc(char rune) func(rune) bool {
+	return func(v rune) bool {
+		return v == char
+	}
+}
+
+// isSpace reports whether the rune is a space character but not line break character
+//
+// this differs from unicode.IsSpace, which also applies line break as space
+func isSpace(r rune) bool {
+	switch r {
+	case '\t', '\v', '\f', '\r', ' ', 0x85, 0xA0:
+		return true
+	}
+	return false
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/errdefs/errors.go 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/errdefs/errors.go
--- 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/errdefs/errors.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/errdefs/errors.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,56 @@
+/*
+   Copyright 2020 The Compose Specification Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package errdefs
+
+import "errors"
+
+var (
+	// ErrNotFound is returned when an object is not found
+	ErrNotFound = errors.New("not found")
+
+	// ErrInvalid is returned when a compose project is invalid
+	ErrInvalid = errors.New("invalid compose project")
+
+	// ErrUnsupported is returned when a compose project uses an unsupported attribute
+	ErrUnsupported = errors.New("unsupported attribute")
+
+	// ErrIncompatible is returned when a compose project uses an incompatible attribute
+	ErrIncompatible = errors.New("incompatible attribute")
+
+	// ErrDisabled is returned when a resource was found in model but is disabled
+	ErrDisabled = errors.New("disabled")
+)
+
+// IsNotFoundError returns true if the unwrapped error is ErrNotFound
+func IsNotFoundError(err error) bool {
+	return errors.Is(err, ErrNotFound)
+}
+
+// IsInvalidError returns true if the unwrapped error is ErrInvalid
+func IsInvalidError(err error) bool {
+	return errors.Is(err, ErrInvalid)
+}
+
+// IsUnsupportedError returns true if the unwrapped error is ErrUnsupported
+func IsUnsupportedError(err error) bool {
+	return errors.Is(err, ErrUnsupported)
+}
+
+// IsIncompatibleError returns true if the unwrapped error is ErrIncompatible
+func IsIncompatibleError(err error) bool {
+	return errors.Is(err, ErrIncompatible)
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/format/volume.go 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/format/volume.go
--- 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/format/volume.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/format/volume.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,184 @@
+/*
+   Copyright 2020 The Compose Specification Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package format
+
+import (
+	"errors"
+	"fmt"
+	"strings"
+	"unicode"
+	"unicode/utf8"
+
+	"github.com/compose-spec/compose-go/v2/types"
+)
+
+const endOfSpec = rune(0)
+
+// ParseVolume parses a volume spec without any knowledge of the target platform
+func ParseVolume(spec string) (types.ServiceVolumeConfig, error) {
+	volume := types.ServiceVolumeConfig{}
+
+	switch len(spec) {
+	case 0:
+		return volume, errors.New("invalid empty volume spec")
+	case 1, 2:
+		volume.Target = spec
+		volume.Type = types.VolumeTypeVolume
+		return volume, nil
+	}
+
+	var buffer []rune
+	for _, char := range spec + string(endOfSpec) {
+		switch {
+		case isWindowsDrive(buffer, char):
+			buffer = append(buffer, char)
+		case char == ':' || char == endOfSpec:
+			if err := populateFieldFromBuffer(char, buffer, &volume); err != nil {
+				populateType(&volume)
+				return volume, fmt.Errorf("invalid spec: %s: %w", spec, err)
+			}
+			buffer = nil
+		default:
+			buffer = append(buffer, char)
+		}
+	}
+
+	populateType(&volume)
+	return volume, nil
+}
+
+func isWindowsDrive(buffer []rune, char rune) bool {
+	return char == ':' && len(buffer) == 1 && unicode.IsLetter(buffer[0])
+}
+
+func populateFieldFromBuffer(char rune, buffer []rune, volume *types.ServiceVolumeConfig) error {
+	strBuffer := string(buffer)
+	switch {
+	case len(buffer) == 0:
+		return errors.New("empty section between colons")
+	// Anonymous volume
+	case volume.Source == "" && char == endOfSpec:
+		volume.Target = strBuffer
+		return nil
+	case volume.Source == "":
+		volume.Source = strBuffer
+		return nil
+	case volume.Target == "":
+		volume.Target = strBuffer
+		return nil
+	case char == ':':
+		return errors.New("too many colons")
+	}
+	for _, option := range strings.Split(strBuffer, ",") {
+		switch option {
+		case "ro":
+			volume.ReadOnly = true
+		case "rw":
+			volume.ReadOnly = false
+		case "nocopy":
+			volume.Volume = &types.ServiceVolumeVolume{NoCopy: true}
+		default:
+			if isBindOption(option) {
+				setBindOption(volume, option)
+			}
+			// ignore unknown options FIXME why not report an error here?
+		}
+	}
+	return nil
+}
+
+var Propagations = []string{
+	types.PropagationRPrivate,
+	types.PropagationPrivate,
+	types.PropagationRShared,
+	types.PropagationShared,
+	types.PropagationRSlave,
+	types.PropagationSlave,
+}
+
+type setBindOptionFunc func(bind *types.ServiceVolumeBind, option string)
+
+var bindOptions = map[string]setBindOptionFunc{
+	types.PropagationRPrivate: setBindPropagation,
+	types.PropagationPrivate:  setBindPropagation,
+	types.PropagationRShared:  setBindPropagation,
+	types.PropagationShared:   setBindPropagation,
+	types.PropagationRSlave:   setBindPropagation,
+	types.PropagationSlave:    setBindPropagation,
+	types.SELinuxShared:       setBindSELinux,
+	types.SELinuxPrivate:      setBindSELinux,
+}
+
+func setBindPropagation(bind *types.ServiceVolumeBind, option string) {
+	bind.Propagation = option
+}
+
+func setBindSELinux(bind *types.ServiceVolumeBind, option string) {
+	bind.SELinux = option
+}
+
+func isBindOption(option string) bool {
+	_, ok := bindOptions[option]
+
+	return ok
+}
+
+func setBindOption(volume *types.ServiceVolumeConfig, option string) {
+	if volume.Bind == nil {
+		volume.Bind = &types.ServiceVolumeBind{}
+	}
+
+	bindOptions[option](volume.Bind, option)
+}
+
+func populateType(volume *types.ServiceVolumeConfig) {
+	if isFilePath(volume.Source) {
+		volume.Type = types.VolumeTypeBind
+		if volume.Bind == nil {
+			volume.Bind = &types.ServiceVolumeBind{}
+		}
+		// For backward compatibility with docker-compose legacy, using short notation involves
+		// bind will create missing host path
+		volume.Bind.CreateHostPath = true
+	} else {
+		volume.Type = types.VolumeTypeVolume
+		if volume.Volume == nil {
+			volume.Volume = &types.ServiceVolumeVolume{}
+		}
+	}
+}
+
+func isFilePath(source string) bool {
+	if source == "" {
+		return false
+	}
+	switch source[0] {
+	case '.', '/', '~':
+		return true
+	}
+
+	// windows named pipes
+	if strings.HasPrefix(source, `\\`) {
+		return true
+	}
+
+	first, nextIndex := utf8.DecodeRuneInString(source)
+	if len(source) <= nextIndex {
+		return false
+	}
+	return isWindowsDrive([]rune{first}, rune(source[nextIndex]))
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/graph/cycle.go 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/graph/cycle.go
--- 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/graph/cycle.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/graph/cycle.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,63 @@
+/*
+   Copyright 2020 The Compose Specification Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package graph
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/compose-spec/compose-go/v2/types"
+	"github.com/compose-spec/compose-go/v2/utils"
+	"golang.org/x/exp/slices"
+)
+
+// CheckCycle analyzes the project's depends_on relations and reports an error on cycle detection
+func CheckCycle(project *types.Project) error {
+	g, err := newGraph(project)
+	if err != nil {
+		return err
+	}
+	return g.checkCycle()
+}
+
+func (g *graph[T]) checkCycle() error {
+	// iterate on vertices in name order to render a predictable error message
+	// this is required by tests and enforces command reproducibility for the user, which otherwise could be confusing
+	names := utils.MapKeys(g.vertices)
+	for _, name := range names {
+		err := searchCycle([]string{name}, g.vertices[name])
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func searchCycle[T any](path []string, v *vertex[T]) error {
+	names := utils.MapKeys(v.children)
+	for _, name := range names {
+		if i := slices.Index(path, name); i >= 0 {
+			return fmt.Errorf("dependency cycle detected: %s -> %s", strings.Join(path[i:], " -> "), name)
+		}
+		ch := v.children[name]
+		err := searchCycle(append(path, name), ch)
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/graph/graph.go 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/graph/graph.go
--- 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/graph/graph.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/graph/graph.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,75 @@
+/*
+   Copyright 2020 The Compose Specification Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package graph
+
+// graph represents project as service dependencies
+type graph[T any] struct {
+	vertices map[string]*vertex[T]
+}
+
+// vertex represents a service in the dependencies structure
+type vertex[T any] struct {
+	key      string
+	service  *T
+	children map[string]*vertex[T]
+	parents  map[string]*vertex[T]
+}
+
+func (g *graph[T]) addVertex(name string, service T) {
+	g.vertices[name] = &vertex[T]{
+		key:      name,
+		service:  &service,
+		parents:  map[string]*vertex[T]{},
+		children: map[string]*vertex[T]{},
+	}
+}
+
+func (g *graph[T]) addEdge(src, dest string) {
+	g.vertices[src].children[dest] = g.vertices[dest]
+	g.vertices[dest].parents[src] = g.vertices[src]
+}
+
+func (g *graph[T]) roots() []*vertex[T] {
+	var res []*vertex[T]
+	for _, v := range g.vertices {
+		if len(v.parents) == 0 {
+			res = append(res, v)
+		}
+	}
+	return res
+}
+
+func (g *graph[T]) leaves() []*vertex[T] {
+	var res []*vertex[T]
+	for _, v := range g.vertices {
+		if len(v.children) == 0 {
+			res = append(res, v)
+		}
+	}
+
+	return res
+}
+
+// descendents returns all descendants of a vertex; the result may contain duplicates
+func (v *vertex[T]) descendents() []string {
+	var vx []string
+	for _, n := range v.children {
+		vx = append(vx, n.key)
+		vx = append(vx, n.descendents()...)
+	}
+	return vx
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/graph/services.go 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/graph/services.go
--- 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/graph/services.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/graph/services.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,80 @@
+/*
+   Copyright 2020 The Compose Specification Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package graph
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/compose-spec/compose-go/v2/types"
+)
+
+// InDependencyOrder walks the service graph and invokes VisitorFn in dependency order
+func InDependencyOrder(ctx context.Context, project *types.Project, fn VisitorFn[types.ServiceConfig], options ...func(*Options)) error {
+	_, err := CollectInDependencyOrder[any](ctx, project, func(ctx context.Context, s string, config types.ServiceConfig) (any, error) {
+		return nil, fn(ctx, s, config)
+	}, options...)
+	return err
+}
+
+// CollectInDependencyOrder walks the service graph and invokes CollectorFn in dependency order, then returns the result of each call
+func CollectInDependencyOrder[T any](ctx context.Context, project *types.Project, fn CollectorFn[types.ServiceConfig, T], options ...func(*Options)) (map[string]T, error) {
+	graph, err := newGraph(project)
+	if err != nil {
+		return nil, err
+	}
+	t := newTraversal(fn)
+	for _, option := range options {
+		option(t.Options)
+	}
+	err = walk(ctx, graph, t)
+	return t.results, err
+}
+
+// newGraph creates a service graph from project
+func newGraph(project *types.Project) (*graph[types.ServiceConfig], error) {
+	g := &graph[types.ServiceConfig]{
+		vertices: map[string]*vertex[types.ServiceConfig]{},
+	}
+
+	for name, s := range project.Services {
+		g.addVertex(name, s)
+	}
+
+	for name, s := range project.Services {
+		src := g.vertices[name]
+		for dep, condition := range s.DependsOn {
+			dest, ok := g.vertices[dep]
+			if !ok {
+				if condition.Required {
+					if ds, exists := project.DisabledServices[dep]; exists {
+						return nil, fmt.Errorf("service %q is required by %q but is disabled. Can be enabled by profiles %s", dep, name, ds.Profiles)
+					}
+					return nil, fmt.Errorf("service %q depends on unknown service %q", name, dep)
+				}
+				delete(s.DependsOn, name)
+				project.Services[name] = s
+				continue
+			}
+			src.children[dep] = dest
+			dest.parents[name] = src
+		}
+	}
+
+	err := g.checkCycle()
+	return g, err
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/graph/traversal.go 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/graph/traversal.go
--- 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/graph/traversal.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/graph/traversal.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,211 @@
+/*
+   Copyright 2020 The Compose Specification Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package graph
+
+import (
+	"context"
+	"sync"
+
+	"golang.org/x/exp/slices"
+	"golang.org/x/sync/errgroup"
+)
+
+// CollectorFn executes on each graph vertex based on visit order and return associated value
+type CollectorFn[S any, T any] func(context.Context, string, S) (T, error)
+
+// VisitorFn executes on each graph nodes based on visit order
+type VisitorFn[S any] func(context.Context, string, S) error
+
+type traversal[S any, T any] struct {
+	*Options
+	visitor CollectorFn[S, T]
+
+	mu      sync.Mutex
+	status  map[string]int
+	results map[string]T
+}
+
+type Options struct {
+	// inverse reverse the traversal direction
+	inverse bool
+	// maxConcurrency limit the concurrent execution of visitorFn while walking the graph
+	maxConcurrency int
+	// after marks a set of node as starting points walking the graph
+	after []string
+}
+
+const (
+	vertexEntered = iota
+	vertexVisited
+)
+
+func newTraversal[S, T any](fn CollectorFn[S, T]) *traversal[S, T] {
+	return &traversal[S, T]{
+		Options: &Options{},
+		status:  map[string]int{},
+		results: map[string]T{},
+		visitor: fn,
+	}
+}
+
+// WithMaxConcurrency configure traversal to limit concurrency walking graph nodes
+func WithMaxConcurrency(max int) func(*Options) {
+	return func(o *Options) {
+		o.maxConcurrency = max
+	}
+}
+
+// InReverseOrder configure traversal to walk the graph in reverse dependency order
+func InReverseOrder(o *Options) {
+	o.inverse = true
+}
+
+// WithRootNodesAndDown creates a graphTraversal to start from selected nodes
+func WithRootNodesAndDown(nodes []string) func(*Options) {
+	return func(o *Options) {
+		o.after = nodes
+	}
+}
+
+func walk[S, T any](ctx context.Context, g *graph[S], t *traversal[S, T]) error {
+	expect := len(g.vertices)
+	if expect == 0 {
+		return nil
+	}
+	// nodeCh need to allow n=expect writers while reader goroutine could have returned after ctx.Done
+	nodeCh := make(chan *vertex[S], expect)
+	defer close(nodeCh)
+
+	eg, ctx := errgroup.WithContext(ctx)
+	if t.maxConcurrency > 0 {
+		eg.SetLimit(t.maxConcurrency + 1)
+	}
+
+	eg.Go(func() error {
+		for {
+			select {
+			case <-ctx.Done():
+				return nil
+			case node := <-nodeCh:
+				expect--
+				if expect == 0 {
+					return nil
+				}
+
+				for _, adj := range t.adjacentNodes(node) {
+					t.visit(ctx, eg, adj, nodeCh)
+				}
+			}
+		}
+	})
+
+	// select nodes to start walking the graph based on traversal.direction
+	for _, node := range t.extremityNodes(g) {
+		t.visit(ctx, eg, node, nodeCh)
+	}
+
+	return eg.Wait()
+}
+
+func (t *traversal[S, T]) visit(ctx context.Context, eg *errgroup.Group, node *vertex[S], nodeCh chan *vertex[S]) {
+	if !t.ready(node) {
+		// don't visit this service yet as dependencies haven't been visited
+		return
+	}
+	if !t.enter(node) {
+		// another worker already acquired this node
+		return
+	}
+	eg.Go(func() error {
+		var (
+			err    error
+			result T
+		)
+		if !t.skip(node) {
+			result, err = t.visitor(ctx, node.key, *node.service)
+		}
+		t.done(node, result)
+		nodeCh <- node
+		return err
+	})
+}
+
+func (t *traversal[S, T]) extremityNodes(g *graph[S]) []*vertex[S] {
+	if t.inverse {
+		return g.roots()
+	}
+	return g.leaves()
+}
+
+func (t *traversal[S, T]) adjacentNodes(v *vertex[S]) map[string]*vertex[S] {
+	if t.inverse {
+		return v.children
+	}
+	return v.parents
+}
+
+func (t *traversal[S, T]) ready(v *vertex[S]) bool {
+	t.mu.Lock()
+	defer t.mu.Unlock()
+
+	depends := v.children
+	if t.inverse {
+		depends = v.parents
+	}
+	for name := range depends {
+		if t.status[name] != vertexVisited {
+			return false
+		}
+	}
+	return true
+}
+
+func (t *traversal[S, T]) enter(v *vertex[S]) bool {
+	t.mu.Lock()
+	defer t.mu.Unlock()
+
+	if _, ok := t.status[v.key]; ok {
+		return false
+	}
+	t.status[v.key] = vertexEntered
+	return true
+}
+
+func (t *traversal[S, T]) done(v *vertex[S], result T) {
+	t.mu.Lock()
+	defer t.mu.Unlock()
+	t.status[v.key] = vertexVisited
+	t.results[v.key] = result
+}
+
+func (t *traversal[S, T]) skip(node *vertex[S]) bool {
+	if len(t.after) == 0 {
+		return false
+	}
+	if slices.Contains(t.after, node.key) {
+		return false
+	}
+
+	// is none of our starting node is a descendent, skip visit
+	ancestors := node.descendents()
+	for _, name := range t.after {
+		if slices.Contains(ancestors, name) {
+			return false
+		}
+	}
+	return true
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/interpolation/interpolation.go 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/interpolation/interpolation.go
--- 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/interpolation/interpolation.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/interpolation/interpolation.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,137 @@
+/*
+   Copyright 2020 The Compose Specification Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package interpolation
+
+import (
+	"errors"
+	"fmt"
+	"os"
+
+	"github.com/compose-spec/compose-go/v2/template"
+	"github.com/compose-spec/compose-go/v2/tree"
+)
+
+// Options supported by Interpolate
+type Options struct {
+	// LookupValue from a key
+	LookupValue LookupValue
+	// TypeCastMapping maps key paths to functions to cast to a type
+	TypeCastMapping map[tree.Path]Cast
+	// Substitution function to use
+	Substitute func(string, template.Mapping) (string, error)
+}
+
+// LookupValue is a function which maps from variable names to values.
+// Returns the value as a string and a bool indicating whether
+// the value is present, to distinguish between an empty string
+// and the absence of a value.
+type LookupValue func(key string) (string, bool)
+
+// Cast a value to a new type, or return an error if the value can't be cast
+type Cast func(value string) (interface{}, error)
+
+// Interpolate replaces variables in a string with the values from a mapping
+func Interpolate(config map[string]interface{}, opts Options) (map[string]interface{}, error) {
+	if opts.LookupValue == nil {
+		opts.LookupValue = os.LookupEnv
+	}
+	if opts.TypeCastMapping == nil {
+		opts.TypeCastMapping = make(map[tree.Path]Cast)
+	}
+	if opts.Substitute == nil {
+		opts.Substitute = template.Substitute
+	}
+
+	out := map[string]interface{}{}
+
+	for key, value := range config {
+		interpolatedValue, err := recursiveInterpolate(value, tree.NewPath(key), opts)
+		if err != nil {
+			return out, err
+		}
+		out[key] = interpolatedValue
+	}
+
+	return out, nil
+}
+
+func recursiveInterpolate(value interface{}, path tree.Path, opts Options) (interface{}, error) {
+	switch value := value.(type) {
+	case string:
+		newValue, err := opts.Substitute(value, template.Mapping(opts.LookupValue))
+		if err != nil {
+			return value, newPathError(path, err)
+		}
+		caster, ok := opts.getCasterForPath(path)
+		if !ok {
+			return newValue, nil
+		}
+		casted, err := caster(newValue)
+		if err != nil {
+			return casted, newPathError(path, fmt.Errorf("failed to cast to expected type: %w", err))
+		}
+		return casted, nil
+
+	case map[string]interface{}:
+		out := map[string]interface{}{}
+		for key, elem := range value {
+			interpolatedElem, err := recursiveInterpolate(elem, path.Next(key), opts)
+			if err != nil {
+				return nil, err
+			}
+			out[key] = interpolatedElem
+		}
+		return out, nil
+
+	case []interface{}:
+		out := make([]interface{}, len(value))
+		for i, elem := range value {
+			interpolatedElem, err := recursiveInterpolate(elem, path.Next(tree.PathMatchList), opts)
+			if err != nil {
+				return nil, err
+			}
+			out[i] = interpolatedElem
+		}
+		return out, nil
+
+	default:
+		return value, nil
+	}
+}
+
+func newPathError(path tree.Path, err error) error {
+	var ite *template.InvalidTemplateError
+	switch {
+	case err == nil:
+		return nil
+	case errors.As(err, &ite):
+		return fmt.Errorf(
+			"invalid interpolation format for %s.\nYou may need to escape any $ with another $.\n%s",
+			path, ite.Template)
+	default:
+		return fmt.Errorf("error while interpolating %s: %w", path, err)
+	}
+}
+
+func (o Options) getCasterForPath(path tree.Path) (Cast, bool) {
+	for pattern, caster := range o.TypeCastMapping {
+		if path.Matches(pattern) {
+			return caster, true
+		}
+	}
+	return nil, false
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/loader/environment.go 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/loader/environment.go
--- 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/loader/environment.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/loader/environment.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,110 @@
+/*
+   Copyright 2020 The Compose Specification Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package loader
+
+import (
+	"fmt"
+
+	"github.com/compose-spec/compose-go/v2/types"
+)
+
+// ResolveEnvironment update the environment variables for the format {- VAR} (without interpolation)
+func ResolveEnvironment(dict map[string]any, environment types.Mapping) {
+	resolveServicesEnvironment(dict, environment)
+	resolveSecretsEnvironment(dict, environment)
+	resolveConfigsEnvironment(dict, environment)
+}
+
+func resolveServicesEnvironment(dict map[string]any, environment types.Mapping) {
+	services, ok := dict["services"].(map[string]any)
+	if !ok {
+		return
+	}
+
+	for service, cfg := range services {
+		serviceConfig, ok := cfg.(map[string]any)
+		if !ok {
+			continue
+		}
+		serviceEnv, ok := serviceConfig["environment"].([]any)
+		if !ok {
+			continue
+		}
+		envs := []any{}
+		for _, env := range serviceEnv {
+			varEnv, ok := env.(string)
+			if !ok {
+				continue
+			}
+			if found, ok := environment[varEnv]; ok {
+				envs = append(envs, fmt.Sprintf("%s=%s", varEnv, found))
+			} else {
+				// either does not exist or it was already resolved in interpolation
+				envs = append(envs, varEnv)
+			}
+		}
+		serviceConfig["environment"] = envs
+		services[service] = serviceConfig
+	}
+	dict["services"] = services
+}
+
+func resolveSecretsEnvironment(dict map[string]any, environment types.Mapping) {
+	secrets, ok := dict["secrets"].(map[string]any)
+	if !ok {
+		return
+	}
+
+	for name, cfg := range secrets {
+		secret, ok := cfg.(map[string]any)
+		if !ok {
+			continue
+		}
+		env, ok := secret["environment"].(string)
+		if !ok {
+			continue
+		}
+		if found, ok := environment[env]; ok {
+			secret[types.SecretConfigXValue] = found
+		}
+		secrets[name] = secret
+	}
+	dict["secrets"] = secrets
+}
+
+func resolveConfigsEnvironment(dict map[string]any, environment types.Mapping) {
+	configs, ok := dict["configs"].(map[string]any)
+	if !ok {
+		return
+	}
+
+	for name, cfg := range configs {
+		config, ok := cfg.(map[string]any)
+		if !ok {
+			continue
+		}
+		env, ok := config["environment"].(string)
+		if !ok {
+			continue
+		}
+		if found, ok := environment[env]; ok {
+			config["content"] = found
+		}
+		configs[name] = config
+	}
+	dict["configs"] = configs
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/loader/example1.env 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/loader/example1.env
--- 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/loader/example1.env	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/loader/example1.env	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,10 @@
+# passed through
+FOO=foo_from_env_file
+ENV.WITH.DOT=ok
+ENV_WITH_UNDERSCORE=ok
+
+# overridden in example2.env
+BAR=bar_from_env_file
+
+# overridden in full-example.yml
+BAZ=baz_from_env_file
diff -pruN 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/loader/example1.label 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/loader/example1.label
--- 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/loader/example1.label	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/loader/example1.label	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,10 @@
+# passed through
+FOO=foo_from_label_file
+LABEL.WITH.DOT=ok
+LABEL_WITH_UNDERSCORE=ok
+
+# overridden in example2.label
+BAR=bar_from_label_file
+
+# overridden in full-example.yml
+BAZ=baz_from_label_file
diff -pruN 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/loader/example2.env 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/loader/example2.env
--- 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/loader/example2.env	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/loader/example2.env	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,4 @@
+BAR=bar_from_env_file_2
+
+# overridden in configDetails.Environment
+QUX=quz_from_env_file_2
diff -pruN 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/loader/example2.label 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/loader/example2.label
--- 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/loader/example2.label	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/loader/example2.label	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,4 @@
+BAR=bar_from_label_file_2
+
+# overridden in configDetails.Labels
+QUX=quz_from_label_file_2
diff -pruN 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/loader/extends.go 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/loader/extends.go
--- 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/loader/extends.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/loader/extends.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,224 @@
+/*
+   Copyright 2020 The Compose Specification Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package loader
+
+import (
+	"context"
+	"fmt"
+	"path/filepath"
+
+	"github.com/compose-spec/compose-go/v2/consts"
+	"github.com/compose-spec/compose-go/v2/override"
+	"github.com/compose-spec/compose-go/v2/paths"
+	"github.com/compose-spec/compose-go/v2/types"
+)
+
+// as we use another service definition by `extends`, we must exclude attributes which creates dependency to another service
+// see https://github.com/compose-spec/compose-spec/blob/main/05-services.md#restrictions
+var exclusions = []string{"depends_on", "volumes_from"}
+
+func ApplyExtends(ctx context.Context, dict map[string]any, opts *Options, tracker *cycleTracker, post ...PostProcessor) error {
+	a, ok := dict["services"]
+	if !ok {
+		return nil
+	}
+	services, ok := a.(map[string]any)
+	if !ok {
+		return fmt.Errorf("services must be a mapping")
+	}
+	for name := range services {
+		merged, err := applyServiceExtends(ctx, name, services, opts, tracker, post...)
+		if err != nil {
+			return err
+		}
+		services[name] = merged
+	}
+	dict["services"] = services
+	return nil
+}
+
+func applyServiceExtends(ctx context.Context, name string, services map[string]any, opts *Options, tracker *cycleTracker, post ...PostProcessor) (any, error) {
+	s := services[name]
+	if s == nil {
+		return nil, nil
+	}
+	service, ok := s.(map[string]any)
+	if !ok {
+		return nil, fmt.Errorf("services.%s must be a mapping", name)
+	}
+	extends, ok := service["extends"]
+	if !ok {
+		return s, nil
+	}
+	filename := ctx.Value(consts.ComposeFileKey{}).(string)
+	var (
+		err  error
+		ref  string
+		file any
+	)
+	switch v := extends.(type) {
+	case map[string]any:
+		ref = v["service"].(string)
+		file = v["file"]
+		opts.ProcessEvent("extends", v)
+	case string:
+		ref = v
+		opts.ProcessEvent("extends", map[string]any{"service": ref})
+	}
+
+	var (
+		base      any
+		processor PostProcessor
+	)
+
+	if file != nil {
+		refFilename := file.(string)
+		services, processor, err = getExtendsBaseFromFile(ctx, name, ref, filename, refFilename, opts, tracker)
+		post = append(post, processor)
+		if err != nil {
+			return nil, err
+		}
+		filename = refFilename
+	} else {
+		_, ok := services[ref]
+		if !ok {
+			return nil, fmt.Errorf("cannot extend service %q in %s: service %q not found", name, filename, ref)
+		}
+	}
+
+	tracker, err = tracker.Add(filename, name)
+	if err != nil {
+		return nil, err
+	}
+
+	// recursively apply `extends`
+	base, err = applyServiceExtends(ctx, ref, services, opts, tracker, post...)
+	if err != nil {
+		return nil, err
+	}
+
+	if base == nil {
+		return service, nil
+	}
+	source := deepClone(base).(map[string]any)
+
+	for _, processor := range post {
+		processor.Apply(map[string]any{
+			"services": map[string]any{
+				name: source,
+			},
+		})
+	}
+	for _, exclusion := range exclusions {
+		delete(source, exclusion)
+	}
+	merged, err := override.ExtendService(source, service)
+	if err != nil {
+		return nil, err
+	}
+
+	delete(merged, "extends")
+	services[name] = merged
+	return merged, nil
+}
+
+func getExtendsBaseFromFile(
+	ctx context.Context,
+	name, ref string,
+	path, refPath string,
+	opts *Options,
+	ct *cycleTracker,
+) (map[string]any, PostProcessor, error) {
+	for _, loader := range opts.ResourceLoaders {
+		if !loader.Accept(refPath) {
+			continue
+		}
+		local, err := loader.Load(ctx, refPath)
+		if err != nil {
+			return nil, nil, err
+		}
+		localdir := filepath.Dir(local)
+		relworkingdir := loader.Dir(refPath)
+
+		extendsOpts := opts.clone()
+		// replace localResourceLoader with a new flavour, using extended file base path
+		extendsOpts.ResourceLoaders = append(opts.RemoteResourceLoaders(), localResourceLoader{
+			WorkingDir: localdir,
+		})
+		extendsOpts.ResolvePaths = false // we do relative path resolution after file has been loaded
+		extendsOpts.SkipNormalization = true
+		extendsOpts.SkipConsistencyCheck = true
+		extendsOpts.SkipInclude = true
+		extendsOpts.SkipExtends = true    // we manage extends recursively based on raw service definition
+		extendsOpts.SkipValidation = true // we validate the merge result
+		extendsOpts.SkipDefaultValues = true
+		source, processor, err := loadYamlFile(ctx, types.ConfigFile{Filename: local},
+			extendsOpts, relworkingdir, nil, ct, map[string]any{}, nil)
+		if err != nil {
+			return nil, nil, err
+		}
+		m, ok := source["services"]
+		if !ok {
+			return nil, nil, fmt.Errorf("cannot extend service %q in %s: no services section", name, local)
+		}
+		services, ok := m.(map[string]any)
+		if !ok {
+			return nil, nil, fmt.Errorf("cannot extend service %q in %s: services must be a mapping", name, local)
+		}
+		_, ok = services[ref]
+		if !ok {
+			return nil, nil, fmt.Errorf(
+				"cannot extend service %q in %s: service %q not found in %s",
+				name,
+				path,
+				ref,
+				refPath,
+			)
+		}
+
+		var remotes []paths.RemoteResource
+		for _, loader := range opts.RemoteResourceLoaders() {
+			remotes = append(remotes, loader.Accept)
+		}
+		err = paths.ResolveRelativePaths(source, relworkingdir, remotes)
+		if err != nil {
+			return nil, nil, err
+		}
+
+		return services, processor, nil
+	}
+	return nil, nil, fmt.Errorf("cannot read %s", refPath)
+}
+
+func deepClone(value any) any {
+	switch v := value.(type) {
+	case []any:
+		cp := make([]any, len(v))
+		for i, e := range v {
+			cp[i] = deepClone(e)
+		}
+		return cp
+	case map[string]any:
+		cp := make(map[string]any, len(v))
+		for k, e := range v {
+			cp[k] = deepClone(e)
+		}
+		return cp
+	default:
+		return value
+	}
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/loader/fix.go 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/loader/fix.go
--- 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/loader/fix.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/loader/fix.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,36 @@
+/*
+   Copyright 2020 The Compose Specification Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package loader
+
+// fixEmptyNotNull is a workaround for https://github.com/xeipuuv/gojsonschema/issues/141
+// as go-yaml `[]` will load as a `[]any(nil)`, which is not the same as an empty array
+func fixEmptyNotNull(value any) interface{} {
+	switch v := value.(type) {
+	case []any:
+		if v == nil {
+			return []any{}
+		}
+		for i, e := range v {
+			v[i] = fixEmptyNotNull(e)
+		}
+	case map[string]any:
+		for k, e := range v {
+			v[k] = fixEmptyNotNull(e)
+		}
+	}
+	return value
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/loader/full-example.yml 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/loader/full-example.yml
--- 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/loader/full-example.yml	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/loader/full-example.yml	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,461 @@
+name: full_example_project_name
+services:
+
+  bar:
+    build:
+      dockerfile_inline: |
+        FROM alpine
+        RUN echo "hello" > /world.txt
+
+  foo:
+    annotations:
+      - com.example.foo=bar
+    build:
+      context: ./dir
+      dockerfile: Dockerfile
+      args:
+        foo: bar
+      ssh:
+        - default
+      target: foo
+      network: foo
+      cache_from:
+        - foo
+        - bar
+      labels: [FOO=BAR]
+      additional_contexts:
+        foo: ./bar
+      secrets:
+        - source: secret1
+          target: /run/secrets/secret1
+        - source: secret2
+          target: my_secret
+          uid: '103'
+          gid: '103'
+          mode: 0440
+      tags:
+        - foo:v1.0.0
+        - docker.io/username/foo:my-other-tag
+        - ${COMPOSE_PROJECT_NAME}:1.0.0
+      platforms:
+        - linux/amd64
+        - linux/arm64
+
+
+    cap_add:
+      - ALL
+
+    cap_drop:
+      - NET_ADMIN
+      - SYS_ADMIN
+
+    cgroup_parent: m-executor-abcd
+
+    # String or list
+    command: bundle exec thin -p 3000
+    # command: ["bundle", "exec", "thin", "-p", "3000"]
+
+    configs:
+      - config1
+      - source: config2
+        target: /my_config
+        uid: '103'
+        gid: '103'
+        mode: 0440
+
+    container_name: my-web-container
+
+    depends_on:
+      - db
+      - redis
+
+    deploy:
+      mode: replicated
+      replicas: 6
+      labels: [FOO=BAR]
+      rollback_config:
+        parallelism: 3
+        delay: 10s
+        failure_action: continue
+        monitor: 60s
+        max_failure_ratio: 0.3
+        order: start-first
+      update_config:
+        parallelism: 3
+        delay: 10s
+        failure_action: continue
+        monitor: 60s
+        max_failure_ratio: 0.3
+        order: start-first
+      resources:
+        limits:
+          cpus: '0.001'
+          memory: 50M
+        reservations:
+          cpus: '0.0001'
+          memory: 20M
+          generic_resources:
+            - discrete_resource_spec:
+                kind: 'gpu'
+                value: 2
+            - discrete_resource_spec:
+                kind: 'ssd'
+                value: 1
+      restart_policy:
+        condition: on-failure
+        delay: 5s
+        max_attempts: 3
+        window: 120s
+      placement:
+        constraints: [node=foo]
+        max_replicas_per_node: 5
+        preferences:
+          - spread: node.labels.az
+      endpoint_mode: dnsrr
+
+    device_cgroup_rules:
+      - "c 1:3 mr"
+      - "a 7:* rmw"
+
+    devices:
+      - source: /dev/ttyUSB0
+        target: /dev/ttyUSB0
+        permissions: rwm
+
+    # String or list
+    # dns: 8.8.8.8
+    dns:
+      - 8.8.8.8
+      - 9.9.9.9
+
+    # String or list
+    # dns_search: example.com
+    dns_search:
+      - dc1.example.com
+      - dc2.example.com
+
+    domainname: foo.com
+
+    # String or list
+    # entrypoint: /code/entrypoint.sh -p 3000
+    entrypoint: ["/code/entrypoint.sh", "-p", "3000"]
+
+    # String or list
+    # env_file: .env
+    env_file:
+      - ./example1.env
+      - path: ./example2.env
+        required: false
+
+    # Mapping or list
+    # Mapping values can be strings, numbers or null
+    # Booleans are not allowed - must be quoted
+    environment:
+      BAZ: baz_from_service_def
+      QUX:
+    # environment:
+    #   - RACK_ENV=development
+    #   - SHOW=true
+    #   - SESSION_SECRET
+
+    # Items can be strings or numbers
+    expose:
+     - "3000"
+     - 8000
+
+    external_links:
+      - redis_1
+      - project_db_1:mysql
+      - project_db_1:postgresql
+
+    # Mapping or list
+    # Mapping values must be strings
+    # extra_hosts:
+    #   somehost: "162.242.195.82"
+    #   otherhost: "50.31.209.229"
+    extra_hosts:
+      - "otherhost:50.31.209.229"
+      - "somehost:162.242.195.82"
+
+    hostname: foo
+
+    healthcheck:
+      test: echo "hello world"
+      interval: 10s
+      timeout: 1s
+      retries: 5
+      start_period: 15s
+      start_interval: 5s
+
+    # Any valid image reference - repo, tag, id, sha
+    image: redis
+    # image: ubuntu:14.04
+    # image: tutum/influxdb
+    # image: example-registry.com:4000/postgresql
+    # image: a4bc65fd
+    # image: busybox@sha256:38a203e1986cf79639cfb9b2e1d6e773de84002feea2d4eb006b52004ee8502d
+
+    ipc: host
+
+    uts: host
+
+    # Mapping or list
+    # Mapping values can be strings, numbers or null
+    labels:
+      com.example.description: "Accounting webapp"
+      com.example.number: 42
+      com.example.empty-label:
+    # labels:
+    #   - "com.example.description=Accounting webapp"
+    #   - "com.example.number=42"
+    #   - "com.example.empty-label"
+
+    label_file:
+      - ./example1.label
+      - ./example2.label
+
+    links:
+     - db
+     - db:database
+     - redis
+
+    logging:
+      driver: syslog
+      options:
+        syslog-address: "tcp://192.168.0.42:123"
+
+    mac_address: 02:42:ac:11:65:43
+
+    # network_mode: "bridge"
+    # network_mode: "host"
+    # network_mode: "none"
+    # Use the network mode of an arbitrary container from another service
+    # network_mode: "service:db"
+    # Use the network mode of another container, specified by name or id
+    # network_mode: "container:some-container"
+    network_mode: "container:0cfeab0f748b9a743dc3da582046357c6ef497631c1a016d28d2bf9b4f899f7b"
+
+    networks:
+      some-network:
+        aliases:
+         - alias1
+         - alias3
+      other-network:
+        ipv4_address: 172.16.238.10
+        ipv6_address: 2001:3984:3989::10
+        mac_address: 02:42:72:98:65:08
+      other-other-network:
+
+    pid: "host"
+
+    ports:
+      - 3000
+      - "3001-3005"
+      - "8000:8000"
+      - "9090-9091:8080-8081"
+      - "49100:22"
+      - "127.0.0.1:8001:8001"
+      - "127.0.0.1:5000-5010:5000-5010"
+
+    privileged: true
+
+    read_only: true
+
+    restart: always
+
+    secrets:
+      - source: secret1
+        target: /run/secrets/secret1
+      - source: secret2
+        target: my_secret
+        uid: '103'
+        gid: '103'
+        mode: 0440
+
+    security_opt:
+      - label=level:s0:c100,c200
+      - label=type:svirt_apache_t
+
+    stdin_open: true
+
+    stop_grace_period: 20s
+
+    stop_signal: SIGUSR1
+    storage_opt:
+      size: "20G"
+    sysctls:
+      net.core.somaxconn: 1024
+      net.ipv4.tcp_syncookies: 0
+
+    # String or list
+    # tmpfs: /run
+    tmpfs:
+      - /run
+      - /tmp
+
+    tty: true
+
+    ulimits:
+      # Single number or mapping with soft + hard limits
+      nproc: 65535
+      nofile:
+        soft: 20000
+        hard: 40000
+
+    user: someone
+
+    volumes:
+      # Just specify a path and let the Engine create a volume
+      - /var/lib/anonymous
+      # Specify an absolute path mapping
+      - /opt/data:/var/lib/data
+      # Path on the host, relative to the Compose file
+      - .:/code
+      - ./static:/var/www/html
+      # User-relative path
+      - ~/configs:/etc/configs:ro
+      # Named volume
+      - datavolume:/var/lib/volume
+      - type: bind
+        source: ./opt
+        target: /opt/cached
+        consistency: cached
+      - type: tmpfs
+        target: /opt/tmpfs
+        tmpfs:
+          size: 10000
+
+    working_dir: /code
+    x-bar: baz
+    x-foo: bar
+
+networks:
+  # Entries can be null, which specifies simply that a network
+  # called "{project name}_some-network" should be created and
+  # use the default driver
+  some-network:
+
+  other-network:
+    driver: overlay
+
+    driver_opts:
+      # Values can be strings or numbers
+      foo: "bar"
+      baz: 1
+
+    ipam:
+      driver: overlay
+      # driver_opts:
+      #   # Values can be strings or numbers
+      #   com.docker.network.enable_ipv6: "true"
+      #   com.docker.network.numeric_value: 1
+      config:
+      - subnet: 172.28.0.0/16
+        ip_range: 172.28.5.0/24
+        gateway: 172.28.5.254
+        aux_addresses:
+          host1: 172.28.1.5
+          host2: 172.28.1.6
+          host3: 172.28.1.7
+      - subnet: 2001:3984:3989::/64
+        gateway: 2001:3984:3989::1
+
+    labels:
+      foo: bar
+
+  external-network:
+    # Specifies that a pre-existing network called "external-network"
+    # can be referred to within this file as "external-network"
+    external: true
+
+  other-external-network:
+    # Specifies that a pre-existing network called "my-cool-network"
+    # can be referred to within this file as "other-external-network"
+    external:
+      name: my-cool-network
+    x-bar: baz
+    x-foo: bar
+
+volumes:
+  # Entries can be null, which specifies simply that a volume
+  # called "{project name}_some-volume" should be created and
+  # use the default driver
+  some-volume:
+
+  other-volume:
+    driver: flocker
+
+    driver_opts:
+      # Values can be strings or numbers
+      foo: "bar"
+      baz: 1
+    labels:
+      foo: bar
+
+  another-volume:
+    name: "user_specified_name"
+    driver: vsphere
+
+    driver_opts:
+      # Values can be strings or numbers
+      foo: "bar"
+      baz: 1
+
+  external-volume:
+    # Specifies that a pre-existing volume called "external-volume"
+    # can be referred to within this file as "external-volume"
+    external: true
+
+  other-external-volume:
+    # Specifies that a pre-existing volume called "my-cool-volume"
+    # can be referred to within this file as "other-external-volume"
+    # This example uses the deprecated "volume.external.name" (replaced by "volume.name")
+    external:
+      name: my-cool-volume
+
+  external-volume3:
+    # Specifies that a pre-existing volume called "this-is-volume3"
+    # can be referred to within this file as "external-volume3"
+    name: this-is-volume3
+    external: true
+    x-bar: baz
+    x-foo: bar
+
+configs:
+  config1:
+    file: ./config_data
+    labels:
+      foo: bar
+  config2:
+    external:
+      name: my_config
+  config3:
+    external: true
+  config4:
+    name: foo
+    file: ~/config_data
+    x-bar: baz
+    x-foo: bar
+
+secrets:
+  secret1:
+    file: ./secret_data
+    labels:
+      foo: bar
+  secret2:
+    external:
+      name: my_secret
+  secret3:
+    external: true
+  secret4:
+    name: bar
+    environment: BAR
+    x-bar: baz
+    x-foo: bar
+  secret5:
+    file: /abs/secret_data
+x-bar: baz
+x-foo: bar
+x-nested:
+  bar: baz
+  foo: bar
diff -pruN 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/loader/include.go 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/loader/include.go
--- 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/loader/include.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/loader/include.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,204 @@
+/*
+   Copyright 2020 The Compose Specification Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package loader
+
+import (
+	"context"
+	"fmt"
+	"os"
+	"path/filepath"
+	"reflect"
+	"strings"
+
+	"github.com/compose-spec/compose-go/v2/dotenv"
+	interp "github.com/compose-spec/compose-go/v2/interpolation"
+	"github.com/compose-spec/compose-go/v2/types"
+)
+
+// loadIncludeConfig parse the required config from raw yaml
+func loadIncludeConfig(source any) ([]types.IncludeConfig, error) {
+	if source == nil {
+		return nil, nil
+	}
+	configs, ok := source.([]any)
+	if !ok {
+		return nil, fmt.Errorf("`include` must be a list, got %s", source)
+	}
+	for i, config := range configs {
+		if v, ok := config.(string); ok {
+			configs[i] = map[string]any{
+				"path": v,
+			}
+		}
+	}
+	var requires []types.IncludeConfig
+	err := Transform(source, &requires)
+	return requires, err
+}
+
+func ApplyInclude(ctx context.Context, workingDir string, environment types.Mapping, model map[string]any, options *Options, included []string) error {
+	includeConfig, err := loadIncludeConfig(model["include"])
+	if err != nil {
+		return err
+	}
+
+	for _, r := range includeConfig {
+		for _, listener := range options.Listeners {
+			listener("include", map[string]any{
+				"path":       r.Path,
+				"workingdir": workingDir,
+			})
+		}
+
+		var relworkingdir string
+		for i, p := range r.Path {
+			for _, loader := range options.ResourceLoaders {
+				if !loader.Accept(p) {
+					continue
+				}
+				path, err := loader.Load(ctx, p)
+				if err != nil {
+					return err
+				}
+				p = path
+
+				if i == 0 { // This is the "main" file, used to define project-directory. Others are overrides
+
+					switch {
+					case r.ProjectDirectory == "":
+						relworkingdir = loader.Dir(path)
+						r.ProjectDirectory = filepath.Dir(path)
+					case !filepath.IsAbs(r.ProjectDirectory):
+						relworkingdir = loader.Dir(r.ProjectDirectory)
+						r.ProjectDirectory = filepath.Join(workingDir, r.ProjectDirectory)
+
+					default:
+						relworkingdir = r.ProjectDirectory
+
+					}
+					for _, f := range included {
+						if f == path {
+							included = append(included, path)
+							return fmt.Errorf("include cycle detected:\n%s\n include %s", included[0], strings.Join(included[1:], "\n include "))
+						}
+					}
+				}
+			}
+			r.Path[i] = p
+		}
+
+		loadOptions := options.clone()
+		loadOptions.ResolvePaths = true
+		loadOptions.SkipNormalization = true
+		loadOptions.SkipConsistencyCheck = true
+		loadOptions.ResourceLoaders = append(loadOptions.RemoteResourceLoaders(), localResourceLoader{
+			WorkingDir: r.ProjectDirectory,
+		})
+
+		if len(r.EnvFile) == 0 {
+			f := filepath.Join(r.ProjectDirectory, ".env")
+			if s, err := os.Stat(f); err == nil && !s.IsDir() {
+				r.EnvFile = types.StringList{f}
+			}
+		} else {
+			envFile := []string{}
+			for _, f := range r.EnvFile {
+				if !filepath.IsAbs(f) {
+					f = filepath.Join(workingDir, f)
+					s, err := os.Stat(f)
+					if err != nil {
+						return err
+					}
+					if s.IsDir() {
+						return fmt.Errorf("%s is not a file", f)
+					}
+				}
+				envFile = append(envFile, f)
+			}
+			r.EnvFile = envFile
+		}
+
+		envFromFile, err := dotenv.GetEnvFromFile(environment, r.EnvFile)
+		if err != nil {
+			return err
+		}
+
+		config := types.ConfigDetails{
+			WorkingDir:  relworkingdir,
+			ConfigFiles: types.ToConfigFiles(r.Path),
+			Environment: environment.Clone().Merge(envFromFile),
+		}
+		loadOptions.Interpolate = &interp.Options{
+			Substitute:      options.Interpolate.Substitute,
+			LookupValue:     config.LookupEnv,
+			TypeCastMapping: options.Interpolate.TypeCastMapping,
+		}
+		imported, err := loadYamlModel(ctx, config, loadOptions, &cycleTracker{}, included)
+		if err != nil {
+			return err
+		}
+		err = importResources(imported, model)
+		if err != nil {
+			return err
+		}
+	}
+	delete(model, "include")
+	return nil
+}
+
+// importResources import into model all resources defined by imported, and report error on conflict
+func importResources(source map[string]any, target map[string]any) error {
+	if err := importResource(source, target, "services"); err != nil {
+		return err
+	}
+	if err := importResource(source, target, "volumes"); err != nil {
+		return err
+	}
+	if err := importResource(source, target, "networks"); err != nil {
+		return err
+	}
+	if err := importResource(source, target, "secrets"); err != nil {
+		return err
+	}
+	if err := importResource(source, target, "configs"); err != nil {
+		return err
+	}
+	return nil
+}
+
+func importResource(source map[string]any, target map[string]any, key string) error {
+	from := source[key]
+	if from != nil {
+		var to map[string]any
+		if v, ok := target[key]; ok {
+			to = v.(map[string]any)
+		} else {
+			to = map[string]any{}
+		}
+		for name, a := range from.(map[string]any) {
+			if conflict, ok := to[name]; ok {
+				if reflect.DeepEqual(a, conflict) {
+					continue
+				}
+				return fmt.Errorf("%s.%s conflicts with imported resource", key, name)
+			}
+			to[name] = a
+		}
+		target[key] = to
+	}
+	return nil
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/loader/interpolate.go 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/loader/interpolate.go
--- 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/loader/interpolate.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/loader/interpolate.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,117 @@
+/*
+   Copyright 2020 The Compose Specification Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package loader
+
+import (
+	"fmt"
+	"strconv"
+	"strings"
+
+	interp "github.com/compose-spec/compose-go/v2/interpolation"
+	"github.com/compose-spec/compose-go/v2/tree"
+	"github.com/sirupsen/logrus"
+)
+
+var interpolateTypeCastMapping = map[tree.Path]interp.Cast{
+	servicePath("configs", tree.PathMatchList, "mode"):             toInt,
+	servicePath("cpu_count"):                                       toInt64,
+	servicePath("cpu_percent"):                                     toFloat,
+	servicePath("cpu_period"):                                      toInt64,
+	servicePath("cpu_quota"):                                       toInt64,
+	servicePath("cpu_rt_period"):                                   toInt64,
+	servicePath("cpu_rt_runtime"):                                  toInt64,
+	servicePath("cpus"):                                            toFloat32,
+	servicePath("cpu_shares"):                                      toInt64,
+	servicePath("init"):                                            toBoolean,
+	servicePath("deploy", "replicas"):                              toInt,
+	servicePath("deploy", "update_config", "parallelism"):          toInt,
+	servicePath("deploy", "update_config", "max_failure_ratio"):    toFloat,
+	servicePath("deploy", "rollback_config", "parallelism"):        toInt,
+	servicePath("deploy", "rollback_config", "max_failure_ratio"):  toFloat,
+	servicePath("deploy", "restart_policy", "max_attempts"):        toInt,
+	servicePath("deploy", "placement", "max_replicas_per_node"):    toInt,
+	servicePath("healthcheck", "retries"):                          toInt,
+	servicePath("healthcheck", "disable"):                          toBoolean,
+	servicePath("oom_kill_disable"):                                toBoolean,
+	servicePath("oom_score_adj"):                                   toInt64,
+	servicePath("pids_limit"):                                      toInt64,
+	servicePath("ports", tree.PathMatchList, "target"):             toInt,
+	servicePath("privileged"):                                      toBoolean,
+	servicePath("read_only"):                                       toBoolean,
+	servicePath("scale"):                                           toInt,
+	servicePath("secrets", tree.PathMatchList, "mode"):             toInt,
+	servicePath("stdin_open"):                                      toBoolean,
+	servicePath("tty"):                                             toBoolean,
+	servicePath("ulimits", tree.PathMatchAll):                      toInt,
+	servicePath("ulimits", tree.PathMatchAll, "hard"):              toInt,
+	servicePath("ulimits", tree.PathMatchAll, "soft"):              toInt,
+	servicePath("volumes", tree.PathMatchList, "read_only"):        toBoolean,
+	servicePath("volumes", tree.PathMatchList, "volume", "nocopy"): toBoolean,
+	iPath("networks", tree.PathMatchAll, "external"):               toBoolean,
+	iPath("networks", tree.PathMatchAll, "internal"):               toBoolean,
+	iPath("networks", tree.PathMatchAll, "attachable"):             toBoolean,
+	iPath("networks", tree.PathMatchAll, "enable_ipv6"):            toBoolean,
+	iPath("volumes", tree.PathMatchAll, "external"):                toBoolean,
+	iPath("secrets", tree.PathMatchAll, "external"):                toBoolean,
+	iPath("configs", tree.PathMatchAll, "external"):                toBoolean,
+}
+
+func iPath(parts ...string) tree.Path {
+	return tree.NewPath(parts...)
+}
+
+func servicePath(parts ...string) tree.Path {
+	return iPath(append([]string{"services", tree.PathMatchAll}, parts...)...)
+}
+
+func toInt(value string) (interface{}, error) {
+	return strconv.Atoi(value)
+}
+
+func toInt64(value string) (interface{}, error) {
+	return strconv.ParseInt(value, 10, 64)
+}
+
+func toFloat(value string) (interface{}, error) {
+	return strconv.ParseFloat(value, 64)
+}
+
+func toFloat32(value string) (interface{}, error) {
+	f, err := strconv.ParseFloat(value, 32)
+	if err != nil {
+		return nil, err
+	}
+	return float32(f), nil
+}
+
+// should match http://yaml.org/type/bool.html
+func toBoolean(value string) (interface{}, error) {
+	switch strings.ToLower(value) {
+	case "true":
+		return true, nil
+	case "false":
+		return false, nil
+	case "y", "yes", "on":
+		logrus.Warnf("%q for boolean is not supported by YAML 1.2, please use `true`", value)
+		return true, nil
+	case "n", "no", "off":
+		logrus.Warnf("%q for boolean is not supported by YAML 1.2, please use `false`", value)
+		return false, nil
+	default:
+		return nil, fmt.Errorf("invalid boolean: %s", value)
+	}
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/loader/loader.go 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/loader/loader.go
--- 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/loader/loader.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/loader/loader.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,930 @@
+/*
+   Copyright 2020 The Compose Specification Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package loader
+
+import (
+	"bytes"
+	"context"
+	"errors"
+	"fmt"
+	"io"
+	"os"
+	"path/filepath"
+	"reflect"
+	"regexp"
+	"strconv"
+	"strings"
+
+	"github.com/compose-spec/compose-go/v2/consts"
+	"github.com/compose-spec/compose-go/v2/errdefs"
+	interp "github.com/compose-spec/compose-go/v2/interpolation"
+	"github.com/compose-spec/compose-go/v2/override"
+	"github.com/compose-spec/compose-go/v2/paths"
+	"github.com/compose-spec/compose-go/v2/schema"
+	"github.com/compose-spec/compose-go/v2/template"
+	"github.com/compose-spec/compose-go/v2/transform"
+	"github.com/compose-spec/compose-go/v2/tree"
+	"github.com/compose-spec/compose-go/v2/types"
+	"github.com/compose-spec/compose-go/v2/validation"
+	"github.com/go-viper/mapstructure/v2"
+	"github.com/sirupsen/logrus"
+	"golang.org/x/exp/slices"
+	"gopkg.in/yaml.v3"
+)
+
+// Options supported by Load
+type Options struct {
+	// Skip schema validation
+	SkipValidation bool
+	// Skip interpolation
+	SkipInterpolation bool
+	// Skip normalization
+	SkipNormalization bool
+	// Resolve path
+	ResolvePaths bool
+	// Convert Windows path
+	ConvertWindowsPaths bool
+	// Skip consistency check
+	SkipConsistencyCheck bool
+	// Skip extends
+	SkipExtends bool
+	// SkipInclude will ignore `include` and only load model from file(s) set by ConfigDetails
+	SkipInclude bool
+	// SkipResolveEnvironment will ignore computing `environment` for services
+	SkipResolveEnvironment bool
+	// SkipDefaultValues will ignore missing required attributes
+	SkipDefaultValues bool
+	// Interpolation options
+	Interpolate *interp.Options
+	// Discard 'env_file' entries after resolving to 'environment' section
+	discardEnvFiles bool
+	// Set project projectName
+	projectName string
+	// Indicates when the projectName was imperatively set or guessed from path
+	projectNameImperativelySet bool
+	// Profiles set profiles to enable
+	Profiles []string
+	// ResourceLoaders manages support for remote resources
+	ResourceLoaders []ResourceLoader
+	// KnownExtensions manages x-* attribute we know and the corresponding go structs
+	KnownExtensions map[string]any
+	// Metada for telemetry
+	Listeners []Listener
+}
+
+var versionWarning []string
+
+func (o *Options) warnObsoleteVersion(file string) {
+	if !slices.Contains(versionWarning, file) {
+		logrus.Warning(fmt.Sprintf("%s: the attribute `version` is obsolete, it will be ignored, please remove it to avoid potential confusion", file))
+	}
+	versionWarning = append(versionWarning, file)
+}
+
+type Listener = func(event string, metadata map[string]any)
+
+// Invoke all listeners for an event
+func (o *Options) ProcessEvent(event string, metadata map[string]any) {
+	for _, l := range o.Listeners {
+		l(event, metadata)
+	}
+}
+
+// ResourceLoader is a plugable remote resource resolver
+type ResourceLoader interface {
+	// Accept returns `true` is the resource reference matches ResourceLoader supported protocol(s)
+	Accept(path string) bool
+	// Load returns the path to a local copy of remote resource identified by `path`.
+	Load(ctx context.Context, path string) (string, error)
+	// Dir computes path to resource"s parent folder, made relative if possible
+	Dir(path string) string
+}
+
+// RemoteResourceLoaders excludes localResourceLoader from ResourceLoaders
+func (o Options) RemoteResourceLoaders() []ResourceLoader {
+	var loaders []ResourceLoader
+	for i, loader := range o.ResourceLoaders {
+		if _, ok := loader.(localResourceLoader); ok {
+			if i != len(o.ResourceLoaders)-1 {
+				logrus.Warning("misconfiguration of ResourceLoaders: localResourceLoader should be last")
+			}
+			continue
+		}
+		loaders = append(loaders, loader)
+	}
+	return loaders
+}
+
+type localResourceLoader struct {
+	WorkingDir string
+}
+
+func (l localResourceLoader) abs(p string) string {
+	if filepath.IsAbs(p) {
+		return p
+	}
+	return filepath.Join(l.WorkingDir, p)
+}
+
+func (l localResourceLoader) Accept(_ string) bool {
+	// LocalResourceLoader is the last loader tested so it always should accept the config and try to get the content.
+	return true
+}
+
+func (l localResourceLoader) Load(_ context.Context, p string) (string, error) {
+	return l.abs(p), nil
+}
+
+func (l localResourceLoader) Dir(originalPath string) string {
+	path := l.abs(originalPath)
+	if !l.isDir(path) {
+		path = l.abs(filepath.Dir(originalPath))
+	}
+	rel, err := filepath.Rel(l.WorkingDir, path)
+	if err != nil {
+		return path
+	}
+	return rel
+}
+
+func (l localResourceLoader) isDir(path string) bool {
+	fileInfo, err := os.Stat(path)
+	if err != nil {
+		return false
+	}
+	return fileInfo.IsDir()
+}
+
+func (o *Options) clone() *Options {
+	return &Options{
+		SkipValidation:             o.SkipValidation,
+		SkipInterpolation:          o.SkipInterpolation,
+		SkipNormalization:          o.SkipNormalization,
+		ResolvePaths:               o.ResolvePaths,
+		ConvertWindowsPaths:        o.ConvertWindowsPaths,
+		SkipConsistencyCheck:       o.SkipConsistencyCheck,
+		SkipExtends:                o.SkipExtends,
+		SkipInclude:                o.SkipInclude,
+		Interpolate:                o.Interpolate,
+		discardEnvFiles:            o.discardEnvFiles,
+		projectName:                o.projectName,
+		projectNameImperativelySet: o.projectNameImperativelySet,
+		Profiles:                   o.Profiles,
+		ResourceLoaders:            o.ResourceLoaders,
+		KnownExtensions:            o.KnownExtensions,
+		Listeners:                  o.Listeners,
+	}
+}
+
+func (o *Options) SetProjectName(name string, imperativelySet bool) {
+	o.projectName = name
+	o.projectNameImperativelySet = imperativelySet
+}
+
+func (o Options) GetProjectName() (string, bool) {
+	return o.projectName, o.projectNameImperativelySet
+}
+
+// serviceRef identifies a reference to a service. It's used to detect cyclic
+// references in "extends".
+type serviceRef struct {
+	filename string
+	service  string
+}
+
+type cycleTracker struct {
+	loaded []serviceRef
+}
+
+func (ct *cycleTracker) Add(filename, service string) (*cycleTracker, error) {
+	toAdd := serviceRef{filename: filename, service: service}
+	for _, loaded := range ct.loaded {
+		if toAdd == loaded {
+			// Create an error message of the form:
+			// Circular reference:
+			//   service-a in docker-compose.yml
+			//   extends service-b in docker-compose.yml
+			//   extends service-a in docker-compose.yml
+			errLines := []string{
+				"Circular reference:",
+				fmt.Sprintf("  %s in %s", ct.loaded[0].service, ct.loaded[0].filename),
+			}
+			for _, service := range append(ct.loaded[1:], toAdd) {
+				errLines = append(errLines, fmt.Sprintf("  extends %s in %s", service.service, service.filename))
+			}
+
+			return nil, errors.New(strings.Join(errLines, "\n"))
+		}
+	}
+
+	var branch []serviceRef
+	branch = append(branch, ct.loaded...)
+	branch = append(branch, toAdd)
+	return &cycleTracker{
+		loaded: branch,
+	}, nil
+}
+
+// WithDiscardEnvFiles sets the Options to discard the `env_file` section after resolving to
+// the `environment` section
+func WithDiscardEnvFiles(opts *Options) {
+	opts.discardEnvFiles = true
+}
+
+// WithSkipValidation sets the Options to skip validation when loading sections
+func WithSkipValidation(opts *Options) {
+	opts.SkipValidation = true
+}
+
+// WithProfiles sets profiles to be activated
+func WithProfiles(profiles []string) func(*Options) {
+	return func(opts *Options) {
+		opts.Profiles = profiles
+	}
+}
+
+// ParseYAML reads the bytes from a file, parses the bytes into a mapping
+// structure, and returns it.
+func ParseYAML(source []byte) (map[string]interface{}, error) {
+	r := bytes.NewReader(source)
+	decoder := yaml.NewDecoder(r)
+	m, _, err := parseYAML(decoder)
+	return m, err
+}
+
+// PostProcessor is used to tweak compose model based on metadata extracted during yaml Unmarshal phase
+// that hardly can be implemented using go-yaml and mapstructure
+type PostProcessor interface {
+	yaml.Unmarshaler
+
+	// Apply changes to compose model based on recorder metadata
+	Apply(interface{}) error
+}
+
+func parseYAML(decoder *yaml.Decoder) (map[string]interface{}, PostProcessor, error) {
+	var cfg interface{}
+	processor := ResetProcessor{target: &cfg}
+
+	if err := decoder.Decode(&processor); err != nil {
+		return nil, nil, err
+	}
+	stringMap, ok := cfg.(map[string]interface{})
+	if ok {
+		converted, err := convertToStringKeysRecursive(stringMap, "")
+		if err != nil {
+			return nil, nil, err
+		}
+		return converted.(map[string]interface{}), &processor, nil
+	}
+	cfgMap, ok := cfg.(map[interface{}]interface{})
+	if !ok {
+		return nil, nil, errors.New("Top-level object must be a mapping")
+	}
+	converted, err := convertToStringKeysRecursive(cfgMap, "")
+	if err != nil {
+		return nil, nil, err
+	}
+	return converted.(map[string]interface{}), &processor, nil
+}
+
+// LoadConfigFiles ingests config files with ResourceLoader and returns config details with paths to local copies
+func LoadConfigFiles(ctx context.Context, configFiles []string, workingDir string, options ...func(*Options)) (*types.ConfigDetails, error) {
+	if len(configFiles) < 1 {
+		return &types.ConfigDetails{}, fmt.Errorf("no configuration file provided: %w", errdefs.ErrNotFound)
+	}
+
+	opts := &Options{}
+	config := &types.ConfigDetails{
+		ConfigFiles: make([]types.ConfigFile, len(configFiles)),
+	}
+
+	for _, op := range options {
+		op(opts)
+	}
+	opts.ResourceLoaders = append(opts.ResourceLoaders, localResourceLoader{})
+
+	for i, p := range configFiles {
+		if p == "-" {
+			config.ConfigFiles[i] = types.ConfigFile{
+				Filename: p,
+			}
+			continue
+		}
+
+		for _, loader := range opts.ResourceLoaders {
+			_, isLocalResourceLoader := loader.(localResourceLoader)
+			if !loader.Accept(p) {
+				continue
+			}
+			local, err := loader.Load(ctx, p)
+			if err != nil {
+				return nil, err
+			}
+			if config.WorkingDir == "" && !isLocalResourceLoader {
+				config.WorkingDir = filepath.Dir(local)
+			}
+			abs, err := filepath.Abs(local)
+			if err != nil {
+				abs = local
+			}
+			config.ConfigFiles[i] = types.ConfigFile{
+				Filename: abs,
+			}
+			break
+		}
+	}
+	if config.WorkingDir == "" {
+		config.WorkingDir = workingDir
+	}
+	return config, nil
+}
+
+// Load reads a ConfigDetails and returns a fully loaded configuration.
+// Deprecated: use LoadWithContext.
+func Load(configDetails types.ConfigDetails, options ...func(*Options)) (*types.Project, error) {
+	return LoadWithContext(context.Background(), configDetails, options...)
+}
+
+// LoadWithContext reads a ConfigDetails and returns a fully loaded configuration as a compose-go Project
+func LoadWithContext(ctx context.Context, configDetails types.ConfigDetails, options ...func(*Options)) (*types.Project, error) {
+	opts := toOptions(&configDetails, options)
+	dict, err := loadModelWithContext(ctx, &configDetails, opts)
+	if err != nil {
+		return nil, err
+	}
+	return modelToProject(dict, opts, configDetails)
+}
+
+// LoadModelWithContext reads a ConfigDetails and returns a fully loaded configuration as a yaml dictionary
+func LoadModelWithContext(ctx context.Context, configDetails types.ConfigDetails, options ...func(*Options)) (map[string]any, error) {
+	opts := toOptions(&configDetails, options)
+	return loadModelWithContext(ctx, &configDetails, opts)
+}
+
+// LoadModelWithContext reads a ConfigDetails and returns a fully loaded configuration as a yaml dictionary
+func loadModelWithContext(ctx context.Context, configDetails *types.ConfigDetails, opts *Options) (map[string]any, error) {
+	if len(configDetails.ConfigFiles) < 1 {
+		return nil, errors.New("No files specified")
+	}
+
+	err := projectName(configDetails, opts)
+	if err != nil {
+		return nil, err
+	}
+
+	return load(ctx, *configDetails, opts, nil)
+}
+
+func toOptions(configDetails *types.ConfigDetails, options []func(*Options)) *Options {
+	opts := &Options{
+		Interpolate: &interp.Options{
+			Substitute:      template.Substitute,
+			LookupValue:     configDetails.LookupEnv,
+			TypeCastMapping: interpolateTypeCastMapping,
+		},
+		ResolvePaths: true,
+	}
+
+	for _, op := range options {
+		op(opts)
+	}
+	opts.ResourceLoaders = append(opts.ResourceLoaders, localResourceLoader{configDetails.WorkingDir})
+	return opts
+}
+
+func loadYamlModel(ctx context.Context, config types.ConfigDetails, opts *Options, ct *cycleTracker, included []string) (map[string]interface{}, error) {
+	var (
+		dict = map[string]interface{}{}
+		err  error
+	)
+	workingDir, environment := config.WorkingDir, config.Environment
+
+	for _, file := range config.ConfigFiles {
+		dict, _, err = loadYamlFile(ctx, file, opts, workingDir, environment, ct, dict, included)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	if !opts.SkipDefaultValues {
+		dict, err = transform.SetDefaultValues(dict)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	if !opts.SkipValidation {
+		if err := validation.Validate(dict); err != nil {
+			return nil, err
+		}
+	}
+
+	if opts.ResolvePaths {
+		var remotes []paths.RemoteResource
+		for _, loader := range opts.RemoteResourceLoaders() {
+			remotes = append(remotes, loader.Accept)
+		}
+		err = paths.ResolveRelativePaths(dict, config.WorkingDir, remotes)
+		if err != nil {
+			return nil, err
+		}
+	}
+	ResolveEnvironment(dict, config.Environment)
+
+	return dict, nil
+}
+
+func loadYamlFile(ctx context.Context, file types.ConfigFile, opts *Options, workingDir string, environment types.Mapping, ct *cycleTracker, dict map[string]interface{}, included []string) (map[string]interface{}, PostProcessor, error) {
+	ctx = context.WithValue(ctx, consts.ComposeFileKey{}, file.Filename)
+	if file.Content == nil && file.Config == nil {
+		content, err := os.ReadFile(file.Filename)
+		if err != nil {
+			return nil, nil, err
+		}
+		file.Content = content
+	}
+
+	processRawYaml := func(raw interface{}, processors ...PostProcessor) error {
+		converted, err := convertToStringKeysRecursive(raw, "")
+		if err != nil {
+			return err
+		}
+		cfg, ok := converted.(map[string]interface{})
+		if !ok {
+			return errors.New("Top-level object must be a mapping")
+		}
+
+		if opts.Interpolate != nil && !opts.SkipInterpolation {
+			cfg, err = interp.Interpolate(cfg, *opts.Interpolate)
+			if err != nil {
+				return err
+			}
+		}
+
+		fixEmptyNotNull(cfg)
+
+		if !opts.SkipExtends {
+			err = ApplyExtends(ctx, cfg, opts, ct, processors...)
+			if err != nil {
+				return err
+			}
+		}
+
+		for _, processor := range processors {
+			if err := processor.Apply(dict); err != nil {
+				return err
+			}
+		}
+
+		if !opts.SkipInclude {
+			included = append(included, file.Filename)
+			err = ApplyInclude(ctx, workingDir, environment, cfg, opts, included)
+			if err != nil {
+				return err
+			}
+		}
+
+		dict, err = override.Merge(dict, cfg)
+		if err != nil {
+			return err
+		}
+
+		dict, err = override.EnforceUnicity(dict)
+		if err != nil {
+			return err
+		}
+
+		if !opts.SkipValidation {
+			if err := schema.Validate(dict); err != nil {
+				return fmt.Errorf("validating %s: %w", file.Filename, err)
+			}
+			if _, ok := dict["version"]; ok {
+				opts.warnObsoleteVersion(file.Filename)
+				delete(dict, "version")
+			}
+		}
+
+		dict, err = transform.Canonical(dict, opts.SkipInterpolation)
+		if err != nil {
+			return err
+		}
+
+		dict = OmitEmpty(dict)
+
+		// Canonical transformation can reveal duplicates, typically as ports can be a range and conflict with an override
+		dict, err = override.EnforceUnicity(dict)
+		return err
+	}
+
+	var processor PostProcessor
+	if file.Config == nil {
+		r := bytes.NewReader(file.Content)
+		decoder := yaml.NewDecoder(r)
+		for {
+			var raw interface{}
+			reset := &ResetProcessor{target: &raw}
+			err := decoder.Decode(reset)
+			if err != nil && errors.Is(err, io.EOF) {
+				break
+			}
+			if err != nil {
+				return nil, nil, err
+			}
+			processor = reset
+			if err := processRawYaml(raw, processor); err != nil {
+				return nil, nil, err
+			}
+		}
+	} else {
+		if err := processRawYaml(file.Config); err != nil {
+			return nil, nil, err
+		}
+	}
+	return dict, processor, nil
+}
+
+func load(ctx context.Context, configDetails types.ConfigDetails, opts *Options, loaded []string) (map[string]interface{}, error) {
+	mainFile := configDetails.ConfigFiles[0].Filename
+	for _, f := range loaded {
+		if f == mainFile {
+			loaded = append(loaded, mainFile)
+			return nil, fmt.Errorf("include cycle detected:\n%s\n include %s", loaded[0], strings.Join(loaded[1:], "\n include "))
+		}
+	}
+	loaded = append(loaded, mainFile)
+
+	dict, err := loadYamlModel(ctx, configDetails, opts, &cycleTracker{}, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	if len(dict) == 0 {
+		return nil, errors.New("empty compose file")
+	}
+
+	if opts.projectName == "" {
+		return nil, errors.New("project name must not be empty")
+	}
+
+	if !opts.SkipNormalization {
+		dict["name"] = opts.projectName
+		dict, err = Normalize(dict, configDetails.Environment)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	return dict, nil
+}
+
+// modelToProject binds a canonical yaml dict into compose-go structs
+func modelToProject(dict map[string]interface{}, opts *Options, configDetails types.ConfigDetails) (*types.Project, error) {
+	project := &types.Project{
+		Name:        opts.projectName,
+		WorkingDir:  configDetails.WorkingDir,
+		Environment: configDetails.Environment,
+	}
+	delete(dict, "name") // project name set by yaml must be identified by caller as opts.projectName
+
+	var err error
+	dict, err = processExtensions(dict, tree.NewPath(), opts.KnownExtensions)
+	if err != nil {
+		return nil, err
+	}
+
+	err = Transform(dict, project)
+	if err != nil {
+		return nil, err
+	}
+
+	if opts.ConvertWindowsPaths {
+		for i, service := range project.Services {
+			for j, volume := range service.Volumes {
+				service.Volumes[j] = convertVolumePath(volume)
+			}
+			project.Services[i] = service
+		}
+	}
+
+	if project, err = project.WithProfiles(opts.Profiles); err != nil {
+		return nil, err
+	}
+
+	if !opts.SkipConsistencyCheck {
+		err := checkConsistency(project)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	if !opts.SkipResolveEnvironment {
+		project, err = project.WithServicesEnvironmentResolved(opts.discardEnvFiles)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	project, err = project.WithServicesLabelsResolved(opts.discardEnvFiles)
+	if err != nil {
+		return nil, err
+	}
+
+	return project, nil
+}
+
+func InvalidProjectNameErr(v string) error {
+	return fmt.Errorf(
+		"invalid project name %q: must consist only of lowercase alphanumeric characters, hyphens, and underscores as well as start with a letter or number",
+		v,
+	)
+}
+
+// projectName determines the canonical name to use for the project considering
+// the loader Options as well as `name` fields in Compose YAML fields (which
+// also support interpolation).
+func projectName(details *types.ConfigDetails, opts *Options) error {
+	defer func() {
+		if details.Environment == nil {
+			details.Environment = map[string]string{}
+		}
+		details.Environment[consts.ComposeProjectName] = opts.projectName
+	}()
+
+	if opts.projectNameImperativelySet {
+		if NormalizeProjectName(opts.projectName) != opts.projectName {
+			return InvalidProjectNameErr(opts.projectName)
+		}
+		return nil
+	}
+
+	type named struct {
+		Name string `yaml:"name"`
+	}
+
+	// if user did NOT provide a name explicitly, then see if one is defined
+	// in any of the config files
+	var pjNameFromConfigFile string
+	for _, configFile := range details.ConfigFiles {
+		content := configFile.Content
+		if content == nil {
+			// This can be hit when Filename is set but Content is not. One
+			// example is when using ToConfigFiles().
+			d, err := os.ReadFile(configFile.Filename)
+			if err != nil {
+				return fmt.Errorf("failed to read file %q: %w", configFile.Filename, err)
+			}
+			content = d
+			configFile.Content = d
+		}
+		var n named
+		r := bytes.NewReader(content)
+		decoder := yaml.NewDecoder(r)
+		for {
+			err := decoder.Decode(&n)
+			if err != nil && errors.Is(err, io.EOF) {
+				break
+			}
+			if err != nil {
+				// HACK: the way that loading is currently structured, this is
+				// a duplicative parse just for the `name`. if it fails, we
+				// give up but don't return the error, knowing that it'll get
+				// caught downstream for us
+				break
+			}
+			if n.Name != "" {
+				pjNameFromConfigFile = n.Name
+			}
+		}
+	}
+	if !opts.SkipInterpolation {
+		interpolated, err := interp.Interpolate(
+			map[string]interface{}{"name": pjNameFromConfigFile},
+			*opts.Interpolate,
+		)
+		if err != nil {
+			return err
+		}
+		pjNameFromConfigFile = interpolated["name"].(string)
+	}
+	pjNameFromConfigFile = NormalizeProjectName(pjNameFromConfigFile)
+	if pjNameFromConfigFile != "" {
+		opts.projectName = pjNameFromConfigFile
+	}
+	return nil
+}
+
+func NormalizeProjectName(s string) string {
+	r := regexp.MustCompile("[a-z0-9_-]")
+	s = strings.ToLower(s)
+	s = strings.Join(r.FindAllString(s, -1), "")
+	return strings.TrimLeft(s, "_-")
+}
+
+var userDefinedKeys = []tree.Path{
+	"services",
+	"services.*.depends_on",
+	"volumes",
+	"networks",
+	"secrets",
+	"configs",
+}
+
+func processExtensions(dict map[string]any, p tree.Path, extensions map[string]any) (map[string]interface{}, error) {
+	extras := map[string]any{}
+	var err error
+	for key, value := range dict {
+		skip := false
+		for _, uk := range userDefinedKeys {
+			if p.Matches(uk) {
+				skip = true
+				break
+			}
+		}
+		if !skip && strings.HasPrefix(key, "x-") {
+			extras[key] = value
+			delete(dict, key)
+			continue
+		}
+		switch v := value.(type) {
+		case map[string]interface{}:
+			dict[key], err = processExtensions(v, p.Next(key), extensions)
+			if err != nil {
+				return nil, err
+			}
+		case []interface{}:
+			for i, e := range v {
+				if m, ok := e.(map[string]interface{}); ok {
+					v[i], err = processExtensions(m, p.Next(strconv.Itoa(i)), extensions)
+					if err != nil {
+						return nil, err
+					}
+				}
+			}
+		}
+	}
+	for name, val := range extras {
+		if typ, ok := extensions[name]; ok {
+			target := reflect.New(reflect.TypeOf(typ)).Elem().Interface()
+			err = Transform(val, &target)
+			if err != nil {
+				return nil, err
+			}
+			extras[name] = target
+		}
+	}
+	if len(extras) > 0 {
+		dict[consts.Extensions] = extras
+	}
+	return dict, nil
+}
+
+// Transform converts the source into the target struct with compose types transformer
+// and the specified transformers if any.
+func Transform(source interface{}, target interface{}) error {
+	data := mapstructure.Metadata{}
+	config := &mapstructure.DecoderConfig{
+		DecodeHook: mapstructure.ComposeDecodeHookFunc(
+			nameServices,
+			decoderHook,
+			cast,
+			secretConfigDecoderHook,
+		),
+		Result:   target,
+		TagName:  "yaml",
+		Metadata: &data,
+	}
+	decoder, err := mapstructure.NewDecoder(config)
+	if err != nil {
+		return err
+	}
+	return decoder.Decode(source)
+}
+
+// nameServices creates an implicit `name` key for convenient access to a service
+func nameServices(from reflect.Value, to reflect.Value) (interface{}, error) {
+	if to.Type() == reflect.TypeOf(types.Services{}) {
+		nameK := reflect.ValueOf("name")
+		iter := from.MapRange()
+		for iter.Next() {
+			name := iter.Key()
+			elem := iter.Value()
+			elem.Elem().SetMapIndex(nameK, name)
+		}
+	}
+	return from.Interface(), nil
+}
+
+func secretConfigDecoderHook(from, to reflect.Type, data interface{}) (interface{}, error) {
+	// Check if the input is a map and we're decoding into a SecretConfig
+	if from.Kind() == reflect.Map && to == reflect.TypeOf(types.SecretConfig{}) {
+		if v, ok := data.(map[string]interface{}); ok {
+			if ext, ok := v[consts.Extensions].(map[string]interface{}); ok {
+				if val, ok := ext[types.SecretConfigXValue].(string); ok {
+					// Return a map with the Content field populated
+					v["Content"] = val
+					delete(ext, types.SecretConfigXValue)
+
+					if len(ext) == 0 {
+						delete(v, consts.Extensions)
+					}
+				}
+			}
+		}
+	}
+
+	// Return the original data so the rest is handled by default mapstructure logic
+	return data, nil
+}
+
+// keys need to be converted to strings for jsonschema
+func convertToStringKeysRecursive(value interface{}, keyPrefix string) (interface{}, error) {
+	if mapping, ok := value.(map[string]interface{}); ok {
+		for key, entry := range mapping {
+			var newKeyPrefix string
+			if keyPrefix == "" {
+				newKeyPrefix = key
+			} else {
+				newKeyPrefix = fmt.Sprintf("%s.%s", keyPrefix, key)
+			}
+			convertedEntry, err := convertToStringKeysRecursive(entry, newKeyPrefix)
+			if err != nil {
+				return nil, err
+			}
+			mapping[key] = convertedEntry
+		}
+		return mapping, nil
+	}
+	if mapping, ok := value.(map[interface{}]interface{}); ok {
+		dict := make(map[string]interface{})
+		for key, entry := range mapping {
+			str, ok := key.(string)
+			if !ok {
+				return nil, formatInvalidKeyError(keyPrefix, key)
+			}
+			var newKeyPrefix string
+			if keyPrefix == "" {
+				newKeyPrefix = str
+			} else {
+				newKeyPrefix = fmt.Sprintf("%s.%s", keyPrefix, str)
+			}
+			convertedEntry, err := convertToStringKeysRecursive(entry, newKeyPrefix)
+			if err != nil {
+				return nil, err
+			}
+			dict[str] = convertedEntry
+		}
+		return dict, nil
+	}
+	if list, ok := value.([]interface{}); ok {
+		var convertedList []interface{}
+		for index, entry := range list {
+			newKeyPrefix := fmt.Sprintf("%s[%d]", keyPrefix, index)
+			convertedEntry, err := convertToStringKeysRecursive(entry, newKeyPrefix)
+			if err != nil {
+				return nil, err
+			}
+			convertedList = append(convertedList, convertedEntry)
+		}
+		return convertedList, nil
+	}
+	return value, nil
+}
+
+func formatInvalidKeyError(keyPrefix string, key interface{}) error {
+	var location string
+	if keyPrefix == "" {
+		location = "at top level"
+	} else {
+		location = fmt.Sprintf("in %s", keyPrefix)
+	}
+	return fmt.Errorf("Non-string key %s: %#v", location, key)
+}
+
+// Windows paths, c:\\my\\path\\shiny, need to be changed to be compatible with
+// the Engine. Volume paths are expected to be linux style /c/my/path/shiny/
+func convertVolumePath(volume types.ServiceVolumeConfig) types.ServiceVolumeConfig {
+	volumeName := strings.ToLower(filepath.VolumeName(volume.Source))
+	if len(volumeName) != 2 {
+		return volume
+	}
+
+	convertedSource := fmt.Sprintf("/%c%s", volumeName[0], volume.Source[len(volumeName):])
+	convertedSource = strings.ReplaceAll(convertedSource, "\\", "/")
+
+	volume.Source = convertedSource
+	return volume
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/loader/mapstructure.go 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/loader/mapstructure.go
--- 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/loader/mapstructure.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/loader/mapstructure.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,79 @@
+/*
+   Copyright 2020 The Compose Specification Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package loader
+
+import (
+	"reflect"
+	"strconv"
+)
+
+// comparable to yaml.Unmarshaler, decoder allows a type to define its own custom logic to convert a value
+// see https://github.com/mitchellh/mapstructure/pull/294
+type decoder interface {
+	DecodeMapstructure(interface{}) error
+}
+
+// see https://github.com/mitchellh/mapstructure/issues/115#issuecomment-735287466
+// adapted to support types derived from built-in types, as DecodeMapstructure would not be able to mutate internal
+// value, so we need to invoke the DecodeMapstructure defined on the pointer to the type
+func decoderHook(from reflect.Value, to reflect.Value) (interface{}, error) {
+	// If the destination implements the decoder interface
+	u, ok := to.Interface().(decoder)
+	if !ok {
+		// for non-struct types we need to invoke func (*type) DecodeMapstructure()
+		if to.CanAddr() {
+			pto := to.Addr()
+			u, ok = pto.Interface().(decoder)
+		}
+		if !ok {
+			return from.Interface(), nil
+		}
+	}
+	// If it is nil and a pointer, create and assign the target value first
+	if to.Type().Kind() == reflect.Ptr && to.IsNil() {
+		to.Set(reflect.New(to.Type().Elem()))
+		u = to.Interface().(decoder)
+	}
+	// Call the custom DecodeMapstructure method
+	if err := u.DecodeMapstructure(from.Interface()); err != nil {
+		return to.Interface(), err
+	}
+	return to.Interface(), nil
+}
+
+func cast(from reflect.Value, to reflect.Value) (interface{}, error) {
+	switch from.Type().Kind() {
+	case reflect.String:
+		switch to.Kind() {
+		case reflect.Bool:
+			return toBoolean(from.String())
+		case reflect.Int:
+			return toInt(from.String())
+		case reflect.Int64:
+			return toInt64(from.String())
+		case reflect.Float32:
+			return toFloat32(from.String())
+		case reflect.Float64:
+			return toFloat(from.String())
+		}
+	case reflect.Int:
+		if to.Kind() == reflect.String {
+			return strconv.FormatInt(from.Int(), 10), nil
+		}
+	}
+	return from.Interface(), nil
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/loader/normalize.go 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/loader/normalize.go
--- 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/loader/normalize.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/loader/normalize.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,263 @@
+/*
+   Copyright 2020 The Compose Specification Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package loader
+
+import (
+	"fmt"
+	"path"
+	"strconv"
+	"strings"
+
+	"github.com/compose-spec/compose-go/v2/types"
+)
+
+// Normalize compose project by moving deprecated attributes to their canonical position and injecting implicit defaults
+func Normalize(dict map[string]any, env types.Mapping) (map[string]any, error) {
+	normalizeNetworks(dict)
+
+	if d, ok := dict["services"]; ok {
+		services := d.(map[string]any)
+		for name, s := range services {
+			service := s.(map[string]any)
+
+			if service["pull_policy"] == types.PullPolicyIfNotPresent {
+				service["pull_policy"] = types.PullPolicyMissing
+			}
+
+			fn := func(s string) (string, bool) {
+				v, ok := env[s]
+				return v, ok
+			}
+
+			if b, ok := service["build"]; ok {
+				build := b.(map[string]any)
+				if build["context"] == nil {
+					build["context"] = "."
+				}
+				if build["dockerfile"] == nil && build["dockerfile_inline"] == nil {
+					build["dockerfile"] = "Dockerfile"
+				}
+
+				if a, ok := build["args"]; ok {
+					build["args"], _ = resolve(a, fn, false)
+				}
+
+				service["build"] = build
+			}
+
+			if e, ok := service["environment"]; ok {
+				service["environment"], _ = resolve(e, fn, true)
+			}
+
+			var dependsOn map[string]any
+			if d, ok := service["depends_on"]; ok {
+				dependsOn = d.(map[string]any)
+			} else {
+				dependsOn = map[string]any{}
+			}
+			if l, ok := service["links"]; ok {
+				links := l.([]any)
+				for _, e := range links {
+					link := e.(string)
+					parts := strings.Split(link, ":")
+					if len(parts) == 2 {
+						link = parts[0]
+					}
+					if _, ok := dependsOn[link]; !ok {
+						dependsOn[link] = map[string]any{
+							"condition": types.ServiceConditionStarted,
+							"restart":   true,
+							"required":  true,
+						}
+					}
+				}
+			}
+
+			for _, namespace := range []string{"network_mode", "ipc", "pid", "uts", "cgroup"} {
+				if n, ok := service[namespace]; ok {
+					ref := n.(string)
+					if strings.HasPrefix(ref, types.ServicePrefix) {
+						shared := ref[len(types.ServicePrefix):]
+						if _, ok := dependsOn[shared]; !ok {
+							dependsOn[shared] = map[string]any{
+								"condition": types.ServiceConditionStarted,
+								"restart":   true,
+								"required":  true,
+							}
+						}
+					}
+				}
+			}
+
+			if v, ok := service["volumes"]; ok {
+				volumes := v.([]any)
+				for i, volume := range volumes {
+					vol := volume.(map[string]any)
+					target := vol["target"].(string)
+					vol["target"] = path.Clean(target)
+					volumes[i] = vol
+				}
+				service["volumes"] = volumes
+			}
+
+			if n, ok := service["volumes_from"]; ok {
+				volumesFrom := n.([]any)
+				for _, v := range volumesFrom {
+					vol := v.(string)
+					if !strings.HasPrefix(vol, types.ContainerPrefix) {
+						spec := strings.Split(vol, ":")
+						if _, ok := dependsOn[spec[0]]; !ok {
+							dependsOn[spec[0]] = map[string]any{
+								"condition": types.ServiceConditionStarted,
+								"restart":   false,
+								"required":  true,
+							}
+						}
+					}
+				}
+			}
+			if len(dependsOn) > 0 {
+				service["depends_on"] = dependsOn
+			}
+			services[name] = service
+		}
+
+		dict["services"] = services
+	}
+	setNameFromKey(dict)
+
+	return dict, nil
+}
+
+func normalizeNetworks(dict map[string]any) {
+	var networks map[string]any
+	if n, ok := dict["networks"]; ok {
+		networks = n.(map[string]any)
+	} else {
+		networks = map[string]any{}
+	}
+
+	// implicit `default` network must be introduced only if actually used by some service
+	usesDefaultNetwork := false
+
+	if s, ok := dict["services"]; ok {
+		services := s.(map[string]any)
+		for name, se := range services {
+			service := se.(map[string]any)
+			if _, ok := service["network_mode"]; ok {
+				continue
+			}
+			if n, ok := service["networks"]; !ok {
+				// If none explicitly declared, service is connected to default network
+				service["networks"] = map[string]any{"default": nil}
+				usesDefaultNetwork = true
+			} else {
+				net := n.(map[string]any)
+				if len(net) == 0 {
+					// networks section declared but empty (corner case)
+					service["networks"] = map[string]any{"default": nil}
+					usesDefaultNetwork = true
+				} else if _, ok := net["default"]; ok {
+					usesDefaultNetwork = true
+				}
+			}
+			services[name] = service
+		}
+		dict["services"] = services
+	}
+
+	if _, ok := networks["default"]; !ok && usesDefaultNetwork {
+		// If not declared explicitly, Compose model involves an implicit "default" network
+		networks["default"] = nil
+	}
+
+	if len(networks) > 0 {
+		dict["networks"] = networks
+	}
+}
+
+func resolve(a any, fn func(s string) (string, bool), keepEmpty bool) (any, bool) {
+	switch v := a.(type) {
+	case []any:
+		var resolved []any
+		for _, val := range v {
+			if r, ok := resolve(val, fn, keepEmpty); ok {
+				resolved = append(resolved, r)
+			}
+		}
+		return resolved, true
+	case map[string]any:
+		resolved := map[string]any{}
+		for key, val := range v {
+			if val != nil {
+				resolved[key] = val
+				continue
+			}
+			if s, ok := fn(key); ok {
+				resolved[key] = s
+			} else if keepEmpty {
+				resolved[key] = nil
+			}
+		}
+		return resolved, true
+	case string:
+		if !strings.Contains(v, "=") {
+			if val, ok := fn(v); ok {
+				return fmt.Sprintf("%s=%s", v, val), true
+			}
+			if keepEmpty {
+				return v, true
+			}
+			return "", false
+		}
+		return v, true
+	default:
+		return v, false
+	}
+}
+
+// Resources with no explicit name are actually named by their key in map
+func setNameFromKey(dict map[string]any) {
+	for _, r := range []string{"networks", "volumes", "configs", "secrets"} {
+		a, ok := dict[r]
+		if !ok {
+			continue
+		}
+		toplevel := a.(map[string]any)
+		for key, r := range toplevel {
+			var resource map[string]any
+			if r != nil {
+				resource = r.(map[string]any)
+			} else {
+				resource = map[string]any{}
+			}
+			if resource["name"] == nil {
+				if x, ok := resource["external"]; ok && isTrue(x) {
+					resource["name"] = key
+				} else {
+					resource["name"] = fmt.Sprintf("%s_%s", dict["name"], key)
+				}
+			}
+			toplevel[key] = resource
+		}
+	}
+}
+
+func isTrue(x any) bool {
+	parseBool, _ := strconv.ParseBool(fmt.Sprint(x))
+	return parseBool
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/loader/omitEmpty.go 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/loader/omitEmpty.go
--- 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/loader/omitEmpty.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/loader/omitEmpty.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,74 @@
+/*
+   Copyright 2020 The Compose Specification Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package loader
+
+import "github.com/compose-spec/compose-go/v2/tree"
+
+var omitempty = []tree.Path{
+	"services.*.dns"}
+
+// OmitEmpty removes empty attributes which are irrelevant when unset
+func OmitEmpty(yaml map[string]any) map[string]any {
+	cleaned := omitEmpty(yaml, tree.NewPath())
+	return cleaned.(map[string]any)
+}
+
+func omitEmpty(data any, p tree.Path) any {
+	switch v := data.(type) {
+	case map[string]any:
+		for k, e := range v {
+			if isEmpty(e) && mustOmit(p) {
+				delete(v, k)
+				continue
+			}
+
+			v[k] = omitEmpty(e, p.Next(k))
+		}
+		return v
+	case []any:
+		var c []any
+		for _, e := range v {
+			if isEmpty(e) && mustOmit(p) {
+				continue
+			}
+
+			c = append(c, omitEmpty(e, p.Next("[]")))
+		}
+		return c
+	default:
+		return data
+	}
+}
+
+func mustOmit(p tree.Path) bool {
+	for _, pattern := range omitempty {
+		if p.Matches(pattern) {
+			return true
+		}
+	}
+	return false
+}
+
+func isEmpty(e any) bool {
+	if e == nil {
+		return true
+	}
+	if v, ok := e.(string); ok && v == "" {
+		return true
+	}
+	return false
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/loader/paths.go 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/loader/paths.go
--- 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/loader/paths.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/loader/paths.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,74 @@
+/*
+   Copyright 2020 The Compose Specification Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package loader
+
+import (
+	"os"
+	"path/filepath"
+	"strings"
+
+	"github.com/compose-spec/compose-go/v2/types"
+)
+
+// ResolveRelativePaths resolves relative paths based on project WorkingDirectory
+func ResolveRelativePaths(project *types.Project) error {
+	absWorkingDir, err := filepath.Abs(project.WorkingDir)
+	if err != nil {
+		return err
+	}
+	project.WorkingDir = absWorkingDir
+
+	absComposeFiles, err := absComposeFiles(project.ComposeFiles)
+	if err != nil {
+		return err
+	}
+	project.ComposeFiles = absComposeFiles
+	return nil
+}
+
+func absPath(workingDir string, filePath string) string {
+	if strings.HasPrefix(filePath, "~") {
+		home, _ := os.UserHomeDir()
+		return filepath.Join(home, filePath[1:])
+	}
+	if filepath.IsAbs(filePath) {
+		return filePath
+	}
+	return filepath.Join(workingDir, filePath)
+}
+
+func absComposeFiles(composeFiles []string) ([]string, error) {
+	for i, composeFile := range composeFiles {
+		absComposefile, err := filepath.Abs(composeFile)
+		if err != nil {
+			return nil, err
+		}
+		composeFiles[i] = absComposefile
+	}
+	return composeFiles, nil
+}
+
+func resolvePaths(basePath string, in types.StringList) types.StringList {
+	if in == nil {
+		return nil
+	}
+	ret := make(types.StringList, len(in))
+	for i := range in {
+		ret[i] = absPath(basePath, in[i])
+	}
+	return ret
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/loader/reset.go 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/loader/reset.go
--- 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/loader/reset.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/loader/reset.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,190 @@
+/*
+   Copyright 2020 The Compose Specification Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package loader
+
+import (
+	"fmt"
+	"strconv"
+	"strings"
+
+	"github.com/compose-spec/compose-go/v2/tree"
+	"gopkg.in/yaml.v3"
+)
+
+type ResetProcessor struct {
+	target       interface{}
+	paths        []tree.Path
+	visitedNodes map[*yaml.Node][]string
+}
+
+// UnmarshalYAML implement yaml.Unmarshaler
+func (p *ResetProcessor) UnmarshalYAML(value *yaml.Node) error {
+	p.visitedNodes = make(map[*yaml.Node][]string)
+	resolved, err := p.resolveReset(value, tree.NewPath())
+	p.visitedNodes = nil
+	if err != nil {
+		return err
+	}
+	return resolved.Decode(p.target)
+}
+
+// resolveReset detects `!reset` tag being set on yaml nodes and records its position in the yaml tree
+func (p *ResetProcessor) resolveReset(node *yaml.Node, path tree.Path) (*yaml.Node, error) {
+	pathStr := path.String()
+	// If the path contains "<<", removing the "<<" element and merging the path
+	if strings.Contains(pathStr, ".<<") {
+		path = tree.NewPath(strings.Replace(pathStr, ".<<", "", 1))
+	}
+
+	// If the node is an alias, We need to process the alias field in order to consider the !override and !reset tags
+	if node.Kind == yaml.AliasNode {
+		if err := p.checkForCycle(node.Alias, path); err != nil {
+			return nil, err
+		}
+
+		return p.resolveReset(node.Alias, path)
+	}
+
+	if node.Tag == "!reset" {
+		p.paths = append(p.paths, path)
+		return nil, nil
+	}
+	if node.Tag == "!override" {
+		p.paths = append(p.paths, path)
+		return node, nil
+	}
+	switch node.Kind {
+	case yaml.SequenceNode:
+		var nodes []*yaml.Node
+		for idx, v := range node.Content {
+			next := path.Next(strconv.Itoa(idx))
+			resolved, err := p.resolveReset(v, next)
+			if err != nil {
+				return nil, err
+			}
+			if resolved != nil {
+				nodes = append(nodes, resolved)
+			}
+		}
+		node.Content = nodes
+	case yaml.MappingNode:
+		var key string
+		var nodes []*yaml.Node
+		for idx, v := range node.Content {
+			if idx%2 == 0 {
+				key = v.Value
+			} else {
+				resolved, err := p.resolveReset(v, path.Next(key))
+				if err != nil {
+					return nil, err
+				}
+				if resolved != nil {
+					nodes = append(nodes, node.Content[idx-1], resolved)
+				}
+			}
+		}
+		node.Content = nodes
+	}
+	return node, nil
+}
+
+// Apply finds the go attributes matching recorded paths and reset them to zero value
+func (p *ResetProcessor) Apply(target any) error {
+	return p.applyNullOverrides(target, tree.NewPath())
+}
+
+// applyNullOverrides set val to Zero if it matches any of the recorded paths
+func (p *ResetProcessor) applyNullOverrides(target any, path tree.Path) error {
+	switch v := target.(type) {
+	case map[string]any:
+	KEYS:
+		for k, e := range v {
+			next := path.Next(k)
+			for _, pattern := range p.paths {
+				if next.Matches(pattern) {
+					delete(v, k)
+					continue KEYS
+				}
+			}
+			err := p.applyNullOverrides(e, next)
+			if err != nil {
+				return err
+			}
+		}
+	case []any:
+	ITER:
+		for i, e := range v {
+			next := path.Next(fmt.Sprintf("[%d]", i))
+			for _, pattern := range p.paths {
+				if next.Matches(pattern) {
+					continue ITER
+					// TODO(ndeloof) support removal from sequence
+				}
+			}
+			err := p.applyNullOverrides(e, next)
+			if err != nil {
+				return err
+			}
+		}
+	}
+	return nil
+}
+
+func (p *ResetProcessor) checkForCycle(node *yaml.Node, path tree.Path) error {
+	paths := p.visitedNodes[node]
+	pathStr := path.String()
+
+	for _, prevPath := range paths {
+		// If we're visiting the exact same path, it's not a cycle
+		if pathStr == prevPath {
+			continue
+		}
+
+		// If either path is using a merge key, it's legitimate YAML merging
+		if strings.Contains(prevPath, "<<") || strings.Contains(pathStr, "<<") {
+			continue
+		}
+
+		// Only consider it a cycle if one path is contained within the other
+		// and they're not in different service definitions
+		if (strings.HasPrefix(pathStr, prevPath+".") ||
+			strings.HasPrefix(prevPath, pathStr+".")) &&
+			!areInDifferentServices(pathStr, prevPath) {
+			return fmt.Errorf("cycle detected: node at path %s references node at path %s", pathStr, prevPath)
+		}
+	}
+
+	p.visitedNodes[node] = append(paths, pathStr)
+	return nil
+}
+
+// areInDifferentServices checks if two paths are in different service definitions
+func areInDifferentServices(path1, path2 string) bool {
+	// Split paths into components
+	parts1 := strings.Split(path1, ".")
+	parts2 := strings.Split(path2, ".")
+
+	// Look for the services component and compare the service names
+	for i := 0; i < len(parts1) && i < len(parts2); i++ {
+		if parts1[i] == "services" && i+1 < len(parts1) &&
+			parts2[i] == "services" && i+1 < len(parts2) {
+			// If they're different services, it's not a cycle
+			return parts1[i+1] != parts2[i+1]
+		}
+	}
+	return false
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/loader/validate.go 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/loader/validate.go
--- 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/loader/validate.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/loader/validate.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,176 @@
+/*
+   Copyright 2020 The Compose Specification Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package loader
+
+import (
+	"errors"
+	"fmt"
+	"strings"
+
+	"github.com/compose-spec/compose-go/v2/errdefs"
+	"github.com/compose-spec/compose-go/v2/graph"
+	"github.com/compose-spec/compose-go/v2/types"
+)
+
+// checkConsistency validates that a compose model is consistent
+func checkConsistency(project *types.Project) error {
+	for _, s := range project.Services {
+		if s.Build == nil && s.Image == "" {
+			return fmt.Errorf("service %q has neither an image nor a build context specified: %w", s.Name, errdefs.ErrInvalid)
+		}
+
+		if s.Build != nil {
+			if s.Build.DockerfileInline != "" && s.Build.Dockerfile != "" {
+				return fmt.Errorf("service %q declares mutually exclusive dockerfile and dockerfile_inline: %w", s.Name, errdefs.ErrInvalid)
+			}
+
+			if len(s.Build.Platforms) > 0 && s.Platform != "" {
+				var found bool
+				for _, platform := range s.Build.Platforms {
+					if platform == s.Platform {
+						found = true
+						break
+					}
+				}
+				if !found {
+					return fmt.Errorf("service.build.platforms MUST include service.platform %q: %w", s.Platform, errdefs.ErrInvalid)
+				}
+			}
+		}
+
+		if s.NetworkMode != "" && len(s.Networks) > 0 {
+			return fmt.Errorf("service %s declares mutually exclusive `network_mode` and `networks`: %w", s.Name, errdefs.ErrInvalid)
+		}
+		for network := range s.Networks {
+			if _, ok := project.Networks[network]; !ok {
+				return fmt.Errorf("service %q refers to undefined network %s: %w", s.Name, network, errdefs.ErrInvalid)
+			}
+		}
+
+		if s.HealthCheck != nil && len(s.HealthCheck.Test) > 0 {
+			switch s.HealthCheck.Test[0] {
+			case "CMD", "CMD-SHELL", "NONE":
+			default:
+				return errors.New(`healthcheck.test must start either by "CMD", "CMD-SHELL" or "NONE"`)
+			}
+		}
+
+		for dependedService, cfg := range s.DependsOn {
+			if _, err := project.GetService(dependedService); err != nil {
+				if errors.Is(err, errdefs.ErrDisabled) && !cfg.Required {
+					continue
+				}
+				return fmt.Errorf("service %q depends on undefined service %q: %w", s.Name, dependedService, errdefs.ErrInvalid)
+			}
+		}
+
+		if strings.HasPrefix(s.NetworkMode, types.ServicePrefix) {
+			serviceName := s.NetworkMode[len(types.ServicePrefix):]
+			if _, err := project.GetServices(serviceName); err != nil {
+				return fmt.Errorf("service %q not found for network_mode 'service:%s'", serviceName, serviceName)
+			}
+		}
+
+		for _, volume := range s.Volumes {
+			if volume.Type == types.VolumeTypeVolume && volume.Source != "" { // non anonymous volumes
+				if _, ok := project.Volumes[volume.Source]; !ok {
+					return fmt.Errorf("service %q refers to undefined volume %s: %w", s.Name, volume.Source, errdefs.ErrInvalid)
+				}
+			}
+		}
+		if s.Build != nil {
+			for _, secret := range s.Build.Secrets {
+				if _, ok := project.Secrets[secret.Source]; !ok {
+					return fmt.Errorf("service %q refers to undefined build secret %s: %w", s.Name, secret.Source, errdefs.ErrInvalid)
+				}
+			}
+		}
+		for _, config := range s.Configs {
+			if _, ok := project.Configs[config.Source]; !ok {
+				return fmt.Errorf("service %q refers to undefined config %s: %w", s.Name, config.Source, errdefs.ErrInvalid)
+			}
+		}
+
+		for _, secret := range s.Secrets {
+			if _, ok := project.Secrets[secret.Source]; !ok {
+				return fmt.Errorf("service %q refers to undefined secret %s: %w", s.Name, secret.Source, errdefs.ErrInvalid)
+			}
+		}
+
+		if s.Scale != nil && s.Deploy != nil {
+			if s.Deploy.Replicas != nil && *s.Scale != *s.Deploy.Replicas {
+				return fmt.Errorf("services.%s: can't set distinct values on 'scale' and 'deploy.replicas': %w",
+					s.Name, errdefs.ErrInvalid)
+			}
+			s.Deploy.Replicas = s.Scale
+		}
+
+		if s.CPUS != 0 && s.Deploy != nil {
+			if s.Deploy.Resources.Limits != nil && s.Deploy.Resources.Limits.NanoCPUs.Value() != s.CPUS {
+				return fmt.Errorf("services.%s: can't set distinct values on 'cpus' and 'deploy.resources.limits.cpus': %w",
+					s.Name, errdefs.ErrInvalid)
+			}
+		}
+		if s.MemLimit != 0 && s.Deploy != nil {
+			if s.Deploy.Resources.Limits != nil && s.Deploy.Resources.Limits.MemoryBytes != s.MemLimit {
+				return fmt.Errorf("services.%s: can't set distinct values on 'mem_limit' and 'deploy.resources.limits.memory': %w",
+					s.Name, errdefs.ErrInvalid)
+			}
+		}
+		if s.MemReservation != 0 && s.Deploy != nil {
+			if s.Deploy.Resources.Reservations != nil && s.Deploy.Resources.Reservations.MemoryBytes != s.MemReservation {
+				return fmt.Errorf("services.%s: can't set distinct values on 'mem_reservation' and 'deploy.resources.reservations.memory': %w",
+					s.Name, errdefs.ErrInvalid)
+			}
+		}
+		if s.PidsLimit != 0 && s.Deploy != nil {
+			if s.Deploy.Resources.Limits != nil && s.Deploy.Resources.Limits.Pids != s.PidsLimit {
+				return fmt.Errorf("services.%s: can't set distinct values on 'pids_limit' and 'deploy.resources.limits.pids': %w",
+					s.Name, errdefs.ErrInvalid)
+			}
+		}
+
+		if s.GetScale() > 1 && s.ContainerName != "" {
+			attr := "scale"
+			if s.Scale == nil {
+				attr = "deploy.replicas"
+			}
+			return fmt.Errorf("services.%s: can't set container_name and %s as container name must be unique: %w", s.Name,
+				attr, errdefs.ErrInvalid)
+		}
+
+		if s.Develop != nil && s.Develop.Watch != nil {
+			for _, watch := range s.Develop.Watch {
+				if watch.Target == "" && watch.Action != types.WatchActionRebuild && watch.Action != types.WatchActionRestart {
+					return fmt.Errorf("services.%s.develop.watch: target is required for non-rebuild actions: %w", s.Name, errdefs.ErrInvalid)
+				}
+			}
+
+		}
+	}
+
+	for name, secret := range project.Secrets {
+		if secret.External {
+			continue
+		}
+		if secret.File == "" && secret.Environment == "" {
+			return fmt.Errorf("secret %q must declare either `file` or `environment`: %w", name, errdefs.ErrInvalid)
+		}
+	}
+
+	return graph.CheckCycle(project)
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/override/extends.go 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/override/extends.go
--- 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/override/extends.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/override/extends.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,27 @@
+/*
+   Copyright 2020 The Compose Specification Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package override
+
+import "github.com/compose-spec/compose-go/v2/tree"
+
+// ExtendService merges an `extends` base service definition with the
+// overriding service, reusing the generic yaml merge rules under a
+// synthetic "services.x" path so the service-specific merge rules apply.
+func ExtendService(base, override map[string]any) (map[string]any, error) {
+	yaml, err := mergeYaml(base, override, tree.NewPath("services.x"))
+	if err != nil {
+		return nil, err
+	}
+	return yaml.(map[string]any), nil
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/override/merge.go 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/override/merge.go
--- 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/override/merge.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/override/merge.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,292 @@
+/*
+   Copyright 2020 The Compose Specification Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package override
+
+import (
+	"cmp"
+	"fmt"
+	"strings"
+
+	"github.com/compose-spec/compose-go/v2/tree"
+	"golang.org/x/exp/slices"
+)
+
+// Merge applies overrides to a config model: `right` is the base yaml
+// tree and `left` carries the overriding values (see mergeYaml for the
+// per-attribute rules).
+func Merge(right, left map[string]any) (map[string]any, error) {
+	merged, err := mergeYaml(right, left, tree.NewPath())
+	if err != nil {
+		return nil, err
+	}
+	return merged.(map[string]any), nil
+}
+
+// merger combines a base value with an overriding value found at the
+// given yaml tree path.
+type merger func(any, any, tree.Path) (any, error)
+
+// mergeSpecials defines the custom rules applied by compose when merging yaml trees
+var mergeSpecials = map[tree.Path]merger{}
+
+// init registers the merge strategy for every attribute that doesn't
+// follow the default behavior (mappings merge by key, sequences append,
+// scalars override). Path patterns may use `*` wildcards.
+func init() {
+	mergeSpecials["networks.*.ipam.config"] = mergeIPAMConfig
+	mergeSpecials["networks.*.labels"] = mergeToSequence
+	mergeSpecials["volumes.*.labels"] = mergeToSequence
+	mergeSpecials["services.*.annotations"] = mergeToSequence
+	mergeSpecials["services.*.build"] = mergeBuild
+	mergeSpecials["services.*.build.args"] = mergeToSequence
+	mergeSpecials["services.*.build.additional_contexts"] = mergeToSequence
+	mergeSpecials["services.*.build.extra_hosts"] = mergeExtraHosts
+	mergeSpecials["services.*.build.labels"] = mergeToSequence
+	mergeSpecials["services.*.command"] = override
+	mergeSpecials["services.*.depends_on"] = mergeDependsOn
+	mergeSpecials["services.*.deploy.labels"] = mergeToSequence
+	mergeSpecials["services.*.dns"] = mergeToSequence
+	mergeSpecials["services.*.dns_opt"] = mergeToSequence
+	mergeSpecials["services.*.dns_search"] = mergeToSequence
+	mergeSpecials["services.*.entrypoint"] = override
+	mergeSpecials["services.*.env_file"] = mergeToSequence
+	mergeSpecials["services.*.label_file"] = mergeToSequence
+	mergeSpecials["services.*.environment"] = mergeToSequence
+	mergeSpecials["services.*.extra_hosts"] = mergeExtraHosts
+	mergeSpecials["services.*.healthcheck.test"] = override
+	mergeSpecials["services.*.labels"] = mergeToSequence
+	mergeSpecials["services.*.logging"] = mergeLogging
+	mergeSpecials["services.*.networks"] = mergeNetworks
+	mergeSpecials["services.*.sysctls"] = mergeToSequence
+	mergeSpecials["services.*.tmpfs"] = mergeToSequence
+	mergeSpecials["services.*.ulimits.*"] = mergeUlimit
+}
+
+// mergeYaml merges map[string]any yaml trees handling special rules.
+// When no registered rule matches path p: a nil override keeps e, two
+// mappings merge recursively, two sequences append, and any other
+// override value replaces e wholesale. Mixing a mapping/sequence with a
+// different override type is an error.
+func mergeYaml(e any, o any, p tree.Path) (any, error) {
+	for pattern, merger := range mergeSpecials {
+		if p.Matches(pattern) {
+			merged, err := merger(e, o, p)
+			if err != nil {
+				return nil, err
+			}
+			return merged, nil
+		}
+	}
+	if o == nil {
+		return e, nil
+	}
+	switch value := e.(type) {
+	case map[string]any:
+		other, ok := o.(map[string]any)
+		if !ok {
+			return nil, fmt.Errorf("cannot override %s", p)
+		}
+		return mergeMappings(value, other, p)
+	case []any:
+		other, ok := o.([]any)
+		if !ok {
+			return nil, fmt.Errorf("cannot override %s", p)
+		}
+		return append(value, other...), nil
+	default:
+		return o, nil
+	}
+}
+
+// mergeMappings merges `other` into `mapping` in place, key by key.
+// Keys absent from `mapping`, and x- extension keys, are taken from
+// `other` wholesale; the rest recurse through mergeYaml.
+func mergeMappings(mapping map[string]any, other map[string]any, p tree.Path) (map[string]any, error) {
+	for k, v := range other {
+		e, ok := mapping[k]
+		if !ok || strings.HasPrefix(k, "x-") {
+			mapping[k] = v
+			continue
+		}
+		next := p.Next(k)
+		merged, err := mergeYaml(e, v, next)
+		if err != nil {
+			return nil, err
+		}
+		mapping[k] = merged
+	}
+	return mapping, nil
+}
+
+// logging driver options are merged only when both compose file define the same driver
+// (or when at least one side leaves the driver unset); otherwise the
+// overriding logging section replaces the original one.
+func mergeLogging(c any, o any, p tree.Path) (any, error) {
+	config := c.(map[string]any)
+	other := o.(map[string]any)
+	// we override logging config if source and override have the same driver set, or none.
+	// The locals were renamed: the original shadowed parameter `o` with the
+	// base driver value, which made the comparison hard to read.
+	overrideDriver, ok1 := other["driver"]
+	baseDriver, ok2 := config["driver"]
+	if overrideDriver == baseDriver || !ok1 || !ok2 {
+		return mergeMappings(config, other, p)
+	}
+	return other, nil
+}
+
+func mergeBuild(c any, o any, path tree.Path) (any, error) {
+	toBuild := func(c any) map[string]any {
+		switch v := c.(type) {
+		case string:
+			return map[string]any{
+				"context": v,
+			}
+		case map[string]any:
+			return v
+		}
+		return nil
+	}
+	return mergeMappings(toBuild(c), toBuild(o), path)
+}
+
+// mergeDependsOn merges depends_on declarations, first expanding the
+// short (list) syntax into the canonical mapping form with the implicit
+// service_started/required defaults. convertIntoMapping copies the
+// defaults per entry, so sharing one defaults map is safe.
+func mergeDependsOn(c any, o any, path tree.Path) (any, error) {
+	defaults := map[string]any{
+		"condition": "service_started",
+		"required":  true,
+	}
+	base := convertIntoMapping(c, defaults)
+	over := convertIntoMapping(o, defaults)
+	return mergeMappings(base, over, path)
+}
+
+// mergeNetworks merges service network attachments after expanding the
+// short (list) syntax into the canonical mapping form.
+func mergeNetworks(c any, o any, path tree.Path) (any, error) {
+	return mergeMappings(convertIntoMapping(c, nil), convertIntoMapping(o, nil), path)
+}
+
+// mergeExtraHosts concatenates both extra_hosts sequences, skipping any
+// override entry that is already present in the base sequence.
+func mergeExtraHosts(c any, o any, _ tree.Path) (any, error) {
+	base := convertIntoSequence(c)
+	extra := convertIntoSequence(o)
+	merged := base
+	for _, entry := range extra {
+		if !slices.Contains(base, entry) {
+			merged = append(merged, entry)
+		}
+	}
+	return merged, nil
+}
+
+// mergeToSequence converts both values to sequences and concatenates
+// them, base entries first.
+func mergeToSequence(c any, o any, _ tree.Path) (any, error) {
+	return append(convertIntoSequence(c), convertIntoSequence(o)...), nil
+}
+
+// convertIntoSequence normalizes a value into a []any sequence: a
+// mapping becomes "key=value" entries (a nil value yields the bare key,
+// a list value yields one entry per element), a string becomes a
+// one-element sequence, a sequence is returned as-is, anything else nil.
+func convertIntoSequence(value any) []any {
+	switch v := value.(type) {
+	case map[string]any:
+		var seq []any
+		for k, val := range v {
+			if val == nil {
+				seq = append(seq, k)
+			} else {
+				switch vl := val.(type) {
+				// if val is an array we need to add the key with each value one by one
+				case []any:
+					for _, vlv := range vl {
+						seq = append(seq, fmt.Sprintf("%s=%v", k, vlv))
+					}
+				default:
+					seq = append(seq, fmt.Sprintf("%s=%v", k, val))
+				}
+			}
+		}
+		// sort for a deterministic result, since map iteration order is random
+		slices.SortFunc(seq, func(a, b any) int {
+			return cmp.Compare(a.(string), b.(string))
+		})
+		return seq
+	case []any:
+		return v
+	case string:
+		return []any{v}
+	}
+	return nil
+}
+
+// mergeUlimit merges a single ulimits entry.
+// NOTE(review): the base value (first parameter) is discarded and `o` is
+// type-asserted twice, so when the override is a mapping this merges the
+// override with itself — effectively always returning `o`. It looks like
+// the first assertion was meant to target the base value; confirm the
+// intended merge semantics against upstream before changing.
+func mergeUlimit(_ any, o any, p tree.Path) (any, error) {
+	over, ismapping := o.(map[string]any)
+	if base, ok := o.(map[string]any); ok && ismapping {
+		return mergeMappings(base, over, p)
+	}
+	return o, nil
+}
+
+// mergeIPAMConfig merges network IPAM config sequences, using the subnet
+// as identity: override entries with a new subnet are appended once,
+// while entries sharing a subnet with a base config are deep-merged into
+// it (replacing any previously accumulated config for that subnet).
+func mergeIPAMConfig(c any, o any, path tree.Path) (any, error) {
+	var ipamConfigs []any
+	for _, original := range c.([]any) {
+		right := convertIntoMapping(original, nil)
+		for _, override := range o.([]any) {
+			left := convertIntoMapping(override, nil)
+			if left["subnet"] != right["subnet"] {
+				// check if left is already in ipamConfigs, add it if not and continue with the next config
+				if !slices.ContainsFunc(ipamConfigs, func(a any) bool {
+					return a.(map[string]any)["subnet"] == left["subnet"]
+				}) {
+					ipamConfigs = append(ipamConfigs, left)
+					continue
+				}
+			}
+			merged, err := mergeMappings(right, left, path)
+			if err != nil {
+				return nil, err
+			}
+			// find index of potential previous config with the same subnet in ipamConfigs
+			indexIfExist := slices.IndexFunc(ipamConfigs, func(a any) bool {
+				return a.(map[string]any)["subnet"] == merged["subnet"]
+			})
+			// if a previous config is already in ipamConfigs, replace it
+			if indexIfExist >= 0 {
+				ipamConfigs[indexIfExist] = merged
+			} else {
+				// or add the new config to ipamConfigs
+				ipamConfigs = append(ipamConfigs, merged)
+			}
+		}
+	}
+	return ipamConfigs, nil
+}
+
+func convertIntoMapping(a any, defaultValue map[string]any) map[string]any {
+	switch v := a.(type) {
+	case map[string]any:
+		return v
+	case []any:
+		converted := map[string]any{}
+		for _, s := range v {
+			if defaultValue == nil {
+				converted[s.(string)] = nil
+			} else {
+				// Create a new map for each key
+				converted[s.(string)] = copyMap(defaultValue)
+			}
+		}
+		return converted
+	}
+	return nil
+}
+
+// copyMap returns a shallow copy of m.
+func copyMap(m map[string]any) map[string]any {
+	clone := make(map[string]any, len(m))
+	for key, value := range m {
+		clone[key] = value
+	}
+	return clone
+}
+
+// override is the merger used for attributes that cannot be combined:
+// the overriding value fully replaces the original one.
+func override(_ any, other any, _ tree.Path) (any, error) {
+	return other, nil
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/override/uncity.go 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/override/uncity.go
--- 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/override/uncity.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/override/uncity.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,229 @@
+/*
+   Copyright 2020 The Compose Specification Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package override
+
+import (
+	"fmt"
+	"strconv"
+	"strings"
+
+	"github.com/compose-spec/compose-go/v2/format"
+	"github.com/compose-spec/compose-go/v2/tree"
+)
+
+// indexer computes the identity key for one entry of a yaml sequence.
+type indexer func(any, tree.Path) (string, error)
+
+// unique defines the indexers used by compose to detect and collapse
+// redefinitions of elements declared in yaml sequences.
+var unique = map[tree.Path]indexer{}
+
+// init registers, per path pattern, the indexer that identifies sequence
+// entries so EnforceUnicity can collapse redefinitions.
+func init() {
+	unique["networks.*.labels"] = keyValueIndexer
+	unique["networks.*.ipam.options"] = keyValueIndexer
+	unique["services.*.annotations"] = keyValueIndexer
+	unique["services.*.build.args"] = keyValueIndexer
+	unique["services.*.build.additional_contexts"] = keyValueIndexer
+	unique["services.*.build.platform"] = keyValueIndexer
+	unique["services.*.build.tags"] = keyValueIndexer
+	unique["services.*.build.labels"] = keyValueIndexer
+	unique["services.*.cap_add"] = keyValueIndexer
+	unique["services.*.cap_drop"] = keyValueIndexer
+	unique["services.*.configs"] = mountIndexer("")
+	unique["services.*.deploy.labels"] = keyValueIndexer
+	unique["services.*.dns"] = keyValueIndexer
+	unique["services.*.dns_opt"] = keyValueIndexer
+	unique["services.*.dns_search"] = keyValueIndexer
+	unique["services.*.environment"] = keyValueIndexer
+	unique["services.*.env_file"] = envFileIndexer
+	unique["services.*.expose"] = exposeIndexer
+	unique["services.*.labels"] = keyValueIndexer
+	unique["services.*.links"] = keyValueIndexer
+	unique["services.*.networks.*.aliases"] = keyValueIndexer
+	unique["services.*.networks.*.link_local_ips"] = keyValueIndexer
+	unique["services.*.ports"] = portIndexer
+	unique["services.*.profiles"] = keyValueIndexer
+	unique["services.*.secrets"] = mountIndexer("/run/secrets")
+	unique["services.*.sysctls"] = keyValueIndexer
+	unique["services.*.tmpfs"] = keyValueIndexer
+	unique["services.*.volumes"] = volumeIndexer
+	// "services.*.devices" was assigned twice (volumeIndexer first, then
+	// deviceMappingIndexer). The first assignment was dead code — a map
+	// key keeps only its last value — so only the effective indexer is kept.
+	unique["services.*.devices"] = deviceMappingIndexer
+}
+
+// EnforceUnicity removes redefinition of elements declared in a sequence,
+// walking the whole yaml tree from the root and applying the indexers
+// registered in `unique`.
+func EnforceUnicity(value map[string]any) (map[string]any, error) {
+	uniq, err := enforceUnicity(value, tree.NewPath())
+	if err != nil {
+		return nil, err
+	}
+	return uniq.(map[string]any), nil
+}
+
+// enforceUnicity recursively walks the yaml tree; for a sequence whose
+// path matches a registered indexer it deduplicates entries by index
+// key, with a later entry replacing an earlier one in place (the
+// earlier entry's position is kept).
+func enforceUnicity(value any, p tree.Path) (any, error) {
+	switch v := value.(type) {
+	case map[string]any:
+		for k, e := range v {
+			u, err := enforceUnicity(e, p.Next(k))
+			if err != nil {
+				return nil, err
+			}
+			v[k] = u
+		}
+		return v, nil
+	case []any:
+		for pattern, indexer := range unique {
+			if p.Matches(pattern) {
+				seq := []any{}
+				keys := map[string]int{}
+				for i, entry := range v {
+					key, err := indexer(entry, p.Next(fmt.Sprintf("[%d]", i)))
+					if err != nil {
+						return nil, err
+					}
+					if j, ok := keys[key]; ok {
+						// same key seen before: later declaration wins, position kept
+						seq[j] = entry
+					} else {
+						seq = append(seq, entry)
+						keys[key] = len(seq) - 1
+					}
+				}
+				return seq, nil
+			}
+		}
+	}
+	return value, nil
+}
+
+func keyValueIndexer(v any, p tree.Path) (string, error) {
+	switch value := v.(type) {
+	case string:
+		key, _, found := strings.Cut(value, "=")
+		if found {
+			return key, nil
+		}
+		return value, nil
+	default:
+		return "", fmt.Errorf("%s: unexpected type %T", p, v)
+	}
+}
+
+// volumeIndexer keys a volume entry by its container target path, for
+// both the canonical mapping form and the short string syntax (parsed
+// via format.ParseVolume). Other types key as the empty string.
+func volumeIndexer(y any, p tree.Path) (string, error) {
+	switch value := y.(type) {
+	case map[string]any:
+		target, ok := value["target"].(string)
+		if !ok {
+			return "", fmt.Errorf("service volume %s is missing a mount target", p)
+		}
+		return target, nil
+	case string:
+		volume, err := format.ParseVolume(value)
+		if err != nil {
+			return "", err
+		}
+		return volume.Target, nil
+	}
+	return "", nil
+}
+
+func deviceMappingIndexer(y any, p tree.Path) (string, error) {
+	switch value := y.(type) {
+	case map[string]any:
+		target, ok := value["target"].(string)
+		if !ok {
+			return "", fmt.Errorf("service device %s is missing a mount target", p)
+		}
+		return target, nil
+	case string:
+		arr := strings.Split(value, ":")
+		if len(arr) == 1 {
+			return arr[0], nil
+		}
+		return arr[1], nil
+	}
+	return "", nil
+}
+
+// exposeIndexer keys an expose entry by its literal value (string or
+// decimal rendering of an int).
+// NOTE(review): the error path formats a non-string value with %s,
+// which renders Go's %!s(...) notation — %v may have been intended.
+func exposeIndexer(a any, path tree.Path) (string, error) {
+	switch v := a.(type) {
+	case string:
+		return v, nil
+	case int:
+		return strconv.Itoa(v), nil
+	default:
+		return "", fmt.Errorf("%s: unsupported expose value %s", path, a)
+	}
+}
+
+// mountIndexer returns an indexer keying configs/secrets mounts by their
+// path inside the container: an explicit target wins; otherwise the key
+// is defaultPath joined with the source name (or with the bare string
+// value for the short syntax).
+func mountIndexer(defaultPath string) indexer {
+	return func(a any, path tree.Path) (string, error) {
+		switch v := a.(type) {
+		case string:
+			return fmt.Sprintf("%s/%s", defaultPath, v), nil
+		case map[string]any:
+			t, ok := v["target"]
+			if ok {
+				return t.(string), nil
+			}
+			return fmt.Sprintf("%s/%s", defaultPath, v["source"]), nil
+		default:
+			return "", fmt.Errorf("%s: unsupported expose value %s", path, a)
+		}
+	}
+}
+
+// portIndexer keys a port entry so duplicates can be collapsed: ints and
+// strings key as themselves, mappings as "host:published:target/protocol"
+// with 0.0.0.0/tcp defaults for host_ip and protocol.
+func portIndexer(y any, p tree.Path) (string, error) {
+	switch value := y.(type) {
+	case int:
+		return strconv.Itoa(value), nil
+	case map[string]any:
+		target, ok := value["target"]
+		if !ok {
+			return "", fmt.Errorf("service ports %s is missing a target port", p)
+		}
+		published, ok := value["published"]
+		if !ok {
+			// try to parse it as an int
+			// NOTE(review): this branch re-reads the same "published" key
+			// that was just found missing, so it can never execute and
+			// `published` stays nil (rendered as %!s(<nil>) below) — confirm
+			// the intent upstream before changing the key format.
+			if pub, ok := value["published"]; ok {
+				published = fmt.Sprintf("%d", pub)
+			}
+		}
+		host, ok := value["host_ip"]
+		if !ok {
+			host = "0.0.0.0"
+		}
+		protocol, ok := value["protocol"]
+		if !ok {
+			protocol = "tcp"
+		}
+		return fmt.Sprintf("%s:%s:%d/%s", host, published, target, protocol), nil
+	case string:
+		return value, nil
+	}
+	return "", nil
+}
+
+// envFileIndexer keys an env_file entry by its path, for both the short
+// string form and the canonical mapping form ({path: ...}).
+func envFileIndexer(y any, p tree.Path) (string, error) {
+	switch value := y.(type) {
+	case string:
+		return value, nil
+	case map[string]any:
+		if pathValue, ok := value["path"]; ok {
+			return pathValue.(string), nil
+		}
+		return "", fmt.Errorf("environment path attribute %s is missing", p)
+	}
+	return "", nil
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/paths/context.go 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/paths/context.go
--- 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/paths/context.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/paths/context.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,44 @@
+/*
+   Copyright 2020 The Compose Specification Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package paths
+
+import "strings"
+
+// absContextPath resolves a build context to an absolute local path,
+// leaving remote contexts untouched: any value with a URI scheme
+// (docker-image://, oci://, ...) or a Git/HTTP(S) reference.
+func (r *relativePathsResolver) absContextPath(value any) (any, error) {
+	v := value.(string)
+	if strings.Contains(v, "://") { // `docker-image://` or any builder specific context type
+		return v, nil
+	}
+	if isRemoteContext(v) {
+		return v, nil
+	}
+	return r.absPath(v)
+}
+
+// isRemoteContext returns true if the value is a Git reference or HTTP(S) URL.
+//
+// Any other value is assumed to be a local filesystem path and returns false.
+//
+// See: https://github.com/moby/buildkit/blob/18fc875d9bfd6e065cd8211abc639434ba65aa56/frontend/dockerui/context.go#L76-L79
+func isRemoteContext(maybeURL string) bool {
+	remotePrefixes := []string{"https://", "http://", "git://", "ssh://", "github.com/", "git@"}
+	for _, prefix := range remotePrefixes {
+		if strings.HasPrefix(maybeURL, prefix) {
+			return true
+		}
+	}
+	return false
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/paths/extends.go 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/paths/extends.go
--- 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/paths/extends.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/paths/extends.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,25 @@
+/*
+   Copyright 2020 The Compose Specification Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package paths
+
+// absExtendsPath resolves an `extends.file` reference to an absolute
+// path, leaving values recognized as remote resources untouched.
+func (r *relativePathsResolver) absExtendsPath(value any) (any, error) {
+	v := value.(string)
+	if r.isRemoteResource(v) {
+		return v, nil
+	}
+	return r.absPath(v)
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/paths/home.go 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/paths/home.go
--- 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/paths/home.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/paths/home.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,37 @@
+/*
+   Copyright 2020 The Compose Specification Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package paths
+
+import (
+	"os"
+	"path/filepath"
+	"strings"
+
+	"github.com/sirupsen/logrus"
+)
+
+// ExpandUser replaces a leading "~" with the current user's home
+// directory, returning the path unchanged when the home directory
+// cannot be determined.
+// NOTE(review): any leading "~" is expanded, so "~other/dir" becomes
+// $HOME/other/dir rather than that user's home — confirm this is intended.
+func ExpandUser(p string) string {
+	if strings.HasPrefix(p, "~") {
+		home, err := os.UserHomeDir()
+		if err != nil {
+			logrus.Warn("cannot expand '~', because the environment lacks HOME")
+			return p
+		}
+		return filepath.Join(home, p[1:])
+	}
+	return p
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/paths/resolve.go 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/paths/resolve.go
--- 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/paths/resolve.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/paths/resolve.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,169 @@
+/*
+   Copyright 2020 The Compose Specification Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package paths
+
+import (
+	"errors"
+	"fmt"
+	"path/filepath"
+
+	"github.com/compose-spec/compose-go/v2/tree"
+	"github.com/compose-spec/compose-go/v2/types"
+)
+
+// resolver rewrites a single path-carrying yaml value.
+type resolver func(any) (any, error)
+
+// ResolveRelativePaths make relative paths absolute: each path-carrying
+// attribute listed below is resolved against `base`; `remotes` lets
+// callers flag values (e.g. git URLs) that must be left untouched.
+func ResolveRelativePaths(project map[string]any, base string, remotes []RemoteResource) error {
+	r := relativePathsResolver{
+		workingDir: base,
+		remotes:    remotes,
+	}
+	r.resolvers = map[tree.Path]resolver{
+		"services.*.build.context":               r.absContextPath,
+		"services.*.build.additional_contexts.*": r.absContextPath,
+		"services.*.build.ssh.*":                 r.maybeUnixPath,
+		"services.*.env_file.*.path":             r.absPath,
+		"services.*.label_file.*":                r.absPath,
+		"services.*.extends.file":                r.absExtendsPath,
+		"services.*.develop.watch.*.path":        r.absSymbolicLink,
+		"services.*.volumes.*":                   r.absVolumeMount,
+		"configs.*.file":                         r.maybeUnixPath,
+		"secrets.*.file":                         r.maybeUnixPath,
+		"include.path":                           r.absPath,
+		"include.project_directory":              r.absPath,
+		"include.env_file":                       r.absPath,
+		"volumes.*":                              r.volumeDriverOpts,
+	}
+	_, err := r.resolveRelativePaths(project, tree.NewPath())
+	return err
+}
+
+// RemoteResource reports whether a path actually denotes a remote
+// resource, which must not be resolved against the local filesystem.
+type RemoteResource func(path string) bool
+
+// relativePathsResolver carries the state used to absolutize paths.
+type relativePathsResolver struct {
+	workingDir string                 // base directory relative paths resolve against
+	remotes    []RemoteResource       // detectors for values to leave untouched
+	resolvers  map[tree.Path]resolver // per-path-pattern resolution strategies
+}
+
+// isRemoteResource reports whether any registered RemoteResource check
+// claims the given path.
+func (r *relativePathsResolver) isRemoteResource(path string) bool {
+	for _, isRemote := range r.remotes {
+		if isRemote(path) {
+			return true
+		}
+	}
+	return false
+}
+
+// resolveRelativePaths walks the yaml tree: the first resolver whose
+// path pattern matches handles the value; otherwise it recurses into
+// mappings and sequences, rewriting values in place.
+func (r *relativePathsResolver) resolveRelativePaths(value any, p tree.Path) (any, error) {
+	for pattern, resolver := range r.resolvers {
+		if p.Matches(pattern) {
+			return resolver(value)
+		}
+	}
+	switch v := value.(type) {
+	case map[string]any:
+		for k, e := range v {
+			resolved, err := r.resolveRelativePaths(e, p.Next(k))
+			if err != nil {
+				return nil, err
+			}
+			v[k] = resolved
+		}
+	case []any:
+		for i, e := range v {
+			resolved, err := r.resolveRelativePaths(e, p.Next("[]"))
+			if err != nil {
+				return nil, err
+			}
+			v[i] = resolved
+		}
+	}
+	return value, nil
+}
+
+// absPath makes a path — or each entry of a sequence of paths —
+// absolute relative to the resolver's working directory, expanding a
+// leading "~" and leaving absolute or empty values unchanged. Any other
+// type is an error.
+func (r *relativePathsResolver) absPath(value any) (any, error) {
+	switch v := value.(type) {
+	case []any:
+		for i, s := range v {
+			abs, err := r.absPath(s)
+			if err != nil {
+				return nil, err
+			}
+			v[i] = abs
+		}
+		return v, nil
+	case string:
+		v = ExpandUser(v)
+		if filepath.IsAbs(v) {
+			return v, nil
+		}
+		if v != "" {
+			return filepath.Join(r.workingDir, v), nil
+		}
+		return v, nil
+	}
+
+	return nil, fmt.Errorf("unexpected type %T", value)
+}
+
+// absVolumeMount resolves the host source of a bind-mount volume in
+// canonical (mapping) form; other volume types, and values not in
+// canonical form, pass through unchanged.
+func (r *relativePathsResolver) absVolumeMount(a any) (any, error) {
+	switch vol := a.(type) {
+	case map[string]any:
+		if vol["type"] != types.VolumeTypeBind {
+			return vol, nil
+		}
+		src, ok := vol["source"]
+		if !ok {
+			return nil, errors.New(`invalid mount config for type "bind": field Source must not be empty`)
+		}
+		abs, err := r.maybeUnixPath(src.(string))
+		if err != nil {
+			return nil, err
+		}
+		vol["source"] = abs
+		return vol, nil
+	default:
+		// not using canonical format, skip
+		return a, nil
+	}
+}
+
+// volumeDriverOpts resolves the device path of a local-driver volume
+// whose driver_opts declare a bind mount (o: bind), since the device is
+// then effectively a host path.
+func (r *relativePathsResolver) volumeDriverOpts(a any) (any, error) {
+	if a == nil {
+		return nil, nil
+	}
+	vol := a.(map[string]any)
+	if vol["driver"] != "local" {
+		return vol, nil
+	}
+	do, ok := vol["driver_opts"]
+	if !ok {
+		return vol, nil
+	}
+	opts := do.(map[string]any)
+	if dev, ok := opts["device"]; opts["o"] == "bind" && ok {
+		// This is actually a bind mount
+		path, err := r.maybeUnixPath(dev)
+		if err != nil {
+			return nil, err
+		}
+		opts["device"] = path
+	}
+	return vol, nil
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/paths/unix.go 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/paths/unix.go
--- 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/paths/unix.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/paths/unix.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,57 @@
+/*
+   Copyright 2020 The Compose Specification Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package paths
+
+import (
+	"path"
+	"path/filepath"
+
+	"github.com/compose-spec/compose-go/v2/utils"
+)
+
+// maybeUnixPath absolutizes a path-like string against the working
+// directory, leaving non-strings and paths that are absolute in either
+// Unix or Windows notation untouched.
+func (r *relativePathsResolver) maybeUnixPath(a any) (any, error) {
+	p, ok := a.(string)
+	if !ok {
+		return a, nil
+	}
+	p = ExpandUser(p)
+	// Check if source is an absolute path (either Unix or Windows), to
+	// handle a Windows client with a Unix daemon or vice-versa.
+	//
+	// Note that this is not required for Docker for Windows when specifying
+	// a local Windows path, because Docker for Windows translates the Windows
+	// path into a valid path within the VM.
+	if !path.IsAbs(p) && !isWindowsAbs(p) {
+		// NOTE(review): on non-Windows hosts filepath.IsAbs matches
+		// path.IsAbs, so this inner check appears redundant — confirm
+		// whether it is reachable on any target platform.
+		if filepath.IsAbs(p) {
+			return p, nil
+		}
+		return filepath.Join(r.workingDir, p), nil
+	}
+	return p, nil
+}
+
+// absSymbolicLink makes a watch path absolute, then resolves symbolic
+// links to their real target; non-string results pass through unchanged.
+func (r *relativePathsResolver) absSymbolicLink(value any) (any, error) {
+	abs, err := r.absPath(value)
+	if err != nil {
+		return nil, err
+	}
+	str, ok := abs.(string)
+	if !ok {
+		return abs, nil
+	}
+	return utils.ResolveSymbolicLink(str)
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/paths/windows_path.go 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/paths/windows_path.go
--- 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/paths/windows_path.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/paths/windows_path.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,82 @@
+/*
+   Copyright 2020 The Compose Specification Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package paths
+
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+// https://github.com/golang/go/blob/master/LICENSE
+
+// This file contains utilities to check for Windows absolute paths on Linux.
+// The code in this file was largely copied from the Golang filepath package
+// https://github.com/golang/go/blob/1d0e94b1e13d5e8a323a63cd1cc1ef95290c9c36/src/path/filepath/path_windows.go#L12-L65
+
+// isSlash reports whether c is a path separator (either slash flavor).
+func isSlash(c uint8) bool {
+	return c == '\\' || c == '/'
+}
+
+// isWindowsAbs reports whether the path is a Windows absolute path:
+// a volume name (drive letter or UNC share) followed by a separator.
+func isWindowsAbs(path string) (b bool) {
+	l := volumeNameLen(path)
+	if l == 0 {
+		return false
+	}
+	path = path[l:]
+	if path == "" {
+		return false
+	}
+	return isSlash(path[0])
+}
+
+// volumeNameLen returns length of the leading volume name on Windows.
+// It returns 0 elsewhere.
+// This is a verbatim copy of the Go standard library implementation
+// (path/filepath, Windows build); see the file header for provenance.
+// nolint: gocyclo
+func volumeNameLen(path string) int {
+	if len(path) < 2 {
+		return 0
+	}
+	// with drive letter
+	c := path[0]
+	if path[1] == ':' && ('a' <= c && c <= 'z' || 'A' <= c && c <= 'Z') {
+		return 2
+	}
+	// is it UNC? https://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx
+	if l := len(path); l >= 5 && isSlash(path[0]) && isSlash(path[1]) &&
+		!isSlash(path[2]) && path[2] != '.' {
+		// first, leading `\\` and next shouldn't be `\`. its server name.
+		for n := 3; n < l-1; n++ {
+			// second, next '\' shouldn't be repeated.
+			if isSlash(path[n]) {
+				n++
+				// third, following something characters. its share name.
+				if !isSlash(path[n]) {
+					if path[n] == '.' {
+						break
+					}
+					for ; n < l; n++ {
+						if isSlash(path[n]) {
+							break
+						}
+					}
+					return n
+				}
+				break
+			}
+		}
+	}
+	return 0
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/schema/compose-spec.json 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/schema/compose-spec.json
--- 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/schema/compose-spec.json	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/schema/compose-spec.json	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,1024 @@
+{
+  "$schema": "https://json-schema.org/draft/2019-09/schema#",
+  "id": "compose_spec.json",
+  "type": "object",
+  "title": "Compose Specification",
+  "description": "The Compose file is a YAML file defining a multi-containers based application.",
+
+  "properties": {
+    "version": {
+      "type": "string",
+      "description": "declared for backward compatibility, ignored."
+    },
+
+    "name": {
+      "type": "string",
+      "description": "define the Compose project name, until user defines one explicitly."
+    },
+
+    "include": {
+      "type": "array",
+      "items": {
+        "$ref": "#/definitions/include"
+      },
+      "description": "compose sub-projects to be included."
+    },
+
+    "services": {
+      "id": "#/properties/services",
+      "type": "object",
+      "patternProperties": {
+        "^[a-zA-Z0-9._-]+$": {
+          "$ref": "#/definitions/service"
+        }
+      },
+      "additionalProperties": false
+    },
+
+    "networks": {
+      "id": "#/properties/networks",
+      "type": "object",
+      "patternProperties": {
+        "^[a-zA-Z0-9._-]+$": {
+          "$ref": "#/definitions/network"
+        }
+      }
+    },
+
+    "volumes": {
+      "id": "#/properties/volumes",
+      "type": "object",
+      "patternProperties": {
+        "^[a-zA-Z0-9._-]+$": {
+          "$ref": "#/definitions/volume"
+        }
+      },
+      "additionalProperties": false
+    },
+
+    "secrets": {
+      "id": "#/properties/secrets",
+      "type": "object",
+      "patternProperties": {
+        "^[a-zA-Z0-9._-]+$": {
+          "$ref": "#/definitions/secret"
+        }
+      },
+      "additionalProperties": false
+    },
+
+    "configs": {
+      "id": "#/properties/configs",
+      "type": "object",
+      "patternProperties": {
+        "^[a-zA-Z0-9._-]+$": {
+          "$ref": "#/definitions/config"
+        }
+      },
+      "additionalProperties": false
+    }
+  },
+
+  "patternProperties": {"^x-": {}},
+  "additionalProperties": false,
+
+  "definitions": {
+
+    "service": {
+      "id": "#/definitions/service",
+      "type": "object",
+
+      "properties": {
+        "develop": {"$ref": "#/definitions/development"},
+        "deploy": {"$ref": "#/definitions/deployment"},
+        "annotations": {"$ref": "#/definitions/list_or_dict"},
+        "attach": {"type": ["boolean", "string"]},
+        "build": {
+          "oneOf": [
+            {"type": "string"},
+            {
+              "type": "object",
+              "properties": {
+                "context": {"type": "string"},
+                "dockerfile": {"type": "string"},
+                "dockerfile_inline": {"type": "string"},
+                "entitlements": {"type": "array", "items": {"type": "string"}},
+                "args": {"$ref": "#/definitions/list_or_dict"},
+                "ssh": {"$ref": "#/definitions/list_or_dict"},
+                "labels": {"$ref": "#/definitions/list_or_dict"},
+                "cache_from": {"type": "array", "items": {"type": "string"}},
+                "cache_to": {"type": "array", "items": {"type": "string"}},
+                "no_cache": {"type": ["boolean", "string"]},
+                "additional_contexts": {"$ref": "#/definitions/list_or_dict"},
+                "network": {"type": "string"},
+                "pull": {"type": ["boolean", "string"]},
+                "target": {"type": "string"},
+                "shm_size": {"type": ["integer", "string"]},
+                "extra_hosts": {"$ref": "#/definitions/extra_hosts"},
+                "isolation": {"type": "string"},
+                "privileged": {"type": ["boolean", "string"]},
+                "secrets": {"$ref": "#/definitions/service_config_or_secret"},
+                "tags": {"type": "array", "items": {"type": "string"}},
+                "ulimits": {"$ref": "#/definitions/ulimits"},
+                "platforms": {"type": "array", "items": {"type": "string"}}
+              },
+              "additionalProperties": false,
+              "patternProperties": {"^x-": {}}
+            }
+          ]
+        },
+        "blkio_config": {
+          "type": "object",
+          "properties": {
+            "device_read_bps": {
+              "type": "array",
+              "items": {"$ref": "#/definitions/blkio_limit"}
+            },
+            "device_read_iops": {
+              "type": "array",
+              "items": {"$ref": "#/definitions/blkio_limit"}
+            },
+            "device_write_bps": {
+              "type": "array",
+              "items": {"$ref": "#/definitions/blkio_limit"}
+            },
+            "device_write_iops": {
+              "type": "array",
+              "items": {"$ref": "#/definitions/blkio_limit"}
+            },
+            "weight": {"type": ["integer", "string"]},
+            "weight_device": {
+              "type": "array",
+              "items": {"$ref": "#/definitions/blkio_weight"}
+            }
+          },
+          "additionalProperties": false
+        },
+        "cap_add": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+        "cap_drop": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+        "cgroup": {"type": "string", "enum": ["host", "private"]},
+        "cgroup_parent": {"type": "string"},
+        "command": {"$ref": "#/definitions/command"},
+        "configs": {"$ref": "#/definitions/service_config_or_secret"},
+        "container_name": {"type": "string"},
+        "cpu_count": {"oneOf": [
+          {"type": "string"},
+          {"type": "integer", "minimum": 0}
+        ]},
+        "cpu_percent": {"oneOf": [
+          {"type": "string"},
+          {"type": "integer", "minimum": 0, "maximum": 100}
+        ]},
+        "cpu_shares": {"type": ["number", "string"]},
+        "cpu_quota": {"type": ["number", "string"]},
+        "cpu_period": {"type": ["number", "string"]},
+        "cpu_rt_period": {"type": ["number", "string"]},
+        "cpu_rt_runtime": {"type": ["number", "string"]},
+        "cpus": {"type": ["number", "string"]},
+        "cpuset": {"type": "string"},
+        "credential_spec": {
+          "type": "object",
+          "properties": {
+            "config": {"type": "string"},
+            "file": {"type": "string"},
+            "registry": {"type": "string"}
+          },
+          "additionalProperties": false,
+          "patternProperties": {"^x-": {}}
+        },
+        "depends_on": {
+          "oneOf": [
+            {"$ref": "#/definitions/list_of_strings"},
+            {
+              "type": "object",
+              "additionalProperties": false,
+              "patternProperties": {
+                "^[a-zA-Z0-9._-]+$": {
+                  "type": "object",
+                  "additionalProperties": false,
+                  "patternProperties": {"^x-": {}},
+                  "properties": {
+                    "restart": {"type": ["boolean", "string"]},
+                    "required": {
+                      "type":  "boolean",
+                      "default": true
+                    },
+                    "condition": {
+                      "type": "string",
+                      "enum": ["service_started", "service_healthy", "service_completed_successfully"]
+                    }
+                  },
+                  "required": ["condition"]
+                }
+              }
+            }
+          ]
+        },
+        "device_cgroup_rules": {"$ref": "#/definitions/list_of_strings"},
+        "devices": {
+          "type": "array",
+          "items": {
+            "oneOf": [
+              {"type": "string"},
+              {
+                "type": "object",
+                "required": ["source"],
+                "properties": {
+                  "source": {"type": "string"},
+                  "target": {"type": "string"},
+                  "permissions": {"type": "string"}
+                },
+                "additionalProperties": false,
+                "patternProperties": {"^x-": {}}
+              }
+            ]
+          }
+        },
+        "dns": {"$ref": "#/definitions/string_or_list"},
+        "dns_opt": {"type": "array","items": {"type": "string"}, "uniqueItems": true},
+        "dns_search": {"$ref": "#/definitions/string_or_list"},
+        "domainname": {"type": "string"},
+        "entrypoint": {"$ref": "#/definitions/command"},
+        "env_file": {"$ref": "#/definitions/env_file"},
+        "label_file": {"$ref": "#/definitions/label_file"},
+        "environment": {"$ref": "#/definitions/list_or_dict"},
+
+        "expose": {
+          "type": "array",
+          "items": {
+            "type": ["string", "number"],
+            "format": "expose"
+          },
+          "uniqueItems": true
+        },
+        "extends": {
+          "oneOf": [
+            {"type": "string"},
+            {
+              "type": "object",
+
+              "properties": {
+                "service": {"type": "string"},
+                "file": {"type": "string"}
+              },
+              "required": ["service"],
+              "additionalProperties": false
+            }
+          ]
+        },
+        "external_links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+        "extra_hosts": {"$ref": "#/definitions/extra_hosts"},
+        "gpus": {"$ref": "#/definitions/gpus"},
+        "group_add": {
+          "type": "array",
+          "items": {
+            "type": ["string", "number"]
+          },
+          "uniqueItems": true
+        },
+        "healthcheck": {"$ref": "#/definitions/healthcheck"},
+        "hostname": {"type": "string"},
+        "image": {"type": "string"},
+        "init": {"type": ["boolean", "string"]},
+        "ipc": {"type": "string"},
+        "isolation": {"type": "string"},
+        "labels": {"$ref": "#/definitions/list_or_dict"},
+        "links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+        "logging": {
+          "type": "object",
+
+          "properties": {
+            "driver": {"type": "string"},
+            "options": {
+              "type": "object",
+              "patternProperties": {
+                "^.+$": {"type": ["string", "number", "null"]}
+              }
+            }
+          },
+          "additionalProperties": false,
+          "patternProperties": {"^x-": {}}
+        },
+        "mac_address": {"type": "string"},
+        "mem_limit": {"type": ["number", "string"]},
+        "mem_reservation": {"type": ["string", "integer"]},
+        "mem_swappiness": {"type": ["integer", "string"]},
+        "memswap_limit": {"type": ["number", "string"]},
+        "network_mode": {"type": "string"},
+        "networks": {
+          "oneOf": [
+            {"$ref": "#/definitions/list_of_strings"},
+            {
+              "type": "object",
+              "patternProperties": {
+                "^[a-zA-Z0-9._-]+$": {
+                  "oneOf": [
+                    {
+                      "type": "object",
+                      "properties": {
+                        "aliases": {"$ref": "#/definitions/list_of_strings"},
+                        "ipv4_address": {"type": "string"},
+                        "ipv6_address": {"type": "string"},
+                        "link_local_ips": {"$ref": "#/definitions/list_of_strings"},
+                        "mac_address": {"type": "string"},
+                        "driver_opts": {
+                          "type": "object",
+                          "patternProperties": {
+                            "^.+$": {"type": ["string", "number"]}
+                          }
+                        },
+                        "priority": {"type": "number"}
+                      },
+                      "additionalProperties": false,
+                      "patternProperties": {"^x-": {}}
+                    },
+                    {"type": "null"}
+                  ]
+                }
+              },
+              "additionalProperties": false
+            }
+          ]
+        },
+        "oom_kill_disable": {"type": ["boolean", "string"]},
+        "oom_score_adj": {"oneOf": [
+          {"type": "string"},
+          {"type": "integer", "minimum": -1000, "maximum": 1000}
+        ]},
+        "pid": {"type": ["string", "null"]},
+        "pids_limit": {"type": ["number", "string"]},
+        "platform": {"type": "string"},
+        "ports": {
+          "type": "array",
+          "items": {
+            "oneOf": [
+              {"type": "number"},
+              {"type": "string"},
+              {
+                "type": "object",
+                "properties": {
+                  "name": {"type": "string"},
+                  "mode": {"type": "string"},
+                  "host_ip": {"type": "string"},
+                  "target": {"type": ["integer", "string"]},
+                  "published": {"type": ["string", "integer"]},
+                  "protocol": {"type": "string"},
+                  "app_protocol": {"type": "string"}
+                },
+                "additionalProperties": false,
+                "patternProperties": {"^x-": {}}
+              }
+            ]
+          },
+          "uniqueItems": true
+        },
+        "post_start": {"type": "array", "items": {"$ref": "#/definitions/service_hook"}},
+        "pre_stop": {"type": "array", "items": {"$ref": "#/definitions/service_hook"}},
+        "privileged": {"type": ["boolean", "string"]},
+        "profiles": {"$ref": "#/definitions/list_of_strings"},
+        "pull_policy": {"type": "string", "enum": [
+          "always", "never", "if_not_present", "build", "missing"
+        ]},
+        "read_only": {"type": ["boolean", "string"]},
+        "restart": {"type": "string"},
+        "runtime": {
+          "type": "string"
+        },
+        "scale": {
+          "type": ["integer", "string"]
+        },
+        "security_opt": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+        "shm_size": {"type": ["number", "string"]},
+        "secrets": {"$ref": "#/definitions/service_config_or_secret"},
+        "sysctls": {"$ref": "#/definitions/list_or_dict"},
+        "stdin_open": {"type": ["boolean", "string"]},
+        "stop_grace_period": {"type": "string"},
+        "stop_signal": {"type": "string"},
+        "storage_opt": {"type": "object"},
+        "tmpfs": {"$ref": "#/definitions/string_or_list"},
+        "tty": {"type": ["boolean", "string"]},
+        "ulimits": {"$ref": "#/definitions/ulimits"},
+        "user": {"type": "string"},
+        "uts": {"type": "string"},
+        "userns_mode": {"type": "string"},
+        "volumes": {
+          "type": "array",
+          "items": {
+            "oneOf": [
+              {"type": "string"},
+              {
+                "type": "object",
+                "required": ["type"],
+                "properties": {
+                  "type": {"type": "string"},
+                  "source": {"type": "string"},
+                  "target": {"type": "string"},
+                  "read_only": {"type": ["boolean", "string"]},
+                  "consistency": {"type": "string"},
+                  "bind": {
+                    "type": "object",
+                    "properties": {
+                      "propagation": {"type": "string"},
+                      "create_host_path": {"type": ["boolean", "string"]},
+                      "recursive": {"type": "string", "enum": ["enabled", "disabled", "writable", "readonly"]},
+                      "selinux": {"type": "string", "enum": ["z", "Z"]}
+                    },
+                    "additionalProperties": false,
+                    "patternProperties": {"^x-": {}}
+                  },
+                  "volume": {
+                    "type": "object",
+                    "properties": {
+                      "nocopy": {"type": ["boolean", "string"]},
+                      "subpath": {"type": "string"}
+                    },
+                    "additionalProperties": false,
+                    "patternProperties": {"^x-": {}}
+                  },
+                  "tmpfs": {
+                    "type": "object",
+                    "properties": {
+                      "size": {
+                        "oneOf": [
+                          {"type": "integer", "minimum": 0},
+                          {"type": "string"}
+                        ]
+                      },
+                      "mode": {"type": ["number", "string"]}
+                    },
+                    "additionalProperties": false,
+                    "patternProperties": {"^x-": {}}
+                  }
+                },
+                "additionalProperties": false,
+                "patternProperties": {"^x-": {}}
+              }
+            ]
+          },
+          "uniqueItems": true
+        },
+        "volumes_from": {
+          "type": "array",
+          "items": {"type": "string"},
+          "uniqueItems": true
+        },
+        "working_dir": {"type": "string"}
+      },
+      "patternProperties": {"^x-": {}},
+      "additionalProperties": false
+    },
+
+    "healthcheck": {
+      "id": "#/definitions/healthcheck",
+      "type": "object",
+      "properties": {
+        "disable": {"type": ["boolean", "string"]},
+        "interval": {"type": "string"},
+        "retries": {"type": ["number", "string"]},
+        "test": {
+          "oneOf": [
+            {"type": "string"},
+            {"type": "array", "items": {"type": "string"}}
+          ]
+        },
+        "timeout": {"type": "string"},
+        "start_period": {"type": "string"},
+        "start_interval": {"type": "string"}
+      },
+      "additionalProperties": false,
+      "patternProperties": {"^x-": {}}
+    },
+    "development": {
+      "id": "#/definitions/development",
+      "type": ["object", "null"],
+      "properties": {
+        "watch": {
+          "type": "array",
+          "items": {
+            "type": "object",
+            "required": ["path", "action"],
+            "properties": {
+              "ignore": {"type": "array", "items": {"type": "string"}},
+              "path": {"type": "string"},
+              "action": {"type": "string", "enum": ["rebuild", "sync", "restart", "sync+restart", "sync+exec"]},
+              "target": {"type": "string"},
+              "exec": {"$ref": "#/definitions/service_hook"}
+            },
+            "additionalProperties": false,
+            "patternProperties": {"^x-": {}}
+          }
+        }
+      },
+      "additionalProperties": false,
+      "patternProperties": {"^x-": {}}
+    },
+    "deployment": {
+      "id": "#/definitions/deployment",
+      "type": ["object", "null"],
+      "properties": {
+        "mode": {"type": "string"},
+        "endpoint_mode": {"type": "string"},
+        "replicas": {"type": ["integer", "string"]},
+        "labels": {"$ref": "#/definitions/list_or_dict"},
+        "rollback_config": {
+          "type": "object",
+          "properties": {
+            "parallelism": {"type": ["integer", "string"]},
+            "delay": {"type": "string"},
+            "failure_action": {"type": "string"},
+            "monitor": {"type": "string"},
+            "max_failure_ratio": {"type": ["number", "string"]},
+            "order": {"type": "string", "enum": [
+              "start-first", "stop-first"
+            ]}
+          },
+          "additionalProperties": false,
+          "patternProperties": {"^x-": {}}
+        },
+        "update_config": {
+          "type": "object",
+          "properties": {
+            "parallelism": {"type": ["integer", "string"]},
+            "delay": {"type": "string"},
+            "failure_action": {"type": "string"},
+            "monitor": {"type": "string"},
+            "max_failure_ratio": {"type": ["number", "string"]},
+            "order": {"type": "string", "enum": [
+              "start-first", "stop-first"
+            ]}
+          },
+          "additionalProperties": false,
+          "patternProperties": {"^x-": {}}
+        },
+        "resources": {
+          "type": "object",
+          "properties": {
+            "limits": {
+              "type": "object",
+              "properties": {
+                "cpus": {"type": ["number", "string"]},
+                "memory": {"type": "string"},
+                "pids": {"type": ["integer", "string"]}
+              },
+              "additionalProperties": false,
+              "patternProperties": {"^x-": {}}
+            },
+            "reservations": {
+              "type": "object",
+              "properties": {
+                "cpus": {"type": ["number", "string"]},
+                "memory": {"type": "string"},
+                "generic_resources": {"$ref": "#/definitions/generic_resources"},
+                "devices": {"$ref": "#/definitions/devices"}
+              },
+              "additionalProperties": false,
+              "patternProperties": {"^x-": {}}
+            }
+          },
+          "additionalProperties": false,
+          "patternProperties": {"^x-": {}}
+        },
+        "restart_policy": {
+          "type": "object",
+          "properties": {
+            "condition": {"type": "string"},
+            "delay": {"type": "string"},
+            "max_attempts": {"type": ["integer", "string"]},
+            "window": {"type": "string"}
+          },
+          "additionalProperties": false,
+          "patternProperties": {"^x-": {}}
+        },
+        "placement": {
+          "type": "object",
+          "properties": {
+            "constraints": {"type": "array", "items": {"type": "string"}},
+            "preferences": {
+              "type": "array",
+              "items": {
+                "type": "object",
+                "properties": {
+                  "spread": {"type": "string"}
+                },
+                "additionalProperties": false,
+                "patternProperties": {"^x-": {}}
+              }
+            },
+            "max_replicas_per_node": {"type": ["integer", "string"]}
+          },
+          "additionalProperties": false,
+          "patternProperties": {"^x-": {}}
+        }
+      },
+      "additionalProperties": false,
+      "patternProperties": {"^x-": {}}
+    },
+
+    "generic_resources": {
+      "id": "#/definitions/generic_resources",
+      "type": "array",
+      "items": {
+        "type": "object",
+        "properties": {
+          "discrete_resource_spec": {
+            "type": "object",
+            "properties": {
+              "kind": {"type": "string"},
+              "value": {"type": ["number", "string"]}
+            },
+            "additionalProperties": false,
+            "patternProperties": {"^x-": {}}
+          }
+        },
+        "additionalProperties": false,
+        "patternProperties": {"^x-": {}}
+      }
+    },
+
+    "devices": {
+      "id": "#/definitions/devices",
+      "type": "array",
+      "items": {
+        "type": "object",
+        "properties": {
+          "capabilities": {"$ref": "#/definitions/list_of_strings"},
+          "count": {"type": ["string", "integer"]},
+          "device_ids": {"$ref": "#/definitions/list_of_strings"},
+          "driver":{"type": "string"},
+          "options":{"$ref": "#/definitions/list_or_dict"}
+        },
+        "additionalProperties": false,
+        "patternProperties": {"^x-": {}},
+        "required": [
+          "capabilities"
+        ]
+      }
+    },
+
+    "gpus": {
+      "id": "#/definitions/gpus",
+      "type": "array",
+      "items": {
+        "type": "object",
+        "properties": {
+          "capabilities": {"$ref": "#/definitions/list_of_strings"},
+          "count": {"type": ["string", "integer"]},
+          "device_ids": {"$ref": "#/definitions/list_of_strings"},
+          "driver":{"type": "string"},
+          "options":{"$ref": "#/definitions/list_or_dict"}
+        },
+        "additionalProperties": false,
+        "patternProperties": {"^x-": {}}
+      }
+    },
+
+    "include": {
+      "id": "#/definitions/include",
+      "oneOf": [
+        {"type": "string"},
+        {
+          "type": "object",
+          "properties": {
+            "path": {"$ref": "#/definitions/string_or_list"},
+            "env_file": {"$ref": "#/definitions/string_or_list"},
+            "project_directory": {"type": "string"}
+          },
+          "additionalProperties": false
+        }
+      ]
+    },
+
+    "network": {
+      "id": "#/definitions/network",
+      "type": ["object", "null"],
+      "properties": {
+        "name": {"type": "string"},
+        "driver": {"type": "string"},
+        "driver_opts": {
+          "type": "object",
+          "patternProperties": {
+            "^.+$": {"type": ["string", "number"]}
+          }
+        },
+        "ipam": {
+          "type": "object",
+          "properties": {
+            "driver": {"type": "string"},
+            "config": {
+              "type": "array",
+              "items": {
+                "type": "object",
+                "properties": {
+                  "subnet": {"type": "string"},
+                  "ip_range": {"type": "string"},
+                  "gateway": {"type": "string"},
+                  "aux_addresses": {
+                    "type": "object",
+                    "additionalProperties": false,
+                    "patternProperties": {"^.+$": {"type": "string"}}
+                  }
+                },
+                "additionalProperties": false,
+                "patternProperties": {"^x-": {}}
+              }
+            },
+            "options": {
+              "type": "object",
+              "additionalProperties": false,
+              "patternProperties": {"^.+$": {"type": "string"}}
+            }
+          },
+          "additionalProperties": false,
+          "patternProperties": {"^x-": {}}
+        },
+        "external": {
+          "type": ["boolean", "string", "object"],
+          "properties": {
+            "name": {
+              "deprecated": true,
+              "type": "string"
+            }
+          },
+          "additionalProperties": false,
+          "patternProperties": {"^x-": {}}
+        },
+        "internal": {"type": ["boolean", "string"]},
+        "enable_ipv6": {"type": ["boolean", "string"]},
+        "attachable": {"type": ["boolean", "string"]},
+        "labels": {"$ref": "#/definitions/list_or_dict"}
+      },
+      "additionalProperties": false,
+      "patternProperties": {"^x-": {}}
+    },
+
+    "volume": {
+      "id": "#/definitions/volume",
+      "type": ["object", "null"],
+      "properties": {
+        "name": {"type": "string"},
+        "driver": {"type": "string"},
+        "driver_opts": {
+          "type": "object",
+          "patternProperties": {
+            "^.+$": {"type": ["string", "number"]}
+          }
+        },
+        "external": {
+          "type": ["boolean", "string", "object"],
+          "properties": {
+            "name": {
+              "deprecated": true,
+              "type": "string"
+            }
+          },
+          "additionalProperties": false,
+          "patternProperties": {"^x-": {}}
+        },
+        "labels": {"$ref": "#/definitions/list_or_dict"}
+      },
+      "additionalProperties": false,
+      "patternProperties": {"^x-": {}}
+    },
+
+    "secret": {
+      "id": "#/definitions/secret",
+      "type": "object",
+      "properties": {
+        "name": {"type": "string"},
+        "environment": {"type": "string"},
+        "file": {"type": "string"},
+        "external": {
+          "type": ["boolean", "string", "object"],
+          "properties": {
+            "name": {"type": "string"}
+          }
+        },
+        "labels": {"$ref": "#/definitions/list_or_dict"},
+        "driver": {"type": "string"},
+        "driver_opts": {
+          "type": "object",
+          "patternProperties": {
+            "^.+$": {"type": ["string", "number"]}
+          }
+        },
+        "template_driver": {"type": "string"}
+      },
+      "additionalProperties": false,
+      "patternProperties": {"^x-": {}}
+    },
+
+    "config": {
+      "id": "#/definitions/config",
+      "type": "object",
+      "properties": {
+        "name": {"type": "string"},
+        "content": {"type": "string"},
+        "environment": {"type": "string"},
+        "file": {"type": "string"},
+        "external": {
+          "type": ["boolean", "string", "object"],
+          "properties": {
+            "name": {
+              "deprecated": true,
+              "type": "string"
+            }
+          }
+        },
+        "labels": {"$ref": "#/definitions/list_or_dict"},
+        "template_driver": {"type": "string"}
+      },
+      "additionalProperties": false,
+      "patternProperties": {"^x-": {}}
+    },
+
+    "command": {
+      "oneOf": [
+        {"type": "null"},
+        {"type": "string"},
+        {"type": "array","items": {"type": "string"}}
+      ]
+    },
+
+    "service_hook": {
+      "id": "#/definitions/service_hook",
+      "type": "object",
+      "properties": {
+        "command": {"$ref": "#/definitions/command"},
+        "user": {"type": "string"},
+        "privileged": {"type": ["boolean", "string"]},
+        "working_dir": {"type": "string"},
+        "environment": {"$ref": "#/definitions/list_or_dict"}
+      },
+      "additionalProperties": false,
+      "patternProperties": {"^x-": {}}
+    },
+
+    "env_file": {
+      "oneOf": [
+        {"type": "string"},
+        {
+          "type": "array",
+          "items": {
+            "oneOf": [
+              {"type": "string"},
+              {
+                "type": "object",
+                "additionalProperties": false,
+                "properties": {
+                  "path": {
+                    "type": "string"
+                  },
+                  "format": {
+                    "type": "string"
+                  },
+                  "required": {
+                    "type": ["boolean", "string"],
+                    "default": true
+                  }
+                },
+                "required": [
+                  "path"
+                ]
+              }
+            ]
+          }
+        }
+      ]
+    },
+
+    "label_file": {
+      "oneOf": [
+        {"type": "string"},
+        {
+          "type": "array",
+          "items": {"type": "string"}
+        }
+      ]
+    },
+
+    "string_or_list": {
+      "oneOf": [
+        {"type": "string"},
+        {"$ref": "#/definitions/list_of_strings"}
+      ]
+    },
+
+    "list_of_strings": {
+      "type": "array",
+      "items": {"type": "string"},
+      "uniqueItems": true
+    },
+
+    "list_or_dict": {
+      "oneOf": [
+        {
+          "type": "object",
+          "patternProperties": {
+            ".+": {
+              "type": ["string", "number", "boolean", "null"]
+            }
+          },
+          "additionalProperties": false
+        },
+        {"type": "array", "items": {"type": "string"}, "uniqueItems": true}
+      ]
+    },
+
+    "extra_hosts": {
+      "oneOf": [
+        {
+          "type": "object",
+          "patternProperties": {
+            ".+": {
+              "oneOf": [
+                {
+                  "type": "string"
+                },
+                {
+                  "type": "array",
+                  "items": {
+                    "type": "string"
+                  },
+                  "uniqueItems": false
+                }
+              ]
+            }
+          },
+          "additionalProperties": false
+        },
+        {"type": "array", "items": {"type": "string"}, "uniqueItems": true}
+      ]
+    },
+
+    "blkio_limit": {
+      "type": "object",
+      "properties": {
+        "path": {"type": "string"},
+        "rate": {"type": ["integer", "string"]}
+      },
+      "additionalProperties": false
+    },
+    "blkio_weight": {
+      "type": "object",
+      "properties": {
+        "path": {"type": "string"},
+        "weight": {"type": ["integer", "string"]}
+      },
+      "additionalProperties": false
+    },
+    "service_config_or_secret": {
+      "type": "array",
+      "items": {
+        "oneOf": [
+          {"type": "string"},
+          {
+            "type": "object",
+            "properties": {
+              "source": {"type": "string"},
+              "target": {"type": "string"},
+              "uid": {"type": "string"},
+              "gid": {"type": "string"},
+              "mode": {"type": ["number", "string"]}
+            },
+            "additionalProperties": false,
+            "patternProperties": {"^x-": {}}
+          }
+        ]
+      }
+    },
+    "ulimits": {
+      "type": "object",
+      "patternProperties": {
+        "^[a-z]+$": {
+          "oneOf": [
+            {"type": ["integer", "string"]},
+            {
+              "type": "object",
+              "properties": {
+                "hard": {"type": ["integer", "string"]},
+                "soft": {"type": ["integer", "string"]}
+              },
+              "required": ["soft", "hard"],
+              "additionalProperties": false,
+              "patternProperties": {"^x-": {}}
+            }
+          ]
+        }
+      }
+    },
+    "constraints": {
+      "service": {
+        "id": "#/definitions/constraints/service",
+        "anyOf": [
+          {"required": ["build"]},
+          {"required": ["image"]}
+        ],
+        "properties": {
+          "build": {
+            "required": ["context"]
+          }
+        }
+      }
+    }
+  }
+}
\ No newline at end of file
diff -pruN 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/schema/schema.go 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/schema/schema.go
--- 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/schema/schema.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/schema/schema.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,164 @@
+/*
+   Copyright 2020 The Compose Specification Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package schema
+
+import (
+	// Enable support for embedded static resources
+	_ "embed"
+	"fmt"
+	"strings"
+	"time"
+
+	"github.com/xeipuuv/gojsonschema"
+)
+
+type portsFormatChecker struct{}
+
+func (checker portsFormatChecker) IsFormat(_ interface{}) bool {
+	// TODO: implement this
+	return true
+}
+
+type durationFormatChecker struct{}
+
+func (checker durationFormatChecker) IsFormat(input interface{}) bool {
+	value, ok := input.(string)
+	if !ok {
+		return false
+	}
+	_, err := time.ParseDuration(value)
+	return err == nil
+}
+
+func init() {
+	gojsonschema.FormatCheckers.Add("expose", portsFormatChecker{})
+	gojsonschema.FormatCheckers.Add("ports", portsFormatChecker{})
+	gojsonschema.FormatCheckers.Add("duration", durationFormatChecker{})
+}
+
+// Schema is the compose-spec JSON schema
+//
+//go:embed compose-spec.json
+var Schema string
+
+// Validate uses the jsonschema to validate the configuration
+func Validate(config map[string]interface{}) error {
+	schemaLoader := gojsonschema.NewStringLoader(Schema)
+	dataLoader := gojsonschema.NewGoLoader(config)
+
+	result, err := gojsonschema.Validate(schemaLoader, dataLoader)
+	if err != nil {
+		return err
+	}
+
+	if !result.Valid() {
+		return toError(result)
+	}
+
+	return nil
+}
+
+func toError(result *gojsonschema.Result) error {
+	err := getMostSpecificError(result.Errors())
+	return err
+}
+
+const (
+	jsonschemaOneOf = "number_one_of"
+	jsonschemaAnyOf = "number_any_of"
+)
+
+func getDescription(err validationError) string {
+	switch err.parent.Type() {
+	case "invalid_type":
+		if expectedType, ok := err.parent.Details()["expected"].(string); ok {
+			return fmt.Sprintf("must be a %s", humanReadableType(expectedType))
+		}
+	case jsonschemaOneOf, jsonschemaAnyOf:
+		if err.child == nil {
+			return err.parent.Description()
+		}
+		return err.child.Description()
+	}
+	return err.parent.Description()
+}
+
+func humanReadableType(definition string) string {
+	if definition[0:1] == "[" {
+		allTypes := strings.Split(definition[1:len(definition)-1], ",")
+		for i, t := range allTypes {
+			allTypes[i] = humanReadableType(t)
+		}
+		return fmt.Sprintf(
+			"%s or %s",
+			strings.Join(allTypes[0:len(allTypes)-1], ", "),
+			allTypes[len(allTypes)-1],
+		)
+	}
+	if definition == "object" {
+		return "mapping"
+	}
+	if definition == "array" {
+		return "list"
+	}
+	return definition
+}
+
+type validationError struct {
+	parent gojsonschema.ResultError
+	child  gojsonschema.ResultError
+}
+
+func (err validationError) Error() string {
+	description := getDescription(err)
+	return fmt.Sprintf("%s %s", err.parent.Field(), description)
+}
+
+func getMostSpecificError(errors []gojsonschema.ResultError) validationError {
+	mostSpecificError := 0
+	for i, err := range errors {
+		if specificity(err) > specificity(errors[mostSpecificError]) {
+			mostSpecificError = i
+			continue
+		}
+
+		if specificity(err) == specificity(errors[mostSpecificError]) {
+			// Invalid type errors win in a tie-breaker for most specific field name
+			if err.Type() == "invalid_type" && errors[mostSpecificError].Type() != "invalid_type" {
+				mostSpecificError = i
+			}
+		}
+	}
+
+	if mostSpecificError+1 == len(errors) {
+		return validationError{parent: errors[mostSpecificError]}
+	}
+
+	switch errors[mostSpecificError].Type() {
+	case "number_one_of", "number_any_of":
+		return validationError{
+			parent: errors[mostSpecificError],
+			child:  errors[mostSpecificError+1],
+		}
+	default:
+		return validationError{parent: errors[mostSpecificError]}
+	}
+}
+
+func specificity(err gojsonschema.ResultError) int {
+	return len(strings.Split(err.Field(), "."))
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/schema/using-variables.yaml 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/schema/using-variables.yaml
--- 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/schema/using-variables.yaml	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/schema/using-variables.yaml	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,123 @@
+name: ${VARIABLE}
+services:
+  foo:
+    deploy:
+      mode: ${VARIABLE}
+      replicas: ${VARIABLE}
+      rollback_config:
+        parallelism: ${VARIABLE}
+        delay: ${VARIABLE}
+        failure_action: ${VARIABLE}
+        monitor: ${VARIABLE}
+        max_failure_ratio: ${VARIABLE}
+      update_config:
+        parallelism: ${VARIABLE}
+        delay: ${VARIABLE}
+        failure_action: ${VARIABLE}
+        monitor: ${VARIABLE}
+        max_failure_ratio: ${VARIABLE}
+      resources:
+        limits:
+          memory: ${VARIABLE}
+        reservations:
+          memory: ${VARIABLE}
+          generic_resources:
+            - discrete_resource_spec:
+                kind: ${VARIABLE}
+                value: ${VARIABLE}
+            - discrete_resource_spec:
+                kind: ${VARIABLE}
+                value: ${VARIABLE}
+      restart_policy:
+        condition: ${VARIABLE}
+        delay: ${VARIABLE}
+        max_attempts: ${VARIABLE}
+        window: ${VARIABLE}
+      placement:
+        max_replicas_per_node: ${VARIABLE}
+        preferences:
+          - spread: ${VARIABLE}
+      endpoint_mode: ${VARIABLE}
+    expose:
+      - ${VARIABLE}
+    external_links:
+      - ${VARIABLE}
+    extra_hosts:
+      - ${VARIABLE}
+    hostname: ${VARIABLE}
+
+    healthcheck:
+      test: ${VARIABLE}
+      interval: ${VARIABLE}
+      timeout: ${VARIABLE}
+      retries: ${VARIABLE}
+      start_period: ${VARIABLE}
+      start_interval: ${VARIABLE}
+    image: ${VARIABLE}
+    mac_address: ${VARIABLE}
+    networks:
+      some-network:
+        aliases:
+          - ${VARIABLE}
+      other-network:
+        ipv4_address: ${VARIABLE}
+        ipv6_address: ${VARIABLE}
+        mac_address: ${VARIABLE}
+    ports:
+      - ${VARIABLE}
+    privileged: ${VARIABLE}
+    read_only: ${VARIABLE}
+    restart: ${VARIABLE}
+    secrets:
+      - source: ${VARIABLE}
+        target: ${VARIABLE}
+        uid: ${VARIABLE}
+        gid: ${VARIABLE}
+        mode: ${VARIABLE}
+    stdin_open: ${VARIABLE}
+    stop_grace_period: ${VARIABLE}
+    stop_signal: ${VARIABLE}
+    storage_opt:
+      size: ${VARIABLE}
+    sysctls:
+      net.core.somaxconn: ${VARIABLE}
+    tmpfs:
+      - ${VARIABLE}
+    tty: ${VARIABLE}
+    ulimits:
+      nproc: ${VARIABLE}
+      nofile:
+        soft: ${VARIABLE}
+        hard: ${VARIABLE}
+    user: ${VARIABLE}
+    volumes:
+      - ${VARIABLE}:${VARIABLE}
+      - type: tmpfs
+        target: ${VARIABLE}
+        tmpfs:
+          size: ${VARIABLE}
+
+networks:
+  network:
+    ipam:
+      driver: ${VARIABLE}
+      config:
+        - subnet: ${VARIABLE}
+          ip_range: ${VARIABLE}
+          gateway: ${VARIABLE}
+          aux_addresses:
+            host1: ${VARIABLE}
+  external-network:
+    external: ${VARIABLE}
+
+volumes:
+  external-volume:
+    external: ${VARIABLE}
+
+configs:
+  config1:
+    external: ${VARIABLE}
+
+secrets:
+  secret1:
+    external: ${VARIABLE}
diff -pruN 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/template/template.go 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/template/template.go
--- 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/template/template.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/template/template.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,377 @@
+/*
+   Copyright 2020 The Compose Specification Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package template
+
+import (
+	"errors"
+	"fmt"
+	"regexp"
+	"sort"
+	"strings"
+
+	"github.com/sirupsen/logrus"
+)
+
+var delimiter = "\\$"
+var substitutionNamed = "[_a-z][_a-z0-9]*"
+var substitutionBraced = "[_a-z][_a-z0-9]*(?::?[-+?](.*))?"
+
+var groupEscaped = "escaped"
+var groupNamed = "named"
+var groupBraced = "braced"
+var groupInvalid = "invalid"
+
+var patternString = fmt.Sprintf(
+	"%s(?i:(?P<%s>%s)|(?P<%s>%s)|{(?:(?P<%s>%s)}|(?P<%s>)))",
+	delimiter,
+	groupEscaped, delimiter,
+	groupNamed, substitutionNamed,
+	groupBraced, substitutionBraced,
+	groupInvalid,
+)
+
+var DefaultPattern = regexp.MustCompile(patternString)
+
+// InvalidTemplateError is returned when a variable template is not in a valid
+// format
+type InvalidTemplateError struct {
+	Template string
+}
+
+func (e InvalidTemplateError) Error() string {
+	return fmt.Sprintf("Invalid template: %#v", e.Template)
+}
+
+// MissingRequiredError is returned when a variable template is missing
+type MissingRequiredError struct {
+	Variable string
+	Reason   string
+}
+
+func (e MissingRequiredError) Error() string {
+	if e.Reason != "" {
+		return fmt.Sprintf("required variable %s is missing a value: %s", e.Variable, e.Reason)
+	}
+	return fmt.Sprintf("required variable %s is missing a value", e.Variable)
+}
+
+// Mapping is a user-supplied function which maps from variable names to values.
+// Returns the value as a string and a bool indicating whether
+// the value is present, to distinguish between an empty string
+// and the absence of a value.
+type Mapping func(string) (string, bool)
+
+// SubstituteFunc is a user-supplied function that apply substitution.
+// Returns the value as a string, a bool indicating if the function could apply
+// the substitution and an error.
+type SubstituteFunc func(string, Mapping) (string, bool, error)
+
+// ReplacementFunc is a user-supplied function that is apply to the matching
+// substring. Returns the value as a string and an error.
+type ReplacementFunc func(string, Mapping, *Config) (string, error)
+
+type Config struct {
+	pattern         *regexp.Regexp
+	substituteFunc  SubstituteFunc
+	replacementFunc ReplacementFunc
+	logging         bool
+}
+
+type Option func(*Config)
+
+func WithPattern(pattern *regexp.Regexp) Option {
+	return func(cfg *Config) {
+		cfg.pattern = pattern
+	}
+}
+
+func WithSubstitutionFunction(subsFunc SubstituteFunc) Option {
+	return func(cfg *Config) {
+		cfg.substituteFunc = subsFunc
+	}
+}
+
+func WithReplacementFunction(replacementFunc ReplacementFunc) Option {
+	return func(cfg *Config) {
+		cfg.replacementFunc = replacementFunc
+	}
+}
+
+func WithoutLogging(cfg *Config) {
+	cfg.logging = false
+}
+
+// SubstituteWithOptions substitute variables in the string with their values.
+// It accepts additional options such as a custom function or pattern.
+func SubstituteWithOptions(template string, mapping Mapping, options ...Option) (string, error) {
+	var returnErr error
+
+	cfg := &Config{
+		pattern:         DefaultPattern,
+		replacementFunc: DefaultReplacementFunc,
+		logging:         true,
+	}
+	for _, o := range options {
+		o(cfg)
+	}
+
+	result := cfg.pattern.ReplaceAllStringFunc(template, func(substring string) string {
+		replacement, err := cfg.replacementFunc(substring, mapping, cfg)
+		if err != nil {
+			// Add the template for template errors
+			var tmplErr *InvalidTemplateError
+			if errors.As(err, &tmplErr) {
+				if tmplErr.Template == "" {
+					tmplErr.Template = template
+				}
+			}
+			// Save the first error to be returned
+			if returnErr == nil {
+				returnErr = err
+			}
+
+		}
+		return replacement
+	})
+
+	return result, returnErr
+}
+
+func DefaultReplacementFunc(substring string, mapping Mapping, cfg *Config) (string, error) {
+	value, _, err := DefaultReplacementAppliedFunc(substring, mapping, cfg)
+	return value, err
+}
+
+func DefaultReplacementAppliedFunc(substring string, mapping Mapping, cfg *Config) (string, bool, error) {
+	pattern := cfg.pattern
+	subsFunc := cfg.substituteFunc
+	if subsFunc == nil {
+		_, subsFunc = getSubstitutionFunctionForTemplate(substring)
+	}
+
+	closingBraceIndex := getFirstBraceClosingIndex(substring)
+	rest := ""
+	if closingBraceIndex > -1 {
+		rest = substring[closingBraceIndex+1:]
+		substring = substring[0 : closingBraceIndex+1]
+	}
+
+	matches := pattern.FindStringSubmatch(substring)
+	groups := matchGroups(matches, pattern)
+	if escaped := groups[groupEscaped]; escaped != "" {
+		return escaped, true, nil
+	}
+
+	braced := false
+	substitution := groups[groupNamed]
+	if substitution == "" {
+		substitution = groups[groupBraced]
+		braced = true
+	}
+
+	if substitution == "" {
+		return "", false, &InvalidTemplateError{}
+	}
+
+	if braced {
+		value, applied, err := subsFunc(substitution, mapping)
+		if err != nil {
+			return "", false, err
+		}
+		if applied {
+			interpolatedNested, err := SubstituteWith(rest, mapping, pattern)
+			if err != nil {
+				return "", false, err
+			}
+			return value + interpolatedNested, true, nil
+		}
+	}
+
+	value, ok := mapping(substitution)
+	if !ok && cfg.logging {
+		logrus.Warnf("The %q variable is not set. Defaulting to a blank string.", substitution)
+	}
+
+	return value, ok, nil
+}
+
+// SubstituteWith substitute variables in the string with their values.
+// It accepts additional substitute function.
+func SubstituteWith(template string, mapping Mapping, pattern *regexp.Regexp, subsFuncs ...SubstituteFunc) (string, error) {
+	options := []Option{
+		WithPattern(pattern),
+	}
+	if len(subsFuncs) > 0 {
+		options = append(options, WithSubstitutionFunction(subsFuncs[0]))
+	}
+
+	return SubstituteWithOptions(template, mapping, options...)
+}
+
+func getSubstitutionFunctionForTemplate(template string) (string, SubstituteFunc) {
+	interpolationMapping := []struct {
+		string
+		SubstituteFunc
+	}{
+		{":?", requiredErrorWhenEmptyOrUnset},
+		{"?", requiredErrorWhenUnset},
+		{":-", defaultWhenEmptyOrUnset},
+		{"-", defaultWhenUnset},
+		{":+", defaultWhenNotEmpty},
+		{"+", defaultWhenSet},
+	}
+	sort.Slice(interpolationMapping, func(i, j int) bool {
+		idxI := strings.Index(template, interpolationMapping[i].string)
+		idxJ := strings.Index(template, interpolationMapping[j].string)
+		if idxI < 0 {
+			return false
+		}
+		if idxJ < 0 {
+			return true
+		}
+		return idxI < idxJ
+	})
+
+	return interpolationMapping[0].string, interpolationMapping[0].SubstituteFunc
+}
+
+func getFirstBraceClosingIndex(s string) int {
+	openVariableBraces := 0
+	for i := 0; i < len(s); i++ {
+		if s[i] == '}' {
+			openVariableBraces--
+			if openVariableBraces == 0 {
+				return i
+			}
+		}
+		if s[i] == '{' {
+			openVariableBraces++
+			i++
+		}
+	}
+	return -1
+}
+
+// Substitute variables in the string with their values
+func Substitute(template string, mapping Mapping) (string, error) {
+	return SubstituteWith(template, mapping, DefaultPattern)
+}
+
+// Soft default (fall back if unset or empty)
+func defaultWhenEmptyOrUnset(substitution string, mapping Mapping) (string, bool, error) {
+	return withDefaultWhenAbsence(substitution, mapping, true)
+}
+
+// Hard default (fall back if-and-only-if empty)
+func defaultWhenUnset(substitution string, mapping Mapping) (string, bool, error) {
+	return withDefaultWhenAbsence(substitution, mapping, false)
+}
+
+func defaultWhenNotEmpty(substitution string, mapping Mapping) (string, bool, error) {
+	return withDefaultWhenPresence(substitution, mapping, true)
+}
+
+func defaultWhenSet(substitution string, mapping Mapping) (string, bool, error) {
+	return withDefaultWhenPresence(substitution, mapping, false)
+}
+
+func requiredErrorWhenEmptyOrUnset(substitution string, mapping Mapping) (string, bool, error) {
+	return withRequired(substitution, mapping, ":?", func(v string) bool { return v != "" })
+}
+
+func requiredErrorWhenUnset(substitution string, mapping Mapping) (string, bool, error) {
+	return withRequired(substitution, mapping, "?", func(_ string) bool { return true })
+}
+
+func withDefaultWhenPresence(substitution string, mapping Mapping, notEmpty bool) (string, bool, error) {
+	sep := "+"
+	if notEmpty {
+		sep = ":+"
+	}
+	if !strings.Contains(substitution, sep) {
+		return "", false, nil
+	}
+	name, defaultValue := partition(substitution, sep)
+	defaultValue, err := Substitute(defaultValue, mapping)
+	if err != nil {
+		return "", false, err
+	}
+	value, ok := mapping(name)
+	if ok && (!notEmpty || (notEmpty && value != "")) {
+		return defaultValue, true, nil
+	}
+	return value, true, nil
+}
+
+func withDefaultWhenAbsence(substitution string, mapping Mapping, emptyOrUnset bool) (string, bool, error) {
+	sep := "-"
+	if emptyOrUnset {
+		sep = ":-"
+	}
+	if !strings.Contains(substitution, sep) {
+		return "", false, nil
+	}
+	name, defaultValue := partition(substitution, sep)
+	defaultValue, err := Substitute(defaultValue, mapping)
+	if err != nil {
+		return "", false, err
+	}
+	value, ok := mapping(name)
+	if !ok || (emptyOrUnset && value == "") {
+		return defaultValue, true, nil
+	}
+	return value, true, nil
+}
+
+func withRequired(substitution string, mapping Mapping, sep string, valid func(string) bool) (string, bool, error) {
+	if !strings.Contains(substitution, sep) {
+		return "", false, nil
+	}
+	name, errorMessage := partition(substitution, sep)
+	errorMessage, err := Substitute(errorMessage, mapping)
+	if err != nil {
+		return "", false, err
+	}
+	value, ok := mapping(name)
+	if !ok || !valid(value) {
+		return "", true, &MissingRequiredError{
+			Reason:   errorMessage,
+			Variable: name,
+		}
+	}
+	return value, true, nil
+}
+
+func matchGroups(matches []string, pattern *regexp.Regexp) map[string]string {
+	groups := make(map[string]string)
+	for i, name := range pattern.SubexpNames()[1:] {
+		groups[name] = matches[i+1]
+	}
+	return groups
+}
+
+// Split the string at the first occurrence of sep, and return the part before the separator,
+// and the part after the separator.
+//
+// If the separator is not found, return the string itself, followed by an empty string.
+func partition(s, sep string) (string, string) {
+	if strings.Contains(s, sep) {
+		parts := strings.SplitN(s, sep, 2)
+		return parts[0], parts[1]
+	}
+	return s, ""
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/template/variables.go 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/template/variables.go
--- 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/template/variables.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/template/variables.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,158 @@
+/*
+   Copyright 2020 The Compose Specification Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package template
+
+import (
+	"regexp"
+	"strings"
+)
+
+type Variable struct {
+	Name          string
+	DefaultValue  string
+	PresenceValue string
+	Required      bool
+}
+
+// ExtractVariables returns a map of all the variables defined in the specified
+// compose file (dict representation) and their default value if any.
+func ExtractVariables(configDict map[string]interface{}, pattern *regexp.Regexp) map[string]Variable {
+	if pattern == nil {
+		pattern = DefaultPattern
+	}
+	return recurseExtract(configDict, pattern)
+}
+
+func recurseExtract(value interface{}, pattern *regexp.Regexp) map[string]Variable {
+	m := map[string]Variable{}
+
+	switch value := value.(type) {
+	case string:
+		if values, is := extractVariable(value, pattern); is {
+			for _, v := range values {
+				m[v.Name] = v
+			}
+		}
+	case map[string]interface{}:
+		for _, elem := range value {
+			submap := recurseExtract(elem, pattern)
+			for key, value := range submap {
+				m[key] = value
+			}
+		}
+
+	case []interface{}:
+		for _, elem := range value {
+			if values, is := extractVariable(elem, pattern); is {
+				for _, v := range values {
+					m[v.Name] = v
+				}
+			}
+		}
+	}
+
+	return m
+}
+
+func extractVariable(value interface{}, pattern *regexp.Regexp) ([]Variable, bool) {
+	sValue, ok := value.(string)
+	if !ok {
+		return []Variable{}, false
+	}
+	matches := pattern.FindAllStringSubmatch(sValue, -1)
+	if len(matches) == 0 {
+		return []Variable{}, false
+	}
+	values := []Variable{}
+	for _, match := range matches {
+		groups := matchGroups(match, pattern)
+		if escaped := groups[groupEscaped]; escaped != "" {
+			continue
+		}
+		val := groups[groupNamed]
+		if val == "" {
+			val = groups[groupBraced]
+			s := match[0]
+			i := getFirstBraceClosingIndex(s)
+			if i > 0 {
+				val = s[2:i]
+				if len(s) > i {
+					if v, b := extractVariable(s[i+1:], pattern); b {
+						values = append(values, v...)
+					}
+				}
+			}
+		}
+		name := val
+		var defaultValue string
+		var presenceValue string
+		var required bool
+		i := strings.IndexFunc(val, func(r rune) bool {
+			if r >= 'a' && r <= 'z' {
+				return false
+			}
+			if r >= 'A' && r <= 'Z' {
+				return false
+			}
+			if r >= '0' && r <= '9' {
+				return false
+			}
+			if r == '_' {
+				return false
+			}
+			return true
+		})
+
+		if i > 0 {
+			name = val[:i]
+			rest := val[i:]
+			switch {
+			case strings.HasPrefix(rest, ":?"):
+				required = true
+			case strings.HasPrefix(rest, "?"):
+				required = true
+			case strings.HasPrefix(rest, ":-"):
+				defaultValue = rest[2:]
+			case strings.HasPrefix(rest, "-"):
+				defaultValue = rest[1:]
+			case strings.HasPrefix(rest, ":+"):
+				presenceValue = rest[2:]
+			case strings.HasPrefix(rest, "+"):
+				presenceValue = rest[1:]
+			}
+		}
+
+		values = append(values, Variable{
+			Name:          name,
+			DefaultValue:  defaultValue,
+			PresenceValue: presenceValue,
+			Required:      required,
+		})
+
+		if defaultValue != "" {
+			if v, b := extractVariable(defaultValue, pattern); b {
+				values = append(values, v...)
+			}
+		}
+		if presenceValue != "" {
+			if v, b := extractVariable(presenceValue, pattern); b {
+				values = append(values, v...)
+			}
+		}
+	}
+	return values, len(values) > 0
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/transform/build.go 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/transform/build.go
--- 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/transform/build.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/transform/build.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,48 @@
+/*
+   Copyright 2020 The Compose Specification Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package transform
+
+import (
+	"fmt"
+
+	"github.com/compose-spec/compose-go/v2/tree"
+)
+
+func transformBuild(data any, p tree.Path, ignoreParseError bool) (any, error) {
+	switch v := data.(type) {
+	case map[string]any:
+		return transformMapping(v, p, ignoreParseError)
+	case string:
+		return map[string]any{
+			"context": v,
+		}, nil
+	default:
+		return data, fmt.Errorf("%s: invalid type %T for build", p, v)
+	}
+}
+
+func defaultBuildContext(data any, _ tree.Path, _ bool) (any, error) {
+	switch v := data.(type) {
+	case map[string]any:
+		if _, ok := v["context"]; !ok {
+			v["context"] = "."
+		}
+		return v, nil
+	default:
+		return data, nil
+	}
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/transform/canonical.go 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/transform/canonical.go
--- 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/transform/canonical.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/transform/canonical.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,119 @@
+/*
+   Copyright 2020 The Compose Specification Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package transform
+
+import (
+	"github.com/compose-spec/compose-go/v2/tree"
+)
+
+type transformFunc func(data any, p tree.Path, ignoreParseError bool) (any, error)
+
+var transformers = map[tree.Path]transformFunc{}
+
+func init() {
+	transformers["services.*"] = transformService
+	transformers["services.*.build.secrets.*"] = transformFileMount
+	transformers["services.*.build.additional_contexts"] = transformKeyValue
+	transformers["services.*.depends_on"] = transformDependsOn
+	transformers["services.*.env_file"] = transformEnvFile
+	transformers["services.*.label_file"] = transformStringOrList
+	transformers["services.*.extends"] = transformExtends
+	transformers["services.*.networks"] = transformServiceNetworks
+	transformers["services.*.volumes.*"] = transformVolumeMount
+	transformers["services.*.dns"] = transformStringOrList
+	transformers["services.*.devices.*"] = transformDeviceMapping
+	transformers["services.*.secrets.*"] = transformFileMount
+	transformers["services.*.configs.*"] = transformFileMount
+	transformers["services.*.ports"] = transformPorts
+	transformers["services.*.build"] = transformBuild
+	transformers["services.*.build.ssh"] = transformSSH
+	transformers["services.*.ulimits.*"] = transformUlimits
+	transformers["services.*.build.ulimits.*"] = transformUlimits
+	transformers["volumes.*"] = transformMaybeExternal
+	transformers["networks.*"] = transformMaybeExternal
+	transformers["secrets.*"] = transformMaybeExternal
+	transformers["configs.*"] = transformMaybeExternal
+	transformers["include.*"] = transformInclude
+}
+
+func transformStringOrList(data any, _ tree.Path, _ bool) (any, error) {
+	switch t := data.(type) {
+	case string:
+		return []any{t}, nil
+	default:
+		return data, nil
+	}
+}
+
+// Canonical transforms a compose model into canonical syntax
+func Canonical(yaml map[string]any, ignoreParseError bool) (map[string]any, error) {
+	canonical, err := transform(yaml, tree.NewPath(), ignoreParseError)
+	if err != nil {
+		return nil, err
+	}
+	return canonical.(map[string]any), nil
+}
+
+func transform(data any, p tree.Path, ignoreParseError bool) (any, error) {
+	for pattern, transformer := range transformers {
+		if p.Matches(pattern) {
+			t, err := transformer(data, p, ignoreParseError)
+			if err != nil {
+				return nil, err
+			}
+			return t, nil
+		}
+	}
+	switch v := data.(type) {
+	case map[string]any:
+		a, err := transformMapping(v, p, ignoreParseError)
+		if err != nil {
+			return a, err
+		}
+		return v, nil
+	case []any:
+		a, err := transformSequence(v, p, ignoreParseError)
+		if err != nil {
+			return a, err
+		}
+		return v, nil
+	default:
+		return data, nil
+	}
+}
+
+func transformSequence(v []any, p tree.Path, ignoreParseError bool) ([]any, error) {
+	for i, e := range v {
+		t, err := transform(e, p.Next("[]"), ignoreParseError)
+		if err != nil {
+			return nil, err
+		}
+		v[i] = t
+	}
+	return v, nil
+}
+
+func transformMapping(v map[string]any, p tree.Path, ignoreParseError bool) (map[string]any, error) {
+	for k, e := range v {
+		t, err := transform(e, p.Next(k), ignoreParseError)
+		if err != nil {
+			return nil, err
+		}
+		v[k] = t
+	}
+	return v, nil
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/transform/defaults.go 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/transform/defaults.go
--- 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/transform/defaults.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/transform/defaults.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,90 @@
+/*
+   Copyright 2020 The Compose Specification Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package transform
+
+import (
+	"github.com/compose-spec/compose-go/v2/tree"
+)
+
+var defaultValues = map[tree.Path]transformFunc{}
+
+func init() {
+	defaultValues["services.*.build"] = defaultBuildContext
+	defaultValues["services.*.secrets.*"] = defaultSecretMount
+	defaultValues["services.*.ports.*"] = portDefaults
+	defaultValues["services.*.deploy.resources.reservations.devices.*"] = deviceRequestDefaults
+	defaultValues["services.*.gpus.*"] = deviceRequestDefaults
+}
+
+// SetDefaultValues transforms a compose model to set default values to missing attributes
+func SetDefaultValues(yaml map[string]any) (map[string]any, error) {
+	result, err := setDefaults(yaml, tree.NewPath())
+	if err != nil {
+		return nil, err
+	}
+	return result.(map[string]any), nil
+}
+
+func setDefaults(data any, p tree.Path) (any, error) {
+	for pattern, transformer := range defaultValues {
+		if p.Matches(pattern) {
+			t, err := transformer(data, p, false)
+			if err != nil {
+				return nil, err
+			}
+			return t, nil
+		}
+	}
+	switch v := data.(type) {
+	case map[string]any:
+		a, err := setDefaultsMapping(v, p)
+		if err != nil {
+			return a, err
+		}
+		return v, nil
+	case []any:
+		a, err := setDefaultsSequence(v, p)
+		if err != nil {
+			return a, err
+		}
+		return v, nil
+	default:
+		return data, nil
+	}
+}
+
+func setDefaultsSequence(v []any, p tree.Path) ([]any, error) {
+	for i, e := range v {
+		t, err := setDefaults(e, p.Next("[]"))
+		if err != nil {
+			return nil, err
+		}
+		v[i] = t
+	}
+	return v, nil
+}
+
+func setDefaultsMapping(v map[string]any, p tree.Path) (map[string]any, error) {
+	for k, e := range v {
+		t, err := setDefaults(e, p.Next(k))
+		if err != nil {
+			return nil, err
+		}
+		v[k] = t
+	}
+	return v, nil
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/transform/dependson.go 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/transform/dependson.go
--- 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/transform/dependson.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/transform/dependson.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,53 @@
+/*
+   Copyright 2020 The Compose Specification Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package transform
+
+import (
+	"fmt"
+
+	"github.com/compose-spec/compose-go/v2/tree"
+)
+
+// transformDependsOn normalizes depends_on to its canonical mapping form:
+// each entry gains condition=service_started and required=true when unset,
+// and the list-of-names shorthand expands to the default mapping entry.
+func transformDependsOn(data any, p tree.Path, _ bool) (any, error) {
+	switch v := data.(type) {
+	case map[string]any:
+		for i, e := range v {
+			d, ok := e.(map[string]any)
+			if !ok {
+				// report the offending entry, not the whole mapping
+				return nil, fmt.Errorf("%s.%s: unsupported value %v", p, i, e)
+			}
+			if _, ok := d["condition"]; !ok {
+				d["condition"] = "service_started"
+			}
+			if _, ok := d["required"]; !ok {
+				d["required"] = true
+			}
+		}
+		return v, nil
+	case []any:
+		d := map[string]any{}
+		for _, k := range v {
+			name, ok := k.(string)
+			if !ok {
+				// an unchecked assertion here would panic on e.g. an int entry
+				return nil, fmt.Errorf("%s: invalid depends_on entry %v", p, k)
+			}
+			d[name] = map[string]any{
+				"condition": "service_started",
+				"required":  true,
+			}
+		}
+		return d, nil
+	default:
+		// fixed typo: the attribute is named depends_on
+		return data, fmt.Errorf("%s: invalid type %T for depends_on", p, v)
+	}
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/transform/device.go 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/transform/device.go
--- 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/transform/device.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/transform/device.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,60 @@
+/*
+   Copyright 2020 The Compose Specification Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package transform
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/compose-spec/compose-go/v2/tree"
+)
+
+// transformDeviceMapping normalizes a device mapping to its long (mapping)
+// form. The string shorthand is "source[:target[:permissions]]", with
+// permissions defaulting to "rwm" and target defaulting to source.
+func transformDeviceMapping(data any, p tree.Path, ignoreParseError bool) (any, error) {
+	switch v := data.(type) {
+	case map[string]any:
+		// already long syntax
+		return v, nil
+	case string:
+		src := ""
+		dst := ""
+		permissions := "rwm"
+		arr := strings.Split(v, ":")
+		switch len(arr) {
+		case 3:
+			permissions = arr[2]
+			fallthrough
+		case 2:
+			dst = arr[1]
+			fallthrough
+		case 1:
+			src = arr[0]
+		default:
+			// more than three colon-separated fields
+			if !ignoreParseError {
+				return nil, fmt.Errorf("confusing device mapping, please use long syntax: %s", v)
+			}
+			// NOTE(review): with ignoreParseError set we fall through with
+			// empty src/dst and return a mapping with empty fields — confirm
+			// whether returning v unchanged was intended
+		}
+		if dst == "" {
+			dst = src
+		}
+		return map[string]any{
+			"source":      src,
+			"target":      dst,
+			"permissions": permissions,
+		}, nil
+	default:
+		// fixed copy/paste: this transformer handles device mappings,
+		// not service volume mounts
+		return data, fmt.Errorf("%s: invalid type %T for device mapping", p, v)
+	}
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/transform/devices.go 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/transform/devices.go
--- 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/transform/devices.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/transform/devices.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,36 @@
+/*
+   Copyright 2020 The Compose Specification Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package transform
+
+import (
+	"fmt"
+
+	"github.com/compose-spec/compose-go/v2/tree"
+)
+
+// deviceRequestDefaults ensures a device request (deploy.resources.
+// reservations.devices[*] or gpus[*]) declares how many devices to reserve:
+// when neither count nor device_ids is set, default to count: "all".
+func deviceRequestDefaults(data any, p tree.Path, _ bool) (any, error) {
+	v, ok := data.(map[string]any)
+	if !ok {
+		// report the actual input type: after a failed assertion v is the
+		// zero value, so %T on v would always print map[string]interface {}
+		return data, fmt.Errorf("%s: invalid type %T for device request", p, data)
+	}
+	_, hasCount := v["count"]
+	_, hasIds := v["device_ids"]
+	if !hasCount && !hasIds {
+		v["count"] = "all"
+	}
+	return v, nil
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/transform/envfile.go 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/transform/envfile.go
--- 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/transform/envfile.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/transform/envfile.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,55 @@
+/*
+   Copyright 2020 The Compose Specification Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package transform
+
+import (
+	"fmt"
+
+	"github.com/compose-spec/compose-go/v2/tree"
+)
+
+// transformEnvFile normalizes env_file to its canonical list-of-mappings
+// form; a single path shorthand becomes a one-element list.
+func transformEnvFile(data any, p tree.Path, _ bool) (any, error) {
+	switch v := data.(type) {
+	case string:
+		return []any{transformEnvFileValue(v)}, nil
+	case []any:
+		for i := range v {
+			v[i] = transformEnvFileValue(v[i])
+		}
+		return v, nil
+	default:
+		return nil, fmt.Errorf("%s: invalid type %T for env_file", p, v)
+	}
+}
+
+// transformEnvFileValue normalizes one env_file entry: a plain string
+// becomes {path: ..., required: true}; an existing mapping gains
+// required: true when unset.
+// NOTE(review): any other type yields nil (silently replacing the entry in
+// the caller's list) — confirm whether an error would be preferable.
+func transformEnvFileValue(data any) any {
+	switch v := data.(type) {
+	case string:
+		return map[string]any{
+			"path":     v,
+			"required": true,
+		}
+	case map[string]any:
+		if _, ok := v["required"]; !ok {
+			v["required"] = true
+		}
+		return v
+	}
+	return nil
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/transform/extends.go 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/transform/extends.go
--- 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/transform/extends.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/transform/extends.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,36 @@
+/*
+   Copyright 2020 The Compose Specification Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package transform
+
+import (
+	"fmt"
+
+	"github.com/compose-spec/compose-go/v2/tree"
+)
+
+// transformExtends normalizes extends: the string shorthand becomes
+// {service: name}; mappings are transformed recursively.
+func transformExtends(data any, p tree.Path, ignoreParseError bool) (any, error) {
+	switch v := data.(type) {
+	case string:
+		return map[string]any{"service": v}, nil
+	case map[string]any:
+		return transformMapping(v, p, ignoreParseError)
+	default:
+		return data, fmt.Errorf("%s: invalid type %T for extends", p, v)
+	}
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/transform/external.go 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/transform/external.go
--- 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/transform/external.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/transform/external.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,54 @@
+/*
+   Copyright 2020 The Compose Specification Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package transform
+
+import (
+	"fmt"
+
+	"github.com/compose-spec/compose-go/v2/tree"
+	"github.com/sirupsen/logrus"
+)
+
+// transformMaybeExternal normalizes a top-level resource (network, volume,
+// config, secret) that may be flagged external, folding the deprecated
+// external.name mapping form into `external: true` plus `name`.
+func transformMaybeExternal(data any, p tree.Path, ignoreParseError bool) (any, error) {
+	if data == nil {
+		return nil, nil
+	}
+	// NOTE(review): unchecked assertion — a non-mapping value here would
+	// panic; confirm the schema guarantees a mapping at this path
+	resource, err := transformMapping(data.(map[string]any), p, ignoreParseError)
+	if err != nil {
+		return nil, err
+	}
+
+	if ext, ok := resource["external"]; ok {
+		name, named := resource["name"]
+		if external, ok := ext.(map[string]any); ok {
+			// legacy form external: {name: ...} — flatten to a boolean
+			resource["external"] = true
+			if extname, extNamed := external["name"]; extNamed {
+				logrus.Warnf("%s: external.name is deprecated. Please set name and external: true", p)
+				if named && extname != name {
+					return nil, fmt.Errorf("%s: name and external.name conflict; only use name", p)
+				}
+				if !named {
+					// adopt (deprecated) external.name if set
+					resource["name"] = extname
+					return resource, nil
+				}
+			}
+		}
+	}
+
+	return resource, nil
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/transform/include.go 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/transform/include.go
--- 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/transform/include.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/transform/include.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,36 @@
+/*
+   Copyright 2020 The Compose Specification Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package transform
+
+import (
+	"fmt"
+
+	"github.com/compose-spec/compose-go/v2/tree"
+)
+
+// transformInclude normalizes an include entry: the string shorthand
+// becomes {path: ...}; mappings pass through unchanged.
+func transformInclude(data any, p tree.Path, _ bool) (any, error) {
+	switch v := data.(type) {
+	case map[string]any:
+		return v, nil
+	case string:
+		return map[string]any{
+			"path": v,
+		}, nil
+	default:
+		// fixed copy/paste: this transformer handles include, not external
+		return data, fmt.Errorf("%s: invalid type %T for include", p, v)
+	}
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/transform/mapping.go 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/transform/mapping.go
--- 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/transform/mapping.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/transform/mapping.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,46 @@
+/*
+   Copyright 2020 The Compose Specification Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package transform
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/compose-spec/compose-go/v2/tree"
+)
+
+// transformKeyValue normalizes a sequence of "key=value" strings into a
+// mapping; an existing mapping passes through unchanged.
+func transformKeyValue(data any, p tree.Path, ignoreParseError bool) (any, error) {
+	switch v := data.(type) {
+	case map[string]any:
+		return v, nil
+	case []any:
+		mapping := map[string]any{}
+		for _, e := range v {
+			key, value, found := strings.Cut(e.(string), "=")
+			if found {
+				mapping[key] = value
+				continue
+			}
+			if ignoreParseError {
+				return data, nil
+			}
+			return nil, fmt.Errorf("%s: invalid value %s, expected key=value", p, e)
+		}
+		return mapping, nil
+	default:
+		return nil, fmt.Errorf("%s: invalid type %T", p, v)
+	}
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/transform/ports.go 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/transform/ports.go
--- 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/transform/ports.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/transform/ports.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,104 @@
+/*
+   Copyright 2020 The Compose Specification Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package transform
+
+import (
+	"fmt"
+
+	"github.com/compose-spec/compose-go/v2/tree"
+	"github.com/compose-spec/compose-go/v2/types"
+	"github.com/go-viper/mapstructure/v2"
+)
+
+// transformPorts normalizes the service ports list. Int and string entries
+// are parsed with types.ParsePortConfig and may each expand to several
+// canonical port mappings; mapping entries pass through unchanged.
+func transformPorts(data any, p tree.Path, ignoreParseError bool) (any, error) {
+	switch entries := data.(type) {
+	case []any:
+		// We process the list instead of individual items here.
+		// The reason is that one entry might be mapped to multiple ServicePortConfig.
+		// Therefore we take an input of a list and return an output of a list.
+		var ports []any
+		for _, entry := range entries {
+			switch value := entry.(type) {
+			case int:
+				// NOTE(review): unlike the string case, a parse error on an
+				// int entry is not suppressed by ignoreParseError — confirm
+				// intentional (a bare int should always parse)
+				parsed, err := types.ParsePortConfig(fmt.Sprint(value))
+				if err != nil {
+					return data, err
+				}
+				for _, v := range parsed {
+					m, err := encode(v)
+					if err != nil {
+						return nil, err
+					}
+					ports = append(ports, m)
+				}
+			case string:
+				parsed, err := types.ParsePortConfig(value)
+				if err != nil {
+					if ignoreParseError {
+						return data, nil
+					}
+					return nil, err
+				}
+				// removed a duplicated, unreachable `if err != nil` check here
+				for _, v := range parsed {
+					m, err := encode(v)
+					if err != nil {
+						return nil, err
+					}
+					ports = append(ports, m)
+				}
+			case map[string]any:
+				ports = append(ports, value)
+			default:
+				return data, fmt.Errorf("%s: invalid type %T for port", p, value)
+			}
+		}
+		return ports, nil
+	default:
+		return data, fmt.Errorf("%s: invalid type %T for port", p, entries)
+	}
+}
+
+// encode converts a typed struct into a map[string]any keyed by its yaml
+// struct tags.
+func encode(v any) (map[string]any, error) {
+	result := map[string]any{}
+	config := &mapstructure.DecoderConfig{
+		Result:  &result,
+		TagName: "yaml",
+	}
+	decoder, err := mapstructure.NewDecoder(config)
+	if err != nil {
+		return nil, err
+	}
+	err = decoder.Decode(v)
+	return result, err
+}
+
+func portDefaults(data any, _ tree.Path, _ bool) (any, error) {
+	switch v := data.(type) {
+	case map[string]any:
+		if _, ok := v["protocol"]; !ok {
+			v["protocol"] = "tcp"
+		}
+		if _, ok := v["mode"]; !ok {
+			v["mode"] = "ingress"
+		}
+		return v, nil
+	default:
+		return data, nil
+	}
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/transform/secrets.go 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/transform/secrets.go
--- 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/transform/secrets.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/transform/secrets.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,49 @@
+/*
+   Copyright 2020 The Compose Specification Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package transform
+
+import (
+	"fmt"
+
+	"github.com/compose-spec/compose-go/v2/tree"
+)
+
+// transformFileMount normalizes a secret/config mount: the string shorthand
+// becomes {source: name}; mappings pass through unchanged.
+func transformFileMount(data any, p tree.Path, _ bool) (any, error) {
+	switch v := data.(type) {
+	case string:
+		return map[string]any{"source": v}, nil
+	case map[string]any:
+		return data, nil
+	default:
+		return nil, fmt.Errorf("%s: unsupported type %T", p, data)
+	}
+}
+
+// defaultSecretMount fills in the default target for a service secret
+// mount: /run/secrets/<source>.
+func defaultSecretMount(data any, p tree.Path, _ bool) (any, error) {
+	switch v := data.(type) {
+	case map[string]any:
+		source := v["source"]
+		if _, ok := v["target"]; !ok {
+			// NOTE(review): if source is unset this renders a nil value into
+			// the path — confirm source is guaranteed by schema validation
+			v["target"] = fmt.Sprintf("/run/secrets/%s", source)
+		}
+		return v, nil
+	default:
+		return nil, fmt.Errorf("%s: unsupported type %T", p, data)
+	}
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/transform/services.go 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/transform/services.go
--- 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/transform/services.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/transform/services.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,41 @@
+/*
+   Copyright 2020 The Compose Specification Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package transform
+
+import (
+	"github.com/compose-spec/compose-go/v2/tree"
+)
+
+func transformService(data any, p tree.Path, ignoreParseError bool) (any, error) {
+	switch value := data.(type) {
+	case map[string]any:
+		return transformMapping(value, p, ignoreParseError)
+	default:
+		return value, nil
+	}
+}
+
+// transformServiceNetworks normalizes the service networks short (list)
+// syntax into the canonical mapping form with null per-network config.
+func transformServiceNetworks(data any, _ tree.Path, _ bool) (any, error) {
+	slice, ok := data.([]any)
+	if !ok {
+		return data, nil
+	}
+	networks := make(map[string]any, len(slice))
+	for _, net := range slice {
+		networks[net.(string)] = nil
+	}
+	return networks, nil
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/transform/ssh.go 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/transform/ssh.go
--- 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/transform/ssh.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/transform/ssh.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,51 @@
+/*
+   Copyright 2020 The Compose Specification Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package transform
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/compose-spec/compose-go/v2/tree"
+)
+
+// transformSSH normalizes the build ssh attribute: a list of "id=path"
+// entries (or the bare "default" id) becomes a mapping of id to path.
+func transformSSH(data any, p tree.Path, _ bool) (any, error) {
+	switch v := data.(type) {
+	case map[string]any:
+		return v, nil
+	case []any:
+		result := make(map[string]any, len(v))
+		for _, e := range v {
+			s, ok := e.(string)
+			if !ok {
+				return nil, fmt.Errorf("invalid ssh key type %T", e)
+			}
+			if id, path, found := strings.Cut(s, "="); found {
+				result[id] = path
+			} else if s == "default" {
+				// bare "default" resolves the path from the client
+				result[s] = nil
+			} else {
+				return nil, fmt.Errorf("invalid ssh key %q", s)
+			}
+		}
+		return result, nil
+	default:
+		return data, fmt.Errorf("%s: invalid type %T for ssh", p, v)
+	}
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/transform/ulimits.go 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/transform/ulimits.go
--- 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/transform/ulimits.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/transform/ulimits.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,34 @@
+/*
+   Copyright 2020 The Compose Specification Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package transform
+
+import (
+	"fmt"
+
+	"github.com/compose-spec/compose-go/v2/tree"
+)
+
+// transformUlimits normalizes a ulimit entry, which is either a single int
+// (applied as both soft and hard) or a mapping with explicit values.
+func transformUlimits(data any, p tree.Path, _ bool) (any, error) {
+	switch v := data.(type) {
+	case map[string]any:
+		return v, nil
+	case int:
+		return v, nil
+	default:
+		// fixed copy/paste: this transformer handles ulimits, not external
+		return data, fmt.Errorf("%s: invalid type %T for ulimits", p, v)
+	}
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/transform/volume.go 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/transform/volume.go
--- 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/transform/volume.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/transform/volume.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,52 @@
+/*
+   Copyright 2020 The Compose Specification Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package transform
+
+import (
+	"fmt"
+	"path"
+
+	"github.com/compose-spec/compose-go/v2/format"
+	"github.com/compose-spec/compose-go/v2/tree"
+)
+
+// transformVolumeMount normalizes a service volume entry: the string
+// shorthand is parsed by format.ParseVolume into the long mapping form;
+// existing mappings pass through unchanged.
+func transformVolumeMount(data any, p tree.Path, ignoreParseError bool) (any, error) {
+	switch v := data.(type) {
+	case map[string]any:
+		return v, nil
+	case string:
+		volume, err := format.ParseVolume(v) // TODO(ndeloof) ParseVolume should not rely on types and return map[string]
+		if err != nil {
+			if ignoreParseError {
+				// keep the raw string so a later pass can retry the parse
+				return v, nil
+			}
+			return nil, err
+		}
+		// normalize the container path before encoding back to a mapping
+		volume.Target = cleanTarget(volume.Target)
+
+		return encode(volume)
+	default:
+		return data, fmt.Errorf("%s: invalid type %T for service volume mount", p, v)
+	}
+}
+
+// cleanTarget normalizes a mount target path; an unset ("") target is
+// preserved as-is since path.Clean would turn it into ".".
+func cleanTarget(target string) string {
+	if target != "" {
+		return path.Clean(target)
+	}
+	return target
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/tree/path.go 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/tree/path.go
--- 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/tree/path.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/tree/path.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,87 @@
+/*
+   Copyright 2020 The Compose Specification Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package tree
+
+import (
+	"strings"
+)
+
+const pathSeparator = "."
+
+// PathMatchAll is a token used as part of a Path to match any key at that level
+// in the nested structure
+const PathMatchAll = "*"
+
+// PathMatchList is a token used as part of a Path to match items in a list
+const PathMatchList = "[]"
+
+// Path is a dotted path of keys to a value in a nested mapping structure. A *
+// section in a path will match any key in the mapping structure.
+type Path string
+
+// NewPath returns a new Path joining the given segments with the separator
+func NewPath(items ...string) Path {
+	joined := strings.Join(items, pathSeparator)
+	return Path(joined)
+}
+
+// Next returns a new path by appending part to the current path. Any
+// separator (".") inside part is escaped with a placeholder rune so the
+// segment stays atomic when the path is split by Parts; String reverses
+// the escaping.
+// NOTE(review): a part containing "." is not escaped when p is empty —
+// confirm root segments never contain separators.
+func (p Path) Next(part string) Path {
+	if p == "" {
+		return Path(part)
+	}
+	part = strings.ReplaceAll(part, pathSeparator, "👻")
+	return Path(string(p) + pathSeparator + part)
+}
+
+// Parts returns the dot-separated segments of the path.
+func (p Path) Parts() []string {
+	raw := string(p)
+	return strings.Split(raw, pathSeparator)
+}
+
+// Matches reports whether the path matches pattern, where a "*" segment in
+// pattern matches any single segment at that position; segment counts must
+// agree exactly.
+func (p Path) Matches(pattern Path) bool {
+	patternParts := pattern.Parts()
+	parts := p.Parts()
+	if len(patternParts) != len(parts) {
+		return false
+	}
+	for i, part := range parts {
+		if pat := patternParts[i]; pat != PathMatchAll && pat != part {
+			return false
+		}
+	}
+	return true
+}
+
+// Last returns the final segment of the path.
+func (p Path) Last() string {
+	if i := strings.LastIndex(string(p), pathSeparator); i >= 0 {
+		return string(p)[i+1:]
+	}
+	return string(p)
+}
+
+// Parent returns the path with its last segment removed, or "" for a
+// top-level path.
+func (p Path) Parent() Path {
+	if index := strings.LastIndex(string(p), pathSeparator); index > 0 {
+		return p[:index]
+	}
+	return ""
+}
+
+// String renders the path with escaped separators inside segments restored.
+func (p Path) String() string {
+	unescaped := strings.ReplaceAll(string(p), "👻", pathSeparator)
+	return unescaped
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/types/bytes.go 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/types/bytes.go
--- 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/types/bytes.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/types/bytes.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,48 @@
+/*
+   Copyright 2020 The Compose Specification Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package types
+
+import (
+	"fmt"
+
+	"github.com/docker/go-units"
+)
+
+// UnitBytes is the bytes type
+type UnitBytes int64
+
+// MarshalYAML makes UnitBytes implement yaml.Marshaller, emitting the raw
+// byte count in decimal form.
+func (u UnitBytes) MarshalYAML() (interface{}, error) {
+	return fmt.Sprint(int64(u)), nil
+}
+
+// MarshalJSON makes UnitBytes implement json.Marshaler, emitting the byte
+// count as a quoted decimal string.
+func (u UnitBytes) MarshalJSON() ([]byte, error) {
+	quoted := fmt.Sprintf("%q", fmt.Sprint(int64(u)))
+	return []byte(quoted), nil
+}
+
+// DecodeMapstructure converts a YAML scalar into a UnitBytes value: plain
+// ints are taken as bytes; strings are parsed as human-readable sizes
+// (e.g. "640k") via go-units.RAMInBytes.
+// NOTE(review): other types are silently ignored, leaving u unchanged —
+// confirm schema validation rejects them beforehand.
+func (u *UnitBytes) DecodeMapstructure(value interface{}) error {
+	switch v := value.(type) {
+	case int:
+		*u = UnitBytes(v)
+	case string:
+		b, err := units.RAMInBytes(fmt.Sprint(value))
+		*u = UnitBytes(b)
+		return err
+	}
+	return nil
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/types/command.go 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/types/command.go
--- 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/types/command.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/types/command.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,86 @@
+/*
+   Copyright 2020 The Compose Specification Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package types
+
+import "github.com/mattn/go-shellwords"
+
+// ShellCommand is a string or list of string args.
+//
+// When marshaled to YAML, nil command fields will be omitted if `omitempty`
+// is specified as a struct tag. Explicitly empty commands (i.e. `[]` or
+// empty string will serialize to an empty array (`[]`).
+//
+// When marshaled to JSON, the `omitempty` struct must NOT be specified.
+// If the command field is nil, it will be serialized as `null`.
+// Explicitly empty commands (i.e. `[]` or empty string) will serialize to
+// an empty array (`[]`).
+//
+// The distinction between nil and explicitly empty is important to distinguish
+// between an unset value and a provided, but empty, value, which should be
+// preserved so that it can override any base value (e.g. container entrypoint).
+//
+// The different semantics between YAML and JSON are due to limitations with
+// JSON marshaling + `omitempty` in the Go stdlib, while gopkg.in/yaml.v3 gives
+// us more flexibility via the yaml.IsZeroer interface.
+//
+// In the future, it might make sense to make fields of this type be
+// `*ShellCommand` to avoid this situation, but that would constitute a
+// breaking change.
+type ShellCommand []string
+
+// IsZero returns true if the slice is nil.
+//
+// Empty (but non-nil) slices are NOT considered zero values.
+// (yaml.IsZeroer: a nil command is omitted under `omitempty`, while an
+// explicitly empty command must survive so it can override a base value.)
+func (s ShellCommand) IsZero() bool {
+	// we do NOT want len(s) == 0, ONLY explicitly nil
+	return s == nil
+}
+
+// MarshalYAML returns nil (serialized as `null`) for a nil command and the
+// plain string slice otherwise.
+//
+// Typically IsZero already short-circuits marshalling of nil commands under
+// `omitempty`; this keeps serialization accurate when that tag is omitted.
+// No MarshalJSON counterpart is needed: the Go stdlib already renders nil
+// slices as `null`, whereas gopkg.in/yaml.v3 would render them as `[]`.
+func (s ShellCommand) MarshalYAML() (interface{}, error) {
+	if s != nil {
+		return []string(s), nil
+	}
+	return nil, nil
+}
+
+// DecodeMapstructure converts a YAML value into a ShellCommand: a string is
+// split into arguments following shell quoting rules (shellwords); a list
+// is taken as the argument vector verbatim.
+func (s *ShellCommand) DecodeMapstructure(value interface{}) error {
+	switch v := value.(type) {
+	case string:
+		cmd, err := shellwords.Parse(v)
+		if err != nil {
+			return err
+		}
+		*s = cmd
+	case []interface{}:
+		cmd := make([]string, len(v))
+		for i, s := range v {
+			// NOTE(review): unchecked assertion panics on non-string items —
+			// confirm the schema guarantees strings here
+			cmd[i] = s.(string)
+		}
+		*s = cmd
+	}
+	return nil
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/types/config.go 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/types/config.go
--- 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/types/config.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/types/config.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,145 @@
+/*
+   Copyright 2020 The Compose Specification Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package types
+
+import (
+	"encoding/json"
+	"runtime"
+	"strings"
+
+	"github.com/go-viper/mapstructure/v2"
+)
+
+var (
+	// isCaseInsensitiveEnvVars is true on platforms where environment variable names are treated case-insensitively.
+	isCaseInsensitiveEnvVars = (runtime.GOOS == "windows")
+)
+
+// ConfigDetails are the details about a group of ConfigFiles
+type ConfigDetails struct {
+	Version     string
+	WorkingDir  string
+	ConfigFiles []ConfigFile
+	Environment Mapping
+}
+
+// LookupEnv provides a lookup function for environment variables
+func (cd *ConfigDetails) LookupEnv(key string) (string, bool) {
+	v, ok := cd.Environment[key]
+	if !isCaseInsensitiveEnvVars || ok {
+		return v, ok
+	}
+	// variable names must be treated case-insensitively on some platforms (that is, Windows).
+	// Resolves in this way:
+	// * Return the value if its name matches with the passed name case-sensitively.
+	// * Otherwise, return the value if its lower-cased name matches lower-cased passed name.
+	//     * The value is indefinite if multiple variables match.
+	lowerKey := strings.ToLower(key)
+	for k, v := range cd.Environment {
+		if strings.ToLower(k) == lowerKey {
+			return v, true
+		}
+	}
+	return "", false
+}
+
+// ConfigFile is a filename and the contents of the file as a Dict
+type ConfigFile struct {
+	// Filename is the name of the yaml configuration file
+	Filename string
+	// Content is the raw yaml content. Will be loaded from Filename if not set
+	Content []byte
+	// Config if the yaml tree for this config file. Will be parsed from Content if not set
+	Config map[string]interface{}
+}
+
+func (cf ConfigFile) IsStdin() bool {
+	return cf.Filename == "-"
+}
+
+func ToConfigFiles(path []string) (f []ConfigFile) {
+	for _, p := range path {
+		f = append(f, ConfigFile{Filename: p})
+	}
+	return
+}
+
+// Config is a full compose file configuration and model
+type Config struct {
+	Filename   string          `yaml:"-" json:"-"`
+	Name       string          `yaml:"name,omitempty" json:"name,omitempty"`
+	Services   Services        `yaml:"services" json:"services"`
+	Networks   Networks        `yaml:"networks,omitempty" json:"networks,omitempty"`
+	Volumes    Volumes         `yaml:"volumes,omitempty" json:"volumes,omitempty"`
+	Secrets    Secrets         `yaml:"secrets,omitempty" json:"secrets,omitempty"`
+	Configs    Configs         `yaml:"configs,omitempty" json:"configs,omitempty"`
+	Extensions Extensions      `yaml:",inline" json:"-"`
+	Include    []IncludeConfig `yaml:"include,omitempty" json:"include,omitempty"`
+}
+
+// Volumes is a map of VolumeConfig
+type Volumes map[string]VolumeConfig
+
+// Networks is a map of NetworkConfig
+type Networks map[string]NetworkConfig
+
+// Secrets is a map of SecretConfig
+type Secrets map[string]SecretConfig
+
+// Configs is a map of ConfigObjConfig
+type Configs map[string]ConfigObjConfig
+
+// Extensions is a map of custom extension
+type Extensions map[string]any
+
+func (e Extensions) DeepCopy(t Extensions) {
+	for k, v := range e {
+		t[k] = v
+	}
+}
+
+// MarshalJSON makes Config implement json.Marshaler
+func (c Config) MarshalJSON() ([]byte, error) {
+	m := map[string]interface{}{
+		"services": c.Services,
+	}
+
+	if len(c.Networks) > 0 {
+		m["networks"] = c.Networks
+	}
+	if len(c.Volumes) > 0 {
+		m["volumes"] = c.Volumes
+	}
+	if len(c.Secrets) > 0 {
+		m["secrets"] = c.Secrets
+	}
+	if len(c.Configs) > 0 {
+		m["configs"] = c.Configs
+	}
+	for k, v := range c.Extensions {
+		m[k] = v
+	}
+	return json.Marshal(m)
+}
+
+func (e Extensions) Get(name string, target interface{}) (bool, error) {
+	if v, ok := e[name]; ok {
+		err := mapstructure.Decode(v, target)
+		return true, err
+	}
+	return false, nil
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/types/cpus.go 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/types/cpus.go
--- 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/types/cpus.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/types/cpus.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,48 @@
+/*
+   Copyright 2020 The Compose Specification Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package types
+
+import (
+	"fmt"
+	"strconv"
+)
+
+type NanoCPUs float32
+
+func (n *NanoCPUs) DecodeMapstructure(a any) error {
+	switch v := a.(type) {
+	case string:
+		f, err := strconv.ParseFloat(v, 64)
+		if err != nil {
+			return err
+		}
+		*n = NanoCPUs(f)
+	case int:
+		*n = NanoCPUs(v)
+	case float32:
+		*n = NanoCPUs(v)
+	case float64:
+		*n = NanoCPUs(v)
+	default:
+		return fmt.Errorf("unexpected value type %T for cpus", v)
+	}
+	return nil
+}
+
+func (n *NanoCPUs) Value() float32 {
+	return float32(*n)
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/types/derived.gen.go 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/types/derived.gen.go
--- 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/types/derived.gen.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/types/derived.gen.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,2248 @@
+// Code generated by goderive DO NOT EDIT.
+
+package types
+
+// deriveDeepCopyProject recursively copies the contents of src into dst.
+func deriveDeepCopyProject(dst, src *Project) {
+	dst.Name = src.Name
+	dst.WorkingDir = src.WorkingDir
+	if src.Services != nil {
+		dst.Services = make(map[string]ServiceConfig, len(src.Services))
+		deriveDeepCopy(dst.Services, src.Services)
+	} else {
+		dst.Services = nil
+	}
+	if src.Networks != nil {
+		dst.Networks = make(map[string]NetworkConfig, len(src.Networks))
+		deriveDeepCopy_(dst.Networks, src.Networks)
+	} else {
+		dst.Networks = nil
+	}
+	if src.Volumes != nil {
+		dst.Volumes = make(map[string]VolumeConfig, len(src.Volumes))
+		deriveDeepCopy_1(dst.Volumes, src.Volumes)
+	} else {
+		dst.Volumes = nil
+	}
+	if src.Secrets != nil {
+		dst.Secrets = make(map[string]SecretConfig, len(src.Secrets))
+		deriveDeepCopy_2(dst.Secrets, src.Secrets)
+	} else {
+		dst.Secrets = nil
+	}
+	if src.Configs != nil {
+		dst.Configs = make(map[string]ConfigObjConfig, len(src.Configs))
+		deriveDeepCopy_3(dst.Configs, src.Configs)
+	} else {
+		dst.Configs = nil
+	}
+	if src.Extensions != nil {
+		dst.Extensions = make(map[string]any, len(src.Extensions))
+		src.Extensions.DeepCopy(dst.Extensions)
+	} else {
+		dst.Extensions = nil
+	}
+	if src.ComposeFiles == nil {
+		dst.ComposeFiles = nil
+	} else {
+		if dst.ComposeFiles != nil {
+			if len(src.ComposeFiles) > len(dst.ComposeFiles) {
+				if cap(dst.ComposeFiles) >= len(src.ComposeFiles) {
+					dst.ComposeFiles = (dst.ComposeFiles)[:len(src.ComposeFiles)]
+				} else {
+					dst.ComposeFiles = make([]string, len(src.ComposeFiles))
+				}
+			} else if len(src.ComposeFiles) < len(dst.ComposeFiles) {
+				dst.ComposeFiles = (dst.ComposeFiles)[:len(src.ComposeFiles)]
+			}
+		} else {
+			dst.ComposeFiles = make([]string, len(src.ComposeFiles))
+		}
+		copy(dst.ComposeFiles, src.ComposeFiles)
+	}
+	if src.Environment != nil {
+		dst.Environment = make(map[string]string, len(src.Environment))
+		deriveDeepCopy_4(dst.Environment, src.Environment)
+	} else {
+		dst.Environment = nil
+	}
+	if src.DisabledServices != nil {
+		dst.DisabledServices = make(map[string]ServiceConfig, len(src.DisabledServices))
+		deriveDeepCopy(dst.DisabledServices, src.DisabledServices)
+	} else {
+		dst.DisabledServices = nil
+	}
+	if src.Profiles == nil {
+		dst.Profiles = nil
+	} else {
+		if dst.Profiles != nil {
+			if len(src.Profiles) > len(dst.Profiles) {
+				if cap(dst.Profiles) >= len(src.Profiles) {
+					dst.Profiles = (dst.Profiles)[:len(src.Profiles)]
+				} else {
+					dst.Profiles = make([]string, len(src.Profiles))
+				}
+			} else if len(src.Profiles) < len(dst.Profiles) {
+				dst.Profiles = (dst.Profiles)[:len(src.Profiles)]
+			}
+		} else {
+			dst.Profiles = make([]string, len(src.Profiles))
+		}
+		copy(dst.Profiles, src.Profiles)
+	}
+}
+
+// deriveDeepCopyService recursively copies the contents of src into dst.
+func deriveDeepCopyService(dst, src *ServiceConfig) {
+	dst.Name = src.Name
+	if src.Profiles == nil {
+		dst.Profiles = nil
+	} else {
+		if dst.Profiles != nil {
+			if len(src.Profiles) > len(dst.Profiles) {
+				if cap(dst.Profiles) >= len(src.Profiles) {
+					dst.Profiles = (dst.Profiles)[:len(src.Profiles)]
+				} else {
+					dst.Profiles = make([]string, len(src.Profiles))
+				}
+			} else if len(src.Profiles) < len(dst.Profiles) {
+				dst.Profiles = (dst.Profiles)[:len(src.Profiles)]
+			}
+		} else {
+			dst.Profiles = make([]string, len(src.Profiles))
+		}
+		copy(dst.Profiles, src.Profiles)
+	}
+	if src.Annotations != nil {
+		dst.Annotations = make(map[string]string, len(src.Annotations))
+		deriveDeepCopy_4(dst.Annotations, src.Annotations)
+	} else {
+		dst.Annotations = nil
+	}
+	if src.Attach == nil {
+		dst.Attach = nil
+	} else {
+		dst.Attach = new(bool)
+		*dst.Attach = *src.Attach
+	}
+	if src.Build == nil {
+		dst.Build = nil
+	} else {
+		dst.Build = new(BuildConfig)
+		deriveDeepCopy_5(dst.Build, src.Build)
+	}
+	if src.Develop == nil {
+		dst.Develop = nil
+	} else {
+		dst.Develop = new(DevelopConfig)
+		deriveDeepCopy_6(dst.Develop, src.Develop)
+	}
+	if src.BlkioConfig == nil {
+		dst.BlkioConfig = nil
+	} else {
+		dst.BlkioConfig = new(BlkioConfig)
+		deriveDeepCopy_7(dst.BlkioConfig, src.BlkioConfig)
+	}
+	if src.CapAdd == nil {
+		dst.CapAdd = nil
+	} else {
+		if dst.CapAdd != nil {
+			if len(src.CapAdd) > len(dst.CapAdd) {
+				if cap(dst.CapAdd) >= len(src.CapAdd) {
+					dst.CapAdd = (dst.CapAdd)[:len(src.CapAdd)]
+				} else {
+					dst.CapAdd = make([]string, len(src.CapAdd))
+				}
+			} else if len(src.CapAdd) < len(dst.CapAdd) {
+				dst.CapAdd = (dst.CapAdd)[:len(src.CapAdd)]
+			}
+		} else {
+			dst.CapAdd = make([]string, len(src.CapAdd))
+		}
+		copy(dst.CapAdd, src.CapAdd)
+	}
+	if src.CapDrop == nil {
+		dst.CapDrop = nil
+	} else {
+		if dst.CapDrop != nil {
+			if len(src.CapDrop) > len(dst.CapDrop) {
+				if cap(dst.CapDrop) >= len(src.CapDrop) {
+					dst.CapDrop = (dst.CapDrop)[:len(src.CapDrop)]
+				} else {
+					dst.CapDrop = make([]string, len(src.CapDrop))
+				}
+			} else if len(src.CapDrop) < len(dst.CapDrop) {
+				dst.CapDrop = (dst.CapDrop)[:len(src.CapDrop)]
+			}
+		} else {
+			dst.CapDrop = make([]string, len(src.CapDrop))
+		}
+		copy(dst.CapDrop, src.CapDrop)
+	}
+	dst.CgroupParent = src.CgroupParent
+	dst.Cgroup = src.Cgroup
+	dst.CPUCount = src.CPUCount
+	dst.CPUPercent = src.CPUPercent
+	dst.CPUPeriod = src.CPUPeriod
+	dst.CPUQuota = src.CPUQuota
+	dst.CPURTPeriod = src.CPURTPeriod
+	dst.CPURTRuntime = src.CPURTRuntime
+	dst.CPUS = src.CPUS
+	dst.CPUSet = src.CPUSet
+	dst.CPUShares = src.CPUShares
+	if src.Command == nil {
+		dst.Command = nil
+	} else {
+		if dst.Command != nil {
+			if len(src.Command) > len(dst.Command) {
+				if cap(dst.Command) >= len(src.Command) {
+					dst.Command = (dst.Command)[:len(src.Command)]
+				} else {
+					dst.Command = make([]string, len(src.Command))
+				}
+			} else if len(src.Command) < len(dst.Command) {
+				dst.Command = (dst.Command)[:len(src.Command)]
+			}
+		} else {
+			dst.Command = make([]string, len(src.Command))
+		}
+		copy(dst.Command, src.Command)
+	}
+	if src.Configs == nil {
+		dst.Configs = nil
+	} else {
+		if dst.Configs != nil {
+			if len(src.Configs) > len(dst.Configs) {
+				if cap(dst.Configs) >= len(src.Configs) {
+					dst.Configs = (dst.Configs)[:len(src.Configs)]
+				} else {
+					dst.Configs = make([]ServiceConfigObjConfig, len(src.Configs))
+				}
+			} else if len(src.Configs) < len(dst.Configs) {
+				dst.Configs = (dst.Configs)[:len(src.Configs)]
+			}
+		} else {
+			dst.Configs = make([]ServiceConfigObjConfig, len(src.Configs))
+		}
+		deriveDeepCopy_8(dst.Configs, src.Configs)
+	}
+	dst.ContainerName = src.ContainerName
+	if src.CredentialSpec == nil {
+		dst.CredentialSpec = nil
+	} else {
+		dst.CredentialSpec = new(CredentialSpecConfig)
+		deriveDeepCopy_9(dst.CredentialSpec, src.CredentialSpec)
+	}
+	if src.DependsOn != nil {
+		dst.DependsOn = make(map[string]ServiceDependency, len(src.DependsOn))
+		deriveDeepCopy_10(dst.DependsOn, src.DependsOn)
+	} else {
+		dst.DependsOn = nil
+	}
+	if src.Deploy == nil {
+		dst.Deploy = nil
+	} else {
+		dst.Deploy = new(DeployConfig)
+		deriveDeepCopy_11(dst.Deploy, src.Deploy)
+	}
+	if src.DeviceCgroupRules == nil {
+		dst.DeviceCgroupRules = nil
+	} else {
+		if dst.DeviceCgroupRules != nil {
+			if len(src.DeviceCgroupRules) > len(dst.DeviceCgroupRules) {
+				if cap(dst.DeviceCgroupRules) >= len(src.DeviceCgroupRules) {
+					dst.DeviceCgroupRules = (dst.DeviceCgroupRules)[:len(src.DeviceCgroupRules)]
+				} else {
+					dst.DeviceCgroupRules = make([]string, len(src.DeviceCgroupRules))
+				}
+			} else if len(src.DeviceCgroupRules) < len(dst.DeviceCgroupRules) {
+				dst.DeviceCgroupRules = (dst.DeviceCgroupRules)[:len(src.DeviceCgroupRules)]
+			}
+		} else {
+			dst.DeviceCgroupRules = make([]string, len(src.DeviceCgroupRules))
+		}
+		copy(dst.DeviceCgroupRules, src.DeviceCgroupRules)
+	}
+	if src.Devices == nil {
+		dst.Devices = nil
+	} else {
+		if dst.Devices != nil {
+			if len(src.Devices) > len(dst.Devices) {
+				if cap(dst.Devices) >= len(src.Devices) {
+					dst.Devices = (dst.Devices)[:len(src.Devices)]
+				} else {
+					dst.Devices = make([]DeviceMapping, len(src.Devices))
+				}
+			} else if len(src.Devices) < len(dst.Devices) {
+				dst.Devices = (dst.Devices)[:len(src.Devices)]
+			}
+		} else {
+			dst.Devices = make([]DeviceMapping, len(src.Devices))
+		}
+		deriveDeepCopy_12(dst.Devices, src.Devices)
+	}
+	if src.DNS == nil {
+		dst.DNS = nil
+	} else {
+		if dst.DNS != nil {
+			if len(src.DNS) > len(dst.DNS) {
+				if cap(dst.DNS) >= len(src.DNS) {
+					dst.DNS = (dst.DNS)[:len(src.DNS)]
+				} else {
+					dst.DNS = make([]string, len(src.DNS))
+				}
+			} else if len(src.DNS) < len(dst.DNS) {
+				dst.DNS = (dst.DNS)[:len(src.DNS)]
+			}
+		} else {
+			dst.DNS = make([]string, len(src.DNS))
+		}
+		copy(dst.DNS, src.DNS)
+	}
+	if src.DNSOpts == nil {
+		dst.DNSOpts = nil
+	} else {
+		if dst.DNSOpts != nil {
+			if len(src.DNSOpts) > len(dst.DNSOpts) {
+				if cap(dst.DNSOpts) >= len(src.DNSOpts) {
+					dst.DNSOpts = (dst.DNSOpts)[:len(src.DNSOpts)]
+				} else {
+					dst.DNSOpts = make([]string, len(src.DNSOpts))
+				}
+			} else if len(src.DNSOpts) < len(dst.DNSOpts) {
+				dst.DNSOpts = (dst.DNSOpts)[:len(src.DNSOpts)]
+			}
+		} else {
+			dst.DNSOpts = make([]string, len(src.DNSOpts))
+		}
+		copy(dst.DNSOpts, src.DNSOpts)
+	}
+	if src.DNSSearch == nil {
+		dst.DNSSearch = nil
+	} else {
+		if dst.DNSSearch != nil {
+			if len(src.DNSSearch) > len(dst.DNSSearch) {
+				if cap(dst.DNSSearch) >= len(src.DNSSearch) {
+					dst.DNSSearch = (dst.DNSSearch)[:len(src.DNSSearch)]
+				} else {
+					dst.DNSSearch = make([]string, len(src.DNSSearch))
+				}
+			} else if len(src.DNSSearch) < len(dst.DNSSearch) {
+				dst.DNSSearch = (dst.DNSSearch)[:len(src.DNSSearch)]
+			}
+		} else {
+			dst.DNSSearch = make([]string, len(src.DNSSearch))
+		}
+		copy(dst.DNSSearch, src.DNSSearch)
+	}
+	dst.Dockerfile = src.Dockerfile
+	dst.DomainName = src.DomainName
+	if src.Entrypoint == nil {
+		dst.Entrypoint = nil
+	} else {
+		if dst.Entrypoint != nil {
+			if len(src.Entrypoint) > len(dst.Entrypoint) {
+				if cap(dst.Entrypoint) >= len(src.Entrypoint) {
+					dst.Entrypoint = (dst.Entrypoint)[:len(src.Entrypoint)]
+				} else {
+					dst.Entrypoint = make([]string, len(src.Entrypoint))
+				}
+			} else if len(src.Entrypoint) < len(dst.Entrypoint) {
+				dst.Entrypoint = (dst.Entrypoint)[:len(src.Entrypoint)]
+			}
+		} else {
+			dst.Entrypoint = make([]string, len(src.Entrypoint))
+		}
+		copy(dst.Entrypoint, src.Entrypoint)
+	}
+	if src.Environment != nil {
+		dst.Environment = make(map[string]*string, len(src.Environment))
+		deriveDeepCopy_13(dst.Environment, src.Environment)
+	} else {
+		dst.Environment = nil
+	}
+	if src.EnvFiles == nil {
+		dst.EnvFiles = nil
+	} else {
+		if dst.EnvFiles != nil {
+			if len(src.EnvFiles) > len(dst.EnvFiles) {
+				if cap(dst.EnvFiles) >= len(src.EnvFiles) {
+					dst.EnvFiles = (dst.EnvFiles)[:len(src.EnvFiles)]
+				} else {
+					dst.EnvFiles = make([]EnvFile, len(src.EnvFiles))
+				}
+			} else if len(src.EnvFiles) < len(dst.EnvFiles) {
+				dst.EnvFiles = (dst.EnvFiles)[:len(src.EnvFiles)]
+			}
+		} else {
+			dst.EnvFiles = make([]EnvFile, len(src.EnvFiles))
+		}
+		copy(dst.EnvFiles, src.EnvFiles)
+	}
+	if src.Expose == nil {
+		dst.Expose = nil
+	} else {
+		if dst.Expose != nil {
+			if len(src.Expose) > len(dst.Expose) {
+				if cap(dst.Expose) >= len(src.Expose) {
+					dst.Expose = (dst.Expose)[:len(src.Expose)]
+				} else {
+					dst.Expose = make([]string, len(src.Expose))
+				}
+			} else if len(src.Expose) < len(dst.Expose) {
+				dst.Expose = (dst.Expose)[:len(src.Expose)]
+			}
+		} else {
+			dst.Expose = make([]string, len(src.Expose))
+		}
+		copy(dst.Expose, src.Expose)
+	}
+	if src.Extends == nil {
+		dst.Extends = nil
+	} else {
+		dst.Extends = new(ExtendsConfig)
+		*dst.Extends = *src.Extends
+	}
+	if src.ExternalLinks == nil {
+		dst.ExternalLinks = nil
+	} else {
+		if dst.ExternalLinks != nil {
+			if len(src.ExternalLinks) > len(dst.ExternalLinks) {
+				if cap(dst.ExternalLinks) >= len(src.ExternalLinks) {
+					dst.ExternalLinks = (dst.ExternalLinks)[:len(src.ExternalLinks)]
+				} else {
+					dst.ExternalLinks = make([]string, len(src.ExternalLinks))
+				}
+			} else if len(src.ExternalLinks) < len(dst.ExternalLinks) {
+				dst.ExternalLinks = (dst.ExternalLinks)[:len(src.ExternalLinks)]
+			}
+		} else {
+			dst.ExternalLinks = make([]string, len(src.ExternalLinks))
+		}
+		copy(dst.ExternalLinks, src.ExternalLinks)
+	}
+	if src.ExtraHosts != nil {
+		dst.ExtraHosts = make(map[string][]string, len(src.ExtraHosts))
+		deriveDeepCopy_14(dst.ExtraHosts, src.ExtraHosts)
+	} else {
+		dst.ExtraHosts = nil
+	}
+	if src.GroupAdd == nil {
+		dst.GroupAdd = nil
+	} else {
+		if dst.GroupAdd != nil {
+			if len(src.GroupAdd) > len(dst.GroupAdd) {
+				if cap(dst.GroupAdd) >= len(src.GroupAdd) {
+					dst.GroupAdd = (dst.GroupAdd)[:len(src.GroupAdd)]
+				} else {
+					dst.GroupAdd = make([]string, len(src.GroupAdd))
+				}
+			} else if len(src.GroupAdd) < len(dst.GroupAdd) {
+				dst.GroupAdd = (dst.GroupAdd)[:len(src.GroupAdd)]
+			}
+		} else {
+			dst.GroupAdd = make([]string, len(src.GroupAdd))
+		}
+		copy(dst.GroupAdd, src.GroupAdd)
+	}
+	if src.Gpus == nil {
+		dst.Gpus = nil
+	} else {
+		if dst.Gpus != nil {
+			if len(src.Gpus) > len(dst.Gpus) {
+				if cap(dst.Gpus) >= len(src.Gpus) {
+					dst.Gpus = (dst.Gpus)[:len(src.Gpus)]
+				} else {
+					dst.Gpus = make([]DeviceRequest, len(src.Gpus))
+				}
+			} else if len(src.Gpus) < len(dst.Gpus) {
+				dst.Gpus = (dst.Gpus)[:len(src.Gpus)]
+			}
+		} else {
+			dst.Gpus = make([]DeviceRequest, len(src.Gpus))
+		}
+		deriveDeepCopy_15(dst.Gpus, src.Gpus)
+	}
+	dst.Hostname = src.Hostname
+	if src.HealthCheck == nil {
+		dst.HealthCheck = nil
+	} else {
+		dst.HealthCheck = new(HealthCheckConfig)
+		deriveDeepCopy_16(dst.HealthCheck, src.HealthCheck)
+	}
+	dst.Image = src.Image
+	if src.Init == nil {
+		dst.Init = nil
+	} else {
+		dst.Init = new(bool)
+		*dst.Init = *src.Init
+	}
+	dst.Ipc = src.Ipc
+	dst.Isolation = src.Isolation
+	if src.Labels != nil {
+		dst.Labels = make(map[string]string, len(src.Labels))
+		deriveDeepCopy_4(dst.Labels, src.Labels)
+	} else {
+		dst.Labels = nil
+	}
+	if src.LabelFiles == nil {
+		dst.LabelFiles = nil
+	} else {
+		if dst.LabelFiles != nil {
+			if len(src.LabelFiles) > len(dst.LabelFiles) {
+				if cap(dst.LabelFiles) >= len(src.LabelFiles) {
+					dst.LabelFiles = (dst.LabelFiles)[:len(src.LabelFiles)]
+				} else {
+					dst.LabelFiles = make([]string, len(src.LabelFiles))
+				}
+			} else if len(src.LabelFiles) < len(dst.LabelFiles) {
+				dst.LabelFiles = (dst.LabelFiles)[:len(src.LabelFiles)]
+			}
+		} else {
+			dst.LabelFiles = make([]string, len(src.LabelFiles))
+		}
+		copy(dst.LabelFiles, src.LabelFiles)
+	}
+	if src.CustomLabels != nil {
+		dst.CustomLabels = make(map[string]string, len(src.CustomLabels))
+		deriveDeepCopy_4(dst.CustomLabels, src.CustomLabels)
+	} else {
+		dst.CustomLabels = nil
+	}
+	if src.Links == nil {
+		dst.Links = nil
+	} else {
+		if dst.Links != nil {
+			if len(src.Links) > len(dst.Links) {
+				if cap(dst.Links) >= len(src.Links) {
+					dst.Links = (dst.Links)[:len(src.Links)]
+				} else {
+					dst.Links = make([]string, len(src.Links))
+				}
+			} else if len(src.Links) < len(dst.Links) {
+				dst.Links = (dst.Links)[:len(src.Links)]
+			}
+		} else {
+			dst.Links = make([]string, len(src.Links))
+		}
+		copy(dst.Links, src.Links)
+	}
+	if src.Logging == nil {
+		dst.Logging = nil
+	} else {
+		dst.Logging = new(LoggingConfig)
+		deriveDeepCopy_17(dst.Logging, src.Logging)
+	}
+	dst.LogDriver = src.LogDriver
+	if src.LogOpt != nil {
+		dst.LogOpt = make(map[string]string, len(src.LogOpt))
+		deriveDeepCopy_4(dst.LogOpt, src.LogOpt)
+	} else {
+		dst.LogOpt = nil
+	}
+	dst.MemLimit = src.MemLimit
+	dst.MemReservation = src.MemReservation
+	dst.MemSwapLimit = src.MemSwapLimit
+	dst.MemSwappiness = src.MemSwappiness
+	dst.MacAddress = src.MacAddress
+	dst.Net = src.Net
+	dst.NetworkMode = src.NetworkMode
+	if src.Networks != nil {
+		dst.Networks = make(map[string]*ServiceNetworkConfig, len(src.Networks))
+		deriveDeepCopy_18(dst.Networks, src.Networks)
+	} else {
+		dst.Networks = nil
+	}
+	dst.OomKillDisable = src.OomKillDisable
+	dst.OomScoreAdj = src.OomScoreAdj
+	dst.Pid = src.Pid
+	dst.PidsLimit = src.PidsLimit
+	dst.Platform = src.Platform
+	if src.Ports == nil {
+		dst.Ports = nil
+	} else {
+		if dst.Ports != nil {
+			if len(src.Ports) > len(dst.Ports) {
+				if cap(dst.Ports) >= len(src.Ports) {
+					dst.Ports = (dst.Ports)[:len(src.Ports)]
+				} else {
+					dst.Ports = make([]ServicePortConfig, len(src.Ports))
+				}
+			} else if len(src.Ports) < len(dst.Ports) {
+				dst.Ports = (dst.Ports)[:len(src.Ports)]
+			}
+		} else {
+			dst.Ports = make([]ServicePortConfig, len(src.Ports))
+		}
+		deriveDeepCopy_19(dst.Ports, src.Ports)
+	}
+	dst.Privileged = src.Privileged
+	dst.PullPolicy = src.PullPolicy
+	dst.ReadOnly = src.ReadOnly
+	dst.Restart = src.Restart
+	dst.Runtime = src.Runtime
+	if src.Scale == nil {
+		dst.Scale = nil
+	} else {
+		dst.Scale = new(int)
+		*dst.Scale = *src.Scale
+	}
+	if src.Secrets == nil {
+		dst.Secrets = nil
+	} else {
+		if dst.Secrets != nil {
+			if len(src.Secrets) > len(dst.Secrets) {
+				if cap(dst.Secrets) >= len(src.Secrets) {
+					dst.Secrets = (dst.Secrets)[:len(src.Secrets)]
+				} else {
+					dst.Secrets = make([]ServiceSecretConfig, len(src.Secrets))
+				}
+			} else if len(src.Secrets) < len(dst.Secrets) {
+				dst.Secrets = (dst.Secrets)[:len(src.Secrets)]
+			}
+		} else {
+			dst.Secrets = make([]ServiceSecretConfig, len(src.Secrets))
+		}
+		deriveDeepCopy_20(dst.Secrets, src.Secrets)
+	}
+	if src.SecurityOpt == nil {
+		dst.SecurityOpt = nil
+	} else {
+		if dst.SecurityOpt != nil {
+			if len(src.SecurityOpt) > len(dst.SecurityOpt) {
+				if cap(dst.SecurityOpt) >= len(src.SecurityOpt) {
+					dst.SecurityOpt = (dst.SecurityOpt)[:len(src.SecurityOpt)]
+				} else {
+					dst.SecurityOpt = make([]string, len(src.SecurityOpt))
+				}
+			} else if len(src.SecurityOpt) < len(dst.SecurityOpt) {
+				dst.SecurityOpt = (dst.SecurityOpt)[:len(src.SecurityOpt)]
+			}
+		} else {
+			dst.SecurityOpt = make([]string, len(src.SecurityOpt))
+		}
+		copy(dst.SecurityOpt, src.SecurityOpt)
+	}
+	dst.ShmSize = src.ShmSize
+	dst.StdinOpen = src.StdinOpen
+	if src.StopGracePeriod == nil {
+		dst.StopGracePeriod = nil
+	} else {
+		dst.StopGracePeriod = new(Duration)
+		*dst.StopGracePeriod = *src.StopGracePeriod
+	}
+	dst.StopSignal = src.StopSignal
+	if src.StorageOpt != nil {
+		dst.StorageOpt = make(map[string]string, len(src.StorageOpt))
+		deriveDeepCopy_4(dst.StorageOpt, src.StorageOpt)
+	} else {
+		dst.StorageOpt = nil
+	}
+	if src.Sysctls != nil {
+		dst.Sysctls = make(map[string]string, len(src.Sysctls))
+		deriveDeepCopy_4(dst.Sysctls, src.Sysctls)
+	} else {
+		dst.Sysctls = nil
+	}
+	if src.Tmpfs == nil {
+		dst.Tmpfs = nil
+	} else {
+		if dst.Tmpfs != nil {
+			if len(src.Tmpfs) > len(dst.Tmpfs) {
+				if cap(dst.Tmpfs) >= len(src.Tmpfs) {
+					dst.Tmpfs = (dst.Tmpfs)[:len(src.Tmpfs)]
+				} else {
+					dst.Tmpfs = make([]string, len(src.Tmpfs))
+				}
+			} else if len(src.Tmpfs) < len(dst.Tmpfs) {
+				dst.Tmpfs = (dst.Tmpfs)[:len(src.Tmpfs)]
+			}
+		} else {
+			dst.Tmpfs = make([]string, len(src.Tmpfs))
+		}
+		copy(dst.Tmpfs, src.Tmpfs)
+	}
+	dst.Tty = src.Tty
+	if src.Ulimits != nil {
+		dst.Ulimits = make(map[string]*UlimitsConfig, len(src.Ulimits))
+		deriveDeepCopy_21(dst.Ulimits, src.Ulimits)
+	} else {
+		dst.Ulimits = nil
+	}
+	dst.User = src.User
+	dst.UserNSMode = src.UserNSMode
+	dst.Uts = src.Uts
+	dst.VolumeDriver = src.VolumeDriver
+	if src.Volumes == nil {
+		dst.Volumes = nil
+	} else {
+		if dst.Volumes != nil {
+			if len(src.Volumes) > len(dst.Volumes) {
+				if cap(dst.Volumes) >= len(src.Volumes) {
+					dst.Volumes = (dst.Volumes)[:len(src.Volumes)]
+				} else {
+					dst.Volumes = make([]ServiceVolumeConfig, len(src.Volumes))
+				}
+			} else if len(src.Volumes) < len(dst.Volumes) {
+				dst.Volumes = (dst.Volumes)[:len(src.Volumes)]
+			}
+		} else {
+			dst.Volumes = make([]ServiceVolumeConfig, len(src.Volumes))
+		}
+		deriveDeepCopy_22(dst.Volumes, src.Volumes)
+	}
+	if src.VolumesFrom == nil {
+		dst.VolumesFrom = nil
+	} else {
+		if dst.VolumesFrom != nil {
+			if len(src.VolumesFrom) > len(dst.VolumesFrom) {
+				if cap(dst.VolumesFrom) >= len(src.VolumesFrom) {
+					dst.VolumesFrom = (dst.VolumesFrom)[:len(src.VolumesFrom)]
+				} else {
+					dst.VolumesFrom = make([]string, len(src.VolumesFrom))
+				}
+			} else if len(src.VolumesFrom) < len(dst.VolumesFrom) {
+				dst.VolumesFrom = (dst.VolumesFrom)[:len(src.VolumesFrom)]
+			}
+		} else {
+			dst.VolumesFrom = make([]string, len(src.VolumesFrom))
+		}
+		copy(dst.VolumesFrom, src.VolumesFrom)
+	}
+	dst.WorkingDir = src.WorkingDir
+	if src.PostStart == nil {
+		dst.PostStart = nil
+	} else {
+		if dst.PostStart != nil {
+			if len(src.PostStart) > len(dst.PostStart) {
+				if cap(dst.PostStart) >= len(src.PostStart) {
+					dst.PostStart = (dst.PostStart)[:len(src.PostStart)]
+				} else {
+					dst.PostStart = make([]ServiceHook, len(src.PostStart))
+				}
+			} else if len(src.PostStart) < len(dst.PostStart) {
+				dst.PostStart = (dst.PostStart)[:len(src.PostStart)]
+			}
+		} else {
+			dst.PostStart = make([]ServiceHook, len(src.PostStart))
+		}
+		deriveDeepCopy_23(dst.PostStart, src.PostStart)
+	}
+	if src.PreStop == nil {
+		dst.PreStop = nil
+	} else {
+		if dst.PreStop != nil {
+			if len(src.PreStop) > len(dst.PreStop) {
+				if cap(dst.PreStop) >= len(src.PreStop) {
+					dst.PreStop = (dst.PreStop)[:len(src.PreStop)]
+				} else {
+					dst.PreStop = make([]ServiceHook, len(src.PreStop))
+				}
+			} else if len(src.PreStop) < len(dst.PreStop) {
+				dst.PreStop = (dst.PreStop)[:len(src.PreStop)]
+			}
+		} else {
+			dst.PreStop = make([]ServiceHook, len(src.PreStop))
+		}
+		deriveDeepCopy_23(dst.PreStop, src.PreStop)
+	}
+	if src.Extensions != nil {
+		dst.Extensions = make(map[string]any, len(src.Extensions))
+		src.Extensions.DeepCopy(dst.Extensions)
+	} else {
+		dst.Extensions = nil
+	}
+}
+
+// deriveDeepCopy recursively copies the contents of src into dst.
+func deriveDeepCopy(dst, src map[string]ServiceConfig) {
+	for src_key, src_value := range src {
+		func() {
+			field := new(ServiceConfig)
+			deriveDeepCopyService(field, &src_value)
+			dst[src_key] = *field
+		}()
+	}
+}
+
+// deriveDeepCopy_ recursively copies the contents of src into dst.
+func deriveDeepCopy_(dst, src map[string]NetworkConfig) {
+	for src_key, src_value := range src {
+		func() {
+			field := new(NetworkConfig)
+			deriveDeepCopy_24(field, &src_value)
+			dst[src_key] = *field
+		}()
+	}
+}
+
+// deriveDeepCopy_1 recursively copies the contents of src into dst.
+func deriveDeepCopy_1(dst, src map[string]VolumeConfig) {
+	for src_key, src_value := range src {
+		func() {
+			field := new(VolumeConfig)
+			deriveDeepCopy_25(field, &src_value)
+			dst[src_key] = *field
+		}()
+	}
+}
+
+// deriveDeepCopy_2 recursively copies the contents of src into dst.
+func deriveDeepCopy_2(dst, src map[string]SecretConfig) {
+	for src_key, src_value := range src {
+		func() {
+			field := new(SecretConfig)
+			deriveDeepCopy_26(field, &src_value)
+			dst[src_key] = *field
+		}()
+	}
+}
+
+// deriveDeepCopy_3 recursively copies the contents of src into dst.
+func deriveDeepCopy_3(dst, src map[string]ConfigObjConfig) {
+	for src_key, src_value := range src {
+		func() {
+			field := new(ConfigObjConfig)
+			deriveDeepCopy_27(field, &src_value)
+			dst[src_key] = *field
+		}()
+	}
+}
+
+// deriveDeepCopy_4 recursively copies the contents of src into dst.
+func deriveDeepCopy_4(dst, src map[string]string) {
+	for src_key, src_value := range src {
+		dst[src_key] = src_value
+	}
+}
+
+// deriveDeepCopy_5 recursively copies the contents of src into dst.
+// Scalar fields are assigned directly; map fields are reallocated and
+// copied via the deriveDeepCopy_* helpers; slice fields keep nil as nil,
+// otherwise dst's slice is resized (reusing its backing array when the
+// capacity allows) before the elements are copied.
+func deriveDeepCopy_5(dst, src *BuildConfig) {
+	dst.Context = src.Context
+	dst.Dockerfile = src.Dockerfile
+	dst.DockerfileInline = src.DockerfileInline
+	if src.Entitlements == nil {
+		dst.Entitlements = nil
+	} else {
+		// Resize-then-copy idiom used for every slice field below.
+		if dst.Entitlements != nil {
+			if len(src.Entitlements) > len(dst.Entitlements) {
+				if cap(dst.Entitlements) >= len(src.Entitlements) {
+					dst.Entitlements = (dst.Entitlements)[:len(src.Entitlements)]
+				} else {
+					dst.Entitlements = make([]string, len(src.Entitlements))
+				}
+			} else if len(src.Entitlements) < len(dst.Entitlements) {
+				dst.Entitlements = (dst.Entitlements)[:len(src.Entitlements)]
+			}
+		} else {
+			dst.Entitlements = make([]string, len(src.Entitlements))
+		}
+		copy(dst.Entitlements, src.Entitlements)
+	}
+	if src.Args != nil {
+		dst.Args = make(map[string]*string, len(src.Args))
+		deriveDeepCopy_13(dst.Args, src.Args)
+	} else {
+		dst.Args = nil
+	}
+	if src.SSH == nil {
+		dst.SSH = nil
+	} else {
+		if dst.SSH != nil {
+			if len(src.SSH) > len(dst.SSH) {
+				if cap(dst.SSH) >= len(src.SSH) {
+					dst.SSH = (dst.SSH)[:len(src.SSH)]
+				} else {
+					dst.SSH = make([]SSHKey, len(src.SSH))
+				}
+			} else if len(src.SSH) < len(dst.SSH) {
+				dst.SSH = (dst.SSH)[:len(src.SSH)]
+			}
+		} else {
+			dst.SSH = make([]SSHKey, len(src.SSH))
+		}
+		// Plain copy: SSHKey elements are copied by value here.
+		copy(dst.SSH, src.SSH)
+	}
+	if src.Labels != nil {
+		dst.Labels = make(map[string]string, len(src.Labels))
+		deriveDeepCopy_4(dst.Labels, src.Labels)
+	} else {
+		dst.Labels = nil
+	}
+	if src.CacheFrom == nil {
+		dst.CacheFrom = nil
+	} else {
+		if dst.CacheFrom != nil {
+			if len(src.CacheFrom) > len(dst.CacheFrom) {
+				if cap(dst.CacheFrom) >= len(src.CacheFrom) {
+					dst.CacheFrom = (dst.CacheFrom)[:len(src.CacheFrom)]
+				} else {
+					dst.CacheFrom = make([]string, len(src.CacheFrom))
+				}
+			} else if len(src.CacheFrom) < len(dst.CacheFrom) {
+				dst.CacheFrom = (dst.CacheFrom)[:len(src.CacheFrom)]
+			}
+		} else {
+			dst.CacheFrom = make([]string, len(src.CacheFrom))
+		}
+		copy(dst.CacheFrom, src.CacheFrom)
+	}
+	if src.CacheTo == nil {
+		dst.CacheTo = nil
+	} else {
+		if dst.CacheTo != nil {
+			if len(src.CacheTo) > len(dst.CacheTo) {
+				if cap(dst.CacheTo) >= len(src.CacheTo) {
+					dst.CacheTo = (dst.CacheTo)[:len(src.CacheTo)]
+				} else {
+					dst.CacheTo = make([]string, len(src.CacheTo))
+				}
+			} else if len(src.CacheTo) < len(dst.CacheTo) {
+				dst.CacheTo = (dst.CacheTo)[:len(src.CacheTo)]
+			}
+		} else {
+			dst.CacheTo = make([]string, len(src.CacheTo))
+		}
+		copy(dst.CacheTo, src.CacheTo)
+	}
+	dst.NoCache = src.NoCache
+	if src.AdditionalContexts != nil {
+		dst.AdditionalContexts = make(map[string]string, len(src.AdditionalContexts))
+		deriveDeepCopy_4(dst.AdditionalContexts, src.AdditionalContexts)
+	} else {
+		dst.AdditionalContexts = nil
+	}
+	dst.Pull = src.Pull
+	if src.ExtraHosts != nil {
+		dst.ExtraHosts = make(map[string][]string, len(src.ExtraHosts))
+		deriveDeepCopy_14(dst.ExtraHosts, src.ExtraHosts)
+	} else {
+		dst.ExtraHosts = nil
+	}
+	dst.Isolation = src.Isolation
+	dst.Network = src.Network
+	dst.Target = src.Target
+	if src.Secrets == nil {
+		dst.Secrets = nil
+	} else {
+		if dst.Secrets != nil {
+			if len(src.Secrets) > len(dst.Secrets) {
+				if cap(dst.Secrets) >= len(src.Secrets) {
+					dst.Secrets = (dst.Secrets)[:len(src.Secrets)]
+				} else {
+					dst.Secrets = make([]ServiceSecretConfig, len(src.Secrets))
+				}
+			} else if len(src.Secrets) < len(dst.Secrets) {
+				dst.Secrets = (dst.Secrets)[:len(src.Secrets)]
+			}
+		} else {
+			dst.Secrets = make([]ServiceSecretConfig, len(src.Secrets))
+		}
+		// Element-wise deep copy (secrets contain pointers/extensions).
+		deriveDeepCopy_20(dst.Secrets, src.Secrets)
+	}
+	dst.ShmSize = src.ShmSize
+	if src.Tags == nil {
+		dst.Tags = nil
+	} else {
+		if dst.Tags != nil {
+			if len(src.Tags) > len(dst.Tags) {
+				if cap(dst.Tags) >= len(src.Tags) {
+					dst.Tags = (dst.Tags)[:len(src.Tags)]
+				} else {
+					dst.Tags = make([]string, len(src.Tags))
+				}
+			} else if len(src.Tags) < len(dst.Tags) {
+				dst.Tags = (dst.Tags)[:len(src.Tags)]
+			}
+		} else {
+			dst.Tags = make([]string, len(src.Tags))
+		}
+		copy(dst.Tags, src.Tags)
+	}
+	if src.Ulimits != nil {
+		dst.Ulimits = make(map[string]*UlimitsConfig, len(src.Ulimits))
+		deriveDeepCopy_21(dst.Ulimits, src.Ulimits)
+	} else {
+		dst.Ulimits = nil
+	}
+	if src.Platforms == nil {
+		dst.Platforms = nil
+	} else {
+		if dst.Platforms != nil {
+			if len(src.Platforms) > len(dst.Platforms) {
+				if cap(dst.Platforms) >= len(src.Platforms) {
+					dst.Platforms = (dst.Platforms)[:len(src.Platforms)]
+				} else {
+					dst.Platforms = make([]string, len(src.Platforms))
+				}
+			} else if len(src.Platforms) < len(dst.Platforms) {
+				dst.Platforms = (dst.Platforms)[:len(src.Platforms)]
+			}
+		} else {
+			dst.Platforms = make([]string, len(src.Platforms))
+		}
+		copy(dst.Platforms, src.Platforms)
+	}
+	dst.Privileged = src.Privileged
+	if src.Extensions != nil {
+		dst.Extensions = make(map[string]any, len(src.Extensions))
+		src.Extensions.DeepCopy(dst.Extensions)
+	} else {
+		dst.Extensions = nil
+	}
+}
+
+// deriveDeepCopy_6 recursively copies the contents of src into dst.
+// Watch keeps nil as nil; otherwise dst.Watch is resized (reusing its
+// backing array when capacity allows) and the triggers are deep-copied.
+func deriveDeepCopy_6(dst, src *DevelopConfig) {
+	if src.Watch == nil {
+		dst.Watch = nil
+	} else {
+		if dst.Watch != nil {
+			if len(src.Watch) > len(dst.Watch) {
+				if cap(dst.Watch) >= len(src.Watch) {
+					dst.Watch = (dst.Watch)[:len(src.Watch)]
+				} else {
+					dst.Watch = make([]Trigger, len(src.Watch))
+				}
+			} else if len(src.Watch) < len(dst.Watch) {
+				dst.Watch = (dst.Watch)[:len(src.Watch)]
+			}
+		} else {
+			dst.Watch = make([]Trigger, len(src.Watch))
+		}
+		deriveDeepCopy_28(dst.Watch, src.Watch)
+	}
+	if src.Extensions != nil {
+		dst.Extensions = make(map[string]any, len(src.Extensions))
+		src.Extensions.DeepCopy(dst.Extensions)
+	} else {
+		dst.Extensions = nil
+	}
+}
+
+// deriveDeepCopy_7 recursively copies the contents of src into dst.
+// Each device-throttle slice keeps nil as nil; otherwise it is resized
+// (reusing dst's backing array when capacity allows) and the elements are
+// deep-copied via deriveDeepCopy_29/_30.
+func deriveDeepCopy_7(dst, src *BlkioConfig) {
+	dst.Weight = src.Weight
+	if src.WeightDevice == nil {
+		dst.WeightDevice = nil
+	} else {
+		if dst.WeightDevice != nil {
+			if len(src.WeightDevice) > len(dst.WeightDevice) {
+				if cap(dst.WeightDevice) >= len(src.WeightDevice) {
+					dst.WeightDevice = (dst.WeightDevice)[:len(src.WeightDevice)]
+				} else {
+					dst.WeightDevice = make([]WeightDevice, len(src.WeightDevice))
+				}
+			} else if len(src.WeightDevice) < len(dst.WeightDevice) {
+				dst.WeightDevice = (dst.WeightDevice)[:len(src.WeightDevice)]
+			}
+		} else {
+			dst.WeightDevice = make([]WeightDevice, len(src.WeightDevice))
+		}
+		deriveDeepCopy_29(dst.WeightDevice, src.WeightDevice)
+	}
+	if src.DeviceReadBps == nil {
+		dst.DeviceReadBps = nil
+	} else {
+		if dst.DeviceReadBps != nil {
+			if len(src.DeviceReadBps) > len(dst.DeviceReadBps) {
+				if cap(dst.DeviceReadBps) >= len(src.DeviceReadBps) {
+					dst.DeviceReadBps = (dst.DeviceReadBps)[:len(src.DeviceReadBps)]
+				} else {
+					dst.DeviceReadBps = make([]ThrottleDevice, len(src.DeviceReadBps))
+				}
+			} else if len(src.DeviceReadBps) < len(dst.DeviceReadBps) {
+				dst.DeviceReadBps = (dst.DeviceReadBps)[:len(src.DeviceReadBps)]
+			}
+		} else {
+			dst.DeviceReadBps = make([]ThrottleDevice, len(src.DeviceReadBps))
+		}
+		deriveDeepCopy_30(dst.DeviceReadBps, src.DeviceReadBps)
+	}
+	if src.DeviceReadIOps == nil {
+		dst.DeviceReadIOps = nil
+	} else {
+		if dst.DeviceReadIOps != nil {
+			if len(src.DeviceReadIOps) > len(dst.DeviceReadIOps) {
+				if cap(dst.DeviceReadIOps) >= len(src.DeviceReadIOps) {
+					dst.DeviceReadIOps = (dst.DeviceReadIOps)[:len(src.DeviceReadIOps)]
+				} else {
+					dst.DeviceReadIOps = make([]ThrottleDevice, len(src.DeviceReadIOps))
+				}
+			} else if len(src.DeviceReadIOps) < len(dst.DeviceReadIOps) {
+				dst.DeviceReadIOps = (dst.DeviceReadIOps)[:len(src.DeviceReadIOps)]
+			}
+		} else {
+			dst.DeviceReadIOps = make([]ThrottleDevice, len(src.DeviceReadIOps))
+		}
+		deriveDeepCopy_30(dst.DeviceReadIOps, src.DeviceReadIOps)
+	}
+	if src.DeviceWriteBps == nil {
+		dst.DeviceWriteBps = nil
+	} else {
+		if dst.DeviceWriteBps != nil {
+			if len(src.DeviceWriteBps) > len(dst.DeviceWriteBps) {
+				if cap(dst.DeviceWriteBps) >= len(src.DeviceWriteBps) {
+					dst.DeviceWriteBps = (dst.DeviceWriteBps)[:len(src.DeviceWriteBps)]
+				} else {
+					dst.DeviceWriteBps = make([]ThrottleDevice, len(src.DeviceWriteBps))
+				}
+			} else if len(src.DeviceWriteBps) < len(dst.DeviceWriteBps) {
+				dst.DeviceWriteBps = (dst.DeviceWriteBps)[:len(src.DeviceWriteBps)]
+			}
+		} else {
+			dst.DeviceWriteBps = make([]ThrottleDevice, len(src.DeviceWriteBps))
+		}
+		deriveDeepCopy_30(dst.DeviceWriteBps, src.DeviceWriteBps)
+	}
+	if src.DeviceWriteIOps == nil {
+		dst.DeviceWriteIOps = nil
+	} else {
+		if dst.DeviceWriteIOps != nil {
+			if len(src.DeviceWriteIOps) > len(dst.DeviceWriteIOps) {
+				if cap(dst.DeviceWriteIOps) >= len(src.DeviceWriteIOps) {
+					dst.DeviceWriteIOps = (dst.DeviceWriteIOps)[:len(src.DeviceWriteIOps)]
+				} else {
+					dst.DeviceWriteIOps = make([]ThrottleDevice, len(src.DeviceWriteIOps))
+				}
+			} else if len(src.DeviceWriteIOps) < len(dst.DeviceWriteIOps) {
+				dst.DeviceWriteIOps = (dst.DeviceWriteIOps)[:len(src.DeviceWriteIOps)]
+			}
+		} else {
+			dst.DeviceWriteIOps = make([]ThrottleDevice, len(src.DeviceWriteIOps))
+		}
+		deriveDeepCopy_30(dst.DeviceWriteIOps, src.DeviceWriteIOps)
+	}
+	if src.Extensions != nil {
+		dst.Extensions = make(map[string]any, len(src.Extensions))
+		src.Extensions.DeepCopy(dst.Extensions)
+	} else {
+		dst.Extensions = nil
+	}
+}
+
+// deriveDeepCopy_8 recursively copies the contents of src into dst.
+// dst must already have len(src) elements; each one is deep-copied.
+func deriveDeepCopy_8(dst, src []ServiceConfigObjConfig) {
+	for src_i, src_value := range src {
+		func() {
+			field := new(ServiceConfigObjConfig)
+			deriveDeepCopy_31(field, &src_value)
+			dst[src_i] = *field
+		}()
+	}
+}
+
+// deriveDeepCopy_9 recursively copies the contents of src into dst.
+// All fields except Extensions are plain scalars copied by assignment.
+func deriveDeepCopy_9(dst, src *CredentialSpecConfig) {
+	dst.Config = src.Config
+	dst.File = src.File
+	dst.Registry = src.Registry
+	if src.Extensions != nil {
+		dst.Extensions = make(map[string]any, len(src.Extensions))
+		src.Extensions.DeepCopy(dst.Extensions)
+	} else {
+		dst.Extensions = nil
+	}
+}
+
+// deriveDeepCopy_10 recursively copies the contents of src into dst.
+// Each ServiceDependency value is deep-copied via deriveDeepCopy_32.
+func deriveDeepCopy_10(dst, src map[string]ServiceDependency) {
+	for src_key, src_value := range src {
+		func() {
+			field := new(ServiceDependency)
+			deriveDeepCopy_32(field, &src_value)
+			dst[src_key] = *field
+		}()
+	}
+}
+
+// deriveDeepCopy_11 recursively copies the contents of src into dst.
+// Pointer fields keep nil as nil and are otherwise reallocated; embedded
+// struct fields (Resources, Placement) are copied via a fresh value.
+func deriveDeepCopy_11(dst, src *DeployConfig) {
+	dst.Mode = src.Mode
+	if src.Replicas == nil {
+		dst.Replicas = nil
+	} else {
+		dst.Replicas = new(int)
+		*dst.Replicas = *src.Replicas
+	}
+	if src.Labels != nil {
+		dst.Labels = make(map[string]string, len(src.Labels))
+		deriveDeepCopy_4(dst.Labels, src.Labels)
+	} else {
+		dst.Labels = nil
+	}
+	if src.UpdateConfig == nil {
+		dst.UpdateConfig = nil
+	} else {
+		dst.UpdateConfig = new(UpdateConfig)
+		deriveDeepCopy_33(dst.UpdateConfig, src.UpdateConfig)
+	}
+	if src.RollbackConfig == nil {
+		dst.RollbackConfig = nil
+	} else {
+		dst.RollbackConfig = new(UpdateConfig)
+		deriveDeepCopy_33(dst.RollbackConfig, src.RollbackConfig)
+	}
+	func() {
+		field := new(Resources)
+		deriveDeepCopy_34(field, &src.Resources)
+		dst.Resources = *field
+	}()
+	if src.RestartPolicy == nil {
+		dst.RestartPolicy = nil
+	} else {
+		dst.RestartPolicy = new(RestartPolicy)
+		deriveDeepCopy_35(dst.RestartPolicy, src.RestartPolicy)
+	}
+	func() {
+		field := new(Placement)
+		deriveDeepCopy_36(field, &src.Placement)
+		dst.Placement = *field
+	}()
+	dst.EndpointMode = src.EndpointMode
+	if src.Extensions != nil {
+		dst.Extensions = make(map[string]any, len(src.Extensions))
+		src.Extensions.DeepCopy(dst.Extensions)
+	} else {
+		dst.Extensions = nil
+	}
+}
+
+// deriveDeepCopy_12 recursively copies the contents of src into dst.
+// dst must already have len(src) elements; each one is deep-copied.
+func deriveDeepCopy_12(dst, src []DeviceMapping) {
+	for src_i, src_value := range src {
+		func() {
+			field := new(DeviceMapping)
+			deriveDeepCopy_37(field, &src_value)
+			dst[src_i] = *field
+		}()
+	}
+}
+
+// deriveDeepCopy_13 recursively copies the contents of src into dst.
+// A nil *string value is copied as nil; a non-nil value is duplicated into
+// a fresh allocation so dst never aliases pointers owned by src.
+// NOTE(review): the generated original assigned dst[src_key] = nil in a
+// separate, duplicated nil check before the if/else below (dead code);
+// the redundant branch is removed here with identical behavior.
+func deriveDeepCopy_13(dst, src map[string]*string) {
+	for src_key, src_value := range src {
+		if src_value == nil {
+			dst[src_key] = nil
+		} else {
+			dst[src_key] = new(string)
+			*dst[src_key] = *src_value
+		}
+	}
+}
+
+// deriveDeepCopy_14 recursively copies the contents of src into dst.
+// A nil []string value is copied as nil; otherwise dst's slice is resized
+// (reusing its backing array when capacity allows) and the strings are
+// copied, so dst never shares a backing array with src.
+// NOTE(review): the generated original assigned dst[src_key] = nil in a
+// separate, duplicated nil check before the if/else below (dead code);
+// the redundant branch is removed here with identical behavior.
+func deriveDeepCopy_14(dst, src map[string][]string) {
+	for src_key, src_value := range src {
+		if src_value == nil {
+			dst[src_key] = nil
+		} else {
+			if dst[src_key] != nil {
+				if len(src_value) > len(dst[src_key]) {
+					if cap(dst[src_key]) >= len(src_value) {
+						dst[src_key] = (dst[src_key])[:len(src_value)]
+					} else {
+						dst[src_key] = make([]string, len(src_value))
+					}
+				} else if len(src_value) < len(dst[src_key]) {
+					dst[src_key] = (dst[src_key])[:len(src_value)]
+				}
+			} else {
+				dst[src_key] = make([]string, len(src_value))
+			}
+			copy(dst[src_key], src_value)
+		}
+	}
+}
+
+// deriveDeepCopy_15 recursively copies the contents of src into dst.
+// dst must already have len(src) elements; each one is deep-copied.
+func deriveDeepCopy_15(dst, src []DeviceRequest) {
+	for src_i, src_value := range src {
+		func() {
+			field := new(DeviceRequest)
+			deriveDeepCopy_38(field, &src_value)
+			dst[src_i] = *field
+		}()
+	}
+}
+
+// deriveDeepCopy_16 recursively copies the contents of src into dst.
+// Pointer fields (Timeout, Interval, Retries, StartPeriod, StartInterval)
+// keep nil as nil and are otherwise reallocated and dereference-copied.
+func deriveDeepCopy_16(dst, src *HealthCheckConfig) {
+	if src.Test == nil {
+		dst.Test = nil
+	} else {
+		if dst.Test != nil {
+			if len(src.Test) > len(dst.Test) {
+				if cap(dst.Test) >= len(src.Test) {
+					dst.Test = (dst.Test)[:len(src.Test)]
+				} else {
+					dst.Test = make([]string, len(src.Test))
+				}
+			} else if len(src.Test) < len(dst.Test) {
+				dst.Test = (dst.Test)[:len(src.Test)]
+			}
+		} else {
+			dst.Test = make([]string, len(src.Test))
+		}
+		copy(dst.Test, src.Test)
+	}
+	if src.Timeout == nil {
+		dst.Timeout = nil
+	} else {
+		dst.Timeout = new(Duration)
+		*dst.Timeout = *src.Timeout
+	}
+	if src.Interval == nil {
+		dst.Interval = nil
+	} else {
+		dst.Interval = new(Duration)
+		*dst.Interval = *src.Interval
+	}
+	if src.Retries == nil {
+		dst.Retries = nil
+	} else {
+		dst.Retries = new(uint64)
+		*dst.Retries = *src.Retries
+	}
+	if src.StartPeriod == nil {
+		dst.StartPeriod = nil
+	} else {
+		dst.StartPeriod = new(Duration)
+		*dst.StartPeriod = *src.StartPeriod
+	}
+	if src.StartInterval == nil {
+		dst.StartInterval = nil
+	} else {
+		dst.StartInterval = new(Duration)
+		*dst.StartInterval = *src.StartInterval
+	}
+	dst.Disable = src.Disable
+	if src.Extensions != nil {
+		dst.Extensions = make(map[string]any, len(src.Extensions))
+		src.Extensions.DeepCopy(dst.Extensions)
+	} else {
+		dst.Extensions = nil
+	}
+}
+
+// deriveDeepCopy_17 recursively copies the contents of src into dst.
+// Options is reallocated and copied key-by-key so dst owns its own map.
+func deriveDeepCopy_17(dst, src *LoggingConfig) {
+	dst.Driver = src.Driver
+	if src.Options != nil {
+		dst.Options = make(map[string]string, len(src.Options))
+		deriveDeepCopy_4(dst.Options, src.Options)
+	} else {
+		dst.Options = nil
+	}
+	if src.Extensions != nil {
+		dst.Extensions = make(map[string]any, len(src.Extensions))
+		src.Extensions.DeepCopy(dst.Extensions)
+	} else {
+		dst.Extensions = nil
+	}
+}
+
+// deriveDeepCopy_18 recursively copies the contents of src into dst.
+// A nil *ServiceNetworkConfig is copied as nil; otherwise a fresh value is
+// allocated and deep-copied so dst never aliases pointers owned by src.
+// NOTE(review): the generated original assigned dst[src_key] = nil in a
+// separate, duplicated nil check before the if/else below (dead code);
+// the redundant branch is removed here with identical behavior.
+func deriveDeepCopy_18(dst, src map[string]*ServiceNetworkConfig) {
+	for src_key, src_value := range src {
+		if src_value == nil {
+			dst[src_key] = nil
+		} else {
+			dst[src_key] = new(ServiceNetworkConfig)
+			deriveDeepCopy_39(dst[src_key], src_value)
+		}
+	}
+}
+
+// deriveDeepCopy_19 recursively copies the contents of src into dst.
+// dst must already have len(src) elements; each one is deep-copied.
+func deriveDeepCopy_19(dst, src []ServicePortConfig) {
+	for src_i, src_value := range src {
+		func() {
+			field := new(ServicePortConfig)
+			deriveDeepCopy_40(field, &src_value)
+			dst[src_i] = *field
+		}()
+	}
+}
+
+// deriveDeepCopy_20 recursively copies the contents of src into dst.
+// dst must already have len(src) elements; each one is deep-copied.
+func deriveDeepCopy_20(dst, src []ServiceSecretConfig) {
+	for src_i, src_value := range src {
+		func() {
+			field := new(ServiceSecretConfig)
+			deriveDeepCopy_41(field, &src_value)
+			dst[src_i] = *field
+		}()
+	}
+}
+
+// deriveDeepCopy_21 recursively copies the contents of src into dst.
+// A nil *UlimitsConfig is copied as nil; otherwise a fresh value is
+// allocated and deep-copied so dst never aliases pointers owned by src.
+// NOTE(review): the generated original assigned dst[src_key] = nil in a
+// separate, duplicated nil check before the if/else below (dead code);
+// the redundant branch is removed here with identical behavior.
+func deriveDeepCopy_21(dst, src map[string]*UlimitsConfig) {
+	for src_key, src_value := range src {
+		if src_value == nil {
+			dst[src_key] = nil
+		} else {
+			dst[src_key] = new(UlimitsConfig)
+			deriveDeepCopy_42(dst[src_key], src_value)
+		}
+	}
+}
+
+// deriveDeepCopy_22 recursively copies the contents of src into dst.
+// dst must already have len(src) elements; each one is deep-copied.
+func deriveDeepCopy_22(dst, src []ServiceVolumeConfig) {
+	for src_i, src_value := range src {
+		func() {
+			field := new(ServiceVolumeConfig)
+			deriveDeepCopy_43(field, &src_value)
+			dst[src_i] = *field
+		}()
+	}
+}
+
+// deriveDeepCopy_23 recursively copies the contents of src into dst.
+// dst must already have len(src) elements; each one is deep-copied.
+func deriveDeepCopy_23(dst, src []ServiceHook) {
+	for src_i, src_value := range src {
+		func() {
+			field := new(ServiceHook)
+			deriveDeepCopy_44(field, &src_value)
+			dst[src_i] = *field
+		}()
+	}
+}
+
+// deriveDeepCopy_24 recursively copies the contents of src into dst.
+// Maps are reallocated and copied; the embedded Ipam struct is copied via
+// a fresh value; EnableIPv6 keeps nil as nil, else is reallocated.
+func deriveDeepCopy_24(dst, src *NetworkConfig) {
+	dst.Name = src.Name
+	dst.Driver = src.Driver
+	if src.DriverOpts != nil {
+		dst.DriverOpts = make(map[string]string, len(src.DriverOpts))
+		deriveDeepCopy_4(dst.DriverOpts, src.DriverOpts)
+	} else {
+		dst.DriverOpts = nil
+	}
+	func() {
+		field := new(IPAMConfig)
+		deriveDeepCopy_45(field, &src.Ipam)
+		dst.Ipam = *field
+	}()
+	dst.External = src.External
+	dst.Internal = src.Internal
+	dst.Attachable = src.Attachable
+	if src.Labels != nil {
+		dst.Labels = make(map[string]string, len(src.Labels))
+		deriveDeepCopy_4(dst.Labels, src.Labels)
+	} else {
+		dst.Labels = nil
+	}
+	if src.CustomLabels != nil {
+		dst.CustomLabels = make(map[string]string, len(src.CustomLabels))
+		deriveDeepCopy_4(dst.CustomLabels, src.CustomLabels)
+	} else {
+		dst.CustomLabels = nil
+	}
+	if src.EnableIPv6 == nil {
+		dst.EnableIPv6 = nil
+	} else {
+		dst.EnableIPv6 = new(bool)
+		*dst.EnableIPv6 = *src.EnableIPv6
+	}
+	if src.Extensions != nil {
+		dst.Extensions = make(map[string]any, len(src.Extensions))
+		src.Extensions.DeepCopy(dst.Extensions)
+	} else {
+		dst.Extensions = nil
+	}
+}
+
+// deriveDeepCopy_25 recursively copies the contents of src into dst.
+// All map fields are reallocated and copied so dst owns its own maps.
+func deriveDeepCopy_25(dst, src *VolumeConfig) {
+	dst.Name = src.Name
+	dst.Driver = src.Driver
+	if src.DriverOpts != nil {
+		dst.DriverOpts = make(map[string]string, len(src.DriverOpts))
+		deriveDeepCopy_4(dst.DriverOpts, src.DriverOpts)
+	} else {
+		dst.DriverOpts = nil
+	}
+	dst.External = src.External
+	if src.Labels != nil {
+		dst.Labels = make(map[string]string, len(src.Labels))
+		deriveDeepCopy_4(dst.Labels, src.Labels)
+	} else {
+		dst.Labels = nil
+	}
+	if src.CustomLabels != nil {
+		dst.CustomLabels = make(map[string]string, len(src.CustomLabels))
+		deriveDeepCopy_4(dst.CustomLabels, src.CustomLabels)
+	} else {
+		dst.CustomLabels = nil
+	}
+	if src.Extensions != nil {
+		dst.Extensions = make(map[string]any, len(src.Extensions))
+		src.Extensions.DeepCopy(dst.Extensions)
+	} else {
+		dst.Extensions = nil
+	}
+}
+
+// deriveDeepCopy_26 recursively copies the contents of src into dst.
+// Scalar fields (including the unexported marshallContent) are assigned
+// directly; Labels/DriverOpts are reallocated and copied key-by-key.
+func deriveDeepCopy_26(dst, src *SecretConfig) {
+	dst.Name = src.Name
+	dst.File = src.File
+	dst.Environment = src.Environment
+	dst.Content = src.Content
+	dst.marshallContent = src.marshallContent
+	dst.External = src.External
+	if src.Labels != nil {
+		dst.Labels = make(map[string]string, len(src.Labels))
+		deriveDeepCopy_4(dst.Labels, src.Labels)
+	} else {
+		dst.Labels = nil
+	}
+	dst.Driver = src.Driver
+	if src.DriverOpts != nil {
+		dst.DriverOpts = make(map[string]string, len(src.DriverOpts))
+		deriveDeepCopy_4(dst.DriverOpts, src.DriverOpts)
+	} else {
+		dst.DriverOpts = nil
+	}
+	dst.TemplateDriver = src.TemplateDriver
+	if src.Extensions != nil {
+		dst.Extensions = make(map[string]any, len(src.Extensions))
+		src.Extensions.DeepCopy(dst.Extensions)
+	} else {
+		dst.Extensions = nil
+	}
+}
+
+// deriveDeepCopy_27 recursively copies the contents of src into dst.
+// Mirrors deriveDeepCopy_26 for the structurally-identical ConfigObjConfig.
+func deriveDeepCopy_27(dst, src *ConfigObjConfig) {
+	dst.Name = src.Name
+	dst.File = src.File
+	dst.Environment = src.Environment
+	dst.Content = src.Content
+	dst.marshallContent = src.marshallContent
+	dst.External = src.External
+	if src.Labels != nil {
+		dst.Labels = make(map[string]string, len(src.Labels))
+		deriveDeepCopy_4(dst.Labels, src.Labels)
+	} else {
+		dst.Labels = nil
+	}
+	dst.Driver = src.Driver
+	if src.DriverOpts != nil {
+		dst.DriverOpts = make(map[string]string, len(src.DriverOpts))
+		deriveDeepCopy_4(dst.DriverOpts, src.DriverOpts)
+	} else {
+		dst.DriverOpts = nil
+	}
+	dst.TemplateDriver = src.TemplateDriver
+	if src.Extensions != nil {
+		dst.Extensions = make(map[string]any, len(src.Extensions))
+		src.Extensions.DeepCopy(dst.Extensions)
+	} else {
+		dst.Extensions = nil
+	}
+}
+
+// deriveDeepCopy_28 recursively copies the contents of src into dst.
+// dst must already have len(src) elements; each one is deep-copied.
+func deriveDeepCopy_28(dst, src []Trigger) {
+	for src_i, src_value := range src {
+		func() {
+			field := new(Trigger)
+			deriveDeepCopy_46(field, &src_value)
+			dst[src_i] = *field
+		}()
+	}
+}
+
+// deriveDeepCopy_29 recursively copies the contents of src into dst.
+// dst must already have len(src) elements; each one is deep-copied.
+func deriveDeepCopy_29(dst, src []WeightDevice) {
+	for src_i, src_value := range src {
+		func() {
+			field := new(WeightDevice)
+			deriveDeepCopy_47(field, &src_value)
+			dst[src_i] = *field
+		}()
+	}
+}
+
+// deriveDeepCopy_30 recursively copies the contents of src into dst.
+// dst must already have len(src) elements; each one is deep-copied.
+func deriveDeepCopy_30(dst, src []ThrottleDevice) {
+	for src_i, src_value := range src {
+		func() {
+			field := new(ThrottleDevice)
+			deriveDeepCopy_48(field, &src_value)
+			dst[src_i] = *field
+		}()
+	}
+}
+
+// deriveDeepCopy_31 recursively copies the contents of src into dst.
+// Mode keeps nil as nil and is otherwise reallocated and value-copied.
+func deriveDeepCopy_31(dst, src *ServiceConfigObjConfig) {
+	dst.Source = src.Source
+	dst.Target = src.Target
+	dst.UID = src.UID
+	dst.GID = src.GID
+	if src.Mode == nil {
+		dst.Mode = nil
+	} else {
+		dst.Mode = new(uint32)
+		*dst.Mode = *src.Mode
+	}
+	if src.Extensions != nil {
+		dst.Extensions = make(map[string]any, len(src.Extensions))
+		src.Extensions.DeepCopy(dst.Extensions)
+	} else {
+		dst.Extensions = nil
+	}
+}
+
+// deriveDeepCopy_32 recursively copies the contents of src into dst.
+// Scalars are assigned directly; Extensions is deep-copied when non-nil.
+func deriveDeepCopy_32(dst, src *ServiceDependency) {
+	dst.Condition = src.Condition
+	dst.Restart = src.Restart
+	if src.Extensions != nil {
+		dst.Extensions = make(map[string]any, len(src.Extensions))
+		src.Extensions.DeepCopy(dst.Extensions)
+	} else {
+		dst.Extensions = nil
+	}
+	dst.Required = src.Required
+}
+
+// deriveDeepCopy_33 recursively copies the contents of src into dst.
+// Parallelism keeps nil as nil and is otherwise reallocated and copied.
+func deriveDeepCopy_33(dst, src *UpdateConfig) {
+	if src.Parallelism == nil {
+		dst.Parallelism = nil
+	} else {
+		dst.Parallelism = new(uint64)
+		*dst.Parallelism = *src.Parallelism
+	}
+	dst.Delay = src.Delay
+	dst.FailureAction = src.FailureAction
+	dst.Monitor = src.Monitor
+	dst.MaxFailureRatio = src.MaxFailureRatio
+	dst.Order = src.Order
+	if src.Extensions != nil {
+		dst.Extensions = make(map[string]any, len(src.Extensions))
+		src.Extensions.DeepCopy(dst.Extensions)
+	} else {
+		dst.Extensions = nil
+	}
+}
+
+// deriveDeepCopy_34 recursively copies the contents of src into dst.
+// Limits/Reservations keep nil as nil, else are reallocated and deep-copied.
+func deriveDeepCopy_34(dst, src *Resources) {
+	if src.Limits == nil {
+		dst.Limits = nil
+	} else {
+		dst.Limits = new(Resource)
+		deriveDeepCopy_49(dst.Limits, src.Limits)
+	}
+	if src.Reservations == nil {
+		dst.Reservations = nil
+	} else {
+		dst.Reservations = new(Resource)
+		deriveDeepCopy_49(dst.Reservations, src.Reservations)
+	}
+	if src.Extensions != nil {
+		dst.Extensions = make(map[string]any, len(src.Extensions))
+		src.Extensions.DeepCopy(dst.Extensions)
+	} else {
+		dst.Extensions = nil
+	}
+}
+
+// deriveDeepCopy_35 recursively copies the contents of src into dst.
+// All pointer fields keep nil as nil, else are reallocated and value-copied.
+func deriveDeepCopy_35(dst, src *RestartPolicy) {
+	dst.Condition = src.Condition
+	if src.Delay == nil {
+		dst.Delay = nil
+	} else {
+		dst.Delay = new(Duration)
+		*dst.Delay = *src.Delay
+	}
+	if src.MaxAttempts == nil {
+		dst.MaxAttempts = nil
+	} else {
+		dst.MaxAttempts = new(uint64)
+		*dst.MaxAttempts = *src.MaxAttempts
+	}
+	if src.Window == nil {
+		dst.Window = nil
+	} else {
+		dst.Window = new(Duration)
+		*dst.Window = *src.Window
+	}
+	if src.Extensions != nil {
+		dst.Extensions = make(map[string]any, len(src.Extensions))
+		src.Extensions.DeepCopy(dst.Extensions)
+	} else {
+		dst.Extensions = nil
+	}
+}
+
+// deriveDeepCopy_36 recursively copies the contents of src into dst.
+// Slice fields keep nil as nil; otherwise they are resized (reusing dst's
+// backing array when capacity allows) and the elements are copied.
+func deriveDeepCopy_36(dst, src *Placement) {
+	if src.Constraints == nil {
+		dst.Constraints = nil
+	} else {
+		if dst.Constraints != nil {
+			if len(src.Constraints) > len(dst.Constraints) {
+				if cap(dst.Constraints) >= len(src.Constraints) {
+					dst.Constraints = (dst.Constraints)[:len(src.Constraints)]
+				} else {
+					dst.Constraints = make([]string, len(src.Constraints))
+				}
+			} else if len(src.Constraints) < len(dst.Constraints) {
+				dst.Constraints = (dst.Constraints)[:len(src.Constraints)]
+			}
+		} else {
+			dst.Constraints = make([]string, len(src.Constraints))
+		}
+		copy(dst.Constraints, src.Constraints)
+	}
+	if src.Preferences == nil {
+		dst.Preferences = nil
+	} else {
+		if dst.Preferences != nil {
+			if len(src.Preferences) > len(dst.Preferences) {
+				if cap(dst.Preferences) >= len(src.Preferences) {
+					dst.Preferences = (dst.Preferences)[:len(src.Preferences)]
+				} else {
+					dst.Preferences = make([]PlacementPreferences, len(src.Preferences))
+				}
+			} else if len(src.Preferences) < len(dst.Preferences) {
+				dst.Preferences = (dst.Preferences)[:len(src.Preferences)]
+			}
+		} else {
+			dst.Preferences = make([]PlacementPreferences, len(src.Preferences))
+		}
+		deriveDeepCopy_50(dst.Preferences, src.Preferences)
+	}
+	dst.MaxReplicas = src.MaxReplicas
+	if src.Extensions != nil {
+		dst.Extensions = make(map[string]any, len(src.Extensions))
+		src.Extensions.DeepCopy(dst.Extensions)
+	} else {
+		dst.Extensions = nil
+	}
+}
+
+// deriveDeepCopy_37 recursively copies the contents of src into dst.
+// All fields except Extensions are plain scalars copied by assignment.
+func deriveDeepCopy_37(dst, src *DeviceMapping) {
+	dst.Source = src.Source
+	dst.Target = src.Target
+	dst.Permissions = src.Permissions
+	if src.Extensions != nil {
+		dst.Extensions = make(map[string]any, len(src.Extensions))
+		src.Extensions.DeepCopy(dst.Extensions)
+	} else {
+		dst.Extensions = nil
+	}
+}
+
+// deriveDeepCopy_38 recursively copies the contents of src into dst.
+// Slice fields keep nil as nil, else are resized (reusing dst's backing
+// array when capacity allows) and copied; Options is reallocated.
+func deriveDeepCopy_38(dst, src *DeviceRequest) {
+	if src.Capabilities == nil {
+		dst.Capabilities = nil
+	} else {
+		if dst.Capabilities != nil {
+			if len(src.Capabilities) > len(dst.Capabilities) {
+				if cap(dst.Capabilities) >= len(src.Capabilities) {
+					dst.Capabilities = (dst.Capabilities)[:len(src.Capabilities)]
+				} else {
+					dst.Capabilities = make([]string, len(src.Capabilities))
+				}
+			} else if len(src.Capabilities) < len(dst.Capabilities) {
+				dst.Capabilities = (dst.Capabilities)[:len(src.Capabilities)]
+			}
+		} else {
+			dst.Capabilities = make([]string, len(src.Capabilities))
+		}
+		copy(dst.Capabilities, src.Capabilities)
+	}
+	dst.Driver = src.Driver
+	dst.Count = src.Count
+	if src.IDs == nil {
+		dst.IDs = nil
+	} else {
+		if dst.IDs != nil {
+			if len(src.IDs) > len(dst.IDs) {
+				if cap(dst.IDs) >= len(src.IDs) {
+					dst.IDs = (dst.IDs)[:len(src.IDs)]
+				} else {
+					dst.IDs = make([]string, len(src.IDs))
+				}
+			} else if len(src.IDs) < len(dst.IDs) {
+				dst.IDs = (dst.IDs)[:len(src.IDs)]
+			}
+		} else {
+			dst.IDs = make([]string, len(src.IDs))
+		}
+		copy(dst.IDs, src.IDs)
+	}
+	if src.Options != nil {
+		dst.Options = make(map[string]string, len(src.Options))
+		deriveDeepCopy_4(dst.Options, src.Options)
+	} else {
+		dst.Options = nil
+	}
+}
+
+// deriveDeepCopy_39 recursively copies the contents of src into dst.
+// Aliases/LinkLocalIPs keep nil as nil, else are resized (reusing dst's
+// backing array when capacity allows) and copied; DriverOpts is reallocated.
+func deriveDeepCopy_39(dst, src *ServiceNetworkConfig) {
+	dst.Priority = src.Priority
+	if src.Aliases == nil {
+		dst.Aliases = nil
+	} else {
+		if dst.Aliases != nil {
+			if len(src.Aliases) > len(dst.Aliases) {
+				if cap(dst.Aliases) >= len(src.Aliases) {
+					dst.Aliases = (dst.Aliases)[:len(src.Aliases)]
+				} else {
+					dst.Aliases = make([]string, len(src.Aliases))
+				}
+			} else if len(src.Aliases) < len(dst.Aliases) {
+				dst.Aliases = (dst.Aliases)[:len(src.Aliases)]
+			}
+		} else {
+			dst.Aliases = make([]string, len(src.Aliases))
+		}
+		copy(dst.Aliases, src.Aliases)
+	}
+	dst.Ipv4Address = src.Ipv4Address
+	dst.Ipv6Address = src.Ipv6Address
+	if src.LinkLocalIPs == nil {
+		dst.LinkLocalIPs = nil
+	} else {
+		if dst.LinkLocalIPs != nil {
+			if len(src.LinkLocalIPs) > len(dst.LinkLocalIPs) {
+				if cap(dst.LinkLocalIPs) >= len(src.LinkLocalIPs) {
+					dst.LinkLocalIPs = (dst.LinkLocalIPs)[:len(src.LinkLocalIPs)]
+				} else {
+					dst.LinkLocalIPs = make([]string, len(src.LinkLocalIPs))
+				}
+			} else if len(src.LinkLocalIPs) < len(dst.LinkLocalIPs) {
+				dst.LinkLocalIPs = (dst.LinkLocalIPs)[:len(src.LinkLocalIPs)]
+			}
+		} else {
+			dst.LinkLocalIPs = make([]string, len(src.LinkLocalIPs))
+		}
+		copy(dst.LinkLocalIPs, src.LinkLocalIPs)
+	}
+	dst.MacAddress = src.MacAddress
+	if src.DriverOpts != nil {
+		dst.DriverOpts = make(map[string]string, len(src.DriverOpts))
+		deriveDeepCopy_4(dst.DriverOpts, src.DriverOpts)
+	} else {
+		dst.DriverOpts = nil
+	}
+	if src.Extensions != nil {
+		dst.Extensions = make(map[string]any, len(src.Extensions))
+		src.Extensions.DeepCopy(dst.Extensions)
+	} else {
+		dst.Extensions = nil
+	}
+}
+
+// deriveDeepCopy_40 recursively copies the contents of src into dst.
+// All fields except Extensions are plain scalars copied by assignment.
+func deriveDeepCopy_40(dst, src *ServicePortConfig) {
+	dst.Name = src.Name
+	dst.Mode = src.Mode
+	dst.HostIP = src.HostIP
+	dst.Target = src.Target
+	dst.Published = src.Published
+	dst.Protocol = src.Protocol
+	dst.AppProtocol = src.AppProtocol
+	if src.Extensions != nil {
+		dst.Extensions = make(map[string]any, len(src.Extensions))
+		src.Extensions.DeepCopy(dst.Extensions)
+	} else {
+		dst.Extensions = nil
+	}
+}
+
+// deriveDeepCopy_41 recursively copies the contents of src into dst.
+// Mode keeps nil as nil and is otherwise reallocated and value-copied.
+func deriveDeepCopy_41(dst, src *ServiceSecretConfig) {
+	dst.Source = src.Source
+	dst.Target = src.Target
+	dst.UID = src.UID
+	dst.GID = src.GID
+	if src.Mode == nil {
+		dst.Mode = nil
+	} else {
+		dst.Mode = new(uint32)
+		*dst.Mode = *src.Mode
+	}
+	if src.Extensions != nil {
+		dst.Extensions = make(map[string]any, len(src.Extensions))
+		src.Extensions.DeepCopy(dst.Extensions)
+	} else {
+		dst.Extensions = nil
+	}
+}
+
+// deriveDeepCopy_42 recursively copies the contents of src into dst.
+// All fields except Extensions are plain scalars copied by assignment.
+func deriveDeepCopy_42(dst, src *UlimitsConfig) {
+	dst.Single = src.Single
+	dst.Soft = src.Soft
+	dst.Hard = src.Hard
+	if src.Extensions != nil {
+		dst.Extensions = make(map[string]any, len(src.Extensions))
+		src.Extensions.DeepCopy(dst.Extensions)
+	} else {
+		dst.Extensions = nil
+	}
+}
+
+// deriveDeepCopy_43 recursively copies the contents of src into dst.
+// Bind/Volume/Tmpfs keep nil as nil, else are reallocated and deep-copied.
+func deriveDeepCopy_43(dst, src *ServiceVolumeConfig) {
+	dst.Type = src.Type
+	dst.Source = src.Source
+	dst.Target = src.Target
+	dst.ReadOnly = src.ReadOnly
+	dst.Consistency = src.Consistency
+	if src.Bind == nil {
+		dst.Bind = nil
+	} else {
+		dst.Bind = new(ServiceVolumeBind)
+		deriveDeepCopy_51(dst.Bind, src.Bind)
+	}
+	if src.Volume == nil {
+		dst.Volume = nil
+	} else {
+		dst.Volume = new(ServiceVolumeVolume)
+		deriveDeepCopy_52(dst.Volume, src.Volume)
+	}
+	if src.Tmpfs == nil {
+		dst.Tmpfs = nil
+	} else {
+		dst.Tmpfs = new(ServiceVolumeTmpfs)
+		deriveDeepCopy_53(dst.Tmpfs, src.Tmpfs)
+	}
+	if src.Extensions != nil {
+		dst.Extensions = make(map[string]any, len(src.Extensions))
+		src.Extensions.DeepCopy(dst.Extensions)
+	} else {
+		dst.Extensions = nil
+	}
+}
+
+// deriveDeepCopy_44 recursively copies the contents of src into dst.
+// Command keeps nil as nil, else is resized (reusing dst's backing array
+// when capacity allows) and copied; Environment is reallocated and its
+// *string values duplicated.
+func deriveDeepCopy_44(dst, src *ServiceHook) {
+	if src.Command == nil {
+		dst.Command = nil
+	} else {
+		if dst.Command != nil {
+			if len(src.Command) > len(dst.Command) {
+				if cap(dst.Command) >= len(src.Command) {
+					dst.Command = (dst.Command)[:len(src.Command)]
+				} else {
+					dst.Command = make([]string, len(src.Command))
+				}
+			} else if len(src.Command) < len(dst.Command) {
+				dst.Command = (dst.Command)[:len(src.Command)]
+			}
+		} else {
+			dst.Command = make([]string, len(src.Command))
+		}
+		copy(dst.Command, src.Command)
+	}
+	dst.User = src.User
+	dst.Privileged = src.Privileged
+	dst.WorkingDir = src.WorkingDir
+	if src.Environment != nil {
+		dst.Environment = make(map[string]*string, len(src.Environment))
+		deriveDeepCopy_13(dst.Environment, src.Environment)
+	} else {
+		dst.Environment = nil
+	}
+	if src.Extensions != nil {
+		dst.Extensions = make(map[string]any, len(src.Extensions))
+		src.Extensions.DeepCopy(dst.Extensions)
+	} else {
+		dst.Extensions = nil
+	}
+}
+
+// deriveDeepCopy_45 recursively copies the contents of src into dst.
+// Config keeps nil as nil, else is resized (reusing dst's backing array
+// when capacity allows) and the *IPAMPool elements are deep-copied.
+func deriveDeepCopy_45(dst, src *IPAMConfig) {
+	dst.Driver = src.Driver
+	if src.Config == nil {
+		dst.Config = nil
+	} else {
+		if dst.Config != nil {
+			if len(src.Config) > len(dst.Config) {
+				if cap(dst.Config) >= len(src.Config) {
+					dst.Config = (dst.Config)[:len(src.Config)]
+				} else {
+					dst.Config = make([]*IPAMPool, len(src.Config))
+				}
+			} else if len(src.Config) < len(dst.Config) {
+				dst.Config = (dst.Config)[:len(src.Config)]
+			}
+		} else {
+			dst.Config = make([]*IPAMPool, len(src.Config))
+		}
+		deriveDeepCopy_54(dst.Config, src.Config)
+	}
+	if src.Extensions != nil {
+		dst.Extensions = make(map[string]any, len(src.Extensions))
+		src.Extensions.DeepCopy(dst.Extensions)
+	} else {
+		dst.Extensions = nil
+	}
+}
+
+// deriveDeepCopy_46 recursively copies the contents of src into dst.
+func deriveDeepCopy_46(dst, src *Trigger) {
+	dst.Path = src.Path
+	dst.Action = src.Action
+	dst.Target = src.Target
+	func() {
+		field := new(ServiceHook)
+		deriveDeepCopy_44(field, &src.Exec)
+		dst.Exec = *field
+	}()
+	if src.Ignore == nil {
+		dst.Ignore = nil
+	} else {
+		if dst.Ignore != nil {
+			if len(src.Ignore) > len(dst.Ignore) {
+				if cap(dst.Ignore) >= len(src.Ignore) {
+					dst.Ignore = (dst.Ignore)[:len(src.Ignore)]
+				} else {
+					dst.Ignore = make([]string, len(src.Ignore))
+				}
+			} else if len(src.Ignore) < len(dst.Ignore) {
+				dst.Ignore = (dst.Ignore)[:len(src.Ignore)]
+			}
+		} else {
+			dst.Ignore = make([]string, len(src.Ignore))
+		}
+		copy(dst.Ignore, src.Ignore)
+	}
+	if src.Extensions != nil {
+		dst.Extensions = make(map[string]any, len(src.Extensions))
+		src.Extensions.DeepCopy(dst.Extensions)
+	} else {
+		dst.Extensions = nil
+	}
+}
+
+// deriveDeepCopy_47 recursively copies the contents of src into dst.
+func deriveDeepCopy_47(dst, src *WeightDevice) {
+	dst.Path = src.Path
+	dst.Weight = src.Weight
+	if src.Extensions != nil {
+		dst.Extensions = make(map[string]any, len(src.Extensions))
+		src.Extensions.DeepCopy(dst.Extensions)
+	} else {
+		dst.Extensions = nil
+	}
+}
+
+// deriveDeepCopy_48 recursively copies the contents of src into dst.
+func deriveDeepCopy_48(dst, src *ThrottleDevice) {
+	dst.Path = src.Path
+	dst.Rate = src.Rate
+	if src.Extensions != nil {
+		dst.Extensions = make(map[string]any, len(src.Extensions))
+		src.Extensions.DeepCopy(dst.Extensions)
+	} else {
+		dst.Extensions = nil
+	}
+}
+
+// deriveDeepCopy_49 recursively copies the contents of src into dst.
+func deriveDeepCopy_49(dst, src *Resource) {
+	dst.NanoCPUs = src.NanoCPUs
+	dst.MemoryBytes = src.MemoryBytes
+	dst.Pids = src.Pids
+	if src.Devices == nil {
+		dst.Devices = nil
+	} else {
+		if dst.Devices != nil {
+			if len(src.Devices) > len(dst.Devices) {
+				if cap(dst.Devices) >= len(src.Devices) {
+					dst.Devices = (dst.Devices)[:len(src.Devices)]
+				} else {
+					dst.Devices = make([]DeviceRequest, len(src.Devices))
+				}
+			} else if len(src.Devices) < len(dst.Devices) {
+				dst.Devices = (dst.Devices)[:len(src.Devices)]
+			}
+		} else {
+			dst.Devices = make([]DeviceRequest, len(src.Devices))
+		}
+		deriveDeepCopy_15(dst.Devices, src.Devices)
+	}
+	if src.GenericResources == nil {
+		dst.GenericResources = nil
+	} else {
+		if dst.GenericResources != nil {
+			if len(src.GenericResources) > len(dst.GenericResources) {
+				if cap(dst.GenericResources) >= len(src.GenericResources) {
+					dst.GenericResources = (dst.GenericResources)[:len(src.GenericResources)]
+				} else {
+					dst.GenericResources = make([]GenericResource, len(src.GenericResources))
+				}
+			} else if len(src.GenericResources) < len(dst.GenericResources) {
+				dst.GenericResources = (dst.GenericResources)[:len(src.GenericResources)]
+			}
+		} else {
+			dst.GenericResources = make([]GenericResource, len(src.GenericResources))
+		}
+		deriveDeepCopy_55(dst.GenericResources, src.GenericResources)
+	}
+	if src.Extensions != nil {
+		dst.Extensions = make(map[string]any, len(src.Extensions))
+		src.Extensions.DeepCopy(dst.Extensions)
+	} else {
+		dst.Extensions = nil
+	}
+}
+
+// deriveDeepCopy_50 recursively copies the contents of src into dst.
+func deriveDeepCopy_50(dst, src []PlacementPreferences) {
+	for src_i, src_value := range src {
+		func() {
+			field := new(PlacementPreferences)
+			deriveDeepCopy_56(field, &src_value)
+			dst[src_i] = *field
+		}()
+	}
+}
+
+// deriveDeepCopy_51 recursively copies the contents of src into dst.
+func deriveDeepCopy_51(dst, src *ServiceVolumeBind) {
+	dst.SELinux = src.SELinux
+	dst.Propagation = src.Propagation
+	dst.CreateHostPath = src.CreateHostPath
+	dst.Recursive = src.Recursive
+	if src.Extensions != nil {
+		dst.Extensions = make(map[string]any, len(src.Extensions))
+		src.Extensions.DeepCopy(dst.Extensions)
+	} else {
+		dst.Extensions = nil
+	}
+}
+
+// deriveDeepCopy_52 recursively copies the contents of src into dst.
+func deriveDeepCopy_52(dst, src *ServiceVolumeVolume) {
+	dst.NoCopy = src.NoCopy
+	dst.Subpath = src.Subpath
+	if src.Extensions != nil {
+		dst.Extensions = make(map[string]any, len(src.Extensions))
+		src.Extensions.DeepCopy(dst.Extensions)
+	} else {
+		dst.Extensions = nil
+	}
+}
+
+// deriveDeepCopy_53 recursively copies the contents of src into dst.
+func deriveDeepCopy_53(dst, src *ServiceVolumeTmpfs) {
+	dst.Size = src.Size
+	dst.Mode = src.Mode
+	if src.Extensions != nil {
+		dst.Extensions = make(map[string]any, len(src.Extensions))
+		src.Extensions.DeepCopy(dst.Extensions)
+	} else {
+		dst.Extensions = nil
+	}
+}
+
+// deriveDeepCopy_54 recursively copies the contents of src into dst.
+func deriveDeepCopy_54(dst, src []*IPAMPool) {
+	for src_i, src_value := range src {
+		if src_value == nil {
+			dst[src_i] = nil
+		} else {
+			dst[src_i] = new(IPAMPool)
+			deriveDeepCopy_57(dst[src_i], src_value)
+		}
+	}
+}
+
+// deriveDeepCopy_55 recursively copies the contents of src into dst.
+func deriveDeepCopy_55(dst, src []GenericResource) {
+	for src_i, src_value := range src {
+		func() {
+			field := new(GenericResource)
+			deriveDeepCopy_58(field, &src_value)
+			dst[src_i] = *field
+		}()
+	}
+}
+
+// deriveDeepCopy_56 recursively copies the contents of src into dst.
+func deriveDeepCopy_56(dst, src *PlacementPreferences) {
+	dst.Spread = src.Spread
+	if src.Extensions != nil {
+		dst.Extensions = make(map[string]any, len(src.Extensions))
+		src.Extensions.DeepCopy(dst.Extensions)
+	} else {
+		dst.Extensions = nil
+	}
+}
+
+// deriveDeepCopy_57 recursively copies the contents of src into dst.
+func deriveDeepCopy_57(dst, src *IPAMPool) {
+	dst.Subnet = src.Subnet
+	dst.Gateway = src.Gateway
+	dst.IPRange = src.IPRange
+	if src.AuxiliaryAddresses != nil {
+		dst.AuxiliaryAddresses = make(map[string]string, len(src.AuxiliaryAddresses))
+		deriveDeepCopy_4(dst.AuxiliaryAddresses, src.AuxiliaryAddresses)
+	} else {
+		dst.AuxiliaryAddresses = nil
+	}
+	if src.Extensions != nil {
+		dst.Extensions = make(map[string]any, len(src.Extensions))
+		src.Extensions.DeepCopy(dst.Extensions)
+	} else {
+		dst.Extensions = nil
+	}
+}
+
+// deriveDeepCopy_58 recursively copies the contents of src into dst.
+func deriveDeepCopy_58(dst, src *GenericResource) {
+	if src.DiscreteResourceSpec == nil {
+		dst.DiscreteResourceSpec = nil
+	} else {
+		dst.DiscreteResourceSpec = new(DiscreteGenericResource)
+		deriveDeepCopy_59(dst.DiscreteResourceSpec, src.DiscreteResourceSpec)
+	}
+	if src.Extensions != nil {
+		dst.Extensions = make(map[string]any, len(src.Extensions))
+		src.Extensions.DeepCopy(dst.Extensions)
+	} else {
+		dst.Extensions = nil
+	}
+}
+
+// deriveDeepCopy_59 recursively copies the contents of src into dst.
+func deriveDeepCopy_59(dst, src *DiscreteGenericResource) {
+	dst.Kind = src.Kind
+	dst.Value = src.Value
+	if src.Extensions != nil {
+		dst.Extensions = make(map[string]any, len(src.Extensions))
+		src.Extensions.DeepCopy(dst.Extensions)
+	} else {
+		dst.Extensions = nil
+	}
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/types/develop.go 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/types/develop.go
--- 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/types/develop.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/types/develop.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,42 @@
+/*
+   Copyright 2020 The Compose Specification Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package types
+
+type DevelopConfig struct {
+	Watch []Trigger `yaml:"watch,omitempty" json:"watch,omitempty"`
+
+	Extensions Extensions `yaml:"#extensions,inline,omitempty" json:"-"`
+}
+
+type WatchAction string
+
+const (
+	WatchActionSync        WatchAction = "sync"
+	WatchActionRebuild     WatchAction = "rebuild"
+	WatchActionRestart     WatchAction = "restart"
+	WatchActionSyncRestart WatchAction = "sync+restart"
+	WatchActionSyncExec    WatchAction = "sync+exec"
+)
+
+type Trigger struct {
+	Path       string      `yaml:"path" json:"path"`
+	Action     WatchAction `yaml:"action" json:"action"`
+	Target     string      `yaml:"target,omitempty" json:"target,omitempty"`
+	Exec       ServiceHook `yaml:"exec,omitempty" json:"exec,omitempty"`
+	Ignore     []string    `yaml:"ignore,omitempty" json:"ignore,omitempty"`
+	Extensions Extensions  `yaml:"#extensions,inline,omitempty" json:"-"`
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/types/device.go 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/types/device.go
--- 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/types/device.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/types/device.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,53 @@
+/*
+   Copyright 2020 The Compose Specification Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package types
+
+import (
+	"fmt"
+	"strconv"
+	"strings"
+)
+
+type DeviceRequest struct {
+	Capabilities []string    `yaml:"capabilities,omitempty" json:"capabilities,omitempty"`
+	Driver       string      `yaml:"driver,omitempty" json:"driver,omitempty"`
+	Count        DeviceCount `yaml:"count,omitempty" json:"count,omitempty"`
+	IDs          []string    `yaml:"device_ids,omitempty" json:"device_ids,omitempty"`
+	Options      Mapping     `yaml:"options,omitempty" json:"options,omitempty"`
+}
+
+type DeviceCount int64
+
+func (c *DeviceCount) DecodeMapstructure(value interface{}) error {
+	switch v := value.(type) {
+	case int:
+		*c = DeviceCount(v)
+	case string:
+		if strings.ToLower(v) == "all" {
+			*c = -1
+			return nil
+		}
+		i, err := strconv.ParseInt(v, 10, 64)
+		if err != nil {
+			return fmt.Errorf("invalid value %q, the only value allowed is 'all' or a number", v)
+		}
+		*c = DeviceCount(i)
+	default:
+		return fmt.Errorf("invalid type %T for device count", v)
+	}
+	return nil
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/types/duration.go 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/types/duration.go
--- 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/types/duration.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/types/duration.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,60 @@
+/*
+   Copyright 2020 The Compose Specification Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package types
+
+import (
+	"encoding/json"
+	"fmt"
+	"strings"
+	"time"
+)
+
+// Duration is a thin wrapper around time.Duration with improved JSON marshalling
+type Duration time.Duration
+
+func (d Duration) String() string {
+	return time.Duration(d).String()
+}
+
+func (d *Duration) DecodeMapstructure(value interface{}) error {
+	v, err := time.ParseDuration(fmt.Sprint(value))
+	if err != nil {
+		return err
+	}
+	*d = Duration(v)
+	return nil
+}
+
+// MarshalJSON makes Duration implement json.Marshaler
+func (d Duration) MarshalJSON() ([]byte, error) {
+	return json.Marshal(d.String())
+}
+
+// MarshalYAML makes Duration implement yaml.Marshaler
+func (d Duration) MarshalYAML() (interface{}, error) {
+	return d.String(), nil
+}
+
+func (d *Duration) UnmarshalJSON(b []byte) error {
+	s := strings.Trim(string(b), "\"")
+	timeDuration, err := time.ParseDuration(s)
+	if err != nil {
+		return err
+	}
+	*d = Duration(timeDuration)
+	return nil
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/types/envfile.go 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/types/envfile.go
--- 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/types/envfile.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/types/envfile.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,47 @@
+/*
+   Copyright 2020 The Compose Specification Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package types
+
+import (
+	"encoding/json"
+)
+
+type EnvFile struct {
+	Path     string `yaml:"path,omitempty" json:"path,omitempty"`
+	Required bool   `yaml:"required" json:"required"`
+	Format   string `yaml:"format,omitempty" json:"format,omitempty"`
+}
+
+// MarshalYAML makes EnvFile implement yaml.Marshaler
+func (e EnvFile) MarshalYAML() (interface{}, error) {
+	if e.Required {
+		return e.Path, nil
+	}
+	return map[string]any{
+		"path":     e.Path,
+		"required": e.Required,
+	}, nil
+}
+
+// MarshalJSON makes EnvFile implement json.Marshaler
+func (e *EnvFile) MarshalJSON() ([]byte, error) {
+	if e.Required {
+		return json.Marshal(e.Path)
+	}
+	// Pass as a value to avoid re-entering this method and use the default implementation
+	return json.Marshal(*e)
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/types/healthcheck.go 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/types/healthcheck.go
--- 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/types/healthcheck.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/types/healthcheck.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,53 @@
+/*
+   Copyright 2020 The Compose Specification Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package types
+
+import (
+	"fmt"
+)
+
+// HealthCheckConfig defines the healthcheck configuration for a service
+type HealthCheckConfig struct {
+	Test          HealthCheckTest `yaml:"test,omitempty" json:"test,omitempty"`
+	Timeout       *Duration       `yaml:"timeout,omitempty" json:"timeout,omitempty"`
+	Interval      *Duration       `yaml:"interval,omitempty" json:"interval,omitempty"`
+	Retries       *uint64         `yaml:"retries,omitempty" json:"retries,omitempty"`
+	StartPeriod   *Duration       `yaml:"start_period,omitempty" json:"start_period,omitempty"`
+	StartInterval *Duration       `yaml:"start_interval,omitempty" json:"start_interval,omitempty"`
+	Disable       bool            `yaml:"disable,omitempty" json:"disable,omitempty"`
+
+	Extensions Extensions `yaml:"#extensions,inline,omitempty" json:"-"`
+}
+
+// HealthCheckTest is the command run to test the health of a service
+type HealthCheckTest []string
+
+func (l *HealthCheckTest) DecodeMapstructure(value interface{}) error {
+	switch v := value.(type) {
+	case string:
+		*l = []string{"CMD-SHELL", v}
+	case []interface{}:
+		seq := make([]string, len(v))
+		for i, e := range v {
+			seq[i] = e.(string)
+		}
+		*l = seq
+	default:
+		return fmt.Errorf("unexpected value type %T for healthcheck.test", value)
+	}
+	return nil
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/types/hooks.go 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/types/hooks.go
--- 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/types/hooks.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/types/hooks.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,28 @@
+/*
+   Copyright 2020 The Compose Specification Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package types
+
+// ServiceHook is a command to exec inside the container on some lifecycle events
+type ServiceHook struct {
+	Command     ShellCommand      `yaml:"command,omitempty" json:"command"`
+	User        string            `yaml:"user,omitempty" json:"user,omitempty"`
+	Privileged  bool              `yaml:"privileged,omitempty" json:"privileged,omitempty"`
+	WorkingDir  string            `yaml:"working_dir,omitempty" json:"working_dir,omitempty"`
+	Environment MappingWithEquals `yaml:"environment,omitempty" json:"environment,omitempty"`
+
+	Extensions Extensions `yaml:"#extensions,inline,omitempty" json:"-"`
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/types/hostList.go 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/types/hostList.go
--- 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/types/hostList.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/types/hostList.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,144 @@
+/*
+   Copyright 2020 The Compose Specification Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package types
+
+import (
+	"encoding/json"
+	"fmt"
+	"sort"
+	"strings"
+)
+
+// HostsList is a list of colon-separated host-ip mappings
+type HostsList map[string][]string
+
+// NewHostsList creates a HostsList from a list of `host=ip` strings
+func NewHostsList(hosts []string) (HostsList, error) {
+	list := HostsList{}
+	for _, s := range hosts {
+		var found bool
+		for _, sep := range hostListSeparators {
+			host, ip, ok := strings.Cut(s, sep)
+			if ok {
+				// Mapping found with this separator, stop here.
+				if ips, ok := list[host]; ok {
+					list[host] = append(ips, strings.Split(ip, ",")...)
+				} else {
+					list[host] = strings.Split(ip, ",")
+				}
+				found = true
+				break
+			}
+		}
+		if !found {
+			return nil, fmt.Errorf("invalid additional host, missing IP: %s", s)
+		}
+	}
+	err := list.cleanup()
+	return list, err
+}
+
+// AsList returns host-ip mappings as a list of strings, using the given
+// separator. The Docker Engine API expects ':' separators, the original format
+// for '--add-hosts'. But an '=' separator is used in YAML/JSON renderings to
+// make IPv6 addresses more readable (for example "my-host=::1" instead of
+// "my-host:::1").
+func (h HostsList) AsList(sep string) []string {
+	l := make([]string, 0, len(h))
+	for k, v := range h {
+		for _, ip := range v {
+			l = append(l, fmt.Sprintf("%s%s%s", k, sep, ip))
+		}
+	}
+	return l
+}
+
+func (h HostsList) MarshalYAML() (interface{}, error) {
+	list := h.AsList("=")
+	sort.Strings(list)
+	return list, nil
+}
+
+func (h HostsList) MarshalJSON() ([]byte, error) {
+	list := h.AsList("=")
+	sort.Strings(list)
+	return json.Marshal(list)
+}
+
+var hostListSeparators = []string{"=", ":"}
+
+func (h *HostsList) DecodeMapstructure(value interface{}) error {
+	switch v := value.(type) {
+	case map[string]interface{}:
+		list := make(HostsList, len(v))
+		for i, e := range v {
+			if e == nil {
+				e = ""
+			}
+			switch t := e.(type) {
+			case string:
+				list[i] = []string{t}
+			case []any:
+				hosts := make([]string, len(t))
+				for j, h := range t {
+					hosts[j] = fmt.Sprint(h)
+				}
+				list[i] = hosts
+			default:
+				return fmt.Errorf("unexpected value type %T for extra_hosts entry", e)
+			}
+		}
+		err := list.cleanup()
+		if err != nil {
+			return err
+		}
+		*h = list
+		return nil
+	case []interface{}:
+		s := make([]string, len(v))
+		for i, e := range v {
+			s[i] = fmt.Sprint(e)
+		}
+		list, err := NewHostsList(s)
+		if err != nil {
+			return err
+		}
+		*h = list
+		return nil
+	default:
+		return fmt.Errorf("unexpected value type %T for extra_hosts", value)
+	}
+}
+
+func (h HostsList) cleanup() error {
+	for host, ips := range h {
+		// Check that there is a hostname and that it doesn't contain either
+		// of the allowed separators, to generate a clearer error than the
+		// engine would do if it splits the string differently.
+		if host == "" || strings.ContainsAny(host, ":=") {
+			return fmt.Errorf("bad host name '%s'", host)
+		}
+		for i, ip := range ips {
+			// Remove brackets from IP addresses (for example "[::1]" -> "::1").
+			if len(ip) > 2 && ip[0] == '[' && ip[len(ip)-1] == ']' {
+				ips[i] = ip[1 : len(ip)-1]
+			}
+		}
+		h[host] = ips
+	}
+	return nil
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/types/labels.go 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/types/labels.go
--- 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/types/labels.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/types/labels.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,96 @@
+/*
+   Copyright 2020 The Compose Specification Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package types
+
+import (
+	"fmt"
+	"strings"
+)
+
+// Labels is a mapping type for labels
+type Labels map[string]string
+
+func NewLabelsFromMappingWithEquals(mapping MappingWithEquals) Labels {
+	labels := Labels{}
+	for k, v := range mapping {
+		if v != nil {
+			labels[k] = *v
+		}
+	}
+	return labels
+}
+
+func (l Labels) Add(key, value string) Labels {
+	if l == nil {
+		l = Labels{}
+	}
+	l[key] = value
+	return l
+}
+
+func (l Labels) AsList() []string {
+	s := make([]string, len(l))
+	i := 0
+	for k, v := range l {
+		s[i] = fmt.Sprintf("%s=%s", k, v)
+		i++
+	}
+	return s
+}
+
+func (l Labels) ToMappingWithEquals() MappingWithEquals {
+	mapping := MappingWithEquals{}
+	for k, v := range l {
+		v := v
+		mapping[k] = &v
+	}
+	return mapping
+}
+
+// label value can be a string | number | boolean | null (empty)
+func labelValue(e interface{}) string {
+	if e == nil {
+		return ""
+	}
+	switch v := e.(type) {
+	case string:
+		return v
+	default:
+		return fmt.Sprint(v)
+	}
+}
+
+func (l *Labels) DecodeMapstructure(value interface{}) error {
+	switch v := value.(type) {
+	case map[string]interface{}:
+		labels := make(map[string]string, len(v))
+		for k, e := range v {
+			labels[k] = labelValue(e)
+		}
+		*l = labels
+	case []interface{}:
+		labels := make(map[string]string, len(v))
+		for _, s := range v {
+			k, e, _ := strings.Cut(fmt.Sprint(s), "=")
+			labels[k] = labelValue(e)
+		}
+		*l = labels
+	default:
+		return fmt.Errorf("unexpected value type %T for labels", value)
+	}
+	return nil
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/types/mapping.go 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/types/mapping.go
--- 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/types/mapping.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/types/mapping.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,227 @@
+/*
+   Copyright 2020 The Compose Specification Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package types
+
+import (
+	"fmt"
+	"sort"
+	"strings"
+)
+
+// MappingWithEquals is a mapping type that can be converted from a list of
+// key[=value] strings.
+// For the key with an empty value (`key=`), the mapped value is set to a pointer to `""`.
+// For the key without value (`key`), the mapped value is set to nil.
+type MappingWithEquals map[string]*string
+
+// NewMappingWithEquals builds a new MappingWithEquals from a set of KEY=VALUE strings
+func NewMappingWithEquals(values []string) MappingWithEquals {
+	mapping := MappingWithEquals{}
+	for _, env := range values {
+		tokens := strings.SplitN(env, "=", 2)
+		if len(tokens) > 1 {
+			mapping[tokens[0]] = &tokens[1]
+		} else {
+			mapping[env] = nil
+		}
+	}
+	return mapping
+}
+
+// OverrideBy updates MappingWithEquals with values from another MappingWithEquals
+func (m MappingWithEquals) OverrideBy(other MappingWithEquals) MappingWithEquals {
+	for k, v := range other {
+		m[k] = v
+	}
+	return m
+}
+
+// Resolve updates a MappingWithEquals for keys without value (`key`, but not `key=`)
+func (m MappingWithEquals) Resolve(lookupFn func(string) (string, bool)) MappingWithEquals {
+	for k, v := range m {
+		if v == nil {
+			if value, ok := lookupFn(k); ok {
+				m[k] = &value
+			}
+		}
+	}
+	return m
+}
+
+// RemoveEmpty excludes keys that are not associated with a value
+func (m MappingWithEquals) RemoveEmpty() MappingWithEquals {
+	for k, v := range m {
+		if v == nil {
+			delete(m, k)
+		}
+	}
+	return m
+}
+
+func (m MappingWithEquals) ToMapping() Mapping {
+	o := Mapping{}
+	for k, v := range m {
+		if v != nil {
+			o[k] = *v
+		}
+	}
+	return o
+}
+
+func (m *MappingWithEquals) DecodeMapstructure(value interface{}) error {
+	switch v := value.(type) {
+	case map[string]interface{}:
+		mapping := make(MappingWithEquals, len(v))
+		for k, e := range v {
+			mapping[k] = mappingValue(e)
+		}
+		*m = mapping
+	case []interface{}:
+		mapping := make(MappingWithEquals, len(v))
+		for _, s := range v {
+			k, e, ok := strings.Cut(fmt.Sprint(s), "=")
+			if !ok {
+				mapping[k] = nil
+			} else {
+				mapping[k] = mappingValue(e)
+			}
+		}
+		*m = mapping
+	default:
+		return fmt.Errorf("unexpected value type %T for mapping", value)
+	}
+	return nil
+}
+
+// mapping value can be a string | number | boolean | null; nil yields a nil pointer
+func mappingValue(e interface{}) *string {
+	if e == nil {
+		return nil
+	}
+	switch v := e.(type) {
+	case string:
+		return &v
+	default:
+		s := fmt.Sprint(v)
+		return &s
+	}
+}
+
+// Mapping is a mapping type that can be converted from a list of
+// key[=value] strings.
+// For the key with an empty value (`key=`), or key without value (`key`), the
+// mapped value is set to an empty string `""`.
+type Mapping map[string]string
+
+// NewMapping builds a new Mapping from a set of KEY=VALUE strings
+func NewMapping(values []string) Mapping {
+	mapping := Mapping{}
+	for _, value := range values {
+		parts := strings.SplitN(value, "=", 2)
+		key := parts[0]
+		switch {
+		case len(parts) == 1:
+			mapping[key] = ""
+		default:
+			mapping[key] = parts[1]
+		}
+	}
+	return mapping
+}
+
+// Values converts the mapping into a sorted list of KEY=VALUE strings
+func (m Mapping) Values() []string {
+	values := make([]string, 0, len(m))
+	for k, v := range m {
+		values = append(values, fmt.Sprintf("%s=%s", k, v))
+	}
+	sort.Strings(values)
+	return values
+}
+
+// ToMappingWithEquals converts Mapping into a MappingWithEquals with pointer references
+func (m Mapping) ToMappingWithEquals() MappingWithEquals {
+	mapping := MappingWithEquals{}
+	for k, v := range m {
+		v := v
+		mapping[k] = &v
+	}
+	return mapping
+}
+
+func (m Mapping) Resolve(s string) (string, bool) {
+	v, ok := m[s]
+	return v, ok
+}
+
+func (m Mapping) Clone() Mapping {
+	clone := Mapping{}
+	for k, v := range m {
+		clone[k] = v
+	}
+	return clone
+}
+
+// Merge adds all values from the second mapping which are not already defined
+func (m Mapping) Merge(o Mapping) Mapping {
+	for k, v := range o {
+		if _, set := m[k]; !set {
+			m[k] = v
+		}
+	}
+	return m
+}
+
+func (m *Mapping) DecodeMapstructure(value interface{}) error {
+	switch v := value.(type) {
+	case map[string]interface{}:
+		mapping := make(Mapping, len(v))
+		for k, e := range v {
+			if e == nil {
+				e = ""
+			}
+			mapping[k] = fmt.Sprint(e)
+		}
+		*m = mapping
+	case []interface{}:
+		*m = decodeMapping(v, "=")
+	default:
+		return fmt.Errorf("unexpected value type %T for mapping", value)
+	}
+	return nil
+}
+
+// Generate a mapping by splitting strings at any of seps, which will be tried
+// in-order for each input string. (For example, to allow the preferred 'host=ip'
+// in 'extra_hosts', as well as 'host:ip' for backwards compatibility.)
+func decodeMapping(v []interface{}, seps ...string) map[string]string {
+	mapping := make(Mapping, len(v))
+	for _, s := range v {
+		for i, sep := range seps {
+			k, e, ok := strings.Cut(fmt.Sprint(s), sep)
+			if ok {
+				// Mapping found with this separator, stop here.
+				mapping[k] = e
+				break
+			} else if i == len(seps)-1 {
+				// No more separators to try, map to empty string.
+				mapping[k] = ""
+			}
+		}
+	}
+	return mapping
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/types/options.go 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/types/options.go
--- 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/types/options.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/types/options.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,42 @@
+/*
+   Copyright 2020 The Compose Specification Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package types
+
+import "fmt"
+
+// Options is a mapping type for options we pass as-is to container runtime
+type Options map[string]string
+
+// DecodeMapstructure implements custom decoding for Options so values of
+// any scalar type are stored as strings; a nil value decodes to the
+// empty string. Non-map input is rejected.
+func (d *Options) DecodeMapstructure(value interface{}) error {
+	switch v := value.(type) {
+	case map[string]interface{}:
+		m := make(map[string]string)
+		for key, e := range v {
+			if e == nil {
+				m[key] = ""
+			} else {
+				m[key] = fmt.Sprint(e)
+			}
+		}
+		*d = m
+	case map[string]string:
+		*d = v
+	default:
+		return fmt.Errorf("invalid type %T for options", value)
+	}
+	return nil
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/types/project.go 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/types/project.go
--- 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/types/project.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/types/project.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,808 @@
+/*
+   Copyright 2020 The Compose Specification Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package types
+
+import (
+	"bytes"
+	"context"
+	"encoding/json"
+	"fmt"
+	"os"
+	"path/filepath"
+	"sort"
+
+	"github.com/compose-spec/compose-go/v2/dotenv"
+	"github.com/compose-spec/compose-go/v2/errdefs"
+	"github.com/compose-spec/compose-go/v2/utils"
+	"github.com/distribution/reference"
+	godigest "github.com/opencontainers/go-digest"
+	"golang.org/x/exp/maps"
+	"golang.org/x/sync/errgroup"
+	"gopkg.in/yaml.v3"
+)
+
+// Project is the result of loading a set of compose files
+// Since v2, Project are managed as immutable objects.
+// Each public functions which mutate Project state now return a copy of the original Project with the expected changes.
+type Project struct {
+	Name       string     `yaml:"name,omitempty" json:"name,omitempty"`
+	WorkingDir string     `yaml:"-" json:"-"`
+	Services   Services   `yaml:"services" json:"services"`
+	Networks   Networks   `yaml:"networks,omitempty" json:"networks,omitempty"`
+	Volumes    Volumes    `yaml:"volumes,omitempty" json:"volumes,omitempty"`
+	Secrets    Secrets    `yaml:"secrets,omitempty" json:"secrets,omitempty"`
+	Configs    Configs    `yaml:"configs,omitempty" json:"configs,omitempty"`
+	Extensions Extensions `yaml:"#extensions,inline,omitempty" json:"-"` // https://github.com/golang/go/issues/6213
+
+	ComposeFiles []string `yaml:"-" json:"-"`
+	Environment  Mapping  `yaml:"-" json:"-"`
+
+	// DisabledServices track services which have been disable as profile is not active
+	DisabledServices Services `yaml:"-" json:"-"`
+	Profiles         []string `yaml:"-" json:"-"`
+}
+
+// ServiceNames return names for all services in this Compose config
+func (p *Project) ServiceNames() []string {
+	var names []string
+	for k := range p.Services {
+		names = append(names, k)
+	}
+	sort.Strings(names)
+	return names
+}
+
+// DisabledServiceNames return names for all disabled services in this Compose config
+func (p *Project) DisabledServiceNames() []string {
+	var names []string
+	for k := range p.DisabledServices {
+		names = append(names, k)
+	}
+	sort.Strings(names)
+	return names
+}
+
+// VolumeNames return names for all volumes in this Compose config
+func (p *Project) VolumeNames() []string {
+	var names []string
+	for k := range p.Volumes {
+		names = append(names, k)
+	}
+	sort.Strings(names)
+	return names
+}
+
+// NetworkNames return names for all volumes in this Compose config
+func (p *Project) NetworkNames() []string {
+	var names []string
+	for k := range p.Networks {
+		names = append(names, k)
+	}
+	sort.Strings(names)
+	return names
+}
+
+// SecretNames return names for all secrets in this Compose config
+func (p *Project) SecretNames() []string {
+	var names []string
+	for k := range p.Secrets {
+		names = append(names, k)
+	}
+	sort.Strings(names)
+	return names
+}
+
+// ConfigNames return names for all configs in this Compose config
+func (p *Project) ConfigNames() []string {
+	var names []string
+	for k := range p.Configs {
+		names = append(names, k)
+	}
+	sort.Strings(names)
+	return names
+}
+
+func (p *Project) ServicesWithBuild() []string {
+	servicesBuild := p.Services.Filter(func(s ServiceConfig) bool {
+		return s.Build != nil && s.Build.Context != ""
+	})
+	return maps.Keys(servicesBuild)
+}
+
+func (p *Project) ServicesWithExtends() []string {
+	servicesExtends := p.Services.Filter(func(s ServiceConfig) bool {
+		return s.Extends != nil && *s.Extends != (ExtendsConfig{})
+	})
+	return maps.Keys(servicesExtends)
+}
+
+func (p *Project) ServicesWithDependsOn() []string {
+	servicesDependsOn := p.Services.Filter(func(s ServiceConfig) bool {
+		return len(s.DependsOn) > 0
+	})
+	return maps.Keys(servicesDependsOn)
+}
+
+// ServicesWithCapabilities returns, in order: the names of services that
+// declare any device-reservation capability, those requesting the "gpu"
+// capability, and those requesting the "tpu" capability. Each returned
+// slice is de-duplicated. Services without a deploy section or without
+// resource reservations are ignored.
+func (p *Project) ServicesWithCapabilities() ([]string, []string, []string) {
+	capabilities := []string{}
+	gpu := []string{}
+	tpu := []string{}
+	for _, service := range p.Services {
+		deploy := service.Deploy
+		if deploy == nil {
+			continue
+		}
+		reservation := deploy.Resources.Reservations
+		if reservation == nil {
+			continue
+		}
+		devices := reservation.Devices
+		for _, d := range devices {
+			if len(d.Capabilities) > 0 {
+				capabilities = append(capabilities, service.Name)
+			}
+			for _, c := range d.Capabilities {
+				if c == "gpu" {
+					gpu = append(gpu, service.Name)
+				} else if c == "tpu" {
+					tpu = append(tpu, service.Name)
+				}
+			}
+		}
+	}
+
+	return utils.RemoveDuplicates(capabilities), utils.RemoveDuplicates(gpu), utils.RemoveDuplicates(tpu)
+}
+
+// GetServices retrieve services by names, or return all services if no name specified
+func (p *Project) GetServices(names ...string) (Services, error) {
+	if len(names) == 0 {
+		return p.Services, nil
+	}
+	services := Services{}
+	for _, name := range names {
+		service, err := p.GetService(name)
+		if err != nil {
+			return nil, err
+		}
+		services[name] = service
+	}
+	return services, nil
+}
+
+// getServicesByNames retrieves services by name and additionally returns
+// the names that matched no service, leaving the missing-name policy to
+// the caller (unlike GetServices, which fails fast). With no names it
+// returns all services.
+func (p *Project) getServicesByNames(names ...string) (Services, []string) {
+	if len(names) == 0 {
+		return p.Services, nil
+	}
+	services := Services{}
+	var servicesNotFound []string
+	for _, name := range names {
+		service, ok := p.Services[name]
+		if !ok {
+			servicesNotFound = append(servicesNotFound, name)
+			continue
+		}
+		services[name] = service
+	}
+	return services, servicesNotFound
+}
+
+// GetDisabledService retrieve disabled service by name
+func (p Project) GetDisabledService(name string) (ServiceConfig, error) {
+	service, ok := p.DisabledServices[name]
+	if !ok {
+		return ServiceConfig{}, fmt.Errorf("no such service: %s", name)
+	}
+	return service, nil
+}
+
+// GetService retrieve a specific service by name
+func (p *Project) GetService(name string) (ServiceConfig, error) {
+	service, ok := p.Services[name]
+	if !ok {
+		_, ok := p.DisabledServices[name]
+		if ok {
+			return ServiceConfig{}, fmt.Errorf("no such service: %s: %w", name, errdefs.ErrDisabled)
+		}
+		return ServiceConfig{}, fmt.Errorf("no such service: %s: %w", name, errdefs.ErrNotFound)
+	}
+	return service, nil
+}
+
+func (p *Project) AllServices() Services {
+	all := Services{}
+	for name, service := range p.Services {
+		all[name] = service
+	}
+	for name, service := range p.DisabledServices {
+		all[name] = service
+	}
+	return all
+}
+
+type ServiceFunc func(name string, service *ServiceConfig) error
+
+// ForEachService runs ServiceFunc on each service and dependencies according to DependencyPolicy
+func (p *Project) ForEachService(names []string, fn ServiceFunc, options ...DependencyOption) error {
+	if len(options) == 0 {
+		// backward compatibility
+		options = []DependencyOption{IncludeDependencies}
+	}
+	return p.withServices(names, fn, map[string]bool{}, options, map[string]ServiceDependency{})
+}
+
+type withServicesOptions struct {
+	dependencyPolicy int
+}
+
+const (
+	includeDependencies = iota
+	includeDependents
+	ignoreDependencies
+)
+
+// withServices applies fn to the named services (all services when names
+// is empty), recursing into each service's dependencies or dependents
+// according to the configured dependency policy. seen guards against
+// cycles and repeat visits. dependencies carries the ServiceDependency
+// entries that led to this recursion level, so a missing service that was
+// reached as an optional dependency (Required == false) is tolerated
+// instead of failing with "no such service".
+func (p *Project) withServices(names []string, fn ServiceFunc, seen map[string]bool, options []DependencyOption, dependencies map[string]ServiceDependency) error {
+	services, servicesNotFound := p.getServicesByNames(names...)
+	if len(servicesNotFound) > 0 {
+		for _, serviceNotFound := range servicesNotFound {
+			if dependency, ok := dependencies[serviceNotFound]; !ok || dependency.Required {
+				return fmt.Errorf("no such service: %s", serviceNotFound)
+			}
+		}
+	}
+	opts := withServicesOptions{
+		dependencyPolicy: includeDependencies,
+	}
+	for _, option := range options {
+		option(&opts)
+	}
+
+	for name, service := range services {
+		if seen[name] {
+			continue
+		}
+		seen[name] = true
+		// deliberately shadows the parameter: only the current service's
+		// own dependency (or dependent) set drives the recursion below
+		var dependencies map[string]ServiceDependency
+		switch opts.dependencyPolicy {
+		case includeDependents:
+			dependencies = utils.MapsAppend(dependencies, p.dependentsForService(service))
+		case includeDependencies:
+			dependencies = utils.MapsAppend(dependencies, service.DependsOn)
+		case ignoreDependencies:
+			// Noop
+		}
+		if len(dependencies) > 0 {
+			// visit related services before the service itself
+			err := p.withServices(utils.MapKeys(dependencies), fn, seen, options, dependencies)
+			if err != nil {
+				return err
+			}
+		}
+		// fn receives a deep copy so callbacks cannot mutate project state
+		if err := fn(name, service.deepCopy()); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (p *Project) GetDependentsForService(s ServiceConfig) []string {
+	return utils.MapKeys(p.dependentsForService(s))
+}
+
+func (p *Project) dependentsForService(s ServiceConfig) map[string]ServiceDependency {
+	dependent := make(map[string]ServiceDependency)
+	for _, service := range p.Services {
+		for name, dependency := range service.DependsOn {
+			if name == s.Name {
+				dependent[service.Name] = dependency
+			}
+		}
+	}
+	return dependent
+}
+
+// RelativePath resolve a relative path based project's working directory
+// A leading '~' is expanded to the user home directory, absolute paths
+// are returned unchanged, and anything else is joined to WorkingDir.
+func (p *Project) RelativePath(path string) string {
+	if path == "" {
+		// guard: indexing path[0] below would panic on empty input;
+		// this matches what filepath.Join(p.WorkingDir, "") yields
+		return p.WorkingDir
+	}
+	if path[0] == '~' {
+		home, _ := os.UserHomeDir()
+		path = filepath.Join(home, path[1:])
+	}
+	if filepath.IsAbs(path) {
+		return path
+	}
+	return filepath.Join(p.WorkingDir, path)
+}
+
+// HasProfile return true if service has no profile declared or has at least one profile matching
+func (s ServiceConfig) HasProfile(profiles []string) bool {
+	if len(s.Profiles) == 0 {
+		return true
+	}
+	for _, p := range profiles {
+		if p == "*" {
+			return true
+		}
+		for _, sp := range s.Profiles {
+			if sp == p {
+				return true
+			}
+		}
+	}
+	return false
+}
+
+// WithProfiles disables services which don't match selected profiles
+// It returns a new Project instance with the changes and keep the original Project unchanged
+func (p *Project) WithProfiles(profiles []string) (*Project, error) {
+	newProject := p.deepCopy()
+	enabled := Services{}
+	disabled := Services{}
+	for name, service := range newProject.AllServices() {
+		if service.HasProfile(profiles) {
+			enabled[name] = service
+		} else {
+			disabled[name] = service
+		}
+	}
+	newProject.Services = enabled
+	newProject.DisabledServices = disabled
+	newProject.Profiles = profiles
+	return newProject, nil
+}
+
+// WithServicesEnabled ensures services are enabled and activate profiles accordingly
+// It returns a new Project instance with the changes and keep the original Project unchanged
+func (p *Project) WithServicesEnabled(names ...string) (*Project, error) {
+	newProject := p.deepCopy()
+	if len(names) == 0 {
+		return newProject, nil
+	}
+
+	profiles := append([]string{}, p.Profiles...)
+	for _, name := range names {
+		if _, ok := newProject.Services[name]; ok {
+			// already enabled
+			continue
+		}
+		service := p.DisabledServices[name]
+		profiles = append(profiles, service.Profiles...)
+	}
+	newProject, err := newProject.WithProfiles(profiles)
+	if err != nil {
+		return newProject, err
+	}
+
+	return newProject.WithServicesEnvironmentResolved(true)
+}
+
+// WithoutUnnecessaryResources drops networks/volumes/secrets/configs that are not referenced by active services
+// It returns a new Project instance with the changes and keep the original Project unchanged
+func (p *Project) WithoutUnnecessaryResources() *Project {
+	newProject := p.deepCopy()
+	requiredNetworks := map[string]struct{}{}
+	requiredVolumes := map[string]struct{}{}
+	requiredSecrets := map[string]struct{}{}
+	requiredConfigs := map[string]struct{}{}
+	for _, s := range newProject.Services {
+		for k := range s.Networks {
+			requiredNetworks[k] = struct{}{}
+		}
+		for _, v := range s.Volumes {
+			if v.Type != VolumeTypeVolume || v.Source == "" {
+				continue
+			}
+			requiredVolumes[v.Source] = struct{}{}
+		}
+		for _, v := range s.Secrets {
+			requiredSecrets[v.Source] = struct{}{}
+		}
+		if s.Build != nil {
+			for _, v := range s.Build.Secrets {
+				requiredSecrets[v.Source] = struct{}{}
+			}
+		}
+		for _, v := range s.Configs {
+			requiredConfigs[v.Source] = struct{}{}
+		}
+	}
+
+	networks := Networks{}
+	for k := range requiredNetworks {
+		if value, ok := p.Networks[k]; ok {
+			networks[k] = value
+		}
+	}
+	newProject.Networks = networks
+
+	volumes := Volumes{}
+	for k := range requiredVolumes {
+		if value, ok := p.Volumes[k]; ok {
+			volumes[k] = value
+		}
+	}
+	newProject.Volumes = volumes
+
+	secrets := Secrets{}
+	for k := range requiredSecrets {
+		if value, ok := p.Secrets[k]; ok {
+			secrets[k] = value
+		}
+	}
+	newProject.Secrets = secrets
+
+	configs := Configs{}
+	for k := range requiredConfigs {
+		if value, ok := p.Configs[k]; ok {
+			configs[k] = value
+		}
+	}
+	newProject.Configs = configs
+	return newProject
+}
+
+type DependencyOption func(options *withServicesOptions)
+
+func IncludeDependencies(options *withServicesOptions) {
+	options.dependencyPolicy = includeDependencies
+}
+
+func IncludeDependents(options *withServicesOptions) {
+	options.dependencyPolicy = includeDependents
+}
+
+func IgnoreDependencies(options *withServicesOptions) {
+	options.dependencyPolicy = ignoreDependencies
+}
+
+// WithSelectedServices restricts the project model to selected services and dependencies
+// It returns a new Project instance with the changes and keep the original Project unchanged
+func (p *Project) WithSelectedServices(names []string, options ...DependencyOption) (*Project, error) {
+	newProject := p.deepCopy()
+	if len(names) == 0 {
+		// All services
+		return newProject, nil
+	}
+
+	set := utils.NewSet[string]()
+	err := p.ForEachService(names, func(name string, service *ServiceConfig) error {
+		set.Add(name)
+		return nil
+	}, options...)
+	if err != nil {
+		return nil, err
+	}
+
+	// Disable all services which are not explicit target or dependencies
+	enabled := Services{}
+	for name, s := range newProject.Services {
+		if _, ok := set[name]; ok {
+			// remove all dependencies but those implied by explicitly selected services
+			dependencies := s.DependsOn
+			for d := range dependencies {
+				if _, ok := set[d]; !ok {
+					delete(dependencies, d)
+				}
+			}
+			s.DependsOn = dependencies
+			enabled[name] = s
+		} else {
+			newProject = newProject.WithServicesDisabled(name)
+		}
+	}
+	newProject.Services = enabled
+	return newProject, nil
+}
+
+// WithServicesDisabled removes from the project model the given services and their references in all dependencies
+// It returns a new Project instance with the changes and keep the original Project unchanged
+func (p *Project) WithServicesDisabled(names ...string) *Project {
+	newProject := p.deepCopy()
+	if len(names) == 0 {
+		return newProject
+	}
+	if newProject.DisabledServices == nil {
+		newProject.DisabledServices = Services{}
+	}
+	for _, name := range names {
+		// We should remove all dependencies which reference the disabled service
+		for i, s := range newProject.Services {
+			if _, ok := s.DependsOn[name]; ok {
+				delete(s.DependsOn, name)
+				newProject.Services[i] = s
+			}
+		}
+		if service, ok := newProject.Services[name]; ok {
+			newProject.DisabledServices[name] = service
+			delete(newProject.Services, name)
+		}
+	}
+	return newProject
+}
+
+// WithImagesResolved updates services images to include digest computed by a resolver function
+// It returns a new Project instance with the changes and keep the original Project unchanged
+func (p *Project) WithImagesResolved(resolver func(named reference.Named) (godigest.Digest, error)) (*Project, error) {
+	return p.WithServicesTransform(func(name string, service ServiceConfig) (ServiceConfig, error) {
+		if service.Image == "" {
+			return service, nil
+		}
+		named, err := reference.ParseDockerRef(service.Image)
+		if err != nil {
+			return service, err
+		}
+
+		if _, ok := named.(reference.Canonical); !ok {
+			// image is named but not digested reference
+			digest, err := resolver(named)
+			if err != nil {
+				return service, err
+			}
+			named, err = reference.WithDigest(named, digest)
+			if err != nil {
+				return service, err
+			}
+		}
+		service.Image = named.String()
+		return service, nil
+	})
+}
+
+type marshallOptions struct {
+	secretsContent bool
+}
+
+func WithSecretContent(o *marshallOptions) {
+	o.secretsContent = true
+}
+
+func (opt *marshallOptions) apply(p *Project) *Project {
+	if opt.secretsContent {
+		p = p.deepCopy()
+		for name, config := range p.Secrets {
+			config.marshallContent = true
+			p.Secrets[name] = config
+		}
+	}
+	return p
+}
+
+func applyMarshallOptions(p *Project, options ...func(*marshallOptions)) *Project {
+	opts := &marshallOptions{}
+	for _, option := range options {
+		option(opts)
+	}
+	p = opts.apply(p)
+	return p
+}
+
+// MarshalYAML marshal Project into a yaml tree
+func (p *Project) MarshalYAML(options ...func(*marshallOptions)) ([]byte, error) {
+	buf := bytes.NewBuffer([]byte{})
+	encoder := yaml.NewEncoder(buf)
+	encoder.SetIndent(2)
+	// encoder.CompactSeqIndent() FIXME https://github.com/go-yaml/yaml/pull/753
+	src := applyMarshallOptions(p, options...)
+	err := encoder.Encode(src)
+	if err != nil {
+		return nil, err
+	}
+	return buf.Bytes(), nil
+}
+
+// MarshalJSON marshal Project into a json document
+func (p *Project) MarshalJSON(options ...func(*marshallOptions)) ([]byte, error) {
+	src := applyMarshallOptions(p, options...)
+	m := map[string]interface{}{
+		"name":     src.Name,
+		"services": src.Services,
+	}
+
+	if len(src.Networks) > 0 {
+		m["networks"] = src.Networks
+	}
+	if len(src.Volumes) > 0 {
+		m["volumes"] = src.Volumes
+	}
+	if len(src.Secrets) > 0 {
+		m["secrets"] = src.Secrets
+	}
+	if len(src.Configs) > 0 {
+		m["configs"] = src.Configs
+	}
+	for k, v := range src.Extensions {
+		m[k] = v
+	}
+	return json.MarshalIndent(m, "", "  ")
+}
+
+// WithServicesEnvironmentResolved parses env_files set for services to resolve the actual environment map for services
+// It returns a new Project instance with the changes and keep the original Project unchanged
+func (p Project) WithServicesEnvironmentResolved(discardEnvFiles bool) (*Project, error) {
+	newProject := p.deepCopy()
+	for i, service := range newProject.Services {
+		service.Environment = service.Environment.Resolve(newProject.Environment.Resolve)
+
+		environment := MappingWithEquals{}
+		// resolve variables based on other files we already parsed, + project's environment
+		var resolve dotenv.LookupFn = func(s string) (string, bool) {
+			v, ok := environment[s]
+			if ok && v != nil {
+				return *v, ok
+			}
+			return newProject.Environment.Resolve(s)
+		}
+
+		for _, envFile := range service.EnvFiles {
+			vars, err := loadEnvFile(envFile, resolve)
+			if err != nil {
+				return nil, err
+			}
+			environment.OverrideBy(vars.ToMappingWithEquals())
+		}
+
+		service.Environment = environment.OverrideBy(service.Environment)
+
+		if discardEnvFiles {
+			service.EnvFiles = nil
+		}
+		newProject.Services[i] = service
+	}
+	return newProject, nil
+}
+
+// WithServicesLabelsResolved parses label_files set for services to resolve the actual label map for services
+// It returns a new Project instance with the changes and keep the original Project unchanged
+func (p Project) WithServicesLabelsResolved(discardLabelFiles bool) (*Project, error) {
+	newProject := p.deepCopy()
+	for i, service := range newProject.Services {
+		labels := MappingWithEquals{}
+		// resolve variables based on other files we already parsed
+		var resolve dotenv.LookupFn = func(s string) (string, bool) {
+			v, ok := labels[s]
+			if ok && v != nil {
+				return *v, ok
+			}
+			return "", false
+		}
+
+		for _, labelFile := range service.LabelFiles {
+			vars, err := loadLabelFile(labelFile, resolve)
+			if err != nil {
+				return nil, err
+			}
+			labels.OverrideBy(vars.ToMappingWithEquals())
+		}
+
+		labels = labels.OverrideBy(service.Labels.ToMappingWithEquals())
+		if len(labels) == 0 {
+			labels = nil
+		} else {
+			service.Labels = NewLabelsFromMappingWithEquals(labels)
+		}
+
+		if discardLabelFiles {
+			service.LabelFiles = nil
+		}
+		newProject.Services[i] = service
+	}
+	return newProject, nil
+}
+
+func loadEnvFile(envFile EnvFile, resolve dotenv.LookupFn) (Mapping, error) {
+	if _, err := os.Stat(envFile.Path); os.IsNotExist(err) {
+		if envFile.Required {
+			return nil, fmt.Errorf("env file %s not found: %w", envFile.Path, err)
+		}
+		return nil, nil
+	}
+
+	return loadMappingFile(envFile.Path, envFile.Format, resolve)
+}
+
+func loadLabelFile(labelFile string, resolve dotenv.LookupFn) (Mapping, error) {
+	if _, err := os.Stat(labelFile); os.IsNotExist(err) {
+		return nil, fmt.Errorf("label file %s not found: %w", labelFile, err)
+	}
+
+	return loadMappingFile(labelFile, "", resolve)
+}
+
+func loadMappingFile(path string, format string, resolve dotenv.LookupFn) (Mapping, error) {
+	file, err := os.Open(path)
+	if err != nil {
+		return nil, err
+	}
+	defer file.Close() //nolint:errcheck
+
+	var fileVars map[string]string
+	if format != "" {
+		fileVars, err = dotenv.ParseWithFormat(file, path, resolve, format)
+	} else {
+		fileVars, err = dotenv.ParseWithLookup(file, resolve)
+	}
+	if err != nil {
+		return nil, err
+	}
+	return fileVars, nil
+}
+
+func (p *Project) deepCopy() *Project {
+	if p == nil {
+		return nil
+	}
+	n := &Project{}
+	deriveDeepCopyProject(n, p)
+	return n
+
+}
+
+// WithServicesTransform applies a transformation to project services and return a new project with transformation results
+// Transformations run concurrently, one goroutine per service, while a
+// dedicated collector goroutine assembles the results into a fresh
+// Services map; the first error cancels the errgroup context and is
+// returned by eg.Wait().
+func (p *Project) WithServicesTransform(fn func(name string, s ServiceConfig) (ServiceConfig, error)) (*Project, error) {
+	type result struct {
+		name    string
+		service ServiceConfig
+	}
+	expect := len(p.Services)
+	// buffered to capacity so worker goroutines never block on send
+	resultCh := make(chan result, expect)
+	newProject := p.deepCopy()
+
+	eg, ctx := errgroup.WithContext(context.Background())
+	eg.Go(func() error {
+		s := Services{}
+		for expect > 0 {
+			select {
+			case <-ctx.Done():
+				// interrupted as some goroutine returned an error
+				return nil
+			case r := <-resultCh:
+				s[r.name] = r.service
+				expect--
+			}
+		}
+		newProject.Services = s
+		return nil
+	})
+	for n, s := range newProject.Services {
+		// copy loop variables before capture (pre-Go 1.22 semantics)
+		name := n
+		service := s
+		eg.Go(func() error {
+			updated, err := fn(name, service)
+			if err != nil {
+				return err
+			}
+			resultCh <- result{
+				name:    name,
+				service: updated,
+			}
+			return nil
+		})
+	}
+	return newProject, eg.Wait()
+}
+
+// CheckContainerNameUnicity validate project doesn't have services declaring the same container_name
+// On conflict the error names both the offending service and the service
+// that already claimed the container name.
+func (p *Project) CheckContainerNameUnicity() error {
+	// container_name -> name of the service that declared it, so the
+	// error message can report the actual owning service (the previous
+	// set-based lookup printed the set's value type, not a service name,
+	// and the format string carried a stray trailing quote)
+	owners := map[string]string{}
+	for name, s := range p.Services {
+		if s.ContainerName == "" {
+			continue
+		}
+		if existing, ok := owners[s.ContainerName]; ok {
+			return fmt.Errorf("services.%s: container name %q is already in use by service %s", name, s.ContainerName, existing)
+		}
+		owners[s.ContainerName] = name
+	}
+	return nil
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/types/services.go 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/types/services.go
--- 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/types/services.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/types/services.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,45 @@
+/*
+   Copyright 2020 The Compose Specification Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package types
+
+// Services is a map of ServiceConfig
+type Services map[string]ServiceConfig
+
+// GetProfiles retrieve the profiles implicitly enabled by explicitly targeting selected services
+func (s Services) GetProfiles() []string {
+	set := map[string]struct{}{}
+	for _, service := range s {
+		for _, p := range service.Profiles {
+			set[p] = struct{}{}
+		}
+	}
+	var profiles []string
+	for k := range set {
+		profiles = append(profiles, k)
+	}
+	return profiles
+}
+
+func (s Services) Filter(predicate func(ServiceConfig) bool) Services {
+	services := Services{}
+	for name, service := range s {
+		if predicate(service) {
+			services[name] = service
+		}
+	}
+	return services
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/types/ssh.go 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/types/ssh.go
--- 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/types/ssh.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/types/ssh.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,73 @@
+/*
+   Copyright 2020 The Compose Specification Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package types
+
+import (
+	"fmt"
+)
+
+// SSHKey is one SSH build key: an ID and an optional local path.
+type SSHKey struct {
+	ID string `yaml:"id,omitempty" json:"id,omitempty"`
+	// Fix: tag previously read `path:"path,omitempty"` — `path:` is not a
+	// yaml tag (serialization worked only via the custom marshallers below)
+	Path string `yaml:"path,omitempty" json:"path,omitempty"`
+}
+
+// SSHConfig is a mapping type for SSH build config
+type SSHConfig []SSHKey
+
+func (s SSHConfig) Get(id string) (string, error) {
+	for _, sshKey := range s {
+		if sshKey.ID == id {
+			return sshKey.Path, nil
+		}
+	}
+	return "", fmt.Errorf("ID %s not found in SSH keys", id)
+}
+
+// MarshalYAML makes SSHKey implement yaml.Marshaller
+func (s SSHKey) MarshalYAML() (interface{}, error) {
+	if s.Path == "" {
+		return s.ID, nil
+	}
+	return fmt.Sprintf("%s: %s", s.ID, s.Path), nil
+}
+
+// MarshalJSON makes SSHKey implement json.Marshaller
+// NOTE(review): with Path set this returns the fragment `"id": path`,
+// which is not a self-contained JSON value and leaves Path unquoted —
+// presumably only ever embedded inside a larger document; confirm before
+// relying on json.Marshal of a bare SSHKey.
+func (s SSHKey) MarshalJSON() ([]byte, error) {
+	if s.Path == "" {
+		return []byte(fmt.Sprintf(`%q`, s.ID)), nil
+	}
+	return []byte(fmt.Sprintf(`%q: %s`, s.ID, s.Path)), nil
+}
+
+// DecodeMapstructure decodes the map form of ssh config ({id: path})
+// into an SSHConfig slice; a nil path yields an SSHKey with empty Path.
+// Go map iteration order is randomized, so the order of keys in the
+// resulting slice is not deterministic across runs.
+func (s *SSHConfig) DecodeMapstructure(value interface{}) error {
+	v, ok := value.(map[string]any)
+	if !ok {
+		return fmt.Errorf("invalid ssh config type %T", value)
+	}
+	result := make(SSHConfig, len(v))
+	i := 0
+	for id, path := range v {
+		key := SSHKey{ID: id}
+		if path != nil {
+			key.Path = fmt.Sprint(path)
+		}
+		result[i] = key
+		i++
+	}
+	*s = result
+	return nil
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/types/stringOrList.go 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/types/stringOrList.go
--- 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/types/stringOrList.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/types/stringOrList.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,61 @@
+/*
+   Copyright 2020 The Compose Specification Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package types
+
+import "fmt"
+
+// StringList is a type for fields that can be a string or list of strings
+type StringList []string
+
+func (l *StringList) DecodeMapstructure(value interface{}) error {
+	switch v := value.(type) {
+	case string:
+		*l = []string{v}
+	case []interface{}:
+		list := make([]string, len(v))
+		for i, e := range v {
+			val, ok := e.(string)
+			if !ok {
+				return fmt.Errorf("invalid type %T for string list", value)
+			}
+			list[i] = val
+		}
+		*l = list
+	default:
+		return fmt.Errorf("invalid type %T for string list", value)
+	}
+	return nil
+}
+
+// StringOrNumberList is a type for fields that can be a list of strings or numbers
+type StringOrNumberList []string
+
+func (l *StringOrNumberList) DecodeMapstructure(value interface{}) error {
+	switch v := value.(type) {
+	case string:
+		*l = []string{v}
+	case []interface{}:
+		list := make([]string, len(v))
+		for i, e := range v {
+			list[i] = fmt.Sprint(e)
+		}
+		*l = list
+	default:
+		return fmt.Errorf("invalid type %T for string list", value)
+	}
+	return nil
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/types/types.go 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/types/types.go
--- 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/types/types.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/types/types.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,821 @@
+/*
+   Copyright 2020 The Compose Specification Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package types
+
+import (
+	"encoding/json"
+	"fmt"
+	"sort"
+	"strings"
+
+	"github.com/docker/go-connections/nat"
+)
+
+// ServiceConfig is the configuration of one service
+type ServiceConfig struct {
+	Name     string   `yaml:"name,omitempty" json:"-"`
+	Profiles []string `yaml:"profiles,omitempty" json:"profiles,omitempty"`
+
+	Annotations  Mapping        `yaml:"annotations,omitempty" json:"annotations,omitempty"`
+	Attach       *bool          `yaml:"attach,omitempty" json:"attach,omitempty"`
+	Build        *BuildConfig   `yaml:"build,omitempty" json:"build,omitempty"`
+	Develop      *DevelopConfig `yaml:"develop,omitempty" json:"develop,omitempty"`
+	BlkioConfig  *BlkioConfig   `yaml:"blkio_config,omitempty" json:"blkio_config,omitempty"`
+	CapAdd       []string       `yaml:"cap_add,omitempty" json:"cap_add,omitempty"`
+	CapDrop      []string       `yaml:"cap_drop,omitempty" json:"cap_drop,omitempty"`
+	CgroupParent string         `yaml:"cgroup_parent,omitempty" json:"cgroup_parent,omitempty"`
+	Cgroup       string         `yaml:"cgroup,omitempty" json:"cgroup,omitempty"`
+	CPUCount     int64          `yaml:"cpu_count,omitempty" json:"cpu_count,omitempty"`
+	CPUPercent   float32        `yaml:"cpu_percent,omitempty" json:"cpu_percent,omitempty"`
+	CPUPeriod    int64          `yaml:"cpu_period,omitempty" json:"cpu_period,omitempty"`
+	CPUQuota     int64          `yaml:"cpu_quota,omitempty" json:"cpu_quota,omitempty"`
+	CPURTPeriod  int64          `yaml:"cpu_rt_period,omitempty" json:"cpu_rt_period,omitempty"`
+	CPURTRuntime int64          `yaml:"cpu_rt_runtime,omitempty" json:"cpu_rt_runtime,omitempty"`
+	CPUS         float32        `yaml:"cpus,omitempty" json:"cpus,omitempty"`
+	CPUSet       string         `yaml:"cpuset,omitempty" json:"cpuset,omitempty"`
+	CPUShares    int64          `yaml:"cpu_shares,omitempty" json:"cpu_shares,omitempty"`
+
+	// Command for the service containers.
+	// If set, overrides COMMAND from the image.
+	//
+	// Set to `[]` or an empty string to clear the command from the image.
+	Command ShellCommand `yaml:"command,omitempty" json:"command"` // NOTE: we can NOT omitempty for JSON! see ShellCommand type for details.
+
+	Configs           []ServiceConfigObjConfig `yaml:"configs,omitempty" json:"configs,omitempty"`
+	ContainerName     string                   `yaml:"container_name,omitempty" json:"container_name,omitempty"`
+	CredentialSpec    *CredentialSpecConfig    `yaml:"credential_spec,omitempty" json:"credential_spec,omitempty"`
+	DependsOn         DependsOnConfig          `yaml:"depends_on,omitempty" json:"depends_on,omitempty"`
+	Deploy            *DeployConfig            `yaml:"deploy,omitempty" json:"deploy,omitempty"`
+	DeviceCgroupRules []string                 `yaml:"device_cgroup_rules,omitempty" json:"device_cgroup_rules,omitempty"`
+	Devices           []DeviceMapping          `yaml:"devices,omitempty" json:"devices,omitempty"`
+	DNS               StringList               `yaml:"dns,omitempty" json:"dns,omitempty"`
+	DNSOpts           []string                 `yaml:"dns_opt,omitempty" json:"dns_opt,omitempty"`
+	DNSSearch         StringList               `yaml:"dns_search,omitempty" json:"dns_search,omitempty"`
+	Dockerfile        string                   `yaml:"dockerfile,omitempty" json:"dockerfile,omitempty"`
+	DomainName        string                   `yaml:"domainname,omitempty" json:"domainname,omitempty"`
+
+	// Entrypoint for the service containers.
+	// If set, overrides ENTRYPOINT from the image.
+	//
+	// Set to `[]` or an empty string to clear the entrypoint from the image.
+	Entrypoint ShellCommand `yaml:"entrypoint,omitempty" json:"entrypoint"` // NOTE: we can NOT omitempty for JSON! see ShellCommand type for details.
+
+	Environment     MappingWithEquals                `yaml:"environment,omitempty" json:"environment,omitempty"`
+	EnvFiles        []EnvFile                        `yaml:"env_file,omitempty" json:"env_file,omitempty"`
+	Expose          StringOrNumberList               `yaml:"expose,omitempty" json:"expose,omitempty"`
+	Extends         *ExtendsConfig                   `yaml:"extends,omitempty" json:"extends,omitempty"`
+	ExternalLinks   []string                         `yaml:"external_links,omitempty" json:"external_links,omitempty"`
+	ExtraHosts      HostsList                        `yaml:"extra_hosts,omitempty" json:"extra_hosts,omitempty"`
+	GroupAdd        []string                         `yaml:"group_add,omitempty" json:"group_add,omitempty"`
+	Gpus            []DeviceRequest                  `yaml:"gpus,omitempty" json:"gpus,omitempty"`
+	Hostname        string                           `yaml:"hostname,omitempty" json:"hostname,omitempty"`
+	HealthCheck     *HealthCheckConfig               `yaml:"healthcheck,omitempty" json:"healthcheck,omitempty"`
+	Image           string                           `yaml:"image,omitempty" json:"image,omitempty"`
+	Init            *bool                            `yaml:"init,omitempty" json:"init,omitempty"`
+	Ipc             string                           `yaml:"ipc,omitempty" json:"ipc,omitempty"`
+	Isolation       string                           `yaml:"isolation,omitempty" json:"isolation,omitempty"`
+	Labels          Labels                           `yaml:"labels,omitempty" json:"labels,omitempty"`
+	LabelFiles      []string                         `yaml:"label_file,omitempty" json:"label_file,omitempty"`
+	CustomLabels    Labels                           `yaml:"-" json:"-"`
+	Links           []string                         `yaml:"links,omitempty" json:"links,omitempty"`
+	Logging         *LoggingConfig                   `yaml:"logging,omitempty" json:"logging,omitempty"`
+	LogDriver       string                           `yaml:"log_driver,omitempty" json:"log_driver,omitempty"`
+	LogOpt          map[string]string                `yaml:"log_opt,omitempty" json:"log_opt,omitempty"`
+	MemLimit        UnitBytes                        `yaml:"mem_limit,omitempty" json:"mem_limit,omitempty"`
+	MemReservation  UnitBytes                        `yaml:"mem_reservation,omitempty" json:"mem_reservation,omitempty"`
+	MemSwapLimit    UnitBytes                        `yaml:"memswap_limit,omitempty" json:"memswap_limit,omitempty"`
+	MemSwappiness   UnitBytes                        `yaml:"mem_swappiness,omitempty" json:"mem_swappiness,omitempty"`
+	MacAddress      string                           `yaml:"mac_address,omitempty" json:"mac_address,omitempty"`
+	Net             string                           `yaml:"net,omitempty" json:"net,omitempty"`
+	NetworkMode     string                           `yaml:"network_mode,omitempty" json:"network_mode,omitempty"`
+	Networks        map[string]*ServiceNetworkConfig `yaml:"networks,omitempty" json:"networks,omitempty"`
+	OomKillDisable  bool                             `yaml:"oom_kill_disable,omitempty" json:"oom_kill_disable,omitempty"`
+	OomScoreAdj     int64                            `yaml:"oom_score_adj,omitempty" json:"oom_score_adj,omitempty"`
+	Pid             string                           `yaml:"pid,omitempty" json:"pid,omitempty"`
+	PidsLimit       int64                            `yaml:"pids_limit,omitempty" json:"pids_limit,omitempty"`
+	Platform        string                           `yaml:"platform,omitempty" json:"platform,omitempty"`
+	Ports           []ServicePortConfig              `yaml:"ports,omitempty" json:"ports,omitempty"`
+	Privileged      bool                             `yaml:"privileged,omitempty" json:"privileged,omitempty"`
+	PullPolicy      string                           `yaml:"pull_policy,omitempty" json:"pull_policy,omitempty"`
+	ReadOnly        bool                             `yaml:"read_only,omitempty" json:"read_only,omitempty"`
+	Restart         string                           `yaml:"restart,omitempty" json:"restart,omitempty"`
+	Runtime         string                           `yaml:"runtime,omitempty" json:"runtime,omitempty"`
+	Scale           *int                             `yaml:"scale,omitempty" json:"scale,omitempty"`
+	Secrets         []ServiceSecretConfig            `yaml:"secrets,omitempty" json:"secrets,omitempty"`
+	SecurityOpt     []string                         `yaml:"security_opt,omitempty" json:"security_opt,omitempty"`
+	ShmSize         UnitBytes                        `yaml:"shm_size,omitempty" json:"shm_size,omitempty"`
+	StdinOpen       bool                             `yaml:"stdin_open,omitempty" json:"stdin_open,omitempty"`
+	StopGracePeriod *Duration                        `yaml:"stop_grace_period,omitempty" json:"stop_grace_period,omitempty"`
+	StopSignal      string                           `yaml:"stop_signal,omitempty" json:"stop_signal,omitempty"`
+	StorageOpt      map[string]string                `yaml:"storage_opt,omitempty" json:"storage_opt,omitempty"`
+	Sysctls         Mapping                          `yaml:"sysctls,omitempty" json:"sysctls,omitempty"`
+	Tmpfs           StringList                       `yaml:"tmpfs,omitempty" json:"tmpfs,omitempty"`
+	Tty             bool                             `yaml:"tty,omitempty" json:"tty,omitempty"`
+	Ulimits         map[string]*UlimitsConfig        `yaml:"ulimits,omitempty" json:"ulimits,omitempty"`
+	User            string                           `yaml:"user,omitempty" json:"user,omitempty"`
+	UserNSMode      string                           `yaml:"userns_mode,omitempty" json:"userns_mode,omitempty"`
+	Uts             string                           `yaml:"uts,omitempty" json:"uts,omitempty"`
+	VolumeDriver    string                           `yaml:"volume_driver,omitempty" json:"volume_driver,omitempty"`
+	Volumes         []ServiceVolumeConfig            `yaml:"volumes,omitempty" json:"volumes,omitempty"`
+	VolumesFrom     []string                         `yaml:"volumes_from,omitempty" json:"volumes_from,omitempty"`
+	WorkingDir      string                           `yaml:"working_dir,omitempty" json:"working_dir,omitempty"`
+	PostStart       []ServiceHook                    `yaml:"post_start,omitempty" json:"post_start,omitempty"`
+	PreStop         []ServiceHook                    `yaml:"pre_stop,omitempty" json:"pre_stop,omitempty"`
+
+	Extensions Extensions `yaml:"#extensions,inline,omitempty" json:"-"`
+}
+
+// MarshalYAML makes ServiceConfig implement yaml.Marshaller
+func (s ServiceConfig) MarshalYAML() (interface{}, error) {
+	type t ServiceConfig
+	value := t(s)
+	value.Name = "" // set during map to slice conversion, not part of the yaml representation
+	return value, nil
+}
+
+// NetworksByPriority return the service networks IDs sorted according to Priority
+func (s *ServiceConfig) NetworksByPriority() []string {
+	type key struct {
+		name     string
+		priority int
+	}
+	var keys []key
+	for k, v := range s.Networks {
+		priority := 0
+		if v != nil {
+			priority = v.Priority
+		}
+		keys = append(keys, key{
+			name:     k,
+			priority: priority,
+		})
+	}
+	sort.Slice(keys, func(i, j int) bool {
+		if keys[i].priority == keys[j].priority {
+			return keys[i].name < keys[j].name
+		}
+		return keys[i].priority > keys[j].priority
+	})
+	var sorted []string
+	for _, k := range keys {
+		sorted = append(sorted, k.name)
+	}
+	return sorted
+}
+
+func (s *ServiceConfig) GetScale() int {
+	if s.Scale != nil {
+		return *s.Scale
+	}
+	if s.Deploy != nil && s.Deploy.Replicas != nil {
+		// this should not be required as compose-go enforce consistency between scale anr replicas
+		return *s.Deploy.Replicas
+	}
+	return 1
+}
+
+func (s *ServiceConfig) SetScale(scale int) {
+	s.Scale = &scale
+	if s.Deploy != nil {
+		s.Deploy.Replicas = &scale
+	}
+}
+
+func (s *ServiceConfig) deepCopy() *ServiceConfig {
+	if s == nil {
+		return nil
+	}
+	n := &ServiceConfig{}
+	deriveDeepCopyService(n, s)
+	return n
+}
+
+const (
+	// PullPolicyAlways always pull images
+	PullPolicyAlways = "always"
+	// PullPolicyNever never pull images
+	PullPolicyNever = "never"
+	// PullPolicyIfNotPresent pull missing images
+	PullPolicyIfNotPresent = "if_not_present"
+	// PullPolicyMissing pull missing images
+	PullPolicyMissing = "missing"
+	// PullPolicyBuild force building images
+	PullPolicyBuild = "build"
+)
+
+const (
+	// RestartPolicyAlways always restart the container if it stops
+	RestartPolicyAlways = "always"
+	// RestartPolicyOnFailure restart the container if it exits due to an error
+	RestartPolicyOnFailure = "on-failure"
+	// RestartPolicyNo do not automatically restart the container
+	RestartPolicyNo = "no"
+	// RestartPolicyUnlessStopped always restart the container unless the container is stopped (manually or otherwise)
+	RestartPolicyUnlessStopped = "unless-stopped"
+)
+
+const (
+	// ServicePrefix is the prefix for references pointing to a service
+	ServicePrefix = "service:"
+	// ContainerPrefix is the prefix for references pointing to a container
+	ContainerPrefix = "container:"
+
+	// NetworkModeServicePrefix is the prefix for network_mode pointing to a service
+	// Deprecated prefer ServicePrefix
+	NetworkModeServicePrefix = ServicePrefix
+	// NetworkModeContainerPrefix is the prefix for network_mode pointing to a container
+	// Deprecated prefer ContainerPrefix
+	NetworkModeContainerPrefix = ContainerPrefix
+)
+
+const (
+	SecretConfigXValue = "x-#value"
+)
+
+// GetDependencies retrieves all services this service depends on
+func (s ServiceConfig) GetDependencies() []string {
+	var dependencies []string
+	for service := range s.DependsOn {
+		dependencies = append(dependencies, service)
+	}
+	return dependencies
+}
+
+// GetDependents retrieves all services which depend on this service
+func (s ServiceConfig) GetDependents(p *Project) []string {
+	var dependent []string
+	for _, service := range p.Services {
+		for name := range service.DependsOn {
+			if name == s.Name {
+				dependent = append(dependent, service.Name)
+			}
+		}
+	}
+	return dependent
+}
+
+// BuildConfig is a type for build
+type BuildConfig struct {
+	Context            string                    `yaml:"context,omitempty" json:"context,omitempty"`
+	Dockerfile         string                    `yaml:"dockerfile,omitempty" json:"dockerfile,omitempty"`
+	DockerfileInline   string                    `yaml:"dockerfile_inline,omitempty" json:"dockerfile_inline,omitempty"`
+	Entitlements       []string                  `yaml:"entitlements,omitempty" json:"entitlements,omitempty"`
+	Args               MappingWithEquals         `yaml:"args,omitempty" json:"args,omitempty"`
+	SSH                SSHConfig                 `yaml:"ssh,omitempty" json:"ssh,omitempty"`
+	Labels             Labels                    `yaml:"labels,omitempty" json:"labels,omitempty"`
+	CacheFrom          StringList                `yaml:"cache_from,omitempty" json:"cache_from,omitempty"`
+	CacheTo            StringList                `yaml:"cache_to,omitempty" json:"cache_to,omitempty"`
+	NoCache            bool                      `yaml:"no_cache,omitempty" json:"no_cache,omitempty"`
+	AdditionalContexts Mapping                   `yaml:"additional_contexts,omitempty" json:"additional_contexts,omitempty"`
+	Pull               bool                      `yaml:"pull,omitempty" json:"pull,omitempty"`
+	ExtraHosts         HostsList                 `yaml:"extra_hosts,omitempty" json:"extra_hosts,omitempty"`
+	Isolation          string                    `yaml:"isolation,omitempty" json:"isolation,omitempty"`
+	Network            string                    `yaml:"network,omitempty" json:"network,omitempty"`
+	Target             string                    `yaml:"target,omitempty" json:"target,omitempty"`
+	Secrets            []ServiceSecretConfig     `yaml:"secrets,omitempty" json:"secrets,omitempty"`
+	ShmSize            UnitBytes                 `yaml:"shm_size,omitempty" json:"shm_size,omitempty"`
+	Tags               StringList                `yaml:"tags,omitempty" json:"tags,omitempty"`
+	Ulimits            map[string]*UlimitsConfig `yaml:"ulimits,omitempty" json:"ulimits,omitempty"`
+	Platforms          StringList                `yaml:"platforms,omitempty" json:"platforms,omitempty"`
+	Privileged         bool                      `yaml:"privileged,omitempty" json:"privileged,omitempty"`
+
+	Extensions Extensions `yaml:"#extensions,inline,omitempty" json:"-"`
+}
+
+// BlkioConfig define blkio config
+type BlkioConfig struct {
+	Weight          uint16           `yaml:"weight,omitempty" json:"weight,omitempty"`
+	WeightDevice    []WeightDevice   `yaml:"weight_device,omitempty" json:"weight_device,omitempty"`
+	DeviceReadBps   []ThrottleDevice `yaml:"device_read_bps,omitempty" json:"device_read_bps,omitempty"`
+	DeviceReadIOps  []ThrottleDevice `yaml:"device_read_iops,omitempty" json:"device_read_iops,omitempty"`
+	DeviceWriteBps  []ThrottleDevice `yaml:"device_write_bps,omitempty" json:"device_write_bps,omitempty"`
+	DeviceWriteIOps []ThrottleDevice `yaml:"device_write_iops,omitempty" json:"device_write_iops,omitempty"`
+
+	Extensions Extensions `yaml:"#extensions,inline,omitempty" json:"-"`
+}
+
+type DeviceMapping struct {
+	Source      string `yaml:"source,omitempty" json:"source,omitempty"`
+	Target      string `yaml:"target,omitempty" json:"target,omitempty"`
+	Permissions string `yaml:"permissions,omitempty" json:"permissions,omitempty"`
+
+	Extensions Extensions `yaml:"#extensions,inline,omitempty" json:"-"`
+}
+
+// WeightDevice is a structure that holds device:weight pair
+type WeightDevice struct {
+	Path   string
+	Weight uint16
+
+	Extensions Extensions `yaml:"#extensions,inline,omitempty" json:"-"`
+}
+
+// ThrottleDevice is a structure that holds device:rate_per_second pair
+type ThrottleDevice struct {
+	Path string
+	Rate UnitBytes
+
+	Extensions Extensions `yaml:"#extensions,inline,omitempty" json:"-"`
+}
+
+// MappingWithColon is a mapping type that can be converted from a list of
+// 'key: value' strings
+type MappingWithColon map[string]string
+
+// LoggingConfig the logging configuration for a service
+type LoggingConfig struct {
+	Driver  string  `yaml:"driver,omitempty" json:"driver,omitempty"`
+	Options Options `yaml:"options,omitempty" json:"options,omitempty"`
+
+	Extensions Extensions `yaml:"#extensions,inline,omitempty" json:"-"`
+}
+
+// DeployConfig the deployment configuration for a service
+type DeployConfig struct {
+	Mode           string         `yaml:"mode,omitempty" json:"mode,omitempty"`
+	Replicas       *int           `yaml:"replicas,omitempty" json:"replicas,omitempty"`
+	Labels         Labels         `yaml:"labels,omitempty" json:"labels,omitempty"`
+	UpdateConfig   *UpdateConfig  `yaml:"update_config,omitempty" json:"update_config,omitempty"`
+	RollbackConfig *UpdateConfig  `yaml:"rollback_config,omitempty" json:"rollback_config,omitempty"`
+	Resources      Resources      `yaml:"resources,omitempty" json:"resources,omitempty"`
+	RestartPolicy  *RestartPolicy `yaml:"restart_policy,omitempty" json:"restart_policy,omitempty"`
+	Placement      Placement      `yaml:"placement,omitempty" json:"placement,omitempty"`
+	EndpointMode   string         `yaml:"endpoint_mode,omitempty" json:"endpoint_mode,omitempty"`
+
+	Extensions Extensions `yaml:"#extensions,inline,omitempty" json:"-"`
+}
+
+// UpdateConfig the service update configuration
+type UpdateConfig struct {
+	Parallelism     *uint64  `yaml:"parallelism,omitempty" json:"parallelism,omitempty"`
+	Delay           Duration `yaml:"delay,omitempty" json:"delay,omitempty"`
+	FailureAction   string   `yaml:"failure_action,omitempty" json:"failure_action,omitempty"`
+	Monitor         Duration `yaml:"monitor,omitempty" json:"monitor,omitempty"`
+	MaxFailureRatio float32  `yaml:"max_failure_ratio,omitempty" json:"max_failure_ratio,omitempty"`
+	Order           string   `yaml:"order,omitempty" json:"order,omitempty"`
+
+	Extensions Extensions `yaml:"#extensions,inline,omitempty" json:"-"`
+}
+
+// Resources the resource limits and reservations
+type Resources struct {
+	Limits       *Resource `yaml:"limits,omitempty" json:"limits,omitempty"`
+	Reservations *Resource `yaml:"reservations,omitempty" json:"reservations,omitempty"`
+
+	Extensions Extensions `yaml:"#extensions,inline,omitempty" json:"-"`
+}
+
+// Resource is a resource to be limited or reserved
+type Resource struct {
+	// TODO: types to convert from units and ratios
+	NanoCPUs         NanoCPUs          `yaml:"cpus,omitempty" json:"cpus,omitempty"`
+	MemoryBytes      UnitBytes         `yaml:"memory,omitempty" json:"memory,omitempty"`
+	Pids             int64             `yaml:"pids,omitempty" json:"pids,omitempty"`
+	Devices          []DeviceRequest   `yaml:"devices,omitempty" json:"devices,omitempty"`
+	GenericResources []GenericResource `yaml:"generic_resources,omitempty" json:"generic_resources,omitempty"`
+
+	Extensions Extensions `yaml:"#extensions,inline,omitempty" json:"-"`
+}
+
+// GenericResource represents a "user defined" resource which can
+// only be an integer (e.g: SSD=3) for a service
+type GenericResource struct {
+	DiscreteResourceSpec *DiscreteGenericResource `yaml:"discrete_resource_spec,omitempty" json:"discrete_resource_spec,omitempty"`
+
+	Extensions Extensions `yaml:"#extensions,inline,omitempty" json:"-"`
+}
+
+// DiscreteGenericResource represents a "user defined" resource which is defined
+// as an integer
+// "Kind" is used to describe the Kind of a resource (e.g: "GPU", "FPGA", "SSD", ...)
+// Value is used to count the resource (SSD=5, HDD=3, ...)
+type DiscreteGenericResource struct {
+	Kind  string `json:"kind"`
+	Value int64  `json:"value"`
+
+	Extensions Extensions `yaml:"#extensions,inline,omitempty" json:"-"`
+}
+
+// RestartPolicy the service restart policy
+type RestartPolicy struct {
+	Condition   string    `yaml:"condition,omitempty" json:"condition,omitempty"`
+	Delay       *Duration `yaml:"delay,omitempty" json:"delay,omitempty"`
+	MaxAttempts *uint64   `yaml:"max_attempts,omitempty" json:"max_attempts,omitempty"`
+	Window      *Duration `yaml:"window,omitempty" json:"window,omitempty"`
+
+	Extensions Extensions `yaml:"#extensions,inline,omitempty" json:"-"`
+}
+
+// Placement constraints for the service
+type Placement struct {
+	Constraints []string               `yaml:"constraints,omitempty" json:"constraints,omitempty"`
+	Preferences []PlacementPreferences `yaml:"preferences,omitempty" json:"preferences,omitempty"`
+	MaxReplicas uint64                 `yaml:"max_replicas_per_node,omitempty" json:"max_replicas_per_node,omitempty"`
+
+	Extensions Extensions `yaml:"#extensions,inline,omitempty" json:"-"`
+}
+
+// PlacementPreferences is the preferences for a service placement
+type PlacementPreferences struct {
+	Spread string `yaml:"spread,omitempty" json:"spread,omitempty"`
+
+	Extensions Extensions `yaml:"#extensions,inline,omitempty" json:"-"`
+}
+
+// ServiceNetworkConfig is the network configuration for a service
+type ServiceNetworkConfig struct {
+	Priority     int      `yaml:"priority,omitempty" json:"priority,omitempty"`
+	Aliases      []string `yaml:"aliases,omitempty" json:"aliases,omitempty"`
+	Ipv4Address  string   `yaml:"ipv4_address,omitempty" json:"ipv4_address,omitempty"`
+	Ipv6Address  string   `yaml:"ipv6_address,omitempty" json:"ipv6_address,omitempty"`
+	LinkLocalIPs []string `yaml:"link_local_ips,omitempty" json:"link_local_ips,omitempty"`
+	MacAddress   string   `yaml:"mac_address,omitempty" json:"mac_address,omitempty"`
+	DriverOpts   Options  `yaml:"driver_opts,omitempty" json:"driver_opts,omitempty"`
+
+	Extensions Extensions `yaml:"#extensions,inline,omitempty" json:"-"`
+}
+
+// ServicePortConfig is the port configuration for a service
+type ServicePortConfig struct {
+	Name        string `yaml:"name,omitempty" json:"name,omitempty"`
+	Mode        string `yaml:"mode,omitempty" json:"mode,omitempty"`
+	HostIP      string `yaml:"host_ip,omitempty" json:"host_ip,omitempty"`
+	Target      uint32 `yaml:"target,omitempty" json:"target,omitempty"`
+	Published   string `yaml:"published,omitempty" json:"published,omitempty"`
+	Protocol    string `yaml:"protocol,omitempty" json:"protocol,omitempty"`
+	AppProtocol string `yaml:"app_protocol,omitempty" json:"app_protocol,omitempty"`
+
+	Extensions Extensions `yaml:"#extensions,inline,omitempty" json:"-"`
+}
+
+// ParsePortConfig parse short syntax for service port configuration
+func ParsePortConfig(value string) ([]ServicePortConfig, error) {
+	var portConfigs []ServicePortConfig
+	ports, portBindings, err := nat.ParsePortSpecs([]string{value})
+	if err != nil {
+		return nil, err
+	}
+	// We need to sort the key of the ports to make sure it is consistent
+	keys := []string{}
+	for port := range ports {
+		keys = append(keys, string(port))
+	}
+	sort.Strings(keys)
+
+	for _, key := range keys {
+		port := nat.Port(key)
+		converted, err := convertPortToPortConfig(port, portBindings)
+		if err != nil {
+			return nil, err
+		}
+		portConfigs = append(portConfigs, converted...)
+	}
+	return portConfigs, nil
+}
+
+func convertPortToPortConfig(port nat.Port, portBindings map[nat.Port][]nat.PortBinding) ([]ServicePortConfig, error) {
+	var portConfigs []ServicePortConfig
+	for _, binding := range portBindings[port] {
+		portConfigs = append(portConfigs, ServicePortConfig{
+			HostIP:    binding.HostIP,
+			Protocol:  strings.ToLower(port.Proto()),
+			Target:    uint32(port.Int()),
+			Published: binding.HostPort,
+			Mode:      "ingress",
+		})
+	}
+	return portConfigs, nil
+}
+
+// ServiceVolumeConfig are references to a volume used by a service
+type ServiceVolumeConfig struct {
+	Type        string               `yaml:"type,omitempty" json:"type,omitempty"`
+	Source      string               `yaml:"source,omitempty" json:"source,omitempty"`
+	Target      string               `yaml:"target,omitempty" json:"target,omitempty"`
+	ReadOnly    bool                 `yaml:"read_only,omitempty" json:"read_only,omitempty"`
+	Consistency string               `yaml:"consistency,omitempty" json:"consistency,omitempty"`
+	Bind        *ServiceVolumeBind   `yaml:"bind,omitempty" json:"bind,omitempty"`
+	Volume      *ServiceVolumeVolume `yaml:"volume,omitempty" json:"volume,omitempty"`
+	Tmpfs       *ServiceVolumeTmpfs  `yaml:"tmpfs,omitempty" json:"tmpfs,omitempty"`
+
+	Extensions Extensions `yaml:"#extensions,inline,omitempty" json:"-"`
+}
+
+// String render ServiceVolumeConfig as a volume string, one can parse back using loader.ParseVolume
+func (s ServiceVolumeConfig) String() string {
+	access := "rw"
+	if s.ReadOnly {
+		access = "ro"
+	}
+	options := []string{access}
+	if s.Bind != nil && s.Bind.SELinux != "" {
+		options = append(options, s.Bind.SELinux)
+	}
+	if s.Bind != nil && s.Bind.Propagation != "" {
+		options = append(options, s.Bind.Propagation)
+	}
+	if s.Volume != nil && s.Volume.NoCopy {
+		options = append(options, "nocopy")
+	}
+	return fmt.Sprintf("%s:%s:%s", s.Source, s.Target, strings.Join(options, ","))
+}
+
+const (
+	// VolumeTypeBind is the type for mounting host dir
+	VolumeTypeBind = "bind"
+	// VolumeTypeVolume is the type for remote storage volumes
+	VolumeTypeVolume = "volume"
+	// VolumeTypeTmpfs is the type for mounting tmpfs
+	VolumeTypeTmpfs = "tmpfs"
+	// VolumeTypeNamedPipe is the type for mounting Windows named pipes
+	VolumeTypeNamedPipe = "npipe"
+	// VolumeTypeCluster is the type for mounting container storage interface (CSI) volumes
+	VolumeTypeCluster = "cluster"
+
+	// SElinuxShared share the volume content
+	SElinuxShared = "z"
+	// SElinuxUnshared label content as private unshared
+	SElinuxUnshared = "Z"
+)
+
+// ServiceVolumeBind are options for a service volume of type bind
+type ServiceVolumeBind struct {
+	SELinux        string `yaml:"selinux,omitempty" json:"selinux,omitempty"`
+	Propagation    string `yaml:"propagation,omitempty" json:"propagation,omitempty"`
+	CreateHostPath bool   `yaml:"create_host_path,omitempty" json:"create_host_path,omitempty"`
+	Recursive      string `yaml:"recursive,omitempty" json:"recursive,omitempty"`
+
+	Extensions Extensions `yaml:"#extensions,inline,omitempty" json:"-"`
+}
+
+// SELinux represents the SELinux re-labeling options.
+const (
+	// SELinuxShared option indicates that the bind mount content is shared among multiple containers
+	SELinuxShared string = "z"
+	// SELinuxPrivate option indicates that the bind mount content is private and unshared
+	SELinuxPrivate string = "Z"
+)
+
+// Propagation represents the propagation of a mount.
+const (
+	// PropagationRPrivate RPRIVATE
+	PropagationRPrivate string = "rprivate"
+	// PropagationPrivate PRIVATE
+	PropagationPrivate string = "private"
+	// PropagationRShared RSHARED
+	PropagationRShared string = "rshared"
+	// PropagationShared SHARED
+	PropagationShared string = "shared"
+	// PropagationRSlave RSLAVE
+	PropagationRSlave string = "rslave"
+	// PropagationSlave SLAVE
+	PropagationSlave string = "slave"
+)
+
+// ServiceVolumeVolume are options for a service volume of type volume
+type ServiceVolumeVolume struct {
+	NoCopy  bool   `yaml:"nocopy,omitempty" json:"nocopy,omitempty"`
+	Subpath string `yaml:"subpath,omitempty" json:"subpath,omitempty"`
+
+	Extensions Extensions `yaml:"#extensions,inline,omitempty" json:"-"`
+}
+
+// ServiceVolumeTmpfs are options for a service volume of type tmpfs
+type ServiceVolumeTmpfs struct {
+	Size UnitBytes `yaml:"size,omitempty" json:"size,omitempty"`
+
+	Mode uint32 `yaml:"mode,omitempty" json:"mode,omitempty"`
+
+	Extensions Extensions `yaml:"#extensions,inline,omitempty" json:"-"`
+}
+
+// FileReferenceConfig for a reference to a swarm file object
+type FileReferenceConfig struct {
+	Source string  `yaml:"source,omitempty" json:"source,omitempty"`
+	Target string  `yaml:"target,omitempty" json:"target,omitempty"`
+	UID    string  `yaml:"uid,omitempty" json:"uid,omitempty"`
+	GID    string  `yaml:"gid,omitempty" json:"gid,omitempty"`
+	Mode   *uint32 `yaml:"mode,omitempty" json:"mode,omitempty"`
+
+	Extensions Extensions `yaml:"#extensions,inline,omitempty" json:"-"`
+}
+
+// ServiceConfigObjConfig is the config obj configuration for a service
+type ServiceConfigObjConfig FileReferenceConfig
+
+// ServiceSecretConfig is the secret configuration for a service
+type ServiceSecretConfig FileReferenceConfig
+
+// UlimitsConfig the ulimit configuration
+type UlimitsConfig struct {
+	Single int `yaml:"single,omitempty" json:"single,omitempty"`
+	Soft   int `yaml:"soft,omitempty" json:"soft,omitempty"`
+	Hard   int `yaml:"hard,omitempty" json:"hard,omitempty"`
+
+	Extensions Extensions `yaml:"#extensions,inline,omitempty" json:"-"`
+}
+
+func (u *UlimitsConfig) DecodeMapstructure(value interface{}) error {
+	switch v := value.(type) {
+	case *UlimitsConfig:
+		// this call to DecodeMapstructure is triggered after initial value conversion as we use a map[string]*UlimitsConfig
+		return nil
+	case int:
+		u.Single = v
+		u.Soft = 0
+		u.Hard = 0
+	case map[string]any:
+		u.Single = 0
+		soft, ok := v["soft"]
+		if ok {
+			u.Soft = soft.(int)
+		}
+		hard, ok := v["hard"]
+		if ok {
+			u.Hard = hard.(int)
+		}
+	default:
+		return fmt.Errorf("unexpected value type %T for ulimit", value)
+	}
+	return nil
+}
+
+// MarshalYAML makes UlimitsConfig implement yaml.Marshaller
+func (u *UlimitsConfig) MarshalYAML() (interface{}, error) {
+	if u.Single != 0 {
+		return u.Single, nil
+	}
+	return struct {
+		Soft int
+		Hard int
+	}{
+		Soft: u.Soft,
+		Hard: u.Hard,
+	}, nil
+}
+
+// MarshalJSON makes UlimitsConfig implement json.Marshaller
+func (u *UlimitsConfig) MarshalJSON() ([]byte, error) {
+	if u.Single != 0 {
+		return json.Marshal(u.Single)
+	}
+	// Pass as a value to avoid re-entering this method and use the default implementation
+	return json.Marshal(*u)
+}
+
+// NetworkConfig for a network
+type NetworkConfig struct {
+	Name         string     `yaml:"name,omitempty" json:"name,omitempty"`
+	Driver       string     `yaml:"driver,omitempty" json:"driver,omitempty"`
+	DriverOpts   Options    `yaml:"driver_opts,omitempty" json:"driver_opts,omitempty"`
+	Ipam         IPAMConfig `yaml:"ipam,omitempty" json:"ipam,omitempty"`
+	External     External   `yaml:"external,omitempty" json:"external,omitempty"`
+	Internal     bool       `yaml:"internal,omitempty" json:"internal,omitempty"`
+	Attachable   bool       `yaml:"attachable,omitempty" json:"attachable,omitempty"`
+	Labels       Labels     `yaml:"labels,omitempty" json:"labels,omitempty"`
+	CustomLabels Labels     `yaml:"-" json:"-"`
+	EnableIPv6   *bool      `yaml:"enable_ipv6,omitempty" json:"enable_ipv6,omitempty"`
+	Extensions   Extensions `yaml:"#extensions,inline,omitempty" json:"-"`
+}
+
+// IPAMConfig for a network
+type IPAMConfig struct {
+	Driver     string      `yaml:"driver,omitempty" json:"driver,omitempty"`
+	Config     []*IPAMPool `yaml:"config,omitempty" json:"config,omitempty"`
+	Extensions Extensions  `yaml:"#extensions,inline,omitempty" json:"-"`
+}
+
+// IPAMPool for a network
+type IPAMPool struct {
+	Subnet             string     `yaml:"subnet,omitempty" json:"subnet,omitempty"`
+	Gateway            string     `yaml:"gateway,omitempty" json:"gateway,omitempty"`
+	IPRange            string     `yaml:"ip_range,omitempty" json:"ip_range,omitempty"`
+	AuxiliaryAddresses Mapping    `yaml:"aux_addresses,omitempty" json:"aux_addresses,omitempty"`
+	Extensions         Extensions `yaml:",inline" json:"-"`
+}
+
+// VolumeConfig for a volume
+type VolumeConfig struct {
+	Name         string     `yaml:"name,omitempty" json:"name,omitempty"`
+	Driver       string     `yaml:"driver,omitempty" json:"driver,omitempty"`
+	DriverOpts   Options    `yaml:"driver_opts,omitempty" json:"driver_opts,omitempty"`
+	External     External   `yaml:"external,omitempty" json:"external,omitempty"`
+	Labels       Labels     `yaml:"labels,omitempty" json:"labels,omitempty"`
+	CustomLabels Labels     `yaml:"-" json:"-"`
+	Extensions   Extensions `yaml:"#extensions,inline,omitempty" json:"-"`
+}
+
+// External identifies a Volume or Network as a reference to a resource that is
+// not managed, and should already exist.
+type External bool
+
+// CredentialSpecConfig for credential spec on Windows
+type CredentialSpecConfig struct {
+	Config     string     `yaml:"config,omitempty" json:"config,omitempty"` // Config was added in API v1.40
+	File       string     `yaml:"file,omitempty" json:"file,omitempty"`
+	Registry   string     `yaml:"registry,omitempty" json:"registry,omitempty"`
+	Extensions Extensions `yaml:"#extensions,inline,omitempty" json:"-"`
+}
+
+// FileObjectConfig is a config type for a file used by a service
+type FileObjectConfig struct {
+	Name        string `yaml:"name,omitempty" json:"name,omitempty"`
+	File        string `yaml:"file,omitempty" json:"file,omitempty"`
+	Environment string `yaml:"environment,omitempty" json:"environment,omitempty"`
+	Content     string `yaml:"content,omitempty" json:"content,omitempty"`
+	// configure marshalling to include Content - excluded by default to prevent sensitive data leaks
+	marshallContent bool
+	External        External          `yaml:"external,omitempty" json:"external,omitempty"`
+	Labels          Labels            `yaml:"labels,omitempty" json:"labels,omitempty"`
+	Driver          string            `yaml:"driver,omitempty" json:"driver,omitempty"`
+	DriverOpts      map[string]string `yaml:"driver_opts,omitempty" json:"driver_opts,omitempty"`
+	TemplateDriver  string            `yaml:"template_driver,omitempty" json:"template_driver,omitempty"`
+	Extensions      Extensions        `yaml:"#extensions,inline,omitempty" json:"-"`
+}
+
+const (
+	// ServiceConditionCompletedSuccessfully is the type for waiting until a service has completed successfully (exit code 0).
+	ServiceConditionCompletedSuccessfully = "service_completed_successfully"
+
+	// ServiceConditionHealthy is the type for waiting until a service is healthy.
+	ServiceConditionHealthy = "service_healthy"
+
+	// ServiceConditionStarted is the type for waiting until a service has started (default).
+	ServiceConditionStarted = "service_started"
+)
+
+type DependsOnConfig map[string]ServiceDependency
+
+type ServiceDependency struct {
+	Condition  string     `yaml:"condition,omitempty" json:"condition,omitempty"`
+	Restart    bool       `yaml:"restart,omitempty" json:"restart,omitempty"`
+	Extensions Extensions `yaml:"#extensions,inline,omitempty" json:"-"`
+	Required   bool       `yaml:"required" json:"required"`
+}
+
+type ExtendsConfig struct {
+	File    string `yaml:"file,omitempty" json:"file,omitempty"`
+	Service string `yaml:"service,omitempty" json:"service,omitempty"`
+}
+
+// SecretConfig for a secret
+type SecretConfig FileObjectConfig
+
+// MarshalYAML makes SecretConfig implement yaml.Marshaller
+func (s SecretConfig) MarshalYAML() (interface{}, error) {
+	// secret content is set while loading model. Never marshall it
+	if !s.marshallContent {
+		s.Content = ""
+	}
+	return FileObjectConfig(s), nil
+}
+
+// MarshalJSON makes SecretConfig implement json.Marshaller
+func (s SecretConfig) MarshalJSON() ([]byte, error) {
+	// secret content is set while loading model. Never marshall it
+	if !s.marshallContent {
+		s.Content = ""
+	}
+	return json.Marshal(FileObjectConfig(s))
+}
+
+// ConfigObjConfig is the config for the swarm "Config" object
+type ConfigObjConfig FileObjectConfig
+
+// MarshalYAML makes ConfigObjConfig implement yaml.Marshaller
+func (s ConfigObjConfig) MarshalYAML() (interface{}, error) {
+	// config content may have been set from environment while loading model. Marshall actual source
+	if s.Environment != "" {
+		s.Content = ""
+	}
+	return FileObjectConfig(s), nil
+}
+
+// MarshalJSON makes ConfigObjConfig implement json.Marshaller
+func (s ConfigObjConfig) MarshalJSON() ([]byte, error) {
+	// config content may have been set from environment while loading model. Marshall actual source
+	if s.Environment != "" {
+		s.Content = ""
+	}
+	return json.Marshal(FileObjectConfig(s))
+}
+
+type IncludeConfig struct {
+	Path             StringList `yaml:"path,omitempty" json:"path,omitempty"`
+	ProjectDirectory string     `yaml:"project_directory,omitempty" json:"project_directory,omitempty"`
+	EnvFile          StringList `yaml:"env_file,omitempty" json:"env_file,omitempty"`
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/utils/collectionutils.go 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/utils/collectionutils.go
--- 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/utils/collectionutils.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/utils/collectionutils.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,68 @@
+/*
+   Copyright 2020 The Compose Specification Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package utils
+
+import (
+	"golang.org/x/exp/constraints"
+	"golang.org/x/exp/maps"
+	"golang.org/x/exp/slices"
+)
+
+func MapKeys[T constraints.Ordered, U any](theMap map[T]U) []T {
+	result := maps.Keys(theMap)
+	slices.Sort(result)
+	return result
+}
+
+func MapsAppend[T comparable, U any](target map[T]U, source map[T]U) map[T]U {
+	if target == nil {
+		return source
+	}
+	if source == nil {
+		return target
+	}
+	for key, value := range source {
+		if _, ok := target[key]; !ok {
+			target[key] = value
+		}
+	}
+	return target
+}
+
+func ArrayContains[T comparable](source []T, toCheck []T) bool {
+	for _, value := range toCheck {
+		if !slices.Contains(source, value) {
+			return false
+		}
+	}
+	return true
+}
+
+func RemoveDuplicates[T comparable](slice []T) []T {
+	// Create a map to store unique elements
+	seen := make(map[T]bool)
+	result := []T{}
+
+	// Loop through the slice, adding elements to the map if they haven't been seen before
+	for _, val := range slice {
+		if _, ok := seen[val]; !ok {
+			seen[val] = true
+			result = append(result, val)
+		}
+	}
+	return result
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/utils/pathutils.go 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/utils/pathutils.go
--- 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/utils/pathutils.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/utils/pathutils.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,92 @@
+/*
+   Copyright 2020 The Compose Specification Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package utils
+
+import (
+	"os"
+	"path/filepath"
+	"strings"
+)
+
+// ResolveSymbolicLink converts the section of an absolute path if it is a
+// symbolic link
+//
+// Parameters:
+//   - path: an absolute path
+//
+// Returns:
+//   - converted path if it has a symbolic link or the same path if there is
+//     no symbolic link
+func ResolveSymbolicLink(path string) (string, error) {
+	sym, part, err := getSymbolinkLink(path)
+	if err != nil {
+		return "", err
+	}
+	if sym == "" && part == "" {
+		// no symbolic link detected
+		return path, nil
+	}
+	return strings.Replace(path, part, sym, 1), nil
+
+}
+
+// getSymbolinkLink parses all parts of the path and returns the
+// the symbolic link part as well as the correspondent original part
+// Parameters:
+//   - path: an absolute path
+//
+// Returns:
+//   - string section of the path that is a symbolic link
+//   - string correspondent path section of the symbolic link
+//   - An error
+func getSymbolinkLink(path string) (string, string, error) {
+	parts := strings.Split(path, string(os.PathSeparator))
+
+	// Reconstruct the path step by step, checking each component
+	var currentPath string
+	if filepath.IsAbs(path) {
+		currentPath = string(os.PathSeparator)
+	}
+
+	for _, part := range parts {
+		if part == "" {
+			continue
+		}
+		currentPath = filepath.Join(currentPath, part)
+
+		if isSymLink := isSymbolicLink(currentPath); isSymLink {
+			// return symbolic link, and correspondent part
+			target, err := filepath.EvalSymlinks(currentPath)
+			if err != nil {
+				return "", "", err
+			}
+			return target, currentPath, nil
+		}
+	}
+	return "", "", nil // no symbolic link
+}
+
+// isSymbolicLink validates if the path is a symbolic link
+func isSymbolicLink(path string) bool {
+	info, err := os.Lstat(path)
+	if err != nil {
+		return false
+	}
+
+	// Check if the file mode indicates a symbolic link
+	return info.Mode()&os.ModeSymlink != 0
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/utils/set.go 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/utils/set.go
--- 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/utils/set.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/utils/set.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,95 @@
+/*
+   Copyright 2020 The Compose Specification Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package utils
+
+type Set[T comparable] map[T]struct{}
+
+func NewSet[T comparable](v ...T) Set[T] {
+	if len(v) == 0 {
+		return make(Set[T])
+	}
+
+	out := make(Set[T], len(v))
+	for i := range v {
+		out.Add(v[i])
+	}
+	return out
+}
+
+func (s Set[T]) Has(v T) bool {
+	_, ok := s[v]
+	return ok
+}
+
+func (s Set[T]) Add(v T) {
+	s[v] = struct{}{}
+}
+
+func (s Set[T]) AddAll(v ...T) {
+	for _, e := range v {
+		s[e] = struct{}{}
+	}
+}
+
+func (s Set[T]) Remove(v T) bool {
+	_, ok := s[v]
+	if ok {
+		delete(s, v)
+	}
+	return ok
+}
+
+func (s Set[T]) Clear() {
+	for v := range s {
+		delete(s, v)
+	}
+}
+
+func (s Set[T]) Elements() []T {
+	elements := make([]T, 0, len(s))
+	for v := range s {
+		elements = append(elements, v)
+	}
+	return elements
+}
+
+func (s Set[T]) RemoveAll(elements ...T) {
+	for _, e := range elements {
+		s.Remove(e)
+	}
+}
+
+func (s Set[T]) Diff(other Set[T]) Set[T] {
+	out := make(Set[T])
+	for k := range s {
+		if _, ok := other[k]; !ok {
+			out[k] = struct{}{}
+		}
+	}
+	return out
+}
+
+func (s Set[T]) Union(other Set[T]) Set[T] {
+	out := make(Set[T])
+	for k := range s {
+		out[k] = struct{}{}
+	}
+	for k := range other {
+		out[k] = struct{}{}
+	}
+	return out
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/utils/stringutils.go 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/utils/stringutils.go
--- 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/utils/stringutils.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/utils/stringutils.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,50 @@
+/*
+   Copyright 2020 The Compose Specification Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package utils
+
+import (
+	"fmt"
+	"strconv"
+	"strings"
+)
+
+// StringToBool converts a string to a boolean ignoring errors
+func StringToBool(s string) bool {
+	b, _ := strconv.ParseBool(strings.ToLower(strings.TrimSpace(s)))
+	return b
+}
+
+// GetAsEqualsMap split key=value formatted strings into a key : value map
+func GetAsEqualsMap(em []string) map[string]string {
+	m := make(map[string]string)
+	for _, v := range em {
+		key, val, found := strings.Cut(v, "=")
+		if found {
+			m[key] = val
+		}
+	}
+	return m
+}
+
+// GetAsEqualsMap format a key : value map into key=value strings
+func GetAsStringList(em map[string]string) []string {
+	m := make([]string, 0, len(em))
+	for k, v := range em {
+		m = append(m, fmt.Sprintf("%s=%s", k, v))
+	}
+	return m
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/validation/external.go 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/validation/external.go
--- 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/validation/external.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/validation/external.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,49 @@
+/*
+   Copyright 2020 The Compose Specification Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package validation
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/compose-spec/compose-go/v2/consts"
+	"github.com/compose-spec/compose-go/v2/tree"
+)
+
+func checkExternal(v map[string]any, p tree.Path) error {
+	b, ok := v["external"]
+	if !ok {
+		return nil
+	}
+	if !b.(bool) {
+		return nil
+	}
+
+	for k := range v {
+		switch k {
+		case "name", "external", consts.Extensions:
+			continue
+		default:
+			if strings.HasPrefix(k, "x-") {
+				// custom extension, ignored
+				continue
+			}
+			return fmt.Errorf("%s: conflicting parameters \"external\" and %q specified", p, k)
+		}
+	}
+	return nil
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/validation/validation.go 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/validation/validation.go
--- 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/validation/validation.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/validation/validation.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,108 @@
+/*
+   Copyright 2020 The Compose Specification Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package validation
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/compose-spec/compose-go/v2/tree"
+)
+
+type checkerFunc func(value any, p tree.Path) error
+
+var checks = map[tree.Path]checkerFunc{
+	"volumes.*":                       checkVolume,
+	"configs.*":                       checkFileObject("file", "environment", "content"),
+	"secrets.*":                       checkFileObject("file", "environment"),
+	"services.*.develop.watch.*.path": checkPath,
+	"services.*.deploy.resources.reservations.devices.*": checkDeviceRequest,
+	"services.*.gpus.*": checkDeviceRequest,
+}
+
+func Validate(dict map[string]any) error {
+	return check(dict, tree.NewPath())
+}
+
+func check(value any, p tree.Path) error {
+	for pattern, fn := range checks {
+		if p.Matches(pattern) {
+			return fn(value, p)
+		}
+	}
+	switch v := value.(type) {
+	case map[string]any:
+		for k, v := range v {
+			err := check(v, p.Next(k))
+			if err != nil {
+				return err
+			}
+		}
+	case []any:
+		for _, e := range v {
+			err := check(e, p.Next("[]"))
+			if err != nil {
+				return err
+			}
+		}
+	}
+	return nil
+}
+
+func checkFileObject(keys ...string) checkerFunc {
+	return func(value any, p tree.Path) error {
+
+		v := value.(map[string]any)
+		count := 0
+		for _, s := range keys {
+			if _, ok := v[s]; ok {
+				count++
+			}
+		}
+		if count > 1 {
+			return fmt.Errorf("%s: %s attributes are mutually exclusive", p, strings.Join(keys, "|"))
+		}
+		if count == 0 {
+			if _, ok := v["driver"]; ok {
+				// User specified a custom driver, which might have it's own way to set content
+				return nil
+			}
+			if _, ok := v["external"]; !ok {
+				return fmt.Errorf("%s: one of %s must be set", p, strings.Join(keys, "|"))
+			}
+		}
+		return nil
+	}
+}
+
+func checkPath(value any, p tree.Path) error {
+	v := value.(string)
+	if v == "" {
+		return fmt.Errorf("%s: value can't be blank", p)
+	}
+	return nil
+}
+
+func checkDeviceRequest(value any, p tree.Path) error {
+	v := value.(map[string]any)
+	_, hasCount := v["count"]
+	_, hasIds := v["device_ids"]
+	if hasCount && hasIds {
+		return fmt.Errorf(`%s: "count" and "device_ids" attributes are exclusive`, p)
+	}
+	return nil
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/validation/volume.go 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/validation/volume.go
--- 0.19.3+ds1-4/vendor/github.com/compose-spec/compose-go/v2/validation/volume.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/compose-spec/compose-go/v2/validation/volume.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,39 @@
+/*
+   Copyright 2020 The Compose Specification Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package validation
+
+import (
+	"fmt"
+
+	"github.com/compose-spec/compose-go/v2/tree"
+)
+
+func checkVolume(value any, p tree.Path) error {
+	if value == nil {
+		return nil
+	}
+	v, ok := value.(map[string]any)
+	if !ok {
+		return fmt.Errorf("expected volume, got %s", value)
+	}
+
+	err := checkExternal(v, p)
+	if err != nil {
+		return err
+	}
+	return nil
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/containerd/console/.golangci.yml 0.21.3-0ubuntu1/vendor/github.com/containerd/console/.golangci.yml
--- 0.19.3+ds1-4/vendor/github.com/containerd/console/.golangci.yml	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/containerd/console/.golangci.yml	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,20 @@
+linters:
+  enable:
+    - gofmt
+    - goimports
+    - ineffassign
+    - misspell
+    - revive
+    - staticcheck
+    - structcheck
+    - unconvert
+    - unused
+    - varcheck
+    - vet
+  disable:
+    - errcheck
+
+run:
+  timeout: 3m
+  skip-dirs:
+    - vendor
diff -pruN 0.19.3+ds1-4/vendor/github.com/containerd/console/LICENSE 0.21.3-0ubuntu1/vendor/github.com/containerd/console/LICENSE
--- 0.19.3+ds1-4/vendor/github.com/containerd/console/LICENSE	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/containerd/console/LICENSE	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,191 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        https://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   Copyright The containerd Authors
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       https://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff -pruN 0.19.3+ds1-4/vendor/github.com/containerd/console/README.md 0.21.3-0ubuntu1/vendor/github.com/containerd/console/README.md
--- 0.19.3+ds1-4/vendor/github.com/containerd/console/README.md	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/containerd/console/README.md	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,29 @@
+# console
+
+[![PkgGoDev](https://pkg.go.dev/badge/github.com/containerd/console)](https://pkg.go.dev/github.com/containerd/console)
+[![Build Status](https://github.com/containerd/console/workflows/CI/badge.svg)](https://github.com/containerd/console/actions?query=workflow%3ACI)
+[![Go Report Card](https://goreportcard.com/badge/github.com/containerd/console)](https://goreportcard.com/report/github.com/containerd/console)
+
+Golang package for dealing with consoles.  Light on deps and a simple API.
+
+## Modifying the current process
+
+```go
+current := console.Current()
+defer current.Reset()
+
+if err := current.SetRaw(); err != nil {
+}
+ws, err := current.Size()
+current.Resize(ws)
+```
+
+## Project details
+
+console is a containerd sub-project, licensed under the [Apache 2.0 license](./LICENSE).
+As a containerd sub-project, you will find the:
+ * [Project governance](https://github.com/containerd/project/blob/main/GOVERNANCE.md),
+ * [Maintainers](https://github.com/containerd/project/blob/main/MAINTAINERS),
+ * and [Contributing guidelines](https://github.com/containerd/project/blob/main/CONTRIBUTING.md)
+
+information in our [`containerd/project`](https://github.com/containerd/project) repository.
diff -pruN 0.19.3+ds1-4/vendor/github.com/containerd/console/console.go 0.21.3-0ubuntu1/vendor/github.com/containerd/console/console.go
--- 0.19.3+ds1-4/vendor/github.com/containerd/console/console.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/containerd/console/console.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,90 @@
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package console
+
+import (
+	"errors"
+	"io"
+	"os"
+)
+
+var (
+	ErrNotAConsole    = errors.New("provided file is not a console")
+	ErrNotImplemented = errors.New("not implemented")
+)
+
+type File interface {
+	io.ReadWriteCloser
+
+	// Fd returns its file descriptor
+	Fd() uintptr
+	// Name returns its file name
+	Name() string
+}
+
+type Console interface {
+	File
+
+	// Resize resizes the console to the provided window size
+	Resize(WinSize) error
+	// ResizeFrom resizes the calling console to the size of the
+	// provided console
+	ResizeFrom(Console) error
+	// SetRaw sets the console in raw mode
+	SetRaw() error
+	// DisableEcho disables echo on the console
+	DisableEcho() error
+	// Reset restores the console to its original state
+	Reset() error
+	// Size returns the window size of the console
+	Size() (WinSize, error)
+}
+
+// WinSize specifies the window size of the console
+type WinSize struct {
+	// Height of the console
+	Height uint16
+	// Width of the console
+	Width uint16
+	x     uint16
+	y     uint16
+}
+
+// Current returns the current process' console
+func Current() (c Console) {
+	var err error
+	// Usually all three streams (stdin, stdout, and stderr)
+	// are open to the same console, but some might be redirected,
+	// so try all three.
+	for _, s := range []*os.File{os.Stderr, os.Stdout, os.Stdin} {
+		if c, err = ConsoleFromFile(s); err == nil {
+			return c
+		}
+	}
+	// One of the std streams should always be a console
+	// for the design of this function.
+	panic(err)
+}
+
+// ConsoleFromFile returns a console using the provided file
+// nolint:revive
+func ConsoleFromFile(f File) (Console, error) {
+	if err := checkConsole(f); err != nil {
+		return nil, err
+	}
+	return newMaster(f)
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/containerd/console/console_linux.go 0.21.3-0ubuntu1/vendor/github.com/containerd/console/console_linux.go
--- 0.19.3+ds1-4/vendor/github.com/containerd/console/console_linux.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/containerd/console/console_linux.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,281 @@
+//go:build linux
+// +build linux
+
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package console
+
+import (
+	"io"
+	"os"
+	"sync"
+
+	"golang.org/x/sys/unix"
+)
+
+const (
+	maxEvents = 128
+)
+
+// Epoller manages multiple epoll consoles using edge-triggered epoll api so we
+// dont have to deal with repeated wake-up of EPOLLER or EPOLLHUP.
+// For more details, see:
+// - https://github.com/systemd/systemd/pull/4262
+// - https://github.com/moby/moby/issues/27202
+//
+// Example usage of Epoller and EpollConsole can be as follow:
+//
+//	epoller, _ := NewEpoller()
+//	epollConsole, _ := epoller.Add(console)
+//	go epoller.Wait()
+//	var (
+//		b  bytes.Buffer
+//		wg sync.WaitGroup
+//	)
+//	wg.Add(1)
+//	go func() {
+//		io.Copy(&b, epollConsole)
+//		wg.Done()
+//	}()
+//	// perform I/O on the console
+//	epollConsole.Shutdown(epoller.CloseConsole)
+//	wg.Wait()
+//	epollConsole.Close()
+type Epoller struct {
+	efd       int
+	mu        sync.Mutex
+	fdMapping map[int]*EpollConsole
+	closeOnce sync.Once
+}
+
+// NewEpoller returns an instance of epoller with a valid epoll fd.
+func NewEpoller() (*Epoller, error) {
+	efd, err := unix.EpollCreate1(unix.EPOLL_CLOEXEC)
+	if err != nil {
+		return nil, err
+	}
+	return &Epoller{
+		efd:       efd,
+		fdMapping: make(map[int]*EpollConsole),
+	}, nil
+}
+
+// Add creates an epoll console based on the provided console. The console will
+// be registered with EPOLLET (i.e. using edge-triggered notification) and its
+// file descriptor will be set to non-blocking mode. After this, user should use
+// the return console to perform I/O.
+func (e *Epoller) Add(console Console) (*EpollConsole, error) {
+	sysfd := int(console.Fd())
+	// Set sysfd to non-blocking mode
+	if err := unix.SetNonblock(sysfd, true); err != nil {
+		return nil, err
+	}
+
+	ev := unix.EpollEvent{
+		Events: unix.EPOLLIN | unix.EPOLLOUT | unix.EPOLLRDHUP | unix.EPOLLET,
+		Fd:     int32(sysfd),
+	}
+	if err := unix.EpollCtl(e.efd, unix.EPOLL_CTL_ADD, sysfd, &ev); err != nil {
+		return nil, err
+	}
+	ef := &EpollConsole{
+		Console: console,
+		sysfd:   sysfd,
+		readc:   sync.NewCond(&sync.Mutex{}),
+		writec:  sync.NewCond(&sync.Mutex{}),
+	}
+	e.mu.Lock()
+	e.fdMapping[sysfd] = ef
+	e.mu.Unlock()
+	return ef, nil
+}
+
+// Wait starts the loop to wait for its consoles' notifications and signal
+// appropriate console that it can perform I/O.
+func (e *Epoller) Wait() error {
+	events := make([]unix.EpollEvent, maxEvents)
+	for {
+		n, err := unix.EpollWait(e.efd, events, -1)
+		if err != nil {
+			// EINTR: The call was interrupted by a signal handler before either
+			// any of the requested events occurred or the timeout expired
+			if err == unix.EINTR {
+				continue
+			}
+			return err
+		}
+		for i := 0; i < n; i++ {
+			ev := &events[i]
+			// the console is ready to be read from
+			if ev.Events&(unix.EPOLLIN|unix.EPOLLHUP|unix.EPOLLERR) != 0 {
+				if epfile := e.getConsole(int(ev.Fd)); epfile != nil {
+					epfile.signalRead()
+				}
+			}
+			// the console is ready to be written to
+			if ev.Events&(unix.EPOLLOUT|unix.EPOLLHUP|unix.EPOLLERR) != 0 {
+				if epfile := e.getConsole(int(ev.Fd)); epfile != nil {
+					epfile.signalWrite()
+				}
+			}
+		}
+	}
+}
+
+// CloseConsole unregisters the console's file descriptor from epoll interface
+func (e *Epoller) CloseConsole(fd int) error {
+	e.mu.Lock()
+	defer e.mu.Unlock()
+	delete(e.fdMapping, fd)
+	return unix.EpollCtl(e.efd, unix.EPOLL_CTL_DEL, fd, &unix.EpollEvent{})
+}
+
+func (e *Epoller) getConsole(sysfd int) *EpollConsole {
+	e.mu.Lock()
+	f := e.fdMapping[sysfd]
+	e.mu.Unlock()
+	return f
+}
+
+// Close closes the epoll fd
+func (e *Epoller) Close() error {
+	closeErr := os.ErrClosed // default to "file already closed"
+	e.closeOnce.Do(func() {
+		closeErr = unix.Close(e.efd)
+	})
+	return closeErr
+}
+
+// EpollConsole acts like a console but registers its file descriptor with an
+// epoll fd and uses epoll API to perform I/O.
+type EpollConsole struct {
+	Console
+	readc  *sync.Cond
+	writec *sync.Cond
+	sysfd  int
+	closed bool
+}
+
+// Read reads up to len(p) bytes into p. It returns the number of bytes read
+// (0 <= n <= len(p)) and any error encountered.
+//
+// If the console's read returns EAGAIN or EIO, we assume that it's a
+// temporary error because the other side went away and wait for the signal
+// generated by epoll event to continue.
+func (ec *EpollConsole) Read(p []byte) (n int, err error) {
+	var read int
+	ec.readc.L.Lock()
+	defer ec.readc.L.Unlock()
+	for {
+		read, err = ec.Console.Read(p[n:])
+		n += read
+		if err != nil {
+			var hangup bool
+			if perr, ok := err.(*os.PathError); ok {
+				hangup = (perr.Err == unix.EAGAIN || perr.Err == unix.EIO)
+			} else {
+				hangup = (err == unix.EAGAIN || err == unix.EIO)
+			}
+			// if the other end disappear, assume this is temporary and wait for the
+			// signal to continue again. Unless we didnt read anything and the
+			// console is already marked as closed then we should exit
+			if hangup && !(n == 0 && len(p) > 0 && ec.closed) {
+				ec.readc.Wait()
+				continue
+			}
+		}
+		break
+	}
+	// if we didnt read anything then return io.EOF to end gracefully
+	if n == 0 && len(p) > 0 && err == nil {
+		err = io.EOF
+	}
+	// signal for others that we finished the read
+	ec.readc.Signal()
+	return n, err
+}
+
+// Writes len(p) bytes from p to the console. It returns the number of bytes
+// written from p (0 <= n <= len(p)) and any error encountered that caused
+// the write to stop early.
+//
+// If writes to the console returns EAGAIN or EIO, we assume that it's a
+// temporary error because the other side went away and wait for the signal
+// generated by epoll event to continue.
+func (ec *EpollConsole) Write(p []byte) (n int, err error) {
+	var written int
+	ec.writec.L.Lock()
+	defer ec.writec.L.Unlock()
+	for {
+		written, err = ec.Console.Write(p[n:])
+		n += written
+		if err != nil {
+			var hangup bool
+			if perr, ok := err.(*os.PathError); ok {
+				hangup = (perr.Err == unix.EAGAIN || perr.Err == unix.EIO)
+			} else {
+				hangup = (err == unix.EAGAIN || err == unix.EIO)
+			}
+			// if the other end disappears, assume this is temporary and wait for the
+			// signal to continue again.
+			if hangup {
+				ec.writec.Wait()
+				continue
+			}
+		}
+		// unrecoverable error, break the loop and return the error
+		break
+	}
+	if n < len(p) && err == nil {
+		err = io.ErrShortWrite
+	}
+	// signal for others that we finished the write
+	ec.writec.Signal()
+	return n, err
+}
+
+// Shutdown closes the file descriptor and signals call waiters for this fd.
+// It accepts a callback which will be called with the console's fd. The
+// callback typically will be used to do further cleanup such as unregister the
+// console's fd from the epoll interface.
+// User should call Shutdown and wait for all I/O operation to be finished
+// before closing the console.
+func (ec *EpollConsole) Shutdown(close func(int) error) error {
+	ec.readc.L.Lock()
+	defer ec.readc.L.Unlock()
+	ec.writec.L.Lock()
+	defer ec.writec.L.Unlock()
+
+	ec.readc.Broadcast()
+	ec.writec.Broadcast()
+	ec.closed = true
+	return close(ec.sysfd)
+}
+
+// signalRead signals that the console is readable.
+func (ec *EpollConsole) signalRead() {
+	ec.readc.L.Lock()
+	ec.readc.Signal()
+	ec.readc.L.Unlock()
+}
+
+// signalWrite signals that the console is writable.
+func (ec *EpollConsole) signalWrite() {
+	ec.writec.L.Lock()
+	ec.writec.Signal()
+	ec.writec.L.Unlock()
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/containerd/console/console_other.go 0.21.3-0ubuntu1/vendor/github.com/containerd/console/console_other.go
--- 0.19.3+ds1-4/vendor/github.com/containerd/console/console_other.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/containerd/console/console_other.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,36 @@
+//go:build !darwin && !freebsd && !linux && !netbsd && !openbsd && !solaris && !windows && !zos
+// +build !darwin,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows,!zos
+
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package console
+
+// NewPty creates a new pty pair
+// The master is returned as the first console and a string
+// with the path to the pty slave is returned as the second
+func NewPty() (Console, string, error) {
+	return nil, "", ErrNotImplemented
+}
+
+// checkConsole checks if the provided file is a console
+func checkConsole(f File) error {
+	return ErrNotAConsole
+}
+
+func newMaster(f File) (Console, error) {
+	return nil, ErrNotImplemented
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/containerd/console/console_unix.go 0.21.3-0ubuntu1/vendor/github.com/containerd/console/console_unix.go
--- 0.19.3+ds1-4/vendor/github.com/containerd/console/console_unix.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/containerd/console/console_unix.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,157 @@
+//go:build darwin || freebsd || linux || netbsd || openbsd || zos
+// +build darwin freebsd linux netbsd openbsd zos
+
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package console
+
+import (
+	"golang.org/x/sys/unix"
+)
+
+// NewPty creates a new pty pair
+// The master is returned as the first console and a string
+// with the path to the pty slave is returned as the second
+func NewPty() (Console, string, error) {
+	f, err := openpt()
+	if err != nil {
+		return nil, "", err
+	}
+	slave, err := ptsname(f)
+	if err != nil {
+		return nil, "", err
+	}
+	if err := unlockpt(f); err != nil {
+		return nil, "", err
+	}
+	m, err := newMaster(f)
+	if err != nil {
+		return nil, "", err
+	}
+	return m, slave, nil
+}
+
+type master struct {
+	f        File
+	original *unix.Termios
+}
+
+func (m *master) Read(b []byte) (int, error) {
+	return m.f.Read(b)
+}
+
+func (m *master) Write(b []byte) (int, error) {
+	return m.f.Write(b)
+}
+
+func (m *master) Close() error {
+	return m.f.Close()
+}
+
+func (m *master) Resize(ws WinSize) error {
+	return tcswinsz(m.f.Fd(), ws)
+}
+
+func (m *master) ResizeFrom(c Console) error {
+	ws, err := c.Size()
+	if err != nil {
+		return err
+	}
+	return m.Resize(ws)
+}
+
+func (m *master) Reset() error {
+	if m.original == nil {
+		return nil
+	}
+	return tcset(m.f.Fd(), m.original)
+}
+
+func (m *master) getCurrent() (unix.Termios, error) {
+	var termios unix.Termios
+	if err := tcget(m.f.Fd(), &termios); err != nil {
+		return unix.Termios{}, err
+	}
+	return termios, nil
+}
+
+func (m *master) SetRaw() error {
+	rawState, err := m.getCurrent()
+	if err != nil {
+		return err
+	}
+	rawState = cfmakeraw(rawState)
+	rawState.Oflag = rawState.Oflag | unix.OPOST
+	return tcset(m.f.Fd(), &rawState)
+}
+
+func (m *master) DisableEcho() error {
+	rawState, err := m.getCurrent()
+	if err != nil {
+		return err
+	}
+	rawState.Lflag = rawState.Lflag &^ unix.ECHO
+	return tcset(m.f.Fd(), &rawState)
+}
+
+func (m *master) Size() (WinSize, error) {
+	return tcgwinsz(m.f.Fd())
+}
+
+func (m *master) Fd() uintptr {
+	return m.f.Fd()
+}
+
+func (m *master) Name() string {
+	return m.f.Name()
+}
+
+// checkConsole checks if the provided file is a console
+func checkConsole(f File) error {
+	var termios unix.Termios
+	if tcget(f.Fd(), &termios) != nil {
+		return ErrNotAConsole
+	}
+	return nil
+}
+
+func newMaster(f File) (Console, error) {
+	m := &master{
+		f: f,
+	}
+	t, err := m.getCurrent()
+	if err != nil {
+		return nil, err
+	}
+	m.original = &t
+	return m, nil
+}
+
+// ClearONLCR sets the necessary tty_ioctl(4)s to ensure that a pty pair
+// created by us acts normally. In particular, a not-very-well-known default of
+// Linux unix98 ptys is that they have +onlcr by default. While this isn't a
+// problem for terminal emulators, because we relay data from the terminal we
+// also relay that funky line discipline.
+func ClearONLCR(fd uintptr) error {
+	return setONLCR(fd, false)
+}
+
+// SetONLCR sets the necessary tty_ioctl(4)s to ensure that a pty pair
+// created by us acts as intended for a terminal emulator.
+func SetONLCR(fd uintptr) error {
+	return setONLCR(fd, true)
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/containerd/console/console_windows.go 0.21.3-0ubuntu1/vendor/github.com/containerd/console/console_windows.go
--- 0.19.3+ds1-4/vendor/github.com/containerd/console/console_windows.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/containerd/console/console_windows.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,219 @@
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package console
+
+import (
+	"errors"
+	"fmt"
+	"os"
+
+	"golang.org/x/sys/windows"
+)
+
+var vtInputSupported bool
+
+func (m *master) initStdios() {
+	// Note: We discard console mode warnings, because in/out can be redirected.
+	//
+	// TODO: Investigate opening CONOUT$/CONIN$ to handle this correctly
+
+	m.in = windows.Handle(os.Stdin.Fd())
+	if err := windows.GetConsoleMode(m.in, &m.inMode); err == nil {
+		// Validate that windows.ENABLE_VIRTUAL_TERMINAL_INPUT is supported, but do not set it.
+		if err = windows.SetConsoleMode(m.in, m.inMode|windows.ENABLE_VIRTUAL_TERMINAL_INPUT); err == nil {
+			vtInputSupported = true
+		}
+		// Unconditionally set the console mode back even on failure because SetConsoleMode
+		// remembers invalid bits on input handles.
+		windows.SetConsoleMode(m.in, m.inMode)
+	}
+
+	m.out = windows.Handle(os.Stdout.Fd())
+	if err := windows.GetConsoleMode(m.out, &m.outMode); err == nil {
+		if err := windows.SetConsoleMode(m.out, m.outMode|windows.ENABLE_VIRTUAL_TERMINAL_PROCESSING); err == nil {
+			m.outMode |= windows.ENABLE_VIRTUAL_TERMINAL_PROCESSING
+		} else {
+			windows.SetConsoleMode(m.out, m.outMode)
+		}
+	}
+
+	m.err = windows.Handle(os.Stderr.Fd())
+	if err := windows.GetConsoleMode(m.err, &m.errMode); err == nil {
+		if err := windows.SetConsoleMode(m.err, m.errMode|windows.ENABLE_VIRTUAL_TERMINAL_PROCESSING); err == nil {
+			m.errMode |= windows.ENABLE_VIRTUAL_TERMINAL_PROCESSING
+		} else {
+			windows.SetConsoleMode(m.err, m.errMode)
+		}
+	}
+}
+
+type master struct {
+	in     windows.Handle
+	inMode uint32
+
+	out     windows.Handle
+	outMode uint32
+
+	err     windows.Handle
+	errMode uint32
+}
+
+func (m *master) SetRaw() error {
+	if err := makeInputRaw(m.in, m.inMode); err != nil {
+		return err
+	}
+
+	// Set StdOut and StdErr to raw mode, we ignore failures since
+	// windows.DISABLE_NEWLINE_AUTO_RETURN might not be supported on this version of
+	// Windows.
+
+	windows.SetConsoleMode(m.out, m.outMode|windows.DISABLE_NEWLINE_AUTO_RETURN)
+
+	windows.SetConsoleMode(m.err, m.errMode|windows.DISABLE_NEWLINE_AUTO_RETURN)
+
+	return nil
+}
+
+func (m *master) Reset() error {
+	var errs []error
+
+	for _, s := range []struct {
+		fd   windows.Handle
+		mode uint32
+	}{
+		{m.in, m.inMode},
+		{m.out, m.outMode},
+		{m.err, m.errMode},
+	} {
+		if err := windows.SetConsoleMode(s.fd, s.mode); err != nil {
+			// we can't just abort on the first error, otherwise we might leave
+			// the console in an unexpected state.
+			errs = append(errs, fmt.Errorf("unable to restore console mode: %w", err))
+		}
+	}
+
+	if len(errs) > 0 {
+		return errs[0]
+	}
+
+	return nil
+}
+
+func (m *master) Size() (WinSize, error) {
+	var info windows.ConsoleScreenBufferInfo
+	err := windows.GetConsoleScreenBufferInfo(m.out, &info)
+	if err != nil {
+		return WinSize{}, fmt.Errorf("unable to get console info: %w", err)
+	}
+
+	winsize := WinSize{
+		Width:  uint16(info.Window.Right - info.Window.Left + 1),
+		Height: uint16(info.Window.Bottom - info.Window.Top + 1),
+	}
+
+	return winsize, nil
+}
+
+func (m *master) Resize(ws WinSize) error {
+	return ErrNotImplemented
+}
+
+func (m *master) ResizeFrom(c Console) error {
+	return ErrNotImplemented
+}
+
+func (m *master) DisableEcho() error {
+	mode := m.inMode &^ windows.ENABLE_ECHO_INPUT
+	mode |= windows.ENABLE_PROCESSED_INPUT
+	mode |= windows.ENABLE_LINE_INPUT
+
+	if err := windows.SetConsoleMode(m.in, mode); err != nil {
+		return fmt.Errorf("unable to set console to disable echo: %w", err)
+	}
+
+	return nil
+}
+
+func (m *master) Close() error {
+	return nil
+}
+
+func (m *master) Read(b []byte) (int, error) {
+	return os.Stdin.Read(b)
+}
+
+func (m *master) Write(b []byte) (int, error) {
+	return os.Stdout.Write(b)
+}
+
+func (m *master) Fd() uintptr {
+	return uintptr(m.in)
+}
+
+// on windows, console can only be made from os.Std{in,out,err}, hence there
+// isnt a single name here we can use. Return a dummy "console" value in this
+// case should be sufficient.
+func (m *master) Name() string {
+	return "console"
+}
+
+// makeInputRaw puts the terminal (Windows Console) connected to the given
+// file descriptor into raw mode
+func makeInputRaw(fd windows.Handle, mode uint32) error {
+	// See
+	// -- https://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx
+	// -- https://msdn.microsoft.com/en-us/library/windows/desktop/ms683462(v=vs.85).aspx
+
+	// Disable these modes
+	mode &^= windows.ENABLE_ECHO_INPUT
+	mode &^= windows.ENABLE_LINE_INPUT
+	mode &^= windows.ENABLE_MOUSE_INPUT
+	mode &^= windows.ENABLE_WINDOW_INPUT
+	mode &^= windows.ENABLE_PROCESSED_INPUT
+
+	// Enable these modes
+	mode |= windows.ENABLE_EXTENDED_FLAGS
+	mode |= windows.ENABLE_INSERT_MODE
+	mode |= windows.ENABLE_QUICK_EDIT_MODE
+
+	if vtInputSupported {
+		mode |= windows.ENABLE_VIRTUAL_TERMINAL_INPUT
+	}
+
+	if err := windows.SetConsoleMode(fd, mode); err != nil {
+		return fmt.Errorf("unable to set console to raw mode: %w", err)
+	}
+
+	return nil
+}
+
+func checkConsole(f File) error {
+	var mode uint32
+	if err := windows.GetConsoleMode(windows.Handle(f.Fd()), &mode); err != nil {
+		return err
+	}
+	return nil
+}
+
+func newMaster(f File) (Console, error) {
+	if f != os.Stdin && f != os.Stdout && f != os.Stderr {
+		return nil, errors.New("creating a console from a file is not supported on windows")
+	}
+	m := &master{}
+	m.initStdios()
+	return m, nil
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/containerd/console/pty_freebsd_cgo.go 0.21.3-0ubuntu1/vendor/github.com/containerd/console/pty_freebsd_cgo.go
--- 0.19.3+ds1-4/vendor/github.com/containerd/console/pty_freebsd_cgo.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/containerd/console/pty_freebsd_cgo.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,46 @@
+//go:build freebsd && cgo
+// +build freebsd,cgo
+
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package console
+
+import (
+	"fmt"
+	"os"
+)
+
+/*
+#include <fcntl.h>
+#include <stdlib.h>
+#include <unistd.h>
+*/
+import "C"
+
+// openpt allocates a new pseudo-terminal and establishes a connection with its
+// control device.
+func openpt() (*os.File, error) {
+	fd, err := C.posix_openpt(C.O_RDWR)
+	if err != nil {
+		return nil, fmt.Errorf("posix_openpt: %w", err)
+	}
+	if _, err := C.grantpt(fd); err != nil {
+		C.close(fd)
+		return nil, fmt.Errorf("grantpt: %w", err)
+	}
+	return os.NewFile(uintptr(fd), ""), nil
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/containerd/console/pty_freebsd_nocgo.go 0.21.3-0ubuntu1/vendor/github.com/containerd/console/pty_freebsd_nocgo.go
--- 0.19.3+ds1-4/vendor/github.com/containerd/console/pty_freebsd_nocgo.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/containerd/console/pty_freebsd_nocgo.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,37 @@
+//go:build freebsd && !cgo
+// +build freebsd,!cgo
+
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package console
+
+import (
+	"os"
+)
+
+//
+// Implementing the functions below requires cgo support.  Non-cgo stubs
+// versions are defined below to enable cross-compilation of source code
+// that depends on these functions, but the resultant cross-compiled
+// binaries cannot actually be used.  If the stub function(s) below are
+// actually invoked they will display an error message and cause the
+// calling process to exit.
+//
+
+func openpt() (*os.File, error) {
+	panic("openpt() support requires cgo.")
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/containerd/console/pty_unix.go 0.21.3-0ubuntu1/vendor/github.com/containerd/console/pty_unix.go
--- 0.19.3+ds1-4/vendor/github.com/containerd/console/pty_unix.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/containerd/console/pty_unix.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,31 @@
+//go:build darwin || linux || netbsd || openbsd
+// +build darwin linux netbsd openbsd
+
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package console
+
+import (
+	"os"
+
+	"golang.org/x/sys/unix"
+)
+
+// openpt allocates a new pseudo-terminal by opening the /dev/ptmx device
+func openpt() (*os.File, error) {
+	return os.OpenFile("/dev/ptmx", unix.O_RDWR|unix.O_NOCTTY|unix.O_CLOEXEC, 0)
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/containerd/console/pty_zos.go 0.21.3-0ubuntu1/vendor/github.com/containerd/console/pty_zos.go
--- 0.19.3+ds1-4/vendor/github.com/containerd/console/pty_zos.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/containerd/console/pty_zos.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,43 @@
+//go:build zos
+// +build zos
+
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package console
+
+import (
+	"fmt"
+	"os"
+)
+
+// openpt allocates a new pseudo-terminal by opening the first available /dev/ptypXX device
+func openpt() (*os.File, error) {
+	var f *os.File
+	var err error
+	for i := 0; ; i++ {
+		ptyp := fmt.Sprintf("/dev/ptyp%04d", i)
+		f, err = os.OpenFile(ptyp, os.O_RDWR, 0600)
+		if err == nil {
+			break
+		}
+		if os.IsNotExist(err) {
+			return nil, err
+		}
+		// else probably Resource Busy
+	}
+	return f, nil
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/containerd/console/tc_darwin.go 0.21.3-0ubuntu1/vendor/github.com/containerd/console/tc_darwin.go
--- 0.19.3+ds1-4/vendor/github.com/containerd/console/tc_darwin.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/containerd/console/tc_darwin.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,44 @@
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package console
+
+import (
+	"fmt"
+	"os"
+
+	"golang.org/x/sys/unix"
+)
+
+const (
+	cmdTcGet = unix.TIOCGETA
+	cmdTcSet = unix.TIOCSETA
+)
+
+// unlockpt unlocks the slave pseudoterminal device corresponding to the master pseudoterminal referred to by f.
+// unlockpt should be called before opening the slave side of a pty.
+func unlockpt(f *os.File) error {
+	return unix.IoctlSetPointerInt(int(f.Fd()), unix.TIOCPTYUNLK, 0)
+}
+
+// ptsname retrieves the name of the first available pts for the given master.
+func ptsname(f *os.File) (string, error) {
+	n, err := unix.IoctlGetInt(int(f.Fd()), unix.TIOCPTYGNAME)
+	if err != nil {
+		return "", err
+	}
+	return fmt.Sprintf("/dev/pts/%d", n), nil
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/containerd/console/tc_freebsd_cgo.go 0.21.3-0ubuntu1/vendor/github.com/containerd/console/tc_freebsd_cgo.go
--- 0.19.3+ds1-4/vendor/github.com/containerd/console/tc_freebsd_cgo.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/containerd/console/tc_freebsd_cgo.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,58 @@
+//go:build freebsd && cgo
+// +build freebsd,cgo
+
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package console
+
+import (
+	"fmt"
+	"os"
+
+	"golang.org/x/sys/unix"
+)
+
+/*
+#include <stdlib.h>
+#include <unistd.h>
+*/
+import "C"
+
+const (
+	cmdTcGet = unix.TIOCGETA
+	cmdTcSet = unix.TIOCSETA
+)
+
+// unlockpt unlocks the slave pseudoterminal device corresponding to the master pseudoterminal referred to by f.
+// unlockpt should be called before opening the slave side of a pty.
+func unlockpt(f *os.File) error {
+	fd := C.int(f.Fd())
+	if _, err := C.unlockpt(fd); err != nil {
+		C.close(fd)
+		return fmt.Errorf("unlockpt: %w", err)
+	}
+	return nil
+}
+
+// ptsname retrieves the name of the first available pts for the given master.
+func ptsname(f *os.File) (string, error) {
+	n, err := unix.IoctlGetInt(int(f.Fd()), unix.TIOCGPTN)
+	if err != nil {
+		return "", err
+	}
+	return fmt.Sprintf("/dev/pts/%d", n), nil
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/containerd/console/tc_freebsd_nocgo.go 0.21.3-0ubuntu1/vendor/github.com/containerd/console/tc_freebsd_nocgo.go
--- 0.19.3+ds1-4/vendor/github.com/containerd/console/tc_freebsd_nocgo.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/containerd/console/tc_freebsd_nocgo.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,56 @@
+//go:build freebsd && !cgo
+// +build freebsd,!cgo
+
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package console
+
+import (
+	"fmt"
+	"os"
+
+	"golang.org/x/sys/unix"
+)
+
+const (
+	cmdTcGet = unix.TIOCGETA
+	cmdTcSet = unix.TIOCSETA
+)
+
+//
+// Implementing the functions below requires cgo support.  Non-cgo stubs
+// versions are defined below to enable cross-compilation of source code
+// that depends on these functions, but the resultant cross-compiled
+// binaries cannot actually be used.  If the stub function(s) below are
+// actually invoked they will display an error message and cause the
+// calling process to exit.
+//
+
+// unlockpt unlocks the slave pseudoterminal device corresponding to the master pseudoterminal referred to by f.
+// unlockpt should be called before opening the slave side of a pty.
+func unlockpt(f *os.File) error {
+	panic("unlockpt() support requires cgo.")
+}
+
+// ptsname retrieves the name of the first available pts for the given master.
+func ptsname(f *os.File) (string, error) {
+	n, err := unix.IoctlGetInt(int(f.Fd()), unix.TIOCGPTN)
+	if err != nil {
+		return "", err
+	}
+	return fmt.Sprintf("/dev/pts/%d", n), nil
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/containerd/console/tc_linux.go 0.21.3-0ubuntu1/vendor/github.com/containerd/console/tc_linux.go
--- 0.19.3+ds1-4/vendor/github.com/containerd/console/tc_linux.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/containerd/console/tc_linux.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,51 @@
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package console
+
+import (
+	"fmt"
+	"os"
+	"unsafe"
+
+	"golang.org/x/sys/unix"
+)
+
+const (
+	cmdTcGet = unix.TCGETS
+	cmdTcSet = unix.TCSETS
+)
+
+// unlockpt unlocks the slave pseudoterminal device corresponding to the master pseudoterminal referred to by f.
+// unlockpt should be called before opening the slave side of a pty.
+func unlockpt(f *os.File) error {
+	var u int32
+	// XXX do not use unix.IoctlSetPointerInt here, see commit dbd69c59b81.
+	if _, _, err := unix.Syscall(unix.SYS_IOCTL, f.Fd(), unix.TIOCSPTLCK, uintptr(unsafe.Pointer(&u))); err != 0 {
+		return err
+	}
+	return nil
+}
+
+// ptsname retrieves the name of the first available pts for the given master.
+func ptsname(f *os.File) (string, error) {
+	var u uint32
+	// XXX do not use unix.IoctlGetInt here, see commit dbd69c59b81.
+	if _, _, err := unix.Syscall(unix.SYS_IOCTL, f.Fd(), unix.TIOCGPTN, uintptr(unsafe.Pointer(&u))); err != 0 {
+		return "", err
+	}
+	return fmt.Sprintf("/dev/pts/%d", u), nil
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/containerd/console/tc_netbsd.go 0.21.3-0ubuntu1/vendor/github.com/containerd/console/tc_netbsd.go
--- 0.19.3+ds1-4/vendor/github.com/containerd/console/tc_netbsd.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/containerd/console/tc_netbsd.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,45 @@
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package console
+
+import (
+	"bytes"
+	"os"
+
+	"golang.org/x/sys/unix"
+)
+
+const (
+	cmdTcGet = unix.TIOCGETA
+	cmdTcSet = unix.TIOCSETA
+)
+
+// unlockpt unlocks the slave pseudoterminal device corresponding to the master pseudoterminal referred to by f.
+// unlockpt should be called before opening the slave side of a pty.
+// This does not exist on NetBSD, it does not allocate controlling terminals on open
+func unlockpt(f *os.File) error {
+	return nil
+}
+
+// ptsname retrieves the name of the first available pts for the given master.
+func ptsname(f *os.File) (string, error) {
+	ptm, err := unix.IoctlGetPtmget(int(f.Fd()), unix.TIOCPTSNAME)
+	if err != nil {
+		return "", err
+	}
+	return string(ptm.Sn[:bytes.IndexByte(ptm.Sn[:], 0)]), nil
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/containerd/console/tc_openbsd_cgo.go 0.21.3-0ubuntu1/vendor/github.com/containerd/console/tc_openbsd_cgo.go
--- 0.19.3+ds1-4/vendor/github.com/containerd/console/tc_openbsd_cgo.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/containerd/console/tc_openbsd_cgo.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,52 @@
+//go:build openbsd && cgo
+// +build openbsd,cgo
+
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package console
+
+import (
+	"os"
+
+	"golang.org/x/sys/unix"
+)
+
+//#include <stdlib.h>
+import "C"
+
+const (
+	cmdTcGet = unix.TIOCGETA
+	cmdTcSet = unix.TIOCSETA
+)
+
+// ptsname retrieves the name of the first available pts for the given master.
+func ptsname(f *os.File) (string, error) {
+	ptspath, err := C.ptsname(C.int(f.Fd()))
+	if err != nil {
+		return "", err
+	}
+	return C.GoString(ptspath), nil
+}
+
+// unlockpt unlocks the slave pseudoterminal device corresponding to the master pseudoterminal referred to by f.
+// unlockpt should be called before opening the slave side of a pty.
+func unlockpt(f *os.File) error {
+	if _, err := C.grantpt(C.int(f.Fd())); err != nil {
+		return err
+	}
+	return nil
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/containerd/console/tc_openbsd_nocgo.go 0.21.3-0ubuntu1/vendor/github.com/containerd/console/tc_openbsd_nocgo.go
--- 0.19.3+ds1-4/vendor/github.com/containerd/console/tc_openbsd_nocgo.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/containerd/console/tc_openbsd_nocgo.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,48 @@
+//go:build openbsd && !cgo
+// +build openbsd,!cgo
+
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+//
+// Implementing the functions below requires cgo support.  Non-cgo stubs
+// versions are defined below to enable cross-compilation of source code
+// that depends on these functions, but the resultant cross-compiled
+// binaries cannot actually be used.  If the stub function(s) below are
+// actually invoked they will display an error message and cause the
+// calling process to exit.
+//
+
+package console
+
+import (
+	"os"
+
+	"golang.org/x/sys/unix"
+)
+
+const (
+	cmdTcGet = unix.TIOCGETA
+	cmdTcSet = unix.TIOCSETA
+)
+
+func ptsname(f *os.File) (string, error) {
+	panic("ptsname() support requires cgo.")
+}
+
+func unlockpt(f *os.File) error {
+	panic("unlockpt() support requires cgo.")
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/containerd/console/tc_unix.go 0.21.3-0ubuntu1/vendor/github.com/containerd/console/tc_unix.go
--- 0.19.3+ds1-4/vendor/github.com/containerd/console/tc_unix.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/containerd/console/tc_unix.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,92 @@
+//go:build darwin || freebsd || linux || netbsd || openbsd || zos
+// +build darwin freebsd linux netbsd openbsd zos
+
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package console
+
+import (
+	"golang.org/x/sys/unix"
+)
+
+func tcget(fd uintptr, p *unix.Termios) error {
+	termios, err := unix.IoctlGetTermios(int(fd), cmdTcGet)
+	if err != nil {
+		return err
+	}
+	*p = *termios
+	return nil
+}
+
+func tcset(fd uintptr, p *unix.Termios) error {
+	return unix.IoctlSetTermios(int(fd), cmdTcSet, p)
+}
+
+func tcgwinsz(fd uintptr) (WinSize, error) {
+	var ws WinSize
+
+	uws, err := unix.IoctlGetWinsize(int(fd), unix.TIOCGWINSZ)
+	if err != nil {
+		return ws, err
+	}
+
+	// Translate from unix.Winsize to console.WinSize
+	ws.Height = uws.Row
+	ws.Width = uws.Col
+	ws.x = uws.Xpixel
+	ws.y = uws.Ypixel
+	return ws, nil
+}
+
+func tcswinsz(fd uintptr, ws WinSize) error {
+	// Translate from console.WinSize to unix.Winsize
+
+	var uws unix.Winsize
+	uws.Row = ws.Height
+	uws.Col = ws.Width
+	uws.Xpixel = ws.x
+	uws.Ypixel = ws.y
+
+	return unix.IoctlSetWinsize(int(fd), unix.TIOCSWINSZ, &uws)
+}
+
+func setONLCR(fd uintptr, enable bool) error {
+	var termios unix.Termios
+	if err := tcget(fd, &termios); err != nil {
+		return err
+	}
+	if enable {
+		// Set +onlcr so we can act like a real terminal
+		termios.Oflag |= unix.ONLCR
+	} else {
+		// Set -onlcr so we don't have to deal with \r.
+		termios.Oflag &^= unix.ONLCR
+	}
+	return tcset(fd, &termios)
+}
+
+func cfmakeraw(t unix.Termios) unix.Termios {
+	t.Iflag &^= (unix.IGNBRK | unix.BRKINT | unix.PARMRK | unix.ISTRIP | unix.INLCR | unix.IGNCR | unix.ICRNL | unix.IXON)
+	t.Oflag &^= unix.OPOST
+	t.Lflag &^= (unix.ECHO | unix.ECHONL | unix.ICANON | unix.ISIG | unix.IEXTEN)
+	t.Cflag &^= (unix.CSIZE | unix.PARENB)
+	t.Cflag |= unix.CS8
+	t.Cc[unix.VMIN] = 1
+	t.Cc[unix.VTIME] = 0
+
+	return t
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/containerd/console/tc_zos.go 0.21.3-0ubuntu1/vendor/github.com/containerd/console/tc_zos.go
--- 0.19.3+ds1-4/vendor/github.com/containerd/console/tc_zos.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/containerd/console/tc_zos.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,39 @@
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package console
+
+import (
+	"os"
+	"strings"
+
+	"golang.org/x/sys/unix"
+)
+
+const (
+	cmdTcGet = unix.TCGETS
+	cmdTcSet = unix.TCSETS
+)
+
+// unlockpt is a no-op on zos.
+func unlockpt(_ *os.File) error {
+	return nil
+}
+
+// ptsname retrieves the name of the first available pts for the given master.
+func ptsname(f *os.File) (string, error) {
+	return "/dev/ttyp" + strings.TrimPrefix(f.Name(), "/dev/ptyp"), nil
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/containerd/containerd/api/LICENSE 0.21.3-0ubuntu1/vendor/github.com/containerd/containerd/api/LICENSE
--- 0.19.3+ds1-4/vendor/github.com/containerd/containerd/api/LICENSE	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/containerd/containerd/api/LICENSE	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,191 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        https://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   Copyright The containerd Authors
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       https://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff -pruN 0.19.3+ds1-4/vendor/github.com/containerd/containerd/api/services/content/v1/content.pb.go 0.21.3-0ubuntu1/vendor/github.com/containerd/containerd/api/services/content/v1/content.pb.go
--- 0.19.3+ds1-4/vendor/github.com/containerd/containerd/api/services/content/v1/content.pb.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/containerd/containerd/api/services/content/v1/content.pb.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,1788 @@
+//
+//Copyright The containerd Authors.
+//
+//Licensed under the Apache License, Version 2.0 (the "License");
+//you may not use this file except in compliance with the License.
+//You may obtain a copy of the License at
+//
+//http://www.apache.org/licenses/LICENSE-2.0
+//
+//Unless required by applicable law or agreed to in writing, software
+//distributed under the License is distributed on an "AS IS" BASIS,
+//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//See the License for the specific language governing permissions and
+//limitations under the License.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// 	protoc-gen-go v1.28.1
+// 	protoc        v3.20.1
+// source: github.com/containerd/containerd/api/services/content/v1/content.proto
+
+package content
+
+import (
+	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+	emptypb "google.golang.org/protobuf/types/known/emptypb"
+	fieldmaskpb "google.golang.org/protobuf/types/known/fieldmaskpb"
+	timestamppb "google.golang.org/protobuf/types/known/timestamppb"
+	reflect "reflect"
+	sync "sync"
+)
+
+const (
+	// Verify that this generated code is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+	// Verify that runtime/protoimpl is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// WriteAction defines the behavior of a WriteRequest.
+type WriteAction int32
+
+const (
+	// WriteActionStat instructs the writer to return the current status while
+	// holding the lock on the write.
+	WriteAction_STAT WriteAction = 0
+	// WriteActionWrite sets the action for the write request to write data.
+	//
+	// Any data included will be written at the provided offset. The
+	// transaction will be left open for further writes.
+	//
+	// This is the default.
+	WriteAction_WRITE WriteAction = 1
+	// WriteActionCommit will write any outstanding data in the message and
+	// commit the write, storing it under the digest.
+	//
+	// This can be used in a single message to send the data, verify it and
+	// commit it.
+	//
+	// This action will always terminate the write.
+	WriteAction_COMMIT WriteAction = 2
+)
+
+// Enum value maps for WriteAction.
+var (
+	WriteAction_name = map[int32]string{
+		0: "STAT",
+		1: "WRITE",
+		2: "COMMIT",
+	}
+	WriteAction_value = map[string]int32{
+		"STAT":   0,
+		"WRITE":  1,
+		"COMMIT": 2,
+	}
+)
+
+func (x WriteAction) Enum() *WriteAction {
+	p := new(WriteAction)
+	*p = x
+	return p
+}
+
+func (x WriteAction) String() string {
+	return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (WriteAction) Descriptor() protoreflect.EnumDescriptor {
+	return file_github_com_containerd_containerd_api_services_content_v1_content_proto_enumTypes[0].Descriptor()
+}
+
+func (WriteAction) Type() protoreflect.EnumType {
+	return &file_github_com_containerd_containerd_api_services_content_v1_content_proto_enumTypes[0]
+}
+
+func (x WriteAction) Number() protoreflect.EnumNumber {
+	return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use WriteAction.Descriptor instead.
+func (WriteAction) EnumDescriptor() ([]byte, []int) {
+	return file_github_com_containerd_containerd_api_services_content_v1_content_proto_rawDescGZIP(), []int{0}
+}
+
+type Info struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// Digest is the hash identity of the blob.
+	Digest string `protobuf:"bytes,1,opt,name=digest,proto3" json:"digest,omitempty"`
+	// Size is the total number of bytes in the blob.
+	Size int64 `protobuf:"varint,2,opt,name=size,proto3" json:"size,omitempty"`
+	// CreatedAt provides the time at which the blob was committed.
+	CreatedAt *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"`
+	// UpdatedAt provides the time the info was last updated.
+	UpdatedAt *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=updated_at,json=updatedAt,proto3" json:"updated_at,omitempty"`
+	// Labels are arbitrary data on snapshots.
+	//
+	// The combined size of a key/value pair cannot exceed 4096 bytes.
+	Labels map[string]string `protobuf:"bytes,5,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+}
+
+func (x *Info) Reset() {
+	*x = Info{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_github_com_containerd_containerd_api_services_content_v1_content_proto_msgTypes[0]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *Info) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Info) ProtoMessage() {}
+
+func (x *Info) ProtoReflect() protoreflect.Message {
+	mi := &file_github_com_containerd_containerd_api_services_content_v1_content_proto_msgTypes[0]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use Info.ProtoReflect.Descriptor instead.
+func (*Info) Descriptor() ([]byte, []int) {
+	return file_github_com_containerd_containerd_api_services_content_v1_content_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *Info) GetDigest() string {
+	if x != nil {
+		return x.Digest
+	}
+	return ""
+}
+
+func (x *Info) GetSize() int64 {
+	if x != nil {
+		return x.Size
+	}
+	return 0
+}
+
+func (x *Info) GetCreatedAt() *timestamppb.Timestamp {
+	if x != nil {
+		return x.CreatedAt
+	}
+	return nil
+}
+
+func (x *Info) GetUpdatedAt() *timestamppb.Timestamp {
+	if x != nil {
+		return x.UpdatedAt
+	}
+	return nil
+}
+
+func (x *Info) GetLabels() map[string]string {
+	if x != nil {
+		return x.Labels
+	}
+	return nil
+}
+
+type InfoRequest struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	Digest string `protobuf:"bytes,1,opt,name=digest,proto3" json:"digest,omitempty"`
+}
+
+func (x *InfoRequest) Reset() {
+	*x = InfoRequest{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_github_com_containerd_containerd_api_services_content_v1_content_proto_msgTypes[1]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *InfoRequest) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*InfoRequest) ProtoMessage() {}
+
+func (x *InfoRequest) ProtoReflect() protoreflect.Message {
+	mi := &file_github_com_containerd_containerd_api_services_content_v1_content_proto_msgTypes[1]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use InfoRequest.ProtoReflect.Descriptor instead.
+func (*InfoRequest) Descriptor() ([]byte, []int) {
+	return file_github_com_containerd_containerd_api_services_content_v1_content_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *InfoRequest) GetDigest() string {
+	if x != nil {
+		return x.Digest
+	}
+	return ""
+}
+
+type InfoResponse struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	Info *Info `protobuf:"bytes,1,opt,name=info,proto3" json:"info,omitempty"`
+}
+
+func (x *InfoResponse) Reset() {
+	*x = InfoResponse{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_github_com_containerd_containerd_api_services_content_v1_content_proto_msgTypes[2]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *InfoResponse) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*InfoResponse) ProtoMessage() {}
+
+func (x *InfoResponse) ProtoReflect() protoreflect.Message {
+	mi := &file_github_com_containerd_containerd_api_services_content_v1_content_proto_msgTypes[2]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use InfoResponse.ProtoReflect.Descriptor instead.
+func (*InfoResponse) Descriptor() ([]byte, []int) {
+	return file_github_com_containerd_containerd_api_services_content_v1_content_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *InfoResponse) GetInfo() *Info {
+	if x != nil {
+		return x.Info
+	}
+	return nil
+}
+
+type UpdateRequest struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	Info *Info `protobuf:"bytes,1,opt,name=info,proto3" json:"info,omitempty"`
+	// UpdateMask specifies which fields to perform the update on. If empty,
+	// the operation applies to all fields.
+	//
+	// In info, Digest, Size, and CreatedAt are immutable,
+	// other field may be updated using this mask.
+	// If no mask is provided, all mutable field are updated.
+	UpdateMask *fieldmaskpb.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"`
+}
+
+func (x *UpdateRequest) Reset() {
+	*x = UpdateRequest{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_github_com_containerd_containerd_api_services_content_v1_content_proto_msgTypes[3]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *UpdateRequest) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*UpdateRequest) ProtoMessage() {}
+
+func (x *UpdateRequest) ProtoReflect() protoreflect.Message {
+	mi := &file_github_com_containerd_containerd_api_services_content_v1_content_proto_msgTypes[3]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use UpdateRequest.ProtoReflect.Descriptor instead.
+func (*UpdateRequest) Descriptor() ([]byte, []int) {
+	return file_github_com_containerd_containerd_api_services_content_v1_content_proto_rawDescGZIP(), []int{3}
+}
+
+func (x *UpdateRequest) GetInfo() *Info {
+	if x != nil {
+		return x.Info
+	}
+	return nil
+}
+
+func (x *UpdateRequest) GetUpdateMask() *fieldmaskpb.FieldMask {
+	if x != nil {
+		return x.UpdateMask
+	}
+	return nil
+}
+
+type UpdateResponse struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	Info *Info `protobuf:"bytes,1,opt,name=info,proto3" json:"info,omitempty"`
+}
+
+func (x *UpdateResponse) Reset() {
+	*x = UpdateResponse{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_github_com_containerd_containerd_api_services_content_v1_content_proto_msgTypes[4]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *UpdateResponse) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*UpdateResponse) ProtoMessage() {}
+
+func (x *UpdateResponse) ProtoReflect() protoreflect.Message {
+	mi := &file_github_com_containerd_containerd_api_services_content_v1_content_proto_msgTypes[4]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use UpdateResponse.ProtoReflect.Descriptor instead.
+func (*UpdateResponse) Descriptor() ([]byte, []int) {
+	return file_github_com_containerd_containerd_api_services_content_v1_content_proto_rawDescGZIP(), []int{4}
+}
+
+func (x *UpdateResponse) GetInfo() *Info {
+	if x != nil {
+		return x.Info
+	}
+	return nil
+}
+
+type ListContentRequest struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// Filters contains one or more filters using the syntax defined in the
+	// containerd filter package.
+	//
+	// The returned result will be those that match any of the provided
+	// filters. Expanded, containers that match the following will be
+	// returned:
+	//
+	//	filters[0] or filters[1] or ... or filters[n-1] or filters[n]
+	//
+	// If filters is zero-length or nil, all items will be returned.
+	Filters []string `protobuf:"bytes,1,rep,name=filters,proto3" json:"filters,omitempty"`
+}
+
+func (x *ListContentRequest) Reset() {
+	*x = ListContentRequest{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_github_com_containerd_containerd_api_services_content_v1_content_proto_msgTypes[5]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *ListContentRequest) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ListContentRequest) ProtoMessage() {}
+
+func (x *ListContentRequest) ProtoReflect() protoreflect.Message {
+	mi := &file_github_com_containerd_containerd_api_services_content_v1_content_proto_msgTypes[5]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use ListContentRequest.ProtoReflect.Descriptor instead.
+func (*ListContentRequest) Descriptor() ([]byte, []int) {
+	return file_github_com_containerd_containerd_api_services_content_v1_content_proto_rawDescGZIP(), []int{5}
+}
+
+func (x *ListContentRequest) GetFilters() []string {
+	if x != nil {
+		return x.Filters
+	}
+	return nil
+}
+
+type ListContentResponse struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	Info []*Info `protobuf:"bytes,1,rep,name=info,proto3" json:"info,omitempty"`
+}
+
+func (x *ListContentResponse) Reset() {
+	*x = ListContentResponse{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_github_com_containerd_containerd_api_services_content_v1_content_proto_msgTypes[6]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *ListContentResponse) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ListContentResponse) ProtoMessage() {}
+
+func (x *ListContentResponse) ProtoReflect() protoreflect.Message {
+	mi := &file_github_com_containerd_containerd_api_services_content_v1_content_proto_msgTypes[6]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use ListContentResponse.ProtoReflect.Descriptor instead.
+func (*ListContentResponse) Descriptor() ([]byte, []int) {
+	return file_github_com_containerd_containerd_api_services_content_v1_content_proto_rawDescGZIP(), []int{6}
+}
+
+func (x *ListContentResponse) GetInfo() []*Info {
+	if x != nil {
+		return x.Info
+	}
+	return nil
+}
+
+type DeleteContentRequest struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// Digest specifies which content to delete.
+	Digest string `protobuf:"bytes,1,opt,name=digest,proto3" json:"digest,omitempty"`
+}
+
+func (x *DeleteContentRequest) Reset() {
+	*x = DeleteContentRequest{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_github_com_containerd_containerd_api_services_content_v1_content_proto_msgTypes[7]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *DeleteContentRequest) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*DeleteContentRequest) ProtoMessage() {}
+
+func (x *DeleteContentRequest) ProtoReflect() protoreflect.Message {
+	mi := &file_github_com_containerd_containerd_api_services_content_v1_content_proto_msgTypes[7]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use DeleteContentRequest.ProtoReflect.Descriptor instead.
+func (*DeleteContentRequest) Descriptor() ([]byte, []int) {
+	return file_github_com_containerd_containerd_api_services_content_v1_content_proto_rawDescGZIP(), []int{7}
+}
+
+func (x *DeleteContentRequest) GetDigest() string {
+	if x != nil {
+		return x.Digest
+	}
+	return ""
+}
+
+// ReadContentRequest defines the fields that make up a request to read a portion of
+// data from a stored object.
+type ReadContentRequest struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// Digest is the hash identity to read.
+	Digest string `protobuf:"bytes,1,opt,name=digest,proto3" json:"digest,omitempty"`
+	// Offset specifies the number of bytes from the start at which to begin
+	// the read. If zero or less, the read will be from the start. This uses
+	// standard zero-indexed semantics.
+	Offset int64 `protobuf:"varint,2,opt,name=offset,proto3" json:"offset,omitempty"`
+	// size is the total size of the read. If zero, the entire blob will be
+	// returned by the service.
+	Size int64 `protobuf:"varint,3,opt,name=size,proto3" json:"size,omitempty"`
+}
+
+func (x *ReadContentRequest) Reset() {
+	*x = ReadContentRequest{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_github_com_containerd_containerd_api_services_content_v1_content_proto_msgTypes[8]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *ReadContentRequest) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ReadContentRequest) ProtoMessage() {}
+
+func (x *ReadContentRequest) ProtoReflect() protoreflect.Message {
+	mi := &file_github_com_containerd_containerd_api_services_content_v1_content_proto_msgTypes[8]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use ReadContentRequest.ProtoReflect.Descriptor instead.
+func (*ReadContentRequest) Descriptor() ([]byte, []int) {
+	return file_github_com_containerd_containerd_api_services_content_v1_content_proto_rawDescGZIP(), []int{8}
+}
+
+func (x *ReadContentRequest) GetDigest() string {
+	if x != nil {
+		return x.Digest
+	}
+	return ""
+}
+
+func (x *ReadContentRequest) GetOffset() int64 {
+	if x != nil {
+		return x.Offset
+	}
+	return 0
+}
+
+func (x *ReadContentRequest) GetSize() int64 {
+	if x != nil {
+		return x.Size
+	}
+	return 0
+}
+
+// ReadContentResponse carries byte data for a read request.
+type ReadContentResponse struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	Offset int64  `protobuf:"varint,1,opt,name=offset,proto3" json:"offset,omitempty"` // offset of the returned data
+	Data   []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"`      // actual data
+}
+
+func (x *ReadContentResponse) Reset() {
+	*x = ReadContentResponse{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_github_com_containerd_containerd_api_services_content_v1_content_proto_msgTypes[9]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *ReadContentResponse) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ReadContentResponse) ProtoMessage() {}
+
+func (x *ReadContentResponse) ProtoReflect() protoreflect.Message {
+	mi := &file_github_com_containerd_containerd_api_services_content_v1_content_proto_msgTypes[9]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use ReadContentResponse.ProtoReflect.Descriptor instead.
+func (*ReadContentResponse) Descriptor() ([]byte, []int) {
+	return file_github_com_containerd_containerd_api_services_content_v1_content_proto_rawDescGZIP(), []int{9}
+}
+
+func (x *ReadContentResponse) GetOffset() int64 {
+	if x != nil {
+		return x.Offset
+	}
+	return 0
+}
+
+func (x *ReadContentResponse) GetData() []byte {
+	if x != nil {
+		return x.Data
+	}
+	return nil
+}
+
+type Status struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	StartedAt *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=started_at,json=startedAt,proto3" json:"started_at,omitempty"`
+	UpdatedAt *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=updated_at,json=updatedAt,proto3" json:"updated_at,omitempty"`
+	Ref       string                 `protobuf:"bytes,3,opt,name=ref,proto3" json:"ref,omitempty"`
+	Offset    int64                  `protobuf:"varint,4,opt,name=offset,proto3" json:"offset,omitempty"`
+	Total     int64                  `protobuf:"varint,5,opt,name=total,proto3" json:"total,omitempty"`
+	Expected  string                 `protobuf:"bytes,6,opt,name=expected,proto3" json:"expected,omitempty"`
+}
+
+func (x *Status) Reset() {
+	*x = Status{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_github_com_containerd_containerd_api_services_content_v1_content_proto_msgTypes[10]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *Status) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Status) ProtoMessage() {}
+
+func (x *Status) ProtoReflect() protoreflect.Message {
+	mi := &file_github_com_containerd_containerd_api_services_content_v1_content_proto_msgTypes[10]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use Status.ProtoReflect.Descriptor instead.
+func (*Status) Descriptor() ([]byte, []int) {
+	return file_github_com_containerd_containerd_api_services_content_v1_content_proto_rawDescGZIP(), []int{10}
+}
+
+func (x *Status) GetStartedAt() *timestamppb.Timestamp {
+	if x != nil {
+		return x.StartedAt
+	}
+	return nil
+}
+
+func (x *Status) GetUpdatedAt() *timestamppb.Timestamp {
+	if x != nil {
+		return x.UpdatedAt
+	}
+	return nil
+}
+
+func (x *Status) GetRef() string {
+	if x != nil {
+		return x.Ref
+	}
+	return ""
+}
+
+func (x *Status) GetOffset() int64 {
+	if x != nil {
+		return x.Offset
+	}
+	return 0
+}
+
+func (x *Status) GetTotal() int64 {
+	if x != nil {
+		return x.Total
+	}
+	return 0
+}
+
+func (x *Status) GetExpected() string {
+	if x != nil {
+		return x.Expected
+	}
+	return ""
+}
+
+type StatusRequest struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	Ref string `protobuf:"bytes,1,opt,name=ref,proto3" json:"ref,omitempty"`
+}
+
+func (x *StatusRequest) Reset() {
+	*x = StatusRequest{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_github_com_containerd_containerd_api_services_content_v1_content_proto_msgTypes[11]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *StatusRequest) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*StatusRequest) ProtoMessage() {}
+
+func (x *StatusRequest) ProtoReflect() protoreflect.Message {
+	mi := &file_github_com_containerd_containerd_api_services_content_v1_content_proto_msgTypes[11]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use StatusRequest.ProtoReflect.Descriptor instead.
+func (*StatusRequest) Descriptor() ([]byte, []int) {
+	return file_github_com_containerd_containerd_api_services_content_v1_content_proto_rawDescGZIP(), []int{11}
+}
+
+func (x *StatusRequest) GetRef() string {
+	if x != nil {
+		return x.Ref
+	}
+	return ""
+}
+
+type StatusResponse struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	Status *Status `protobuf:"bytes,1,opt,name=status,proto3" json:"status,omitempty"`
+}
+
+func (x *StatusResponse) Reset() {
+	*x = StatusResponse{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_github_com_containerd_containerd_api_services_content_v1_content_proto_msgTypes[12]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *StatusResponse) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*StatusResponse) ProtoMessage() {}
+
+func (x *StatusResponse) ProtoReflect() protoreflect.Message {
+	mi := &file_github_com_containerd_containerd_api_services_content_v1_content_proto_msgTypes[12]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use StatusResponse.ProtoReflect.Descriptor instead.
+func (*StatusResponse) Descriptor() ([]byte, []int) {
+	return file_github_com_containerd_containerd_api_services_content_v1_content_proto_rawDescGZIP(), []int{12}
+}
+
+func (x *StatusResponse) GetStatus() *Status {
+	if x != nil {
+		return x.Status
+	}
+	return nil
+}
+
+type ListStatusesRequest struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	Filters []string `protobuf:"bytes,1,rep,name=filters,proto3" json:"filters,omitempty"`
+}
+
+func (x *ListStatusesRequest) Reset() {
+	*x = ListStatusesRequest{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_github_com_containerd_containerd_api_services_content_v1_content_proto_msgTypes[13]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *ListStatusesRequest) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ListStatusesRequest) ProtoMessage() {}
+
+func (x *ListStatusesRequest) ProtoReflect() protoreflect.Message {
+	mi := &file_github_com_containerd_containerd_api_services_content_v1_content_proto_msgTypes[13]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use ListStatusesRequest.ProtoReflect.Descriptor instead.
+func (*ListStatusesRequest) Descriptor() ([]byte, []int) {
+	return file_github_com_containerd_containerd_api_services_content_v1_content_proto_rawDescGZIP(), []int{13}
+}
+
+func (x *ListStatusesRequest) GetFilters() []string {
+	if x != nil {
+		return x.Filters
+	}
+	return nil
+}
+
+type ListStatusesResponse struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	Statuses []*Status `protobuf:"bytes,1,rep,name=statuses,proto3" json:"statuses,omitempty"`
+}
+
+func (x *ListStatusesResponse) Reset() {
+	*x = ListStatusesResponse{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_github_com_containerd_containerd_api_services_content_v1_content_proto_msgTypes[14]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *ListStatusesResponse) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ListStatusesResponse) ProtoMessage() {}
+
+func (x *ListStatusesResponse) ProtoReflect() protoreflect.Message {
+	mi := &file_github_com_containerd_containerd_api_services_content_v1_content_proto_msgTypes[14]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use ListStatusesResponse.ProtoReflect.Descriptor instead.
+func (*ListStatusesResponse) Descriptor() ([]byte, []int) {
+	return file_github_com_containerd_containerd_api_services_content_v1_content_proto_rawDescGZIP(), []int{14}
+}
+
+func (x *ListStatusesResponse) GetStatuses() []*Status {
+	if x != nil {
+		return x.Statuses
+	}
+	return nil
+}
+
+// WriteContentRequest writes data to the request ref at offset.
+type WriteContentRequest struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// Action sets the behavior of the write.
+	//
+	// When this is a write and the ref is not yet allocated, the ref will be
+	// allocated and the data will be written at offset.
+	//
+	// If the action is write and the ref is allocated, it will accept data to
+	// an offset that has not yet been written.
+	//
+	// If the action is write and there is no data, the current write status
+	// will be returned. This works differently from status because the stream
+	// holds a lock.
+	Action WriteAction `protobuf:"varint,1,opt,name=action,proto3,enum=containerd.services.content.v1.WriteAction" json:"action,omitempty"`
+	// Ref identifies the pre-commit object to write to.
+	Ref string `protobuf:"bytes,2,opt,name=ref,proto3" json:"ref,omitempty"`
+	// Total can be set to have the service validate the total size of the
+	// committed content.
+	//
+	// The latest value before or with the commit action message will be use to
+	// validate the content. If the offset overflows total, the service may
+	// report an error. It is only required on one message for the write.
+	//
+	// If the value is zero or less, no validation of the final content will be
+	// performed.
+	Total int64 `protobuf:"varint,3,opt,name=total,proto3" json:"total,omitempty"`
+	// Expected can be set to have the service validate the final content against
+	// the provided digest.
+	//
+	// If the digest is already present in the object store, an AlreadyExists
+	// error will be returned.
+	//
+	// Only the latest version will be used to check the content against the
+	// digest. It is only required to include it on a single message, before or
+	// with the commit action message.
+	Expected string `protobuf:"bytes,4,opt,name=expected,proto3" json:"expected,omitempty"`
+	// Offset specifies the number of bytes from the start at which to begin
+	// the write. For most implementations, this means from the start of the
+	// file. This uses standard, zero-indexed semantics.
+	//
+	// If the action is write, the remote may remove all previously written
+	// data after the offset. Implementations may support arbitrary offsets but
+	// MUST support reseting this value to zero with a write. If an
+	// implementation does not support a write at a particular offset, an
+	// OutOfRange error must be returned.
+	Offset int64 `protobuf:"varint,5,opt,name=offset,proto3" json:"offset,omitempty"`
+	// Data is the actual bytes to be written.
+	//
+	// If this is empty and the message is not a commit, a response will be
+	// returned with the current write state.
+	Data []byte `protobuf:"bytes,6,opt,name=data,proto3" json:"data,omitempty"`
+	// Labels are arbitrary data on snapshots.
+	//
+	// The combined size of a key/value pair cannot exceed 4096 bytes.
+	Labels map[string]string `protobuf:"bytes,7,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+}
+
+func (x *WriteContentRequest) Reset() {
+	*x = WriteContentRequest{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_github_com_containerd_containerd_api_services_content_v1_content_proto_msgTypes[15]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *WriteContentRequest) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*WriteContentRequest) ProtoMessage() {}
+
+func (x *WriteContentRequest) ProtoReflect() protoreflect.Message {
+	mi := &file_github_com_containerd_containerd_api_services_content_v1_content_proto_msgTypes[15]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use WriteContentRequest.ProtoReflect.Descriptor instead.
+func (*WriteContentRequest) Descriptor() ([]byte, []int) {
+	return file_github_com_containerd_containerd_api_services_content_v1_content_proto_rawDescGZIP(), []int{15}
+}
+
+func (x *WriteContentRequest) GetAction() WriteAction {
+	if x != nil {
+		return x.Action
+	}
+	return WriteAction_STAT
+}
+
+func (x *WriteContentRequest) GetRef() string {
+	if x != nil {
+		return x.Ref
+	}
+	return ""
+}
+
+func (x *WriteContentRequest) GetTotal() int64 {
+	if x != nil {
+		return x.Total
+	}
+	return 0
+}
+
+func (x *WriteContentRequest) GetExpected() string {
+	if x != nil {
+		return x.Expected
+	}
+	return ""
+}
+
+func (x *WriteContentRequest) GetOffset() int64 {
+	if x != nil {
+		return x.Offset
+	}
+	return 0
+}
+
+func (x *WriteContentRequest) GetData() []byte {
+	if x != nil {
+		return x.Data
+	}
+	return nil
+}
+
+func (x *WriteContentRequest) GetLabels() map[string]string {
+	if x != nil {
+		return x.Labels
+	}
+	return nil
+}
+
+// WriteContentResponse is returned on the culmination of a write call.
+type WriteContentResponse struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// Action contains the action for the final message of the stream. A writer
+	// should confirm that they match the intended result.
+	Action WriteAction `protobuf:"varint,1,opt,name=action,proto3,enum=containerd.services.content.v1.WriteAction" json:"action,omitempty"`
+	// StartedAt provides the time at which the write began.
+	//
+	// This must be set for stat and commit write actions. All other write
+	// actions may omit this.
+	StartedAt *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=started_at,json=startedAt,proto3" json:"started_at,omitempty"`
+	// UpdatedAt provides the last time of a successful write.
+	//
+	// This must be set for stat and commit write actions. All other write
+	// actions may omit this.
+	UpdatedAt *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=updated_at,json=updatedAt,proto3" json:"updated_at,omitempty"`
+	// Offset is the current committed size for the write.
+	Offset int64 `protobuf:"varint,4,opt,name=offset,proto3" json:"offset,omitempty"`
+	// Total provides the current, expected total size of the write.
+	//
+	// We include this to provide consistency with the Status structure on the
+	// client writer.
+	//
+	// This is only valid on the Stat and Commit response.
+	Total int64 `protobuf:"varint,5,opt,name=total,proto3" json:"total,omitempty"`
+	// Digest, if present, includes the digest up to the currently committed
+	// bytes. If action is commit, this field will be set. It is implementation
+	// defined if this is set for other actions.
+	Digest string `protobuf:"bytes,6,opt,name=digest,proto3" json:"digest,omitempty"`
+}
+
+func (x *WriteContentResponse) Reset() {
+	*x = WriteContentResponse{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_github_com_containerd_containerd_api_services_content_v1_content_proto_msgTypes[16]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *WriteContentResponse) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*WriteContentResponse) ProtoMessage() {}
+
+func (x *WriteContentResponse) ProtoReflect() protoreflect.Message {
+	mi := &file_github_com_containerd_containerd_api_services_content_v1_content_proto_msgTypes[16]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use WriteContentResponse.ProtoReflect.Descriptor instead.
+func (*WriteContentResponse) Descriptor() ([]byte, []int) {
+	return file_github_com_containerd_containerd_api_services_content_v1_content_proto_rawDescGZIP(), []int{16}
+}
+
+func (x *WriteContentResponse) GetAction() WriteAction {
+	if x != nil {
+		return x.Action
+	}
+	return WriteAction_STAT
+}
+
+func (x *WriteContentResponse) GetStartedAt() *timestamppb.Timestamp {
+	if x != nil {
+		return x.StartedAt
+	}
+	return nil
+}
+
+func (x *WriteContentResponse) GetUpdatedAt() *timestamppb.Timestamp {
+	if x != nil {
+		return x.UpdatedAt
+	}
+	return nil
+}
+
+func (x *WriteContentResponse) GetOffset() int64 {
+	if x != nil {
+		return x.Offset
+	}
+	return 0
+}
+
+func (x *WriteContentResponse) GetTotal() int64 {
+	if x != nil {
+		return x.Total
+	}
+	return 0
+}
+
+func (x *WriteContentResponse) GetDigest() string {
+	if x != nil {
+		return x.Digest
+	}
+	return ""
+}
+
+type AbortRequest struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	Ref string `protobuf:"bytes,1,opt,name=ref,proto3" json:"ref,omitempty"`
+}
+
+func (x *AbortRequest) Reset() {
+	*x = AbortRequest{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_github_com_containerd_containerd_api_services_content_v1_content_proto_msgTypes[17]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *AbortRequest) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*AbortRequest) ProtoMessage() {}
+
+func (x *AbortRequest) ProtoReflect() protoreflect.Message {
+	mi := &file_github_com_containerd_containerd_api_services_content_v1_content_proto_msgTypes[17]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use AbortRequest.ProtoReflect.Descriptor instead.
+func (*AbortRequest) Descriptor() ([]byte, []int) {
+	return file_github_com_containerd_containerd_api_services_content_v1_content_proto_rawDescGZIP(), []int{17}
+}
+
+func (x *AbortRequest) GetRef() string {
+	if x != nil {
+		return x.Ref
+	}
+	return ""
+}
+
+var File_github_com_containerd_containerd_api_services_content_v1_content_proto protoreflect.FileDescriptor
+
+var file_github_com_containerd_containerd_api_services_content_v1_content_proto_rawDesc = []byte{
+	0x0a, 0x46, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e,
+	0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65,
+	0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f,
+	0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x2f, 0x76, 0x31, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x65,
+	0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x1e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69,
+	0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x63, 0x6f,
+	0x6e, 0x74, 0x65, 0x6e, 0x74, 0x2e, 0x76, 0x31, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+	0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f,
+	0x6d, 0x61, 0x73, 0x6b, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67,
+	0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65,
+	0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x67, 0x6f, 0x6f,
+	0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70,
+	0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xad, 0x02, 0x0a, 0x04, 0x49, 0x6e, 0x66,
+	0x6f, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28,
+	0x09, 0x52, 0x06, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x73, 0x69, 0x7a,
+	0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x12, 0x39, 0x0a,
+	0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28,
+	0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+	0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x63,
+	0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74, 0x12, 0x39, 0x0a, 0x0a, 0x75, 0x70, 0x64, 0x61,
+	0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67,
+	0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54,
+	0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65,
+	0x64, 0x41, 0x74, 0x12, 0x48, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x05, 0x20,
+	0x03, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64,
+	0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e,
+	0x74, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73,
+	0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x1a, 0x39, 0x0a,
+	0x0b, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03,
+	0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14,
+	0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76,
+	0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x25, 0x0a, 0x0b, 0x49, 0x6e, 0x66, 0x6f,
+	0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x69, 0x67, 0x65, 0x73,
+	0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x22,
+	0x48, 0x0a, 0x0c, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12,
+	0x38, 0x0a, 0x04, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e,
+	0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69,
+	0x63, 0x65, 0x73, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x49,
+	0x6e, 0x66, 0x6f, 0x52, 0x04, 0x69, 0x6e, 0x66, 0x6f, 0x22, 0x86, 0x01, 0x0a, 0x0d, 0x55, 0x70,
+	0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x38, 0x0a, 0x04, 0x69,
+	0x6e, 0x66, 0x6f, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x63, 0x6f, 0x6e, 0x74,
+	0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e,
+	0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x66, 0x6f, 0x52,
+	0x04, 0x69, 0x6e, 0x66, 0x6f, 0x12, 0x3b, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f,
+	0x6d, 0x61, 0x73, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f,
+	0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65,
+	0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4d, 0x61,
+	0x73, 0x6b, 0x22, 0x4a, 0x0a, 0x0e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70,
+	0x6f, 0x6e, 0x73, 0x65, 0x12, 0x38, 0x0a, 0x04, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x01, 0x20, 0x01,
+	0x28, 0x0b, 0x32, 0x24, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e,
+	0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74,
+	0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x04, 0x69, 0x6e, 0x66, 0x6f, 0x22, 0x2e,
+	0x0a, 0x12, 0x4c, 0x69, 0x73, 0x74, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71,
+	0x75, 0x65, 0x73, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x18,
+	0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x22, 0x4f,
+	0x0a, 0x13, 0x4c, 0x69, 0x73, 0x74, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73,
+	0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x38, 0x0a, 0x04, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x01, 0x20,
+	0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64,
+	0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e,
+	0x74, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x04, 0x69, 0x6e, 0x66, 0x6f, 0x22,
+	0x2e, 0x0a, 0x14, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74,
+	0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x69, 0x67, 0x65, 0x73,
+	0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x22,
+	0x58, 0x0a, 0x12, 0x52, 0x65, 0x61, 0x64, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x52, 0x65,
+	0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x18,
+	0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a,
+	0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, 0x6f,
+	0x66, 0x66, 0x73, 0x65, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x03, 0x20,
+	0x01, 0x28, 0x03, 0x52, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x22, 0x41, 0x0a, 0x13, 0x52, 0x65, 0x61,
+	0x64, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
+	0x12, 0x16, 0x0a, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03,
+	0x52, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61,
+	0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x22, 0xda, 0x01, 0x0a,
+	0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x39, 0x0a, 0x0a, 0x73, 0x74, 0x61, 0x72, 0x74,
+	0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f,
+	0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69,
+	0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x73, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64,
+	0x41, 0x74, 0x12, 0x39, 0x0a, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74,
+	0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+	0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61,
+	0x6d, 0x70, 0x52, 0x09, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74, 0x12, 0x10, 0x0a,
+	0x03, 0x72, 0x65, 0x66, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x72, 0x65, 0x66, 0x12,
+	0x16, 0x0a, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52,
+	0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x6f, 0x74, 0x61, 0x6c,
+	0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x12, 0x1a, 0x0a,
+	0x08, 0x65, 0x78, 0x70, 0x65, 0x63, 0x74, 0x65, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52,
+	0x08, 0x65, 0x78, 0x70, 0x65, 0x63, 0x74, 0x65, 0x64, 0x22, 0x21, 0x0a, 0x0d, 0x53, 0x74, 0x61,
+	0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x72, 0x65,
+	0x66, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x72, 0x65, 0x66, 0x22, 0x50, 0x0a, 0x0e,
+	0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3e,
+	0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26,
+	0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76,
+	0x69, 0x63, 0x65, 0x73, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x2e, 0x76, 0x31, 0x2e,
+	0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0x2f,
+	0x0a, 0x13, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x65, 0x73, 0x52, 0x65,
+	0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73,
+	0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x22,
+	0x5a, 0x0a, 0x14, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x65, 0x73, 0x52,
+	0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x42, 0x0a, 0x08, 0x73, 0x74, 0x61, 0x74, 0x75,
+	0x73, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x63, 0x6f, 0x6e, 0x74,
+	0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e,
+	0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75,
+	0x73, 0x52, 0x08, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x65, 0x73, 0x22, 0xde, 0x02, 0x0a, 0x13,
+	0x57, 0x72, 0x69, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75,
+	0x65, 0x73, 0x74, 0x12, 0x43, 0x0a, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20,
+	0x01, 0x28, 0x0e, 0x32, 0x2b, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64,
+	0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e,
+	0x74, 0x2e, 0x76, 0x31, 0x2e, 0x57, 0x72, 0x69, 0x74, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e,
+	0x52, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x72, 0x65, 0x66, 0x18,
+	0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x72, 0x65, 0x66, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x6f,
+	0x74, 0x61, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x74, 0x6f, 0x74, 0x61, 0x6c,
+	0x12, 0x1a, 0x0a, 0x08, 0x65, 0x78, 0x70, 0x65, 0x63, 0x74, 0x65, 0x64, 0x18, 0x04, 0x20, 0x01,
+	0x28, 0x09, 0x52, 0x08, 0x65, 0x78, 0x70, 0x65, 0x63, 0x74, 0x65, 0x64, 0x12, 0x16, 0x0a, 0x06,
+	0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, 0x6f, 0x66,
+	0x66, 0x73, 0x65, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x06, 0x20, 0x01,
+	0x28, 0x0c, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x12, 0x57, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65,
+	0x6c, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3f, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61,
+	0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x63,
+	0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x57, 0x72, 0x69, 0x74, 0x65, 0x43,
+	0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x4c, 0x61,
+	0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c,
+	0x73, 0x1a, 0x39, 0x0a, 0x0b, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79,
+	0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b,
+	0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28,
+	0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x97, 0x02, 0x0a,
+	0x14, 0x57, 0x72, 0x69, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73,
+	0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x43, 0x0a, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18,
+	0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2b, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65,
+	0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x63, 0x6f, 0x6e, 0x74,
+	0x65, 0x6e, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x57, 0x72, 0x69, 0x74, 0x65, 0x41, 0x63, 0x74, 0x69,
+	0x6f, 0x6e, 0x52, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x39, 0x0a, 0x0a, 0x73, 0x74,
+	0x61, 0x72, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a,
+	0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
+	0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x73, 0x74, 0x61, 0x72,
+	0x74, 0x65, 0x64, 0x41, 0x74, 0x12, 0x39, 0x0a, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64,
+	0x5f, 0x61, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+	0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65,
+	0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74,
+	0x12, 0x16, 0x0a, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03,
+	0x52, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x6f, 0x74, 0x61,
+	0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x12, 0x16,
+	0x0a, 0x06, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06,
+	0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x22, 0x20, 0x0a, 0x0c, 0x41, 0x62, 0x6f, 0x72, 0x74, 0x52,
+	0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x72, 0x65, 0x66, 0x18, 0x01, 0x20,
+	0x01, 0x28, 0x09, 0x52, 0x03, 0x72, 0x65, 0x66, 0x2a, 0x2e, 0x0a, 0x0b, 0x57, 0x72, 0x69, 0x74,
+	0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x08, 0x0a, 0x04, 0x53, 0x54, 0x41, 0x54, 0x10,
+	0x00, 0x12, 0x09, 0x0a, 0x05, 0x57, 0x52, 0x49, 0x54, 0x45, 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06,
+	0x43, 0x4f, 0x4d, 0x4d, 0x49, 0x54, 0x10, 0x02, 0x32, 0xbe, 0x07, 0x0a, 0x07, 0x43, 0x6f, 0x6e,
+	0x74, 0x65, 0x6e, 0x74, 0x12, 0x61, 0x0a, 0x04, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x2b, 0x2e, 0x63,
+	0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63,
+	0x65, 0x73, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e,
+	0x66, 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c, 0x2e, 0x63, 0x6f, 0x6e, 0x74,
+	0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e,
+	0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x66, 0x6f, 0x52,
+	0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x67, 0x0a, 0x06, 0x55, 0x70, 0x64, 0x61, 0x74,
+	0x65, 0x12, 0x2d, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73,
+	0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x2e,
+	0x76, 0x31, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
+	0x1a, 0x2e, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65,
+	0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x2e, 0x76,
+	0x31, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
+	0x12, 0x71, 0x0a, 0x04, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x32, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61,
+	0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x63,
+	0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x43, 0x6f,
+	0x6e, 0x74, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x33, 0x2e, 0x63,
+	0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63,
+	0x65, 0x73, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69,
+	0x73, 0x74, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
+	0x65, 0x30, 0x01, 0x12, 0x56, 0x0a, 0x06, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x12, 0x34, 0x2e,
+	0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69,
+	0x63, 0x65, 0x73, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x44,
+	0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75,
+	0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
+	0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x71, 0x0a, 0x04, 0x52,
+	0x65, 0x61, 0x64, 0x12, 0x32, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64,
+	0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e,
+	0x74, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74,
+	0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x33, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69,
+	0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x63, 0x6f,
+	0x6e, 0x74, 0x65, 0x6e, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x43, 0x6f, 0x6e,
+	0x74, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x30, 0x01, 0x12, 0x67,
+	0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x2d, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61,
+	0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x63,
+	0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73,
+	0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2e, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69,
+	0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x63, 0x6f,
+	0x6e, 0x74, 0x65, 0x6e, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52,
+	0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x79, 0x0a, 0x0c, 0x4c, 0x69, 0x73, 0x74, 0x53,
+	0x74, 0x61, 0x74, 0x75, 0x73, 0x65, 0x73, 0x12, 0x33, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69,
+	0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x63, 0x6f,
+	0x6e, 0x74, 0x65, 0x6e, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x74, 0x61,
+	0x74, 0x75, 0x73, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x34, 0x2e, 0x63,
+	0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63,
+	0x65, 0x73, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69,
+	0x73, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
+	0x73, 0x65, 0x12, 0x76, 0x0a, 0x05, 0x57, 0x72, 0x69, 0x74, 0x65, 0x12, 0x33, 0x2e, 0x63, 0x6f,
+	0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65,
+	0x73, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x57, 0x72, 0x69,
+	0x74, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
+	0x1a, 0x34, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65,
+	0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x2e, 0x76,
+	0x31, 0x2e, 0x57, 0x72, 0x69, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x52, 0x65,
+	0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x28, 0x01, 0x30, 0x01, 0x12, 0x4d, 0x0a, 0x05, 0x41, 0x62,
+	0x6f, 0x72, 0x74, 0x12, 0x2c, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64,
+	0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e,
+	0x74, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x62, 0x6f, 0x72, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
+	0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+	0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x42, 0x42, 0x5a, 0x40, 0x67, 0x69, 0x74,
+	0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65,
+	0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x61, 0x70,
+	0x69, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x65,
+	0x6e, 0x74, 0x2f, 0x76, 0x31, 0x3b, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x62, 0x06, 0x70,
+	0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+	file_github_com_containerd_containerd_api_services_content_v1_content_proto_rawDescOnce sync.Once
+	file_github_com_containerd_containerd_api_services_content_v1_content_proto_rawDescData = file_github_com_containerd_containerd_api_services_content_v1_content_proto_rawDesc
+)
+
+func file_github_com_containerd_containerd_api_services_content_v1_content_proto_rawDescGZIP() []byte {
+	file_github_com_containerd_containerd_api_services_content_v1_content_proto_rawDescOnce.Do(func() {
+		file_github_com_containerd_containerd_api_services_content_v1_content_proto_rawDescData = protoimpl.X.CompressGZIP(file_github_com_containerd_containerd_api_services_content_v1_content_proto_rawDescData)
+	})
+	return file_github_com_containerd_containerd_api_services_content_v1_content_proto_rawDescData
+}
+
+var file_github_com_containerd_containerd_api_services_content_v1_content_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
+var file_github_com_containerd_containerd_api_services_content_v1_content_proto_msgTypes = make([]protoimpl.MessageInfo, 20)
+var file_github_com_containerd_containerd_api_services_content_v1_content_proto_goTypes = []interface{}{
+	(WriteAction)(0),              // 0: containerd.services.content.v1.WriteAction
+	(*Info)(nil),                  // 1: containerd.services.content.v1.Info
+	(*InfoRequest)(nil),           // 2: containerd.services.content.v1.InfoRequest
+	(*InfoResponse)(nil),          // 3: containerd.services.content.v1.InfoResponse
+	(*UpdateRequest)(nil),         // 4: containerd.services.content.v1.UpdateRequest
+	(*UpdateResponse)(nil),        // 5: containerd.services.content.v1.UpdateResponse
+	(*ListContentRequest)(nil),    // 6: containerd.services.content.v1.ListContentRequest
+	(*ListContentResponse)(nil),   // 7: containerd.services.content.v1.ListContentResponse
+	(*DeleteContentRequest)(nil),  // 8: containerd.services.content.v1.DeleteContentRequest
+	(*ReadContentRequest)(nil),    // 9: containerd.services.content.v1.ReadContentRequest
+	(*ReadContentResponse)(nil),   // 10: containerd.services.content.v1.ReadContentResponse
+	(*Status)(nil),                // 11: containerd.services.content.v1.Status
+	(*StatusRequest)(nil),         // 12: containerd.services.content.v1.StatusRequest
+	(*StatusResponse)(nil),        // 13: containerd.services.content.v1.StatusResponse
+	(*ListStatusesRequest)(nil),   // 14: containerd.services.content.v1.ListStatusesRequest
+	(*ListStatusesResponse)(nil),  // 15: containerd.services.content.v1.ListStatusesResponse
+	(*WriteContentRequest)(nil),   // 16: containerd.services.content.v1.WriteContentRequest
+	(*WriteContentResponse)(nil),  // 17: containerd.services.content.v1.WriteContentResponse
+	(*AbortRequest)(nil),          // 18: containerd.services.content.v1.AbortRequest
+	nil,                           // 19: containerd.services.content.v1.Info.LabelsEntry
+	nil,                           // 20: containerd.services.content.v1.WriteContentRequest.LabelsEntry
+	(*timestamppb.Timestamp)(nil), // 21: google.protobuf.Timestamp
+	(*fieldmaskpb.FieldMask)(nil), // 22: google.protobuf.FieldMask
+	(*emptypb.Empty)(nil),         // 23: google.protobuf.Empty
+}
+var file_github_com_containerd_containerd_api_services_content_v1_content_proto_depIdxs = []int32{
+	21, // 0: containerd.services.content.v1.Info.created_at:type_name -> google.protobuf.Timestamp
+	21, // 1: containerd.services.content.v1.Info.updated_at:type_name -> google.protobuf.Timestamp
+	19, // 2: containerd.services.content.v1.Info.labels:type_name -> containerd.services.content.v1.Info.LabelsEntry
+	1,  // 3: containerd.services.content.v1.InfoResponse.info:type_name -> containerd.services.content.v1.Info
+	1,  // 4: containerd.services.content.v1.UpdateRequest.info:type_name -> containerd.services.content.v1.Info
+	22, // 5: containerd.services.content.v1.UpdateRequest.update_mask:type_name -> google.protobuf.FieldMask
+	1,  // 6: containerd.services.content.v1.UpdateResponse.info:type_name -> containerd.services.content.v1.Info
+	1,  // 7: containerd.services.content.v1.ListContentResponse.info:type_name -> containerd.services.content.v1.Info
+	21, // 8: containerd.services.content.v1.Status.started_at:type_name -> google.protobuf.Timestamp
+	21, // 9: containerd.services.content.v1.Status.updated_at:type_name -> google.protobuf.Timestamp
+	11, // 10: containerd.services.content.v1.StatusResponse.status:type_name -> containerd.services.content.v1.Status
+	11, // 11: containerd.services.content.v1.ListStatusesResponse.statuses:type_name -> containerd.services.content.v1.Status
+	0,  // 12: containerd.services.content.v1.WriteContentRequest.action:type_name -> containerd.services.content.v1.WriteAction
+	20, // 13: containerd.services.content.v1.WriteContentRequest.labels:type_name -> containerd.services.content.v1.WriteContentRequest.LabelsEntry
+	0,  // 14: containerd.services.content.v1.WriteContentResponse.action:type_name -> containerd.services.content.v1.WriteAction
+	21, // 15: containerd.services.content.v1.WriteContentResponse.started_at:type_name -> google.protobuf.Timestamp
+	21, // 16: containerd.services.content.v1.WriteContentResponse.updated_at:type_name -> google.protobuf.Timestamp
+	2,  // 17: containerd.services.content.v1.Content.Info:input_type -> containerd.services.content.v1.InfoRequest
+	4,  // 18: containerd.services.content.v1.Content.Update:input_type -> containerd.services.content.v1.UpdateRequest
+	6,  // 19: containerd.services.content.v1.Content.List:input_type -> containerd.services.content.v1.ListContentRequest
+	8,  // 20: containerd.services.content.v1.Content.Delete:input_type -> containerd.services.content.v1.DeleteContentRequest
+	9,  // 21: containerd.services.content.v1.Content.Read:input_type -> containerd.services.content.v1.ReadContentRequest
+	12, // 22: containerd.services.content.v1.Content.Status:input_type -> containerd.services.content.v1.StatusRequest
+	14, // 23: containerd.services.content.v1.Content.ListStatuses:input_type -> containerd.services.content.v1.ListStatusesRequest
+	16, // 24: containerd.services.content.v1.Content.Write:input_type -> containerd.services.content.v1.WriteContentRequest
+	18, // 25: containerd.services.content.v1.Content.Abort:input_type -> containerd.services.content.v1.AbortRequest
+	3,  // 26: containerd.services.content.v1.Content.Info:output_type -> containerd.services.content.v1.InfoResponse
+	5,  // 27: containerd.services.content.v1.Content.Update:output_type -> containerd.services.content.v1.UpdateResponse
+	7,  // 28: containerd.services.content.v1.Content.List:output_type -> containerd.services.content.v1.ListContentResponse
+	23, // 29: containerd.services.content.v1.Content.Delete:output_type -> google.protobuf.Empty
+	10, // 30: containerd.services.content.v1.Content.Read:output_type -> containerd.services.content.v1.ReadContentResponse
+	13, // 31: containerd.services.content.v1.Content.Status:output_type -> containerd.services.content.v1.StatusResponse
+	15, // 32: containerd.services.content.v1.Content.ListStatuses:output_type -> containerd.services.content.v1.ListStatusesResponse
+	17, // 33: containerd.services.content.v1.Content.Write:output_type -> containerd.services.content.v1.WriteContentResponse
+	23, // 34: containerd.services.content.v1.Content.Abort:output_type -> google.protobuf.Empty
+	26, // [26:35] is the sub-list for method output_type
+	17, // [17:26] is the sub-list for method input_type
+	17, // [17:17] is the sub-list for extension type_name
+	17, // [17:17] is the sub-list for extension extendee
+	0,  // [0:17] is the sub-list for field type_name
+}
+
+func init() { file_github_com_containerd_containerd_api_services_content_v1_content_proto_init() }
+func file_github_com_containerd_containerd_api_services_content_v1_content_proto_init() {
+	if File_github_com_containerd_containerd_api_services_content_v1_content_proto != nil {
+		return
+	}
+	if !protoimpl.UnsafeEnabled {
+		file_github_com_containerd_containerd_api_services_content_v1_content_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*Info); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_github_com_containerd_containerd_api_services_content_v1_content_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*InfoRequest); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_github_com_containerd_containerd_api_services_content_v1_content_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*InfoResponse); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_github_com_containerd_containerd_api_services_content_v1_content_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*UpdateRequest); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_github_com_containerd_containerd_api_services_content_v1_content_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*UpdateResponse); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_github_com_containerd_containerd_api_services_content_v1_content_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*ListContentRequest); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_github_com_containerd_containerd_api_services_content_v1_content_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*ListContentResponse); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_github_com_containerd_containerd_api_services_content_v1_content_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*DeleteContentRequest); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_github_com_containerd_containerd_api_services_content_v1_content_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*ReadContentRequest); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_github_com_containerd_containerd_api_services_content_v1_content_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*ReadContentResponse); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_github_com_containerd_containerd_api_services_content_v1_content_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*Status); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_github_com_containerd_containerd_api_services_content_v1_content_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*StatusRequest); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_github_com_containerd_containerd_api_services_content_v1_content_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*StatusResponse); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_github_com_containerd_containerd_api_services_content_v1_content_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*ListStatusesRequest); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_github_com_containerd_containerd_api_services_content_v1_content_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*ListStatusesResponse); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_github_com_containerd_containerd_api_services_content_v1_content_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*WriteContentRequest); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_github_com_containerd_containerd_api_services_content_v1_content_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*WriteContentResponse); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_github_com_containerd_containerd_api_services_content_v1_content_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*AbortRequest); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+	}
+	type x struct{}
+	out := protoimpl.TypeBuilder{
+		File: protoimpl.DescBuilder{
+			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+			RawDescriptor: file_github_com_containerd_containerd_api_services_content_v1_content_proto_rawDesc,
+			NumEnums:      1,
+			NumMessages:   20,
+			NumExtensions: 0,
+			NumServices:   1,
+		},
+		GoTypes:           file_github_com_containerd_containerd_api_services_content_v1_content_proto_goTypes,
+		DependencyIndexes: file_github_com_containerd_containerd_api_services_content_v1_content_proto_depIdxs,
+		EnumInfos:         file_github_com_containerd_containerd_api_services_content_v1_content_proto_enumTypes,
+		MessageInfos:      file_github_com_containerd_containerd_api_services_content_v1_content_proto_msgTypes,
+	}.Build()
+	File_github_com_containerd_containerd_api_services_content_v1_content_proto = out.File
+	file_github_com_containerd_containerd_api_services_content_v1_content_proto_rawDesc = nil
+	file_github_com_containerd_containerd_api_services_content_v1_content_proto_goTypes = nil
+	file_github_com_containerd_containerd_api_services_content_v1_content_proto_depIdxs = nil
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/containerd/containerd/api/services/content/v1/content.proto 0.21.3-0ubuntu1/vendor/github.com/containerd/containerd/api/services/content/v1/content.proto
--- 0.19.3+ds1-4/vendor/github.com/containerd/containerd/api/services/content/v1/content.proto	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/containerd/containerd/api/services/content/v1/content.proto	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,330 @@
+/*
+	Copyright The containerd Authors.
+
+	Licensed under the Apache License, Version 2.0 (the "License");
+	you may not use this file except in compliance with the License.
+	You may obtain a copy of the License at
+
+		http://www.apache.org/licenses/LICENSE-2.0
+
+	Unless required by applicable law or agreed to in writing, software
+	distributed under the License is distributed on an "AS IS" BASIS,
+	WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+	See the License for the specific language governing permissions and
+	limitations under the License.
+*/
+
+syntax = "proto3";
+
+package containerd.services.content.v1;
+
+import "google/protobuf/field_mask.proto";
+import "google/protobuf/timestamp.proto";
+import "google/protobuf/empty.proto";
+
+option go_package = "github.com/containerd/containerd/api/services/content/v1;content";
+
+// Content provides access to a content addressable storage system.
+service Content {
+	// Info returns information about a committed object.
+	//
+	// This call can be used for getting the size of content and checking for
+	// existence.
+	rpc Info(InfoRequest) returns (InfoResponse);
+
+	// Update updates content metadata.
+	//
+	// This call can be used to manage the mutable content labels. The
+	// immutable metadata such as digest, size, and committed at cannot
+	// be updated.
+	rpc Update(UpdateRequest) returns (UpdateResponse);
+
+	// List streams the entire set of content as Info objects and closes the
+	// stream.
+	//
+	// Typically, this will yield a large response, chunked into messages.
+	// Clients should make provisions to ensure they can handle the entire data
+	// set.
+	rpc List(ListContentRequest) returns (stream ListContentResponse);
+
+	// Delete will delete the referenced object.
+	rpc Delete(DeleteContentRequest) returns (google.protobuf.Empty);
+
+	// Read allows one to read an object based on the offset into the content.
+	//
+	// The requested data may be returned in one or more messages.
+	rpc Read(ReadContentRequest) returns (stream ReadContentResponse);
+
+	// Status returns the status for a single reference.
+	rpc Status(StatusRequest) returns (StatusResponse);
+
+	// ListStatuses returns the status of ongoing object ingestions, started via
+	// Write.
+	//
+	// Only those matching the regular expression will be provided in the
+	// response. If the provided regular expression is empty, all ingestions
+	// will be provided.
+	rpc ListStatuses(ListStatusesRequest) returns (ListStatusesResponse);
+
+	// Write begins or resumes writes to a resource identified by a unique ref.
+	// Only one active stream may exist at a time for each ref.
+	//
+	// Once a write stream has started, it may only write to a single ref, thus
+	// once a stream is started, the ref may be omitted on subsequent writes.
+	//
+	// For any write transaction represented by a ref, only a single write may
+	// be made to a given offset. If overlapping writes occur, it is an error.
+	// Writes should be sequential and implementations may throw an error if
+	// this is required.
+	//
+	// If expected_digest is set and already part of the content store, the
+	// write will fail.
+	//
+	// When completed, the commit flag should be set to true. If expected size
+	// or digest is set, the content will be validated against those values.
+	rpc Write(stream WriteContentRequest) returns (stream WriteContentResponse);
+
+	// Abort cancels the ongoing write named in the request. Any resources
+	// associated with the write will be collected.
+	rpc Abort(AbortRequest) returns (google.protobuf.Empty);
+}
+
+message Info {
+	// Digest is the hash identity of the blob.
+	string digest = 1;
+
+	// Size is the total number of bytes in the blob.
+	int64 size = 2;
+
+	// CreatedAt provides the time at which the blob was committed.
+	google.protobuf.Timestamp created_at = 3;
+
+	// UpdatedAt provides the time the info was last updated.
+	google.protobuf.Timestamp updated_at = 4;
+
+	// Labels are arbitrary data on snapshots.
+	//
+	// The combined size of a key/value pair cannot exceed 4096 bytes.
+	map<string, string> labels  = 5;
+}
+
+message InfoRequest {
+	string digest = 1;
+}
+
+message InfoResponse {
+	Info info = 1;
+}
+
+message UpdateRequest {
+	Info info = 1;
+
+	// UpdateMask specifies which fields to perform the update on. If empty,
+	// the operation applies to all fields.
+	//
+	// In info, Digest, Size, and CreatedAt are immutable,
+	// other field may be updated using this mask.
+	// If no mask is provided, all mutable field are updated.
+	google.protobuf.FieldMask update_mask = 2;
+}
+
+message UpdateResponse {
+	Info info = 1;
+}
+
+message ListContentRequest {
+	// Filters contains one or more filters using the syntax defined in the
+	// containerd filter package.
+	//
+	// The returned result will be those that match any of the provided
+	// filters. Expanded, containers that match the following will be
+	// returned:
+	//
+	//	filters[0] or filters[1] or ... or filters[n-1] or filters[n]
+	//
+	// If filters is zero-length or nil, all items will be returned.
+	repeated string filters = 1;
+}
+
+message ListContentResponse {
+	repeated Info info = 1;
+}
+
+message DeleteContentRequest {
+	// Digest specifies which content to delete.
+	string digest = 1;
+}
+
+// ReadContentRequest defines the fields that make up a request to read a portion of
+// data from a stored object.
+message ReadContentRequest {
+	// Digest is the hash identity to read.
+	string digest = 1;
+
+	// Offset specifies the number of bytes from the start at which to begin
+	// the read. If zero or less, the read will be from the start. This uses
+	// standard zero-indexed semantics.
+	int64 offset = 2;
+
+	// size is the total size of the read. If zero, the entire blob will be
+	// returned by the service.
+	int64 size = 3;
+}
+
+// ReadContentResponse carries byte data for a read request.
+message ReadContentResponse {
+	int64 offset = 1; // offset of the returned data
+	bytes data = 2; // actual data
+}
+
+message Status {
+	google.protobuf.Timestamp started_at = 1;
+	google.protobuf.Timestamp updated_at = 2;
+	string ref = 3;
+	int64 offset = 4;
+	int64 total = 5;
+	string expected = 6;
+}
+
+
+message StatusRequest {
+	string ref = 1;
+}
+
+message StatusResponse {
+	Status status = 1;
+}
+
+message ListStatusesRequest {
+	repeated string filters = 1;
+}
+
+message ListStatusesResponse {
+	repeated Status statuses = 1;
+}
+
+// WriteAction defines the behavior of a WriteRequest.
+enum WriteAction {
+	// WriteActionStat instructs the writer to return the current status while
+	// holding the lock on the write.
+	STAT = 0;
+
+	// WriteActionWrite sets the action for the write request to write data.
+	//
+	// Any data included will be written at the provided offset. The
+	// transaction will be left open for further writes.
+	//
+	// This is the default.
+	WRITE = 1;
+
+	// WriteActionCommit will write any outstanding data in the message and
+	// commit the write, storing it under the digest.
+	//
+	// This can be used in a single message to send the data, verify it and
+	// commit it.
+	//
+	// This action will always terminate the write.
+	COMMIT = 2;
+}
+
+// WriteContentRequest writes data to the request ref at offset.
+message WriteContentRequest {
+	// Action sets the behavior of the write.
+	//
+	// When this is a write and the ref is not yet allocated, the ref will be
+	// allocated and the data will be written at offset.
+	//
+	// If the action is write and the ref is allocated, it will accept data to
+	// an offset that has not yet been written.
+	//
+	// If the action is write and there is no data, the current write status
+	// will be returned. This works differently from status because the stream
+	// holds a lock.
+	WriteAction action = 1;
+
+	// Ref identifies the pre-commit object to write to.
+	string ref = 2;
+
+	// Total can be set to have the service validate the total size of the
+	// committed content.
+	//
+	// The latest value before or with the commit action message will be use to
+	// validate the content. If the offset overflows total, the service may
+	// report an error. It is only required on one message for the write.
+	//
+	// If the value is zero or less, no validation of the final content will be
+	// performed.
+	int64 total = 3;
+
+	// Expected can be set to have the service validate the final content against
+	// the provided digest.
+	//
+	// If the digest is already present in the object store, an AlreadyExists
+	// error will be returned.
+	//
+	// Only the latest version will be used to check the content against the
+	// digest. It is only required to include it on a single message, before or
+	// with the commit action message.
+	string expected = 4;
+
+	// Offset specifies the number of bytes from the start at which to begin
+	// the write. For most implementations, this means from the start of the
+	// file. This uses standard, zero-indexed semantics.
+	//
+	// If the action is write, the remote may remove all previously written
+	// data after the offset. Implementations may support arbitrary offsets but
+	// MUST support reseting this value to zero with a write. If an
+	// implementation does not support a write at a particular offset, an
+	// OutOfRange error must be returned.
+	int64 offset = 5;
+
+	// Data is the actual bytes to be written.
+	//
+	// If this is empty and the message is not a commit, a response will be
+	// returned with the current write state.
+	bytes data = 6;
+
+	// Labels are arbitrary data on snapshots.
+	//
+	// The combined size of a key/value pair cannot exceed 4096 bytes.
+	map<string, string> labels  = 7;
+}
+
+// WriteContentResponse is returned on the culmination of a write call.
+message WriteContentResponse {
+	// Action contains the action for the final message of the stream. A writer
+	// should confirm that they match the intended result.
+	WriteAction action = 1;
+
+	// StartedAt provides the time at which the write began.
+	//
+	// This must be set for stat and commit write actions. All other write
+	// actions may omit this.
+	google.protobuf.Timestamp started_at = 2;
+
+	// UpdatedAt provides the last time of a successful write.
+	//
+	// This must be set for stat and commit write actions. All other write
+	// actions may omit this.
+	google.protobuf.Timestamp updated_at = 3;
+
+	// Offset is the current committed size for the write.
+	int64 offset = 4;
+
+	// Total provides the current, expected total size of the write.
+	//
+	// We include this to provide consistency with the Status structure on the
+	// client writer.
+	//
+	// This is only valid on the Stat and Commit response.
+	int64 total = 5;
+
+	// Digest, if present, includes the digest up to the currently committed
+	// bytes. If action is commit, this field will be set. It is implementation
+	// defined if this is set for other actions.
+	string digest = 6;
+}
+
+message AbortRequest {
+	string ref = 1;
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/containerd/containerd/api/services/content/v1/content_grpc.pb.go 0.21.3-0ubuntu1/vendor/github.com/containerd/containerd/api/services/content/v1/content_grpc.pb.go
--- 0.19.3+ds1-4/vendor/github.com/containerd/containerd/api/services/content/v1/content_grpc.pb.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/containerd/containerd/api/services/content/v1/content_grpc.pb.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,571 @@
+//go:build !no_grpc
+
+// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
+// versions:
+// - protoc-gen-go-grpc v1.2.0
+// - protoc             v3.20.1
+// source: github.com/containerd/containerd/api/services/content/v1/content.proto
+
+package content
+
+import (
+	context "context"
+	grpc "google.golang.org/grpc"
+	codes "google.golang.org/grpc/codes"
+	status "google.golang.org/grpc/status"
+	emptypb "google.golang.org/protobuf/types/known/emptypb"
+)
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+// Requires gRPC-Go v1.32.0 or later.
+const _ = grpc.SupportPackageIsVersion7
+
+// ContentClient is the client API for Content service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
+type ContentClient interface {
+	// Info returns information about a committed object.
+	//
+	// This call can be used for getting the size of content and checking for
+	// existence.
+	Info(ctx context.Context, in *InfoRequest, opts ...grpc.CallOption) (*InfoResponse, error)
+	// Update updates content metadata.
+	//
+	// This call can be used to manage the mutable content labels. The
+	// immutable metadata such as digest, size, and committed at cannot
+	// be updated.
+	Update(ctx context.Context, in *UpdateRequest, opts ...grpc.CallOption) (*UpdateResponse, error)
+	// List streams the entire set of content as Info objects and closes the
+	// stream.
+	//
+	// Typically, this will yield a large response, chunked into messages.
+	// Clients should make provisions to ensure they can handle the entire data
+	// set.
+	List(ctx context.Context, in *ListContentRequest, opts ...grpc.CallOption) (Content_ListClient, error)
+	// Delete will delete the referenced object.
+	Delete(ctx context.Context, in *DeleteContentRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)
+	// Read allows one to read an object based on the offset into the content.
+	//
+	// The requested data may be returned in one or more messages.
+	Read(ctx context.Context, in *ReadContentRequest, opts ...grpc.CallOption) (Content_ReadClient, error)
+	// Status returns the status for a single reference.
+	Status(ctx context.Context, in *StatusRequest, opts ...grpc.CallOption) (*StatusResponse, error)
+	// ListStatuses returns the status of ongoing object ingestions, started via
+	// Write.
+	//
+	// Only those matching the regular expression will be provided in the
+	// response. If the provided regular expression is empty, all ingestions
+	// will be provided.
+	ListStatuses(ctx context.Context, in *ListStatusesRequest, opts ...grpc.CallOption) (*ListStatusesResponse, error)
+	// Write begins or resumes writes to a resource identified by a unique ref.
+	// Only one active stream may exist at a time for each ref.
+	//
+	// Once a write stream has started, it may only write to a single ref, thus
+	// once a stream is started, the ref may be omitted on subsequent writes.
+	//
+	// For any write transaction represented by a ref, only a single write may
+	// be made to a given offset. If overlapping writes occur, it is an error.
+	// Writes should be sequential and implementations may throw an error if
+	// this is required.
+	//
+	// If expected_digest is set and already part of the content store, the
+	// write will fail.
+	//
+	// When completed, the commit flag should be set to true. If expected size
+	// or digest is set, the content will be validated against those values.
+	Write(ctx context.Context, opts ...grpc.CallOption) (Content_WriteClient, error)
+	// Abort cancels the ongoing write named in the request. Any resources
+	// associated with the write will be collected.
+	Abort(ctx context.Context, in *AbortRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)
+}
+
+type contentClient struct {
+	cc grpc.ClientConnInterface
+}
+
+func NewContentClient(cc grpc.ClientConnInterface) ContentClient {
+	return &contentClient{cc}
+}
+
+func (c *contentClient) Info(ctx context.Context, in *InfoRequest, opts ...grpc.CallOption) (*InfoResponse, error) {
+	out := new(InfoResponse)
+	err := c.cc.Invoke(ctx, "/containerd.services.content.v1.Content/Info", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *contentClient) Update(ctx context.Context, in *UpdateRequest, opts ...grpc.CallOption) (*UpdateResponse, error) {
+	out := new(UpdateResponse)
+	err := c.cc.Invoke(ctx, "/containerd.services.content.v1.Content/Update", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *contentClient) List(ctx context.Context, in *ListContentRequest, opts ...grpc.CallOption) (Content_ListClient, error) {
+	stream, err := c.cc.NewStream(ctx, &Content_ServiceDesc.Streams[0], "/containerd.services.content.v1.Content/List", opts...)
+	if err != nil {
+		return nil, err
+	}
+	x := &contentListClient{stream}
+	if err := x.ClientStream.SendMsg(in); err != nil {
+		return nil, err
+	}
+	if err := x.ClientStream.CloseSend(); err != nil {
+		return nil, err
+	}
+	return x, nil
+}
+
+type Content_ListClient interface {
+	Recv() (*ListContentResponse, error)
+	grpc.ClientStream
+}
+
+type contentListClient struct {
+	grpc.ClientStream
+}
+
+func (x *contentListClient) Recv() (*ListContentResponse, error) {
+	m := new(ListContentResponse)
+	if err := x.ClientStream.RecvMsg(m); err != nil {
+		return nil, err
+	}
+	return m, nil
+}
+
+func (c *contentClient) Delete(ctx context.Context, in *DeleteContentRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) {
+	out := new(emptypb.Empty)
+	err := c.cc.Invoke(ctx, "/containerd.services.content.v1.Content/Delete", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *contentClient) Read(ctx context.Context, in *ReadContentRequest, opts ...grpc.CallOption) (Content_ReadClient, error) {
+	stream, err := c.cc.NewStream(ctx, &Content_ServiceDesc.Streams[1], "/containerd.services.content.v1.Content/Read", opts...)
+	if err != nil {
+		return nil, err
+	}
+	x := &contentReadClient{stream}
+	if err := x.ClientStream.SendMsg(in); err != nil {
+		return nil, err
+	}
+	if err := x.ClientStream.CloseSend(); err != nil {
+		return nil, err
+	}
+	return x, nil
+}
+
+type Content_ReadClient interface {
+	Recv() (*ReadContentResponse, error)
+	grpc.ClientStream
+}
+
+type contentReadClient struct {
+	grpc.ClientStream
+}
+
+func (x *contentReadClient) Recv() (*ReadContentResponse, error) {
+	m := new(ReadContentResponse)
+	if err := x.ClientStream.RecvMsg(m); err != nil {
+		return nil, err
+	}
+	return m, nil
+}
+
+func (c *contentClient) Status(ctx context.Context, in *StatusRequest, opts ...grpc.CallOption) (*StatusResponse, error) {
+	out := new(StatusResponse)
+	err := c.cc.Invoke(ctx, "/containerd.services.content.v1.Content/Status", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *contentClient) ListStatuses(ctx context.Context, in *ListStatusesRequest, opts ...grpc.CallOption) (*ListStatusesResponse, error) {
+	out := new(ListStatusesResponse)
+	err := c.cc.Invoke(ctx, "/containerd.services.content.v1.Content/ListStatuses", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *contentClient) Write(ctx context.Context, opts ...grpc.CallOption) (Content_WriteClient, error) {
+	stream, err := c.cc.NewStream(ctx, &Content_ServiceDesc.Streams[2], "/containerd.services.content.v1.Content/Write", opts...)
+	if err != nil {
+		return nil, err
+	}
+	x := &contentWriteClient{stream}
+	return x, nil
+}
+
+type Content_WriteClient interface {
+	Send(*WriteContentRequest) error
+	Recv() (*WriteContentResponse, error)
+	grpc.ClientStream
+}
+
+type contentWriteClient struct {
+	grpc.ClientStream
+}
+
+func (x *contentWriteClient) Send(m *WriteContentRequest) error {
+	return x.ClientStream.SendMsg(m)
+}
+
+func (x *contentWriteClient) Recv() (*WriteContentResponse, error) {
+	m := new(WriteContentResponse)
+	if err := x.ClientStream.RecvMsg(m); err != nil {
+		return nil, err
+	}
+	return m, nil
+}
+
+func (c *contentClient) Abort(ctx context.Context, in *AbortRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) {
+	out := new(emptypb.Empty)
+	err := c.cc.Invoke(ctx, "/containerd.services.content.v1.Content/Abort", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+// ContentServer is the server API for Content service.
+// All implementations must embed UnimplementedContentServer
+// for forward compatibility
+type ContentServer interface {
+	// Info returns information about a committed object.
+	//
+	// This call can be used for getting the size of content and checking for
+	// existence.
+	Info(context.Context, *InfoRequest) (*InfoResponse, error)
+	// Update updates content metadata.
+	//
+	// This call can be used to manage the mutable content labels. The
+	// immutable metadata such as digest, size, and committed at cannot
+	// be updated.
+	Update(context.Context, *UpdateRequest) (*UpdateResponse, error)
+	// List streams the entire set of content as Info objects and closes the
+	// stream.
+	//
+	// Typically, this will yield a large response, chunked into messages.
+	// Clients should make provisions to ensure they can handle the entire data
+	// set.
+	List(*ListContentRequest, Content_ListServer) error
+	// Delete will delete the referenced object.
+	Delete(context.Context, *DeleteContentRequest) (*emptypb.Empty, error)
+	// Read allows one to read an object based on the offset into the content.
+	//
+	// The requested data may be returned in one or more messages.
+	Read(*ReadContentRequest, Content_ReadServer) error
+	// Status returns the status for a single reference.
+	Status(context.Context, *StatusRequest) (*StatusResponse, error)
+	// ListStatuses returns the status of ongoing object ingestions, started via
+	// Write.
+	//
+	// Only those matching the regular expression will be provided in the
+	// response. If the provided regular expression is empty, all ingestions
+	// will be provided.
+	ListStatuses(context.Context, *ListStatusesRequest) (*ListStatusesResponse, error)
+	// Write begins or resumes writes to a resource identified by a unique ref.
+	// Only one active stream may exist at a time for each ref.
+	//
+	// Once a write stream has started, it may only write to a single ref, thus
+	// once a stream is started, the ref may be omitted on subsequent writes.
+	//
+	// For any write transaction represented by a ref, only a single write may
+	// be made to a given offset. If overlapping writes occur, it is an error.
+	// Writes should be sequential and implementations may throw an error if
+	// this is required.
+	//
+	// If expected_digest is set and already part of the content store, the
+	// write will fail.
+	//
+	// When completed, the commit flag should be set to true. If expected size
+	// or digest is set, the content will be validated against those values.
+	Write(Content_WriteServer) error
+	// Abort cancels the ongoing write named in the request. Any resources
+	// associated with the write will be collected.
+	Abort(context.Context, *AbortRequest) (*emptypb.Empty, error)
+	mustEmbedUnimplementedContentServer()
+}
+
+// UnimplementedContentServer must be embedded to have forward compatible implementations.
+type UnimplementedContentServer struct {
+}
+
+func (UnimplementedContentServer) Info(context.Context, *InfoRequest) (*InfoResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method Info not implemented")
+}
+func (UnimplementedContentServer) Update(context.Context, *UpdateRequest) (*UpdateResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method Update not implemented")
+}
+func (UnimplementedContentServer) List(*ListContentRequest, Content_ListServer) error {
+	return status.Errorf(codes.Unimplemented, "method List not implemented")
+}
+func (UnimplementedContentServer) Delete(context.Context, *DeleteContentRequest) (*emptypb.Empty, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method Delete not implemented")
+}
+func (UnimplementedContentServer) Read(*ReadContentRequest, Content_ReadServer) error {
+	return status.Errorf(codes.Unimplemented, "method Read not implemented")
+}
+func (UnimplementedContentServer) Status(context.Context, *StatusRequest) (*StatusResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method Status not implemented")
+}
+func (UnimplementedContentServer) ListStatuses(context.Context, *ListStatusesRequest) (*ListStatusesResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method ListStatuses not implemented")
+}
+func (UnimplementedContentServer) Write(Content_WriteServer) error {
+	return status.Errorf(codes.Unimplemented, "method Write not implemented")
+}
+func (UnimplementedContentServer) Abort(context.Context, *AbortRequest) (*emptypb.Empty, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method Abort not implemented")
+}
+func (UnimplementedContentServer) mustEmbedUnimplementedContentServer() {}
+
+// UnsafeContentServer may be embedded to opt out of forward compatibility for this service.
+// Use of this interface is not recommended, as added methods to ContentServer will
+// result in compilation errors.
+type UnsafeContentServer interface {
+	mustEmbedUnimplementedContentServer()
+}
+
+func RegisterContentServer(s grpc.ServiceRegistrar, srv ContentServer) {
+	s.RegisterService(&Content_ServiceDesc, srv)
+}
+
+func _Content_Info_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(InfoRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(ContentServer).Info(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/containerd.services.content.v1.Content/Info",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(ContentServer).Info(ctx, req.(*InfoRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Content_Update_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(UpdateRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(ContentServer).Update(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/containerd.services.content.v1.Content/Update",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(ContentServer).Update(ctx, req.(*UpdateRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Content_List_Handler(srv interface{}, stream grpc.ServerStream) error {
+	m := new(ListContentRequest)
+	if err := stream.RecvMsg(m); err != nil {
+		return err
+	}
+	return srv.(ContentServer).List(m, &contentListServer{stream})
+}
+
+type Content_ListServer interface {
+	Send(*ListContentResponse) error
+	grpc.ServerStream
+}
+
+type contentListServer struct {
+	grpc.ServerStream
+}
+
+func (x *contentListServer) Send(m *ListContentResponse) error {
+	return x.ServerStream.SendMsg(m)
+}
+
+func _Content_Delete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(DeleteContentRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(ContentServer).Delete(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/containerd.services.content.v1.Content/Delete",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(ContentServer).Delete(ctx, req.(*DeleteContentRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Content_Read_Handler(srv interface{}, stream grpc.ServerStream) error {
+	m := new(ReadContentRequest)
+	if err := stream.RecvMsg(m); err != nil {
+		return err
+	}
+	return srv.(ContentServer).Read(m, &contentReadServer{stream})
+}
+
+type Content_ReadServer interface {
+	Send(*ReadContentResponse) error
+	grpc.ServerStream
+}
+
+type contentReadServer struct {
+	grpc.ServerStream
+}
+
+func (x *contentReadServer) Send(m *ReadContentResponse) error {
+	return x.ServerStream.SendMsg(m)
+}
+
+func _Content_Status_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(StatusRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(ContentServer).Status(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/containerd.services.content.v1.Content/Status",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(ContentServer).Status(ctx, req.(*StatusRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Content_ListStatuses_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(ListStatusesRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(ContentServer).ListStatuses(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/containerd.services.content.v1.Content/ListStatuses",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(ContentServer).ListStatuses(ctx, req.(*ListStatusesRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Content_Write_Handler(srv interface{}, stream grpc.ServerStream) error {
+	return srv.(ContentServer).Write(&contentWriteServer{stream})
+}
+
+type Content_WriteServer interface {
+	Send(*WriteContentResponse) error
+	Recv() (*WriteContentRequest, error)
+	grpc.ServerStream
+}
+
+type contentWriteServer struct {
+	grpc.ServerStream
+}
+
+func (x *contentWriteServer) Send(m *WriteContentResponse) error {
+	return x.ServerStream.SendMsg(m)
+}
+
+func (x *contentWriteServer) Recv() (*WriteContentRequest, error) {
+	m := new(WriteContentRequest)
+	if err := x.ServerStream.RecvMsg(m); err != nil {
+		return nil, err
+	}
+	return m, nil
+}
+
+func _Content_Abort_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(AbortRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(ContentServer).Abort(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/containerd.services.content.v1.Content/Abort",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(ContentServer).Abort(ctx, req.(*AbortRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+// Content_ServiceDesc is the grpc.ServiceDesc for Content service.
+// It's only intended for direct use with grpc.RegisterService,
+// and not to be introspected or modified (even as a copy)
+var Content_ServiceDesc = grpc.ServiceDesc{
+	ServiceName: "containerd.services.content.v1.Content",
+	HandlerType: (*ContentServer)(nil),
+	Methods: []grpc.MethodDesc{
+		{
+			MethodName: "Info",
+			Handler:    _Content_Info_Handler,
+		},
+		{
+			MethodName: "Update",
+			Handler:    _Content_Update_Handler,
+		},
+		{
+			MethodName: "Delete",
+			Handler:    _Content_Delete_Handler,
+		},
+		{
+			MethodName: "Status",
+			Handler:    _Content_Status_Handler,
+		},
+		{
+			MethodName: "ListStatuses",
+			Handler:    _Content_ListStatuses_Handler,
+		},
+		{
+			MethodName: "Abort",
+			Handler:    _Content_Abort_Handler,
+		},
+	},
+	Streams: []grpc.StreamDesc{
+		{
+			StreamName:    "List",
+			Handler:       _Content_List_Handler,
+			ServerStreams: true,
+		},
+		{
+			StreamName:    "Read",
+			Handler:       _Content_Read_Handler,
+			ServerStreams: true,
+		},
+		{
+			StreamName:    "Write",
+			Handler:       _Content_Write_Handler,
+			ServerStreams: true,
+			ClientStreams: true,
+		},
+	},
+	Metadata: "github.com/containerd/containerd/api/services/content/v1/content.proto",
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/containerd/containerd/api/services/content/v1/content_ttrpc.pb.go 0.21.3-0ubuntu1/vendor/github.com/containerd/containerd/api/services/content/v1/content_ttrpc.pb.go
--- 0.19.3+ds1-4/vendor/github.com/containerd/containerd/api/services/content/v1/content_ttrpc.pb.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/containerd/containerd/api/services/content/v1/content_ttrpc.pb.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,311 @@
+// Code generated by protoc-gen-go-ttrpc. DO NOT EDIT.
+// source: github.com/containerd/containerd/api/services/content/v1/content.proto
+package content
+
+import (
+	context "context"
+	ttrpc "github.com/containerd/ttrpc"
+	emptypb "google.golang.org/protobuf/types/known/emptypb"
+)
+
+type TTRPCContentService interface {
+	Info(context.Context, *InfoRequest) (*InfoResponse, error)
+	Update(context.Context, *UpdateRequest) (*UpdateResponse, error)
+	List(context.Context, *ListContentRequest, TTRPCContent_ListServer) error
+	Delete(context.Context, *DeleteContentRequest) (*emptypb.Empty, error)
+	Read(context.Context, *ReadContentRequest, TTRPCContent_ReadServer) error
+	Status(context.Context, *StatusRequest) (*StatusResponse, error)
+	ListStatuses(context.Context, *ListStatusesRequest) (*ListStatusesResponse, error)
+	Write(context.Context, TTRPCContent_WriteServer) error
+	Abort(context.Context, *AbortRequest) (*emptypb.Empty, error)
+}
+
+type TTRPCContent_ListServer interface {
+	Send(*ListContentResponse) error
+	ttrpc.StreamServer
+}
+
+type ttrpccontentListServer struct {
+	ttrpc.StreamServer
+}
+
+func (x *ttrpccontentListServer) Send(m *ListContentResponse) error {
+	return x.StreamServer.SendMsg(m)
+}
+
+type TTRPCContent_ReadServer interface {
+	Send(*ReadContentResponse) error
+	ttrpc.StreamServer
+}
+
+type ttrpccontentReadServer struct {
+	ttrpc.StreamServer
+}
+
+func (x *ttrpccontentReadServer) Send(m *ReadContentResponse) error {
+	return x.StreamServer.SendMsg(m)
+}
+
+type TTRPCContent_WriteServer interface {
+	Send(*WriteContentResponse) error
+	Recv() (*WriteContentRequest, error)
+	ttrpc.StreamServer
+}
+
+type ttrpccontentWriteServer struct {
+	ttrpc.StreamServer
+}
+
+func (x *ttrpccontentWriteServer) Send(m *WriteContentResponse) error {
+	return x.StreamServer.SendMsg(m)
+}
+
+func (x *ttrpccontentWriteServer) Recv() (*WriteContentRequest, error) {
+	m := new(WriteContentRequest)
+	if err := x.StreamServer.RecvMsg(m); err != nil {
+		return nil, err
+	}
+	return m, nil
+}
+
+func RegisterTTRPCContentService(srv *ttrpc.Server, svc TTRPCContentService) {
+	srv.RegisterService("containerd.services.content.v1.Content", &ttrpc.ServiceDesc{
+		Methods: map[string]ttrpc.Method{
+			"Info": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) {
+				var req InfoRequest
+				if err := unmarshal(&req); err != nil {
+					return nil, err
+				}
+				return svc.Info(ctx, &req)
+			},
+			"Update": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) {
+				var req UpdateRequest
+				if err := unmarshal(&req); err != nil {
+					return nil, err
+				}
+				return svc.Update(ctx, &req)
+			},
+			"Delete": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) {
+				var req DeleteContentRequest
+				if err := unmarshal(&req); err != nil {
+					return nil, err
+				}
+				return svc.Delete(ctx, &req)
+			},
+			"Status": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) {
+				var req StatusRequest
+				if err := unmarshal(&req); err != nil {
+					return nil, err
+				}
+				return svc.Status(ctx, &req)
+			},
+			"ListStatuses": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) {
+				var req ListStatusesRequest
+				if err := unmarshal(&req); err != nil {
+					return nil, err
+				}
+				return svc.ListStatuses(ctx, &req)
+			},
+			"Abort": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) {
+				var req AbortRequest
+				if err := unmarshal(&req); err != nil {
+					return nil, err
+				}
+				return svc.Abort(ctx, &req)
+			},
+		},
+		Streams: map[string]ttrpc.Stream{
+			"List": {
+				Handler: func(ctx context.Context, stream ttrpc.StreamServer) (interface{}, error) {
+					m := new(ListContentRequest)
+					if err := stream.RecvMsg(m); err != nil {
+						return nil, err
+					}
+					return nil, svc.List(ctx, m, &ttrpccontentListServer{stream})
+				},
+				StreamingClient: false,
+				StreamingServer: true,
+			},
+			"Read": {
+				Handler: func(ctx context.Context, stream ttrpc.StreamServer) (interface{}, error) {
+					m := new(ReadContentRequest)
+					if err := stream.RecvMsg(m); err != nil {
+						return nil, err
+					}
+					return nil, svc.Read(ctx, m, &ttrpccontentReadServer{stream})
+				},
+				StreamingClient: false,
+				StreamingServer: true,
+			},
+			"Write": {
+				Handler: func(ctx context.Context, stream ttrpc.StreamServer) (interface{}, error) {
+					return nil, svc.Write(ctx, &ttrpccontentWriteServer{stream})
+				},
+				StreamingClient: true,
+				StreamingServer: true,
+			},
+		},
+	})
+}
+
+type TTRPCContentClient interface {
+	Info(context.Context, *InfoRequest) (*InfoResponse, error)
+	Update(context.Context, *UpdateRequest) (*UpdateResponse, error)
+	List(context.Context, *ListContentRequest) (TTRPCContent_ListClient, error)
+	Delete(context.Context, *DeleteContentRequest) (*emptypb.Empty, error)
+	Read(context.Context, *ReadContentRequest) (TTRPCContent_ReadClient, error)
+	Status(context.Context, *StatusRequest) (*StatusResponse, error)
+	ListStatuses(context.Context, *ListStatusesRequest) (*ListStatusesResponse, error)
+	Write(context.Context) (TTRPCContent_WriteClient, error)
+	Abort(context.Context, *AbortRequest) (*emptypb.Empty, error)
+}
+
+type ttrpccontentClient struct {
+	client *ttrpc.Client
+}
+
+func NewTTRPCContentClient(client *ttrpc.Client) TTRPCContentClient {
+	return &ttrpccontentClient{
+		client: client,
+	}
+}
+
+func (c *ttrpccontentClient) Info(ctx context.Context, req *InfoRequest) (*InfoResponse, error) {
+	var resp InfoResponse
+	if err := c.client.Call(ctx, "containerd.services.content.v1.Content", "Info", req, &resp); err != nil {
+		return nil, err
+	}
+	return &resp, nil
+}
+
+func (c *ttrpccontentClient) Update(ctx context.Context, req *UpdateRequest) (*UpdateResponse, error) {
+	var resp UpdateResponse
+	if err := c.client.Call(ctx, "containerd.services.content.v1.Content", "Update", req, &resp); err != nil {
+		return nil, err
+	}
+	return &resp, nil
+}
+
+func (c *ttrpccontentClient) List(ctx context.Context, req *ListContentRequest) (TTRPCContent_ListClient, error) {
+	stream, err := c.client.NewStream(ctx, &ttrpc.StreamDesc{
+		StreamingClient: false,
+		StreamingServer: true,
+	}, "containerd.services.content.v1.Content", "List", req)
+	if err != nil {
+		return nil, err
+	}
+	x := &ttrpccontentListClient{stream}
+	return x, nil
+}
+
+type TTRPCContent_ListClient interface {
+	Recv() (*ListContentResponse, error)
+	ttrpc.ClientStream
+}
+
+type ttrpccontentListClient struct {
+	ttrpc.ClientStream
+}
+
+func (x *ttrpccontentListClient) Recv() (*ListContentResponse, error) {
+	m := new(ListContentResponse)
+	if err := x.ClientStream.RecvMsg(m); err != nil {
+		return nil, err
+	}
+	return m, nil
+}
+
+func (c *ttrpccontentClient) Delete(ctx context.Context, req *DeleteContentRequest) (*emptypb.Empty, error) {
+	var resp emptypb.Empty
+	if err := c.client.Call(ctx, "containerd.services.content.v1.Content", "Delete", req, &resp); err != nil {
+		return nil, err
+	}
+	return &resp, nil
+}
+
+func (c *ttrpccontentClient) Read(ctx context.Context, req *ReadContentRequest) (TTRPCContent_ReadClient, error) {
+	stream, err := c.client.NewStream(ctx, &ttrpc.StreamDesc{
+		StreamingClient: false,
+		StreamingServer: true,
+	}, "containerd.services.content.v1.Content", "Read", req)
+	if err != nil {
+		return nil, err
+	}
+	x := &ttrpccontentReadClient{stream}
+	return x, nil
+}
+
+type TTRPCContent_ReadClient interface {
+	Recv() (*ReadContentResponse, error)
+	ttrpc.ClientStream
+}
+
+type ttrpccontentReadClient struct {
+	ttrpc.ClientStream
+}
+
+func (x *ttrpccontentReadClient) Recv() (*ReadContentResponse, error) {
+	m := new(ReadContentResponse)
+	if err := x.ClientStream.RecvMsg(m); err != nil {
+		return nil, err
+	}
+	return m, nil
+}
+
+func (c *ttrpccontentClient) Status(ctx context.Context, req *StatusRequest) (*StatusResponse, error) {
+	var resp StatusResponse
+	if err := c.client.Call(ctx, "containerd.services.content.v1.Content", "Status", req, &resp); err != nil {
+		return nil, err
+	}
+	return &resp, nil
+}
+
+func (c *ttrpccontentClient) ListStatuses(ctx context.Context, req *ListStatusesRequest) (*ListStatusesResponse, error) {
+	var resp ListStatusesResponse
+	if err := c.client.Call(ctx, "containerd.services.content.v1.Content", "ListStatuses", req, &resp); err != nil {
+		return nil, err
+	}
+	return &resp, nil
+}
+
+func (c *ttrpccontentClient) Write(ctx context.Context) (TTRPCContent_WriteClient, error) {
+	stream, err := c.client.NewStream(ctx, &ttrpc.StreamDesc{
+		StreamingClient: true,
+		StreamingServer: true,
+	}, "containerd.services.content.v1.Content", "Write", nil)
+	if err != nil {
+		return nil, err
+	}
+	x := &ttrpccontentWriteClient{stream}
+	return x, nil
+}
+
+type TTRPCContent_WriteClient interface {
+	Send(*WriteContentRequest) error
+	Recv() (*WriteContentResponse, error)
+	ttrpc.ClientStream
+}
+
+type ttrpccontentWriteClient struct {
+	ttrpc.ClientStream
+}
+
+func (x *ttrpccontentWriteClient) Send(m *WriteContentRequest) error {
+	return x.ClientStream.SendMsg(m)
+}
+
+func (x *ttrpccontentWriteClient) Recv() (*WriteContentResponse, error) {
+	m := new(WriteContentResponse)
+	if err := x.ClientStream.RecvMsg(m); err != nil {
+		return nil, err
+	}
+	return m, nil
+}
+
+func (c *ttrpccontentClient) Abort(ctx context.Context, req *AbortRequest) (*emptypb.Empty, error) {
+	var resp emptypb.Empty
+	if err := c.client.Call(ctx, "containerd.services.content.v1.Content", "Abort", req, &resp); err != nil {
+		return nil, err
+	}
+	return &resp, nil
+}
diff -pruN 0.19.3+ds1-4/vendor/github.com/containerd/containerd/api/services/content/v1/doc.go 0.21.3-0ubuntu1/vendor/github.com/containerd/containerd/api/services/content/v1/doc.go
--- 0.19.3+ds1-4/vendor/github.com/containerd/containerd/api/services/content/v1/doc.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/containerd/containerd/api/services/content/v1/doc.go	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,17 @@
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package content
diff -pruN 0.19.3+ds1-4/vendor/github.com/containerd/containerd/v2/LICENSE 0.21.3-0ubuntu1/vendor/github.com/containerd/containerd/v2/LICENSE
--- 0.19.3+ds1-4/vendor/github.com/containerd/containerd/v2/LICENSE	1970-01-01 00:00:00.000000000 +0000
+++ 0.21.3-0ubuntu1/vendor/github.com/containerd/containerd/v2/LICENSE	2025-03-17 16:14:25.000000000 +0000
@@ -0,0 +1,191 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        https://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, M