diff -pruN 0.2.1-1.1/.circleci/config.yml 0.4.49+ds1-1/.circleci/config.yml
--- 0.2.1-1.1/.circleci/config.yml	2018-11-20 17:06:32.000000000 +0000
+++ 0.4.49+ds1-1/.circleci/config.yml	2025-08-21 19:15:53.000000000 +0000
@@ -1,25 +1,202 @@
 version: 2
 jobs:
-  build:
-    working_directory: /go/src/github.com/segmentio/kafka-go
+  lint:
     docker:
-      - image: circleci/golang
-      - image: wurstmeister/zookeeper
-        ports: ['2181:2181']
-      - image: wurstmeister/kafka:0.11.0.1
-        ports: ['9092:9092']
-        environment:
-          KAFKA_VERSION: '0.11.0.1'
-          KAFKA_BROKER_ID: '1'
-          KAFKA_CREATE_TOPICS: 'test-writer-0:3:1,test-writer-1:3:1'
-          KAFKA_DELETE_TOPIC_ENABLE: 'true'
-          KAFKA_ADVERTISED_HOST_NAME: 'localhost'
-          KAFKA_ADVERTISED_PORT: '9092'
-          KAFKA_ZOOKEEPER_CONNECT: 'localhost:2181'
-          KAFKA_AUTO_CREATE_TOPICS_ENABLE: 'true'
+      - image: golangci/golangci-lint:v1.45-alpine
     steps:
       - checkout
-      - setup_remote_docker: { reusable: true, docker_layer_caching: true }
-      - run: go get -v -t ./...
-      - run: go vet ./...
-      - run: go test -v -race ./...
+      - run: golangci-lint run
+
+  kafka-270:
+    working_directory: &working_directory /go/src/github.com/segmentio/kafka-go
+    environment:
+      KAFKA_VERSION: "2.7.0"
+
+      # Need to skip nettest to avoid these kinds of errors:
+      #  --- FAIL: TestConn/nettest (17.56s)
+      #    --- FAIL: TestConn/nettest/PingPong (7.40s)
+      #      conntest.go:112: unexpected Read error: [7] Request Timed Out: the request exceeded the user-specified time limit in the request
+      #      conntest.go:118: mismatching value: got 77, want 78
+      #      conntest.go:118: mismatching value: got 78, want 79
+      # ...
+      #
+      # TODO: Figure out why these are happening and fix them (they don't appear to be new).
+      KAFKA_SKIP_NETTEST: "1"
+    docker:
+    - image: circleci/golang
+    - image: bitnamilegacy/zookeeper:latest
+      ports:
+      - 2181:2181
+      environment:
+        ALLOW_ANONYMOUS_LOGIN: yes
+    - image: bitnamilegacy/kafka:2.7.0
+      ports:
+      - 9092:9092
+      - 9093:9093
+      environment: &environment
+        KAFKA_CFG_BROKER_ID: 1
+        KAFKA_CFG_DELETE_TOPIC_ENABLE: 'true'
+        KAFKA_CFG_ADVERTISED_HOST_NAME: 'localhost'
+        KAFKA_CFG_ADVERTISED_PORT: '9092'
+        KAFKA_CFG_ZOOKEEPER_CONNECT: localhost:2181
+        KAFKA_CFG_AUTO_CREATE_TOPICS_ENABLE: 'true'
+        KAFKA_CFG_MESSAGE_MAX_BYTES: '200000000'
+        KAFKA_CFG_LISTENERS: 'PLAINTEXT://:9092,SASL_PLAINTEXT://:9093'
+        KAFKA_CFG_ADVERTISED_LISTENERS: 'PLAINTEXT://localhost:9092,SASL_PLAINTEXT://localhost:9093'
+        KAFKA_CFG_SASL_ENABLED_MECHANISMS: 'PLAIN,SCRAM-SHA-256,SCRAM-SHA-512'
+        KAFKA_CFG_AUTHORIZER_CLASS_NAME: 'kafka.security.auth.SimpleAclAuthorizer'
+        KAFKA_CFG_ALLOW_EVERYONE_IF_NO_ACL_FOUND: 'true'
+        KAFKA_OPTS: "-Djava.security.auth.login.config=/opt/bitnami/kafka/config/kafka_jaas.conf"
+        ALLOW_PLAINTEXT_LISTENER: yes
+      entrypoint: &entrypoint
+        - "/bin/bash"
+        - "-c"
+        - echo -e 'KafkaServer {\norg.apache.kafka.common.security.scram.ScramLoginModule required\n username="adminscram"\n password="admin-secret";\n org.apache.kafka.common.security.plain.PlainLoginModule required\n username="adminplain"\n password="admin-secret"\n user_adminplain="admin-secret";\n  };' > /opt/bitnami/kafka/config/kafka_jaas.conf; /opt/bitnami/kafka/bin/kafka-configs.sh --zookeeper localhost:2181 --alter --add-config "SCRAM-SHA-256=[password=admin-secret-256],SCRAM-SHA-512=[password=admin-secret-512]" --entity-type users --entity-name adminscram; exec /entrypoint.sh /run.sh
+    steps: &steps
+    - checkout
+    - restore_cache:
+        key: kafka-go-mod-{{ checksum "go.sum" }}-1
+    - run:
+        name: Download dependencies
+        command: go mod download
+    - save_cache:
+        key: kafka-go-mod-{{ checksum "go.sum" }}-1
+        paths:
+        - /go/pkg/mod
+    - run:
+        name: Wait for kafka
+        command: ./scripts/wait-for-kafka.sh
+    - run:
+        name: Test kafka-go
+        command: go test -race -cover ./...
+    - run:
+        name: Test kafka-go unsafe
+        command: go test -tags=unsafe -race -cover ./...
+    - run:
+        name: Test kafka-go/sasl/aws_msk_iam
+        working_directory: ./sasl/aws_msk_iam
+        command: go test -race -cover ./...
+
+  kafka-281:
+    working_directory: *working_directory
+    environment:
+      KAFKA_VERSION: "2.8.1"
+
+      # Need to skip nettest to avoid these kinds of errors:
+      #  --- FAIL: TestConn/nettest (17.56s)
+      #    --- FAIL: TestConn/nettest/PingPong (7.40s)
+      #      conntest.go:112: unexpected Read error: [7] Request Timed Out: the request exceeded the user-specified time limit in the request
+      #      conntest.go:118: mismatching value: got 77, want 78
+      #      conntest.go:118: mismatching value: got 78, want 79
+      # ...
+      #
+      # TODO: Figure out why these are happening and fix them (they don't appear to be new).
+      KAFKA_SKIP_NETTEST: "1"
+    docker:
+    - image: circleci/golang
+    - image: bitnamilegacy/zookeeper:latest
+      ports:
+      - 2181:2181
+      environment:
+        ALLOW_ANONYMOUS_LOGIN: yes
+    - image: bitnamilegacy/kafka:2.8.1
+      ports:
+      - 9092:9092
+      - 9093:9093
+      environment: *environment
+      entrypoint: *entrypoint
+    steps: *steps
+
+  kafka-370:
+    working_directory: *working_directory
+    environment:
+      KAFKA_VERSION: "3.7.0"
+
+      # Need to skip nettest to avoid these kinds of errors:
+      #  --- FAIL: TestConn/nettest (17.56s)
+      #    --- FAIL: TestConn/nettest/PingPong (7.40s)
+      #      conntest.go:112: unexpected Read error: [7] Request Timed Out: the request exceeded the user-specified time limit in the request
+      #      conntest.go:118: mismatching value: got 77, want 78
+      #      conntest.go:118: mismatching value: got 78, want 79
+      # ...
+      #
+      # TODO: Figure out why these are happening and fix them (they don't appear to be new).
+      KAFKA_SKIP_NETTEST: "1"
+    docker:
+    - image: circleci/golang
+    - image: bitnamilegacy/zookeeper:latest
+      ports:
+      - 2181:2181
+      environment:
+        ALLOW_ANONYMOUS_LOGIN: yes
+    - image: bitnamilegacy/kafka:3.7.0
+      ports:
+        - 9092:9092
+        - 9093:9093
+      environment:
+        <<: *environment
+        KAFKA_CFG_AUTHORIZER_CLASS_NAME: 'kafka.security.authorizer.AclAuthorizer'
+      entrypoint: *entrypoint
+    steps: *steps
+
+  # NOTE: this fails quite often due to Java heap errors from Kafka.
+  # Once we figure out how to fix that, we can re-enable this.
+  # https://github.com/segmentio/kafka-go/issues/1360#issuecomment-2858935900
+  # kafka-400:
+  #   working_directory: *working_directory
+  #   environment:
+  #     KAFKA_VERSION: "4.0.0"
+
+  #     # Need to skip nettest to avoid these kinds of errors:
+  #     #  --- FAIL: TestConn/nettest (17.56s)
+  #     #    --- FAIL: TestConn/nettest/PingPong (7.40s)
+  #     #      conntest.go:112: unexpected Read error: [7] Request Timed Out: the request exceeded the user-specified time limit in the request
+  #     #      conntest.go:118: mismatching value: got 77, want 78
+  #     #      conntest.go:118: mismatching value: got 78, want 79
+  #     # ...
+  #     #
+  #     # TODO: Figure out why these are happening and fix them (they don't appear to be new).
+  #     KAFKA_SKIP_NETTEST: "1"
+  #   docker:
+  #   - image: circleci/golang
+  #   - image: bitnamilegacy/kafka:4.0.0
+  #     ports:
+  #       - 9092:9092
+  #       - 9093:9093
+  #     environment:
+  #       KAFKA_CFG_NODE_ID: 1
+  #       KAFKA_CFG_BROKER_ID: 1
+  #       KAFKA_CFG_PROCESS_ROLES: broker,controller
+  #       KAFKA_CFG_ADVERTISED_HOST_NAME: 'localhost'
+  #       KAFKA_CFG_CONTROLLER_LISTENER_NAMES: CONTROLLER
+  #       KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP: CONTROLLER:PLAINTEXT,PLAIN:PLAINTEXT,SASL:SASL_PLAINTEXT
+  #       KAFKA_CFG_LISTENERS: CONTROLLER://:9094,PLAIN://:9092,SASL://:9093
+  #       KAFKA_CFG_ADVERTISED_LISTENERS: PLAIN://localhost:9092,SASL://localhost:9093
+  #       KAFKA_CFG_INTER_BROKER_LISTENER_NAME: PLAIN
+  #       KAFKA_CFG_SASL_ENABLED_MECHANISMS: 'PLAIN,SCRAM-SHA-256,SCRAM-SHA-512'
+  #       KAFKA_CFG_CONTROLLER_QUORUM_VOTERS: 1@localhost:9094
+  #       ALLOW_PLAINTEXT_LISTENER: yes
+  #       KAFKA_CFG_ALLOW_EVERYONE_IF_NO_ACL_FOUND: 'true'
+  #       KAFKA_OPTS: "-Djava.security.auth.login.config=/opt/bitnami/kafka/config/kafka_jaas.conf"
+  #       KAFKA_CFG_AUTO_CREATE_TOPICS_ENABLE: 'true'
+  #       KAFKA_CFG_DELETE_TOPIC_ENABLE: 'true'
+  #       KAFKA_CFG_MESSAGE_MAX_BYTES: '200000000'
+  #       KAFKA_CFG_AUTHORIZER_CLASS_NAME: 'org.apache.kafka.metadata.authorizer.StandardAuthorizer'
+  #       KAFKA_CFG_SUPER_USERS: User:adminscram256;User:adminscram512;User:adminplain
+  #       KAFKA_CLIENT_USERS: adminscram256,adminscram512,adminplain
+  #       KAFKA_CLIENT_PASSWORDS: admin-secret-256,admin-secret-512,admin-secret
+  #       KAFKA_CLIENT_SASL_MECHANISMS: SCRAM-SHA-256,SCRAM-SHA-512,PLAIN
+  #       KAFKA_INTER_BROKER_USER: adminscram512
+  #       KAFKA_INTER_BROKER_PASSWORD: admin-secret-512
+  #       KAFKA_CFG_SASL_MECHANISM_INTER_BROKER_PROTOCOL: SCRAM-SHA-512
+  #   steps: *steps
+
+workflows:
+  version: 2
+  run:
+    jobs:
+    - lint
+    - kafka-270
+    - kafka-281
+    - kafka-370
+    #- kafka-400
diff -pruN 0.2.1-1.1/.gitattributes 0.4.49+ds1-1/.gitattributes
--- 0.2.1-1.1/.gitattributes	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/.gitattributes	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1 @@
+fixtures/*.hex binary
diff -pruN 0.2.1-1.1/.github/ISSUE_TEMPLATE/bug_report.md 0.4.49+ds1-1/.github/ISSUE_TEMPLATE/bug_report.md
--- 0.2.1-1.1/.github/ISSUE_TEMPLATE/bug_report.md	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/.github/ISSUE_TEMPLATE/bug_report.md	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,66 @@
+---
+name: Bug report
+about: Create a report to help us improve
+title: ''
+labels: bug
+assignees: ''
+
+---
+
+**Describe the bug**
+
+> A clear and concise description of what the bug is.
+
+**Kafka Version**
+
+> * What version(s) of Kafka are you testing against?
+> * What version of kafka-go are you using?
+
+**To Reproduce**
+
+> Resources to reproduce the behavior:
+
+```yaml
+---
+# docker-compose.yaml
+#
+# Adding a docker-compose file will help the maintainers setup the environment
+# to reproduce the issue.
+#
+# If one of the docker-compose files available in the repository can be used,
+# mentioning it is also a useful alternative.
+...
+```
+
+```go
+package main
+
+import (
+    "github.com/segmentio/kafka-go"
+)
+
+func main() {
+    // Adding a fully reproducible example will help maintainers provide
+    // assistance to debug the issues.
+    ...
+}
+```
+
+**Expected Behavior**
+
+> A clear and concise description of what you expected to happen.
+
+**Observed Behavior**
+
+> A clear and concise description of the behavior you observed.
+
+```
+Often, pasting the logging output from a kafka.Reader or kafka.Writer will
+provide useful details to help maintainers investigate the issue and provide a
+fix. If possible, providing stack traces or CPU/memory profiles may also contain
+valuable information to understand the conditions that triggered the issue.
+```
+
+**Additional Context**
+
+> Add any other context about the problem here.
diff -pruN 0.2.1-1.1/.github/ISSUE_TEMPLATE/feature_request.md 0.4.49+ds1-1/.github/ISSUE_TEMPLATE/feature_request.md
--- 0.2.1-1.1/.github/ISSUE_TEMPLATE/feature_request.md	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/.github/ISSUE_TEMPLATE/feature_request.md	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,16 @@
+---
+name: Feature request
+about: Suggest an idea for this project
+title: ''
+labels: enhancement
+assignees: ''
+
+---
+
+**Describe the solution you would like**
+
+> A clear and concise description of what you want to happen.
+
+**Supporting documentation**
+
+> Please provide links to relevant Kafka protocol docs and/or KIPs.
diff -pruN 0.2.1-1.1/.gitignore 0.4.49+ds1-1/.gitignore
--- 0.2.1-1.1/.gitignore	2018-11-20 17:06:32.000000000 +0000
+++ 0.4.49+ds1-1/.gitignore	2025-08-21 19:15:53.000000000 +0000
@@ -27,8 +27,14 @@ _testmain.go
 # Emacs
 *~
 
+# VIM
+*.swp
+
 # Goland
 .idea
 
+#IntelliJ
+*.iml
+
 # govendor
 /vendor/*/
diff -pruN 0.2.1-1.1/.golangci.yml 0.4.49+ds1-1/.golangci.yml
--- 0.2.1-1.1/.golangci.yml	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/.golangci.yml	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,18 @@
+linters:
+  enable:
+    - bodyclose
+    - errorlint
+    - goconst
+    - godot
+    - gofmt
+    - goimports
+    - prealloc
+
+  disable:
+    # Temporarily disabling so it can be addressed in a dedicated PR.
+    - errcheck
+    - goerr113
+
+linters-settings:
+  goconst:
+    ignore-tests: true
diff -pruN 0.2.1-1.1/CODE_OF_CONDUCT.md 0.4.49+ds1-1/CODE_OF_CONDUCT.md
--- 0.2.1-1.1/CODE_OF_CONDUCT.md	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/CODE_OF_CONDUCT.md	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,75 @@
+# Contributor Covenant Code of Conduct
+
+## Our Pledge
+
+In the interest of fostering an open and welcoming environment, we as
+contributors and maintainers pledge to making participation in our project and
+our community a harassment-free experience for everyone, regardless of age, body
+size, disability, ethnicity, sex characteristics, gender identity and expression,
+level of experience, education, socio-economic status, nationality, personal
+appearance, race, religion, or sexual identity and orientation.
+
+## Our Standards
+
+Examples of behavior that contributes to creating a positive environment
+include:
+
+- Using welcoming and inclusive language
+- Being respectful of differing viewpoints and experiences
+- Gracefully accepting constructive criticism
+- Focusing on what is best for the community
+- Showing empathy towards other community members
+
+Examples of unacceptable behavior by participants include:
+
+- The use of sexualized language or imagery and unwelcome sexual attention or
+  advances
+- Trolling, insulting/derogatory comments, and personal or political attacks
+- Public or private harassment
+- Publishing others' private information, such as a physical or electronic
+  address, without explicit permission
+- Other conduct which could reasonably be considered inappropriate in a
+  professional setting
+
+## Our Responsibilities
+
+Project maintainers are responsible for clarifying the standards of acceptable
+behavior and are expected to take appropriate and fair corrective action in
+response to any instances of unacceptable behavior.
+
+Project maintainers have the right and responsibility to remove, edit, or
+reject comments, commits, code, wiki edits, issues, and other contributions
+that are not aligned to this Code of Conduct, or to ban temporarily or
+permanently any contributor for other behaviors that they deem inappropriate,
+threatening, offensive, or harmful.
+
+Project maintainers are available at [#kafka-go](https://gophers.slack.com/archives/CG4H0N9PX) channel inside the [Gophers Slack](https://gophers.slack.com)
+
+## Scope
+
+This Code of Conduct applies both within project spaces and in public spaces
+when an individual is representing the project or its community. Examples of
+representing a project or community include using an official project e-mail
+address, posting via an official social media account, or acting as an appointed
+representative at an online or offline event. Representation of a project may be
+further defined and clarified by project maintainers.
+
+## Enforcement
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be
+reported by contacting the project team at open-source@twilio.com. All
+complaints will be reviewed and investigated and will result in a response that
+is deemed necessary and appropriate to the circumstances. The project team is
+obligated to maintain confidentiality with regard to the reporter of an incident.
+Further details of specific enforcement policies may be posted separately.
+
+Project maintainers who do not follow or enforce the Code of Conduct in good
+faith may face temporary or permanent repercussions as determined by other
+members of the project's leadership.
+
+## Attribution
+
+This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4,
+available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html
+
+[homepage]: https://www.contributor-covenant.org
diff -pruN 0.2.1-1.1/CONTRIBUTING.md 0.4.49+ds1-1/CONTRIBUTING.md
--- 0.2.1-1.1/CONTRIBUTING.md	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/CONTRIBUTING.md	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,139 @@
+# Contributing to kafka-go
+
+kafka-go is an open source project.  We welcome contributions to kafka-go of any kind including documentation,
+organization, tutorials, bug reports, issues, feature requests, feature implementations, pull requests, etc.
+
+## Table of Contents
+
+* [Reporting Issues](#reporting-issues)
+* [Submitting Patches](#submitting-patches)
+  * [Code Contribution](#code-contribution)
+  * [Git Commit Message Guidelines](#git-commit-message-guidelines)
+  * [Fetching the Sources From GitHub](#fetching-the-sources-from-github)
+  * [Building kafka-go with Your Changes](#building-kakfa-go-with-your-changes)
+
+## Reporting Issues
+
+If you believe you have found a defect in kafka-go, use the GitHub issue tracker to report
+the problem to the maintainers.  
+When reporting the issue, please provide the version of kafka-go, the version(s) of Kafka
+you are testing against, and your operating system.
+
+ - [kafka-go Issues segmentio/kafka-go](https://github.com/segmentio/kafka-go/issues)
+
+## Submitting Patches
+
+The kafka-go project welcomes all contributors and contributions regardless of skill or experience level.  If you are
+interested in helping with the project, we will help you with your contribution.
+
+### Code Contribution
+
+To make contributions as seamless as possible, we ask the following:
+
+* Go ahead and fork the project and make your changes.  We encourage pull requests to allow for review and discussion of code changes.
+* When you’re ready to create a pull request, be sure to:
+    * Have test cases for the new code. If you have questions about how to do this, please ask in your pull request.
+    * Run `go fmt`.
+    * Squash your commits into a single commit. `git rebase -i`. It’s okay to force update your pull request with `git push -f`.
+    * Follow the **Git Commit Message Guidelines** below.
+
+### Git Commit Message Guidelines
+
+This [blog article](http://chris.beams.io/posts/git-commit/) is a good resource for learning how to write good commit messages,
+the most important part being that each commit message should have a title/subject in imperative mood starting with a capital letter and no trailing period:
+*"Return error on wrong use of the Reader"*, **NOT** *"returning some error."*
+
+Also, if your commit references one or more GitHub issues, always end your commit message body with *See #1234* or *Fixes #1234*.
+Replace *1234* with the GitHub issue ID. The last example will close the issue when the commit is merged into *master*.
+
+Please use a short and descriptive branch name, e.g. NOT "patch-1". Generic names are very common and create a naming
+conflict each time a submission is pulled for review.
+
+An example:
+
+```text
+Add Code of Conduct and Code Contribution Guidelines
+
+Add a full Code of Conduct and Code Contribution Guidelines document. 
+Provide description on how best to retrieve code, fork, checkout, and commit changes.
+
+Fixes #688
+```
+
+### Fetching the Sources From GitHub
+
+We use Go Modules support built into Go 1.11 to build.  The easiest way is to clone kafka-go into a directory outside of
+`GOPATH`, as in the following example:
+
+```bash
+mkdir $HOME/src
+cd $HOME/src
+git clone https://github.com/segmentio/kafka-go.git
+cd kafka-go
+go build ./...
+```
+
+To make changes to kafka-go's source:
+
+1. Create a new branch for your changes (the branch name is arbitrary):
+
+    ```bash
+    git checkout -b branch1234
+    ```
+
+1. After making your changes, commit them to your new branch:
+
+    ```bash
+    git commit -a -v
+    ```
+
+1. Fork kafka-go in GitHub
+
+1. Add your fork as a new remote (the remote name, "upstream" in this example, is arbitrary):
+
+    ```bash
+    git remote add upstream git@github.com:USERNAME/kafka-go.git
+    ```
+
+1. Push your branch (the remote name, "upstream" in this example, is arbitrary):
+
+   ```bash
+   git push upstream branch1234
+   ```
+
+1. You are now ready to submit a PR based upon the new branch in your forked repository.
+
+### Using the forked library
+
+To replace the original kafka-go library with a forked version, follow these steps.
+
+1. Make sure your application already has a go.mod entry depending on kafka-go
+
+    ```
+    module github.com/myusername/myapp
+
+    require (
+        ...
+        github.com/segmentio/kafka-go v1.2.3
+        ...
+    )
+    ```
+
+1. Add the following `replace` entry near the top of your go.mod file.
+
+    ```
+    module github.com/myusername/myapp
+
+    replace github.com/segmentio/kafka-go v1.2.3 => ../local/directory
+
+    require (
+        ...
+        github.com/segmentio/kafka-go v1.2.3
+        ...
+    )
+    ```
+
+1. Depending on whether you are vendoring dependencies, you may need to run the following command to pull in the new bits.
+
+    ```bash
+    > go mod vendor
+    ```
diff -pruN 0.2.1-1.1/Makefile 0.4.49+ds1-1/Makefile
--- 0.2.1-1.1/Makefile	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/Makefile	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,7 @@
+test:
+	KAFKA_SKIP_NETTEST=1 \
+	KAFKA_VERSION=2.3.1 \
+	go test -race -cover ./...
+
+docker:
+	docker compose up -d
diff -pruN 0.2.1-1.1/README.md 0.4.49+ds1-1/README.md
--- 0.2.1-1.1/README.md	2018-11-20 17:06:32.000000000 +0000
+++ 0.4.49+ds1-1/README.md	2025-08-21 19:15:53.000000000 +0000
@@ -30,6 +30,22 @@ APIs for interacting with Kafka, mirrori
 the Go standard library to make it easy to use and integrate with existing
 software.
 
+#### Note:
+
+In order to better align with our newly adopted Code of Conduct, the kafka-go
+project has renamed its default branch to `main`. For the full details of our
+Code of Conduct, see [this](./CODE_OF_CONDUCT.md) document.
+
+## Kafka versions
+
+`kafka-go` is currently tested with Kafka versions 0.10.1.0 to 2.7.1.
+While it should also be compatible with later versions, newer features available
+in the Kafka API may not yet be implemented in the client.
+
+## Go versions
+
+`kafka-go` requires Go version 1.15 or later.
+
 ## Connection [![GoDoc](https://godoc.org/github.com/segmentio/kafka-go?status.svg)](https://godoc.org/github.com/segmentio/kafka-go#Conn)
 
 The `Conn` type is the core of the `kafka-go` package. It wraps around a raw
@@ -41,40 +57,147 @@ Here are some examples showing typical u
 topic := "my-topic"
 partition := 0
 
-conn, _ := kafka.DialLeader(context.Background(), "tcp", "localhost:9092", topic, partition)
+conn, err := kafka.DialLeader(context.Background(), "tcp", "localhost:9092", topic, partition)
+if err != nil {
+    log.Fatal("failed to dial leader:", err)
+}
 
 conn.SetWriteDeadline(time.Now().Add(10*time.Second))
-conn.WriteMessages(
+_, err = conn.WriteMessages(
     kafka.Message{Value: []byte("one!")},
     kafka.Message{Value: []byte("two!")},
     kafka.Message{Value: []byte("three!")},
 )
+if err != nil {
+    log.Fatal("failed to write messages:", err)
+}
 
-conn.Close()
+if err := conn.Close(); err != nil {
+    log.Fatal("failed to close writer:", err)
+}
 ```
 ```go
 // to consume messages
 topic := "my-topic"
 partition := 0
 
-conn, _ := kafka.DialLeader(context.Background(), "tcp", "localhost:9092", topic, partition)
+conn, err := kafka.DialLeader(context.Background(), "tcp", "localhost:9092", topic, partition)
+if err != nil {
+    log.Fatal("failed to dial leader:", err)
+}
 
 conn.SetReadDeadline(time.Now().Add(10*time.Second))
 batch := conn.ReadBatch(10e3, 1e6) // fetch 10KB min, 1MB max
 
 b := make([]byte, 10e3) // 10KB max per message
 for {
-    _, err := batch.Read(b)
+    n, err := batch.Read(b)
     if err != nil {
         break
     }
-    fmt.Println(string(b))
+    fmt.Println(string(b[:n]))
+}
+
+if err := batch.Close(); err != nil {
+    log.Fatal("failed to close batch:", err)
 }
 
-batch.Close()
-conn.Close()
+if err := conn.Close(); err != nil {
+    log.Fatal("failed to close connection:", err)
+}
+```
+
+### To Create Topics
+By default, Kafka has `auto.create.topics.enable='true'` (`KAFKA_CFG_AUTO_CREATE_TOPICS_ENABLE='true'` in the bitnami/kafka docker image). If this value is set to `'true'`, then topics will be created as a side effect of `kafka.DialLeader` like so:
+```go
+// to create topics when auto.create.topics.enable='true'
+conn, err := kafka.DialLeader(context.Background(), "tcp", "localhost:9092", "my-topic", 0)
+if err != nil {
+    panic(err.Error())
+}
+```
+
+If `auto.create.topics.enable='false'` then you will need to create topics explicitly like so:
+```go
+// to create topics when auto.create.topics.enable='false'
+topic := "my-topic"
+
+conn, err := kafka.Dial("tcp", "localhost:9092")
+if err != nil {
+    panic(err.Error())
+}
+defer conn.Close()
+
+controller, err := conn.Controller()
+if err != nil {
+    panic(err.Error())
+}
+var controllerConn *kafka.Conn
+controllerConn, err = kafka.Dial("tcp", net.JoinHostPort(controller.Host, strconv.Itoa(controller.Port)))
+if err != nil {
+    panic(err.Error())
+}
+defer controllerConn.Close()
+
+topicConfigs := []kafka.TopicConfig{
+    {
+        Topic:             topic,
+        NumPartitions:     1,
+        ReplicationFactor: 1,
+    },
+}
+
+err = controllerConn.CreateTopics(topicConfigs...)
+if err != nil {
+    panic(err.Error())
+}
 ```
 
+### To Connect To Leader Via a Non-leader Connection
+```go
+// to connect to the kafka leader via an existing non-leader connection rather than using DialLeader
+conn, err := kafka.Dial("tcp", "localhost:9092")
+if err != nil {
+    panic(err.Error())
+}
+defer conn.Close()
+controller, err := conn.Controller()
+if err != nil {
+    panic(err.Error())
+}
+var connLeader *kafka.Conn
+connLeader, err = kafka.Dial("tcp", net.JoinHostPort(controller.Host, strconv.Itoa(controller.Port)))
+if err != nil {
+    panic(err.Error())
+}
+defer connLeader.Close()
+```
+
+### To list topics
+```go
+conn, err := kafka.Dial("tcp", "localhost:9092")
+if err != nil {
+    panic(err.Error())
+}
+defer conn.Close()
+
+partitions, err := conn.ReadPartitions()
+if err != nil {
+    panic(err.Error())
+}
+
+m := map[string]struct{}{}
+
+for _, p := range partitions {
+    m[p.Topic] = struct{}{}
+}
+for k := range m {
+    fmt.Println(k)
+}
+```
+
+
 Because it is low level, the `Conn` type turns out to be a great building block
 for higher level abstractions, like the `Reader` for example.
 
@@ -87,13 +210,21 @@ A `Reader` also automatically handles re
 exposes an API that supports asynchronous cancellations and timeouts using Go
 contexts.
 
+Note that it is important to call `Close()` on a `Reader` when a process exits.
+The kafka server needs a graceful disconnect to stop it from continuing to
+attempt to send messages to the connected clients. The given example will not
+call `Close()` if the process is terminated with SIGINT (Ctrl-C at the shell) or
+SIGTERM (as `docker stop` or a kubernetes restart does). This can result in a
+delay when a new reader on the same topic connects (e.g. new process started
+or new container running). Use a `signal.Notify` handler to close the reader on
+process shutdown, as sketched after the example below.
+
 ```go
 // make a new reader that consumes from topic-A, partition 0, at offset 42
 r := kafka.NewReader(kafka.ReaderConfig{
-    Brokers:   []string{"localhost:9092"},
+    Brokers:   []string{"localhost:9092", "localhost:9093", "localhost:9094"},
     Topic:     "topic-A",
     Partition: 0,
-    MinBytes:  10e3, // 10KB
     MaxBytes:  10e6, // 10MB
 })
 r.SetOffset(42)
@@ -106,23 +237,24 @@ for {
     fmt.Printf("message at offset %d: %s = %s\n", m.Offset, string(m.Key), string(m.Value))
 }
 
-r.Close()
+if err := r.Close(); err != nil {
+    log.Fatal("failed to close reader:", err)
+}
 ```
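+
+As a minimal sketch (not part of the library API), a `signal.Notify` handler to
+close the reader on shutdown could look like the following, assuming the reader
+`r` from the example above and the additional `os`, `os/signal`, and `syscall`
+imports:
+
+```go
+// Close the reader when the process receives SIGINT or SIGTERM so the broker
+// sees a graceful disconnect (assumes r is the *kafka.Reader created above).
+sigchan := make(chan os.Signal, 1)
+signal.Notify(sigchan, syscall.SIGINT, syscall.SIGTERM)
+
+go func() {
+    <-sigchan
+    if err := r.Close(); err != nil {
+        log.Fatal("failed to close reader:", err)
+    }
+    os.Exit(0)
+}()
+```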
 
 ### Consumer Groups
 
 ```kafka-go``` also supports Kafka consumer groups including broker managed offsets.
-To enable consumer groups, simplify specify the GroupID in the ReaderConfig.
+To enable consumer groups, simply specify the GroupID in the ReaderConfig.
 
 ReadMessage automatically commits offsets when using consumer groups.
 
 ```go
 // make a new reader that consumes from topic-A
 r := kafka.NewReader(kafka.ReaderConfig{
-    Brokers:   []string{"localhost:9092"},
+    Brokers:   []string{"localhost:9092", "localhost:9093", "localhost:9094"},
     GroupID:   "consumer-group-id",
     Topic:     "topic-A",
-    MinBytes:  10e3, // 10KB
     MaxBytes:  10e6, // 10MB
 })
 
@@ -134,7 +266,9 @@ for {
     fmt.Printf("message at topic/partition/offset %v/%v/%v: %s = %s\n", m.Topic, m.Partition, m.Offset, string(m.Key), string(m.Value))
 }
 
-r.Close()
+if err := r.Close(); err != nil {
+    log.Fatal("failed to close reader:", err)
+}
 ```
 
 There are a number of limitations when using consumer groups:
@@ -158,10 +292,19 @@ for {
         break
     }
     fmt.Printf("message at topic/partition/offset %v/%v/%v: %s = %s\n", m.Topic, m.Partition, m.Offset, string(m.Key), string(m.Value))
-    r.CommitMessages(ctx, m)
+    if err := r.CommitMessages(ctx, m); err != nil {
+        log.Fatal("failed to commit messages:", err)
+    }
 }
 ```
 
+When committing messages in consumer groups, the message with the highest offset
+for a given topic/partition determines the value of the committed offset for
+that partition. For example, if messages at offsets 1, 2, and 3 of a single
+partition were retrieved by calls to `FetchMessage`, calling `CommitMessages`
+with the message at offset 3 will also result in committing the messages at
+offsets 1 and 2 for that partition.
+
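+A minimal sketch of this behaviour, reusing the reader `r` and context `ctx`
+from the example above (and assuming all fetched messages come from the same
+partition):
+
+```go
+// Fetch a few messages and commit only the last one; committing the highest
+// offset also commits the lower offsets on that partition.
+var last kafka.Message
+for i := 0; i < 3; i++ {
+    m, err := r.FetchMessage(ctx)
+    if err != nil {
+        log.Fatal("failed to fetch message:", err)
+    }
+    last = m
+}
+if err := r.CommitMessages(ctx, last); err != nil {
+    log.Fatal("failed to commit messages:", err)
+}
+```
+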
 ### Managing Commits
 
 By default, CommitMessages will synchronously commit offsets to Kafka.  For
@@ -172,10 +315,9 @@ by setting CommitInterval on the ReaderC
 ```go
 // make a new reader that consumes from topic-A
 r := kafka.NewReader(kafka.ReaderConfig{
-    Brokers:        []string{"localhost:9092"},
+    Brokers:        []string{"localhost:9092", "localhost:9093", "localhost:9094"},
     GroupID:        "consumer-group-id",
     Topic:          "topic-A",
-    MinBytes:       10e3, // 10KB
     MaxBytes:       10e6, // 10MB
     CommitInterval: time.Second, // flushes commits to Kafka every second
 })
@@ -192,16 +334,17 @@ to use in most cases as it provides addi
 - Synchronous or asynchronous writes of messages to Kafka.
 - Asynchronous cancellation using contexts.
 - Flushing of pending messages on close to support graceful shutdowns.
+- Creation of a missing topic before publishing a message. *Note:* this was the default behaviour up to version `v0.4.30`.
 
 ```go
 // make a writer that produces to topic-A, using the least-bytes distribution
-w := kafka.NewWriter(kafka.WriterConfig{
-	Brokers: []string{"localhost:9092"},
+w := &kafka.Writer{
+	Addr:     kafka.TCP("localhost:9092", "localhost:9093", "localhost:9094"),
 	Topic:   "topic-A",
 	Balancer: &kafka.LeastBytes{},
-})
+}
 
-w.WriteMessages(context.Background(),
+err := w.WriteMessages(context.Background(),
 	kafka.Message{
 		Key:   []byte("Key-A"),
 		Value: []byte("Hello World!"),
@@ -215,44 +358,182 @@ w.WriteMessages(context.Background(),
 		Value: []byte("Two!"),
 	},
 )
+if err != nil {
+    log.Fatal("failed to write messages:", err)
+}
 
-w.Close()
+if err := w.Close(); err != nil {
+    log.Fatal("failed to close writer:", err)
+}
 ```
 
-**Note:** Even though kafka.Message contain ```Topic``` and ```Partition``` fields, they **MUST NOT** be
-set when writing messages.  They are intended for read use only.
+### Missing topic creation before publication
 
-### Compatibility with Sarama
+```go
+// Make a writer that publishes messages to topic-A.
+// The topic will be created if it is missing.
+w := &kafka.Writer{
+    Addr:                   kafka.TCP("localhost:9092", "localhost:9093", "localhost:9094"),
+    Topic:                  "topic-A",
+    AllowAutoTopicCreation: true,
+}
 
-If you're switching from Sarama and need/want to use the same algorithm for message
-partitioning, you can use the ```kafka.Hash``` balancer.  ```kafka.Hash``` routes
-messages to the same partitions that sarama's default partitioner would route to.
+messages := []kafka.Message{
+    {
+        Key:   []byte("Key-A"),
+        Value: []byte("Hello World!"),
+    },
+    {
+        Key:   []byte("Key-B"),
+        Value: []byte("One!"),
+    },
+    {
+        Key:   []byte("Key-C"),
+        Value: []byte("Two!"),
+    },
+}
+
+var err error
+const retries = 3
+for i := 0; i < retries; i++ {
+    ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+    defer cancel()
+    
+    // attempt to create topic prior to publishing the message
+    err = w.WriteMessages(ctx, messages...)
+    if errors.Is(err, kafka.LeaderNotAvailable) || errors.Is(err, context.DeadlineExceeded) {
+        time.Sleep(time.Millisecond * 250)
+        continue
+    }
+
+    if err != nil {
+        log.Fatalf("unexpected error %v", err)
+    }
+    break
+}
+
+if err := w.Close(); err != nil {
+    log.Fatal("failed to close writer:", err)
+}
+```
+
+### Writing to multiple topics
+
+Normally, the `WriterConfig.Topic` is used to initialize a single-topic writer.
+By excluding that particular configuration, you are given the ability to define
+the topic on a per-message basis by setting `Message.Topic`.
 
 ```go
-w := kafka.NewWriter(kafka.WriterConfig{
-	Brokers: []string{"localhost:9092"},
-	Topic:   "topic-A",
+w := &kafka.Writer{
+	Addr:     kafka.TCP("localhost:9092", "localhost:9093", "localhost:9094"),
+	// NOTE: When Topic is not defined here, each Message must define it instead.
+	Balancer: &kafka.LeastBytes{},
+}
+
+err := w.WriteMessages(context.Background(),
+	// NOTE: Each Message has Topic defined, otherwise an error is returned.
+	kafka.Message{
+		Topic: "topic-A",
+		Key:   []byte("Key-A"),
+		Value: []byte("Hello World!"),
+	},
+	kafka.Message{
+		Topic: "topic-B",
+		Key:   []byte("Key-B"),
+		Value: []byte("One!"),
+	},
+	kafka.Message{
+		Topic: "topic-C",
+		Key:   []byte("Key-C"),
+		Value: []byte("Two!"),
+	},
+)
+if err != nil {
+    log.Fatal("failed to write messages:", err)
+}
+
+if err := w.Close(); err != nil {
+    log.Fatal("failed to close writer:", err)
+}
+```
+
+**NOTE:** These two patterns are mutually exclusive: if you set `Writer.Topic`,
+you must not also explicitly define `Message.Topic` on the messages you are
+writing. The opposite applies when you do not define a topic for the writer.
+The `Writer` will return an error if it detects this ambiguity.
+
+### Compatibility with other clients
+
+#### Sarama
+
+If you're switching from Sarama and need/want to use the same algorithm for message partitioning, you can either use 
+the `kafka.Hash` balancer or the `kafka.ReferenceHash` balancer:
+* `kafka.Hash` = `sarama.NewHashPartitioner`
+* `kafka.ReferenceHash` = `sarama.NewReferenceHashPartitioner`
+
+The `kafka.Hash` and `kafka.ReferenceHash` balancers would route messages to the same partitions that the two 
+aforementioned Sarama partitioners would route them to.
+
+```go
+w := &kafka.Writer{
+	Addr:     kafka.TCP("localhost:9092", "localhost:9093", "localhost:9094"),
+	Topic:    "topic-A",
 	Balancer: &kafka.Hash{},
-})
+}
+```
+
+#### librdkafka and confluent-kafka-go
+
+Use the ```kafka.CRC32Balancer``` balancer to get the same behaviour as librdkafka's
+default ```consistent_random``` partition strategy.
+
+```go
+w := &kafka.Writer{
+	Addr:     kafka.TCP("localhost:9092", "localhost:9093", "localhost:9094"),
+	Topic:    "topic-A",
+	Balancer: kafka.CRC32Balancer{},
+}
+```
+
+#### Java
+
+Use the ```kafka.Murmur2Balancer``` balancer to get the same behaviour as the canonical
+Java client's default partitioner.  Note: the Java class allows you to directly specify
+the partition, which is not permitted here.
+
+```go
+w := &kafka.Writer{
+	Addr:     kafka.TCP("localhost:9092", "localhost:9093", "localhost:9094"),
+	Topic:    "topic-A",
+	Balancer: kafka.Murmur2Balancer{},
+}
 ```
 
 ### Compression
 
-Compression can be enable on the writer :
+Compression can be enabled on the `Writer` by setting the `Compression` field:
 
 ```go
-w := kafka.NewWriter(kafka.WriterConfig{
-	Brokers: []string{"localhost:9092"},
-	Topic:   "topic-A",
-	CompressionCodec: snappy.NewCompressionCodec(),
-})
+w := &kafka.Writer{
+	Addr:        kafka.TCP("localhost:9092", "localhost:9093", "localhost:9094"),
+	Topic:       "topic-A",
+	Compression: kafka.Snappy,
+}
 ```
 
-The reader will by default figure out if the consumed messages are compressed by intepreting the message attributes.
+The `Reader` will determine if the consumed messages are compressed by
+examining the message attributes.  However, the package(s) for all expected
+codecs must be imported so that they get loaded correctly.
+
+_Note: in versions prior to 0.4 programs had to import compression packages to
+install codecs and support reading compressed messages from kafka. This is no
+longer the case, and importing the compression packages is now a no-op._
 
 ## TLS Support
 
 For a bare bones Conn type or in the Reader/Writer configs you can specify a dialer option for TLS support. If the TLS field is nil, it will not connect with TLS.
+*Note:* Connecting to a Kafka cluster with TLS enabled without configuring TLS on the Conn/Reader/Writer can manifest in opaque io.ErrUnexpectedEOF errors.
+
 
 ### Connection
 
@@ -276,7 +557,7 @@ dialer := &kafka.Dialer{
 }
 
 r := kafka.NewReader(kafka.ReaderConfig{
-    Brokers:        []string{"localhost:9093"},
+    Brokers:        []string{"localhost:9092", "localhost:9093", "localhost:9094"},
     GroupID:        "consumer-group-id",
     Topic:          "topic-A",
     Dialer:         dialer,
@@ -285,6 +566,22 @@ r := kafka.NewReader(kafka.ReaderConfig{
 
 ### Writer
 
+
+Direct Writer creation
+
+```go
+w := kafka.Writer{
+    Addr:      kafka.TCP("localhost:9092", "localhost:9093", "localhost:9094"),
+    Topic:     "topic-A",
+    Balancer:  &kafka.Hash{},
+    Transport: &kafka.Transport{
+        TLS: &tls.Config{},
+    },
+}
+```
+
+Using `kafka.NewWriter`
+
 ```go
 dialer := &kafka.Dialer{
     Timeout:   10 * time.Second,
@@ -293,9 +590,215 @@ dialer := &kafka.Dialer{
 }
 
 w := kafka.NewWriter(kafka.WriterConfig{
-	Brokers: []string{"localhost:9093"},
+	Brokers: []string{"localhost:9092", "localhost:9093", "localhost:9094"},
 	Topic:   "topic-A",
 	Balancer: &kafka.Hash{},
 	Dialer:   dialer,
 })
 ```
+Note that `kafka.NewWriter` and `kafka.WriterConfig` are deprecated and will be removed in a future release.
+
+## SASL Support
+
+You can specify an option on the `Dialer` to use SASL authentication. The `Dialer` can be used directly to open a `Conn` or it can be passed to a `Reader` or `Writer` via their respective configs. If the `SASLMechanism` field is `nil`, it will not authenticate with SASL.
+
+### SASL Authentication Types
+
+#### [Plain](https://godoc.org/github.com/segmentio/kafka-go/sasl/plain#Mechanism)
+```go
+mechanism := plain.Mechanism{
+    Username: "username",
+    Password: "password",
+}
+```
+
+#### [SCRAM](https://godoc.org/github.com/segmentio/kafka-go/sasl/scram#Mechanism)
+```go
+mechanism, err := scram.Mechanism(scram.SHA512, "username", "password")
+if err != nil {
+    panic(err)
+}
+```
+
+### Connection
+
+```go
+mechanism, err := scram.Mechanism(scram.SHA512, "username", "password")
+if err != nil {
+    panic(err)
+}
+
+dialer := &kafka.Dialer{
+    Timeout:       10 * time.Second,
+    DualStack:     true,
+    SASLMechanism: mechanism,
+}
+
+conn, err := dialer.DialContext(ctx, "tcp", "localhost:9093")
+```
+
+
+### Reader
+
+```go
+mechanism, err := scram.Mechanism(scram.SHA512, "username", "password")
+if err != nil {
+    panic(err)
+}
+
+dialer := &kafka.Dialer{
+    Timeout:       10 * time.Second,
+    DualStack:     true,
+    SASLMechanism: mechanism,
+}
+
+r := kafka.NewReader(kafka.ReaderConfig{
+    Brokers:        []string{"localhost:9092", "localhost:9093", "localhost:9094"},
+    GroupID:        "consumer-group-id",
+    Topic:          "topic-A",
+    Dialer:         dialer,
+})
+```
+
+### Writer
+
+```go
+mechanism, err := scram.Mechanism(scram.SHA512, "username", "password")
+if err != nil {
+    panic(err)
+}
+
+// Transports are responsible for managing connection pools and other resources;
+// it's generally best to create a few of these and share them across your
+// application.
+sharedTransport := &kafka.Transport{
+    SASL: mechanism,
+}
+
+w := kafka.Writer{
+	Addr:      kafka.TCP("localhost:9092", "localhost:9093", "localhost:9094"),
+	Topic:     "topic-A",
+	Balancer:  &kafka.Hash{},
+	Transport: sharedTransport,
+}
+```
+
+### Client
+
+```go
+mechanism, err := scram.Mechanism(scram.SHA512, "username", "password")
+if err != nil {
+    panic(err)
+}
+
+// Transports are responsible for managing connection pools and other resources;
+// it's generally best to create a few of these and share them across your
+// application.
+sharedTransport := &kafka.Transport{
+    SASL: mechanism,
+}
+
+client := &kafka.Client{
+    Addr:      kafka.TCP("localhost:9092", "localhost:9093", "localhost:9094"),
+    Timeout:   10 * time.Second,
+    Transport: sharedTransport,
+}
+```
+
+#### Reading all messages within a time range
+
+```go
+startTime := time.Now().Add(-time.Hour)
+endTime := time.Now()
+batchSize := int(10e6) // 10MB
+
+r := kafka.NewReader(kafka.ReaderConfig{
+    Brokers:   []string{"localhost:9092", "localhost:9093", "localhost:9094"},
+    Topic:     "my-topic1",
+    Partition: 0,
+    MaxBytes:  batchSize,
+})
+
+r.SetOffsetAt(context.Background(), startTime)
+
+for {
+    m, err := r.ReadMessage(context.Background())
+
+    if err != nil {
+        break
+    }
+    if m.Time.After(endTime) {
+        break
+    }
+    // TODO: process message
+    fmt.Printf("message at offset %d: %s = %s\n", m.Offset, string(m.Key), string(m.Value))
+}
+
+if err := r.Close(); err != nil {
+    log.Fatal("failed to close reader:", err)
+}
+```
+
+
+## Logging
+
+For visibility into the operations of the Reader/Writer types, configure a logger on creation.
+
+
+### Reader
+
+```go
+func logf(msg string, a ...interface{}) {
+	fmt.Printf(msg, a...)
+	fmt.Println()
+}
+
+r := kafka.NewReader(kafka.ReaderConfig{
+	Brokers:     []string{"localhost:9092", "localhost:9093", "localhost:9094"},
+	Topic:       "my-topic1",
+	Partition:   0,
+	Logger:      kafka.LoggerFunc(logf),
+	ErrorLogger: kafka.LoggerFunc(logf),
+})
+```
+
+### Writer
+
+```go
+func logf(msg string, a ...interface{}) {
+	fmt.Printf(msg, a...)
+	fmt.Println()
+}
+
+w := &kafka.Writer{
+	Addr:        kafka.TCP("localhost:9092"),
+	Topic:       "topic",
+	Logger:      kafka.LoggerFunc(logf),
+	ErrorLogger: kafka.LoggerFunc(logf),
+}
+```
+
+
+
+## Testing
+
+Subtle behavior changes in later Kafka versions have caused some historical tests to break. If you are running against Kafka 2.3.1 or later, exporting the `KAFKA_SKIP_NETTEST=1` environment variable will skip those tests.
+
+Run Kafka locally in docker
+
+```bash
+docker-compose up -d
+```
+
+Run tests
+
+```bash
+KAFKA_VERSION=2.3.1 \
+  KAFKA_SKIP_NETTEST=1 \
+  go test -race ./...
+```
+
+Or, to clean up the cached test results and run the tests:
+```bash
+go clean -cache && make test
+```
diff -pruN 0.2.1-1.1/addoffsetstotxn.go 0.4.49+ds1-1/addoffsetstotxn.go
--- 0.2.1-1.1/addoffsetstotxn.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/addoffsetstotxn.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,67 @@
+package kafka
+
+import (
+	"context"
+	"fmt"
+	"net"
+	"time"
+
+	"github.com/segmentio/kafka-go/protocol/addoffsetstotxn"
+)
+
+// AddOffsetsToTxnRequest is the request structure for the AddOffsetsToTxn function.
+type AddOffsetsToTxnRequest struct {
+	// Address of the kafka broker to send the request to.
+	Addr net.Addr
+
+	// The transactional id key
+	TransactionalID string
+
+	// The Producer ID (PID) for the current producer session;
+	// received from an InitProducerID request.
+	ProducerID int
+
+	// The epoch associated with the current producer session for the given PID
+	ProducerEpoch int
+
+	// The unique group identifier.
+	GroupID string
+}
+
+// AddOffsetsToTxnResponse is the response structure for the AddOffsetsToTxn function.
+type AddOffsetsToTxnResponse struct {
+	// The amount of time that the broker throttled the request.
+	Throttle time.Duration
+
+	// An error that may have occurred when attempting to add the offsets
+	// to a transaction.
+	//
+	// The errors contain the kafka error code. Programs may use the standard
+	// errors.Is function to test the error against kafka error codes.
+	Error error
+}
+
+// AddOffsetsToTxn sends an add offsets to txn request to a kafka broker and returns the response.
+func (c *Client) AddOffsetsToTxn(
+	ctx context.Context,
+	req *AddOffsetsToTxnRequest,
+) (*AddOffsetsToTxnResponse, error) {
+	m, err := c.roundTrip(ctx, req.Addr, &addoffsetstotxn.Request{
+		TransactionalID: req.TransactionalID,
+		ProducerID:      int64(req.ProducerID),
+		ProducerEpoch:   int16(req.ProducerEpoch),
+		GroupID:         req.GroupID,
+	})
+	if err != nil {
+		return nil, fmt.Errorf("kafka.(*Client).AddOffsetsToTxn: %w", err)
+	}
+
+	r := m.(*addoffsetstotxn.Response)
+
+	res := &AddOffsetsToTxnResponse{
+		Throttle: makeDuration(r.ThrottleTimeMs),
+		Error:    makeError(r.ErrorCode, ""),
+	}
+
+	return res, nil
+}
diff -pruN 0.2.1-1.1/addoffsetstotxn_test.go 0.4.49+ds1-1/addoffsetstotxn_test.go
--- 0.2.1-1.1/addoffsetstotxn_test.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/addoffsetstotxn_test.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,129 @@
+package kafka
+
+import (
+	"context"
+	"log"
+	"os"
+	"testing"
+	"time"
+
+	ktesting "github.com/segmentio/kafka-go/testing"
+)
+
+func TestClientAddOffsetsToTxn(t *testing.T) {
+	if !ktesting.KafkaIsAtLeast("0.11.0") {
+		t.Skip("Skipping test because kafka version is not high enough.")
+	}
+
+	// TODO: look into why this test fails on Kafka 3.0.0 and higher when transactional support
+	// work is revisited.
+	if ktesting.KafkaIsAtLeast("3.0.0") {
+		t.Skip("Skipping test because it fails on Kafka version 3.0.0 or higher.")
+	}
+
+	topic := makeTopic()
+	transactionalID := makeTransactionalID()
+	client, shutdown := newLocalClient()
+	defer shutdown()
+
+	err := clientCreateTopic(client, topic, 3)
+	defer deleteTopic(t, topic)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	ctx, cancel := context.WithTimeout(context.Background(), time.Second*30)
+	waitForTopic(ctx, t, topic)
+	defer cancel()
+	respc, err := waitForCoordinatorIndefinitely(ctx, client, &FindCoordinatorRequest{
+		Addr:    client.Addr,
+		Key:     transactionalID,
+		KeyType: CoordinatorKeyTypeConsumer,
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if respc.Error != nil {
+		t.Fatal(err)
+	}
+
+	groupID := makeGroupID()
+
+	group, err := NewConsumerGroup(ConsumerGroupConfig{
+		ID:                groupID,
+		Topics:            []string{topic},
+		Brokers:           []string{"localhost:9092"},
+		HeartbeatInterval: 2 * time.Second,
+		RebalanceTimeout:  2 * time.Second,
+		RetentionTime:     time.Hour,
+		Logger:            log.New(os.Stdout, "cg-test: ", 0),
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer group.Close()
+
+	ctx, cancel = context.WithTimeout(context.Background(), time.Second*30)
+	defer cancel()
+	_, err = group.Next(ctx)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	ctx, cancel = context.WithTimeout(context.Background(), time.Second*30)
+	defer cancel()
+	respc, err = waitForCoordinatorIndefinitely(ctx, client, &FindCoordinatorRequest{
+		Addr:    client.Addr,
+		Key:     transactionalID,
+		KeyType: CoordinatorKeyTypeTransaction,
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if respc.Error != nil {
+		t.Fatal(err)
+	}
+
+	ipResp, err := client.InitProducerID(ctx, &InitProducerIDRequest{
+		TransactionalID:      transactionalID,
+		TransactionTimeoutMs: 10000,
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if ipResp.Error != nil {
+		t.Fatal(ipResp.Error)
+	}
+
+	defer func() {
+		err := clientEndTxn(client, &EndTxnRequest{
+			TransactionalID: transactionalID,
+			ProducerID:      ipResp.Producer.ProducerID,
+			ProducerEpoch:   ipResp.Producer.ProducerEpoch,
+			Committed:       false,
+		})
+		if err != nil {
+			t.Fatal(err)
+		}
+	}()
+
+	ctx, cancel = context.WithTimeout(context.Background(), time.Second*30)
+	defer cancel()
+
+	resp, err := client.AddOffsetsToTxn(ctx, &AddOffsetsToTxnRequest{
+		TransactionalID: transactionalID,
+		ProducerID:      ipResp.Producer.ProducerID,
+		ProducerEpoch:   ipResp.Producer.ProducerEpoch,
+		GroupID:         groupID,
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if resp.Error != nil {
+		t.Fatal(err)
+	}
+}
diff -pruN 0.2.1-1.1/addpartitionstotxn.go 0.4.49+ds1-1/addpartitionstotxn.go
--- 0.2.1-1.1/addpartitionstotxn.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/addpartitionstotxn.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,108 @@
+package kafka
+
+import (
+	"context"
+	"fmt"
+	"net"
+	"time"
+
+	"github.com/segmentio/kafka-go/protocol/addpartitionstotxn"
+)
+
+// AddPartitionToTxn represents a partition to be added
+// to a transaction.
+type AddPartitionToTxn struct {
+	// Partition is the ID of a partition to add to the transaction.
+	Partition int
+}
+
+// AddPartitionsToTxnRequest is the request structure for the AddPartitionsToTxn function.
+type AddPartitionsToTxnRequest struct {
+	// Address of the kafka broker to send the request to.
+	Addr net.Addr
+
+	// The transactional id key
+	TransactionalID string
+
+	// The Producer ID (PID) for the current producer session;
+	// received from an InitProducerID request.
+	ProducerID int
+
+	// The epoch associated with the current producer session for the given PID
+	ProducerEpoch int
+
+	// Mappings of topic names to lists of partitions.
+	Topics map[string][]AddPartitionToTxn
+}
+
+// AddPartitionsToTxnResponse is the response structure for the AddPartitionsToTxn function.
+type AddPartitionsToTxnResponse struct {
+	// The amount of time that the broker throttled the request.
+	Throttle time.Duration
+
+	// Mappings of topic names to partitions being added to a transaction.
+	Topics map[string][]AddPartitionToTxnPartition
+}
+
+// AddPartitionToTxnPartition represents the state of a single partition
+// in response to adding to a transaction.
+type AddPartitionToTxnPartition struct {
+	// The ID of the partition.
+	Partition int
+
+	// An error that may have occurred when attempting to add the partition
+	// to a transaction.
+	//
+	// The errors contain the kafka error code. Programs may use the standard
+	// errors.Is function to test the error against kafka error codes.
+	Error error
+}
+
+// AddPartitionsToTxn sends an add partitions to txn request to a kafka broker and returns the response.
+func (c *Client) AddPartitionsToTxn(
+	ctx context.Context,
+	req *AddPartitionsToTxnRequest,
+) (*AddPartitionsToTxnResponse, error) {
+	protoReq := &addpartitionstotxn.Request{
+		TransactionalID: req.TransactionalID,
+		ProducerID:      int64(req.ProducerID),
+		ProducerEpoch:   int16(req.ProducerEpoch),
+	}
+	protoReq.Topics = make([]addpartitionstotxn.RequestTopic, 0, len(req.Topics))
+
+	for topic, partitions := range req.Topics {
+		reqTopic := addpartitionstotxn.RequestTopic{
+			Name:       topic,
+			Partitions: make([]int32, len(partitions)),
+		}
+		for i, partition := range partitions {
+			reqTopic.Partitions[i] = int32(partition.Partition)
+		}
+		protoReq.Topics = append(protoReq.Topics, reqTopic)
+	}
+
+	m, err := c.roundTrip(ctx, req.Addr, protoReq)
+	if err != nil {
+		return nil, fmt.Errorf("kafka.(*Client).AddPartitionsToTxn: %w", err)
+	}
+
+	r := m.(*addpartitionstotxn.Response)
+
+	res := &AddPartitionsToTxnResponse{
+		Throttle: makeDuration(r.ThrottleTimeMs),
+		Topics:   make(map[string][]AddPartitionToTxnPartition, len(r.Results)),
+	}
+
+	for _, result := range r.Results {
+		partitions := make([]AddPartitionToTxnPartition, 0, len(result.Results))
+		for _, rp := range result.Results {
+			partitions = append(partitions, AddPartitionToTxnPartition{
+				Partition: int(rp.PartitionIndex),
+				Error:     makeError(rp.ErrorCode, ""),
+			})
+		}
+		res.Topics[result.Name] = partitions
+	}
+
+	return res, nil
+}
diff -pruN 0.2.1-1.1/addpartitionstotxn_test.go 0.4.49+ds1-1/addpartitionstotxn_test.go
--- 0.2.1-1.1/addpartitionstotxn_test.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/addpartitionstotxn_test.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,133 @@
+package kafka
+
+import (
+	"context"
+	"net"
+	"strconv"
+	"testing"
+	"time"
+
+	ktesting "github.com/segmentio/kafka-go/testing"
+)
+
+func TestClientAddPartitionsToTxn(t *testing.T) {
+	if !ktesting.KafkaIsAtLeast("0.11.0") {
+		t.Skip("Skipping test because kafka version is not high enough.")
+	}
+
+	// TODO: look into why this test fails on Kafka 3.0.0 and higher when transactional support
+	// work is revisited.
+	if ktesting.KafkaIsAtLeast("3.0.0") {
+		t.Skip("Skipping test because it fails on Kafka version 3.0.0 or higher.")
+	}
+
+	topic1 := makeTopic()
+	topic2 := makeTopic()
+
+	client, shutdown := newLocalClient()
+	defer shutdown()
+
+	err := clientCreateTopic(client, topic1, 3)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	err = clientCreateTopic(client, topic2, 3)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	transactionalID := makeTransactionalID()
+
+	ctx, cancel := context.WithTimeout(context.Background(), time.Second*30)
+	defer cancel()
+	respc, err := waitForCoordinatorIndefinitely(ctx, client, &FindCoordinatorRequest{
+		Addr:    client.Addr,
+		Key:     transactionalID,
+		KeyType: CoordinatorKeyTypeTransaction,
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	transactionCoordinator := TCP(net.JoinHostPort(respc.Coordinator.Host, strconv.Itoa(int(respc.Coordinator.Port))))
+	client, shutdown = newClient(transactionCoordinator)
+	defer shutdown()
+
+	ipResp, err := client.InitProducerID(ctx, &InitProducerIDRequest{
+		TransactionalID:      transactionalID,
+		TransactionTimeoutMs: 10000,
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if ipResp.Error != nil {
+		t.Fatal(ipResp.Error)
+	}
+
+	defer func() {
+		err := clientEndTxn(client, &EndTxnRequest{
+			TransactionalID: transactionalID,
+			ProducerID:      ipResp.Producer.ProducerID,
+			ProducerEpoch:   ipResp.Producer.ProducerEpoch,
+			Committed:       false,
+		})
+		if err != nil {
+			t.Fatal(err)
+		}
+	}()
+
+	ctx, cancel = context.WithTimeout(context.Background(), time.Second*30)
+	defer cancel()
+	resp, err := client.AddPartitionsToTxn(ctx, &AddPartitionsToTxnRequest{
+		TransactionalID: transactionalID,
+		ProducerID:      ipResp.Producer.ProducerID,
+		ProducerEpoch:   ipResp.Producer.ProducerEpoch,
+		Topics: map[string][]AddPartitionToTxn{
+			topic1: {
+				{
+					Partition: 0,
+				},
+				{
+					Partition: 1,
+				},
+				{
+					Partition: 2,
+				},
+			},
+			topic2: {
+				{
+					Partition: 0,
+				},
+				{
+					Partition: 2,
+				},
+			},
+		},
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if len(resp.Topics) != 2 {
+		t.Errorf("expected responses for 2 topics; got: %d", len(resp.Topics))
+	}
+	for topic, partitions := range resp.Topics {
+		if topic == topic1 {
+			if len(partitions) != 3 {
+				t.Errorf("expected 3 partitions in response for topic %s; got: %d", topic, len(partitions))
+			}
+		}
+		if topic == topic2 {
+			if len(partitions) != 2 {
+				t.Errorf("expected 2 partitions in response for topic %s; got: %d", topic, len(partitions))
+			}
+		}
+		for _, partition := range partitions {
+			if partition.Error != nil {
+				t.Error(partition.Error)
+			}
+		}
+	}
+}
diff -pruN 0.2.1-1.1/address.go 0.4.49+ds1-1/address.go
--- 0.2.1-1.1/address.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/address.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,64 @@
+package kafka
+
+import (
+	"net"
+	"strings"
+)
+
+// TCP constructs an address with the network set to "tcp".
+func TCP(address ...string) net.Addr { return makeNetAddr("tcp", address) }
+
+func makeNetAddr(network string, addresses []string) net.Addr {
+	switch len(addresses) {
+	case 0:
+		return nil // maybe panic instead?
+	case 1:
+		return makeAddr(network, addresses[0])
+	default:
+		return makeMultiAddr(network, addresses)
+	}
+}
+
+func makeAddr(network, address string) net.Addr {
+	return &networkAddress{
+		network: network,
+		address: canonicalAddress(address),
+	}
+}
+
+func makeMultiAddr(network string, addresses []string) net.Addr {
+	multi := make(multiAddr, len(addresses))
+	for i, address := range addresses {
+		multi[i] = makeAddr(network, address)
+	}
+	return multi
+}
+
+type networkAddress struct {
+	network string
+	address string
+}
+
+func (a *networkAddress) Network() string { return a.network }
+
+func (a *networkAddress) String() string { return a.address }
+
+type multiAddr []net.Addr
+
+func (m multiAddr) Network() string { return m.join(net.Addr.Network) }
+
+func (m multiAddr) String() string { return m.join(net.Addr.String) }
+
+func (m multiAddr) join(f func(net.Addr) string) string {
+	switch len(m) {
+	case 0:
+		return ""
+	case 1:
+		return f(m[0])
+	}
+	s := make([]string, len(m))
+	for i, a := range m {
+		s[i] = f(a)
+	}
+	return strings.Join(s, ",")
+}
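The TCP helper above is the usual way to hand broker addresses to the rest of the package. The following is only an illustrative sketch, assuming the kafka.Client type defined elsewhere in this package (its Addr field is a net.Addr, as the request structs in the files below suggest); the broker host names are placeholders.

package main

import "github.com/segmentio/kafka-go"

func main() {
	// A single broker; the default port is appended when none is given, so
	// this resolves to "localhost:9092" (see address_test.go below).
	single := kafka.TCP("localhost")

	// Several brokers; the resulting multiAddr joins them with commas when
	// printed, e.g. "broker-1:9092,broker-2:9092,broker-3:9092".
	multi := kafka.TCP("broker-1:9092", "broker-2:9092", "broker-3:9092")

	client := &kafka.Client{Addr: multi}
	_ = single
	_ = client
}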
diff -pruN 0.2.1-1.1/address_test.go 0.4.49+ds1-1/address_test.go
--- 0.2.1-1.1/address_test.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/address_test.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,55 @@
+package kafka
+
+import (
+	"net"
+	"testing"
+)
+
+func TestNetworkAddress(t *testing.T) {
+	tests := []struct {
+		addr    net.Addr
+		network string
+		address string
+	}{
+		{
+			addr:    TCP("127.0.0.1"),
+			network: "tcp",
+			address: "127.0.0.1:9092",
+		},
+
+		{
+			addr:    TCP("::1"),
+			network: "tcp",
+			address: "[::1]:9092",
+		},
+
+		{
+			addr:    TCP("localhost"),
+			network: "tcp",
+			address: "localhost:9092",
+		},
+
+		{
+			addr:    TCP("localhost:9092"),
+			network: "tcp",
+			address: "localhost:9092",
+		},
+
+		{
+			addr:    TCP("localhost", "localhost:9093", "localhost:9094"),
+			network: "tcp,tcp,tcp",
+			address: "localhost:9092,localhost:9093,localhost:9094",
+		},
+	}
+
+	for _, test := range tests {
+		t.Run(test.network+"+"+test.address, func(t *testing.T) {
+			if s := test.addr.Network(); s != test.network {
+				t.Errorf("network mismatch: want %q but got %q", test.network, s)
+			}
+			if s := test.addr.String(); s != test.address {
+				t.Errorf("network mismatch: want %q but got %q", test.address, s)
+			}
+		})
+	}
+}
diff -pruN 0.2.1-1.1/alterclientquotas.go 0.4.49+ds1-1/alterclientquotas.go
--- 0.2.1-1.1/alterclientquotas.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/alterclientquotas.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,131 @@
+package kafka
+
+import (
+	"context"
+	"fmt"
+	"net"
+	"time"
+
+	"github.com/segmentio/kafka-go/protocol/alterclientquotas"
+)
+
+// AlterClientQuotasRequest represents a request sent to a kafka broker to
+// alter client quotas.
+type AlterClientQuotasRequest struct {
+	// Address of the kafka broker to send the request to.
+	Addr net.Addr
+
+	// List of client quota entries to alter.
+	Entries []AlterClientQuotaEntry
+
+	// Whether the alteration should be validated, but not performed.
+	ValidateOnly bool
+}
+
+type AlterClientQuotaEntry struct {
+	// The quota entities to alter.
+	Entities []AlterClientQuotaEntity
+
+	// The individual quota configuration entries to alter.
+	Ops []AlterClientQuotaOps
+}
+
+type AlterClientQuotaEntity struct {
+	// The quota entity type.
+	EntityType string
+
+	// The name of the quota entity, or null if the default.
+	EntityName string
+}
+
+type AlterClientQuotaOps struct {
+	// The quota configuration key.
+	Key string
+
+	// The quota configuration value to set; ignored when Remove is true.
+	Value float64
+
+	// Whether the quota configuration value should be removed rather than set.
+	Remove bool
+}
+
+type AlterClientQuotaResponseQuotas struct {
+	// Error is set to a non-nil value including the code and message if a top-level
+	// error was encountered when doing the update.
+	Error error
+
+	// The altered quota entities.
+	Entities []AlterClientQuotaEntity
+}
+
+// AlterClientQuotasResponse represents a response from a kafka broker to an alter client
+// quotas request.
+type AlterClientQuotasResponse struct {
+	// The amount of time that the broker throttled the request.
+	Throttle time.Duration
+
+	// List of altered client quotas responses.
+	Entries []AlterClientQuotaResponseQuotas
+}
+
+// AlterClientQuotas sends a client quotas alteration request to a kafka broker and returns
+// the response.
+func (c *Client) AlterClientQuotas(ctx context.Context, req *AlterClientQuotasRequest) (*AlterClientQuotasResponse, error) {
+	entries := make([]alterclientquotas.Entry, len(req.Entries))
+
+	for entryIdx, entry := range req.Entries {
+		entities := make([]alterclientquotas.Entity, len(entry.Entities))
+		for entityIdx, entity := range entry.Entities {
+			entities[entityIdx] = alterclientquotas.Entity{
+				EntityType: entity.EntityType,
+				EntityName: entity.EntityName,
+			}
+		}
+
+		ops := make([]alterclientquotas.Ops, len(entry.Ops))
+		for opsIdx, op := range entry.Ops {
+			ops[opsIdx] = alterclientquotas.Ops{
+				Key:    op.Key,
+				Value:  op.Value,
+				Remove: op.Remove,
+			}
+		}
+
+		entries[entryIdx] = alterclientquotas.Entry{
+			Entities: entities,
+			Ops:      ops,
+		}
+	}
+
+	m, err := c.roundTrip(ctx, req.Addr, &alterclientquotas.Request{
+		Entries:      entries,
+		ValidateOnly: req.ValidateOnly,
+	})
+	if err != nil {
+		return nil, fmt.Errorf("kafka.(*Client).AlterClientQuotas: %w", err)
+	}
+
+	res := m.(*alterclientquotas.Response)
+	responseEntries := make([]AlterClientQuotaResponseQuotas, len(res.Results))
+
+	for responseEntryIdx, responseEntry := range res.Results {
+		responseEntities := make([]AlterClientQuotaEntity, len(responseEntry.Entities))
+		for responseEntityIdx, responseEntity := range responseEntry.Entities {
+			responseEntities[responseEntityIdx] = AlterClientQuotaEntity{
+				EntityType: responseEntity.EntityType,
+				EntityName: responseEntity.EntityName,
+			}
+		}
+
+		responseEntries[responseEntryIdx] = AlterClientQuotaResponseQuotas{
+			Error:    makeError(responseEntry.ErrorCode, responseEntry.ErrorMessage),
+			Entities: responseEntities,
+		}
+	}
+	ret := &AlterClientQuotasResponse{
+		Throttle: makeDuration(res.ThrottleTimeMs),
+		Entries:  responseEntries,
+	}
+
+	return ret, nil
+}
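The test that follows only exercises setting a quota; clearing one goes through the same call with Remove set to true, in which case Value is ignored. A minimal sketch, with the broker address, entity and quota key purely illustrative:

package main

import (
	"context"
	"log"

	"github.com/segmentio/kafka-go"
)

func main() {
	client := &kafka.Client{Addr: kafka.TCP("localhost:9092")}

	// Remove the producer byte-rate quota previously set for this client-id.
	resp, err := client.AlterClientQuotas(context.Background(), &kafka.AlterClientQuotasRequest{
		Entries: []kafka.AlterClientQuotaEntry{{
			Entities: []kafka.AlterClientQuotaEntity{{
				EntityType: "client-id",
				EntityName: "my-client-id",
			}},
			Ops: []kafka.AlterClientQuotaOps{{
				Key:    "producer_byte_rate",
				Remove: true, // Value is ignored when Remove is true
			}},
		}},
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, entry := range resp.Entries {
		if entry.Error != nil {
			log.Fatal(entry.Error)
		}
	}
}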
diff -pruN 0.2.1-1.1/alterclientquotas_test.go 0.4.49+ds1-1/alterclientquotas_test.go
--- 0.2.1-1.1/alterclientquotas_test.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/alterclientquotas_test.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,110 @@
+package kafka
+
+import (
+	"context"
+	"testing"
+	"time"
+
+	ktesting "github.com/segmentio/kafka-go/testing"
+	"github.com/stretchr/testify/assert"
+)
+
+func TestClientAlterClientQuotas(t *testing.T) {
+	// Added in Version 2.6.0 https://issues.apache.org/jira/browse/KAFKA-7740
+	if !ktesting.KafkaIsAtLeast("2.6.0") {
+		return
+	}
+
+	const (
+		entityType = "client-id"
+		entityName = "my-client-id"
+		key        = "producer_byte_rate"
+		value      = 500000.0
+	)
+
+	client, shutdown := newLocalClient()
+	defer shutdown()
+
+	alterResp, err := client.AlterClientQuotas(context.Background(), &AlterClientQuotasRequest{
+		Entries: []AlterClientQuotaEntry{
+			{
+				Entities: []AlterClientQuotaEntity{
+					{
+						EntityType: entityType,
+						EntityName: entityName,
+					},
+				},
+				Ops: []AlterClientQuotaOps{
+					{
+						Key:    key,
+						Value:  value,
+						Remove: false,
+					},
+				},
+			},
+		},
+	})
+
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	expectedAlterResp := AlterClientQuotasResponse{
+		Throttle: 0,
+		Entries: []AlterClientQuotaResponseQuotas{
+			{
+				Error: makeError(0, ""),
+				Entities: []AlterClientQuotaEntity{
+					{
+						EntityName: entityName,
+						EntityType: entityType,
+					},
+				},
+			},
+		},
+	}
+
+	assert.Equal(t, expectedAlterResp, *alterResp)
+
+	// kraft mode is slow
+	if ktesting.KafkaIsAtLeast("3.7.0") {
+		time.Sleep(3 * time.Second)
+	}
+
+	describeResp, err := client.DescribeClientQuotas(context.Background(), &DescribeClientQuotasRequest{
+		Components: []DescribeClientQuotasRequestComponent{
+			{
+				EntityType: entityType,
+				MatchType:  0,
+				Match:      entityName,
+			},
+		},
+	})
+
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	expectedDescribeResp := DescribeClientQuotasResponse{
+		Throttle: 0,
+		Error:    makeError(0, ""),
+		Entries: []DescribeClientQuotasResponseQuotas{
+			{
+				Entities: []DescribeClientQuotasEntity{
+					{
+						EntityType: entityType,
+						EntityName: entityName,
+					},
+				},
+				Values: []DescribeClientQuotasValue{
+					{
+						Key:   key,
+						Value: value,
+					},
+				},
+			},
+		},
+	}
+
+	assert.Equal(t, expectedDescribeResp, *describeResp)
+}
diff -pruN 0.2.1-1.1/alterconfigs.go 0.4.49+ds1-1/alterconfigs.go
--- 0.2.1-1.1/alterconfigs.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/alterconfigs.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,107 @@
+package kafka
+
+import (
+	"context"
+	"fmt"
+	"net"
+	"time"
+
+	"github.com/segmentio/kafka-go/protocol/alterconfigs"
+)
+
+// AlterConfigsRequest represents a request sent to a kafka broker to alter configs.
+type AlterConfigsRequest struct {
+	// Address of the kafka broker to send the request to.
+	Addr net.Addr
+
+	// List of resources to update.
+	Resources []AlterConfigRequestResource
+
+	// When set to true, the configuration is not applied but is validated as
+	// if it were.
+	ValidateOnly bool
+}
+
+type AlterConfigRequestResource struct {
+	// Resource Type
+	ResourceType ResourceType
+
+	// Resource Name
+	ResourceName string
+
+	// Configs is a list of configuration updates.
+	Configs []AlterConfigRequestConfig
+}
+
+type AlterConfigRequestConfig struct {
+	// Configuration key name
+	Name string
+
+	// The value to set for the configuration key.
+	Value string
+}
+
+// AlterConfigsResponse represents a response from a kafka broker to an alter config request.
+type AlterConfigsResponse struct {
+	// Duration for which the request was throttled due to a quota violation.
+	Throttle time.Duration
+
+	// Mapping of resources to errors that occurred while attempting to alter
+	// their configuration.
+	//
+	// The errors contain the kafka error code. Programs may use the standard
+	// errors.Is function to test the error against kafka error codes.
+	Errors map[AlterConfigsResponseResource]error
+}
+
+// AlterConfigsResponseResource helps map errors to specific resources in an
+// alter config response.
+type AlterConfigsResponseResource struct {
+	Type int8
+	Name string
+}
+
+// AlterConfigs sends a config altering request to a kafka broker and returns the
+// response.
+func (c *Client) AlterConfigs(ctx context.Context, req *AlterConfigsRequest) (*AlterConfigsResponse, error) {
+	resources := make([]alterconfigs.RequestResources, len(req.Resources))
+
+	for i, t := range req.Resources {
+		configs := make([]alterconfigs.RequestConfig, len(t.Configs))
+		for j, v := range t.Configs {
+			configs[j] = alterconfigs.RequestConfig{
+				Name:  v.Name,
+				Value: v.Value,
+			}
+		}
+		resources[i] = alterconfigs.RequestResources{
+			ResourceType: int8(t.ResourceType),
+			ResourceName: t.ResourceName,
+			Configs:      configs,
+		}
+	}
+
+	m, err := c.roundTrip(ctx, req.Addr, &alterconfigs.Request{
+		Resources:    resources,
+		ValidateOnly: req.ValidateOnly,
+	})
+
+	if err != nil {
+		return nil, fmt.Errorf("kafka.(*Client).AlterConfigs: %w", err)
+	}
+
+	res := m.(*alterconfigs.Response)
+	ret := &AlterConfigsResponse{
+		Throttle: makeDuration(res.ThrottleTimeMs),
+		Errors:   make(map[AlterConfigsResponseResource]error, len(res.Responses)),
+	}
+
+	for _, t := range res.Responses {
+		ret.Errors[AlterConfigsResponseResource{
+			Type: t.ResourceType,
+			Name: t.ResourceName,
+		}] = makeError(t.ErrorCode, t.ErrorMessage)
+	}
+
+	return ret, nil
+}
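Because ValidateOnly never touches the cluster, a program can issue the same request twice: first as a dry run, then for real. A hedged sketch; the topic name, config key and broker address are illustrative:

package main

import (
	"context"
	"log"

	"github.com/segmentio/kafka-go"
)

func main() {
	client := &kafka.Client{Addr: kafka.TCP("localhost:9092")}

	req := &kafka.AlterConfigsRequest{
		Resources: []kafka.AlterConfigRequestResource{{
			ResourceType: kafka.ResourceTypeTopic,
			ResourceName: "my-topic",
			Configs: []kafka.AlterConfigRequestConfig{{
				Name:  "retention.ms",
				Value: "86400000",
			}},
		}},
		ValidateOnly: true, // dry run: validate the change without applying it
	}

	resp, err := client.AlterConfigs(context.Background(), req)
	if err != nil {
		log.Fatal(err)
	}
	for resource, rerr := range resp.Errors {
		if rerr != nil {
			log.Fatalf("validation failed for %s: %v", resource.Name, rerr)
		}
	}

	// Validation passed, apply the change for real.
	req.ValidateOnly = false
	if _, err := client.AlterConfigs(context.Background(), req); err != nil {
		log.Fatal(err)
	}
}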
diff -pruN 0.2.1-1.1/alterconfigs_test.go 0.4.49+ds1-1/alterconfigs_test.go
--- 0.2.1-1.1/alterconfigs_test.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/alterconfigs_test.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,67 @@
+package kafka
+
+import (
+	"context"
+	"testing"
+
+	ktesting "github.com/segmentio/kafka-go/testing"
+	"github.com/stretchr/testify/assert"
+)
+
+func TestClientAlterConfigs(t *testing.T) {
+	if !ktesting.KafkaIsAtLeast("0.11.0") {
+		return
+	}
+
+	const (
+		MaxMessageBytes      = "max.message.bytes"
+		MaxMessageBytesValue = "200000"
+	)
+
+	client, shutdown := newLocalClient()
+	defer shutdown()
+
+	topic := makeTopic()
+	createTopic(t, topic, 1)
+	defer deleteTopic(t, topic)
+
+	_, err := client.AlterConfigs(context.Background(), &AlterConfigsRequest{
+		Resources: []AlterConfigRequestResource{{
+			ResourceType: ResourceTypeTopic,
+			ResourceName: topic,
+			Configs: []AlterConfigRequestConfig{{
+				Name:  MaxMessageBytes,
+				Value: MaxMessageBytesValue,
+			},
+			},
+		}},
+	})
+
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	describeResp, err := client.DescribeConfigs(context.Background(), &DescribeConfigsRequest{
+		Resources: []DescribeConfigRequestResource{{
+			ResourceType: ResourceTypeTopic,
+			ResourceName: topic,
+			ConfigNames:  []string{MaxMessageBytes},
+		}},
+	})
+
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	maxMessageBytesValue := "0"
+	for _, resource := range describeResp.Resources {
+		if resource.ResourceType == int8(ResourceTypeTopic) && resource.ResourceName == topic {
+			for _, entry := range resource.ConfigEntries {
+				if entry.ConfigName == MaxMessageBytes {
+					maxMessageBytesValue = entry.ConfigValue
+				}
+			}
+		}
+	}
+	assert.Equal(t, maxMessageBytesValue, MaxMessageBytesValue)
+}
diff -pruN 0.2.1-1.1/alterpartitionreassignments.go 0.4.49+ds1-1/alterpartitionreassignments.go
--- 0.2.1-1.1/alterpartitionreassignments.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/alterpartitionreassignments.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,134 @@
+package kafka
+
+import (
+	"context"
+	"net"
+	"time"
+
+	"github.com/segmentio/kafka-go/protocol/alterpartitionreassignments"
+)
+
+// AlterPartitionReassignmentsRequest is a request to the AlterPartitionReassignments API.
+type AlterPartitionReassignmentsRequest struct {
+	// Address of the kafka broker to send the request to.
+	Addr net.Addr
+
+	// Topic is the name of the topic to alter partitions in. Leave this field empty and set Topic on each
+	// AlterPartitionReassignmentsRequestAssignment to reassign partitions across multiple topics.
+	Topic string
+
+	// Assignments is the list of partition reassignments to submit to the API.
+	Assignments []AlterPartitionReassignmentsRequestAssignment
+
+	// Timeout is the amount of time to wait for the request to complete.
+	Timeout time.Duration
+}
+
+// AlterPartitionReassignmentsRequestAssignment contains the requested reassignments for a single
+// partition.
+type AlterPartitionReassignmentsRequestAssignment struct {
+	// Topic is the name of the topic to alter partitions in. If empty, the value of Topic in AlterPartitionReassignmentsRequest is used.
+	Topic string
+
+	// PartitionID is the ID of the partition to make the reassignments in.
+	PartitionID int
+
+	// BrokerIDs is the list of broker IDs to set as the partition replicas, or nil to cancel a pending reassignment for this partition.
+	BrokerIDs []int
+}
+
+// AlterPartitionReassignmentsResponse is a response from the AlterPartitionReassignments API.
+type AlterPartitionReassignmentsResponse struct {
+	// Error is set to a non-nil value including the code and message if a top-level
+	// error was encountered when doing the update.
+	Error error
+
+	// PartitionResults contains the specific results for each partition.
+	PartitionResults []AlterPartitionReassignmentsResponsePartitionResult
+}
+
+// AlterPartitionReassignmentsResponsePartitionResult contains the detailed result of
+// doing reassignments for a single partition.
+type AlterPartitionReassignmentsResponsePartitionResult struct {
+	// Topic is the topic name.
+	Topic string
+
+	// PartitionID is the ID of the partition that was altered.
+	PartitionID int
+
+	// Error is set to a non-nil value including the code and message if an error was encountered
+	// during the update for this partition.
+	Error error
+}
+
+func (c *Client) AlterPartitionReassignments(
+	ctx context.Context,
+	req *AlterPartitionReassignmentsRequest,
+) (*AlterPartitionReassignmentsResponse, error) {
+	apiTopicMap := make(map[string]*alterpartitionreassignments.RequestTopic)
+
+	for _, assignment := range req.Assignments {
+		topic := assignment.Topic
+		if topic == "" {
+			topic = req.Topic
+		}
+
+		apiTopic := apiTopicMap[topic]
+		if apiTopic == nil {
+			apiTopic = &alterpartitionreassignments.RequestTopic{
+				Name: topic,
+			}
+			apiTopicMap[topic] = apiTopic
+		}
+
+		replicas := []int32{}
+		for _, brokerID := range assignment.BrokerIDs {
+			replicas = append(replicas, int32(brokerID))
+		}
+
+		apiTopic.Partitions = append(
+			apiTopic.Partitions,
+			alterpartitionreassignments.RequestPartition{
+				PartitionIndex: int32(assignment.PartitionID),
+				Replicas:       replicas,
+			},
+		)
+	}
+
+	apiReq := &alterpartitionreassignments.Request{
+		TimeoutMs: int32(req.Timeout.Milliseconds()),
+	}
+
+	for _, apiTopic := range apiTopicMap {
+		apiReq.Topics = append(apiReq.Topics, *apiTopic)
+	}
+
+	protoResp, err := c.roundTrip(
+		ctx,
+		req.Addr,
+		apiReq,
+	)
+	if err != nil {
+		return nil, err
+	}
+	apiResp := protoResp.(*alterpartitionreassignments.Response)
+
+	resp := &AlterPartitionReassignmentsResponse{
+		Error: makeError(apiResp.ErrorCode, apiResp.ErrorMessage),
+	}
+
+	for _, topicResult := range apiResp.Results {
+		for _, partitionResult := range topicResult.Partitions {
+			resp.PartitionResults = append(
+				resp.PartitionResults,
+				AlterPartitionReassignmentsResponsePartitionResult{
+					Topic:       topicResult.Name,
+					PartitionID: int(partitionResult.PartitionIndex),
+					Error:       makeError(partitionResult.ErrorCode, partitionResult.ErrorMessage),
+				},
+			)
+		}
+	}
+
+	return resp, nil
+}
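The tests that follow run against a single-broker cluster, so their reassignments are effectively no-ops; against a multi-broker cluster the same call is what actually moves replicas. A sketch under that assumption; the broker IDs, topic name and address are illustrative:

package main

import (
	"context"
	"log"
	"time"

	"github.com/segmentio/kafka-go"
)

func main() {
	client := &kafka.Client{Addr: kafka.TCP("broker-1:9092")}

	// Spread partition 0 of "events" across brokers 1, 2 and 3.
	resp, err := client.AlterPartitionReassignments(context.Background(),
		&kafka.AlterPartitionReassignmentsRequest{
			Topic: "events",
			Assignments: []kafka.AlterPartitionReassignmentsRequestAssignment{{
				PartitionID: 0,
				BrokerIDs:   []int{1, 2, 3},
			}},
			Timeout: 10 * time.Second,
		},
	)
	if err != nil {
		log.Fatal(err)
	}
	if resp.Error != nil {
		log.Fatal(resp.Error)
	}
	for _, result := range resp.PartitionResults {
		if result.Error != nil {
			log.Fatalf("partition %d: %v", result.PartitionID, result.Error)
		}
	}
}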
diff -pruN 0.2.1-1.1/alterpartitionreassignments_test.go 0.4.49+ds1-1/alterpartitionreassignments_test.go
--- 0.2.1-1.1/alterpartitionreassignments_test.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/alterpartitionreassignments_test.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,122 @@
+package kafka
+
+import (
+	"context"
+	"testing"
+	"time"
+
+	ktesting "github.com/segmentio/kafka-go/testing"
+)
+
+func TestClientAlterPartitionReassignments(t *testing.T) {
+	if !ktesting.KafkaIsAtLeast("2.4.0") {
+		return
+	}
+
+	ctx := context.Background()
+	client, shutdown := newLocalClient()
+	defer shutdown()
+
+	topic := makeTopic()
+	createTopic(t, topic, 2)
+	defer deleteTopic(t, topic)
+
+	// Local kafka only has 1 broker, so any partition reassignments are really no-ops.
+	resp, err := client.AlterPartitionReassignments(
+		ctx,
+		&AlterPartitionReassignmentsRequest{
+			Topic: topic,
+			Assignments: []AlterPartitionReassignmentsRequestAssignment{
+				{
+					PartitionID: 0,
+					BrokerIDs:   []int{1},
+				},
+				{
+					PartitionID: 1,
+					BrokerIDs:   []int{1},
+				},
+			},
+			Timeout: 5 * time.Second,
+		},
+	)
+
+	if err != nil {
+		t.Fatal(err)
+	}
+	if resp.Error != nil {
+		t.Error(
+			"Unexpected error in response",
+			"expected", nil,
+			"got", resp.Error,
+		)
+	}
+	if len(resp.PartitionResults) != 2 {
+		t.Error(
+			"Unexpected length of partition results",
+			"expected", 2,
+			"got", len(resp.PartitionResults),
+		)
+	}
+}
+
+func TestClientAlterPartitionReassignmentsMultiTopics(t *testing.T) {
+	if !ktesting.KafkaIsAtLeast("2.4.0") {
+		return
+	}
+
+	ctx := context.Background()
+	client, shutdown := newLocalClient()
+	defer shutdown()
+
+	topic1 := makeTopic()
+	topic2 := makeTopic()
+	createTopic(t, topic1, 2)
+	createTopic(t, topic2, 2)
+	defer func() {
+		deleteTopic(t, topic1)
+		deleteTopic(t, topic2)
+	}()
+
+	// Local kafka only has 1 broker, so any partition reassignments are really no-ops.
+	resp, err := client.AlterPartitionReassignments(
+		ctx,
+		&AlterPartitionReassignmentsRequest{
+			Assignments: []AlterPartitionReassignmentsRequestAssignment{
+				{
+					Topic:       topic1,
+					PartitionID: 0,
+					BrokerIDs:   []int{1},
+				},
+				{
+					Topic:       topic1,
+					PartitionID: 1,
+					BrokerIDs:   []int{1},
+				},
+				{
+					Topic:       topic2,
+					PartitionID: 0,
+					BrokerIDs:   []int{1},
+				},
+			},
+			Timeout: 5 * time.Second,
+		},
+	)
+
+	if err != nil {
+		t.Fatal(err)
+	}
+	if resp.Error != nil {
+		t.Error(
+			"Unexpected error in response",
+			"expected", nil,
+			"got", resp.Error,
+		)
+	}
+	if len(resp.PartitionResults) != 3 {
+		t.Error(
+			"Unexpected length of partition results",
+			"expected", 3,
+			"got", len(resp.PartitionResults),
+		)
+	}
+}
diff -pruN 0.2.1-1.1/alteruserscramcredentials.go 0.4.49+ds1-1/alteruserscramcredentials.go
--- 0.2.1-1.1/alteruserscramcredentials.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/alteruserscramcredentials.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,107 @@
+package kafka
+
+import (
+	"context"
+	"fmt"
+	"net"
+	"time"
+
+	"github.com/segmentio/kafka-go/protocol/alteruserscramcredentials"
+)
+
+// AlterUserScramCredentialsRequest represents a request sent to a kafka broker to
+// alter user scram credentials.
+type AlterUserScramCredentialsRequest struct {
+	// Address of the kafka broker to send the request to.
+	Addr net.Addr
+
+	// List of credentials to delete.
+	Deletions []UserScramCredentialsDeletion
+
+	// List of credentials to upsert.
+	Upsertions []UserScramCredentialsUpsertion
+}
+
+type ScramMechanism int8
+
+const (
+	ScramMechanismUnknown ScramMechanism = iota // 0
+	ScramMechanismSha256                        // 1
+	ScramMechanismSha512                        // 2
+)
+
+type UserScramCredentialsDeletion struct {
+	Name      string
+	Mechanism ScramMechanism
+}
+
+type UserScramCredentialsUpsertion struct {
+	Name           string
+	Mechanism      ScramMechanism
+	Iterations     int
+	Salt           []byte
+	SaltedPassword []byte
+}
+
+// AlterUserScramCredentialsResponse represents a response from a kafka broker to an alter user
+// credentials request.
+type AlterUserScramCredentialsResponse struct {
+	// The amount of time that the broker throttled the request.
+	Throttle time.Duration
+
+	// List of altered user scram credentials.
+	Results []AlterUserScramCredentialsResponseUser
+}
+
+type AlterUserScramCredentialsResponseUser struct {
+	User  string
+	Error error
+}
+
+// AlterUserScramCredentials sends user scram credentials alteration request to a kafka broker and returns
+// the response.
+func (c *Client) AlterUserScramCredentials(ctx context.Context, req *AlterUserScramCredentialsRequest) (*AlterUserScramCredentialsResponse, error) {
+	deletions := make([]alteruserscramcredentials.RequestUserScramCredentialsDeletion, len(req.Deletions))
+	upsertions := make([]alteruserscramcredentials.RequestUserScramCredentialsUpsertion, len(req.Upsertions))
+
+	for deletionIdx, deletion := range req.Deletions {
+		deletions[deletionIdx] = alteruserscramcredentials.RequestUserScramCredentialsDeletion{
+			Name:      deletion.Name,
+			Mechanism: int8(deletion.Mechanism),
+		}
+	}
+
+	for upsertionIdx, upsertion := range req.Upsertions {
+		upsertions[upsertionIdx] = alteruserscramcredentials.RequestUserScramCredentialsUpsertion{
+			Name:           upsertion.Name,
+			Mechanism:      int8(upsertion.Mechanism),
+			Iterations:     int32(upsertion.Iterations),
+			Salt:           upsertion.Salt,
+			SaltedPassword: upsertion.SaltedPassword,
+		}
+	}
+
+	m, err := c.roundTrip(ctx, req.Addr, &alteruserscramcredentials.Request{
+		Deletions:  deletions,
+		Upsertions: upsertions,
+	})
+	if err != nil {
+		return nil, fmt.Errorf("kafka.(*Client).AlterUserScramCredentials: %w", err)
+	}
+
+	res := m.(*alteruserscramcredentials.Response)
+	responseEntries := make([]AlterUserScramCredentialsResponseUser, len(res.Results))
+
+	for responseIdx, responseResult := range res.Results {
+		responseEntries[responseIdx] = AlterUserScramCredentialsResponseUser{
+			User:  responseResult.User,
+			Error: makeError(responseResult.ErrorCode, responseResult.ErrorMessage),
+		}
+	}
+	ret := &AlterUserScramCredentialsResponse{
+		Throttle: makeDuration(res.ThrottleTimeMs),
+		Results:  responseEntries,
+	}
+
+	return ret, nil
+}
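The upsertion carries a pre-computed SaltedPassword rather than a plain password. For SCRAM this is conventionally the RFC 5802 Hi() function, i.e. PBKDF2 over the password with the given salt and iteration count; nothing in this file mandates how it is derived, so the following is only a sketch using golang.org/x/crypto/pbkdf2, with the user name, salt size and iteration count chosen for illustration:

package main

import (
	"context"
	"crypto/rand"
	"crypto/sha512"
	"log"

	"github.com/segmentio/kafka-go"
	"golang.org/x/crypto/pbkdf2"
)

func main() {
	client := &kafka.Client{Addr: kafka.TCP("localhost:9092")}

	salt := make([]byte, 16)
	if _, err := rand.Read(salt); err != nil {
		log.Fatal(err)
	}

	const iterations = 8192
	// RFC 5802 Hi(password, salt, i) is PBKDF2 with the mechanism's hash; for
	// SCRAM-SHA-512 the derived key length is the SHA-512 digest size.
	saltedPassword := pbkdf2.Key([]byte("my-password"), salt, iterations, sha512.Size, sha512.New)

	resp, err := client.AlterUserScramCredentials(context.Background(), &kafka.AlterUserScramCredentialsRequest{
		Upsertions: []kafka.UserScramCredentialsUpsertion{{
			Name:           "my-user",
			Mechanism:      kafka.ScramMechanismSha512,
			Iterations:     iterations,
			Salt:           salt,
			SaltedPassword: saltedPassword,
		}},
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, result := range resp.Results {
		if result.Error != nil {
			log.Fatal(result.Error)
		}
	}
}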
diff -pruN 0.2.1-1.1/alteruserscramcredentials_test.go 0.4.49+ds1-1/alteruserscramcredentials_test.go
--- 0.2.1-1.1/alteruserscramcredentials_test.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/alteruserscramcredentials_test.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,73 @@
+package kafka
+
+import (
+	"context"
+	"testing"
+
+	ktesting "github.com/segmentio/kafka-go/testing"
+)
+
+func TestAlterUserScramCredentials(t *testing.T) {
+	// https://issues.apache.org/jira/browse/KAFKA-10259
+	if !ktesting.KafkaIsAtLeast("2.7.0") {
+		return
+	}
+
+	client, shutdown := newLocalClient()
+	defer shutdown()
+
+	name := makeTopic()
+
+	createRes, err := client.AlterUserScramCredentials(context.Background(), &AlterUserScramCredentialsRequest{
+		Upsertions: []UserScramCredentialsUpsertion{
+			{
+				Name:           name,
+				Mechanism:      ScramMechanismSha512,
+				Iterations:     15000,
+				Salt:           []byte("my-salt"),
+				SaltedPassword: []byte("my-salted-password"),
+			},
+		},
+	})
+
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if len(createRes.Results) != 1 {
+		t.Fatalf("expected 1 createResult; got %d", len(createRes.Results))
+	}
+
+	if createRes.Results[0].User != name {
+		t.Fatalf("expected createResult with user: %s, got %s", name, createRes.Results[0].User)
+	}
+
+	if createRes.Results[0].Error != nil {
+		t.Fatalf("didn't expect an error in createResult, got %v", createRes.Results[0].Error)
+	}
+
+	deleteRes, err := client.AlterUserScramCredentials(context.Background(), &AlterUserScramCredentialsRequest{
+		Deletions: []UserScramCredentialsDeletion{
+			{
+				Name:      name,
+				Mechanism: ScramMechanismSha512,
+			},
+		},
+	})
+
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if len(deleteRes.Results) != 1 {
+		t.Fatalf("expected 1 deleteResult; got %d", len(deleteRes.Results))
+	}
+
+	if deleteRes.Results[0].User != name {
+		t.Fatalf("expected deleteResult with user: %s, got %s", name, deleteRes.Results[0].User)
+	}
+
+	if deleteRes.Results[0].Error != nil {
+		t.Fatalf("didn't expect an error in deleteResult, got %v", deleteRes.Results[0].Error)
+	}
+}
diff -pruN 0.2.1-1.1/apiversions.go 0.4.49+ds1-1/apiversions.go
--- 0.2.1-1.1/apiversions.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/apiversions.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,72 @@
+package kafka
+
+import (
+	"context"
+	"net"
+
+	"github.com/segmentio/kafka-go/protocol"
+	"github.com/segmentio/kafka-go/protocol/apiversions"
+)
+
+// ApiVersionsRequest is a request to the ApiVersions API.
+type ApiVersionsRequest struct {
+	// Address of the kafka broker to send the request to.
+	Addr net.Addr
+}
+
+// ApiVersionsResponse is a response from the ApiVersions API.
+type ApiVersionsResponse struct {
+	// Error is set to a non-nil value if an error was encountered.
+	Error error
+
+	// ApiKeys contains the specific details of each supported API.
+	ApiKeys []ApiVersionsResponseApiKey
+}
+
+// ApiVersionsResponseApiKey includes the details of which versions are supported for a single API.
+type ApiVersionsResponseApiKey struct {
+	// ApiKey is the ID of the API.
+	ApiKey int
+
+	// ApiName is a human-friendly description of the API.
+	ApiName string
+
+	// MinVersion is the minimum API version supported by the broker.
+	MinVersion int
+
+	// MaxVersion is the maximum API version supported by the broker.
+	MaxVersion int
+}
+
+func (c *Client) ApiVersions(
+	ctx context.Context,
+	req *ApiVersionsRequest,
+) (*ApiVersionsResponse, error) {
+	apiReq := &apiversions.Request{}
+	protoResp, err := c.roundTrip(
+		ctx,
+		req.Addr,
+		apiReq,
+	)
+	if err != nil {
+		return nil, err
+	}
+	apiResp := protoResp.(*apiversions.Response)
+
+	resp := &ApiVersionsResponse{
+		Error: makeError(apiResp.ErrorCode, ""),
+	}
+	for _, apiKey := range apiResp.ApiKeys {
+		resp.ApiKeys = append(
+			resp.ApiKeys,
+			ApiVersionsResponseApiKey{
+				ApiKey:     int(apiKey.ApiKey),
+				ApiName:    protocol.ApiKey(apiKey.ApiKey).String(),
+				MinVersion: int(apiKey.MinVersion),
+				MaxVersion: int(apiKey.MaxVersion),
+			},
+		)
+	}
+
+	return resp, err
+}
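A common use for this call is feature detection: list what the broker advertises and check whether an API is supported at the version the program needs. A short sketch; the broker address is illustrative:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/segmentio/kafka-go"
)

func main() {
	client := &kafka.Client{Addr: kafka.TCP("localhost:9092")}

	resp, err := client.ApiVersions(context.Background(), &kafka.ApiVersionsRequest{})
	if err != nil {
		log.Fatal(err)
	}
	if resp.Error != nil {
		log.Fatal(resp.Error)
	}

	for _, api := range resp.ApiKeys {
		fmt.Printf("%-40s v%d..v%d\n", api.ApiName, api.MinVersion, api.MaxVersion)
	}
}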
diff -pruN 0.2.1-1.1/apiversions_test.go 0.4.49+ds1-1/apiversions_test.go
--- 0.2.1-1.1/apiversions_test.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/apiversions_test.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,33 @@
+package kafka
+
+import (
+	"context"
+	"testing"
+)
+
+func TestClientApiVersions(t *testing.T) {
+	ctx := context.Background()
+
+	client, shutdown := newLocalClient()
+	defer shutdown()
+
+	resp, err := client.ApiVersions(ctx, &ApiVersionsRequest{})
+	if err != nil {
+		t.Fatal(err)
+	}
+	if resp.Error != nil {
+		t.Error(
+			"Unexpected error in response",
+			"expected", nil,
+			"got", resp.Error,
+		)
+	}
+
+	if len(resp.ApiKeys) == 0 {
+		t.Error(
+			"Unexpected apiKeys length",
+			"expected greater than", 0,
+			"got", 0,
+		)
+	}
+}
diff -pruN 0.2.1-1.1/balancer.go 0.4.49+ds1-1/balancer.go
--- 0.2.1-1.1/balancer.go	2018-11-20 17:06:32.000000000 +0000
+++ 0.4.49+ds1-1/balancer.go	2025-08-21 19:15:53.000000000 +0000
@@ -2,7 +2,9 @@ package kafka
 
 import (
 	"hash"
+	"hash/crc32"
 	"hash/fnv"
+	"math/rand"
 	"sort"
 	"sync"
 )
@@ -11,9 +13,7 @@ import (
 // logic used by Writer instances to route messages to the partitions available
 // on a kafka cluster.
 //
-// Instances of Balancer do not have to be safe to use concurrently by multiple
-// goroutines, the Writer implementation ensures that calls to Balance are
-// synchronized.
+// Balancers must be safe to use concurrently from multiple goroutines.
 type Balancer interface {
 	// Balance receives a message and a set of available partitions and
 	// returns the partition number that the message should be routed to.
@@ -35,16 +35,35 @@ func (f BalancerFunc) Balance(msg Messag
 }
 
 // RoundRobin is a Balancer implementation that equally distributes messages
-// across all available partitions.
+// across all available partitions.  It can take an optional chunk size to send
+// ChunkSize messages to the same partition before moving to the next partition.
+// This can be used to improve batch sizes.
 type RoundRobin struct {
-	offset uint64
+	ChunkSize int
+	// Use a 32 bits integer so RoundRobin values don't need to be aligned to
+	// apply increments.
+	counter uint32
+
+	mutex sync.Mutex
 }
 
 // Balance satisfies the Balancer interface.
 func (rr *RoundRobin) Balance(msg Message, partitions ...int) int {
-	length := uint64(len(partitions))
-	offset := rr.offset
-	rr.offset++
+	return rr.balance(partitions)
+}
+
+func (rr *RoundRobin) balance(partitions []int) int {
+	rr.mutex.Lock()
+	defer rr.mutex.Unlock()
+
+	if rr.ChunkSize < 1 {
+		rr.ChunkSize = 1
+	}
+
+	length := len(partitions)
+	counterNow := rr.counter
+	offset := int(counterNow / uint32(rr.ChunkSize))
+	rr.counter++
 	return partitions[offset%length]
 }
 
@@ -55,6 +74,7 @@ func (rr *RoundRobin) Balance(msg Messag
 // balancing relies on the fact that each producer using a LeastBytes balancer
 // should produce well balanced messages.
 type LeastBytes struct {
+	mutex    sync.Mutex
 	counters []leastBytesCounter
 }
 
@@ -65,11 +85,12 @@ type leastBytesCounter struct {
 
 // Balance satisfies the Balancer interface.
 func (lb *LeastBytes) Balance(msg Message, partitions ...int) int {
-	for _, p := range partitions {
-		if c := lb.counterOf(p); c == nil {
-			lb.counters = lb.makeCounters(partitions...)
-			break
-		}
+	lb.mutex.Lock()
+	defer lb.mutex.Unlock()
+
+	// the number of partitions changed, rebuild the counters
+	if len(partitions) != len(lb.counters) {
+		lb.counters = lb.makeCounters(partitions...)
 	}
 
 	minBytes := lb.counters[0].bytes
@@ -87,16 +108,6 @@ func (lb *LeastBytes) Balance(msg Messag
 	return c.partition
 }
 
-func (lb *LeastBytes) counterOf(partition int) *leastBytesCounter {
-	i := sort.Search(len(lb.counters), func(i int) bool {
-		return lb.counters[i].partition >= partition
-	})
-	if i == len(lb.counters) || lb.counters[i].partition != partition {
-		return nil
-	}
-	return &lb.counters[i]
-}
-
 func (lb *LeastBytes) makeCounters(partitions ...int) (counters []leastBytesCounter) {
 	counters = make([]leastBytesCounter, len(partitions))
 
@@ -124,23 +135,31 @@ var (
 //
 // The logic to calculate the partition is:
 //
-// 		hasher.Sum32() % len(partitions) => partition
+//	hasher.Sum32() % len(partitions) => partition
 //
 // By default, Hash uses the FNV-1a algorithm.  This is the same algorithm used
 // by the Sarama Producer and ensures that messages produced by kafka-go will
-// be delivered to the same topics that the Sarama producer would be delivered to
+// be delivered to the same partitions that the Sarama producer would deliver them to.
 type Hash struct {
 	rr     RoundRobin
 	Hasher hash.Hash32
+
+	// lock protects Hasher while calculating the hash code.  It is assumed that
+	// the Hasher field is read-only once the Balancer is created, so as a
+	// performance optimization, reads of the field are not protected.
+	lock sync.Mutex
 }
 
-func (h *Hash) Balance(msg Message, partitions ...int) (partition int) {
+func (h *Hash) Balance(msg Message, partitions ...int) int {
 	if msg.Key == nil {
 		return h.rr.Balance(msg, partitions...)
 	}
 
 	hasher := h.Hasher
-	if hasher == nil {
+	if hasher != nil {
+		h.lock.Lock()
+		defer h.lock.Unlock()
+	} else {
 		hasher = fnv1aPool.Get().(hash.Hash32)
 		defer fnv1aPool.Put(hasher)
 	}
@@ -151,10 +170,184 @@ func (h *Hash) Balance(msg Message, part
 	}
 
 	// uses same algorithm that Sarama's hashPartitioner uses
-	partition = int(hasher.Sum32()) % len(partitions)
+	// note the type conversions here.  if the uint32 hash code is not cast to
+	// an int32, we do not get the same result as sarama.
+	partition := int32(hasher.Sum32()) % int32(len(partitions))
 	if partition < 0 {
 		partition = -partition
 	}
 
-	return
+	return int(partition)
+}
+
+// ReferenceHash is a Balancer that uses the provided hash function to determine which
+// partition to route messages to.  This ensures that messages with the same key
+// are routed to the same partition.
+//
+// The logic to calculate the partition is:
+//
+//	(int32(hasher.Sum32()) & 0x7fffffff) % len(partitions) => partition
+//
+// By default, ReferenceHash uses the FNV-1a algorithm. This is the same algorithm as
+// the Sarama NewReferenceHashPartitioner and ensures that messages produced by kafka-go will
+// be delivered to the same partitions that the Sarama producer would deliver them to.
+type ReferenceHash struct {
+	rr     randomBalancer
+	Hasher hash.Hash32
+
+	// lock protects Hasher while calculating the hash code.  It is assumed that
+	// the Hasher field is read-only once the Balancer is created, so as a
+	// performance optimization, reads of the field are not protected.
+	lock sync.Mutex
+}
+
+func (h *ReferenceHash) Balance(msg Message, partitions ...int) int {
+	if msg.Key == nil {
+		return h.rr.Balance(msg, partitions...)
+	}
+
+	hasher := h.Hasher
+	if hasher != nil {
+		h.lock.Lock()
+		defer h.lock.Unlock()
+	} else {
+		hasher = fnv1aPool.Get().(hash.Hash32)
+		defer fnv1aPool.Put(hasher)
+	}
+
+	hasher.Reset()
+	if _, err := hasher.Write(msg.Key); err != nil {
+		panic(err)
+	}
+
+	// uses the same algorithm as the Sarama's referenceHashPartitioner.
+	// note the type conversions here. if the uint32 hash code is not cast to
+	// an int32, we do not get the same result as sarama.
+	partition := (int32(hasher.Sum32()) & 0x7fffffff) % int32(len(partitions))
+	return int(partition)
+}
+
+type randomBalancer struct {
+	mock int // mocked return value, used for testing
+}
+
+func (b randomBalancer) Balance(msg Message, partitions ...int) (partition int) {
+	if b.mock != 0 {
+		return b.mock
+	}
+	return partitions[rand.Int()%len(partitions)]
+}
+
+// CRC32Balancer is a Balancer that uses the CRC32 hash function to determine
+// which partition to route messages to.  This ensures that messages with the
+// same key are routed to the same partition.  This balancer is compatible with
+// the built-in hash partitioners in librdkafka and the language bindings that
+// are built on top of it, including the
+// github.com/confluentinc/confluent-kafka-go Go package.
+//
+// With the Consistent field false (default), this partitioner is equivalent to
+// the "consistent_random" setting in librdkafka.  When Consistent is true, this
+// partitioner is equivalent to the "consistent" setting.  The latter will hash
+// empty or nil keys into the same partition.
+//
+// Unless you are absolutely certain that all your messages will have keys, it's
+// best to leave the Consistent flag off.  Otherwise, you run the risk of
+// creating a very hot partition.
+type CRC32Balancer struct {
+	Consistent bool
+	random     randomBalancer
+}
+
+func (b CRC32Balancer) Balance(msg Message, partitions ...int) (partition int) {
+	// NOTE: the crc32 balancers in librdkafka don't differentiate between nil
+	//       and empty keys.  both cases are treated as unset.
+	if len(msg.Key) == 0 && !b.Consistent {
+		return b.random.Balance(msg, partitions...)
+	}
+
+	idx := crc32.ChecksumIEEE(msg.Key) % uint32(len(partitions))
+	return partitions[idx]
+}
+
+// Murmur2Balancer is a Balancer that uses the Murmur2 hash function to
+// determine which partition to route messages to.  This ensures that messages
+// with the same key are routed to the same partition.  This balancer is
+// compatible with the partitioner used by the Java library and by librdkafka's
+// "murmur2" and "murmur2_random" partitioners.
+//
+// With the Consistent field false (default), this partitioner is equivalent to
+// the "murmur2_random" setting in librdkafka.  When Consistent is true, this
+// partitioner is equivalent to the "murmur2" setting.  The latter will hash
+// nil keys into the same partition.  Empty, non-nil keys are always hashed to
+// the same partition regardless of configuration.
+//
+// Unless you are absolutely certain that all your messages will have keys, it's
+// best to leave the Consistent flag off.  Otherwise, you run the risk of
+// creating a very hot partition.
+//
+// Note that the librdkafka documentation states that the "murmur2_random" is
+// functionally equivalent to the default Java partitioner.  That is not quite
+// true for nil keys: the Java partitioner falls back to round robin while
+// librdkafka picks a random partition.  We choose librdkafka's implementation
+// because it arguably has a larger install base.
+type Murmur2Balancer struct {
+	Consistent bool
+	random     randomBalancer
+}
+
+func (b Murmur2Balancer) Balance(msg Message, partitions ...int) (partition int) {
+	// NOTE: the murmur2 balancers in java and librdkafka treat a nil key as
+	//       non-existent while treating an empty slice as a defined value.
+	if msg.Key == nil && !b.Consistent {
+		return b.random.Balance(msg, partitions...)
+	}
+
+	idx := (murmur2(msg.Key) & 0x7fffffff) % uint32(len(partitions))
+	return partitions[idx]
+}
+
+// Go port of the Java library's murmur2 function.
+// https://github.com/apache/kafka/blob/1.0/clients/src/main/java/org/apache/kafka/common/utils/Utils.java#L353
+func murmur2(data []byte) uint32 {
+	length := len(data)
+	const (
+		seed uint32 = 0x9747b28c
+		// 'm' and 'r' are mixing constants generated offline.
+		// They're not really 'magic', they just happen to work well.
+		m = 0x5bd1e995
+		r = 24
+	)
+
+	// Initialize the hash to a random value
+	h := seed ^ uint32(length)
+	length4 := length / 4
+
+	for i := 0; i < length4; i++ {
+		i4 := i * 4
+		k := (uint32(data[i4+0]) & 0xff) + ((uint32(data[i4+1]) & 0xff) << 8) + ((uint32(data[i4+2]) & 0xff) << 16) + ((uint32(data[i4+3]) & 0xff) << 24)
+		k *= m
+		k ^= k >> r
+		k *= m
+		h *= m
+		h ^= k
+	}
+
+	// Handle the last few bytes of the input array
+	extra := length % 4
+	if extra >= 3 {
+		h ^= (uint32(data[(length & ^3)+2]) & 0xff) << 16
+	}
+	if extra >= 2 {
+		h ^= (uint32(data[(length & ^3)+1]) & 0xff) << 8
+	}
+	if extra >= 1 {
+		h ^= uint32(data[length & ^3]) & 0xff
+		h *= m
+	}
+
+	h ^= h >> 13
+	h *= m
+	h ^= h >> 15
+
+	return h
 }
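These balancers are meant to be plugged into a Writer through its Balancer field; the choice mostly comes down to which other clients the program needs to stay partition-compatible with. A sketch, with the broker address, topic and key names as placeholders:

package main

import (
	"context"
	"log"

	"github.com/segmentio/kafka-go"
)

func main() {
	w := &kafka.Writer{
		Addr:  kafka.TCP("localhost:9092"),
		Topic: "events",
		// Murmur2Balancer keeps keyed messages on the same partitions that the
		// Java client and librdkafka's "murmur2_random" partitioner would pick.
		// Alternatives: kafka.CRC32Balancer{} for librdkafka's "consistent_random",
		// &kafka.Hash{} to match Sarama, or &kafka.RoundRobin{ChunkSize: 16} to
		// favor larger per-partition batches.
		Balancer: kafka.Murmur2Balancer{},
	}
	defer w.Close()

	err := w.WriteMessages(context.Background(),
		kafka.Message{Key: []byte("user-42"), Value: []byte("hello")},
	)
	if err != nil {
		log.Fatal(err)
	}
}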
diff -pruN 0.2.1-1.1/balancer_test.go 0.4.49+ds1-1/balancer_test.go
--- 0.2.1-1.1/balancer_test.go	2018-11-20 17:06:32.000000000 +0000
+++ 0.4.49+ds1-1/balancer_test.go	2025-08-21 19:15:53.000000000 +0000
@@ -1,6 +1,7 @@
 package kafka
 
 import (
+	"fmt"
 	"hash"
 	"hash/crc32"
 	"testing"
@@ -39,6 +40,13 @@ func TestHashBalancer(t *testing.T) {
 			Partitions: []int{0, 1, 2},
 			Partition:  1,
 		},
+		// in a previous version, this test would select a different partition
+		// than sarama's hash partitioner.
+		"hash code with MSB set": {
+			Key:        []byte("20"),
+			Partitions: []int{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15},
+			Partition:  1,
+		},
 	}
 
 	for label, test := range testCases {
@@ -53,4 +61,418 @@ func TestHashBalancer(t *testing.T) {
 			}
 		})
 	}
+}
+
+func TestReferenceHashBalancer(t *testing.T) {
+	testCases := map[string]struct {
+		Key               []byte
+		Hasher            hash.Hash32
+		Partitions        []int
+		Partition         int
+		RndBalancerResult int
+	}{
+		"nil": {
+			Key:               nil, // nil key means random partition
+			Partitions:        []int{0, 1, 2},
+			Partition:         123,
+			RndBalancerResult: 123,
+		},
+		"partition-0": {
+			Key:        []byte("blah"),
+			Partitions: []int{0, 1},
+			Partition:  0,
+		},
+		"partition-1": {
+			Key:        []byte("blah"),
+			Partitions: []int{0, 1, 2},
+			Partition:  1,
+		},
+		"partition-2": {
+			Key:        []byte("castle"),
+			Partitions: []int{0, 1, 2},
+			Partition:  2,
+		},
+		"custom hash": {
+			Key:        []byte("boop"),
+			Hasher:     crc32.NewIEEE(),
+			Partitions: []int{0, 1, 2},
+			Partition:  1,
+		},
+		"hash code with MSB set": {
+			Key:        []byte("20"),
+			Partitions: []int{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15},
+			Partition:  15,
+		},
+	}
+
+	for label, test := range testCases {
+		t.Run(label, func(t *testing.T) {
+			var rr randomBalancer
+			if test.Key == nil {
+				rr.mock = test.RndBalancerResult
+			}
+
+			msg := Message{Key: test.Key}
+			h := ReferenceHash{Hasher: test.Hasher, rr: rr}
+			partition := h.Balance(msg, test.Partitions...)
+			if partition != test.Partition {
+				t.Errorf("expected %v; got %v", test.Partition, partition)
+			}
+		})
+	}
+}
+
+func TestCRC32Balancer(t *testing.T) {
+	// These tests are taken from the default "consistent_random" partitioner from
+	// https://github.com/edenhill/librdkafka/blob/master/tests/0048-partitioner.c
+	partitionCount := 17
+	var partitions []int
+	for i := 0; i < partitionCount; i++ {
+		partitions = append(partitions, i*i)
+	}
+
+	testCases := map[string]struct {
+		Key        []byte
+		Partitions []int
+		Partition  int
+	}{
+		"nil": {
+			Key:        nil,
+			Partitions: partitions,
+			Partition:  -1,
+		},
+		"empty": {
+			Key:        []byte{},
+			Partitions: partitions,
+			Partition:  -1,
+		},
+		"unaligned": {
+			Key:        []byte("23456"),
+			Partitions: partitions,
+			Partition:  partitions[0xb1b451d7%partitionCount],
+		},
+		"long key": {
+			Key:        []byte("this is another string with more length to it perhaps"),
+			Partitions: partitions,
+			Partition:  partitions[0xb0150df7%partitionCount],
+		},
+		"short key": {
+			Key:        []byte("hejsan"),
+			Partitions: partitions,
+			Partition:  partitions[0xd077037e%partitionCount],
+		},
+	}
+
+	t.Run("default", func(t *testing.T) {
+		for label, test := range testCases {
+			t.Run(label, func(t *testing.T) {
+				b := CRC32Balancer{}
+				b.random.mock = -1
+
+				msg := Message{Key: test.Key}
+				partition := b.Balance(msg, test.Partitions...)
+				if partition != test.Partition {
+					t.Errorf("expected %v; got %v", test.Partition, partition)
+				}
+			})
+		}
+	})
+
+	t.Run("consistent", func(t *testing.T) {
+		b := CRC32Balancer{Consistent: true}
+		b.random.mock = -1
+
+		p := b.Balance(Message{}, partitions...)
+		if p < 0 {
+			t.Fatal("should not have gotten a random partition")
+		}
+		for i := 0; i < 10; i++ {
+			if p != b.Balance(Message{}, partitions...) {
+				t.Fatal("nil key should always hash consistently")
+			}
+			if p != b.Balance(Message{Key: []byte{}}, partitions...) {
+				t.Fatal("empty key should always hash consistently and have same result as nil key")
+			}
+		}
+	})
+}
+
+func TestMurmur2(t *testing.T) {
+	// These tests are taken from the "murmur2" implementation from
+	// https://github.com/edenhill/librdkafka/blob/master/src/rdmurmur2.c
+	testCases := []struct {
+		Key               []byte
+		JavaMurmur2Result uint32
+	}{
+		{Key: []byte("kafka"), JavaMurmur2Result: 0xd067cf64},
+		{Key: []byte("giberish123456789"), JavaMurmur2Result: 0x8f552b0c},
+		{Key: []byte("1234"), JavaMurmur2Result: 0x9fc97b14},
+		{Key: []byte("234"), JavaMurmur2Result: 0xe7c009ca},
+		{Key: []byte("34"), JavaMurmur2Result: 0x873930da},
+		{Key: []byte("4"), JavaMurmur2Result: 0x5a4b5ca1},
+		{Key: []byte("PreAmbleWillBeRemoved,ThePrePartThatIs"), JavaMurmur2Result: 0x78424f1c},
+		{Key: []byte("reAmbleWillBeRemoved,ThePrePartThatIs"), JavaMurmur2Result: 0x4a62b377},
+		{Key: []byte("eAmbleWillBeRemoved,ThePrePartThatIs"), JavaMurmur2Result: 0xe0e4e09e},
+		{Key: []byte("AmbleWillBeRemoved,ThePrePartThatIs"), JavaMurmur2Result: 0x62b8b43f},
+		{Key: []byte(""), JavaMurmur2Result: 0x106e08d9},
+		{Key: nil, JavaMurmur2Result: 0x106e08d9},
+	}
+
+	for _, test := range testCases {
+		t.Run(fmt.Sprintf("key:%s", test.Key), func(t *testing.T) {
+			got := murmur2(test.Key)
+			if got != test.JavaMurmur2Result {
+				t.Errorf("expected %v; got %v", test.JavaMurmur2Result, got)
+			}
+		})
+	}
+}
+
+func TestMurmur2Balancer(t *testing.T) {
+	// These tests are taken from the "murmur2_random" partitioner from
+	// https://github.com/edenhill/librdkafka/blob/master/tests/0048-partitioner.c
+	partitionCount := 17
+	librdkafkaPartitions := make([]int, partitionCount)
+	for i := 0; i < partitionCount; i++ {
+		librdkafkaPartitions[i] = i * i
+	}
+
+	// These tests are taken from the Murmur2Partitioner Python class from
+	// https://github.com/dpkp/kafka-python/blob/master/test/test_partitioner.py
+	pythonPartitions := make([]int, 1000)
+	for i := 0; i < 1000; i++ {
+		pythonPartitions[i] = i
+	}
+
+	testCases := map[string]struct {
+		Key        []byte
+		Partitions []int
+		Partition  int
+	}{
+		"librdkafka-nil": {
+			Key:        nil,
+			Partitions: librdkafkaPartitions,
+			Partition:  123,
+		},
+		"librdkafka-empty": {
+			Key:        []byte{},
+			Partitions: librdkafkaPartitions,
+			Partition:  librdkafkaPartitions[0x106e08d9%partitionCount],
+		},
+		"librdkafka-unaligned": {
+			Key:        []byte("23456"),
+			Partitions: librdkafkaPartitions,
+			Partition:  librdkafkaPartitions[0x058d780f%partitionCount],
+		},
+		"librdkafka-long key": {
+			Key:        []byte("this is another string with more length to it perhaps"),
+			Partitions: librdkafkaPartitions,
+			Partition:  librdkafkaPartitions[0x4f7703da%partitionCount],
+		},
+		"librdkafka-short key": {
+			Key:        []byte("hejsan"),
+			Partitions: librdkafkaPartitions,
+			Partition:  librdkafkaPartitions[0x5ec19395%partitionCount],
+		},
+		"python-empty": {
+			Key:        []byte(""),
+			Partitions: pythonPartitions,
+			Partition:  681,
+		},
+		"python-a": {
+			Key:        []byte("a"),
+			Partitions: pythonPartitions,
+			Partition:  524,
+		},
+		"python-ab": {
+			Key:        []byte("ab"),
+			Partitions: pythonPartitions,
+			Partition:  434,
+		},
+		"python-abc": {
+			Key:        []byte("abc"),
+			Partitions: pythonPartitions,
+			Partition:  107,
+		},
+		"python-123456789": {
+			Key:        []byte("123456789"),
+			Partitions: pythonPartitions,
+			Partition:  566,
+		},
+		"python-\x00 ": {
+			Key:        []byte{0, 32},
+			Partitions: pythonPartitions,
+			Partition:  742,
+		},
+	}
+
+	t.Run("default", func(t *testing.T) {
+		for label, test := range testCases {
+			t.Run(label, func(t *testing.T) {
+				b := Murmur2Balancer{}
+				b.random.mock = 123
+
+				msg := Message{Key: test.Key}
+				partition := b.Balance(msg, test.Partitions...)
+				if partition != test.Partition {
+					t.Errorf("expected %v; got %v", test.Partition, partition)
+				}
+			})
+		}
+	})
+
+	t.Run("consistent", func(t *testing.T) {
+		b := Murmur2Balancer{Consistent: true}
+		b.random.mock = -1
+
+		p := b.Balance(Message{}, librdkafkaPartitions...)
+		if p < 0 {
+			t.Fatal("should not have gotten a random partition")
+		}
+		for i := 0; i < 10; i++ {
+			if p != b.Balance(Message{}, librdkafkaPartitions...) {
+				t.Fatal("nil key should always hash consistently")
+			}
+		}
+	})
+}
+
+func TestLeastBytes(t *testing.T) {
+	testCases := map[string]struct {
+		Keys       [][]byte
+		Partitions [][]int
+		Partition  int
+	}{
+		"single message": {
+			Keys: [][]byte{
+				[]byte("key"),
+			},
+			Partitions: [][]int{
+				{0, 1, 2},
+			},
+			Partition: 0,
+		},
+		"multiple messages, no partition change": {
+			Keys: [][]byte{
+				[]byte("a"),
+				[]byte("ab"),
+				[]byte("abc"),
+				[]byte("abcd"),
+			},
+			Partitions: [][]int{
+				{0, 1, 2},
+				{0, 1, 2},
+				{0, 1, 2},
+				{0, 1, 2},
+			},
+			Partition: 0,
+		},
+		"partition gained": {
+			Keys: [][]byte{
+				[]byte("hello world 1"),
+				[]byte("hello world 2"),
+				[]byte("hello world 3"),
+			},
+			Partitions: [][]int{
+				{0, 1},
+				{0, 1},
+				{0, 1, 2},
+			},
+			Partition: 0,
+		},
+		"partition lost": {
+			Keys: [][]byte{
+				[]byte("hello world 1"),
+				[]byte("hello world 2"),
+				[]byte("hello world 3"),
+			},
+			Partitions: [][]int{
+				{0, 1, 2},
+				{0, 1, 2},
+				{0, 1},
+			},
+			Partition: 0,
+		},
+	}
+
+	for label, test := range testCases {
+		t.Run(label, func(t *testing.T) {
+			lb := &LeastBytes{}
+
+			var partition int
+			for i, key := range test.Keys {
+				msg := Message{Key: key}
+				partition = lb.Balance(msg, test.Partitions[i]...)
+			}
+
+			if partition != test.Partition {
+				t.Errorf("expected %v; got %v", test.Partition, partition)
+			}
+		})
+	}
+}
+
+func TestRoundRobin(t *testing.T) {
+	testCases := map[string]struct {
+		Partitions []int
+		ChunkSize  int
+	}{
+		"default - odd partition count": {
+			Partitions: []int{0, 1, 2, 3, 4, 5, 6},
+		},
+		"negative chunk size - odd partition count": {
+			Partitions: []int{0, 1, 2, 3, 4, 5, 6},
+			ChunkSize:  -1,
+		},
+		"0 chunk size - odd partition count": {
+			Partitions: []int{0, 1, 2, 3, 4, 5, 6},
+			ChunkSize:  0,
+		},
+		"5 chunk size - odd partition count": {
+			Partitions: []int{0, 1, 2, 3, 4, 5, 6},
+			ChunkSize:  5,
+		},
+		"12 chunk size - odd partition count": {
+			Partitions: []int{0, 1, 2, 3, 4, 5, 6},
+			ChunkSize:  12,
+		},
+		"default - even partition count": {
+			Partitions: []int{0, 1, 2, 3, 4, 5, 6, 7},
+		},
+		"negative chunk size - even partition count": {
+			Partitions: []int{0, 1, 2, 3, 4, 5, 6, 7},
+			ChunkSize:  -1,
+		},
+		"0 chunk size - even partition count": {
+			Partitions: []int{0, 1, 2, 3, 4, 5, 6, 7},
+			ChunkSize:  0,
+		},
+		"5 chunk size - even partition count": {
+			Partitions: []int{0, 1, 2, 3, 4, 5, 6, 7},
+			ChunkSize:  5,
+		},
+		"12 chunk size - even partition count": {
+			Partitions: []int{0, 1, 2, 3, 4, 5, 6, 7},
+			ChunkSize:  12,
+		},
+	}
+	for label, test := range testCases {
+		t.Run(label, func(t *testing.T) {
+			lb := &RoundRobin{ChunkSize: test.ChunkSize}
+			msg := Message{}
+			var partition int
+			var i int
+			expectedChunkSize := test.ChunkSize
+			if expectedChunkSize < 1 {
+				expectedChunkSize = 1
+			}
+			partitions := test.Partitions
+			for i = 0; i < 50; i++ {
+				partition = lb.Balance(msg, partitions...)
+				if partition != i/expectedChunkSize%len(partitions) {
+					t.Error("Returned partition", partition, "expecting", i/expectedChunkSize%len(partitions))
+				}
+			}
+		})
+	}
 }
diff -pruN 0.2.1-1.1/batch.go 0.4.49+ds1-1/batch.go
--- 0.2.1-1.1/batch.go	2018-11-20 17:06:32.000000000 +0000
+++ 0.4.49+ds1-1/batch.go	2025-08-21 19:15:53.000000000 +0000
@@ -2,6 +2,7 @@ package kafka
 
 import (
 	"bufio"
+	"errors"
 	"io"
 	"sync"
 	"time"
@@ -28,6 +29,15 @@ type Batch struct {
 	offset        int64
 	highWaterMark int64
 	err           error
+	// The last offset in the batch.
+	//
+	// We use lastOffset to skip offsets that have been compacted away.
+	//
+	// We store lastOffset because we get lastOffset when we read a new message
+	// but only try to handle compaction when we receive an EOF. However, when
+	// we get an EOF we do not get the lastOffset. So there is a gap between
+	// when we receive the value and when we need to use it.
+	lastOffset int64
 }
 
 // Throttle gives the throttling duration applied by the kafka server on the
@@ -36,11 +46,16 @@ func (batch *Batch) Throttle() time.Dura
 	return batch.throttle
 }
 
-// Watermark returns the current highest watermark in a partition.
+// HighWaterMark returns the current highest watermark in a partition.
 func (batch *Batch) HighWaterMark() int64 {
 	return batch.highWaterMark
 }
 
+// Partition returns the batch partition.
+func (batch *Batch) Partition() int {
+	return batch.partition
+}
+
 // Offset returns the offset of the next message in the batch.
 func (batch *Batch) Offset() int64 {
 	batch.mutex.Lock()
@@ -64,11 +79,17 @@ func (batch *Batch) close() (err error)
 
 	batch.conn = nil
 	batch.lock = nil
+
 	if batch.msgs != nil {
 		batch.msgs.discard()
 	}
 
-	if err = batch.err; err == io.EOF {
+	if batch.msgs != nil && batch.msgs.decompressed != nil {
+		releaseBuffer(batch.msgs.decompressed)
+		batch.msgs.decompressed = nil
+	}
+
+	if err = batch.err; errors.Is(batch.err, io.EOF) {
 		err = nil
 	}
 
@@ -79,7 +100,8 @@ func (batch *Batch) close() (err error)
 		conn.mutex.Unlock()
 
 		if err != nil {
-			if _, ok := err.(Error); !ok && err != io.ErrShortBuffer {
+			var kafkaError Error
+			if !errors.As(err, &kafkaError) && !errors.Is(err, io.ErrShortBuffer) {
 				conn.Close()
 			}
 		}
@@ -92,6 +114,19 @@ func (batch *Batch) close() (err error)
 	return
 }
 
+// Err returns a non-nil error if the batch is broken. This is the same error
+// that would be returned by Read, ReadMessage or Close (except in the case of
+// io.EOF which is never returned by Close).
+//
+// This method is useful when building retry mechanisms for (*Conn).ReadBatch;
+// the program can check whether the batch carried an error before attempting to
+// read the first message.
+//
+// Note that checking errors on a batch is optional; calling Read or ReadMessage
+// is always valid and can be used to either read a message or an error in cases
+// where that's convenient.
+func (batch *Batch) Err() error { return batch.err }
+
 // Read reads the value of the next message from the batch into b, returning the
 // number of bytes read, or an error if the next message couldn't be read.
 //
@@ -108,7 +143,7 @@ func (batch *Batch) Read(b []byte) (int,
 	batch.mutex.Lock()
 	offset := batch.offset
 
-	_, _, err := batch.readMessage(
+	_, _, _, err := batch.readMessage(
 		func(r *bufio.Reader, size int, nbytes int) (int, error) {
 			if nbytes < 0 {
 				return size, nil
@@ -119,9 +154,17 @@ func (batch *Batch) Read(b []byte) (int,
 			if nbytes < 0 {
 				return size, nil
 			}
+			// make sure there are enough bytes for the message value.  return
+			// errShortRead if the message is truncated.
+			if nbytes > size {
+				return size, errShortRead
+			}
 			n = nbytes // return value
+			if nbytes > cap(b) {
+				nbytes = cap(b)
+			}
 			if nbytes > len(b) {
-				nbytes = len(b)
+				b = b[:nbytes]
 			}
 			nbytes, err := io.ReadFull(r, b[:nbytes])
 			if err != nil {
@@ -150,7 +193,11 @@ func (batch *Batch) ReadMessage() (Messa
 	msg := Message{}
 	batch.mutex.Lock()
 
-	offset, timestamp, err := batch.readMessage(
+	var offset, timestamp int64
+	var headers []Header
+	var err error
+
+	offset, timestamp, headers, err = batch.readMessage(
 		func(r *bufio.Reader, size int, nbytes int) (remain int, err error) {
 			msg.Key, remain, err = readNewBytes(r, size, nbytes)
 			return
@@ -160,12 +207,31 @@ func (batch *Batch) ReadMessage() (Messa
 			return
 		},
 	)
+	// A batch may start before the requested offset so skip messages
+	// until the requested offset is reached.
+	for batch.conn != nil && offset < batch.conn.offset {
+		if err != nil {
+			break
+		}
+		offset, timestamp, headers, err = batch.readMessage(
+			func(r *bufio.Reader, size int, nbytes int) (remain int, err error) {
+				msg.Key, remain, err = readNewBytes(r, size, nbytes)
+				return
+			},
+			func(r *bufio.Reader, size int, nbytes int) (remain int, err error) {
+				msg.Value, remain, err = readNewBytes(r, size, nbytes)
+				return
+			},
+		)
+	}
 
 	batch.mutex.Unlock()
 	msg.Topic = batch.topic
 	msg.Partition = batch.partition
 	msg.Offset = offset
-	msg.Time = timestampToTime(timestamp)
+	msg.HighWaterMark = batch.highWaterMark
+	msg.Time = makeTime(timestamp)
+	msg.Headers = headers
 
 	return msg, err
 }
@@ -173,23 +239,30 @@ func (batch *Batch) ReadMessage() (Messa
 func (batch *Batch) readMessage(
 	key func(*bufio.Reader, int, int) (int, error),
 	val func(*bufio.Reader, int, int) (int, error),
-) (offset int64, timestamp int64, err error) {
+) (offset int64, timestamp int64, headers []Header, err error) {
 	if err = batch.err; err != nil {
 		return
 	}
 
-	offset, timestamp, err = batch.msgs.readMessage(batch.offset, key, val)
-	switch err {
-	case nil:
+	var lastOffset int64
+	offset, lastOffset, timestamp, headers, err = batch.msgs.readMessage(batch.offset, key, val)
+	switch {
+	case err == nil:
 		batch.offset = offset + 1
-	case errShortRead:
+		batch.lastOffset = lastOffset
+	case errors.Is(err, errShortRead):
 		// As an "optimization" kafka truncates the returned response after
 		// producing MaxBytes, which could then cause the code to return
 		// errShortRead.
 		err = batch.msgs.discard()
 		switch {
 		case err != nil:
-			batch.err = err
+			// Since io.EOF is used by the batch to indicate that there are
+			// no more messages to consume, it is crucial that any io.EOF errors
+			// on the underlying connection are repackaged.  Otherwise, the
+			// caller can't tell the difference between a batch that was fully
+			// consumed and a batch whose connection is in an error state.
+			batch.err = dontExpectEOF(err)
 		case batch.msgs.remaining() == 0:
 			// Because we use the adjusted deadline we could end up returning
 			// before the actual deadline occurred. This is necessary otherwise
@@ -198,16 +271,43 @@ func (batch *Batch) readMessage(
 			// This design decision was made to maximize the chances of keeping
 			// the connection open, the trade off being to lose precision on the
 			// read deadline management.
-			if !batch.deadline.IsZero() && time.Now().After(batch.deadline) {
-				err = RequestTimedOut
-			} else {
-				err = io.EOF
-			}
+			err = checkTimeoutErr(batch.deadline)
 			batch.err = err
+
+			// Checks the following:
+			// - `batch.err` for a "success" from the previous timeout check
+			// - `batch.msgs.lengthRemain` to ensure that this EOF is not due
+			//   to MaxBytes truncation
+			// - `batch.lastOffset` to ensure that the message format contains
+			//   `lastOffset`
+			if errors.Is(batch.err, io.EOF) && batch.msgs.lengthRemain == 0 && batch.lastOffset != -1 {
+				// Log compaction can create batches that end with compacted
+				// records so the normal strategy that increments the "next"
+				// offset as records are read doesn't work as the compacted
+				// records are "missing" and never get "read".
+				//
+				// In order to reliably reach the next non-compacted offset we
+				// jump past the saved lastOffset.
+				batch.offset = batch.lastOffset + 1
+			}
 		}
 	default:
-		batch.err = err
+		// Since io.EOF is used by the batch to indicate that there are
+		// no more messages to consume, it is crucial that any io.EOF errors
+		// on the underlying connection are repackaged.  Otherwise, the
+		// caller can't tell the difference between a batch that was fully
+		// consumed and a batch whose connection is in an error state.
+		batch.err = dontExpectEOF(err)
 	}
 
 	return
 }
+
+func checkTimeoutErr(deadline time.Time) (err error) {
+	if !deadline.IsZero() && time.Now().After(deadline) {
+		err = RequestTimedOut
+	} else {
+		err = io.EOF
+	}
+	return
+}
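
A minimal sketch (not part of the patch) of how the new (*Batch).Err accessor can back a retry strategy around (*Conn).ReadBatch. Only ReadBatch, Err and Close come from the code above; the helper name, the maxRetries parameter and the retry loop are illustrative assumptions.

package example

import kafka "github.com/segmentio/kafka-go"

// readBatchWithRetry re-issues ReadBatch while the returned batch already
// carries an error, and only hands a healthy batch back to the caller.
func readBatchWithRetry(conn *kafka.Conn, minBytes, maxBytes, maxRetries int) (*kafka.Batch, error) {
	var lastErr error
	for i := 0; i < maxRetries; i++ {
		batch := conn.ReadBatch(minBytes, maxBytes)
		err := batch.Err()
		if err == nil {
			return batch, nil
		}
		lastErr = err
		batch.Close() // discard the broken batch before retrying
	}
	return nil, lastErr
}
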
diff -pruN 0.2.1-1.1/batch_test.go 0.4.49+ds1-1/batch_test.go
--- 0.2.1-1.1/batch_test.go	2018-11-20 17:06:32.000000000 +0000
+++ 0.4.49+ds1-1/batch_test.go	2025-08-21 19:15:53.000000000 +0000
@@ -2,6 +2,7 @@ package kafka
 
 import (
 	"context"
+	"errors"
 	"io"
 	"net"
 	"strconv"
@@ -22,18 +23,19 @@ func TestBatchDontExpectEOF(t *testing.T
 	if err != nil {
 		t.Fatalf("cannot connect to partition leader at %s:%d: %s", broker.Host, broker.Port, err)
 	}
-	nc.(*net.TCPConn).CloseRead()
 
 	conn := NewConn(nc, topic, 0)
 	defer conn.Close()
 
+	nc.(*net.TCPConn).CloseRead()
+
 	batch := conn.ReadBatch(1024, 8192)
 
-	if _, err := batch.ReadMessage(); err != io.ErrUnexpectedEOF {
+	if _, err := batch.ReadMessage(); !errors.Is(err, io.ErrUnexpectedEOF) {
 		t.Error("bad error when reading message:", err)
 	}
 
-	if err := batch.Close(); err != io.ErrUnexpectedEOF {
+	if err := batch.Close(); !errors.Is(err, io.ErrUnexpectedEOF) {
 		t.Error("bad error when closing the batch:", err)
 	}
 }
diff -pruN 0.2.1-1.1/buffer.go 0.4.49+ds1-1/buffer.go
--- 0.2.1-1.1/buffer.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/buffer.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,27 @@
+package kafka
+
+import (
+	"bytes"
+	"sync"
+)
+
+var bufferPool = sync.Pool{
+	New: func() interface{} { return newBuffer() },
+}
+
+func newBuffer() *bytes.Buffer {
+	b := new(bytes.Buffer)
+	b.Grow(65536)
+	return b
+}
+
+func acquireBuffer() *bytes.Buffer {
+	return bufferPool.Get().(*bytes.Buffer)
+}
+
+func releaseBuffer(b *bytes.Buffer) {
+	if b != nil {
+		b.Reset()
+		bufferPool.Put(b)
+	}
+}
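
The pool above recycles the decompression buffers referenced by the releaseBuffer call added to (*Batch).close. Below is a small in-package sketch of the intended acquire/release discipline; copyPayload is a hypothetical helper, not part of the patch.

package kafka

import "io"

// copyPayload drains src into a pooled buffer and returns a private copy of
// the bytes. releaseBuffer resets the buffer before putting it back in the
// pool, so data must be copied out before the deferred release runs.
func copyPayload(src io.Reader) ([]byte, error) {
	buf := acquireBuffer()
	defer releaseBuffer(buf)

	if _, err := io.Copy(buf, src); err != nil {
		return nil, err
	}

	out := make([]byte, buf.Len())
	copy(out, buf.Bytes())
	return out, nil
}
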
diff -pruN 0.2.1-1.1/builder_test.go 0.4.49+ds1-1/builder_test.go
--- 0.2.1-1.1/builder_test.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/builder_test.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,267 @@
+package kafka
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"time"
+
+	"github.com/segmentio/kafka-go/compress"
+)
+
+// This file defines builders to assist in creating kafka payloads for unit testing.
+
+// fetchResponseBuilder builds v10 fetch responses. The exact response version
+// is less important than the message sets contained within, as this type is
+// ultimately used to unit test the message set reader that consumes the rest
+// of the response once the header has been parsed.
+type fetchResponseBuilder struct {
+	header   fetchResponseHeader
+	msgSets  []messageSetBuilder
+	rendered []byte
+}
+
+type fetchResponseHeader struct {
+	throttle            int32
+	errorCode           int16
+	sessionID           int32
+	topic               string
+	partition           int32
+	partitionErrorCode  int16
+	highWatermarkOffset int64
+	lastStableOffset    int64
+	logStartOffset      int64
+}
+
+func (b *fetchResponseBuilder) messages() (res []Message) {
+	for _, set := range b.msgSets {
+		res = append(res, set.messages()...)
+	}
+	return
+}
+
+func (b *fetchResponseBuilder) bytes() []byte {
+	if b.rendered == nil {
+		b.rendered = newWB().call(func(wb *kafkaWriteBuffer) {
+			wb.writeInt32(b.header.throttle)
+			wb.writeInt16(b.header.errorCode)
+			wb.writeInt32(b.header.sessionID)
+			wb.writeInt32(1) // num topics
+			wb.writeString(b.header.topic)
+			wb.writeInt32(1) // how many partitions
+			wb.writeInt32(b.header.partition)
+			wb.writeInt16(b.header.partitionErrorCode)
+			wb.writeInt64(b.header.highWatermarkOffset)
+			wb.writeInt64(b.header.lastStableOffset)
+			wb.writeInt64(b.header.logStartOffset)
+			wb.writeInt32(-1) // num aborted tx
+			wb.writeBytes(newWB().call(func(wb *kafkaWriteBuffer) {
+				for _, msgSet := range b.msgSets {
+					wb.Write(msgSet.bytes())
+				}
+			}))
+		})
+	}
+	return b.rendered
+}
+
+func (b *fetchResponseBuilder) Len() int {
+	return len(b.bytes())
+}
+
+type messageSetBuilder interface {
+	bytes() []byte
+	messages() []Message
+}
+
+type v0MessageSetBuilder struct {
+	msgs  []Message
+	codec CompressionCodec
+}
+
+func (f v0MessageSetBuilder) messages() []Message {
+	return f.msgs
+}
+
+func (f v0MessageSetBuilder) bytes() []byte {
+	bs := newWB().call(func(wb *kafkaWriteBuffer) {
+		for _, msg := range f.msgs {
+			bs := newWB().call(func(wb *kafkaWriteBuffer) {
+				wb.writeInt64(msg.Offset) // offset
+				wb.writeBytes(newWB().call(func(wb *kafkaWriteBuffer) {
+					wb.writeInt32(-1) // crc, unused
+					wb.writeInt8(0)   // magic
+					wb.writeInt8(0)   // attributes -- zero, no compression for the inner message
+					wb.writeBytes(msg.Key)
+					wb.writeBytes(msg.Value)
+				}))
+			})
+			wb.Write(bs)
+		}
+	})
+	if f.codec != nil {
+		bs = newWB().call(func(wb *kafkaWriteBuffer) {
+			wb.writeInt64(f.msgs[0].Offset) // offset
+			wb.writeBytes(newWB().call(func(wb *kafkaWriteBuffer) {
+				compressed := mustCompress(bs, f.codec)
+				wb.writeInt32(-1)            // crc, unused
+				wb.writeInt8(0)              // magic
+				wb.writeInt8(f.codec.Code()) // attributes
+				wb.writeBytes(nil)           // key is always nil for compressed
+				wb.writeBytes(compressed)    // the value is the compressed message
+			}))
+		})
+	}
+	return bs
+}
+
+type v1MessageSetBuilder struct {
+	msgs  []Message
+	codec CompressionCodec
+}
+
+func (f v1MessageSetBuilder) messages() []Message {
+	return f.msgs
+}
+
+func (f v1MessageSetBuilder) bytes() []byte {
+	bs := newWB().call(func(wb *kafkaWriteBuffer) {
+		for i, msg := range f.msgs {
+			bs := newWB().call(func(wb *kafkaWriteBuffer) {
+				if f.codec != nil {
+					wb.writeInt64(int64(i)) // compressed inner message offsets are relative
+				} else {
+					wb.writeInt64(msg.Offset) // offset
+				}
+				wb.writeBytes(newWB().call(func(wb *kafkaWriteBuffer) {
+					wb.writeInt32(-1)                     // crc, unused
+					wb.writeInt8(1)                       // magic
+					wb.writeInt8(0)                       // attributes -- zero, no compression for the inner message
+					wb.writeInt64(1000 * msg.Time.Unix()) // timestamp
+					wb.writeBytes(msg.Key)
+					wb.writeBytes(msg.Value)
+				}))
+			})
+			wb.Write(bs)
+		}
+	})
+	if f.codec != nil {
+		bs = newWB().call(func(wb *kafkaWriteBuffer) {
+			wb.writeInt64(f.msgs[len(f.msgs)-1].Offset) // offset of the wrapper message is the last offset of the inner messages
+			wb.writeBytes(newWB().call(func(wb *kafkaWriteBuffer) {
+				bs := mustCompress(bs, f.codec)
+				wb.writeInt32(-1)                           // crc, unused
+				wb.writeInt8(1)                             // magic
+				wb.writeInt8(f.codec.Code())                // attributes
+				wb.writeInt64(1000 * f.msgs[0].Time.Unix()) // timestamp
+				wb.writeBytes(nil)                          // key is always nil for compressed
+				wb.writeBytes(bs)                           // the value is the compressed message
+			}))
+		})
+	}
+	return bs
+}
+
+type v2MessageSetBuilder struct {
+	msgs  []Message
+	codec CompressionCodec
+}
+
+func (f v2MessageSetBuilder) messages() []Message {
+	return f.msgs
+}
+
+func (f v2MessageSetBuilder) bytes() []byte {
+	attributes := int16(0)
+	if f.codec != nil {
+		attributes = int16(f.codec.Code()) // set codec code on attributes
+	}
+	return newWB().call(func(wb *kafkaWriteBuffer) {
+		wb.writeInt64(f.msgs[0].Offset)
+		wb.writeBytes(newWB().call(func(wb *kafkaWriteBuffer) {
+			wb.writeInt32(0)                            // leader epoch
+			wb.writeInt8(2)                             // magic = 2
+			wb.writeInt32(0)                            // crc, unused
+			wb.writeInt16(attributes)                   // record set attributes
+			wb.writeInt32(0)                            // record set last offset delta
+			wb.writeInt64(1000 * f.msgs[0].Time.Unix()) // record set first timestamp
+			wb.writeInt64(1000 * f.msgs[0].Time.Unix()) // record set last timestamp
+			wb.writeInt64(0)                            // record set producer id
+			wb.writeInt16(0)                            // record set producer epoch
+			wb.writeInt32(0)                            // record set base sequence
+			wb.writeInt32(int32(len(f.msgs)))           // record set count
+			bs := newWB().call(func(wb *kafkaWriteBuffer) {
+				for i, msg := range f.msgs {
+					wb.Write(newWB().call(func(wb *kafkaWriteBuffer) {
+						bs := newWB().call(func(wb *kafkaWriteBuffer) {
+							wb.writeInt8(0)                                              // record attributes, not used here
+							wb.writeVarInt(1000 * (time.Now().Unix() - msg.Time.Unix())) // timestamp
+							wb.writeVarInt(int64(i))                                     // offset delta
+							wb.writeVarInt(int64(len(msg.Key)))                          // key len
+							wb.Write(msg.Key)                                            // key bytes
+							wb.writeVarInt(int64(len(msg.Value)))                        // value len
+							wb.Write(msg.Value)                                          // value bytes
+							wb.writeVarInt(int64(len(msg.Headers)))                      // number of headers
+							for _, header := range msg.Headers {
+								wb.writeVarInt(int64(len(header.Key)))
+								wb.Write([]byte(header.Key))
+								wb.writeVarInt(int64(len(header.Value)))
+								wb.Write(header.Value)
+							}
+						})
+						wb.writeVarInt(int64(len(bs)))
+						wb.Write(bs)
+					}))
+				}
+			})
+			if f.codec != nil {
+				bs = mustCompress(bs, f.codec)
+			}
+			wb.Write(bs)
+		}))
+	})
+}
+
+// kafkaWriteBuffer is a write buffer that helps writing fetch responses.
+type kafkaWriteBuffer struct {
+	writeBuffer
+	buf bytes.Buffer
+}
+
+func newWB() *kafkaWriteBuffer {
+	res := kafkaWriteBuffer{}
+	res.writeBuffer.w = &res.buf
+	return &res
+}
+
+func (f *kafkaWriteBuffer) Bytes() []byte {
+	return f.buf.Bytes()
+}
+
+// call is a convenience method that allows the kafkaWriteBuffer to be used
+// in a functional manner. This is helpful when building nested structures,
+// as the return value can be fed directly into other kafkaWriteBuffer
+// methods such as writeBytes or Write.
+func (f *kafkaWriteBuffer) call(cb func(wb *kafkaWriteBuffer)) []byte {
+	cb(f)
+	bs := f.Bytes()
+	if bs == nil {
+		bs = []byte{}
+	}
+	return bs
+}
+
+func mustCompress(bs []byte, codec compress.Codec) (res []byte) {
+	buf := bytes.Buffer{}
+	codecWriter := codec.NewWriter(&buf)
+	_, err := io.Copy(codecWriter, bytes.NewReader(bs))
+	if err != nil {
+		panic(fmt.Errorf("compress: %w", err))
+	}
+	err = codecWriter.Close()
+	if err != nil {
+		panic(fmt.Errorf("close codec writer: %w", err))
+	}
+	res = buf.Bytes()
+	return
+}
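
A hypothetical example of how these builders combine; the field values are arbitrary and the function is not part of the test suite. It only shows a v10 fetch response being rendered around a single uncompressed v2 record batch.

package kafka

import "time"

// buildExampleFetchResponse renders a fetch response whose payload is one v2
// record batch containing two records, suitable as input for a message set
// reader under test.
func buildExampleFetchResponse() []byte {
	now := time.Now()
	b := &fetchResponseBuilder{
		header: fetchResponseHeader{
			topic:               "test-topic",
			partition:           0,
			highWatermarkOffset: 2,
			lastStableOffset:    2,
		},
		msgSets: []messageSetBuilder{
			v2MessageSetBuilder{msgs: []Message{
				{Offset: 0, Time: now, Key: []byte("k0"), Value: []byte("v0")},
				{Offset: 1, Time: now, Key: []byte("k1"), Value: []byte("v1")},
			}},
		},
	}
	return b.bytes()
}
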
diff -pruN 0.2.1-1.1/client.go 0.4.49+ds1-1/client.go
--- 0.2.1-1.1/client.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/client.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,146 @@
+package kafka
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"net"
+	"time"
+
+	"github.com/segmentio/kafka-go/protocol"
+)
+
+const (
+	defaultCreateTopicsTimeout     = 2 * time.Second
+	defaultDeleteTopicsTimeout     = 2 * time.Second
+	defaultCreatePartitionsTimeout = 2 * time.Second
+	defaultProduceTimeout          = 500 * time.Millisecond
+	defaultMaxWait                 = 500 * time.Millisecond
+)
+
+// Client is a high-level API to interact with kafka brokers.
+//
+// All methods of the Client type accept a context as first argument, which may
+// be used to asynchronously cancel the requests.
+//
+// Clients are safe to use concurrently from multiple goroutines, as long as
+// their configuration is not changed after first use.
+type Client struct {
+	// Address of the kafka cluster (or specific broker) that the client will be
+	// sending requests to.
+	//
+	// This field is optional, the address may be provided in each request
+	// instead. The request address takes precedence if both were specified.
+	Addr net.Addr
+
+	// Time limit for requests sent by this client.
+	//
+	// If zero, no timeout is applied.
+	Timeout time.Duration
+
+	// A transport used to communicate with the kafka brokers.
+	//
+	// If nil, DefaultTransport is used.
+	Transport RoundTripper
+}
+
+// TopicAndGroup pairs a consumer group with a topic. Both are strings, so a
+// dedicated type is defined for clarity when passing them to Client methods.
+//
+// N.B TopicAndGroup is currently experimental! Therefore, it is subject to
+// change, including breaking changes between MINOR and PATCH releases.
+//
+// DEPRECATED: this type will be removed in version 1.0, programs should
+// migrate to use kafka.(*Client).OffsetFetch instead.
+type TopicAndGroup struct {
+	Topic   string
+	GroupId string
+}
+
+// ConsumerOffsets returns a map[int]int64 of partition to committed offset for
+// a consumer group id and topic.
+//
+// DEPRECATED: this method will be removed in version 1.0, programs should
+// migrate to use kafka.(*Client).OffsetFetch instead.
+func (c *Client) ConsumerOffsets(ctx context.Context, tg TopicAndGroup) (map[int]int64, error) {
+	metadata, err := c.Metadata(ctx, &MetadataRequest{
+		Topics: []string{tg.Topic},
+	})
+
+	if err != nil {
+		return nil, fmt.Errorf("failed to get topic metadata: %w", err)
+	}
+
+	topic := metadata.Topics[0]
+	partitions := make([]int, len(topic.Partitions))
+
+	for i := range topic.Partitions {
+		partitions[i] = topic.Partitions[i].ID
+	}
+
+	offsets, err := c.OffsetFetch(ctx, &OffsetFetchRequest{
+		GroupID: tg.GroupId,
+		Topics: map[string][]int{
+			tg.Topic: partitions,
+		},
+	})
+
+	if err != nil {
+		return nil, fmt.Errorf("failed to get offsets: %w", err)
+	}
+
+	topicOffsets := offsets.Topics[topic.Name]
+	partitionOffsets := make(map[int]int64, len(topicOffsets))
+
+	for _, off := range topicOffsets {
+		partitionOffsets[off.Partition] = off.CommittedOffset
+	}
+
+	return partitionOffsets, nil
+}
+
+func (c *Client) roundTrip(ctx context.Context, addr net.Addr, msg protocol.Message) (protocol.Message, error) {
+	if c.Timeout > 0 {
+		var cancel context.CancelFunc
+		ctx, cancel = context.WithTimeout(ctx, c.Timeout)
+		defer cancel()
+	}
+
+	if addr == nil {
+		if addr = c.Addr; addr == nil {
+			return nil, errors.New("no address was given for the kafka cluster in the request or on the client")
+		}
+	}
+
+	return c.transport().RoundTrip(ctx, addr, msg)
+}
+
+func (c *Client) transport() RoundTripper {
+	if c.Transport != nil {
+		return c.Transport
+	}
+	return DefaultTransport
+}
+
+func (c *Client) timeout(ctx context.Context, defaultTimeout time.Duration) time.Duration {
+	timeout := c.Timeout
+
+	if deadline, ok := ctx.Deadline(); ok {
+		if remain := time.Until(deadline); remain < timeout {
+			timeout = remain
+		}
+	}
+
+	if timeout > 0 {
+		// Half the timeout because it is communicated to kafka in multiple
+		// requests (e.g. Fetch, Produce, etc.); this adds a buffer to account
+		// for network latency when waiting for the response from kafka.
+		return timeout / 2
+	}
+
+	return defaultTimeout
+}
+
+func (c *Client) timeoutMs(ctx context.Context, defaultTimeout time.Duration) int32 {
+	return milliseconds(c.timeout(ctx, defaultTimeout))
+}
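
As a rough usage sketch of the Client type defined above: constructing a client against a single broker and reading back committed offsets through the deprecated ConsumerOffsets helper. The broker address, topic and group id are placeholders.

package example

import (
	"context"
	"time"

	kafka "github.com/segmentio/kafka-go"
)

// fetchCommittedOffsets returns the committed offset of every partition of a
// topic for one consumer group, keyed by partition number.
func fetchCommittedOffsets(ctx context.Context) (map[int]int64, error) {
	client := &kafka.Client{
		Addr:    kafka.TCP("localhost:9092"),
		Timeout: 10 * time.Second, // per-request limit, see Client.Timeout
	}
	return client.ConsumerOffsets(ctx, kafka.TopicAndGroup{
		Topic:   "my-topic",
		GroupId: "my-group",
	})
}
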
diff -pruN 0.2.1-1.1/client_test.go 0.4.49+ds1-1/client_test.go
--- 0.2.1-1.1/client_test.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/client_test.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,304 @@
+package kafka
+
+import (
+	"bytes"
+	"context"
+	"errors"
+	"io"
+	"math/rand"
+	"net"
+	"testing"
+	"time"
+
+	"github.com/segmentio/kafka-go/compress"
+	ktesting "github.com/segmentio/kafka-go/testing"
+)
+
+func newLocalClientAndTopic() (*Client, string, func()) {
+	topic := makeTopic()
+	client, shutdown := newLocalClientWithTopic(topic, 1)
+	return client, topic, shutdown
+}
+
+func newLocalClientWithTopic(topic string, partitions int) (*Client, func()) {
+	client, shutdown := newLocalClient()
+	if err := clientCreateTopic(client, topic, partitions); err != nil {
+		shutdown()
+		panic(err)
+	}
+	return client, func() {
+		client.DeleteTopics(context.Background(), &DeleteTopicsRequest{
+			Topics: []string{topic},
+		})
+		shutdown()
+	}
+}
+
+func clientCreateTopic(client *Client, topic string, partitions int) error {
+	_, err := client.CreateTopics(context.Background(), &CreateTopicsRequest{
+		Topics: []TopicConfig{{
+			Topic:             topic,
+			NumPartitions:     partitions,
+			ReplicationFactor: 1,
+		}},
+	})
+	if err != nil {
+		return err
+	}
+
+	// Topic creation seems to be asynchronous. Metadata for the topic partition
+	// layout in the cluster is available in the controller before being synced
+	// with the other brokers, which causes "Error:[3] Unknown Topic Or Partition"
+	// when sending requests to the partition leaders.
+	//
+	// This loop will wait up to 2 seconds polling the cluster until no errors
+	// are returned.
+	for i := 0; i < 20; i++ {
+		r, err := client.Fetch(context.Background(), &FetchRequest{
+			Topic:     topic,
+			Partition: 0,
+			Offset:    0,
+		})
+		if err == nil && r.Error == nil {
+			break
+		}
+		time.Sleep(100 * time.Millisecond)
+	}
+
+	return nil
+}
+
+func clientEndTxn(client *Client, req *EndTxnRequest) error {
+	ctx, cancel := context.WithTimeout(context.Background(), time.Second*30)
+	defer cancel()
+	resp, err := client.EndTxn(ctx, req)
+	if err != nil {
+		return err
+	}
+
+	return resp.Error
+}
+
+func newLocalClient() (*Client, func()) {
+	return newClient(TCP("localhost"))
+}
+
+func newClient(addr net.Addr) (*Client, func()) {
+	conns := &ktesting.ConnWaitGroup{
+		DialFunc: (&net.Dialer{}).DialContext,
+	}
+
+	transport := &Transport{
+		Dial:     conns.Dial,
+		Resolver: NewBrokerResolver(nil),
+	}
+
+	client := &Client{
+		Addr:      addr,
+		Timeout:   5 * time.Second,
+		Transport: transport,
+	}
+
+	return client, func() { transport.CloseIdleConnections(); conns.Wait() }
+}
+
+func TestClient(t *testing.T) {
+	tests := []struct {
+		scenario string
+		function func(*testing.T, context.Context, *Client)
+	}{
+		{
+			scenario: "retrieve committed offsets for a consumer group and topic",
+			function: testConsumerGroupFetchOffsets,
+		},
+	}
+
+	for _, test := range tests {
+		testFunc := test.function
+		t.Run(test.scenario, func(t *testing.T) {
+			ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+			defer cancel()
+
+			client, shutdown := newLocalClient()
+			defer shutdown()
+
+			testFunc(t, ctx, client)
+		})
+	}
+}
+
+func testConsumerGroupFetchOffsets(t *testing.T, ctx context.Context, client *Client) {
+	const totalMessages = 144
+	const partitions = 12
+	const msgPerPartition = totalMessages / partitions
+
+	topic := makeTopic()
+	if err := clientCreateTopic(client, topic, partitions); err != nil {
+		t.Fatal(err)
+	}
+
+	groupId := makeGroupID()
+	brokers := []string{"localhost:9092"}
+
+	writer := &Writer{
+		Addr:      TCP(brokers...),
+		Topic:     topic,
+		Balancer:  &RoundRobin{},
+		BatchSize: 1,
+		Transport: client.Transport,
+	}
+	if err := writer.WriteMessages(ctx, makeTestSequence(totalMessages)...); err != nil {
+		t.Fatalf("bad write messages: %v", err)
+	}
+	if err := writer.Close(); err != nil {
+		t.Fatalf("bad write err: %v", err)
+	}
+
+	r := NewReader(ReaderConfig{
+		Brokers:  brokers,
+		Topic:    topic,
+		GroupID:  groupId,
+		MinBytes: 1,
+		MaxBytes: 10e6,
+		MaxWait:  100 * time.Millisecond,
+	})
+	defer r.Close()
+
+	for i := 0; i < totalMessages; i++ {
+		m, err := r.FetchMessage(ctx)
+		if err != nil {
+			t.Fatalf("error fetching message: %s", err)
+		}
+		if err := r.CommitMessages(context.Background(), m); err != nil {
+			t.Fatal(err)
+		}
+	}
+
+	offsets, err := client.ConsumerOffsets(ctx, TopicAndGroup{GroupId: groupId, Topic: topic})
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if len(offsets) != partitions {
+		t.Fatalf("expected %d partitions but only received offsets for %d", partitions, len(offsets))
+	}
+
+	for i := 0; i < partitions; i++ {
+		committedOffset := offsets[i]
+		if committedOffset != msgPerPartition {
+			t.Errorf("expected partition %d with committed offset of %d but received %d", i, msgPerPartition, committedOffset)
+		}
+	}
+}
+
+func TestClientProduceAndConsume(t *testing.T) {
+	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+	defer cancel()
+	// Tests a typical kafka use case: data is produced to a partition,
+	// then consumed back sequentially. We use snappy compression because
+	// kafka streams are often compressed, and verify that each record
+	// produced is exposed to the consumer and that order is preserved.
+	client, topic, shutdown := newLocalClientAndTopic()
+	defer shutdown()
+
+	epoch := time.Now()
+	seed := int64(0) // deterministic
+	prng := rand.New(rand.NewSource(seed))
+	offset := int64(0)
+
+	const numBatches = 100
+	const recordsPerBatch = 320
+	t.Logf("producing %d batches of %d records...", numBatches, recordsPerBatch)
+
+	for i := 0; i < numBatches; i++ { // produce 100 batches
+		records := make([]Record, recordsPerBatch)
+
+		for i := range records {
+			v := make([]byte, prng.Intn(999)+1)
+			io.ReadFull(prng, v)
+			records[i].Time = epoch
+			records[i].Value = NewBytes(v)
+		}
+
+		res, err := client.Produce(ctx, &ProduceRequest{
+			Topic:        topic,
+			Partition:    0,
+			RequiredAcks: -1,
+			Records:      NewRecordReader(records...),
+			Compression:  compress.Snappy,
+		})
+		if err != nil {
+			t.Fatal(err)
+		}
+		if res.Error != nil {
+			t.Fatal(res.Error)
+		}
+		if res.BaseOffset != offset {
+			t.Fatalf("records were produced at an unexpected offset, want %d but got %d", offset, res.BaseOffset)
+		}
+		offset += int64(len(records))
+	}
+
+	prng.Seed(seed)
+	offset = 0 // reset
+	numFetches := 0
+	numRecords := 0
+
+	for numRecords < (numBatches * recordsPerBatch) {
+		res, err := client.Fetch(ctx, &FetchRequest{
+			Topic:     topic,
+			Partition: 0,
+			Offset:    offset,
+			MinBytes:  1,
+			MaxBytes:  256 * 1024,
+			MaxWait:   100 * time.Millisecond, // should only hit on the last fetch
+		})
+		if err != nil {
+			t.Fatal(err)
+		}
+		if res.Error != nil {
+			t.Fatal(err)
+		}
+
+		for {
+			r, err := res.Records.ReadRecord()
+			if err != nil {
+				if !errors.Is(err, io.EOF) {
+					t.Fatal(err)
+				}
+				break
+			}
+
+			if r.Key != nil {
+				r.Key.Close()
+				t.Error("unexpected non-null key on record at offset", r.Offset)
+			}
+
+			n := prng.Intn(999) + 1
+			a := make([]byte, n)
+			b := make([]byte, n)
+			io.ReadFull(prng, a)
+
+			_, err = io.ReadFull(r.Value, b)
+			r.Value.Close()
+			if err != nil {
+				t.Fatalf("reading record at offset %d: %v", r.Offset, err)
+			}
+
+			if !bytes.Equal(a, b) {
+				t.Fatalf("value of record at offset %d mismatches", r.Offset)
+			}
+
+			if r.Offset != offset {
+				t.Fatalf("record at offset %d was expected to have offset %d", r.Offset, offset)
+			}
+
+			offset = r.Offset + 1
+			numRecords++
+		}
+
+		numFetches++
+	}
+
+	t.Logf("%d records were read in %d fetches", numRecords, numFetches)
+}
diff -pruN 0.2.1-1.1/compress/compress.go 0.4.49+ds1-1/compress/compress.go
--- 0.2.1-1.1/compress/compress.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/compress/compress.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,124 @@
+package compress
+
+import (
+	"encoding"
+	"fmt"
+	"io"
+	"strconv"
+	"strings"
+
+	"github.com/segmentio/kafka-go/compress/gzip"
+	"github.com/segmentio/kafka-go/compress/lz4"
+	"github.com/segmentio/kafka-go/compress/snappy"
+	"github.com/segmentio/kafka-go/compress/zstd"
+)
+
+// Compression represents the compression applied to a record set.
+type Compression int8
+
+const (
+	None   Compression = 0
+	Gzip   Compression = 1
+	Snappy Compression = 2
+	Lz4    Compression = 3
+	Zstd   Compression = 4
+)
+
+func (c Compression) Codec() Codec {
+	if i := int(c); i >= 0 && i < len(Codecs) {
+		return Codecs[i]
+	}
+	return nil
+}
+
+func (c Compression) String() string {
+	if codec := c.Codec(); codec != nil {
+		return codec.Name()
+	}
+	return "uncompressed"
+}
+
+func (c Compression) MarshalText() ([]byte, error) {
+	return []byte(c.String()), nil
+}
+
+func (c *Compression) UnmarshalText(b []byte) error {
+	switch string(b) {
+	case "none", "uncompressed":
+		*c = None
+		return nil
+	}
+
+	for _, codec := range Codecs[None+1:] {
+		if codec.Name() == string(b) {
+			*c = Compression(codec.Code())
+			return nil
+		}
+	}
+
+	i, err := strconv.ParseInt(string(b), 10, 64)
+	if err == nil && i >= 0 && i < int64(len(Codecs)) {
+		*c = Compression(i)
+		return nil
+	}
+
+	s := &strings.Builder{}
+	s.WriteString("none, uncompressed")
+
+	for i, codec := range Codecs[None+1:] {
+		if i < (len(Codecs) - 1) {
+			s.WriteString(", ")
+		} else {
+			s.WriteString(", or ")
+		}
+		s.WriteString(codec.Name())
+	}
+
+	return fmt.Errorf("compression format must be one of %s, not %q", s, b)
+}
+
+var (
+	_ encoding.TextMarshaler   = Compression(0)
+	_ encoding.TextUnmarshaler = (*Compression)(nil)
+)
+
+// Codec represents a compression codec to encode and decode the messages.
+// See : https://cwiki.apache.org/confluence/display/KAFKA/Compression
+//
+// A Codec must be safe for concurrent access by multiple go routines.
+type Codec interface {
+	// Code returns the compression codec code
+	Code() int8
+
+	// Human-readable name for the codec.
+	Name() string
+
+	// Constructs a new reader which decompresses data from r.
+	NewReader(r io.Reader) io.ReadCloser
+
+	// Constructs a new writer which writes compressed data to w.
+	NewWriter(w io.Writer) io.WriteCloser
+}
+
+var (
+	// The global gzip codec installed on the Codecs table.
+	GzipCodec gzip.Codec
+
+	// The global snappy codec installed on the Codecs table.
+	SnappyCodec snappy.Codec
+
+	// The global lz4 codec installed on the Codecs table.
+	Lz4Codec lz4.Codec
+
+	// The global zstd codec installed on the Codecs table.
+	ZstdCodec zstd.Codec
+
+	// The global table of compression codecs supported by the kafka protocol.
+	Codecs = [...]Codec{
+		None:   nil,
+		Gzip:   &GzipCodec,
+		Snappy: &SnappyCodec,
+		Lz4:    &Lz4Codec,
+		Zstd:   &ZstdCodec,
+	}
+)
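
A short sketch of how the Compression helpers above are meant to be combined: UnmarshalText accepts either a codec name or its numeric code, and Codec then resolves the entry from the global table (nil for none/uncompressed). The codecByName helper is illustrative only.

package example

import "github.com/segmentio/kafka-go/compress"

// codecByName maps a configuration string such as "gzip", "snappy" or "2"
// to the corresponding codec, or nil when no compression is requested.
func codecByName(name string) (compress.Codec, error) {
	var c compress.Compression
	if err := c.UnmarshalText([]byte(name)); err != nil {
		return nil, err
	}
	return c.Codec(), nil
}
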
diff -pruN 0.2.1-1.1/compress/compress_test.go 0.4.49+ds1-1/compress/compress_test.go
--- 0.2.1-1.1/compress/compress_test.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/compress/compress_test.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,512 @@
+package compress_test
+
+import (
+	"bytes"
+	"context"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"math/rand"
+	"net"
+	"os"
+	"path/filepath"
+	"strconv"
+	"testing"
+	"text/tabwriter"
+	"time"
+
+	gz "github.com/klauspost/compress/gzip"
+	"github.com/segmentio/kafka-go"
+	pkg "github.com/segmentio/kafka-go/compress"
+	"github.com/segmentio/kafka-go/compress/gzip"
+	"github.com/segmentio/kafka-go/compress/lz4"
+	"github.com/segmentio/kafka-go/compress/snappy"
+	"github.com/segmentio/kafka-go/compress/zstd"
+	ktesting "github.com/segmentio/kafka-go/testing"
+)
+
+func init() {
+	// Seeding the random source is important to prevent multiple test runs from
+	// reusing the same topic names.
+	rand.Seed(time.Now().UnixNano())
+}
+
+func TestCodecs(t *testing.T) {
+	for i, c := range pkg.Codecs {
+		if c != nil {
+			if code := c.Code(); int8(code) != int8(i) {
+				t.Fatal("default compression codec table is misconfigured for", c.Name())
+			}
+		}
+	}
+}
+
+func TestCompression(t *testing.T) {
+	msg := kafka.Message{
+		Value: []byte("message"),
+	}
+
+	testEncodeDecode(t, msg, new(gzip.Codec))
+	testEncodeDecode(t, msg, new(snappy.Codec))
+	testEncodeDecode(t, msg, new(lz4.Codec))
+	if ktesting.KafkaIsAtLeast("2.1.0") {
+		testEncodeDecode(t, msg, new(zstd.Codec))
+	}
+}
+
+func compress(codec pkg.Codec, src []byte) ([]byte, error) {
+	b := new(bytes.Buffer)
+	r := bytes.NewReader(src)
+	w := codec.NewWriter(b)
+	if _, err := io.Copy(w, r); err != nil {
+		w.Close()
+		return nil, err
+	}
+	if err := w.Close(); err != nil {
+		return nil, err
+	}
+	return b.Bytes(), nil
+}
+
+func decompress(codec pkg.Codec, src []byte) ([]byte, error) {
+	b := new(bytes.Buffer)
+	r := codec.NewReader(bytes.NewReader(src))
+	if _, err := io.Copy(b, r); err != nil {
+		r.Close()
+		return nil, err
+	}
+	if err := r.Close(); err != nil {
+		return nil, err
+	}
+	return b.Bytes(), nil
+}
+
+func testEncodeDecode(t *testing.T, m kafka.Message, codec pkg.Codec) {
+	var r1, r2 []byte
+	var err error
+
+	t.Run("text format of "+codec.Name(), func(t *testing.T) {
+		c := pkg.Compression(codec.Code())
+		a := strconv.Itoa(int(c))
+		x := pkg.Compression(-1)
+		y := pkg.Compression(-1)
+		b, err := c.MarshalText()
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		if err := x.UnmarshalText([]byte(a)); err != nil {
+			t.Fatal(err)
+		}
+		if err := y.UnmarshalText(b); err != nil {
+			t.Fatal(err)
+		}
+
+		if x != c {
+			t.Errorf("compression mismatch after marshal/unmarshal: want=%s got=%s", c, x)
+		}
+		if y != c {
+			t.Errorf("compression mismatch after marshal/unmarshal: want=%s got=%s", c, y)
+		}
+	})
+
+	t.Run("encode with "+codec.Name(), func(t *testing.T) {
+		r1, err = compress(codec, m.Value)
+		if err != nil {
+			t.Fatal(err)
+		}
+	})
+
+	t.Run("decode with "+codec.Name(), func(t *testing.T) {
+		if r1 == nil {
+			if r1, err = compress(codec, m.Value); err != nil {
+				t.Fatal(err)
+			}
+		}
+		r2, err = decompress(codec, r1)
+		if err != nil {
+			t.Fatal(err)
+		}
+		if string(r2) != "message" {
+			t.Error("bad message")
+			t.Logf("expected: %q", string(m.Value))
+			t.Logf("got:      %q", string(r2))
+		}
+	})
+}
+
+func TestCompressedMessages(t *testing.T) {
+	testCompressedMessages(t, new(gzip.Codec))
+	testCompressedMessages(t, new(snappy.Codec))
+	testCompressedMessages(t, new(lz4.Codec))
+
+	if ktesting.KafkaIsAtLeast("2.1.0") {
+		testCompressedMessages(t, new(zstd.Codec))
+	}
+}
+
+func testCompressedMessages(t *testing.T, codec pkg.Codec) {
+	t.Run(codec.Name(), func(t *testing.T) {
+		client, topic, shutdown := newLocalClientAndTopic()
+		defer shutdown()
+
+		w := &kafka.Writer{
+			Addr:         kafka.TCP("127.0.0.1:9092"),
+			Topic:        topic,
+			Compression:  kafka.Compression(codec.Code()),
+			BatchTimeout: 10 * time.Millisecond,
+			Transport:    client.Transport,
+		}
+		defer w.Close()
+
+		offset := 0
+		var values []string
+		for i := 0; i < 10; i++ {
+			batch := make([]kafka.Message, i+1)
+			for j := range batch {
+				value := fmt.Sprintf("Hello World %d!", offset)
+				values = append(values, value)
+				batch[j] = kafka.Message{
+					Key:   []byte(strconv.Itoa(offset)),
+					Value: []byte(value),
+				}
+				offset++
+			}
+			ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+			if err := w.WriteMessages(ctx, batch...); err != nil {
+				t.Errorf("error sending batch %d, reason: %+v", i+1, err)
+			}
+			cancel()
+		}
+
+		r := kafka.NewReader(kafka.ReaderConfig{
+			Brokers:   []string{"127.0.0.1:9092"},
+			Topic:     topic,
+			Partition: 0,
+			MaxWait:   10 * time.Millisecond,
+			MinBytes:  1,
+			MaxBytes:  1024,
+		})
+		defer r.Close()
+
+		ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+		defer cancel()
+
+		// In order to ensure messages are decompressed properly, read at
+		// offsets that we know to be in the middle of compressed message sets.
+		for base := range values {
+			r.SetOffset(int64(base))
+			for i := base; i < len(values); i++ {
+				msg, err := r.ReadMessage(ctx)
+				if err != nil {
+					t.Fatalf("error receiving message at loop %d, offset %d, reason: %+v", base, i, err)
+				}
+				if msg.Offset != int64(i) {
+					t.Fatalf("wrong offset at loop %d...expected %d but got %d", base, i, msg.Offset)
+				}
+				if strconv.Itoa(i) != string(msg.Key) {
+					t.Fatalf("wrong message key at loop %d...expected %d but got %s", base, i, string(msg.Key))
+				}
+				if values[i] != string(msg.Value) {
+					t.Fatalf("wrong message value at loop %d...expected %s but got %s", base, values[i], string(msg.Value))
+				}
+			}
+		}
+	})
+}
+
+func TestMixedCompressedMessages(t *testing.T) {
+	client, topic, shutdown := newLocalClientAndTopic()
+	defer shutdown()
+
+	offset := 0
+	var values []string
+	produce := func(n int, codec pkg.Codec) {
+		w := &kafka.Writer{
+			Addr:      kafka.TCP("127.0.0.1:9092"),
+			Topic:     topic,
+			Transport: client.Transport,
+		}
+		defer w.Close()
+
+		if codec != nil {
+			w.Compression = kafka.Compression(codec.Code())
+		}
+
+		msgs := make([]kafka.Message, n)
+		for i := range msgs {
+			value := fmt.Sprintf("Hello World %d!", offset)
+			values = append(values, value)
+			offset++
+			msgs[i] = kafka.Message{Value: []byte(value)}
+		}
+
+		ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+		defer cancel()
+		if err := w.WriteMessages(ctx, msgs...); err != nil {
+			t.Errorf("failed to produce messages: %+v", err)
+		}
+	}
+
+	// produce messages that interleave uncompressed messages and messages with
+	// different compression codecs.  reader should be able to properly handle
+	// all of them.
+	produce(10, nil)
+	produce(20, new(gzip.Codec))
+	produce(5, nil)
+	produce(10, new(snappy.Codec))
+	produce(10, new(lz4.Codec))
+	produce(5, nil)
+
+	r := kafka.NewReader(kafka.ReaderConfig{
+		Brokers:   []string{"127.0.0.1:9092"},
+		Topic:     topic,
+		Partition: 0,
+		MaxWait:   10 * time.Millisecond,
+		MinBytes:  1,
+		MaxBytes:  1024,
+	})
+	defer r.Close()
+
+	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+	defer cancel()
+
+	// In order to ensure messages are decompressed properly, read at
+	// offsets that we know to be in the middle of compressed message sets.
+	for base := range values {
+		r.SetOffset(int64(base))
+		for i := base; i < len(values); i++ {
+			msg, err := r.ReadMessage(ctx)
+			if err != nil {
+				t.Errorf("error receiving message at loop %d, offset %d, reason: %+v", base, i, err)
+			}
+			if msg.Offset != int64(i) {
+				t.Errorf("wrong offset at loop %d...expected %d but got %d", base, i, msg.Offset)
+			}
+			if values[i] != string(msg.Value) {
+				t.Errorf("wrong message value at loop %d...expected %s but got %s", base, values[i], string(msg.Value))
+			}
+		}
+	}
+}
+
+type noopCodec struct{}
+
+func (noopCodec) Code() int8 {
+	return 0
+}
+
+func (noopCodec) Name() string {
+	return "none"
+}
+
+func (noopCodec) NewReader(r io.Reader) io.ReadCloser {
+	return ioutil.NopCloser(r)
+}
+
+func (noopCodec) NewWriter(w io.Writer) io.WriteCloser {
+	return nopWriteCloser{w}
+}
+
+type nopWriteCloser struct{ io.Writer }
+
+func (nopWriteCloser) Close() error { return nil }
+
+func BenchmarkCompression(b *testing.B) {
+	benchmarks := []struct {
+		codec    pkg.Codec
+		function func(*testing.B, pkg.Codec, *bytes.Buffer, []byte) float64
+	}{
+		{
+			codec:    &noopCodec{},
+			function: benchmarkCompression,
+		},
+		{
+			codec:    new(gzip.Codec),
+			function: benchmarkCompression,
+		},
+		{
+			codec:    new(snappy.Codec),
+			function: benchmarkCompression,
+		},
+		{
+			codec:    new(lz4.Codec),
+			function: benchmarkCompression,
+		},
+		{
+			codec:    new(zstd.Codec),
+			function: benchmarkCompression,
+		},
+	}
+
+	f, err := os.Open(filepath.Join(os.Getenv("GOROOT"), "src/encoding/json/testdata/code.json.gz"))
+	if err != nil {
+		b.Fatal(err)
+	}
+	defer f.Close()
+
+	z, err := gz.NewReader(f)
+	if err != nil {
+		b.Fatal(err)
+	}
+
+	payload, err := ioutil.ReadAll(z)
+	if err != nil {
+		b.Fatal(err)
+	}
+
+	buffer := bytes.Buffer{}
+	buffer.Grow(len(payload))
+
+	ts := &bytes.Buffer{}
+	tw := tabwriter.NewWriter(ts, 0, 8, 0, '\t', 0)
+	defer func() {
+		tw.Flush()
+		fmt.Printf("input => %.2f MB\n", float64(len(payload))/(1024*1024))
+		fmt.Println(ts)
+	}()
+
+	for i := range benchmarks {
+		benchmark := &benchmarks[i]
+		ratio := 0.0
+
+		b.Run(benchmark.codec.Name(), func(b *testing.B) {
+			ratio = benchmark.function(b, benchmark.codec, &buffer, payload)
+		})
+
+		fmt.Fprintf(tw, "  %s:\t%.2f%%\n", benchmark.codec.Name(), 100*ratio)
+	}
+}
+
+func benchmarkCompression(b *testing.B, codec pkg.Codec, buf *bytes.Buffer, payload []byte) float64 {
+	// In case only the decompression benchmarks are run, we use this flag to
+	// detect whether we have to compress the payload before the decompression
+	// benchmarks.
+	compressed := false
+
+	b.Run("compress", func(b *testing.B) {
+		compressed = true
+		r := bytes.NewReader(payload)
+		b.ReportAllocs()
+
+		for i := 0; i < b.N; i++ {
+			buf.Reset()
+			r.Reset(payload)
+			w := codec.NewWriter(buf)
+
+			_, err := io.Copy(w, r)
+			if err != nil {
+				b.Fatal(err)
+			}
+			if err := w.Close(); err != nil {
+				b.Fatal(err)
+			}
+		}
+
+		b.SetBytes(int64(buf.Len()))
+	})
+
+	if !compressed {
+		r := bytes.NewReader(payload)
+		w := codec.NewWriter(buf)
+
+		_, err := io.Copy(w, r)
+		if err != nil {
+			b.Fatal(err)
+		}
+		if err := w.Close(); err != nil {
+			b.Fatal(err)
+		}
+	}
+
+	b.Run("decompress", func(b *testing.B) {
+		c := bytes.NewReader(buf.Bytes())
+		b.ReportAllocs()
+		for i := 0; i < b.N; i++ {
+			c.Reset(buf.Bytes())
+			r := codec.NewReader(c)
+
+			n, err := io.Copy(ioutil.Discard, r)
+			if err != nil {
+				b.Fatal(err)
+			}
+			if err := r.Close(); err != nil {
+				b.Fatal(err)
+			}
+
+			b.SetBytes(n)
+		}
+	})
+
+	return 1 - (float64(buf.Len()) / float64(len(payload)))
+}
+
+func init() {
+	rand.Seed(time.Now().UnixNano())
+}
+
+func makeTopic() string {
+	return fmt.Sprintf("kafka-go-%016x", rand.Int63())
+}
+
+func newLocalClientAndTopic() (*kafka.Client, string, func()) {
+	topic := makeTopic()
+	client, shutdown := newLocalClient()
+
+	_, err := client.CreateTopics(context.Background(), &kafka.CreateTopicsRequest{
+		Topics: []kafka.TopicConfig{{
+			Topic:             topic,
+			NumPartitions:     1,
+			ReplicationFactor: 1,
+		}},
+	})
+	if err != nil {
+		shutdown()
+		panic(err)
+	}
+
+	// Topic creation seems to be asynchronous. Metadata for the topic partition
+	// layout in the cluster is available in the controller before being synced
+	// with the other brokers, which causes "Error:[3] Unknown Topic Or Partition"
+	// when sending requests to the partition leaders.
+	for i := 0; i < 20; i++ {
+		r, err := client.Fetch(context.Background(), &kafka.FetchRequest{
+			Topic:     topic,
+			Partition: 0,
+			Offset:    0,
+		})
+		if err == nil && r.Error == nil {
+			break
+		}
+		time.Sleep(100 * time.Millisecond)
+	}
+
+	return client, topic, func() {
+		client.DeleteTopics(context.Background(), &kafka.DeleteTopicsRequest{
+			Topics: []string{topic},
+		})
+		shutdown()
+	}
+}
+
+func newLocalClient() (*kafka.Client, func()) {
+	return newClient(kafka.TCP("127.0.0.1:9092"))
+}
+
+func newClient(addr net.Addr) (*kafka.Client, func()) {
+	conns := &ktesting.ConnWaitGroup{
+		DialFunc: (&net.Dialer{}).DialContext,
+	}
+
+	transport := &kafka.Transport{
+		Dial: conns.Dial,
+	}
+
+	client := &kafka.Client{
+		Addr:      addr,
+		Timeout:   5 * time.Second,
+		Transport: transport,
+	}
+
+	return client, func() { transport.CloseIdleConnections(); conns.Wait() }
+}
diff -pruN 0.2.1-1.1/compress/gzip/gzip.go 0.4.49+ds1-1/compress/gzip/gzip.go
--- 0.2.1-1.1/compress/gzip/gzip.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/compress/gzip/gzip.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,123 @@
+package gzip
+
+import (
+	"io"
+	"sync"
+
+	"github.com/klauspost/compress/gzip"
+)
+
+var (
+	readerPool sync.Pool
+)
+
+// Codec is the implementation of a compress.Codec which supports creating
+// readers and writers for kafka messages compressed with gzip.
+type Codec struct {
+	// The compression level to configure on writers created by this codec.
+	// Acceptable values are defined in the standard gzip package.
+	//
+	// Defaults to gzip.DefaultCompression.
+	Level int
+
+	writerPool sync.Pool
+}
+
+// Code implements the compress.Codec interface.
+func (c *Codec) Code() int8 { return 1 }
+
+// Name implements the compress.Codec interface.
+func (c *Codec) Name() string { return "gzip" }
+
+// NewReader implements the compress.Codec interface.
+func (c *Codec) NewReader(r io.Reader) io.ReadCloser {
+	var err error
+	z, _ := readerPool.Get().(*gzip.Reader)
+	if z != nil {
+		err = z.Reset(r)
+	} else {
+		z, err = gzip.NewReader(r)
+	}
+	if err != nil {
+		if z != nil {
+			readerPool.Put(z)
+		}
+		return &errorReader{err: err}
+	}
+	return &reader{Reader: z}
+}
+
+// NewWriter implements the compress.Codec interface.
+func (c *Codec) NewWriter(w io.Writer) io.WriteCloser {
+	x := c.writerPool.Get()
+	z, _ := x.(*gzip.Writer)
+	if z == nil {
+		x, err := gzip.NewWriterLevel(w, c.level())
+		if err != nil {
+			return &errorWriter{err: err}
+		}
+		z = x
+	} else {
+		z.Reset(w)
+	}
+	return &writer{codec: c, Writer: z}
+}
+
+func (c *Codec) level() int {
+	if c.Level != 0 {
+		return c.Level
+	}
+	return gzip.DefaultCompression
+}
+
+type reader struct{ *gzip.Reader }
+
+func (r *reader) Close() (err error) {
+	if z := r.Reader; z != nil {
+		r.Reader = nil
+		err = z.Close()
+		// Pass it an empty reader, which is a zero-size value implementing the
+		// flate.Reader interface to avoid the construction of a bufio.Reader in
+		// the call to Reset.
+		//
+		// Note: we could also not reset the reader at all, but that would cause
+		// the underlying reader to be retained until the gzip.Reader is freed,
+		// which may not be desirable.
+		z.Reset(emptyReader{})
+		readerPool.Put(z)
+	}
+	return
+}
+
+type writer struct {
+	codec *Codec
+	*gzip.Writer
+}
+
+func (w *writer) Close() (err error) {
+	if z := w.Writer; z != nil {
+		w.Writer = nil
+		err = z.Close()
+		z.Reset(nil)
+		w.codec.writerPool.Put(z)
+	}
+	return
+}
+
+type emptyReader struct{}
+
+func (emptyReader) ReadByte() (byte, error) { return 0, io.EOF }
+
+func (emptyReader) Read([]byte) (int, error) { return 0, io.EOF }
+
+type errorReader struct{ err error }
+
+func (r *errorReader) Close() error { return r.err }
+
+func (r *errorReader) Read([]byte) (int, error) { return 0, r.err }
+
+type errorWriter struct{ err error }
+
+func (w *errorWriter) Close() error { return w.err }
+
+func (w *errorWriter) Write([]byte) (int, error) { return 0, w.err }
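
A minimal round-trip sketch for the gzip codec above, assuming only the Codec API shown here. Construction failures are deferred into the errorReader/errorWriter values, which is why the Write and Close results are checked.

package example

import (
	"bytes"
	"io/ioutil"

	"github.com/segmentio/kafka-go/compress/gzip"
)

// gzipRoundTrip compresses a payload and decompresses it again, returning the
// recovered bytes.
func gzipRoundTrip(payload []byte) ([]byte, error) {
	var codec gzip.Codec // a zero Level falls back to gzip.DefaultCompression

	compressed := new(bytes.Buffer)
	w := codec.NewWriter(compressed)
	if _, err := w.Write(payload); err != nil {
		w.Close()
		return nil, err
	}
	if err := w.Close(); err != nil {
		return nil, err
	}

	r := codec.NewReader(compressed)
	defer r.Close()
	return ioutil.ReadAll(r)
}
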
diff -pruN 0.2.1-1.1/compress/lz4/lz4.go 0.4.49+ds1-1/compress/lz4/lz4.go
--- 0.2.1-1.1/compress/lz4/lz4.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/compress/lz4/lz4.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,68 @@
+package lz4
+
+import (
+	"io"
+	"sync"
+
+	"github.com/pierrec/lz4/v4"
+)
+
+var (
+	readerPool sync.Pool
+	writerPool sync.Pool
+)
+
+// Codec is the implementation of a compress.Codec which supports creating
+// readers and writers for kafka messages compressed with lz4.
+type Codec struct{}
+
+// Code implements the compress.Codec interface.
+func (c *Codec) Code() int8 { return 3 }
+
+// Name implements the compress.Codec interface.
+func (c *Codec) Name() string { return "lz4" }
+
+// NewReader implements the compress.Codec interface.
+func (c *Codec) NewReader(r io.Reader) io.ReadCloser {
+	z, _ := readerPool.Get().(*lz4.Reader)
+	if z != nil {
+		z.Reset(r)
+	} else {
+		z = lz4.NewReader(r)
+	}
+	return &reader{Reader: z}
+}
+
+// NewWriter implements the compress.Codec interface.
+func (c *Codec) NewWriter(w io.Writer) io.WriteCloser {
+	z, _ := writerPool.Get().(*lz4.Writer)
+	if z != nil {
+		z.Reset(w)
+	} else {
+		z = lz4.NewWriter(w)
+	}
+	return &writer{Writer: z}
+}
+
+type reader struct{ *lz4.Reader }
+
+func (r *reader) Close() (err error) {
+	if z := r.Reader; z != nil {
+		r.Reader = nil
+		z.Reset(nil)
+		readerPool.Put(z)
+	}
+	return
+}
+
+type writer struct{ *lz4.Writer }
+
+func (w *writer) Close() (err error) {
+	if z := w.Writer; z != nil {
+		w.Writer = nil
+		err = z.Close()
+		z.Reset(nil)
+		writerPool.Put(z)
+	}
+	return
+}
diff -pruN 0.2.1-1.1/compress/snappy/snappy.go 0.4.49+ds1-1/compress/snappy/snappy.go
--- 0.2.1-1.1/compress/snappy/snappy.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/compress/snappy/snappy.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,110 @@
+package snappy
+
+import (
+	"io"
+	"sync"
+
+	"github.com/klauspost/compress/s2"
+	"github.com/klauspost/compress/snappy"
+)
+
+// Framing is an enumeration type used to enable or disable xerial framing of
+// snappy messages.
+type Framing int
+
+const (
+	Framed Framing = iota
+	Unframed
+)
+
+// Compression level.
+type Compression int
+
+const (
+	DefaultCompression Compression = iota
+	FasterCompression
+	BetterCompression
+	BestCompression
+)
+
+var (
+	readerPool sync.Pool
+	writerPool sync.Pool
+)
+
+// Codec is the implementation of a compress.Codec which supports creating
+// readers and writers for kafka messages compressed with snappy.
+type Codec struct {
+	// An optional framing to apply to snappy compression.
+	//
+	// Defaults to Framed.
+	Framing Framing
+
+	// Compression level.
+	Compression Compression
+}
+
+// Code implements the compress.Codec interface.
+func (c *Codec) Code() int8 { return 2 }
+
+// Name implements the compress.Codec interface.
+func (c *Codec) Name() string { return "snappy" }
+
+// NewReader implements the compress.Codec interface.
+func (c *Codec) NewReader(r io.Reader) io.ReadCloser {
+	x, _ := readerPool.Get().(*xerialReader)
+	if x != nil {
+		x.Reset(r)
+	} else {
+		x = &xerialReader{
+			reader: r,
+			decode: snappy.Decode,
+		}
+	}
+	return &reader{xerialReader: x}
+}
+
+// NewWriter implements the compress.Codec interface.
+func (c *Codec) NewWriter(w io.Writer) io.WriteCloser {
+	x, _ := writerPool.Get().(*xerialWriter)
+	if x != nil {
+		x.Reset(w)
+	} else {
+		x = &xerialWriter{writer: w}
+	}
+	x.framed = c.Framing == Framed
+	switch c.Compression {
+	case FasterCompression:
+		x.encode = s2.EncodeSnappy
+	case BetterCompression:
+		x.encode = s2.EncodeSnappyBetter
+	case BestCompression:
+		x.encode = s2.EncodeSnappyBest
+	default:
+		x.encode = snappy.Encode // aka. s2.EncodeSnappyBetter
+	}
+	return &writer{xerialWriter: x}
+}
+
+type reader struct{ *xerialReader }
+
+func (r *reader) Close() (err error) {
+	if x := r.xerialReader; x != nil {
+		r.xerialReader = nil
+		x.Reset(nil)
+		readerPool.Put(x)
+	}
+	return
+}
+
+type writer struct{ *xerialWriter }
+
+func (w *writer) Close() (err error) {
+	if x := w.xerialWriter; x != nil {
+		w.xerialWriter = nil
+		err = x.Flush()
+		x.Reset(nil)
+		writerPool.Put(x)
+	}
+	return
+}
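
A small sketch of the configuration surface exposed by the snappy codec above: Framed (xerial framing) is the zero value and therefore the default, and the Compression field selects which s2 encoder is used. Both values below are only examples.

package example

import (
	"io"

	"github.com/segmentio/kafka-go/compress/snappy"
)

// newRawSnappyWriter returns a writer that produces plain (unframed) snappy
// blocks using the strongest s2 encoder variant.
func newRawSnappyWriter(w io.Writer) io.WriteCloser {
	codec := &snappy.Codec{
		Framing:     snappy.Unframed,
		Compression: snappy.BestCompression,
	}
	return codec.NewWriter(w)
}
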
diff -pruN 0.2.1-1.1/compress/snappy/xerial.go 0.4.49+ds1-1/compress/snappy/xerial.go
--- 0.2.1-1.1/compress/snappy/xerial.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/compress/snappy/xerial.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,330 @@
+package snappy
+
+import (
+	"bytes"
+	"encoding/binary"
+	"errors"
+	"io"
+
+	"github.com/klauspost/compress/snappy"
+)
+
+const defaultBufferSize = 32 * 1024
+
+// An implementation of io.Reader which consumes a stream of xerial-framed
+// snappy-encoeded data. The framing is optional, if no framing is detected
+// the reader will simply forward the bytes from its underlying stream.
+type xerialReader struct {
+	reader io.Reader
+	header [16]byte
+	input  []byte
+	output []byte
+	offset int64
+	nbytes int64
+	decode func([]byte, []byte) ([]byte, error)
+}
+
+func (x *xerialReader) Reset(r io.Reader) {
+	x.reader = r
+	x.input = x.input[:0]
+	x.output = x.output[:0]
+	x.header = [16]byte{}
+	x.offset = 0
+	x.nbytes = 0
+}
+
+func (x *xerialReader) Read(b []byte) (int, error) {
+	for {
+		if x.offset < int64(len(x.output)) {
+			n := copy(b, x.output[x.offset:])
+			x.offset += int64(n)
+			return n, nil
+		}
+
+		n, err := x.readChunk(b)
+		if err != nil {
+			return 0, err
+		}
+		if n > 0 {
+			return n, nil
+		}
+	}
+}
+
+func (x *xerialReader) WriteTo(w io.Writer) (int64, error) {
+	wn := int64(0)
+
+	for {
+		for x.offset < int64(len(x.output)) {
+			n, err := w.Write(x.output[x.offset:])
+			wn += int64(n)
+			x.offset += int64(n)
+			if err != nil {
+				return wn, err
+			}
+		}
+
+		if _, err := x.readChunk(nil); err != nil {
+			if errors.Is(err, io.EOF) {
+				err = nil
+			}
+			return wn, err
+		}
+	}
+}
+
+func (x *xerialReader) readChunk(dst []byte) (int, error) {
+	x.output = x.output[:0]
+	x.offset = 0
+	prefix := 0
+
+	if x.nbytes == 0 {
+		n, err := x.readFull(x.header[:])
+		if err != nil && n == 0 {
+			return 0, err
+		}
+		prefix = n
+	}
+
+	if isXerialHeader(x.header[:]) {
+		if cap(x.input) < 4 {
+			x.input = make([]byte, 4, defaultBufferSize)
+		} else {
+			x.input = x.input[:4]
+		}
+
+		_, err := x.readFull(x.input)
+		if err != nil {
+			return 0, err
+		}
+
+		frame := int(binary.BigEndian.Uint32(x.input))
+		if cap(x.input) < frame {
+			x.input = make([]byte, frame, align(frame, defaultBufferSize))
+		} else {
+			x.input = x.input[:frame]
+		}
+
+		if _, err := x.readFull(x.input); err != nil {
+			return 0, err
+		}
+	} else {
+		if cap(x.input) == 0 {
+			x.input = make([]byte, 0, defaultBufferSize)
+		} else {
+			x.input = x.input[:0]
+		}
+
+		if prefix > 0 {
+			x.input = append(x.input, x.header[:prefix]...)
+		}
+
+		for {
+			if len(x.input) == cap(x.input) {
+				b := make([]byte, len(x.input), 2*cap(x.input))
+				copy(b, x.input)
+				x.input = b
+			}
+
+			n, err := x.read(x.input[len(x.input):cap(x.input)])
+			x.input = x.input[:len(x.input)+n]
+			if err != nil {
+				if errors.Is(err, io.EOF) && len(x.input) > 0 {
+					break
+				}
+				return 0, err
+			}
+		}
+	}
+
+	var n int
+	var err error
+
+	if x.decode == nil {
+		x.output, x.input, err = x.input, x.output, nil
+	} else if n, err = snappy.DecodedLen(x.input); n <= len(dst) && err == nil {
+		// If the output buffer is large enough to hold the decoded value,
+		// write it there directly instead of using the intermediary output
+		// buffer.
+		_, err = x.decode(dst, x.input)
+	} else {
+		var b []byte
+		n = 0
+		b, err = x.decode(x.output[:cap(x.output)], x.input)
+		if err == nil {
+			x.output = b
+		}
+	}
+
+	return n, err
+}
+
+func (x *xerialReader) read(b []byte) (int, error) {
+	n, err := x.reader.Read(b)
+	x.nbytes += int64(n)
+	return n, err
+}
+
+func (x *xerialReader) readFull(b []byte) (int, error) {
+	n, err := io.ReadFull(x.reader, b)
+	x.nbytes += int64(n)
+	return n, err
+}
+
+// An implementation of a xerial-framed snappy-encoded output stream.
+// Each Write made to the writer is framed with a xerial header.
+type xerialWriter struct {
+	writer io.Writer
+	header [16]byte
+	input  []byte
+	output []byte
+	nbytes int64
+	framed bool
+	encode func([]byte, []byte) []byte
+}
+
+func (x *xerialWriter) Reset(w io.Writer) {
+	x.writer = w
+	x.input = x.input[:0]
+	x.output = x.output[:0]
+	x.nbytes = 0
+}
+
+func (x *xerialWriter) ReadFrom(r io.Reader) (int64, error) {
+	wn := int64(0)
+
+	if cap(x.input) == 0 {
+		x.input = make([]byte, 0, defaultBufferSize)
+	}
+
+	for {
+		if x.full() {
+			x.grow()
+		}
+
+		n, err := r.Read(x.input[len(x.input):cap(x.input)])
+		wn += int64(n)
+		x.input = x.input[:len(x.input)+n]
+
+		if x.fullEnough() {
+			if err := x.Flush(); err != nil {
+				return wn, err
+			}
+		}
+
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				err = nil
+			}
+			return wn, err
+		}
+	}
+}
+
+func (x *xerialWriter) Write(b []byte) (int, error) {
+	wn := 0
+
+	if cap(x.input) == 0 {
+		x.input = make([]byte, 0, defaultBufferSize)
+	}
+
+	for len(b) > 0 {
+		if x.full() {
+			x.grow()
+		}
+
+		n := copy(x.input[len(x.input):cap(x.input)], b)
+		b = b[n:]
+		wn += n
+		x.input = x.input[:len(x.input)+n]
+
+		if x.fullEnough() {
+			if err := x.Flush(); err != nil {
+				return wn, err
+			}
+		}
+	}
+
+	return wn, nil
+}
+
+func (x *xerialWriter) Flush() error {
+	if len(x.input) == 0 {
+		return nil
+	}
+
+	var b []byte
+	if x.encode == nil {
+		b = x.input
+	} else {
+		x.output = x.encode(x.output[:cap(x.output)], x.input)
+		b = x.output
+	}
+
+	x.input = x.input[:0]
+	x.output = x.output[:0]
+
+	if x.framed && x.nbytes == 0 {
+		writeXerialHeader(x.header[:])
+		_, err := x.write(x.header[:])
+		if err != nil {
+			return err
+		}
+	}
+
+	if x.framed {
+		writeXerialFrame(x.header[:4], len(b))
+		_, err := x.write(x.header[:4])
+		if err != nil {
+			return err
+		}
+	}
+
+	_, err := x.write(b)
+	return err
+}
+
+func (x *xerialWriter) write(b []byte) (int, error) {
+	n, err := x.writer.Write(b)
+	x.nbytes += int64(n)
+	return n, err
+}
+
+func (x *xerialWriter) full() bool {
+	return len(x.input) == cap(x.input)
+}
+
+func (x *xerialWriter) fullEnough() bool {
+	return x.framed && (cap(x.input)-len(x.input)) < 1024
+}
+
+func (x *xerialWriter) grow() {
+	tmp := make([]byte, len(x.input), 2*cap(x.input))
+	copy(tmp, x.input)
+	x.input = tmp
+}
+
+func align(n, a int) int {
+	if (n % a) == 0 {
+		return n
+	}
+	return ((n / a) + 1) * a
+}
+
+var (
+	xerialHeader      = [...]byte{130, 83, 78, 65, 80, 80, 89, 0}
+	xerialVersionInfo = [...]byte{0, 0, 0, 1, 0, 0, 0, 1}
+)
+
+func isXerialHeader(src []byte) bool {
+	return len(src) >= 16 && bytes.Equal(src[:8], xerialHeader[:])
+}
+
+func writeXerialHeader(b []byte) {
+	copy(b[:8], xerialHeader[:])
+	copy(b[8:], xerialVersionInfo[:])
+}
+
+func writeXerialFrame(b []byte, n int) {
+	binary.BigEndian.PutUint32(b, uint32(n))
+}
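For reference, the stream produced by xerialWriter is the 16-byte xerial header (the snappy magic followed by the version and compatibility words) and then one or more snappy blocks, each prefixed with its 4-byte big-endian length. A minimal sketch of assembling a single frame by hand with the helpers above, assuming the same klauspost snappy encoder used elsewhere in this package:

	block := snappy.Encode(nil, []byte("hello xerial")) // compressed payload

	frame := make([]byte, 0, 16+4+len(block))
	var header [16]byte
	writeXerialHeader(header[:]) // magic + version/compat words
	frame = append(frame, header[:]...)

	var size [4]byte
	writeXerialFrame(size[:], len(block)) // big-endian block length
	frame = append(frame, size[:]...)
	frame = append(frame, block...)
	// isXerialHeader(frame) is now true, and xerialReader can decode the stream.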
diff -pruN 0.2.1-1.1/compress/snappy/xerial_test.go 0.4.49+ds1-1/compress/snappy/xerial_test.go
--- 0.2.1-1.1/compress/snappy/xerial_test.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/compress/snappy/xerial_test.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,165 @@
+package snappy
+
+import (
+	"bytes"
+	"crypto/rand"
+	"io"
+	"testing"
+
+	"github.com/klauspost/compress/snappy"
+	goxerialsnappy "github.com/segmentio/kafka-go/compress/snappy/go-xerial-snappy"
+)
+
+// Wrap an io.Reader or io.Writer to disable all copy optimizations like
+// io.WriterTo or io.ReaderFrom.
+// We use this to ensure writes are chunked by io.Copy's internal buffer
+// in the tests.
+type simpleReader struct{ io.Reader }
+type simpleWriter struct{ io.Writer }
+
+func TestXerialReaderSnappy(t *testing.T) {
+	rawData := new(bytes.Buffer)
+	rawData.Grow(1024 * 1024)
+	io.CopyN(rawData, rand.Reader, 1024*1024)
+
+	compressedRawData := bytes.NewReader(snappy.Encode(nil, rawData.Bytes()))
+
+	decompressedData := new(bytes.Buffer)
+	io.Copy(decompressedData,
+		&xerialReader{reader: compressedRawData, decode: snappy.Decode})
+
+	b0 := rawData.Bytes()
+	b1 := decompressedData.Bytes()
+
+	if !bytes.Equal(b0, b1) {
+		t.Error("data mismatch")
+	}
+}
+
+func TestXerialReaderWriter(t *testing.T) {
+	rawData := new(bytes.Buffer)
+	rawData.Grow(1024 * 1024)
+	io.CopyN(rawData, rand.Reader, 1024*1024)
+
+	framedData := new(bytes.Buffer)
+	framedData.Grow(rawData.Len() + 1024)
+	w := simpleWriter{&xerialWriter{writer: framedData}}
+	r := simpleReader{bytes.NewReader(rawData.Bytes())}
+	io.Copy(w, r)
+	w.Writer.(*xerialWriter).Flush()
+
+	unframedData := new(bytes.Buffer)
+	unframedData.Grow(rawData.Len())
+	io.Copy(unframedData, &xerialReader{reader: framedData})
+
+	b0 := rawData.Bytes()
+	b1 := unframedData.Bytes()
+
+	if !bytes.Equal(b0, b1) {
+		t.Error("data mismatch")
+	}
+}
+
+func TestXerialFramedCompression(t *testing.T) {
+	rawData := new(bytes.Buffer)
+	rawData.Grow(1024 * 1024)
+	io.CopyN(rawData, rand.Reader, 1024*1024)
+
+	framedAndCompressedData := new(bytes.Buffer)
+	framedAndCompressedData.Grow(rawData.Len())
+	w := simpleWriter{&xerialWriter{writer: framedAndCompressedData, framed: true, encode: snappy.Encode}}
+	r := simpleReader{bytes.NewReader(rawData.Bytes())}
+	io.Copy(w, r)
+	w.Writer.(*xerialWriter).Flush()
+
+	unframedAndDecompressedData := new(bytes.Buffer)
+	unframedAndDecompressedData.Grow(rawData.Len())
+	io.Copy(unframedAndDecompressedData,
+		simpleReader{&xerialReader{reader: framedAndCompressedData, decode: snappy.Decode}})
+
+	b0 := rawData.Bytes()
+	b1 := unframedAndDecompressedData.Bytes()
+
+	if !bytes.Equal(b0, b1) {
+		t.Error("data mismatch")
+	}
+}
+
+func TestXerialFramedCompressionOptimized(t *testing.T) {
+	rawData := new(bytes.Buffer)
+	rawData.Grow(1024 * 1024)
+	io.CopyN(rawData, rand.Reader, 1024*1024)
+
+	framedAndCompressedData := new(bytes.Buffer)
+	framedAndCompressedData.Grow(rawData.Len())
+	w := &xerialWriter{writer: framedAndCompressedData, framed: true, encode: snappy.Encode}
+	r := simpleReader{bytes.NewReader(rawData.Bytes())}
+	io.Copy(w, r)
+	w.Flush()
+
+	unframedAndDecompressedData := new(bytes.Buffer)
+	unframedAndDecompressedData.Grow(rawData.Len())
+	io.Copy(unframedAndDecompressedData,
+		&xerialReader{reader: framedAndCompressedData, decode: snappy.Decode})
+
+	b0 := rawData.Bytes()
+	b1 := unframedAndDecompressedData.Bytes()
+
+	if !bytes.Equal(b0, b1) {
+		t.Error("data mismatch")
+	}
+}
+
+func TestXerialReaderAgainstGoXerialSnappy(t *testing.T) {
+	rawData := new(bytes.Buffer)
+	rawData.Grow(1024 * 1024)
+	io.CopyN(rawData, rand.Reader, 1024*1024)
+	rawBytes := rawData.Bytes()
+
+	framedAndCompressedData := []byte{}
+	const chunkSize = 999
+	for i := 0; i < len(rawBytes); i += chunkSize {
+		j := i + chunkSize
+		if j > len(rawBytes) {
+			j = len(rawBytes)
+		}
+		framedAndCompressedData = goxerialsnappy.EncodeStream(framedAndCompressedData, rawBytes[i:j])
+	}
+
+	unframedAndDecompressedData := new(bytes.Buffer)
+	unframedAndDecompressedData.Grow(rawData.Len())
+	io.Copy(unframedAndDecompressedData,
+		&xerialReader{reader: bytes.NewReader(framedAndCompressedData), decode: snappy.Decode})
+
+	b0 := rawBytes
+	b1 := unframedAndDecompressedData.Bytes()
+
+	if !bytes.Equal(b0, b1) {
+		t.Error("data mismatch")
+	}
+}
+
+func TestXerialWriterAgainstGoXerialSnappy(t *testing.T) {
+	rawData := new(bytes.Buffer)
+	rawData.Grow(1024 * 1024)
+	io.CopyN(rawData, rand.Reader, 1024*1024)
+
+	framedAndCompressedData := new(bytes.Buffer)
+	framedAndCompressedData.Grow(rawData.Len())
+	w := &xerialWriter{writer: framedAndCompressedData, framed: true, encode: snappy.Encode}
+	r := simpleReader{bytes.NewReader(rawData.Bytes())}
+	io.Copy(w, r)
+	w.Flush()
+
+	unframedAndDecompressedData, err := goxerialsnappy.Decode(framedAndCompressedData.Bytes())
+	if err != nil {
+		t.Error(err)
+	}
+
+	b0 := rawData.Bytes()
+	b1 := unframedAndDecompressedData
+
+	if !bytes.Equal(b0, b1) {
+		t.Error("data mismatch")
+	}
+}
diff -pruN 0.2.1-1.1/compress/zstd/zstd.go 0.4.49+ds1-1/compress/zstd/zstd.go
--- 0.2.1-1.1/compress/zstd/zstd.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/compress/zstd/zstd.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,168 @@
+// Package zstd implements Zstandard compression.
+package zstd
+
+import (
+	"io"
+	"sync"
+
+	"github.com/klauspost/compress/zstd"
+)
+
+// Codec is the implementation of a compress.Codec which supports creating
+// readers and writers for kafka messages compressed with zstd.
+type Codec struct {
+	// The compression level configured on writers created by the codec.
+	//
+	// Default to 3.
+	// Defaults to 3.
+
+	encoderPool sync.Pool // *encoder
+}
+
+// Code implements the compress.Codec interface.
+func (c *Codec) Code() int8 { return 4 }
+
+// Name implements the compress.Codec interface.
+func (c *Codec) Name() string { return "zstd" }
+
+// NewReader implements the compress.Codec interface.
+func (c *Codec) NewReader(r io.Reader) io.ReadCloser {
+	p := new(reader)
+	if p.dec, _ = decoderPool.Get().(*zstd.Decoder); p.dec != nil {
+		p.dec.Reset(r)
+	} else {
+		z, err := zstd.NewReader(r,
+			zstd.WithDecoderConcurrency(1),
+		)
+		if err != nil {
+			p.err = err
+		} else {
+			p.dec = z
+		}
+	}
+	return p
+}
+
+func (c *Codec) level() int {
+	if c.Level != 0 {
+		return c.Level
+	}
+	return 3
+}
+
+func (c *Codec) zstdLevel() zstd.EncoderLevel {
+	return zstd.EncoderLevelFromZstd(c.level())
+}
+
+var decoderPool sync.Pool // *zstd.Decoder
+
+type reader struct {
+	dec *zstd.Decoder
+	err error
+}
+
+// Close implements the io.Closer interface.
+func (r *reader) Close() error {
+	if r.dec != nil {
+		r.dec.Reset(devNull{}) // don't retain the underlying reader
+		decoderPool.Put(r.dec)
+		r.dec = nil
+		r.err = io.ErrClosedPipe
+	}
+	return nil
+}
+
+// Read implements the io.Reader interface.
+func (r *reader) Read(p []byte) (int, error) {
+	if r.err != nil {
+		return 0, r.err
+	}
+	if r.dec == nil {
+		return 0, io.EOF
+	}
+	return r.dec.Read(p)
+}
+
+// WriteTo implements the io.WriterTo interface.
+func (r *reader) WriteTo(w io.Writer) (int64, error) {
+	if r.err != nil {
+		return 0, r.err
+	}
+	if r.dec == nil {
+		return 0, io.ErrClosedPipe
+	}
+	return r.dec.WriteTo(w)
+}
+
+// NewWriter implements the compress.Codec interface.
+func (c *Codec) NewWriter(w io.Writer) io.WriteCloser {
+	p := new(writer)
+	if enc, _ := c.encoderPool.Get().(*zstd.Encoder); enc == nil {
+		z, err := zstd.NewWriter(w,
+			zstd.WithEncoderLevel(c.zstdLevel()),
+			zstd.WithEncoderConcurrency(1),
+			zstd.WithZeroFrames(true),
+		)
+		if err != nil {
+			p.err = err
+		} else {
+			p.enc = z
+		}
+	} else {
+		p.enc = enc
+		p.enc.Reset(w)
+	}
+	p.c = c
+	return p
+}
+
+type writer struct {
+	c   *Codec
+	enc *zstd.Encoder
+	err error
+}
+
+// Close implements the io.Closer interface.
+func (w *writer) Close() error {
+	if w.enc != nil {
+		// Close needs to be called to write the end of stream marker and flush
+		// the buffers. The zstd package documents that the encoder is re-usable
+		// after being closed.
+		err := w.enc.Close()
+		if err != nil {
+			w.err = err
+		}
+		w.enc.Reset(devNull{}) // don't retain the underlying writer
+		w.c.encoderPool.Put(w.enc)
+		w.enc = nil
+		return err
+	}
+	return w.err
+}
+
+// Write implements the io.Writer interface.
+func (w *writer) Write(p []byte) (int, error) {
+	if w.err != nil {
+		return 0, w.err
+	}
+	if w.enc == nil {
+		return 0, io.ErrClosedPipe
+	}
+	return w.enc.Write(p)
+}
+
+// ReadFrom implements the io.ReaderFrom interface.
+func (w *writer) ReadFrom(r io.Reader) (int64, error) {
+	if w.err != nil {
+		return 0, w.err
+	}
+	if w.enc == nil {
+		return 0, io.ErrClosedPipe
+	}
+	return w.enc.ReadFrom(r)
+}
+
+type devNull struct{}
+
+func (devNull) Read([]byte) (int, error)  { return 0, io.EOF }
+func (devNull) Write([]byte) (int, error) { return 0, nil }
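A minimal sketch of exercising this codec on its own through the exported API above (NewWriter/NewReader and the pooled encoder/decoder); error handling is elided for brevity and the payload is illustrative:

	package main

	import (
		"bytes"
		"io"

		"github.com/segmentio/kafka-go/compress/zstd"
	)

	func main() {
		codec := &zstd.Codec{Level: 5} // a zero Level falls back to the default of 3

		var buf bytes.Buffer
		w := codec.NewWriter(&buf)
		w.Write([]byte("hello zstd"))
		w.Close() // writes the end-of-stream marker and returns the encoder to the pool

		r := codec.NewReader(&buf)
		out, _ := io.ReadAll(r)
		r.Close() // returns the decoder to the pool
		_ = out
	}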
diff -pruN 0.2.1-1.1/compression.go 0.4.49+ds1-1/compression.go
--- 0.2.1-1.1/compression.go	2018-11-20 17:06:32.000000000 +0000
+++ 0.4.49+ds1-1/compression.go	2025-08-21 19:15:53.000000000 +0000
@@ -2,51 +2,30 @@ package kafka
 
 import (
 	"errors"
-	"sync"
+
+	"github.com/segmentio/kafka-go/compress"
 )
 
-var errUnknownCodec = errors.New("invalid codec")
+type Compression = compress.Compression
 
-var codecs = make(map[int8]CompressionCodec)
-var codecsMutex sync.RWMutex
+const (
+	Gzip   Compression = compress.Gzip
+	Snappy Compression = compress.Snappy
+	Lz4    Compression = compress.Lz4
+	Zstd   Compression = compress.Zstd
+)
 
-// RegisterCompressionCodec registers a compression codec so it can be used by a Writer.
-func RegisterCompressionCodec(codec func() CompressionCodec) {
-	c := codec()
-	codecsMutex.Lock()
-	codecs[c.Code()] = c
-	codecsMutex.Unlock()
-}
+type CompressionCodec = compress.Codec
 
-// resolveCodec looks up a codec by Code()
-func resolveCodec(code int8) (codec CompressionCodec, err error) {
-	codecsMutex.RLock()
-	codec = codecs[code]
-	codecsMutex.RUnlock()
+var (
+	errUnknownCodec = errors.New("the compression code is invalid or its codec has not been imported")
+)
 
+// resolveCodec looks up a codec by Code().
+func resolveCodec(code int8) (CompressionCodec, error) {
+	codec := compress.Compression(code).Codec()
 	if codec == nil {
-		err = errUnknownCodec
+		return nil, errUnknownCodec
 	}
-	return
+	return codec, nil
 }
-
-// CompressionCodec represents a compression codec to encode and decode
-// the messages.
-// See : https://cwiki.apache.org/confluence/display/KAFKA/Compression
-//
-// A CompressionCodec must be safe for concurrent access by multiple go
-// routines.
-type CompressionCodec interface {
-	// Code returns the compression codec code
-	Code() int8
-
-	// Encode encodes the src data
-	Encode(src []byte) ([]byte, error)
-
-	// Decode decodes the src data
-	Decode(src []byte) ([]byte, error)
-}
-
-const compressionCodecMask int8 = 0x03
-const DefaultCompressionLevel int = -1
-const CompressionNoneCode = 0
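Since the codecs now live under the compress package, enabling compression on the high-level writer is a matter of setting one of the Compression constants. A sketch assuming the 0.4 Writer API, with the broker address and topic as placeholders (context, log and kafka imports assumed):

	w := &kafka.Writer{
		Addr:        kafka.TCP("localhost:9092"),
		Topic:       "example-topic",
		Compression: kafka.Snappy, // or kafka.Gzip, kafka.Lz4, kafka.Zstd
	}
	defer w.Close()

	err := w.WriteMessages(context.Background(),
		kafka.Message{Key: []byte("key"), Value: []byte("value")},
	)
	if err != nil {
		log.Fatal(err)
	}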
diff -pruN 0.2.1-1.1/compression_test.go 0.4.49+ds1-1/compression_test.go
--- 0.2.1-1.1/compression_test.go	2018-11-20 17:06:32.000000000 +0000
+++ 0.4.49+ds1-1/compression_test.go	1970-01-01 00:00:00.000000000 +0000
@@ -1,311 +0,0 @@
-package kafka_test
-
-import (
-	"context"
-	"fmt"
-	"math/rand"
-	"strconv"
-	"testing"
-	"time"
-
-	"github.com/segmentio/kafka-go"
-	"github.com/segmentio/kafka-go/gzip"
-	"github.com/segmentio/kafka-go/lz4"
-	"github.com/segmentio/kafka-go/snappy"
-)
-
-func TestCompression(t *testing.T) {
-	msg := kafka.Message{
-		Value: []byte("message"),
-	}
-
-	testEncodeDecode(t, msg, gzip.NewCompressionCodec())
-	testEncodeDecode(t, msg, snappy.NewCompressionCodec())
-	testEncodeDecode(t, msg, lz4.NewCompressionCodec())
-}
-
-func testEncodeDecode(t *testing.T, m kafka.Message, codec kafka.CompressionCodec) {
-	var r1, r2 []byte
-	var err error
-	var code int8
-
-	if codec != nil {
-		code = codec.Code()
-	}
-
-	t.Run("encode with "+codecToStr(code), func(t *testing.T) {
-		r1, err = codec.Encode(m.Value)
-		if err != nil {
-			t.Error(err)
-		}
-	})
-	t.Run("encode with "+codecToStr(code), func(t *testing.T) {
-		r2, err = codec.Decode(r1)
-		if err != nil {
-			t.Error(err)
-		}
-		if string(r2) != "message" {
-			t.Error("bad message")
-			t.Log("got: ", string(r2))
-			t.Log("expected: ", string(m.Value))
-		}
-	})
-}
-
-func codecToStr(codec int8) string {
-	switch codec {
-	case kafka.CompressionNoneCode:
-		return "none"
-	case gzip.Code:
-		return "gzip"
-	case snappy.Code:
-		return "snappy"
-	case lz4.Code:
-		return "lz4"
-	default:
-		return "unknown"
-	}
-}
-
-func TestCompressedMessages(t *testing.T) {
-	testCompressedMessages(t, gzip.NewCompressionCodec())
-	testCompressedMessages(t, snappy.NewCompressionCodec())
-	testCompressedMessages(t, lz4.NewCompressionCodec())
-}
-
-func testCompressedMessages(t *testing.T, codec kafka.CompressionCodec) {
-	t.Run("produce/consume with"+codecToStr(codec.Code()), func(t *testing.T) {
-		t.Parallel()
-
-		topic := kafka.CreateTopic(t, 1)
-		w := kafka.NewWriter(kafka.WriterConfig{
-			Brokers:          []string{"127.0.0.1:9092"},
-			Topic:            topic,
-			CompressionCodec: codec,
-		})
-		defer w.Close()
-
-		offset := 0
-		var values []string
-		for i := 0; i < 10; i++ {
-			batch := make([]kafka.Message, i+1)
-			for j := range batch {
-				value := fmt.Sprintf("Hello World %d!", offset)
-				values = append(values, value)
-				batch[j] = kafka.Message{
-					Key:   []byte(strconv.Itoa(offset)),
-					Value: []byte(value),
-				}
-				offset++
-			}
-			ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
-			if err := w.WriteMessages(ctx, batch...); err != nil {
-				t.Errorf("error sending batch %d, reason: %+v", i+1, err)
-			}
-			cancel()
-		}
-
-		r := kafka.NewReader(kafka.ReaderConfig{
-			Brokers:   []string{"127.0.0.1:9092"},
-			Topic:     topic,
-			Partition: 0,
-			MaxWait:   10 * time.Millisecond,
-			MinBytes:  1,
-			MaxBytes:  1024,
-		})
-		defer r.Close()
-
-		ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
-		defer cancel()
-
-		// in order to ensure proper handling of decompressing message, read at
-		// offsets that we know to be in the middle of compressed message sets.
-		for base := range values {
-			r.SetOffset(int64(base))
-			for i := base; i < len(values); i++ {
-				msg, err := r.ReadMessage(ctx)
-				if err != nil {
-					t.Errorf("error receiving message at loop %d, offset %d, reason: %+v", base, i, err)
-				}
-				if msg.Offset != int64(i) {
-					t.Errorf("wrong offset at loop %d...expected %d but got %d", base, i, msg.Offset)
-				}
-				if strconv.Itoa(i) != string(msg.Key) {
-					t.Errorf("wrong message key at loop %d...expected %d but got %s", base, i, string(msg.Key))
-				}
-				if values[i] != string(msg.Value) {
-					t.Errorf("wrong message value at loop %d...expected %s but got %s", base, values[i], string(msg.Value))
-				}
-			}
-		}
-	})
-}
-
-func TestMixedCompressedMessages(t *testing.T) {
-	t.Parallel()
-
-	topic := kafka.CreateTopic(t, 1)
-
-	offset := 0
-	var values []string
-	produce := func(n int, codec kafka.CompressionCodec) {
-		w := kafka.NewWriter(kafka.WriterConfig{
-			Brokers:          []string{"127.0.0.1:9092"},
-			Topic:            topic,
-			CompressionCodec: codec,
-		})
-		defer w.Close()
-
-		msgs := make([]kafka.Message, n)
-		for i := range msgs {
-			value := fmt.Sprintf("Hello World %d!", offset)
-			values = append(values, value)
-			offset++
-			msgs[i] = kafka.Message{Value: []byte(value)}
-		}
-
-		ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
-		defer cancel()
-		if err := w.WriteMessages(ctx, msgs...); err != nil {
-			t.Errorf("failed to produce messages: %+v", err)
-		}
-	}
-
-	// produce messages that interleave uncompressed messages and messages with
-	// different compression codecs.  reader should be able to properly handle
-	// all of them.
-	produce(10, nil)
-	produce(20, gzip.NewCompressionCodec())
-	produce(5, nil)
-	produce(10, snappy.NewCompressionCodec())
-	produce(10, lz4.NewCompressionCodec())
-	produce(5, nil)
-
-	r := kafka.NewReader(kafka.ReaderConfig{
-		Brokers:   []string{"127.0.0.1:9092"},
-		Topic:     topic,
-		Partition: 0,
-		MaxWait:   10 * time.Millisecond,
-		MinBytes:  1,
-		MaxBytes:  1024,
-	})
-	defer r.Close()
-
-	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
-	defer cancel()
-
-	// in order to ensure proper handling of decompressing message, read at
-	// offsets that we know to be in the middle of compressed message sets.
-	for base := range values {
-		r.SetOffset(int64(base))
-		for i := base; i < len(values); i++ {
-			msg, err := r.ReadMessage(ctx)
-			if err != nil {
-				t.Errorf("error receiving message at loop %d, offset %d, reason: %+v", base, i, err)
-			}
-			if msg.Offset != int64(i) {
-				t.Errorf("wrong offset at loop %d...expected %d but got %d", base, i, msg.Offset)
-			}
-			if values[i] != string(msg.Value) {
-				t.Errorf("wrong message value at loop %d...expected %s but got %s", base, values[i], string(msg.Value))
-			}
-		}
-	}
-}
-
-type noopCodec struct{}
-
-func (noopCodec) Code() int8 {
-	return 0
-}
-
-func (noopCodec) Encode(src []byte) ([]byte, error) {
-	return src, nil
-}
-
-func (noopCodec) Decode(src []byte) ([]byte, error) {
-	return src, nil
-}
-
-func BenchmarkCompression(b *testing.B) {
-	benchmarks := []struct {
-		scenario string
-		codec    kafka.CompressionCodec
-		function func(*testing.B, kafka.CompressionCodec, int, map[int][]byte)
-	}{
-		{
-			scenario: "None",
-			codec:    &noopCodec{},
-			function: benchmarkCompression,
-		},
-		{
-			scenario: "GZIP",
-			codec:    gzip.NewCompressionCodec(),
-			function: benchmarkCompression,
-		},
-		{
-			scenario: "Snappy",
-			codec:    snappy.NewCompressionCodec(),
-			function: benchmarkCompression,
-		},
-		{
-			scenario: "LZ4",
-			codec:    lz4.NewCompressionCodec(),
-			function: benchmarkCompression,
-		},
-	}
-
-	payload := map[int][]byte{
-		1024:  randomPayload(1024),
-		4096:  randomPayload(4096),
-		8192:  randomPayload(8192),
-		16384: randomPayload(16384),
-	}
-
-	for _, benchmark := range benchmarks {
-		b.Run(benchmark.scenario+"1024", func(b *testing.B) {
-			benchmark.function(b, benchmark.codec, 1024, payload)
-		})
-		b.Run(benchmark.scenario+"4096", func(b *testing.B) {
-			benchmark.function(b, benchmark.codec, 4096, payload)
-		})
-		b.Run(benchmark.scenario+"8192", func(b *testing.B) {
-			benchmark.function(b, benchmark.codec, 8192, payload)
-		})
-		b.Run(benchmark.scenario+"16384", func(b *testing.B) {
-			benchmark.function(b, benchmark.codec, 16384, payload)
-		})
-	}
-
-}
-
-func benchmarkCompression(b *testing.B, codec kafka.CompressionCodec, payloadSize int, payload map[int][]byte) {
-	msg := kafka.Message{
-		Value: payload[payloadSize],
-	}
-
-	for i := 0; i < b.N; i++ {
-		m1, err := codec.Encode(msg.Value)
-		if err != nil {
-			b.Fatal(err)
-		}
-
-		b.SetBytes(int64(len(m1)))
-
-		_, err = codec.Decode(m1)
-		if err != nil {
-			b.Fatal(err)
-		}
-
-	}
-}
-
-const dataset = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890"
-
-func randomPayload(n int) []byte {
-	b := make([]byte, n)
-	for i := range b {
-		b[i] = dataset[rand.Intn(len(dataset))]
-	}
-	return b
-}
diff -pruN 0.2.1-1.1/conn.go 0.4.49+ds1-1/conn.go
--- 0.2.1-1.1/conn.go	2018-11-20 17:06:32.000000000 +0000
+++ 0.4.49+ds1-1/conn.go	2025-08-21 19:15:53.000000000 +0000
@@ -4,11 +4,11 @@ import (
 	"bufio"
 	"errors"
 	"fmt"
+	"io"
 	"math"
 	"net"
 	"os"
 	"path/filepath"
-	"runtime"
 	"sync"
 	"sync/atomic"
 	"time"
@@ -19,23 +19,6 @@ var (
 	errInvalidWritePartition = errors.New("writes must NOT set Partition on kafka.Message")
 )
 
-// Broker carries the metadata associated with a kafka broker.
-type Broker struct {
-	Host string
-	Port int
-	ID   int
-	Rack string
-}
-
-// Partition carries the metadata associated with a kafka partition.
-type Partition struct {
-	Topic    string
-	Leader   Broker
-	Replicas []Broker
-	Isr      []Broker
-	ID       int
-}
-
 // Conn represents a connection to a kafka broker.
 //
 // Instances of Conn are safe to use concurrently from multiple goroutines.
@@ -43,6 +26,9 @@ type Conn struct {
 	// base network connection
 	conn net.Conn
 
+	// number of inflight requests on the connection.
+	inflight int32
+
 	// offset management (synchronized on the mutex field)
 	mutex  sync.Mutex
 	offset int64
@@ -54,6 +40,7 @@ type Conn struct {
 	// write buffer (synchronized on wlock)
 	wlock sync.Mutex
 	wbuf  bufio.Writer
+	wb    writeBuffer
 
 	// deadline management
 	wdeadline connDeadline
@@ -65,12 +52,35 @@ type Conn struct {
 	partition     int32
 	fetchMaxBytes int32
 	fetchMinSize  int32
+	broker        int32
+	rack          string
 
 	// correlation ID generator (synchronized on wlock)
 	correlationID int32
 
 	// number of replica acks required when publishing to a partition
 	requiredAcks int32
+
+	// lazily loaded API versions used by this connection
+	apiVersions atomic.Value // apiVersionMap
+
+	transactionalID *string
+}
+
+type apiVersionMap map[apiKey]ApiVersion
+
+func (v apiVersionMap) negotiate(key apiKey, sortedSupportedVersions ...apiVersion) apiVersion {
+	x := v[key]
+
+	for i := len(sortedSupportedVersions) - 1; i >= 0; i-- {
+		s := sortedSupportedVersions[i]
+
+		if apiVersion(x.MaxVersion) >= s {
+			return s
+		}
+	}
+
+	return -1
 }
 
 // ConnConfig is a configuration object used to create new instances of Conn.
@@ -78,8 +88,50 @@ type ConnConfig struct {
 	ClientID  string
 	Topic     string
 	Partition int
+	Broker    int
+	Rack      string
+
+	// The transactional id to use for transactional delivery. Idempotent
+	// delivery should be enabled if a transactional id is configured.
+	// For more details look at transactional.id description here: http://kafka.apache.org/documentation.html#producerconfigs
+	// Empty string means that this connection can't be transactional.
+	TransactionalID string
+}
+
+// ReadBatchConfig is a configuration object used for reading batches of messages.
+type ReadBatchConfig struct {
+	// MinBytes indicates to the broker the minimum batch size that the consumer
+	// will accept. Setting a high minimum when consuming from a low-volume topic
+	// may result in delayed delivery when the broker does not have enough data to
+	// satisfy the defined minimum.
+	MinBytes int
+
+	// MaxBytes indicates to the broker the maximum batch size that the consumer
+	// will accept. The broker will truncate a message to satisfy this maximum, so
+	// choose a value that is high enough for your largest message size.
+	MaxBytes int
+
+	// IsolationLevel controls the visibility of transactional records.
+	// ReadUncommitted makes all records visible. With ReadCommitted only
+	// non-transactional and committed records are visible.
+	IsolationLevel IsolationLevel
+
+	// MaxWait is the amount of time the broker will wait trying to hit the
+	// min/max byte targets.  This setting is independent of any network-level
+	// timeouts or deadlines.
+	//
+	// For backward compatibility, when this field is left zero, kafka-go will
+	// infer the max wait from the connection's read deadline.
+	MaxWait time.Duration
 }
 
+type IsolationLevel int8
+
+const (
+	ReadUncommitted IsolationLevel = 0
+	ReadCommitted   IsolationLevel = 1
+)
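A sketch of reading with these options on a connection obtained from DialLeader; the address, topic and values are placeholders, error handling is kept minimal, and MaxWait is forwarded to the broker as the fetch timeout as described above:

	conn, _ := kafka.DialLeader(context.Background(), "tcp", "localhost:9092", "example-topic", 0)
	defer conn.Close()

	batch := conn.ReadBatchWith(kafka.ReadBatchConfig{
		MinBytes:       1,
		MaxBytes:       10e6,
		IsolationLevel: kafka.ReadCommitted,
		MaxWait:        500 * time.Millisecond,
	})
	defer batch.Close()

	for {
		msg, err := batch.ReadMessage()
		if err != nil {
			break // io.EOF marks the end of the batch
		}
		_ = msg
	}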
+
 var (
 	// DefaultClientID is the default value used as ClientID of kafka
 	// connections.
@@ -100,6 +152,13 @@ func NewConn(conn net.Conn, topic string
 	})
 }
 
+func emptyToNullable(transactionalID string) (result *string) {
+	if transactionalID != "" {
+		result = &transactionalID
+	}
+	return result
+}
+
 // NewConnWith returns a new kafka connection configured with config.
 // The offset is initialized to FirstOffset.
 func NewConnWith(conn net.Conn, config ConnConfig) *Conn {
@@ -112,16 +171,21 @@ func NewConnWith(conn net.Conn, config C
 	}
 
 	c := &Conn{
-		conn:         conn,
-		rbuf:         *bufio.NewReader(conn),
-		wbuf:         *bufio.NewWriter(conn),
-		clientID:     config.ClientID,
-		topic:        config.Topic,
-		partition:    int32(config.Partition),
-		offset:       FirstOffset,
-		requiredAcks: -1,
+		conn:            conn,
+		rbuf:            *bufio.NewReader(conn),
+		wbuf:            *bufio.NewWriter(conn),
+		clientID:        config.ClientID,
+		topic:           config.Topic,
+		partition:       int32(config.Partition),
+		broker:          int32(config.Broker),
+		rack:            config.Rack,
+		offset:          FirstOffset,
+		requiredAcks:    -1,
+		transactionalID: emptyToNullable(config.TransactionalID),
 	}
 
+	c.wb.w = &c.wbuf
+
 	// The fetch request needs to ask for a MaxBytes value that is at least
 	// enough to load the control data of the response. To avoid having to
 	// recompute it on every read, it is cached here in the Conn value.
@@ -138,40 +202,114 @@ func NewConnWith(conn net.Conn, config C
 	return c
 }
 
-// DeleteTopics deletes the specified topics.
-func (c *Conn) DeleteTopics(topics ...string) error {
-	_, err := c.deleteTopics(deleteTopicsRequestV1{
-		Topics: topics,
-	})
-	return err
+func (c *Conn) negotiateVersion(key apiKey, sortedSupportedVersions ...apiVersion) (apiVersion, error) {
+	v, err := c.loadVersions()
+	if err != nil {
+		return -1, err
+	}
+	a := v.negotiate(key, sortedSupportedVersions...)
+	if a < 0 {
+		return -1, fmt.Errorf("no matching versions were found between the client and the broker for API key %d", key)
+	}
+	return a, nil
 }
 
-// describeGroups retrieves the specified groups
-//
-// See http://kafka.apache.org/protocol.html#The_Messages_DescribeGroups
-func (c *Conn) describeGroups(request describeGroupsRequestV1) (describeGroupsResponseV1, error) {
-	var response describeGroupsResponseV1
+func (c *Conn) loadVersions() (apiVersionMap, error) {
+	v, _ := c.apiVersions.Load().(apiVersionMap)
+	if v != nil {
+		return v, nil
+	}
 
+	brokerVersions, err := c.ApiVersions()
+	if err != nil {
+		return nil, err
+	}
+
+	v = make(apiVersionMap, len(brokerVersions))
+
+	for _, a := range brokerVersions {
+		v[apiKey(a.ApiKey)] = a
+	}
+
+	c.apiVersions.Store(v)
+	return v, nil
+}
+
+// Broker returns a Broker value representing the kafka broker that this
+// connection was established to.
+func (c *Conn) Broker() Broker {
+	addr := c.conn.RemoteAddr()
+	host, port, _ := splitHostPortNumber(addr.String())
+	return Broker{
+		Host: host,
+		Port: port,
+		ID:   int(c.broker),
+		Rack: c.rack,
+	}
+}
+
+// Controller requests kafka for the current controller and returns its Broker.
+func (c *Conn) Controller() (broker Broker, err error) {
+	err = c.readOperation(
+		func(deadline time.Time, id int32) error {
+			return c.writeRequest(metadata, v1, id, topicMetadataRequestV1([]string{}))
+		},
+		func(deadline time.Time, size int) error {
+			var res metadataResponseV1
+
+			if err := c.readResponse(size, &res); err != nil {
+				return err
+			}
+			for _, brokerMeta := range res.Brokers {
+				if brokerMeta.NodeID == res.ControllerID {
+					broker = Broker{ID: int(brokerMeta.NodeID),
+						Port: int(brokerMeta.Port),
+						Host: brokerMeta.Host,
+						Rack: brokerMeta.Rack}
+					break
+				}
+			}
+			return nil
+		},
+	)
+	return broker, err
+}
+
+// Brokers retrieves the broker list from the Kafka metadata.
+func (c *Conn) Brokers() ([]Broker, error) {
+	var brokers []Broker
 	err := c.readOperation(
 		func(deadline time.Time, id int32) error {
-			return c.writeRequest(describeGroupsRequest, v1, id, request)
+			return c.writeRequest(metadata, v1, id, topicMetadataRequestV1([]string{}))
 		},
 		func(deadline time.Time, size int) error {
-			return expectZeroSize(func() (remain int, err error) {
-				return (&response).readFrom(&c.rbuf, size)
-			}())
+			var res metadataResponseV1
+
+			if err := c.readResponse(size, &res); err != nil {
+				return err
+			}
+
+			brokers = make([]Broker, len(res.Brokers))
+			for i, brokerMeta := range res.Brokers {
+				brokers[i] = Broker{
+					ID:   int(brokerMeta.NodeID),
+					Port: int(brokerMeta.Port),
+					Host: brokerMeta.Host,
+					Rack: brokerMeta.Rack,
+				}
+			}
+			return nil
 		},
 	)
-	if err != nil {
-		return describeGroupsResponseV1{}, err
-	}
-	for _, group := range response.Groups {
-		if group.ErrorCode != 0 {
-			return describeGroupsResponseV1{}, Error(group.ErrorCode)
-		}
-	}
+	return brokers, err
+}
 
-	return response, nil
+// DeleteTopics deletes the specified topics.
+func (c *Conn) DeleteTopics(topics ...string) error {
+	_, err := c.deleteTopics(deleteTopicsRequest{
+		Topics: topics,
+	})
+	return err
 }
 
 // findCoordinator finds the coordinator for the specified group or transaction
@@ -182,7 +320,8 @@ func (c *Conn) findCoordinator(request f
 
 	err := c.readOperation(
 		func(deadline time.Time, id int32) error {
-			return c.writeRequest(groupCoordinatorRequest, v0, id, request)
+			return c.writeRequest(findCoordinator, v0, id, request)
+
 		},
 		func(deadline time.Time, size int) error {
 			return expectZeroSize(func() (remain int, err error) {
@@ -208,7 +347,7 @@ func (c *Conn) heartbeat(request heartbe
 
 	err := c.writeOperation(
 		func(deadline time.Time, id int32) error {
-			return c.writeRequest(heartbeatRequest, v0, id, request)
+			return c.writeRequest(heartbeat, v0, id, request)
 		},
 		func(deadline time.Time, size int) error {
 			return expectZeroSize(func() (remain int, err error) {
@@ -229,12 +368,17 @@ func (c *Conn) heartbeat(request heartbe
 // joinGroup attempts to join a consumer group
 //
 // See http://kafka.apache.org/protocol.html#The_Messages_JoinGroup
-func (c *Conn) joinGroup(request joinGroupRequestV1) (joinGroupResponseV1, error) {
-	var response joinGroupResponseV1
+func (c *Conn) joinGroup(request joinGroupRequest) (joinGroupResponse, error) {
+	version, err := c.negotiateVersion(joinGroup, v1, v2)
+	if err != nil {
+		return joinGroupResponse{}, err
+	}
 
-	err := c.writeOperation(
+	response := joinGroupResponse{v: version}
+
+	err = c.writeOperation(
 		func(deadline time.Time, id int32) error {
-			return c.writeRequest(joinGroupRequest, v1, id, request)
+			return c.writeRequest(joinGroup, version, id, request)
 		},
 		func(deadline time.Time, size int) error {
 			return expectZeroSize(func() (remain int, err error) {
@@ -243,10 +387,10 @@ func (c *Conn) joinGroup(request joinGro
 		},
 	)
 	if err != nil {
-		return joinGroupResponseV1{}, err
+		return joinGroupResponse{}, err
 	}
 	if response.ErrorCode != 0 {
-		return joinGroupResponseV1{}, Error(response.ErrorCode)
+		return joinGroupResponse{}, Error(response.ErrorCode)
 	}
 
 	return response, nil
@@ -260,7 +404,7 @@ func (c *Conn) leaveGroup(request leaveG
 
 	err := c.writeOperation(
 		func(deadline time.Time, id int32) error {
-			return c.writeRequest(leaveGroupRequest, v0, id, request)
+			return c.writeRequest(leaveGroup, v0, id, request)
 		},
 		func(deadline time.Time, size int) error {
 			return expectZeroSize(func() (remain int, err error) {
@@ -286,7 +430,7 @@ func (c *Conn) listGroups(request listGr
 
 	err := c.readOperation(
 		func(deadline time.Time, id int32) error {
-			return c.writeRequest(listGroupsRequest, v1, id, request)
+			return c.writeRequest(listGroups, v1, id, request)
 		},
 		func(deadline time.Time, size int) error {
 			return expectZeroSize(func() (remain int, err error) {
@@ -312,7 +456,7 @@ func (c *Conn) offsetCommit(request offs
 
 	err := c.writeOperation(
 		func(deadline time.Time, id int32) error {
-			return c.writeRequest(offsetCommitRequest, v2, id, request)
+			return c.writeRequest(offsetCommit, v2, id, request)
 		},
 		func(deadline time.Time, size int) error {
 			return expectZeroSize(func() (remain int, err error) {
@@ -343,7 +487,7 @@ func (c *Conn) offsetFetch(request offse
 
 	err := c.readOperation(
 		func(deadline time.Time, id int32) error {
-			return c.writeRequest(offsetFetchRequest, v1, id, request)
+			return c.writeRequest(offsetFetch, v1, id, request)
 		},
 		func(deadline time.Time, size int) error {
 			return expectZeroSize(func() (remain int, err error) {
@@ -365,15 +509,15 @@ func (c *Conn) offsetFetch(request offse
 	return response, nil
 }
 
-// syncGroups completes the handshake to join a consumer group
+// syncGroup completes the handshake to join a consumer group
 //
 // See http://kafka.apache.org/protocol.html#The_Messages_SyncGroup
-func (c *Conn) syncGroups(request syncGroupRequestV0) (syncGroupResponseV0, error) {
+func (c *Conn) syncGroup(request syncGroupRequestV0) (syncGroupResponseV0, error) {
 	var response syncGroupResponseV0
 
 	err := c.readOperation(
 		func(deadline time.Time, id int32) error {
-			return c.writeRequest(syncGroupRequest, v0, id, request)
+			return c.writeRequest(syncGroup, v0, id, request)
 		},
 		func(deadline time.Time, size int) error {
 			return expectZeroSize(func() (remain int, err error) {
@@ -468,6 +612,12 @@ const (
 	SeekAbsolute = 1 // Seek to an absolute offset.
 	SeekEnd      = 2 // Seek relative to the last offset available in the partition.
 	SeekCurrent  = 3 // Seek relative to the current offset.
+
+	// This flag may be combined with any of the SeekAbsolute and SeekCurrent
+	// constants to skip the bound check that the connection would do otherwise.
+	// Programs can use this flag to avoid making a metadata request to the kafka
+	// broker to read the current first and last offsets of the partition.
+	SeekDontCheck = 1 << 30
 )
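A sketch of combining the flag with an absolute seek on an existing *Conn; the offset is a placeholder, and because the bound check is skipped an out-of-range offset only surfaces as an error on a later read:

	// Skip the metadata round trip that Seek would otherwise perform to
	// validate the offset against the partition's first and last offsets.
	offset, err := conn.Seek(42, kafka.SeekAbsolute|kafka.SeekDontCheck)
	if err != nil {
		log.Fatal(err)
	}
	_ = offset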
 
 // Seek sets the offset for the next read or write operation according to whence, which
@@ -477,12 +627,32 @@ const (
 // as in lseek(2) or os.Seek.
 // The method returns the new absolute offset of the connection.
 func (c *Conn) Seek(offset int64, whence int) (int64, error) {
+	seekDontCheck := (whence & SeekDontCheck) != 0
+	whence &= ^SeekDontCheck
+
 	switch whence {
 	case SeekStart, SeekAbsolute, SeekEnd, SeekCurrent:
 	default:
 		return 0, fmt.Errorf("whence must be one of 0, 1, 2, or 3. (whence = %d)", whence)
 	}
 
+	if seekDontCheck {
+		if whence == SeekAbsolute {
+			c.mutex.Lock()
+			c.offset = offset
+			c.mutex.Unlock()
+			return offset, nil
+		}
+
+		if whence == SeekCurrent {
+			c.mutex.Lock()
+			c.offset += offset
+			offset = c.offset
+			c.mutex.Unlock()
+			return offset, nil
+		}
+	}
+
 	if whence == SeekAbsolute {
 		c.mutex.Lock()
 		unchanged := offset == c.offset
@@ -491,6 +661,7 @@ func (c *Conn) Seek(offset int64, whence
 			return offset, nil
 		}
 	}
+
 	if whence == SeekCurrent {
 		c.mutex.Lock()
 		offset = c.offset + offset
@@ -576,39 +747,94 @@ func (c *Conn) ReadMessage(maxBytes int)
 // gives the minimum and maximum number of bytes that it wants to receive from
 // the kafka server.
 func (c *Conn) ReadBatch(minBytes, maxBytes int) *Batch {
+	return c.ReadBatchWith(ReadBatchConfig{
+		MinBytes: minBytes,
+		MaxBytes: maxBytes,
+	})
+}
+
+// ReadBatchWith is similar to ReadBatch in every way, except that it accepts a
+// ReadBatchConfig. ReadBatch is equivalent to ReadBatchWith configured with the
+// default values in ReadBatchConfig except for MinBytes and MaxBytes.
+func (c *Conn) ReadBatchWith(cfg ReadBatchConfig) *Batch {
+
 	var adjustedDeadline time.Time
 	var maxFetch = int(c.fetchMaxBytes)
 
-	if minBytes < 0 || minBytes > maxFetch {
-		return &Batch{err: fmt.Errorf("kafka.(*Conn).ReadBatch: minBytes of %d out of [1,%d] bounds", minBytes, maxFetch)}
+	if cfg.MinBytes < 0 || cfg.MinBytes > maxFetch {
+		return &Batch{err: fmt.Errorf("kafka.(*Conn).ReadBatch: minBytes of %d out of [1,%d] bounds", cfg.MinBytes, maxFetch)}
 	}
-	if maxBytes < 0 || maxBytes > maxFetch {
-		return &Batch{err: fmt.Errorf("kafka.(*Conn).ReadBatch: maxBytes of %d out of [1,%d] bounds", maxBytes, maxFetch)}
+	if cfg.MaxBytes < 0 || cfg.MaxBytes > maxFetch {
+		return &Batch{err: fmt.Errorf("kafka.(*Conn).ReadBatch: maxBytes of %d out of [1,%d] bounds", cfg.MaxBytes, maxFetch)}
 	}
-	if minBytes > maxBytes {
-		return &Batch{err: fmt.Errorf("kafka.(*Conn).ReadBatch: minBytes (%d) > maxBytes (%d)", minBytes, maxBytes)}
+	if cfg.MinBytes > cfg.MaxBytes {
+		return &Batch{err: fmt.Errorf("kafka.(*Conn).ReadBatch: minBytes (%d) > maxBytes (%d)", cfg.MinBytes, cfg.MaxBytes)}
 	}
 
-	offset, err := c.Seek(c.Offset())
+	offset, whence := c.Offset()
+
+	offset, err := c.Seek(offset, whence|SeekDontCheck)
+	if err != nil {
+		return &Batch{err: dontExpectEOF(err)}
+	}
+
+	fetchVersion, err := c.negotiateVersion(fetch, v2, v5, v10)
 	if err != nil {
 		return &Batch{err: dontExpectEOF(err)}
 	}
 
 	id, err := c.doRequest(&c.rdeadline, func(deadline time.Time, id int32) error {
 		now := time.Now()
-		deadline = adjustDeadlineForRTT(deadline, now, defaultRTT)
+		var timeout time.Duration
+		if cfg.MaxWait > 0 {
+			// explicitly-configured case: no changes are made to the deadline,
+			// and the timeout is sent exactly as specified.
+			timeout = cfg.MaxWait
+		} else {
+			// default case: use the original logic to adjust the conn's
+			// deadline.
+			deadline = adjustDeadlineForRTT(deadline, now, defaultRTT)
+			timeout = deadlineToTimeout(deadline, now)
+		}
+		// save this variable outside of the closure for later use in detecting
+		// truncated messages.
 		adjustedDeadline = deadline
-		return writeFetchRequestV2(
-			&c.wbuf,
-			id,
-			c.clientID,
-			c.topic,
-			c.partition,
-			offset,
-			minBytes,
-			maxBytes+int(c.fetchMinSize),
-			deadlineToTimeout(deadline, now),
-		)
+		switch fetchVersion {
+		case v10:
+			return c.wb.writeFetchRequestV10(
+				id,
+				c.clientID,
+				c.topic,
+				c.partition,
+				offset,
+				cfg.MinBytes,
+				cfg.MaxBytes+int(c.fetchMinSize),
+				timeout,
+				int8(cfg.IsolationLevel),
+			)
+		case v5:
+			return c.wb.writeFetchRequestV5(
+				id,
+				c.clientID,
+				c.topic,
+				c.partition,
+				offset,
+				cfg.MinBytes,
+				cfg.MaxBytes+int(c.fetchMinSize),
+				timeout,
+				int8(cfg.IsolationLevel),
+			)
+		default:
+			return c.wb.writeFetchRequestV2(
+				id,
+				c.clientID,
+				c.topic,
+				c.partition,
+				offset,
+				cfg.MinBytes,
+				cfg.MaxBytes+int(c.fetchMinSize),
+				timeout,
+			)
+		}
 	})
 	if err != nil {
 		return &Batch{err: dontExpectEOF(err)}
@@ -619,18 +845,49 @@ func (c *Conn) ReadBatch(minBytes, maxBy
 		return &Batch{err: dontExpectEOF(err)}
 	}
 
-	throttle, highWaterMark, remain, err := readFetchResponseHeader(&c.rbuf, size)
+	var throttle int32
+	var highWaterMark int64
+	var remain int
+
+	switch fetchVersion {
+	case v10:
+		throttle, highWaterMark, remain, err = readFetchResponseHeaderV10(&c.rbuf, size)
+	case v5:
+		throttle, highWaterMark, remain, err = readFetchResponseHeaderV5(&c.rbuf, size)
+	default:
+		throttle, highWaterMark, remain, err = readFetchResponseHeaderV2(&c.rbuf, size)
+	}
+	if errors.Is(err, errShortRead) {
+		err = checkTimeoutErr(adjustedDeadline)
+	}
+
+	var msgs *messageSetReader
+	if err == nil {
+		if highWaterMark == offset {
+			msgs = &messageSetReader{empty: true}
+		} else {
+			msgs, err = newMessageSetReader(&c.rbuf, remain)
+		}
+	}
+	if errors.Is(err, errShortRead) {
+		err = checkTimeoutErr(adjustedDeadline)
+	}
+
 	return &Batch{
 		conn:          c,
-		msgs:          newMessageSetReader(&c.rbuf, remain),
+		msgs:          msgs,
 		deadline:      adjustedDeadline,
-		throttle:      duration(throttle),
+		throttle:      makeDuration(throttle),
 		lock:          lock,
 		topic:         c.topic,          // topic is copied to Batch to prevent race with Batch.close
 		partition:     int(c.partition), // partition is copied to Batch to prevent race with Batch.close
 		offset:        offset,
 		highWaterMark: highWaterMark,
-		err:           dontExpectEOF(err),
+		// there shouldn't be a short read on initially setting up the batch.
+		// as such, any io.EOF is re-mapped to an io.ErrUnexpectedEOF so that we
+		// don't accidentally signal that we successfully reached the end of the
+		// batch.
+		err: dontExpectEOF(err),
 	}
 }
 
@@ -669,7 +926,7 @@ func (c *Conn) ReadOffsets() (first, las
 func (c *Conn) readOffset(t int64) (offset int64, err error) {
 	err = c.readOperation(
 		func(deadline time.Time, id int32) error {
-			return writeListOffsetRequestV1(&c.wbuf, id, c.clientID, c.topic, c.partition, t)
+			return c.wb.writeListOffsetRequestV1(id, c.clientID, c.topic, c.partition, t)
 		},
 		func(deadline time.Time, size int) error {
 			return expectZeroSize(readArrayWith(&c.rbuf, size, func(r *bufio.Reader, size int) (int, error) {
@@ -707,64 +964,131 @@ func (c *Conn) readOffset(t int64) (offs
 // connection. If there are none, the method fetches all partitions of the kafka
 // cluster.
 func (c *Conn) ReadPartitions(topics ...string) (partitions []Partition, err error) {
-	defaultTopics := [...]string{c.topic}
 
-	if len(topics) == 0 && len(c.topic) != 0 {
-		topics = defaultTopics[:]
+	if len(topics) == 0 {
+		if len(c.topic) != 0 {
+			defaultTopics := [...]string{c.topic}
+			topics = defaultTopics[:]
+		} else {
+			// topics needs to be explicitly nil-ed out or the broker will
+			// interpret it as a request for 0 partitions instead of all.
+			topics = nil
+		}
+	}
+	metadataVersion, err := c.negotiateVersion(metadata, v1, v6)
+	if err != nil {
+		return nil, err
 	}
 
 	err = c.readOperation(
 		func(deadline time.Time, id int32) error {
-			return c.writeRequest(metadataRequest, v1, id, topicMetadataRequestV1(topics))
+			switch metadataVersion {
+			case v6:
+				return c.writeRequest(metadata, v6, id, topicMetadataRequestV6{Topics: topics, AllowAutoTopicCreation: true})
+			default:
+				return c.writeRequest(metadata, v1, id, topicMetadataRequestV1(topics))
+			}
 		},
 		func(deadline time.Time, size int) error {
-			var res metadataResponseV1
+			partitions, err = c.readPartitionsResponse(metadataVersion, size)
+			return err
+		},
+	)
+	return
+}
 
-			if err := c.readResponse(size, &res); err != nil {
-				return err
-			}
+func (c *Conn) readPartitionsResponse(metadataVersion apiVersion, size int) ([]Partition, error) {
+	switch metadataVersion {
+	case v6:
+		var res metadataResponseV6
+		if err := c.readResponse(size, &res); err != nil {
+			return nil, err
+		}
+		brokers := readBrokerMetadata(res.Brokers)
+		return c.readTopicMetadatav6(brokers, res.Topics)
+	default:
+		var res metadataResponseV1
+		if err := c.readResponse(size, &res); err != nil {
+			return nil, err
+		}
+		brokers := readBrokerMetadata(res.Brokers)
+		return c.readTopicMetadatav1(brokers, res.Topics)
+	}
+}
 
-			brokers := make(map[int32]Broker, len(res.Brokers))
-			for _, b := range res.Brokers {
-				brokers[b.NodeID] = Broker{
-					Host: b.Host,
-					Port: int(b.Port),
-					ID:   int(b.NodeID),
-					Rack: b.Rack,
-				}
-			}
+func readBrokerMetadata(brokerMetadata []brokerMetadataV1) map[int32]Broker {
+	brokers := make(map[int32]Broker, len(brokerMetadata))
+	for _, b := range brokerMetadata {
+		brokers[b.NodeID] = Broker{
+			Host: b.Host,
+			Port: int(b.Port),
+			ID:   int(b.NodeID),
+			Rack: b.Rack,
+		}
+	}
+	return brokers
+}
 
-			makeBrokers := func(ids ...int32) []Broker {
-				b := make([]Broker, len(ids))
-				for i, id := range ids {
-					b[i] = brokers[id]
-				}
-				return b
-			}
+func (c *Conn) readTopicMetadatav1(brokers map[int32]Broker, topicMetadata []topicMetadataV1) (partitions []Partition, err error) {
+	for _, t := range topicMetadata {
+		if t.TopicErrorCode != 0 && (c.topic == "" || t.TopicName == c.topic) {
+			// We only report errors if they happened for the topic of
+			// the connection, otherwise the topic will simply have no
+			// partitions in the result set.
+			return nil, Error(t.TopicErrorCode)
+		}
+		for _, p := range t.Partitions {
+			partitions = append(partitions, Partition{
+				Topic:           t.TopicName,
+				Leader:          brokers[p.Leader],
+				Replicas:        makeBrokers(brokers, p.Replicas...),
+				Isr:             makeBrokers(brokers, p.Isr...),
+				ID:              int(p.PartitionID),
+				OfflineReplicas: []Broker{},
+			})
+		}
+	}
+	return
+}
 
-			for _, t := range res.Topics {
-				if t.TopicErrorCode != 0 && t.TopicName == c.topic {
-					// We only report errors if they happened for the topic of
-					// the connection, otherwise the topic will simply have no
-					// partitions in the result set.
-					return Error(t.TopicErrorCode)
-				}
-				for _, p := range t.Partitions {
-					partitions = append(partitions, Partition{
-						Topic:    t.TopicName,
-						Leader:   brokers[p.Leader],
-						Replicas: makeBrokers(p.Replicas...),
-						Isr:      makeBrokers(p.Isr...),
-						ID:       int(p.PartitionID),
-					})
-				}
-			}
-			return nil
-		},
-	)
+func (c *Conn) readTopicMetadatav6(brokers map[int32]Broker, topicMetadata []topicMetadataV6) (partitions []Partition, err error) {
+	for _, t := range topicMetadata {
+		if t.TopicErrorCode != 0 && (c.topic == "" || t.TopicName == c.topic) {
+			// We only report errors if they happened for the topic of
+			// the connection, otherwise the topic will simply have no
+			// partitions in the result set.
+			return nil, Error(t.TopicErrorCode)
+		}
+		for _, p := range t.Partitions {
+			partitions = append(partitions, Partition{
+				Topic:           t.TopicName,
+				Leader:          brokers[p.Leader],
+				Replicas:        makeBrokers(brokers, p.Replicas...),
+				Isr:             makeBrokers(brokers, p.Isr...),
+				ID:              int(p.PartitionID),
+				OfflineReplicas: makeBrokers(brokers, p.OfflineReplicas...),
+			})
+		}
+	}
 	return
 }
 
+func makeBrokers(brokers map[int32]Broker, ids ...int32) []Broker {
+	b := make([]Broker, len(ids))
+	for i, id := range ids {
+		br, ok := brokers[id]
+		if !ok {
+			// When the broker id isn't found in the current list of known
+			// brokers, use a placeholder to report that the cluster has
+			// logical knowledge of the broker but no information about the
+			// physical host where it is running.
+			br.ID = int(id)
+		}
+		b[i] = br
+	}
+	return b
+}
+
 // Write writes a message to the kafka broker that this connection was
 // established to. The method returns the number of bytes written, or an error
 // if something went wrong.
@@ -789,45 +1113,106 @@ func (c *Conn) WriteMessages(msgs ...Mes
 // operation, it either fully succeeds or fails.
 //
 // If the compression codec is not nil, the messages will be compressed.
-func (c *Conn) WriteCompressedMessages(codec CompressionCodec, msgs ...Message) (int, error) {
+func (c *Conn) WriteCompressedMessages(codec CompressionCodec, msgs ...Message) (nbytes int, err error) {
+	nbytes, _, _, _, err = c.writeCompressedMessages(codec, msgs...)
+	return
+}
+
+// WriteCompressedMessagesAt writes a batch of messages to the connection's topic
+// and partition, returning the number of bytes written, the partition and offset
+// numbers, and the timestamp assigned by the kafka broker to the message set. The
+// write is an atomic operation: it either fully succeeds or fails.
+//
+// If the compression codec is not nil, the messages will be compressed.
+func (c *Conn) WriteCompressedMessagesAt(codec CompressionCodec, msgs ...Message) (nbytes int, partition int32, offset int64, appendTime time.Time, err error) {
+	return c.writeCompressedMessages(codec, msgs...)
+}
+
+func (c *Conn) writeCompressedMessages(codec CompressionCodec, msgs ...Message) (nbytes int, partition int32, offset int64, appendTime time.Time, err error) {
 	if len(msgs) == 0 {
-		return 0, nil
+		return
 	}
 
 	writeTime := time.Now()
-	n := 0
 	for i, msg := range msgs {
 		// users may believe they can set the Topic and/or Partition
 		// on the kafka message.
 		if msg.Topic != "" && msg.Topic != c.topic {
-			return 0, errInvalidWriteTopic
+			err = errInvalidWriteTopic
+			return
 		}
 		if msg.Partition != 0 {
-			return 0, errInvalidWritePartition
+			err = errInvalidWritePartition
+			return
 		}
 
 		if msg.Time.IsZero() {
 			msgs[i].Time = writeTime
 		}
 
-		n += len(msg.Key) + len(msg.Value)
+		nbytes += len(msg.Key) + len(msg.Value)
 	}
 
-	err := c.writeOperation(
+	var produceVersion apiVersion
+	if produceVersion, err = c.negotiateVersion(produce, v2, v3, v7); err != nil {
+		return
+	}
+
+	err = c.writeOperation(
 		func(deadline time.Time, id int32) error {
 			now := time.Now()
 			deadline = adjustDeadlineForRTT(deadline, now, defaultRTT)
-			return writeProduceRequestV2(
-				&c.wbuf,
-				codec,
-				id,
-				c.clientID,
-				c.topic,
-				c.partition,
-				deadlineToTimeout(deadline, now),
-				int16(atomic.LoadInt32(&c.requiredAcks)),
-				msgs...,
-			)
+			switch produceVersion {
+			case v7:
+				recordBatch, err :=
+					newRecordBatch(
+						codec,
+						msgs...,
+					)
+				if err != nil {
+					return err
+				}
+				return c.wb.writeProduceRequestV7(
+					id,
+					c.clientID,
+					c.topic,
+					c.partition,
+					deadlineToTimeout(deadline, now),
+					int16(atomic.LoadInt32(&c.requiredAcks)),
+					c.transactionalID,
+					recordBatch,
+				)
+			case v3:
+				recordBatch, err :=
+					newRecordBatch(
+						codec,
+						msgs...,
+					)
+				if err != nil {
+					return err
+				}
+				return c.wb.writeProduceRequestV3(
+					id,
+					c.clientID,
+					c.topic,
+					c.partition,
+					deadlineToTimeout(deadline, now),
+					int16(atomic.LoadInt32(&c.requiredAcks)),
+					c.transactionalID,
+					recordBatch,
+				)
+			default:
+				return c.wb.writeProduceRequestV2(
+					codec,
+					id,
+					c.clientID,
+					c.topic,
+					c.partition,
+					deadlineToTimeout(deadline, now),
+					int16(atomic.LoadInt32(&c.requiredAcks)),
+					msgs...,
+				)
+			}
 		},
 		func(deadline time.Time, size int) error {
 			return expectZeroSize(readArrayWith(&c.rbuf, size, func(r *bufio.Reader, size int) (int, error) {
@@ -841,12 +1226,33 @@ func (c *Conn) WriteCompressedMessages(c
 				// Read the list of partitions, there should be only one since
 				// we've produced a message to a single partition.
 				size, err = readArrayWith(r, size, func(r *bufio.Reader, size int) (int, error) {
-					var p produceResponsePartitionV2
-					size, err := p.readFrom(r, size)
-					if err == nil && p.ErrorCode != 0 {
-						err = Error(p.ErrorCode)
+					switch produceVersion {
+					case v7:
+						var p produceResponsePartitionV7
+						size, err := p.readFrom(r, size)
+						if err == nil && p.ErrorCode != 0 {
+							err = Error(p.ErrorCode)
+						}
+						if err == nil {
+							partition = p.Partition
+							offset = p.Offset
+							appendTime = time.Unix(0, p.Timestamp*int64(time.Millisecond))
+						}
+						return size, err
+					default:
+						var p produceResponsePartitionV2
+						size, err := p.readFrom(r, size)
+						if err == nil && p.ErrorCode != 0 {
+							err = Error(p.ErrorCode)
+						}
+						if err == nil {
+							partition = p.Partition
+							offset = p.Offset
+							appendTime = time.Unix(0, p.Timestamp*int64(time.Millisecond))
+						}
+						return size, err
 					}
-					return size, err
+
 				})
 				if err != nil {
 					return size, err
@@ -860,10 +1266,10 @@ func (c *Conn) WriteCompressedMessages(c
 	)
 
 	if err != nil {
-		n = 0
+		nbytes = 0
 	}
 
-	return n, err
+	return
 }
 
 // SetRequiredAcks sets the number of acknowledges from replicas that the
@@ -878,27 +1284,20 @@ func (c *Conn) SetRequiredAcks(n int) er
 	}
 }
 
-func (c *Conn) writeRequestHeader(apiKey apiKey, apiVersion apiVersion, correlationID int32, size int32) {
-	hdr := c.requestHeader(apiKey, apiVersion, correlationID)
-	hdr.Size = (hdr.size() + size) - 4
-	hdr.writeTo(&c.wbuf)
-}
-
 func (c *Conn) writeRequest(apiKey apiKey, apiVersion apiVersion, correlationID int32, req request) error {
 	hdr := c.requestHeader(apiKey, apiVersion, correlationID)
 	hdr.Size = (hdr.size() + req.size()) - 4
-	hdr.writeTo(&c.wbuf)
-	req.writeTo(&c.wbuf)
+	hdr.writeTo(&c.wb)
+	req.writeTo(&c.wb)
 	return c.wbuf.Flush()
 }
 
 func (c *Conn) readResponse(size int, res interface{}) error {
 	size, err := read(&c.rbuf, size, res)
-	switch err.(type) {
-	case Error:
-		var e error
-		if size, e = discardN(&c.rbuf, size, size); e != nil {
-			err = e
+	if err != nil {
+		var kafkaError Error
+		if errors.As(err, &kafkaError) {
+			size, err = discardN(&c.rbuf, size, size)
 		}
 	}
 	return expectZeroSize(size, err)
@@ -933,6 +1332,18 @@ func (c *Conn) writeOperation(write func
 	return c.do(&c.wdeadline, write, read)
 }
 
+func (c *Conn) enter() {
+	atomic.AddInt32(&c.inflight, +1)
+}
+
+func (c *Conn) leave() {
+	atomic.AddInt32(&c.inflight, -1)
+}
+
+func (c *Conn) concurrency() int {
+	return int(atomic.LoadInt32(&c.inflight))
+}
+
 func (c *Conn) do(d *connDeadline, write func(time.Time, int32) error, read func(time.Time, int) error) error {
 	id, err := c.doRequest(d, write)
 	if err != nil {
@@ -945,9 +1356,8 @@ func (c *Conn) do(d *connDeadline, write
 	}
 
 	if err = read(deadline, size); err != nil {
-		switch err.(type) {
-		case Error:
-		default:
+		var kafkaError Error
+		if !errors.As(err, &kafkaError) {
 			c.conn.Close()
 		}
 	}
@@ -958,6 +1368,7 @@ func (c *Conn) do(d *connDeadline, write
 }
 
 func (c *Conn) doRequest(d *connDeadline, write func(time.Time, int32) error) (id int32, err error) {
+	c.enter()
 	c.wlock.Lock()
 	c.correlationID++
 	id = c.correlationID
@@ -969,6 +1380,7 @@ func (c *Conn) doRequest(d *connDeadline
 		// recoverable state so we're better off just giving up at this point to
 		// avoid any risk of corrupting the following operations.
 		c.conn.Close()
+		c.leave()
 	}
 
 	c.wlock.Unlock()
@@ -982,25 +1394,39 @@ func (c *Conn) waitResponse(d *connDeadl
 
 		c.rlock.Lock()
 		deadline = d.setConnReadDeadline(c.conn)
+		rsz, rid, err = c.peekResponseSizeAndID()
 
-		if rsz, rid, err = c.peekResponseSizeAndID(); err != nil {
+		if err != nil {
 			d.unsetConnReadDeadline()
 			c.conn.Close()
 			c.rlock.Unlock()
-			return
+			break
 		}
 
 		if id == rid {
 			c.skipResponseSizeAndID()
 			size, lock = int(rsz-4), &c.rlock
-			return
+			// Leave the read mutex locked to yield ownership to the caller.
+			break
+		}
+
+		if c.concurrency() == 1 {
+			// If the goroutine is the only one waiting on this connection it
+			// should be impossible to read a correlation id different from the
+			// one it expects. This is a sign that the data we are reading on
+			// the wire is corrupted and the connection needs to be closed.
+			err = io.ErrNoProgress
+			c.rlock.Unlock()
+			break
 		}
 
 		// Optimistically release the read lock if a response has already
 		// been received but the current operation is not the target for it.
 		c.rlock.Unlock()
-		runtime.Gosched()
 	}
+
+	c.leave()
+	return
 }
 
 func (c *Conn) requestHeader(apiKey apiKey, apiVersion apiVersion, correlationID int32) requestHeader {
@@ -1012,6 +1438,67 @@ func (c *Conn) requestHeader(apiKey apiK
 	}
 }
 
+func (c *Conn) ApiVersions() ([]ApiVersion, error) {
+	deadline := &c.rdeadline
+
+	if deadline.deadline().IsZero() {
+		// ApiVersions is called automatically when API version negotiation
+		// needs to happen, so we are not guaranteed that a read deadline has
+		// been set yet. Fall back to the write deadline in case it was
+		// set, for example when version negotiation is initiated during a
+		// produce request.
+		deadline = &c.wdeadline
+	}
+
+	id, err := c.doRequest(deadline, func(_ time.Time, id int32) error {
+		h := requestHeader{
+			ApiKey:        int16(apiVersions),
+			ApiVersion:    int16(v0),
+			CorrelationID: id,
+			ClientID:      c.clientID,
+		}
+		h.Size = (h.size() - 4)
+		h.writeTo(&c.wb)
+		return c.wbuf.Flush()
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	_, size, lock, err := c.waitResponse(deadline, id)
+	if err != nil {
+		return nil, err
+	}
+	defer lock.Unlock()
+
+	var errorCode int16
+	if size, err = readInt16(&c.rbuf, size, &errorCode); err != nil {
+		return nil, err
+	}
+	var arrSize int32
+	if size, err = readInt32(&c.rbuf, size, &arrSize); err != nil {
+		return nil, err
+	}
+	r := make([]ApiVersion, arrSize)
+	for i := 0; i < int(arrSize); i++ {
+		if size, err = readInt16(&c.rbuf, size, &r[i].ApiKey); err != nil {
+			return nil, err
+		}
+		if size, err = readInt16(&c.rbuf, size, &r[i].MinVersion); err != nil {
+			return nil, err
+		}
+		if size, err = readInt16(&c.rbuf, size, &r[i].MaxVersion); err != nil {
+			return nil, err
+		}
+	}
+
+	if errorCode != 0 {
+		return r, Error(errorCode)
+	}
+
+	return r, nil
+}
+
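
ApiVersions is exported here, so a client can ask a broker which request versions it accepts before choosing one. A short usage sketch, assuming a broker reachable at localhost:9092 (the address is a placeholder):

	package main

	import (
		"fmt"
		"log"

		kafka "github.com/segmentio/kafka-go"
	)

	func main() {
		conn, err := kafka.Dial("tcp", "localhost:9092")
		if err != nil {
			log.Fatal(err)
		}
		defer conn.Close()

		// Ask the broker which API versions it supports.
		versions, err := conn.ApiVersions()
		if err != nil {
			log.Fatal(err)
		}
		for _, v := range versions {
			fmt.Printf("api key %d: v%d..v%d\n", v.ApiKey, v.MinVersion, v.MaxVersion)
		}
	}
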
 // connDeadline is a helper type to implement read/write deadline management on
 // the kafka connection.
 type connDeadline struct {
@@ -1072,3 +1559,92 @@ func (d *connDeadline) unsetConnWriteDea
 	d.wconn = nil
 	d.mutex.Unlock()
 }
+
+// saslHandshake sends the SASL handshake message.  This will determine whether
+// the Mechanism is supported by the cluster.  If it's not, this function will
+// error out with UnsupportedSASLMechanism.
+//
+// If the mechanism is unsupported, the handshake request will reply with the
+// list of the cluster's configured mechanisms, which could potentially be used
+// to facilitate negotiation.  At the moment, we are not negotiating the
+// mechanism as we believe that brokers are usually known to the client, and
+// therefore the client should already know which mechanisms are supported.
+//
+// See http://kafka.apache.org/protocol.html#The_Messages_SaslHandshake
+func (c *Conn) saslHandshake(mechanism string) error {
+	// The wire format for V0 and V1 is identical, but the version
+	// number will affect how the SASL authentication
+	// challenge/responses are sent
+	var resp saslHandshakeResponseV0
+
+	version, err := c.negotiateVersion(saslHandshake, v0, v1)
+	if err != nil {
+		return err
+	}
+
+	err = c.writeOperation(
+		func(deadline time.Time, id int32) error {
+			return c.writeRequest(saslHandshake, version, id, &saslHandshakeRequestV0{Mechanism: mechanism})
+		},
+		func(deadline time.Time, size int) error {
+			return expectZeroSize(func() (int, error) {
+				return (&resp).readFrom(&c.rbuf, size)
+			}())
+		},
+	)
+	if err == nil && resp.ErrorCode != 0 {
+		err = Error(resp.ErrorCode)
+	}
+	return err
+}
+
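
saslHandshake and saslAuthenticate are internal; applications normally opt into SASL through the Dialer. A sketch assuming the sasl/plain sub-package and the Dialer.SASLMechanism field shipped in this version (listener address and credentials are placeholders):

	package main

	import (
		"log"

		kafka "github.com/segmentio/kafka-go"
		"github.com/segmentio/kafka-go/sasl/plain"
	)

	func main() {
		dialer := &kafka.Dialer{
			SASLMechanism: plain.Mechanism{
				Username: "username", // placeholder credentials
				Password: "password",
			},
		}

		// Dialing a SASL listener triggers the handshake and authenticate
		// exchange implemented above.
		conn, err := dialer.Dial("tcp", "localhost:9093")
		if err != nil {
			log.Fatal(err)
		}
		defer conn.Close()
	}
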
+// saslAuthenticate sends the SASL authenticate message.  This function must
+// be immediately preceded by a successful saslHandshake.
+//
+// See http://kafka.apache.org/protocol.html#The_Messages_SaslAuthenticate
+func (c *Conn) saslAuthenticate(data []byte) ([]byte, error) {
+	// if we sent a v1 handshake, then we must encapsulate the authentication
+	// request in a saslAuthenticateRequest.  otherwise, we read and write raw
+	// bytes.
+	version, err := c.negotiateVersion(saslHandshake, v0, v1)
+	if err != nil {
+		return nil, err
+	}
+	if version == v1 {
+		var request = saslAuthenticateRequestV0{Data: data}
+		var response saslAuthenticateResponseV0
+
+		err := c.writeOperation(
+			func(deadline time.Time, id int32) error {
+				return c.writeRequest(saslAuthenticate, v0, id, request)
+			},
+			func(deadline time.Time, size int) error {
+				return expectZeroSize(func() (remain int, err error) {
+					return (&response).readFrom(&c.rbuf, size)
+				}())
+			},
+		)
+		if err == nil && response.ErrorCode != 0 {
+			err = Error(response.ErrorCode)
+		}
+		return response.Data, err
+	}
+
+	// fall back to opaque bytes on the wire.  the broker is expecting these if
+	// it just processed a v0 sasl handshake.
+	c.wb.writeInt32(int32(len(data)))
+	if _, err := c.wb.Write(data); err != nil {
+		return nil, err
+	}
+	if err := c.wb.Flush(); err != nil {
+		return nil, err
+	}
+
+	var respLen int32
+	if _, err := readInt32(&c.rbuf, 4, &respLen); err != nil {
+		return nil, err
+	}
+
+	resp, _, err := readNewBytes(&c.rbuf, int(respLen), int(respLen))
+	return resp, err
+}
diff -pruN 0.2.1-1.1/conn_test.go 0.4.49+ds1-1/conn_test.go
--- 0.2.1-1.1/conn_test.go	2018-11-20 17:06:32.000000000 +0000
+++ 0.4.49+ds1-1/conn_test.go	2025-08-21 19:15:53.000000000 +0000
@@ -1,16 +1,21 @@
 package kafka
 
 import (
+	"bytes"
 	"context"
+	"errors"
 	"fmt"
 	"io"
 	"math/rand"
 	"net"
+	"os"
 	"strconv"
 	"testing"
 	"time"
 
 	"golang.org/x/net/nettest"
+
+	ktesting "github.com/segmentio/kafka-go/testing"
 )
 
 type timeout struct{}
@@ -102,12 +107,15 @@ func makeGroupID() string {
 	return fmt.Sprintf("kafka-go-group-%016x", rand.Int63())
 }
 
-func TestConn(t *testing.T) {
-	t.Parallel()
+func makeTransactionalID() string {
+	return fmt.Sprintf("kafka-go-transactional-id-%016x", rand.Int63())
+}
 
+func TestConn(t *testing.T) {
 	tests := []struct {
-		scenario string
-		function func(*testing.T, *Conn)
+		scenario   string
+		function   func(*testing.T, *Conn)
+		minVersion string
 	}{
 		{
 			scenario: "close right away",
@@ -150,6 +158,11 @@ func TestConn(t *testing.T) {
 		},
 
 		{
+			scenario: "unchecked seeks allow the connection to be positioned outside the boundaries of the partition",
+			function: testConnSeekDontCheck,
+		},
+
+		{
 			scenario: "writing and reading messages sequentially should preserve the order",
 			function: testConnWriteReadSequentially,
 		},
@@ -180,8 +193,14 @@ func TestConn(t *testing.T) {
 		},
 
 		{
-			scenario: "describe groups retrieves all groups when no groupID specified",
-			function: testConnDescribeGroupRetrievesAllGroups,
+			scenario:   "read a batch with no explicit min or max bytes",
+			function:   testConnReadBatchWithNoMinMaxBytes,
+			minVersion: "0.11.0",
+		},
+
+		{
+			scenario: "read a batch using explicit max wait time",
+			function: testConnReadBatchWithMaxWait,
 		},
 
 		{
@@ -220,8 +239,9 @@ func TestConn(t *testing.T) {
 		},
 
 		{
-			scenario: "test list groups",
-			function: testConnListGroupsReturnsGroups,
+			scenario:   "test list groups",
+			function:   testConnListGroupsReturnsGroups,
+			minVersion: "0.11.0",
 		},
 
 		{
@@ -238,6 +258,21 @@ func TestConn(t *testing.T) {
 			scenario: "test delete topics with an invalid topic",
 			function: testDeleteTopicsInvalidTopic,
 		},
+
+		{
+			scenario: "test retrieve controller",
+			function: testController,
+		},
+
+		{
+			scenario: "test list brokers",
+			function: testBrokers,
+		},
+
+		{
+			scenario: "the connection advertises the broker that it is connected to",
+			function: testConnBroker,
+		},
 	}
 
 	const (
@@ -246,6 +281,11 @@ func TestConn(t *testing.T) {
 	)
 
 	for _, test := range tests {
+		if !ktesting.KafkaIsAtLeast(test.minVersion) {
+			t.Log("skipping " + test.scenario + " because broker is not at least version " + test.minVersion)
+			continue
+		}
+
 		testFunc := test.function
 		t.Run(test.scenario, func(t *testing.T) {
 			t.Parallel()
@@ -267,16 +307,30 @@ func TestConn(t *testing.T) {
 	}
 
 	t.Run("nettest", func(t *testing.T) {
+		// Need ability to skip nettest on newer Kafka versions to avoid these kinds of errors:
+		//  --- FAIL: TestConn/nettest (17.56s)
+		//    --- FAIL: TestConn/nettest/PingPong (7.40s)
+		//      conntest.go:112: unexpected Read error: [7] Request Timed Out: the request exceeded the user-specified time limit in the request
+		//      conntest.go:118: mismatching value: got 77, want 78
+		//      conntest.go:118: mismatching value: got 78, want 79
+		// ...
+		//
+		// TODO: Figure out why these are happening and fix them (they don't appear to be new).
+		if _, ok := os.LookupEnv("KAFKA_SKIP_NETTEST"); ok {
+			t.Log("skipping nettest because KAFKA_SKIP_NETTEST is set")
+			t.Skip()
+		}
+
 		t.Parallel()
 
 		nettest.TestConn(t, func() (c1 net.Conn, c2 net.Conn, stop func(), err error) {
-			var topic1 = makeTopic()
-			var topic2 = makeTopic()
+			topic1 := makeTopic()
+			topic2 := makeTopic()
 			var t1Reader *Conn
 			var t2Reader *Conn
 			var t1Writer *Conn
 			var t2Writer *Conn
-			var dialer = &Dialer{}
+			dialer := &Dialer{}
 
 			ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
 			defer cancel()
@@ -324,7 +378,6 @@ func testConnFirstOffset(t *testing.T, c
 func testConnWrite(t *testing.T, conn *Conn) {
 	b := []byte("Hello World!")
 	n, err := conn.Write(b)
-
 	if err != nil {
 		t.Error(err)
 	}
@@ -337,9 +390,11 @@ func testConnWrite(t *testing.T, conn *C
 func testConnCloseAndWrite(t *testing.T, conn *Conn) {
 	conn.Close()
 
-	switch _, err := conn.Write([]byte("Hello World!")); err.(type) {
-	case *net.OpError:
-	default:
+	_, err := conn.Write([]byte("Hello World!"))
+
+	// expect a network error
+	var netOpError *net.OpError
+	if !errors.As(err, &netOpError) {
 		t.Error(err)
 	}
 }
@@ -421,6 +476,27 @@ func testConnSeekRandomOffset(t *testing
 	}
 }
 
+func testConnSeekDontCheck(t *testing.T, conn *Conn) {
+	for i := 0; i != 10; i++ {
+		if _, err := conn.Write([]byte(strconv.Itoa(i))); err != nil {
+			t.Fatal(err)
+		}
+	}
+
+	offset, err := conn.Seek(42, SeekAbsolute|SeekDontCheck)
+	if err != nil {
+		t.Error(err)
+	}
+
+	if offset != 42 {
+		t.Error("bad offset:", offset)
+	}
+
+	if _, err := conn.ReadMessage(1024); !errors.Is(err, OffsetOutOfRange) {
+		t.Error("unexpected error:", err)
+	}
+}
+
 func testConnWriteReadSequentially(t *testing.T, conn *Conn) {
 	for i := 0; i != 10; i++ {
 		if _, err := conn.Write([]byte(strconv.Itoa(i))); err != nil {
@@ -446,7 +522,9 @@ func testConnWriteReadSequentially(t *te
 }
 
 func testConnWriteBatchReadSequentially(t *testing.T, conn *Conn) {
-	if _, err := conn.WriteMessages(makeTestSequence(10)...); err != nil {
+	msgs := makeTestSequence(10)
+
+	if _, err := conn.WriteMessages(msgs...); err != nil {
 		t.Fatal(err)
 	}
 
@@ -456,11 +534,14 @@ func testConnWriteBatchReadSequentially(
 			t.Error(err)
 			continue
 		}
-		s := string(msg.Value)
-		if v, err := strconv.Atoi(s); err != nil {
-			t.Error(err)
-		} else if v != i {
-			t.Errorf("bad message read at offset %d: %s", i, s)
+		if !bytes.Equal(msg.Key, msgs[i].Key) {
+			t.Errorf("bad message key at offset %d: %q != %q", i, msg.Key, msgs[i].Key)
+		}
+		if !bytes.Equal(msg.Value, msgs[i].Value) {
+			t.Errorf("bad message value at offset %d: %q != %q", i, msg.Value, msgs[i].Value)
+		}
+		if !msg.Time.Equal(msgs[i].Time) {
+			t.Errorf("bad message time at offset %d: %s != %s", i, msg.Time, msgs[i].Time)
 		}
 	}
 }
@@ -493,6 +574,86 @@ func testConnReadWatermarkFromBatch(t *t
 	batch.Close()
 }
 
+func testConnReadBatchWithNoMinMaxBytes(t *testing.T, conn *Conn) {
+	if _, err := conn.WriteMessages(makeTestSequence(10)...); err != nil {
+		t.Fatal(err)
+	}
+
+	value := make([]byte, 10e3) // 10 KB
+
+	batch := conn.ReadBatchWith(ReadBatchConfig{})
+
+	for i := 0; i < 10; i++ {
+		_, err := batch.Read(value)
+		if err != nil {
+			if err = batch.Close(); err != nil {
+				t.Fatalf("error trying to read batch message: %s", err)
+			}
+		}
+
+		if batch.HighWaterMark() != 10 {
+			t.Fatal("expected highest offset (watermark) to be 10")
+		}
+	}
+
+	if err := batch.Close(); err != nil {
+		t.Fatalf("error trying to close batch: %s", err)
+	}
+
+	if err := batch.Err(); err != nil {
+		t.Fatalf("broken batch: %s", err)
+	}
+}
+
+func testConnReadBatchWithMaxWait(t *testing.T, conn *Conn) {
+	if _, err := conn.WriteMessages(makeTestSequence(10)...); err != nil {
+		t.Fatal(err)
+	}
+
+	const maxBytes = 10e6 // 10 MB
+
+	value := make([]byte, 10e3) // 10 KB
+
+	cfg := ReadBatchConfig{
+		MinBytes: maxBytes, // use max for both so that we hit max wait time
+		MaxBytes: maxBytes,
+		MaxWait:  500 * time.Millisecond,
+	}
+
+	// set a read deadline so the batch will succeed.
+	conn.SetDeadline(time.Now().Add(time.Second))
+	batch := conn.ReadBatchWith(cfg)
+
+	for i := 0; i < 10; i++ {
+		_, err := batch.Read(value)
+		if err != nil {
+			if err = batch.Close(); err != nil {
+				t.Fatalf("error trying to read batch message: %s", err)
+			}
+		}
+
+		if batch.HighWaterMark() != 10 {
+			t.Fatal("expected highest offset (watermark) to be 10")
+		}
+	}
+
+	batch.Close()
+
+	// reset the offset and ensure that the conn deadline takes precedence over
+	// the max wait
+	conn.Seek(0, SeekAbsolute)
+	conn.SetDeadline(time.Now().Add(50 * time.Millisecond))
+	batch = conn.ReadBatchWith(cfg)
+	var netErr net.Error
+	if err := batch.Err(); err == nil {
+		t.Fatal("should have timed out, but got no error")
+	} else if errors.As(err, &netErr) {
+		if !netErr.Timeout() {
+			t.Fatalf("should have timed out, but got: %v", err)
+		}
+	}
+}
+
 func waitForCoordinator(t *testing.T, conn *Conn, groupID string) {
 	// ensure that kafka has allocated a group coordinator.  oddly, the issue doesn't
 	// appear to happen if kafka has been running for a while.
@@ -501,13 +662,15 @@ func waitForCoordinator(t *testing.T, co
 		_, err := conn.findCoordinator(findCoordinatorRequestV0{
 			CoordinatorKey: groupID,
 		})
-		switch err {
-		case nil:
+		if err != nil {
+			if errors.Is(err, GroupCoordinatorNotAvailable) {
+				time.Sleep(250 * time.Millisecond)
+				continue
+			} else {
+				t.Fatalf("unable to find coordinator for group: %v", err)
+			}
+		} else {
 			return
-		case GroupCoordinatorNotAvailable:
-			time.Sleep(250 * time.Millisecond)
-		default:
-			t.Fatalf("unable to find coordinator for group: %v", err)
 		}
 	}
 
@@ -517,10 +680,10 @@ func waitForCoordinator(t *testing.T, co
 func createGroup(t *testing.T, conn *Conn, groupID string) (generationID int32, memberID string, stop func()) {
 	waitForCoordinator(t, conn, groupID)
 
-	join := func() (joinGroup joinGroupResponseV1) {
+	join := func() (joinGroup joinGroupResponse) {
 		var err error
 		for attempt := 0; attempt < 10; attempt++ {
-			joinGroup, err = conn.joinGroup(joinGroupRequestV1{
+			joinGroup, err = conn.joinGroup(joinGroupRequest{
 				GroupID:          groupID,
 				SessionTimeout:   int32(time.Minute / time.Millisecond),
 				RebalanceTimeout: int32(time.Second / time.Millisecond),
@@ -532,15 +695,18 @@ func createGroup(t *testing.T, conn *Con
 					},
 				},
 			})
-			switch err {
-			case nil:
+			if err != nil {
+				if errors.Is(err, NotCoordinatorForGroup) {
+					time.Sleep(250 * time.Millisecond)
+					continue
+				} else {
+					t.Fatalf("bad joinGroup: %s", err)
+				}
+			} else {
 				return
-			case NotCoordinatorForGroup:
-				time.Sleep(250 * time.Millisecond)
-			default:
-				t.Fatalf("bad joinGroup: %s", err)
 			}
 		}
+
 		return
 	}
 
@@ -548,7 +714,7 @@ func createGroup(t *testing.T, conn *Con
 	joinGroup := join()
 
 	// sync the group
-	_, err := conn.syncGroups(syncGroupRequestV0{
+	_, err := conn.syncGroup(syncGroupRequestV0{
 		GroupID:      groupID,
 		GenerationID: joinGroup.GenerationID,
 		MemberID:     joinGroup.MemberID,
@@ -560,7 +726,7 @@ func createGroup(t *testing.T, conn *Con
 		},
 	})
 	if err != nil {
-		t.Fatalf("bad syncGroups: %s", err)
+		t.Fatalf("bad syncGroup: %s", err)
 	}
 
 	generationID = joinGroup.GenerationID
@@ -575,26 +741,6 @@ func createGroup(t *testing.T, conn *Con
 	return
 }
 
-func testConnDescribeGroupRetrievesAllGroups(t *testing.T, conn *Conn) {
-	groupID := makeGroupID()
-	_, _, stop1 := createGroup(t, conn, groupID)
-	defer stop1()
-
-	out, err := conn.describeGroups(describeGroupsRequestV1{
-		GroupIDs: []string{groupID},
-	})
-	if err != nil {
-		t.Fatalf("bad describeGroups: %s", err)
-	}
-
-	if v := len(out.Groups); v != 1 {
-		t.Fatalf("expected 1 group, got %v", v)
-	}
-	if id := out.Groups[0].GroupID; id != groupID {
-		t.Errorf("bad group: got %v, expected %v", id, groupID)
-	}
-}
-
 func testConnFindCoordinator(t *testing.T, conn *Conn) {
 	groupID := makeGroupID()
 
@@ -604,12 +750,11 @@ func testConnFindCoordinator(t *testing.
 		}
 		response, err := conn.findCoordinator(findCoordinatorRequestV0{CoordinatorKey: groupID})
 		if err != nil {
-			switch err {
-			case GroupCoordinatorNotAvailable:
+			if errors.Is(err, GroupCoordinatorNotAvailable) {
 				continue
-			default:
-				t.Fatalf("bad findCoordinator: %s", err)
 			}
+
+			t.Fatalf("bad findCoordinator: %s", err)
 		}
 
 		if response.Coordinator.NodeID == 0 {
@@ -626,8 +771,8 @@ func testConnFindCoordinator(t *testing.
 }
 
 func testConnJoinGroupInvalidGroupID(t *testing.T, conn *Conn) {
-	_, err := conn.joinGroup(joinGroupRequestV1{})
-	if err != InvalidGroupId && err != NotCoordinatorForGroup {
+	_, err := conn.joinGroup(joinGroupRequest{})
+	if !errors.Is(err, InvalidGroupId) && !errors.Is(err, NotCoordinatorForGroup) {
 		t.Fatalf("expected %v or %v; got %v", InvalidGroupId, NotCoordinatorForGroup, err)
 	}
 }
@@ -636,10 +781,10 @@ func testConnJoinGroupInvalidSessionTime
 	groupID := makeGroupID()
 	waitForCoordinator(t, conn, groupID)
 
-	_, err := conn.joinGroup(joinGroupRequestV1{
+	_, err := conn.joinGroup(joinGroupRequest{
 		GroupID: groupID,
 	})
-	if err != InvalidSessionTimeout && err != NotCoordinatorForGroup {
+	if !errors.Is(err, InvalidSessionTimeout) && !errors.Is(err, NotCoordinatorForGroup) {
 		t.Fatalf("expected %v or %v; got %v", InvalidSessionTimeout, NotCoordinatorForGroup, err)
 	}
 }
@@ -648,11 +793,11 @@ func testConnJoinGroupInvalidRefreshTime
 	groupID := makeGroupID()
 	waitForCoordinator(t, conn, groupID)
 
-	_, err := conn.joinGroup(joinGroupRequestV1{
+	_, err := conn.joinGroup(joinGroupRequest{
 		GroupID:        groupID,
 		SessionTimeout: int32(3 * time.Second / time.Millisecond),
 	})
-	if err != InvalidSessionTimeout && err != NotCoordinatorForGroup {
+	if !errors.Is(err, InvalidSessionTimeout) && !errors.Is(err, NotCoordinatorForGroup) {
 		t.Fatalf("expected %v or %v; got %v", InvalidSessionTimeout, NotCoordinatorForGroup, err)
 	}
 }
@@ -661,10 +806,10 @@ func testConnHeartbeatErr(t *testing.T,
 	groupID := makeGroupID()
 	createGroup(t, conn, groupID)
 
-	_, err := conn.syncGroups(syncGroupRequestV0{
+	_, err := conn.syncGroup(syncGroupRequestV0{
 		GroupID: groupID,
 	})
-	if err != UnknownMemberId && err != NotCoordinatorForGroup {
+	if !errors.Is(err, UnknownMemberId) && !errors.Is(err, NotCoordinatorForGroup) {
 		t.Fatalf("expected %v or %v; got %v", UnknownMemberId, NotCoordinatorForGroup, err)
 	}
 }
@@ -676,7 +821,7 @@ func testConnLeaveGroupErr(t *testing.T,
 	_, err := conn.leaveGroup(leaveGroupRequestV0{
 		GroupID: groupID,
 	})
-	if err != UnknownMemberId && err != NotCoordinatorForGroup {
+	if !errors.Is(err, UnknownMemberId) && !errors.Is(err, NotCoordinatorForGroup) {
 		t.Fatalf("expected %v or %v; got %v", UnknownMemberId, NotCoordinatorForGroup, err)
 	}
 }
@@ -685,10 +830,10 @@ func testConnSyncGroupErr(t *testing.T,
 	groupID := makeGroupID()
 	waitForCoordinator(t, conn, groupID)
 
-	_, err := conn.syncGroups(syncGroupRequestV0{
+	_, err := conn.syncGroup(syncGroupRequestV0{
 		GroupID: groupID,
 	})
-	if err != UnknownMemberId && err != NotCoordinatorForGroup {
+	if !errors.Is(err, UnknownMemberId) && !errors.Is(err, NotCoordinatorForGroup) {
 		t.Fatalf("expected %v or %v; got %v", UnknownMemberId, NotCoordinatorForGroup, err)
 	}
 }
@@ -796,8 +941,9 @@ func testConnFetchAndCommitOffsets(t *te
 
 func testConnWriteReadConcurrently(t *testing.T, conn *Conn) {
 	const N = 1000
-	var msgs = make([]string, N)
-	var done = make(chan struct{})
+	msgs := make([]string, N)
+	done := make(chan struct{})
+	written := make(chan struct{}, N/10)
 
 	for i := 0; i != N; i++ {
 		msgs[i] = strconv.Itoa(i)
@@ -809,12 +955,21 @@ func testConnWriteReadConcurrently(t *te
 			if _, err := conn.Write([]byte(msg)); err != nil {
 				t.Error(err)
 			}
+			written <- struct{}{}
 		}
 	}()
 
 	b := make([]byte, 128)
 
 	for i := 0; i != N; i++ {
+		// wait until at least one message has been written.  the reason for
+		// this synchronization is that we aren't using deadlines.  as such, if
+		// the read happens before a message is available, it will cause a
+		// deadlock because the read request will never hit the one byte minimum
+		// in order to return and release the lock on the conn.  by ensuring
+		// that there's at least one message produced, we don't hit that
+		// condition.
+		<-written
 		n, err := conn.Read(b)
 		if err != nil {
 			t.Error(err)
@@ -841,7 +996,7 @@ func testConnReadShortBuffer(t *testing.
 		b[3] = 0
 
 		n, err := conn.Read(b)
-		if err != io.ErrShortBuffer {
+		if !errors.Is(err, io.ErrShortBuffer) {
 			t.Error("bad error:", i, err)
 		}
 		if n != 4 {
@@ -857,7 +1012,7 @@ func testConnReadEmptyWithDeadline(t *te
 	b := make([]byte, 100)
 
 	start := time.Now()
-	deadline := start.Add(100 * time.Millisecond)
+	deadline := start.Add(time.Second)
 
 	conn.SetReadDeadline(deadline)
 	n, err := conn.Read(b)
@@ -915,9 +1070,9 @@ func testDeleteTopicsInvalidTopic(t *tes
 	if err != nil {
 		t.Fatalf("bad CreateTopics: %v", err)
 	}
-	conn.SetDeadline(time.Now().Add(time.Second))
+	conn.SetDeadline(time.Now().Add(5 * time.Second))
 	err = conn.DeleteTopics("invalid-topic", topic)
-	if err != UnknownTopicOrPartition {
+	if !errors.Is(err, UnknownTopicOrPartition) {
 		t.Fatalf("expected UnknownTopicOrPartition error, but got %v", err)
 	}
 	partitions, err := conn.ReadPartitions(topic)
@@ -925,7 +1080,93 @@ func testDeleteTopicsInvalidTopic(t *tes
 		t.Fatalf("bad ReadPartitions: %v", err)
 	}
 	if len(partitions) != 0 {
-		t.Fatal("exepected partitions to be empty")
+		t.Fatal("expected partitions to be empty")
+	}
+}
+
+func testController(t *testing.T, conn *Conn) {
+	b, err := conn.Controller()
+	if err != nil {
+		t.Error(err)
+	}
+
+	if b.Host != "localhost" {
+		t.Errorf("expected localhost received %s", b.Host)
+	}
+	if b.Port != 9092 {
+		t.Errorf("expected 9092 received %d", b.Port)
+	}
+	if b.ID != 1 {
+		t.Errorf("expected 1 received %d", b.ID)
+	}
+	if b.Rack != "" {
+		t.Errorf("expected empty string for rack received %s", b.Rack)
+	}
+}
+
+func testBrokers(t *testing.T, conn *Conn) {
+	brokers, err := conn.Brokers()
+	if err != nil {
+		t.Error(err)
+	}
+
+	if len(brokers) != 1 {
+		t.Errorf("expected 1 broker in %+v", brokers)
+	}
+
+	if brokers[0].ID != 1 {
+		t.Errorf("expected ID 1 received %d", brokers[0].ID)
+	}
+}
+
+func testConnBroker(t *testing.T, conn *Conn) {
+	broker := conn.Broker()
+	// Depending on the environment the test is being run in, IPv4 or IPv6 may be used.
+	if broker.Host != "::1" && broker.Host != "127.0.0.1" {
+		t.Errorf("invalid broker address: %q", broker.Host)
+	}
+	if broker.Port != 9092 {
+		t.Errorf("invalid broker port: %d", broker.Port)
+	}
+	if broker.ID != 1 {
+		t.Errorf("invalid broker id: %d", broker.ID)
+	}
+	if broker.Rack != "" {
+		t.Errorf("invalid broker rack: %q", broker.Rack)
+	}
+}
+
+func TestReadPartitionsNoTopic(t *testing.T) {
+	conn, err := Dial("tcp", "127.0.0.1:9092")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer conn.Close()
+
+	parts, err := conn.ReadPartitions()
+	if err != nil {
+		t.Error(err)
+	}
+
+	if len(parts) == 0 {
+		t.Errorf("no partitions were returned")
+	}
+}
+
+func TestUnsupportedSASLMechanism(t *testing.T) {
+	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+	defer cancel()
+
+	conn, err := (&Dialer{
+		Resolver: &net.Resolver{},
+	}).DialContext(ctx, "tcp", "127.0.0.1:9093")
+	if err != nil {
+		t.Fatal("failed to open a new kafka connection:", err)
+	}
+	defer conn.Close()
+
+	if err := conn.saslHandshake("FOO"); !errors.Is(err, UnsupportedSASLMechanism) {
+		t.Errorf("Expected UnsupportedSASLMechanism but got %v", err)
 	}
 }
 
@@ -1078,3 +1319,54 @@ func benchmarkConnWrite(b *testing.B, co
 
 	b.SetBytes(int64(n / i))
 }
+
+func TestEmptyToNullableReturnsNil(t *testing.T) {
+	if emptyToNullable("") != nil {
+		t.Error("Empty string is not converted to nil")
+	}
+}
+
+func TestEmptyToNullableLeavesStringsIntact(t *testing.T) {
+	const s = "abc"
+	r := emptyToNullable(s)
+	if *r != s {
+		t.Error("Non empty string is not equal to the original string")
+	}
+}
+
+func TestMakeBrokersAllPresent(t *testing.T) {
+	brokers := make(map[int32]Broker)
+	brokers[1] = Broker{ID: 1, Host: "203.0.113.101", Port: 9092}
+	brokers[2] = Broker{ID: 2, Host: "203.0.113.102", Port: 9092}
+	brokers[3] = Broker{ID: 3, Host: "203.0.113.103", Port: 9092}
+
+	b := makeBrokers(brokers, 1, 2, 3)
+	if len(b) != 3 {
+		t.Errorf("Expected 3 brokers, got %d", len(b))
+	}
+	for _, i := range []int32{1, 2, 3} {
+		if b[i-1] != brokers[i] {
+			t.Errorf("Expected broker %d at index %d, got %d", i, i-1, b[i-1].ID)
+		}
+	}
+}
+
+func TestMakeBrokersOneMissing(t *testing.T) {
+	brokers := make(map[int32]Broker)
+	brokers[1] = Broker{ID: 1, Host: "203.0.113.101", Port: 9092}
+	brokers[3] = Broker{ID: 3, Host: "203.0.113.103", Port: 9092}
+
+	b := makeBrokers(brokers, 1, 2, 3)
+	if len(b) != 3 {
+		t.Errorf("Expected 3 brokers, got %d", len(b))
+	}
+	if b[0] != brokers[1] {
+		t.Errorf("Expected broker 1 at index 0, got %d", b[0].ID)
+	}
+	if b[1] != (Broker{ID: 2}) {
+		t.Errorf("Expected broker 2 at index 1, got %d", b[1].ID)
+	}
+	if b[2] != brokers[3] {
+		t.Errorf("Expected broker 3 at index 2, got %d", b[2].ID)
+	}
+}
diff -pruN 0.2.1-1.1/consumergroup.go 0.4.49+ds1-1/consumergroup.go
--- 0.2.1-1.1/consumergroup.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/consumergroup.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,1252 @@
+package kafka
+
+import (
+	"bufio"
+	"bytes"
+	"context"
+	"errors"
+	"fmt"
+	"io"
+	"math"
+	"net"
+	"strconv"
+	"strings"
+	"sync"
+	"time"
+)
+
+// ErrGroupClosed is returned by ConsumerGroup.Next when the group has already
+// been closed.
+var ErrGroupClosed = errors.New("consumer group is closed")
+
+// ErrGenerationEnded is returned by the context.Context issued by the
+// Generation's Start function when the context has been closed.
+var ErrGenerationEnded = errors.New("consumer group generation has ended")
+
+const (
+	// defaultProtocolType holds the default protocol type documented in the
+	// kafka protocol
+	//
+	// See https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol#AGuideToTheKafkaProtocol-GroupMembershipAPI
+	defaultProtocolType = "consumer"
+
+	// defaultHeartbeatInterval contains the default time between heartbeats.  If
+	// the coordinator does not receive a heartbeat within the session timeout interval,
+	// the consumer will be considered dead and the coordinator will rebalance the
+	// group.
+	//
+	// As a rule, the heartbeat interval should be no greater than 1/3 the session timeout.
+	defaultHeartbeatInterval = 3 * time.Second
+
+	// defaultSessionTimeout contains the default interval the coordinator will wait
+	// for a heartbeat before marking a consumer as dead.
+	defaultSessionTimeout = 30 * time.Second
+
+	// defaultRebalanceTimeout contains the amount of time the coordinator will wait
+	// for consumers to issue a join group once a rebalance has been requested.
+	defaultRebalanceTimeout = 30 * time.Second
+
+	// defaultJoinGroupBackoff is the amount of time to wait after a failed
+	// consumer group generation before attempting to re-join.
+	defaultJoinGroupBackoff = 5 * time.Second
+
+	// defaultRetentionTime holds the length of time the consumer group will be
+	// saved by kafka.  This value tells the broker to use its configured value.
+	defaultRetentionTime = -1 * time.Millisecond
+
+	// defaultPartitionWatchTime contains the amount of time kafka-go will wait to
+	// query the brokers looking for partition changes.
+	defaultPartitionWatchTime = 5 * time.Second
+
+	// defaultTimeout is the deadline to set when interacting with the
+	// consumer group coordinator.
+	defaultTimeout = 5 * time.Second
+)
+
+// ConsumerGroupConfig is a configuration object used to create new instances of
+// ConsumerGroup.
+type ConsumerGroupConfig struct {
+	// ID is the consumer group ID.  It must not be empty.
+	ID string
+
+	// The list of broker addresses used to connect to the kafka cluster.  It
+	// must not be empty.
+	Brokers []string
+
+	// A dialer used to open connections to the kafka server. This field is
+	// optional, if nil, the default dialer is used instead.
+	Dialer *Dialer
+
+	// Topics is the list of topics that will be consumed by this group.  It
+	// will usually have a single value, but it is permitted to have multiple
+	// for more complex use cases.
+	Topics []string
+
+	// GroupBalancers is the priority-ordered list of client-side consumer group
+	// balancing strategies that will be offered to the coordinator.  The first
+	// strategy that all group members support will be chosen by the leader.
+	//
+	// Default: [Range, RoundRobin]
+	GroupBalancers []GroupBalancer
+
+	// HeartbeatInterval sets the optional frequency at which the reader sends the consumer
+	// group heartbeat update.
+	//
+	// Default: 3s
+	HeartbeatInterval time.Duration
+
+	// PartitionWatchInterval indicates how often a reader checks for partition changes.
+	// If a reader sees a partition change (such as a partition add) it will rebalance the group
+	// picking up new partitions.
+	//
+	// Default: 5s
+	PartitionWatchInterval time.Duration
+
+	// WatchPartitionChanges is used to inform kafka-go that a consumer group should be
+	// polling the brokers and rebalancing if any partition changes happen to the topic.
+	WatchPartitionChanges bool
+
+	// SessionTimeout optionally sets the length of time that may pass without a heartbeat
+	// before the coordinator considers the consumer dead and initiates a rebalance.
+	//
+	// Default: 30s
+	SessionTimeout time.Duration
+
+	// RebalanceTimeout optionally sets the length of time the coordinator will wait
+	// for members to join as part of a rebalance.  For kafka servers under higher
+	// load, it may be useful to set this value higher.
+	//
+	// Default: 30s
+	RebalanceTimeout time.Duration
+
+	// JoinGroupBackoff optionally sets the length of time to wait before re-joining
+	// the consumer group after an error.
+	//
+	// Default: 5s
+	JoinGroupBackoff time.Duration
+
+	// RetentionTime optionally sets the length of time the consumer group will
+	// be saved by the broker.  -1 will disable the setting and leave the
+	// retention up to the broker's offsets.retention.minutes property.  By
+	// default, that setting is 1 day for kafka < 2.0 and 7 days for kafka >=
+	// 2.0.
+	//
+	// Default: -1
+	RetentionTime time.Duration
+
+	// StartOffset determines from whence the consumer group should begin
+	// consuming when it finds a partition without a committed offset.  If
+	// non-zero, it must be set to one of FirstOffset or LastOffset.
+	//
+	// Default: FirstOffset
+	StartOffset int64
+
+	// If not nil, specifies a logger used to report internal changes within the
+	// reader.
+	Logger Logger
+
+	// ErrorLogger is the logger used to report errors. If nil, the reader falls
+	// back to using Logger instead.
+	ErrorLogger Logger
+
+	// Timeout is the network timeout used when communicating with the consumer
+	// group coordinator.  This value should not be too small since errors
+	// communicating with the broker will generally cause a consumer group
+	// rebalance, and it's undesirable that a transient network error introduce
+	// that overhead.  Similarly, it should not be too large or the consumer
+	// group may be slow to respond to the coordinator failing over to another
+	// broker.
+	//
+	// Default: 5s
+	Timeout time.Duration
+
+	// connect is a function for dialing the coordinator.  This is provided for
+	// unit testing to mock broker connections.
+	connect func(dialer *Dialer, brokers ...string) (coordinator, error)
+}
+
+// Validate method validates ConsumerGroupConfig properties and sets relevant
+// defaults.
+func (config *ConsumerGroupConfig) Validate() error {
+
+	if len(config.Brokers) == 0 {
+		return errors.New("cannot create a consumer group with an empty list of broker addresses")
+	}
+
+	if len(config.Topics) == 0 {
+		return errors.New("cannot create a consumer group without a topic")
+	}
+
+	if config.ID == "" {
+		return errors.New("cannot create a consumer group without an ID")
+	}
+
+	if config.Dialer == nil {
+		config.Dialer = DefaultDialer
+	}
+
+	if len(config.GroupBalancers) == 0 {
+		config.GroupBalancers = []GroupBalancer{
+			RangeGroupBalancer{},
+			RoundRobinGroupBalancer{},
+		}
+	}
+
+	if config.HeartbeatInterval == 0 {
+		config.HeartbeatInterval = defaultHeartbeatInterval
+	}
+
+	if config.SessionTimeout == 0 {
+		config.SessionTimeout = defaultSessionTimeout
+	}
+
+	if config.PartitionWatchInterval == 0 {
+		config.PartitionWatchInterval = defaultPartitionWatchTime
+	}
+
+	if config.RebalanceTimeout == 0 {
+		config.RebalanceTimeout = defaultRebalanceTimeout
+	}
+
+	if config.JoinGroupBackoff == 0 {
+		config.JoinGroupBackoff = defaultJoinGroupBackoff
+	}
+
+	if config.RetentionTime == 0 {
+		config.RetentionTime = defaultRetentionTime
+	}
+
+	if config.HeartbeatInterval < 0 || (config.HeartbeatInterval/time.Millisecond) >= math.MaxInt32 {
+		return fmt.Errorf("HeartbeatInterval out of bounds: %d", config.HeartbeatInterval)
+	}
+
+	if config.SessionTimeout < 0 || (config.SessionTimeout/time.Millisecond) >= math.MaxInt32 {
+		return fmt.Errorf("SessionTimeout out of bounds: %d", config.SessionTimeout)
+	}
+
+	if config.RebalanceTimeout < 0 || (config.RebalanceTimeout/time.Millisecond) >= math.MaxInt32 {
+		return fmt.Errorf("RebalanceTimeout out of bounds: %d", config.RebalanceTimeout)
+	}
+
+	if config.JoinGroupBackoff < 0 || (config.JoinGroupBackoff/time.Millisecond) >= math.MaxInt32 {
+		return fmt.Errorf("JoinGroupBackoff out of bounds: %d", config.JoinGroupBackoff)
+	}
+
+	if config.RetentionTime < 0 && config.RetentionTime != defaultRetentionTime {
+		return fmt.Errorf("RetentionTime out of bounds: %d", config.RetentionTime)
+	}
+
+	if config.PartitionWatchInterval < 0 || (config.PartitionWatchInterval/time.Millisecond) >= math.MaxInt32 {
+		return fmt.Errorf("PartitionWatchInterval out of bounds %d", config.PartitionWatchInterval)
+	}
+
+	if config.StartOffset == 0 {
+		config.StartOffset = FirstOffset
+	}
+
+	if config.StartOffset != FirstOffset && config.StartOffset != LastOffset {
+		return fmt.Errorf("StartOffset is not valid %d", config.StartOffset)
+	}
+
+	if config.Timeout == 0 {
+		config.Timeout = defaultTimeout
+	}
+
+	if config.connect == nil {
+		config.connect = makeConnect(*config)
+	}
+
+	return nil
+}
+
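
Because Validate fills in defaults for every zero-valued field, only ID, Brokers and Topics are strictly required. A minimal configuration sketch (group, topic and broker names are placeholders):

	package main

	import (
		"log"

		kafka "github.com/segmentio/kafka-go"
	)

	func main() {
		group, err := kafka.NewConsumerGroup(kafka.ConsumerGroupConfig{
			ID:      "example-group",
			Brokers: []string{"localhost:9092"},
			Topics:  []string{"example-topic"},
			// HeartbeatInterval, SessionTimeout, RebalanceTimeout, etc. are
			// left at zero and picked up from the defaults listed above.
		})
		if err != nil {
			log.Fatal(err)
		}
		defer group.Close()
	}
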
+// PartitionAssignment represents the starting state of a partition that has
+// been assigned to a consumer.
+type PartitionAssignment struct {
+	// ID is the partition ID.
+	ID int
+
+	// Offset is the initial offset at which this assignment begins.  It will
+	// either be an absolute offset if one has previously been committed for
+	// the consumer group or a relative offset such as FirstOffset when this
+	// is the first time the partition has been assigned to a member of the
+	// group.
+	Offset int64
+}
+
+// genCtx adapts the done channel of the generation to a context.Context.  This
+// is used by Generation.Start so that we can pass a context to go routines
+// instead of passing around channels.
+type genCtx struct {
+	gen *Generation
+}
+
+func (c genCtx) Done() <-chan struct{} {
+	return c.gen.done
+}
+
+func (c genCtx) Err() error {
+	select {
+	case <-c.gen.done:
+		return ErrGenerationEnded
+	default:
+		return nil
+	}
+}
+
+func (c genCtx) Deadline() (time.Time, bool) {
+	return time.Time{}, false
+}
+
+func (c genCtx) Value(interface{}) interface{} {
+	return nil
+}
+
+// Generation represents a single consumer group generation.  The generation
+// carries the topic+partition assignments for the given generation.  It also provides
+// facilities for committing offsets and for running functions whose lifecycles
+// are bound to the generation.
+type Generation struct {
+	// ID is the generation ID as assigned by the consumer group coordinator.
+	ID int32
+
+	// GroupID is the name of the consumer group.
+	GroupID string
+
+	// MemberID is the ID assigned to this consumer by the consumer group
+	// coordinator.
+	MemberID string
+
+	// Assignments is the initial state of this Generation.  The partition
+	// assignments are grouped by topic.
+	Assignments map[string][]PartitionAssignment
+
+	conn coordinator
+
+	// the following fields are used for process accounting to synchronize
+	// between Start and close.  lock protects all of them.  done is closed
+	// when the generation is ending in order to signal that the generation
+	// should start self-destructing.  closed protects against double-closing
+	// the done chan.  routines is a count of running go routines that have been
+	// launched by Start.  joined will be closed by the last go routine to exit.
+	lock     sync.Mutex
+	done     chan struct{}
+	closed   bool
+	routines int
+	joined   chan struct{}
+
+	retentionMillis int64
+	log             func(func(Logger))
+	logError        func(func(Logger))
+}
+
+// close stops the generation and waits for all functions launched via Start to
+// terminate.
+func (g *Generation) close() {
+	g.lock.Lock()
+	if !g.closed {
+		close(g.done)
+		g.closed = true
+	}
+	// determine whether any go routines are running that we need to wait for.
+	// waiting needs to happen outside of the critical section.
+	r := g.routines
+	g.lock.Unlock()
+
+	// NOTE: r will be zero if no go routines were ever launched.  no need to
+	// wait in that case.
+	if r > 0 {
+		<-g.joined
+	}
+}
+
+// Start launches the provided function in a go routine and adds accounting such
+// that when the function exits, it stops the current generation (if not
+// already in the process of doing so).
+//
+// The provided function MUST support cancellation via the ctx argument and exit
+// in a timely manner once the ctx is complete.  When the context is closed, the
+// context's Error() function will return ErrGenerationEnded.
+//
+// When closing out a generation, the consumer group will wait for all functions
+// launched by Start to exit before the group can move on and join the next
+// generation.  If the function does not exit promptly, it will stop forward
+// progress for this consumer and potentially cause consumer group membership
+// churn.
+func (g *Generation) Start(fn func(ctx context.Context)) {
+	g.lock.Lock()
+	defer g.lock.Unlock()
+
+	// this is an edge case: if the generation has already closed, then it's
+	// possible that the close func has already waited on outstanding go
+	// routines and exited.
+	//
+	// nonetheless, it's important to honor that the fn is invoked in case the
+	// calling function is waiting e.g. on a channel send or a WaitGroup.  in
+	// such a case, fn should immediately exit because ctx.Err() will return
+	// ErrGenerationEnded.
+	if g.closed {
+		go fn(genCtx{g})
+		return
+	}
+
+	// register that there is one more go routine that's part of this gen.
+	g.routines++
+
+	go func() {
+		fn(genCtx{g})
+		g.lock.Lock()
+		// shut down the generation as soon as one function exits.  this is
+		// different from close() in that it doesn't wait for all go routines in
+		// the generation to exit.
+		if !g.closed {
+			close(g.done)
+			g.closed = true
+		}
+		g.routines--
+		// if this was the last go routine in the generation, close the joined
+		// chan so that close() can exit if it's waiting.
+		if g.routines == 0 {
+			close(g.joined)
+		}
+		g.lock.Unlock()
+	}()
+}
+
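
A sketch of a generation-scoped goroutine launched with Start; once the generation ends, ctx.Done() is closed and ctx.Err() reports ErrGenerationEnded. The gen value is assumed to come from ConsumerGroup.Next:

	package example

	import (
		"context"
		"time"

		kafka "github.com/segmentio/kafka-go"
	)

	func startTicker(gen *kafka.Generation) {
		gen.Start(func(ctx context.Context) {
			ticker := time.NewTicker(time.Second)
			defer ticker.Stop()
			for {
				select {
				case <-ctx.Done():
					// The generation has ended; exit promptly so the group
					// can move on to the next generation.
					return
				case <-ticker.C:
					// periodic, generation-scoped work goes here
				}
			}
		})
	}
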
+// CommitOffsets commits the provided topic+partition+offset combos to the
+// consumer group coordinator.  This can be used to reset the consumer to
+// explicit offsets.
+func (g *Generation) CommitOffsets(offsets map[string]map[int]int64) error {
+	if len(offsets) == 0 {
+		return nil
+	}
+
+	topics := make([]offsetCommitRequestV2Topic, 0, len(offsets))
+	for topic, partitions := range offsets {
+		t := offsetCommitRequestV2Topic{Topic: topic}
+		for partition, offset := range partitions {
+			t.Partitions = append(t.Partitions, offsetCommitRequestV2Partition{
+				Partition: int32(partition),
+				Offset:    offset,
+			})
+		}
+		topics = append(topics, t)
+	}
+
+	request := offsetCommitRequestV2{
+		GroupID:       g.GroupID,
+		GenerationID:  g.ID,
+		MemberID:      g.MemberID,
+		RetentionTime: g.retentionMillis,
+		Topics:        topics,
+	}
+
+	_, err := g.conn.offsetCommit(request)
+	if err == nil {
+		// if logging is enabled, print out the partitions that were committed.
+		g.log(func(l Logger) {
+			var report []string
+			for _, t := range request.Topics {
+				report = append(report, fmt.Sprintf("\ttopic: %s", t.Topic))
+				for _, p := range t.Partitions {
+					report = append(report, fmt.Sprintf("\t\tpartition %d: %d", p.Partition, p.Offset))
+				}
+			}
+			l.Printf("committed offsets for group %s: \n%s", g.GroupID, strings.Join(report, "\n"))
+		})
+	}
+
+	return err
+}
+
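
The offsets argument is keyed by topic, then partition. A small sketch committing explicit offsets for two partitions (topic name and offset values are placeholders):

	package example

	import kafka "github.com/segmentio/kafka-go"

	func commitExample(gen *kafka.Generation) error {
		return gen.CommitOffsets(map[string]map[int]int64{
			"example-topic": {
				0: 42, // partition 0 -> offset 42
				1: 17, // partition 1 -> offset 17
			},
		})
	}
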
+// heartbeatLoop checks in with the consumer group coordinator at the provided
+// interval.  It exits if it ever encounters an error, which would signal the
+// end of the generation.
+func (g *Generation) heartbeatLoop(interval time.Duration) {
+	g.Start(func(ctx context.Context) {
+		g.log(func(l Logger) {
+			l.Printf("started heartbeat for group, %v [%v]", g.GroupID, interval)
+		})
+		defer g.log(func(l Logger) {
+			l.Printf("stopped heartbeat for group %s\n", g.GroupID)
+		})
+
+		ticker := time.NewTicker(interval)
+		defer ticker.Stop()
+
+		for {
+			select {
+			case <-ctx.Done():
+				return
+			case <-ticker.C:
+				_, err := g.conn.heartbeat(heartbeatRequestV0{
+					GroupID:      g.GroupID,
+					GenerationID: g.ID,
+					MemberID:     g.MemberID,
+				})
+				if err != nil {
+					return
+				}
+			}
+		}
+	})
+}
+
+// partitionWatcher queries kafka and watches for partition changes, triggering
+// a rebalance if changes are found. Similar to heartbeat it's okay to return on
+// error here, because if you are unable to ask a broker for basic metadata you're in
+// a bad spot and should rebalance. Commonly you will see an error here if there
+// is a problem with the connection to the coordinator and a rebalance will
+// establish a new connection to the coordinator.
+func (g *Generation) partitionWatcher(interval time.Duration, topic string) {
+	g.Start(func(ctx context.Context) {
+		g.log(func(l Logger) {
+			l.Printf("started partition watcher for group, %v, topic %v [%v]", g.GroupID, topic, interval)
+		})
+		defer g.log(func(l Logger) {
+			l.Printf("stopped partition watcher for group, %v, topic %v", g.GroupID, topic)
+		})
+
+		ticker := time.NewTicker(interval)
+		defer ticker.Stop()
+
+		ops, err := g.conn.readPartitions(topic)
+		if err != nil {
+			g.logError(func(l Logger) {
+				l.Printf("Problem getting partitions during startup, %v.  Returning and setting up nextGeneration", err)
+			})
+			return
+		}
+		oParts := len(ops)
+		for {
+			select {
+			case <-ctx.Done():
+				return
+			case <-ticker.C:
+				ops, err := g.conn.readPartitions(topic)
+				switch {
+				case err == nil, errors.Is(err, UnknownTopicOrPartition):
+					if len(ops) != oParts {
+						g.log(func(l Logger) {
+							l.Printf("Partition changes found, rebalancing group: %v.", g.GroupID)
+						})
+						return
+					}
+
+				default:
+					g.logError(func(l Logger) {
+						l.Printf("Problem getting partitions while checking for changes, %v", err)
+					})
+					var kafkaError Error
+					if errors.As(err, &kafkaError) {
+						continue
+					}
+					// other errors imply that we lost the connection to the coordinator, so we
+					// should abort and reconnect.
+					return
+				}
+			}
+		}
+	})
+}
+
+// coordinator is a subset of the functionality in Conn in order to facilitate
+// testing the consumer group...especially for error conditions that are
+// difficult to instigate with a live broker running in docker.
+type coordinator interface {
+	io.Closer
+	findCoordinator(findCoordinatorRequestV0) (findCoordinatorResponseV0, error)
+	joinGroup(joinGroupRequest) (joinGroupResponse, error)
+	syncGroup(syncGroupRequestV0) (syncGroupResponseV0, error)
+	leaveGroup(leaveGroupRequestV0) (leaveGroupResponseV0, error)
+	heartbeat(heartbeatRequestV0) (heartbeatResponseV0, error)
+	offsetFetch(offsetFetchRequestV1) (offsetFetchResponseV1, error)
+	offsetCommit(offsetCommitRequestV2) (offsetCommitResponseV2, error)
+	readPartitions(...string) ([]Partition, error)
+}
+
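
Because the coordinator interface is unexported, stubs have to live inside the kafka package itself (as the consumer group tests do). Embedding the interface lets a test double implement only the methods it exercises; the sketch below is illustrative and not part of the patch:

	// stubCoordinator satisfies coordinator while only overriding joinGroup;
	// calling any other method panics, which is acceptable in a focused test.
	type stubCoordinator struct {
		coordinator
		joinErr error
	}

	func (s stubCoordinator) joinGroup(joinGroupRequest) (joinGroupResponse, error) {
		return joinGroupResponse{}, s.joinErr
	}
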
+// timeoutCoordinator wraps the Conn to ensure that every operation has a
+// deadline.  Otherwise, it would be possible for requests to block indefinitely
+// if the remote server never responds.  There are many spots where the consumer
+// group needs to interact with the broker, so it feels less error prone to
+// factor all of the deadline management into this shared location as opposed to
+// peppering it all through where the code actually interacts with the broker.
+type timeoutCoordinator struct {
+	timeout          time.Duration
+	sessionTimeout   time.Duration
+	rebalanceTimeout time.Duration
+	conn             *Conn
+}
+
+func (t *timeoutCoordinator) Close() error {
+	return t.conn.Close()
+}
+
+func (t *timeoutCoordinator) findCoordinator(req findCoordinatorRequestV0) (findCoordinatorResponseV0, error) {
+	if err := t.conn.SetDeadline(time.Now().Add(t.timeout)); err != nil {
+		return findCoordinatorResponseV0{}, err
+	}
+	return t.conn.findCoordinator(req)
+}
+
+func (t *timeoutCoordinator) joinGroup(req joinGroupRequest) (joinGroupResponse, error) {
+	// in the case of join group, the consumer group coordinator may wait up
+	// to rebalance timeout in order to wait for all members to join.
+	if err := t.conn.SetDeadline(time.Now().Add(t.timeout + t.rebalanceTimeout)); err != nil {
+		return joinGroupResponse{}, err
+	}
+	return t.conn.joinGroup(req)
+}
+
+func (t *timeoutCoordinator) syncGroup(req syncGroupRequestV0) (syncGroupResponseV0, error) {
+	// in the case of sync group, the consumer group leader is given up to
+	// the session timeout to respond before the coordinator will give up.
+	if err := t.conn.SetDeadline(time.Now().Add(t.timeout + t.sessionTimeout)); err != nil {
+		return syncGroupResponseV0{}, err
+	}
+	return t.conn.syncGroup(req)
+}
+
+func (t *timeoutCoordinator) leaveGroup(req leaveGroupRequestV0) (leaveGroupResponseV0, error) {
+	if err := t.conn.SetDeadline(time.Now().Add(t.timeout)); err != nil {
+		return leaveGroupResponseV0{}, err
+	}
+	return t.conn.leaveGroup(req)
+}
+
+func (t *timeoutCoordinator) heartbeat(req heartbeatRequestV0) (heartbeatResponseV0, error) {
+	if err := t.conn.SetDeadline(time.Now().Add(t.timeout)); err != nil {
+		return heartbeatResponseV0{}, err
+	}
+	return t.conn.heartbeat(req)
+}
+
+func (t *timeoutCoordinator) offsetFetch(req offsetFetchRequestV1) (offsetFetchResponseV1, error) {
+	if err := t.conn.SetDeadline(time.Now().Add(t.timeout)); err != nil {
+		return offsetFetchResponseV1{}, err
+	}
+	return t.conn.offsetFetch(req)
+}
+
+func (t *timeoutCoordinator) offsetCommit(req offsetCommitRequestV2) (offsetCommitResponseV2, error) {
+	if err := t.conn.SetDeadline(time.Now().Add(t.timeout)); err != nil {
+		return offsetCommitResponseV2{}, err
+	}
+	return t.conn.offsetCommit(req)
+}
+
+func (t *timeoutCoordinator) readPartitions(topics ...string) ([]Partition, error) {
+	if err := t.conn.SetDeadline(time.Now().Add(t.timeout)); err != nil {
+		return nil, err
+	}
+	return t.conn.ReadPartitions(topics...)
+}
+
+// NewConsumerGroup creates a new ConsumerGroup.  It returns an error if the
+// provided configuration is invalid.  It does not attempt to connect to the
+// Kafka cluster.  That happens asynchronously, and any errors will be reported
+// by Next.
+func NewConsumerGroup(config ConsumerGroupConfig) (*ConsumerGroup, error) {
+	if err := config.Validate(); err != nil {
+		return nil, err
+	}
+
+	cg := &ConsumerGroup{
+		config: config,
+		next:   make(chan *Generation),
+		errs:   make(chan error),
+		done:   make(chan struct{}),
+	}
+	cg.wg.Add(1)
+	go func() {
+		cg.run()
+		cg.wg.Done()
+	}()
+	return cg, nil
+}
+
+// ConsumerGroup models a Kafka consumer group.  A caller doesn't interact with
+// the group directly.  Rather, they interact with a Generation.  Every time a
+// member enters or exits the group, it results in a new Generation.  The
+// Generation is where partition assignments and offset management occur.
+// Callers will use Next to get a handle to the Generation.
+type ConsumerGroup struct {
+	config ConsumerGroupConfig
+	next   chan *Generation
+	errs   chan error
+
+	closeOnce sync.Once
+	wg        sync.WaitGroup
+	done      chan struct{}
+}
+
+// Close terminates the current generation by causing this member to leave and
+// releases all local resources used to participate in the consumer group.
+// Close will also end the current generation if it is still active.
+func (cg *ConsumerGroup) Close() error {
+	cg.closeOnce.Do(func() {
+		close(cg.done)
+	})
+	cg.wg.Wait()
+	return nil
+}
+
+// Next waits for the next consumer group generation.  There will never be two
+// active generations.  Next will never return a new generation until the
+// previous one has completed.
+//
+// If there are errors setting up the next generation, they will be surfaced
+// here.
+//
+// If the ConsumerGroup has been closed, then Next will return ErrGroupClosed.
+func (cg *ConsumerGroup) Next(ctx context.Context) (*Generation, error) {
+	select {
+	case <-ctx.Done():
+		return nil, ctx.Err()
+	case <-cg.done:
+		return nil, ErrGroupClosed
+	case err := <-cg.errs:
+		return nil, err
+	case next := <-cg.next:
+		return next, nil
+	}
+}
+
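
A sketch of the typical lifecycle loop around Next, logging the assignments of each generation; group is assumed to be a *ConsumerGroup built as in the configuration sketch above:

	package example

	import (
		"context"
		"errors"
		"log"

		kafka "github.com/segmentio/kafka-go"
	)

	func consume(ctx context.Context, group *kafka.ConsumerGroup) {
		for {
			gen, err := group.Next(ctx)
			if errors.Is(err, kafka.ErrGroupClosed) || errors.Is(err, context.Canceled) {
				return
			}
			if err != nil {
				log.Println("waiting for next generation:", err)
				continue
			}
			for topic, assignments := range gen.Assignments {
				for _, pa := range assignments {
					log.Printf("assigned %s/%d starting at offset %d", topic, pa.ID, pa.Offset)
				}
			}
		}
	}
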
+func (cg *ConsumerGroup) run() {
+	// the memberID is the only piece of information that is maintained across
+	// generations.  it starts empty and will be assigned on the first nextGeneration
+	// when the joinGroup request is processed.  it may change again later if
+	// the CG coordinator fails over or if the member is evicted.  otherwise, it
+	// will be constant for the lifetime of this group.
+	var memberID string
+	var err error
+	for {
+		memberID, err = cg.nextGeneration(memberID)
+
+		// backoff will be set if this go routine should sleep before continuing
+		// to the next generation.  it will be non-nil in the case of an error
+		// joining or syncing the group.
+		var backoff <-chan time.Time
+
+		switch {
+		case err == nil:
+			// no error...the previous generation finished normally.
+			continue
+
+		case errors.Is(err, ErrGroupClosed):
+			// the CG has been closed...leave the group and exit loop.
+			_ = cg.leaveGroup(memberID)
+			return
+
+		case errors.Is(err, RebalanceInProgress):
+			// in case of a RebalanceInProgress, don't leave the group or
+			// change the member ID, but report the error.  the next attempt
+			// to join the group will then be subject to the rebalance
+			// timeout, so the broker will be responsible for throttling
+			// this loop.
+
+		default:
+			// leave the group and report the error if we had gotten far
+			// enough so as to have a member ID.  also clear the member id
+			// so we don't attempt to use it again.  in order to avoid
+			// a tight error loop, backoff before the next attempt to join
+			// the group.
+			_ = cg.leaveGroup(memberID)
+			memberID = ""
+			backoff = time.After(cg.config.JoinGroupBackoff)
+		}
+		// ensure that we exit cleanly in case the CG is done and no one is
+		// waiting to receive on the unbuffered error channel.
+		select {
+		case <-cg.done:
+			return
+		case cg.errs <- err:
+		}
+		// backoff if needed, being sure to exit cleanly if the CG is done.
+		if backoff != nil {
+			select {
+			case <-cg.done:
+				// exit cleanly if the group is closed.
+				return
+			case <-backoff:
+			}
+		}
+	}
+}
+
+func (cg *ConsumerGroup) nextGeneration(memberID string) (string, error) {
+	// get a new connection to the coordinator on each loop.  the previous
+	// generation could have exited due to losing the connection, so this
+	// ensures that we always have a clean starting point.  it means we will
+	// re-connect in certain cases, but that shouldn't be an issue given that
+	// rebalances are relatively infrequent under normal operating
+	// conditions.
+	conn, err := cg.coordinator()
+	if err != nil {
+		cg.withErrorLogger(func(log Logger) {
+			log.Printf("Unable to establish connection to consumer group coordinator for group %s: %v", cg.config.ID, err)
+		})
+		return memberID, err // a prior memberID may still be valid, so don't return ""
+	}
+	defer conn.Close()
+
+	var generationID int32
+	var groupAssignments GroupMemberAssignments
+	var assignments map[string][]int32
+
+	// join group.  this will join the group and prepare assignments if our
+	// consumer is elected leader.  it may also change or assign the member ID.
+	memberID, generationID, groupAssignments, err = cg.joinGroup(conn, memberID)
+	if err != nil {
+		cg.withErrorLogger(func(log Logger) {
+			log.Printf("Failed to join group %s: %v", cg.config.ID, err)
+		})
+		return memberID, err
+	}
+	cg.withLogger(func(log Logger) {
+		log.Printf("Joined group %s as member %s in generation %d", cg.config.ID, memberID, generationID)
+	})
+
+	// sync group
+	assignments, err = cg.syncGroup(conn, memberID, generationID, groupAssignments)
+	if err != nil {
+		cg.withErrorLogger(func(log Logger) {
+			log.Printf("Failed to sync group %s: %v", cg.config.ID, err)
+		})
+		return memberID, err
+	}
+
+	// fetch initial offsets.
+	var offsets map[string]map[int]int64
+	offsets, err = cg.fetchOffsets(conn, assignments)
+	if err != nil {
+		cg.withErrorLogger(func(log Logger) {
+			log.Printf("Failed to fetch offsets for group %s: %v", cg.config.ID, err)
+		})
+		return memberID, err
+	}
+
+	// create the generation.
+	gen := Generation{
+		ID:              generationID,
+		GroupID:         cg.config.ID,
+		MemberID:        memberID,
+		Assignments:     cg.makeAssignments(assignments, offsets),
+		conn:            conn,
+		done:            make(chan struct{}),
+		joined:          make(chan struct{}),
+		retentionMillis: int64(cg.config.RetentionTime / time.Millisecond),
+		log:             cg.withLogger,
+		logError:        cg.withErrorLogger,
+	}
+
+	// spawn all of the go routines required to facilitate this generation.  if
+	// any of these functions exit, then the generation is determined to be
+	// complete.
+	gen.heartbeatLoop(cg.config.HeartbeatInterval)
+	if cg.config.WatchPartitionChanges {
+		for _, topic := range cg.config.Topics {
+			gen.partitionWatcher(cg.config.PartitionWatchInterval, topic)
+		}
+	}
+
+	// make this generation available for retrieval.  if the CG is closed before
+	// we can send it on the channel, exit.  that case is required b/c the next
+	// channel is unbuffered.  if the caller to Next has already bailed because
+	// its own teardown logic has been invoked, this would deadlock otherwise.
+	select {
+	case <-cg.done:
+		gen.close()
+		return memberID, ErrGroupClosed // ErrGroupClosed will trigger leave logic.
+	case cg.next <- &gen:
+	}
+
+	// wait for generation to complete.  if the CG is closed before the
+	// generation is finished, exit and leave the group.
+	select {
+	case <-cg.done:
+		gen.close()
+		return memberID, ErrGroupClosed // ErrGroupClosed will trigger leave logic.
+	case <-gen.done:
+		// time for next generation!  make sure all the current go routines exit
+		// before continuing onward.
+		gen.close()
+		return memberID, nil
+	}
+}
+
+// connect returns a connection to ANY broker.
+func makeConnect(config ConsumerGroupConfig) func(dialer *Dialer, brokers ...string) (coordinator, error) {
+	return func(dialer *Dialer, brokers ...string) (coordinator, error) {
+		var err error
+		for _, broker := range brokers {
+			var conn *Conn
+			if conn, err = dialer.Dial("tcp", broker); err == nil {
+				return &timeoutCoordinator{
+					conn:             conn,
+					timeout:          config.Timeout,
+					sessionTimeout:   config.SessionTimeout,
+					rebalanceTimeout: config.RebalanceTimeout,
+				}, nil
+			}
+		}
+		return nil, err // err will be non-nil
+	}
+}
+
+// coordinator establishes a connection to the coordinator for this consumer
+// group.
+func (cg *ConsumerGroup) coordinator() (coordinator, error) {
+	// NOTE : could try to cache the coordinator to avoid the double connect
+	//        here.  since consumer group balances happen infrequently and are
+	//        an expensive operation, we're not currently optimizing that case
+	//        in order to keep the code simpler.
+	conn, err := cg.config.connect(cg.config.Dialer, cg.config.Brokers...)
+	if err != nil {
+		return nil, err
+	}
+	defer conn.Close()
+
+	out, err := conn.findCoordinator(findCoordinatorRequestV0{
+		CoordinatorKey: cg.config.ID,
+	})
+	if err == nil && out.ErrorCode != 0 {
+		err = Error(out.ErrorCode)
+	}
+	if err != nil {
+		return nil, err
+	}
+
+	address := net.JoinHostPort(out.Coordinator.Host, strconv.Itoa(int(out.Coordinator.Port)))
+	return cg.config.connect(cg.config.Dialer, address)
+}
+
+// joinGroup attempts to join the reader to the consumer group.
+// Returns GroupMemberAssignments if this Reader was selected as
+// the leader.  Otherwise, GroupMemberAssignments will be nil.
+//
+// Possible kafka error codes returned:
+//  * GroupLoadInProgress:
+//  * GroupCoordinatorNotAvailable:
+//  * NotCoordinatorForGroup:
+//  * InconsistentGroupProtocol:
+//  * InvalidSessionTimeout:
+//  * GroupAuthorizationFailed:
+func (cg *ConsumerGroup) joinGroup(conn coordinator, memberID string) (string, int32, GroupMemberAssignments, error) {
+	request, err := cg.makeJoinGroupRequest(memberID)
+	if err != nil {
+		return "", 0, nil, err
+	}
+
+	response, err := conn.joinGroup(request)
+	if err == nil && response.ErrorCode != 0 {
+		err = Error(response.ErrorCode)
+	}
+	if err != nil {
+		return "", 0, nil, err
+	}
+
+	memberID = response.MemberID
+	generationID := response.GenerationID
+
+	cg.withLogger(func(l Logger) {
+		l.Printf("joined group %s as member %s in generation %d", cg.config.ID, memberID, generationID)
+	})
+
+	var assignments GroupMemberAssignments
+	if iAmLeader := response.MemberID == response.LeaderID; iAmLeader {
+		v, err := cg.assignTopicPartitions(conn, response)
+		if err != nil {
+			return memberID, 0, nil, err
+		}
+		assignments = v
+
+		cg.withLogger(func(l Logger) {
+			for memberID, assignment := range assignments {
+				for topic, partitions := range assignment {
+					l.Printf("assigned member/topic/partitions %v/%v/%v", memberID, topic, partitions)
+				}
+			}
+		})
+	}
+
+	cg.withLogger(func(l Logger) {
+		l.Printf("joinGroup succeeded for response, %v.  generationID=%v, memberID=%v", cg.config.ID, response.GenerationID, response.MemberID)
+	})
+
+	return memberID, generationID, assignments, nil
+}
+
+// makeJoinGroupRequest handles the logic of constructing a joinGroup
+// request.
+func (cg *ConsumerGroup) makeJoinGroupRequest(memberID string) (joinGroupRequest, error) {
+	request := joinGroupRequest{
+		GroupID:          cg.config.ID,
+		MemberID:         memberID,
+		SessionTimeout:   int32(cg.config.SessionTimeout / time.Millisecond),
+		RebalanceTimeout: int32(cg.config.RebalanceTimeout / time.Millisecond),
+		ProtocolType:     defaultProtocolType,
+	}
+
+	for _, balancer := range cg.config.GroupBalancers {
+		userData, err := balancer.UserData()
+		if err != nil {
+			return joinGroupRequest{}, fmt.Errorf("unable to construct protocol metadata for member, %v: %w", balancer.ProtocolName(), err)
+		}
+		request.GroupProtocols = append(request.GroupProtocols, joinGroupRequestGroupProtocolV1{
+			ProtocolName: balancer.ProtocolName(),
+			ProtocolMetadata: groupMetadata{
+				Version:  1,
+				Topics:   cg.config.Topics,
+				UserData: userData,
+			}.bytes(),
+		})
+	}
+
+	return request, nil
+}
+
+// assignTopicPartitions uses the selected GroupBalancer to assign members to
+// their various partitions.
+func (cg *ConsumerGroup) assignTopicPartitions(conn coordinator, group joinGroupResponse) (GroupMemberAssignments, error) {
+	cg.withLogger(func(l Logger) {
+		l.Printf("selected as leader for group, %s\n", cg.config.ID)
+	})
+
+	balancer, ok := findGroupBalancer(group.GroupProtocol, cg.config.GroupBalancers)
+	if !ok {
+		// NOTE : this shouldn't happen in practice...the broker should not
+		//        return successfully from joinGroup unless all members support
+		//        at least one common protocol.
+		return nil, fmt.Errorf("unable to find selected balancer, %v, for group, %v", group.GroupProtocol, cg.config.ID)
+	}
+
+	members, err := cg.makeMemberProtocolMetadata(group.Members)
+	if err != nil {
+		return nil, err
+	}
+
+	topics := extractTopics(members)
+	partitions, err := conn.readPartitions(topics...)
+
+	// it's not a failure if the topic doesn't exist yet.  it results in no
+	// assignments for the topic.  this matches the behavior of the official
+	// clients: java, python, and librdkafka.
+	// a topic watcher can trigger a rebalance when the topic comes into being.
+	if err != nil && !errors.Is(err, UnknownTopicOrPartition) {
+		return nil, err
+	}
+
+	cg.withLogger(func(l Logger) {
+		l.Printf("using '%v' balancer to assign group, %v", group.GroupProtocol, cg.config.ID)
+		for _, member := range members {
+			l.Printf("found member: %v/%#v", member.ID, member.UserData)
+		}
+		for _, partition := range partitions {
+			l.Printf("found topic/partition: %v/%v", partition.Topic, partition.ID)
+		}
+	})
+
+	return balancer.AssignGroups(members, partitions), nil
+}
+
+// makeMemberProtocolMetadata maps encoded member metadata ([]byte) into []GroupMember.
+func (cg *ConsumerGroup) makeMemberProtocolMetadata(in []joinGroupResponseMember) ([]GroupMember, error) {
+	members := make([]GroupMember, 0, len(in))
+	for _, item := range in {
+		metadata := groupMetadata{}
+		reader := bufio.NewReader(bytes.NewReader(item.MemberMetadata))
+		if remain, err := (&metadata).readFrom(reader, len(item.MemberMetadata)); err != nil || remain != 0 {
+			return nil, fmt.Errorf("unable to read metadata for member, %v: %w", item.MemberID, err)
+		}
+
+		members = append(members, GroupMember{
+			ID:       item.MemberID,
+			Topics:   metadata.Topics,
+			UserData: metadata.UserData,
+		})
+	}
+	return members, nil
+}
+
+// syncGroup completes the consumer group nextGeneration by accepting the
+// memberAssignments (if this Reader is the leader) and returning this
+// Reader's subscriptions as a map of topic => partitions.
+//
+// Possible kafka error codes returned:
+//  * GroupCoordinatorNotAvailable:
+//  * NotCoordinatorForGroup:
+//  * IllegalGeneration:
+//  * RebalanceInProgress:
+//  * GroupAuthorizationFailed:
+func (cg *ConsumerGroup) syncGroup(conn coordinator, memberID string, generationID int32, memberAssignments GroupMemberAssignments) (map[string][]int32, error) {
+	request := cg.makeSyncGroupRequestV0(memberID, generationID, memberAssignments)
+	response, err := conn.syncGroup(request)
+	if err == nil && response.ErrorCode != 0 {
+		err = Error(response.ErrorCode)
+	}
+	if err != nil {
+		return nil, err
+	}
+
+	assignments := groupAssignment{}
+	reader := bufio.NewReader(bytes.NewReader(response.MemberAssignments))
+	if _, err := (&assignments).readFrom(reader, len(response.MemberAssignments)); err != nil {
+		return nil, err
+	}
+
+	if len(assignments.Topics) == 0 {
+		cg.withLogger(func(l Logger) {
+			l.Printf("received empty assignments for group, %v as member %s for generation %d", cg.config.ID, memberID, generationID)
+		})
+	}
+
+	cg.withLogger(func(l Logger) {
+		l.Printf("sync group finished for group, %v", cg.config.ID)
+	})
+
+	return assignments.Topics, nil
+}
+
+func (cg *ConsumerGroup) makeSyncGroupRequestV0(memberID string, generationID int32, memberAssignments GroupMemberAssignments) syncGroupRequestV0 {
+	request := syncGroupRequestV0{
+		GroupID:      cg.config.ID,
+		GenerationID: generationID,
+		MemberID:     memberID,
+	}
+
+	if memberAssignments != nil {
+		request.GroupAssignments = make([]syncGroupRequestGroupAssignmentV0, 0, 1)
+
+		for memberID, topics := range memberAssignments {
+			topics32 := make(map[string][]int32)
+			for topic, partitions := range topics {
+				partitions32 := make([]int32, len(partitions))
+				for i := range partitions {
+					partitions32[i] = int32(partitions[i])
+				}
+				topics32[topic] = partitions32
+			}
+			request.GroupAssignments = append(request.GroupAssignments, syncGroupRequestGroupAssignmentV0{
+				MemberID: memberID,
+				MemberAssignments: groupAssignment{
+					Version: 1,
+					Topics:  topics32,
+				}.bytes(),
+			})
+		}
+
+		cg.withLogger(func(logger Logger) {
+			logger.Printf("Syncing %d assignments for generation %d as member %s", len(request.GroupAssignments), generationID, memberID)
+		})
+	}
+
+	return request
+}
+
+func (cg *ConsumerGroup) fetchOffsets(conn coordinator, subs map[string][]int32) (map[string]map[int]int64, error) {
+	req := offsetFetchRequestV1{
+		GroupID: cg.config.ID,
+		Topics:  make([]offsetFetchRequestV1Topic, 0, len(cg.config.Topics)),
+	}
+	for _, topic := range cg.config.Topics {
+		req.Topics = append(req.Topics, offsetFetchRequestV1Topic{
+			Topic:      topic,
+			Partitions: subs[topic],
+		})
+	}
+	offsets, err := conn.offsetFetch(req)
+	if err != nil {
+		return nil, err
+	}
+
+	offsetsByTopic := make(map[string]map[int]int64)
+	for _, res := range offsets.Responses {
+		offsetsByPartition := map[int]int64{}
+		offsetsByTopic[res.Topic] = offsetsByPartition
+		for _, pr := range res.PartitionResponses {
+			for _, partition := range subs[res.Topic] {
+				if partition == pr.Partition {
+					offset := pr.Offset
+					if offset < 0 {
+						offset = cg.config.StartOffset
+					}
+					offsetsByPartition[int(partition)] = offset
+				}
+			}
+		}
+	}
+
+	return offsetsByTopic, nil
+}
+
+func (cg *ConsumerGroup) makeAssignments(assignments map[string][]int32, offsets map[string]map[int]int64) map[string][]PartitionAssignment {
+	topicAssignments := make(map[string][]PartitionAssignment)
+	for _, topic := range cg.config.Topics {
+		topicPartitions := assignments[topic]
+		topicAssignments[topic] = make([]PartitionAssignment, 0, len(topicPartitions))
+		for _, partition := range topicPartitions {
+			var offset int64
+			partitionOffsets, ok := offsets[topic]
+			if ok {
+				offset, ok = partitionOffsets[int(partition)]
+			}
+			if !ok {
+				offset = cg.config.StartOffset
+			}
+			topicAssignments[topic] = append(topicAssignments[topic], PartitionAssignment{
+				ID:     int(partition),
+				Offset: offset,
+			})
+		}
+	}
+	return topicAssignments
+}
+
+func (cg *ConsumerGroup) leaveGroup(memberID string) error {
+	// don't attempt to leave the group if no memberID was ever assigned.
+	if memberID == "" {
+		return nil
+	}
+
+	cg.withLogger(func(log Logger) {
+		log.Printf("Leaving group %s, member %s", cg.config.ID, memberID)
+	})
+
+	// IMPORTANT : leaveGroup establishes its own connection to the coordinator
+	//             because it is often called after some other operation failed.
+	//             said failure could be the result of connection-level issues,
+	//             so we want to re-establish the connection to ensure that we
+	//             are able to process the cleanup step.
+	coordinator, err := cg.coordinator()
+	if err != nil {
+		return err
+	}
+
+	_, err = coordinator.leaveGroup(leaveGroupRequestV0{
+		GroupID:  cg.config.ID,
+		MemberID: memberID,
+	})
+	if err != nil {
+		cg.withErrorLogger(func(log Logger) {
+			log.Printf("leave group failed for group, %v, and member, %v: %v", cg.config.ID, memberID, err)
+		})
+	}
+
+	_ = coordinator.Close()
+
+	return err
+}
+
+func (cg *ConsumerGroup) withLogger(do func(Logger)) {
+	if cg.config.Logger != nil {
+		do(cg.config.Logger)
+	}
+}
+
+func (cg *ConsumerGroup) withErrorLogger(do func(Logger)) {
+	if cg.config.ErrorLogger != nil {
+		do(cg.config.ErrorLogger)
+	} else {
+		cg.withLogger(do)
+	}
+}
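
For orientation, here is a minimal caller-side sketch of the generation lifecycle implemented by the consumergroup.go additions above: Next hands out a Generation, Start runs handlers until that generation ends, and ErrGroupClosed signals shutdown. It assumes the package's exported NewConsumerGroup, Next, and Generation.Start API; the broker address, group ID, and topic name are placeholders.

package main

import (
	"context"
	"errors"
	"log"

	kafka "github.com/segmentio/kafka-go"
)

func main() {
	cg, err := kafka.NewConsumerGroup(kafka.ConsumerGroupConfig{
		ID:      "example-group",            // placeholder group ID
		Brokers: []string{"localhost:9092"}, // placeholder broker
		Topics:  []string{"example-topic"},  // placeholder topic
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cg.Close()

	for {
		// Next blocks until this member has joined and synced a generation,
		// or returns ErrGroupClosed once Close has been called.
		gen, err := cg.Next(context.Background())
		if errors.Is(err, kafka.ErrGroupClosed) {
			return
		}
		if err != nil {
			log.Fatal(err)
		}

		// Start runs each handler in its own goroutine until the generation
		// ends (rebalance, partition change, or shutdown); the outer loop then
		// asks Next for the following generation.
		for topic, assignments := range gen.Assignments {
			for _, assignment := range assignments {
				topic, partition, offset := topic, assignment.ID, assignment.Offset
				gen.Start(func(ctx context.Context) {
					// consume topic/partition starting at offset until ctx ends.
					_, _, _ = topic, partition, offset
					<-ctx.Done()
				})
			}
		}
	}
}
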
diff -pruN 0.2.1-1.1/consumergroup_test.go 0.4.49+ds1-1/consumergroup_test.go
--- 0.2.1-1.1/consumergroup_test.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/consumergroup_test.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,686 @@
+package kafka
+
+import (
+	"context"
+	"errors"
+	"reflect"
+	"strconv"
+	"strings"
+	"sync"
+	"testing"
+	"time"
+)
+
+var _ coordinator = mockCoordinator{}
+
+type mockCoordinator struct {
+	closeFunc           func() error
+	findCoordinatorFunc func(findCoordinatorRequestV0) (findCoordinatorResponseV0, error)
+	joinGroupFunc       func(joinGroupRequest) (joinGroupResponse, error)
+	syncGroupFunc       func(syncGroupRequestV0) (syncGroupResponseV0, error)
+	leaveGroupFunc      func(leaveGroupRequestV0) (leaveGroupResponseV0, error)
+	heartbeatFunc       func(heartbeatRequestV0) (heartbeatResponseV0, error)
+	offsetFetchFunc     func(offsetFetchRequestV1) (offsetFetchResponseV1, error)
+	offsetCommitFunc    func(offsetCommitRequestV2) (offsetCommitResponseV2, error)
+	readPartitionsFunc  func(...string) ([]Partition, error)
+}
+
+func (c mockCoordinator) Close() error {
+	if c.closeFunc != nil {
+		return c.closeFunc()
+	}
+	return nil
+}
+
+func (c mockCoordinator) findCoordinator(req findCoordinatorRequestV0) (findCoordinatorResponseV0, error) {
+	if c.findCoordinatorFunc == nil {
+		return findCoordinatorResponseV0{}, errors.New("no findCoordinator behavior specified")
+	}
+	return c.findCoordinatorFunc(req)
+}
+
+func (c mockCoordinator) joinGroup(req joinGroupRequest) (joinGroupResponse, error) {
+	if c.joinGroupFunc == nil {
+		return joinGroupResponse{}, errors.New("no joinGroup behavior specified")
+	}
+	return c.joinGroupFunc(req)
+}
+
+func (c mockCoordinator) syncGroup(req syncGroupRequestV0) (syncGroupResponseV0, error) {
+	if c.syncGroupFunc == nil {
+		return syncGroupResponseV0{}, errors.New("no syncGroup behavior specified")
+	}
+	return c.syncGroupFunc(req)
+}
+
+func (c mockCoordinator) leaveGroup(req leaveGroupRequestV0) (leaveGroupResponseV0, error) {
+	if c.leaveGroupFunc == nil {
+		return leaveGroupResponseV0{}, errors.New("no leaveGroup behavior specified")
+	}
+	return c.leaveGroupFunc(req)
+}
+
+func (c mockCoordinator) heartbeat(req heartbeatRequestV0) (heartbeatResponseV0, error) {
+	if c.heartbeatFunc == nil {
+		return heartbeatResponseV0{}, errors.New("no heartbeat behavior specified")
+	}
+	return c.heartbeatFunc(req)
+}
+
+func (c mockCoordinator) offsetFetch(req offsetFetchRequestV1) (offsetFetchResponseV1, error) {
+	if c.offsetFetchFunc == nil {
+		return offsetFetchResponseV1{}, errors.New("no offsetFetch behavior specified")
+	}
+	return c.offsetFetchFunc(req)
+}
+
+func (c mockCoordinator) offsetCommit(req offsetCommitRequestV2) (offsetCommitResponseV2, error) {
+	if c.offsetCommitFunc == nil {
+		return offsetCommitResponseV2{}, errors.New("no offsetCommit behavior specified")
+	}
+	return c.offsetCommitFunc(req)
+}
+
+func (c mockCoordinator) readPartitions(topics ...string) ([]Partition, error) {
+	if c.readPartitionsFunc == nil {
+		return nil, errors.New("no Readpartitions behavior specified")
+	}
+	return c.readPartitionsFunc(topics...)
+}
+
+func TestValidateConsumerGroupConfig(t *testing.T) {
+	tests := []struct {
+		config       ConsumerGroupConfig
+		errorOccured bool
+	}{
+		{config: ConsumerGroupConfig{}, errorOccured: true},
+		{config: ConsumerGroupConfig{Brokers: []string{"broker1"}, HeartbeatInterval: 2}, errorOccured: true},
+		{config: ConsumerGroupConfig{Brokers: []string{"broker1"}, Topics: []string{"t1"}}, errorOccured: true},
+		{config: ConsumerGroupConfig{Brokers: []string{"broker1"}, Topics: []string{"t1"}, ID: "group1", HeartbeatInterval: -1}, errorOccured: true},
+		{config: ConsumerGroupConfig{Brokers: []string{"broker1"}, Topics: []string{"t1"}, ID: "group1", SessionTimeout: -1}, errorOccured: true},
+		{config: ConsumerGroupConfig{Brokers: []string{"broker1"}, Topics: []string{"t1"}, ID: "group1", HeartbeatInterval: 2, SessionTimeout: -1}, errorOccured: true},
+		{config: ConsumerGroupConfig{Brokers: []string{"broker1"}, Topics: []string{"t1"}, ID: "group1", HeartbeatInterval: 2, SessionTimeout: 2, RebalanceTimeout: -2}, errorOccured: true},
+		{config: ConsumerGroupConfig{Brokers: []string{"broker1"}, Topics: []string{"t1"}, ID: "group1", HeartbeatInterval: 2, SessionTimeout: 2, RebalanceTimeout: 2, RetentionTime: -1}, errorOccured: true},
+		{config: ConsumerGroupConfig{Brokers: []string{"broker1"}, Topics: []string{"t1"}, ID: "group1", HeartbeatInterval: 2, SessionTimeout: 2, RebalanceTimeout: 2, RetentionTime: 1, StartOffset: 123}, errorOccured: true},
+		{config: ConsumerGroupConfig{Brokers: []string{"broker1"}, Topics: []string{"t1"}, ID: "group1", HeartbeatInterval: 2, SessionTimeout: 2, RebalanceTimeout: 2, RetentionTime: 1, PartitionWatchInterval: -1}, errorOccured: true},
+		{config: ConsumerGroupConfig{Brokers: []string{"broker1"}, Topics: []string{"t1"}, ID: "group1", HeartbeatInterval: 2, SessionTimeout: 2, RebalanceTimeout: 2, RetentionTime: 1, PartitionWatchInterval: 1, JoinGroupBackoff: -1}, errorOccured: true},
+		{config: ConsumerGroupConfig{Brokers: []string{"broker1"}, Topics: []string{"t1"}, ID: "group1", HeartbeatInterval: 2, SessionTimeout: 2, RebalanceTimeout: 2, RetentionTime: 1, PartitionWatchInterval: 1, JoinGroupBackoff: 1}, errorOccured: false},
+	}
+	for _, test := range tests {
+		err := test.config.Validate()
+		if test.errorOccured && err == nil {
+			t.Error("expected an error", test.config)
+		}
+		if !test.errorOccured && err != nil {
+			t.Error("expected no error, got", err, test.config)
+		}
+	}
+}
+
+func TestReaderAssignTopicPartitions(t *testing.T) {
+	conn := &mockCoordinator{
+		readPartitionsFunc: func(...string) ([]Partition, error) {
+			return []Partition{
+				{
+					Topic: "topic-1",
+					ID:    0,
+				},
+				{
+					Topic: "topic-1",
+					ID:    1,
+				},
+				{
+					Topic: "topic-1",
+					ID:    2,
+				},
+				{
+					Topic: "topic-2",
+					ID:    0,
+				},
+			}, nil
+		},
+	}
+
+	newJoinGroupResponse := func(topicsByMemberID map[string][]string) func(v apiVersion) joinGroupResponse {
+		return func(v apiVersion) joinGroupResponse {
+			resp := joinGroupResponse{
+				v:             v,
+				GroupProtocol: RoundRobinGroupBalancer{}.ProtocolName(),
+			}
+
+			for memberID, topics := range topicsByMemberID {
+				resp.Members = append(resp.Members, joinGroupResponseMember{
+					MemberID: memberID,
+					MemberMetadata: groupMetadata{
+						Topics: topics,
+					}.bytes(),
+				})
+			}
+
+			return resp
+		}
+	}
+
+	testCases := map[string]struct {
+		MembersFunc func(v apiVersion) joinGroupResponse
+		Assignments GroupMemberAssignments
+	}{
+		"nil": {
+			MembersFunc: newJoinGroupResponse(nil),
+			Assignments: GroupMemberAssignments{},
+		},
+		"one member, one topic": {
+			MembersFunc: newJoinGroupResponse(map[string][]string{
+				"member-1": {"topic-1"},
+			}),
+			Assignments: GroupMemberAssignments{
+				"member-1": map[string][]int{
+					"topic-1": {0, 1, 2},
+				},
+			},
+		},
+		"one member, two topics": {
+			MembersFunc: newJoinGroupResponse(map[string][]string{
+				"member-1": {"topic-1", "topic-2"},
+			}),
+			Assignments: GroupMemberAssignments{
+				"member-1": map[string][]int{
+					"topic-1": {0, 1, 2},
+					"topic-2": {0},
+				},
+			},
+		},
+		"two members, one topic": {
+			MembersFunc: newJoinGroupResponse(map[string][]string{
+				"member-1": {"topic-1"},
+				"member-2": {"topic-1"},
+			}),
+			Assignments: GroupMemberAssignments{
+				"member-1": map[string][]int{
+					"topic-1": {0, 2},
+				},
+				"member-2": map[string][]int{
+					"topic-1": {1},
+				},
+			},
+		},
+		"two members, two unshared topics": {
+			MembersFunc: newJoinGroupResponse(map[string][]string{
+				"member-1": {"topic-1"},
+				"member-2": {"topic-2"},
+			}),
+			Assignments: GroupMemberAssignments{
+				"member-1": map[string][]int{
+					"topic-1": {0, 1, 2},
+				},
+				"member-2": map[string][]int{
+					"topic-2": {0},
+				},
+			},
+		},
+	}
+
+	supportedVersions := []apiVersion{v1, v2} // joinGroup versions
+	for label, tc := range testCases {
+		for _, v := range supportedVersions {
+			t.Run(label+"_v"+strconv.Itoa(int(v)), func(t *testing.T) {
+				cg := ConsumerGroup{}
+				cg.config.GroupBalancers = []GroupBalancer{
+					RangeGroupBalancer{},
+					RoundRobinGroupBalancer{},
+				}
+				assignments, err := cg.assignTopicPartitions(conn, tc.MembersFunc(v))
+				if err != nil {
+					t.Fatalf("bad err: %v", err)
+				}
+				if !reflect.DeepEqual(tc.Assignments, assignments) {
+					t.Errorf("expected %v; got %v", tc.Assignments, assignments)
+				}
+			})
+		}
+	}
+}
+
+func TestConsumerGroup(t *testing.T) {
+	tests := []struct {
+		scenario string
+		function func(*testing.T, context.Context, *ConsumerGroup)
+	}{
+		{
+			scenario: "Next returns generations",
+			function: func(t *testing.T, ctx context.Context, cg *ConsumerGroup) {
+				gen1, err := cg.Next(ctx)
+				if gen1 == nil {
+					t.Fatalf("expected generation 1 not to be nil")
+				}
+				if err != nil {
+					t.Fatalf("expected no error, but got %+v", err)
+				}
+				// returning from this function should cause the generation to
+				// exit.
+				gen1.Start(func(context.Context) {})
+
+				// if this fails due to context timeout, it would indicate that
+				// the first generation never completed after its Start handler returned.
+				gen2, err := cg.Next(ctx)
+				if gen2 == nil {
+					t.Fatalf("expected generation 2 not to be nil")
+				}
+				if err != nil {
+					t.Fatalf("expected no error, but got %+v", err)
+				}
+
+				if gen1.ID == gen2.ID {
+					t.Errorf("generation ID should have changed, but it stayed as %d", gen1.ID)
+				}
+				if gen1.GroupID != gen2.GroupID {
+					t.Errorf("mismatched group ID between generations: %s and %s", gen1.GroupID, gen2.GroupID)
+				}
+				if gen1.MemberID != gen2.MemberID {
+					t.Errorf("mismatched member ID between generations: %s and %s", gen1.MemberID, gen2.MemberID)
+				}
+			},
+		},
+
+		{
+			scenario: "Next returns ctx.Err() on canceled context",
+			function: func(t *testing.T, _ context.Context, cg *ConsumerGroup) {
+				ctx, cancel := context.WithCancel(context.Background())
+				cancel()
+
+				gen, err := cg.Next(ctx)
+				if gen != nil {
+					t.Errorf("expected generation to be nil")
+				}
+				if !errors.Is(err, context.Canceled) {
+					t.Errorf("expected context.Canceled, but got %+v", err)
+				}
+			},
+		},
+
+		{
+			scenario: "Next returns ErrGroupClosed on closed group",
+			function: func(t *testing.T, ctx context.Context, cg *ConsumerGroup) {
+				if err := cg.Close(); err != nil {
+					t.Fatal(err)
+				}
+				gen, err := cg.Next(ctx)
+				if gen != nil {
+					t.Errorf("expected generation to be nil")
+				}
+				if !errors.Is(err, ErrGroupClosed) {
+					t.Errorf("expected ErrGroupClosed, but got %+v", err)
+				}
+			},
+		},
+	}
+
+	topic := makeTopic()
+	createTopic(t, topic, 1)
+	defer deleteTopic(t, topic)
+
+	for _, test := range tests {
+		t.Run(test.scenario, func(t *testing.T) {
+			group, err := NewConsumerGroup(ConsumerGroupConfig{
+				ID:                makeGroupID(),
+				Topics:            []string{topic},
+				Brokers:           []string{"localhost:9092"},
+				HeartbeatInterval: 2 * time.Second,
+				RebalanceTimeout:  2 * time.Second,
+				RetentionTime:     time.Hour,
+				Logger:            &testKafkaLogger{T: t},
+			})
+			if err != nil {
+				t.Fatal(err)
+			}
+			defer group.Close()
+
+			ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+			defer cancel()
+
+			test.function(t, ctx, group)
+		})
+	}
+}
+
+func TestConsumerGroupErrors(t *testing.T) {
+	var left []string
+	var lock sync.Mutex
+	mc := mockCoordinator{
+		leaveGroupFunc: func(req leaveGroupRequestV0) (leaveGroupResponseV0, error) {
+			lock.Lock()
+			left = append(left, req.MemberID)
+			lock.Unlock()
+			return leaveGroupResponseV0{}, nil
+		},
+	}
+	assertLeftGroup := func(t *testing.T, memberID string) {
+		lock.Lock()
+		if !reflect.DeepEqual(left, []string{memberID}) {
+			t.Errorf("expected abc to have left group once, members left: %v", left)
+		}
+		left = left[0:0]
+		lock.Unlock()
+	}
+
+	// NOTE : the mocked behavior is accumulated across the tests, so they are
+	// 		  NOT run in parallel.  this simplifies test setup so that each test
+	// 	 	  can specify only the error behavior required and leverage setup
+	//        from previous steps.
+	tests := []struct {
+		scenario string
+		prepare  func(*mockCoordinator)
+		function func(*testing.T, context.Context, *ConsumerGroup)
+	}{
+		{
+			scenario: "fails to find coordinator (general error)",
+			prepare: func(mc *mockCoordinator) {
+				mc.findCoordinatorFunc = func(findCoordinatorRequestV0) (findCoordinatorResponseV0, error) {
+					return findCoordinatorResponseV0{}, errors.New("dial error")
+				}
+			},
+			function: func(t *testing.T, ctx context.Context, group *ConsumerGroup) {
+				gen, err := group.Next(ctx)
+				if err == nil {
+					t.Errorf("expected an error")
+				} else if err.Error() != "dial error" {
+					t.Errorf("got wrong error: %+v", err)
+				}
+				if gen != nil {
+					t.Error("expected a nil consumer group generation")
+				}
+			},
+		},
+
+		{
+			scenario: "fails to find coordinator (error code in response)",
+			prepare: func(mc *mockCoordinator) {
+				mc.findCoordinatorFunc = func(findCoordinatorRequestV0) (findCoordinatorResponseV0, error) {
+					return findCoordinatorResponseV0{
+						ErrorCode: int16(NotCoordinatorForGroup),
+					}, nil
+				}
+			},
+			function: func(t *testing.T, ctx context.Context, group *ConsumerGroup) {
+				gen, err := group.Next(ctx)
+				if err == nil {
+					t.Errorf("expected an error")
+				} else if !errors.Is(err, NotCoordinatorForGroup) {
+					t.Errorf("got wrong error: %+v", err)
+				}
+				if gen != nil {
+					t.Error("expected a nil consumer group generation")
+				}
+			},
+		},
+
+		{
+			scenario: "fails to join group (general error)",
+			prepare: func(mc *mockCoordinator) {
+				mc.findCoordinatorFunc = func(findCoordinatorRequestV0) (findCoordinatorResponseV0, error) {
+					return findCoordinatorResponseV0{
+						Coordinator: findCoordinatorResponseCoordinatorV0{
+							NodeID: 1,
+							Host:   "foo.bar.com",
+							Port:   12345,
+						},
+					}, nil
+				}
+				mc.joinGroupFunc = func(joinGroupRequest) (joinGroupResponse, error) {
+					return joinGroupResponse{}, errors.New("join group failed")
+				}
+				// NOTE : no stub for leaving the group b/c the member never joined.
+			},
+			function: func(t *testing.T, ctx context.Context, group *ConsumerGroup) {
+				gen, err := group.Next(ctx)
+				if err == nil {
+					t.Errorf("expected an error")
+				} else if err.Error() != "join group failed" {
+					t.Errorf("got wrong error: %+v", err)
+				}
+				if gen != nil {
+					t.Error("expected a nil consumer group generation")
+				}
+			},
+		},
+
+		{
+			scenario: "fails to join group (error code)",
+			prepare: func(mc *mockCoordinator) {
+				mc.findCoordinatorFunc = func(findCoordinatorRequestV0) (findCoordinatorResponseV0, error) {
+					return findCoordinatorResponseV0{
+						Coordinator: findCoordinatorResponseCoordinatorV0{
+							NodeID: 1,
+							Host:   "foo.bar.com",
+							Port:   12345,
+						},
+					}, nil
+				}
+				mc.joinGroupFunc = func(joinGroupRequest) (joinGroupResponse, error) {
+					return joinGroupResponse{
+						ErrorCode: int16(InvalidTopic),
+					}, nil
+				}
+				// NOTE : no stub for leaving the group b/c the member never joined.
+			},
+			function: func(t *testing.T, ctx context.Context, group *ConsumerGroup) {
+				gen, err := group.Next(ctx)
+				if err == nil {
+					t.Errorf("expected an error")
+				} else if !errors.Is(err, InvalidTopic) {
+					t.Errorf("got wrong error: %+v", err)
+				}
+				if gen != nil {
+					t.Error("expected a nil consumer group generation")
+				}
+			},
+		},
+
+		{
+			scenario: "fails to join group (leader, unsupported protocol)",
+			prepare: func(mc *mockCoordinator) {
+				mc.joinGroupFunc = func(joinGroupRequest) (joinGroupResponse, error) {
+					return joinGroupResponse{
+						GenerationID:  12345,
+						GroupProtocol: "foo",
+						LeaderID:      "abc",
+						MemberID:      "abc",
+					}, nil
+				}
+			},
+			function: func(t *testing.T, ctx context.Context, group *ConsumerGroup) {
+				gen, err := group.Next(ctx)
+				if err == nil {
+					t.Errorf("expected an error")
+				} else if !strings.HasPrefix(err.Error(), "unable to find selected balancer") {
+					t.Errorf("got wrong error: %+v", err)
+				}
+				if gen != nil {
+					t.Error("expected a nil consumer group generation")
+				}
+				assertLeftGroup(t, "abc")
+			},
+		},
+
+		{
+			scenario: "fails to sync group (general error)",
+			prepare: func(mc *mockCoordinator) {
+				mc.joinGroupFunc = func(joinGroupRequest) (joinGroupResponse, error) {
+					return joinGroupResponse{
+						GenerationID:  12345,
+						GroupProtocol: "range",
+						LeaderID:      "abc",
+						MemberID:      "abc",
+					}, nil
+				}
+				mc.readPartitionsFunc = func(...string) ([]Partition, error) {
+					return []Partition{}, nil
+				}
+				mc.syncGroupFunc = func(syncGroupRequestV0) (syncGroupResponseV0, error) {
+					return syncGroupResponseV0{}, errors.New("sync group failed")
+				}
+			},
+			function: func(t *testing.T, ctx context.Context, group *ConsumerGroup) {
+				gen, err := group.Next(ctx)
+				if err == nil {
+					t.Errorf("expected an error")
+				} else if err.Error() != "sync group failed" {
+					t.Errorf("got wrong error: %+v", err)
+				}
+				if gen != nil {
+					t.Error("expected a nil consumer group generation")
+				}
+				assertLeftGroup(t, "abc")
+			},
+		},
+
+		{
+			scenario: "fails to sync group (error code)",
+			prepare: func(mc *mockCoordinator) {
+				mc.syncGroupFunc = func(syncGroupRequestV0) (syncGroupResponseV0, error) {
+					return syncGroupResponseV0{
+						ErrorCode: int16(InvalidTopic),
+					}, nil
+				}
+			},
+			function: func(t *testing.T, ctx context.Context, group *ConsumerGroup) {
+				gen, err := group.Next(ctx)
+				if err == nil {
+					t.Errorf("expected an error")
+				} else if !errors.Is(err, InvalidTopic) {
+					t.Errorf("got wrong error: %+v", err)
+				}
+				if gen != nil {
+					t.Error("expected a nil consumer group generation")
+				}
+				assertLeftGroup(t, "abc")
+			},
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.scenario, func(t *testing.T) {
+
+			tt.prepare(&mc)
+
+			group, err := NewConsumerGroup(ConsumerGroupConfig{
+				ID:                makeGroupID(),
+				Topics:            []string{"test"},
+				Brokers:           []string{"no-such-broker"}, // should not attempt to actually dial anything
+				HeartbeatInterval: 2 * time.Second,
+				RebalanceTimeout:  time.Second,
+				JoinGroupBackoff:  time.Second,
+				RetentionTime:     time.Hour,
+				connect: func(*Dialer, ...string) (coordinator, error) {
+					return mc, nil
+				},
+				Logger: &testKafkaLogger{T: t},
+			})
+			if err != nil {
+				t.Fatal(err)
+			}
+
+			// these tests should all execute fairly quickly since they're
+			// mocking the coordinator.
+			ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+			defer cancel()
+
+			tt.function(t, ctx, group)
+
+			if err := group.Close(); err != nil {
+				t.Errorf("error on close: %+v", err)
+			}
+		})
+	}
+}
+
+// todo : test for multi-topic?
+
+func TestGenerationExitsOnPartitionChange(t *testing.T) {
+	var count int
+	partitions := [][]Partition{
+		{
+			Partition{
+				Topic: "topic-1",
+				ID:    0,
+			},
+		},
+		{
+			Partition{
+				Topic: "topic-1",
+				ID:    0,
+			},
+			{
+				Topic: "topic-1",
+				ID:    1,
+			},
+		},
+	}
+
+	conn := mockCoordinator{
+		readPartitionsFunc: func(...string) ([]Partition, error) {
+			p := partitions[count]
+			// cap the count at len(partitions)-1 so readPartitions doesn't go out of bounds
+			// and long-running tests don't fail
+			if count < len(partitions)-1 {
+				count++
+			}
+			return p, nil
+		},
+	}
+
+	// Sadly this test is time based, so at the end we check whether the partition watcher ran to completion
+	// within the allotted time. The allotted time is 4x the PartitionWatchInterval.
+	now := time.Now()
+	watchTime := 500 * time.Millisecond
+
+	gen := Generation{
+		conn:     conn,
+		done:     make(chan struct{}),
+		joined:   make(chan struct{}),
+		log:      func(func(Logger)) {},
+		logError: func(func(Logger)) {},
+	}
+
+	done := make(chan struct{})
+	go func() {
+		gen.partitionWatcher(watchTime, "topic-1")
+		close(done)
+	}()
+
+	select {
+	case <-time.After(5 * time.Second):
+		t.Fatal("timed out waiting for partition watcher to exit")
+	case <-done:
+		if time.Since(now).Seconds() > watchTime.Seconds()*4 {
+			t.Error("partitionWatcher didn't see update")
+		}
+	}
+}
+
+func TestGenerationStartsFunctionAfterClosed(t *testing.T) {
+	gen := Generation{
+		conn:     &mockCoordinator{},
+		done:     make(chan struct{}),
+		joined:   make(chan struct{}),
+		log:      func(func(Logger)) {},
+		logError: func(func(Logger)) {},
+	}
+
+	gen.close()
+
+	ch := make(chan error)
+	gen.Start(func(ctx context.Context) {
+		<-ctx.Done()
+		ch <- ctx.Err()
+	})
+
+	select {
+	case <-time.After(time.Second):
+		t.Fatal("timed out waiting for func to run")
+	case err := <-ch:
+		if !errors.Is(err, ErrGenerationEnded) {
+			t.Fatalf("expected %v but got %v", ErrGenerationEnded, err)
+		}
+	}
+}
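
The partition watcher exercised by TestGenerationExitsOnPartitionChange is enabled through two ConsumerGroupConfig fields referenced in the consumergroup.go changes above. A hedged configuration sketch; the broker, group, and topic values are placeholders.

package main

import (
	"log"
	"time"

	kafka "github.com/segmentio/kafka-go"
)

func main() {
	cg, err := kafka.NewConsumerGroup(kafka.ConsumerGroupConfig{
		ID:      "example-group",            // placeholder
		Brokers: []string{"localhost:9092"}, // placeholder
		Topics:  []string{"example-topic"},  // placeholder

		// With WatchPartitionChanges enabled, nextGeneration spawns a
		// partitionWatcher per topic; when the partition count changes, the
		// current generation ends so Next can hand out a rebalanced one.
		WatchPartitionChanges:  true,
		PartitionWatchInterval: 5 * time.Second,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cg.Close()
}
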
diff -pruN 0.2.1-1.1/crc32.go 0.4.49+ds1-1/crc32.go
--- 0.2.1-1.1/crc32.go	2018-11-20 17:06:32.000000000 +0000
+++ 0.4.49+ds1-1/crc32.go	2025-08-21 19:15:53.000000000 +0000
@@ -1,80 +1,55 @@
 package kafka
 
 import (
-	"bytes"
 	"encoding/binary"
 	"hash/crc32"
-	"sync"
 )
 
-func crc32OfMessage(magicByte int8, attributes int8, timestamp int64, key []byte, value []byte) uint32 {
-	b := acquireCrc32Buffer()
-	b.writeInt8(magicByte)
-	b.writeInt8(attributes)
-	if magicByte != 0 {
-		b.writeInt64(timestamp)
-	}
-	b.writeBytes(key)
-	b.writeBytes(value)
-	sum := b.sum
-	releaseCrc32Buffer(b)
-	return sum
-}
-
-type crc32Buffer struct {
-	sum uint32
-	buf bytes.Buffer
+type crc32Writer struct {
+	table  *crc32.Table
+	buffer [8]byte
+	crc32  uint32
 }
 
-func (c *crc32Buffer) writeInt8(i int8) {
-	c.buf.Truncate(0)
-	c.buf.WriteByte(byte(i))
-	c.update()
+func (w *crc32Writer) update(b []byte) {
+	w.crc32 = crc32.Update(w.crc32, w.table, b)
 }
 
-func (c *crc32Buffer) writeInt32(i int32) {
-	a := [4]byte{}
-	binary.BigEndian.PutUint32(a[:], uint32(i))
-	c.buf.Truncate(0)
-	c.buf.Write(a[:])
-	c.update()
+func (w *crc32Writer) writeInt8(i int8) {
+	w.buffer[0] = byte(i)
+	w.update(w.buffer[:1])
 }
 
-func (c *crc32Buffer) writeInt64(i int64) {
-	a := [8]byte{}
-	binary.BigEndian.PutUint64(a[:], uint64(i))
-	c.buf.Truncate(0)
-	c.buf.Write(a[:])
-	c.update()
+func (w *crc32Writer) writeInt16(i int16) {
+	binary.BigEndian.PutUint16(w.buffer[:2], uint16(i))
+	w.update(w.buffer[:2])
 }
 
-func (c *crc32Buffer) writeBytes(b []byte) {
-	if b == nil {
-		c.writeInt32(-1)
-	} else {
-		c.writeInt32(int32(len(b)))
-	}
-	c.sum = crc32Update(c.sum, b)
+func (w *crc32Writer) writeInt32(i int32) {
+	binary.BigEndian.PutUint32(w.buffer[:4], uint32(i))
+	w.update(w.buffer[:4])
 }
 
-func (c *crc32Buffer) update() {
-	c.sum = crc32Update(c.sum, c.buf.Bytes())
+func (w *crc32Writer) writeInt64(i int64) {
+	binary.BigEndian.PutUint64(w.buffer[:8], uint64(i))
+	w.update(w.buffer[:8])
 }
 
-func crc32Update(sum uint32, b []byte) uint32 {
-	return crc32.Update(sum, crc32.IEEETable, b)
-}
-
-var crc32BufferPool = sync.Pool{
-	New: func() interface{} { return &crc32Buffer{} },
+func (w *crc32Writer) writeBytes(b []byte) {
+	n := len(b)
+	if b == nil {
+		n = -1
+	}
+	w.writeInt32(int32(n))
+	w.update(b)
 }
 
-func acquireCrc32Buffer() *crc32Buffer {
-	c := crc32BufferPool.Get().(*crc32Buffer)
-	c.sum = 0
-	return c
+func (w *crc32Writer) Write(b []byte) (int, error) {
+	w.update(b)
+	return len(b), nil
 }
 
-func releaseCrc32Buffer(b *crc32Buffer) {
-	crc32BufferPool.Put(b)
+func (w *crc32Writer) WriteString(s string) (int, error) {
+	w.update([]byte(s))
+	return len(s), nil
 }
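
The rewritten crc32.go replaces the pooled crc32Buffer with an incremental crc32Writer built on crc32.Update. A minimal sketch of the equivalence (crc32Writer is unexported, so this would have to live in a _test.go file inside package kafka): feeding fields through the writer yields the same checksum as hashing the concatenated big-endian encoding in one shot.

package kafka

import (
	"encoding/binary"
	"hash/crc32"
	"testing"
)

func TestCrc32WriterMatchesOneShot(t *testing.T) {
	key, value := []byte("k"), []byte("v")

	w := &crc32Writer{table: crc32.IEEETable}
	w.writeInt8(1)      // magic byte
	w.writeInt8(0)      // attributes
	w.writeInt64(42)    // timestamp
	w.writeBytes(key)   // int32 length prefix + key
	w.writeBytes(value) // int32 length prefix + value

	// Build the same byte sequence by hand: two int8 values, an int64, then
	// each byte slice prefixed with its int32 length, all big-endian.
	var b8 [8]byte
	var b4 [4]byte
	buf := []byte{1, 0}
	binary.BigEndian.PutUint64(b8[:], 42)
	buf = append(buf, b8[:]...)
	binary.BigEndian.PutUint32(b4[:], uint32(len(key)))
	buf = append(append(buf, b4[:]...), key...)
	binary.BigEndian.PutUint32(b4[:], uint32(len(value)))
	buf = append(append(buf, b4[:]...), value...)

	if sum := crc32.ChecksumIEEE(buf); w.crc32 != sum {
		t.Errorf("checksum mismatch: %08x != %08x", w.crc32, sum)
	}
}
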
diff -pruN 0.2.1-1.1/crc32_test.go 0.4.49+ds1-1/crc32_test.go
--- 0.2.1-1.1/crc32_test.go	2018-11-20 17:06:32.000000000 +0000
+++ 0.4.49+ds1-1/crc32_test.go	2025-08-21 19:15:53.000000000 +0000
@@ -1,15 +1,12 @@
 package kafka
 
 import (
-	"bufio"
 	"bytes"
 	"hash/crc32"
 	"testing"
 )
 
 func TestMessageCRC32(t *testing.T) {
-	t.Parallel()
-
 	m := message{
 		MagicByte: 1,
 		Timestamp: 42,
@@ -18,15 +15,14 @@ func TestMessageCRC32(t *testing.T) {
 	}
 
 	b := &bytes.Buffer{}
-	w := bufio.NewWriter(b)
-	write(w, m)
-	w.Flush()
+	w := &writeBuffer{w: b}
+	w.write(m)
 
-	h := crc32.NewIEEE()
+	h := crc32.New(crc32.IEEETable)
 	h.Write(b.Bytes()[4:])
 
 	sum1 := h.Sum32()
-	sum2 := uint32(m.crc32())
+	sum2 := uint32(m.crc32(&crc32Writer{table: crc32.IEEETable}))
 
 	if sum1 != sum2 {
 		t.Error("bad CRC32:")
diff -pruN 0.2.1-1.1/createacls.go 0.4.49+ds1-1/createacls.go
--- 0.2.1-1.1/createacls.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/createacls.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,202 @@
+package kafka
+
+import (
+	"context"
+	"fmt"
+	"net"
+	"strings"
+	"time"
+
+	"github.com/segmentio/kafka-go/protocol/createacls"
+)
+
+// CreateACLsRequest represents a request sent to a kafka broker to add
+// new ACLs.
+type CreateACLsRequest struct {
+	// Address of the kafka broker to send the request to.
+	Addr net.Addr
+
+	// List of ACL to create.
+	ACLs []ACLEntry
+}
+
+// CreateACLsResponse represents a response from a kafka broker to an ACL
+// creation request.
+type CreateACLsResponse struct {
+	// The amount of time that the broker throttled the request.
+	Throttle time.Duration
+
+	// List of errors that occurred while attempting to create
+	// the ACLs.
+	//
+	// The errors contain the kafka error code. Programs may use the standard
+	// errors.Is function to test the error against kafka error codes.
+	Errors []error
+}
+
+type ACLPermissionType int8
+
+const (
+	ACLPermissionTypeUnknown ACLPermissionType = 0
+	ACLPermissionTypeAny     ACLPermissionType = 1
+	ACLPermissionTypeDeny    ACLPermissionType = 2
+	ACLPermissionTypeAllow   ACLPermissionType = 3
+)
+
+func (apt ACLPermissionType) String() string {
+	mapping := map[ACLPermissionType]string{
+		ACLPermissionTypeUnknown: "Unknown",
+		ACLPermissionTypeAny:     "Any",
+		ACLPermissionTypeDeny:    "Deny",
+		ACLPermissionTypeAllow:   "Allow",
+	}
+	s, ok := mapping[apt]
+	if !ok {
+		s = mapping[ACLPermissionTypeUnknown]
+	}
+	return s
+}
+
+// MarshalText transforms an ACLPermissionType into its string representation.
+func (apt ACLPermissionType) MarshalText() ([]byte, error) {
+	return []byte(apt.String()), nil
+}
+
+// UnmarshalText takes a string representation of the resource type and converts it to an ACLPermissionType.
+func (apt *ACLPermissionType) UnmarshalText(text []byte) error {
+	normalized := strings.ToLower(string(text))
+	mapping := map[string]ACLPermissionType{
+		"unknown": ACLPermissionTypeUnknown,
+		"any":     ACLPermissionTypeAny,
+		"deny":    ACLPermissionTypeDeny,
+		"allow":   ACLPermissionTypeAllow,
+	}
+	parsed, ok := mapping[normalized]
+	if !ok {
+		*apt = ACLPermissionTypeUnknown
+		return fmt.Errorf("cannot parse %s as an ACLPermissionType", normalized)
+	}
+	*apt = parsed
+	return nil
+}
+
+type ACLOperationType int8
+
+const (
+	ACLOperationTypeUnknown         ACLOperationType = 0
+	ACLOperationTypeAny             ACLOperationType = 1
+	ACLOperationTypeAll             ACLOperationType = 2
+	ACLOperationTypeRead            ACLOperationType = 3
+	ACLOperationTypeWrite           ACLOperationType = 4
+	ACLOperationTypeCreate          ACLOperationType = 5
+	ACLOperationTypeDelete          ACLOperationType = 6
+	ACLOperationTypeAlter           ACLOperationType = 7
+	ACLOperationTypeDescribe        ACLOperationType = 8
+	ACLOperationTypeClusterAction   ACLOperationType = 9
+	ACLOperationTypeDescribeConfigs ACLOperationType = 10
+	ACLOperationTypeAlterConfigs    ACLOperationType = 11
+	ACLOperationTypeIdempotentWrite ACLOperationType = 12
+)
+
+func (aot ACLOperationType) String() string {
+	mapping := map[ACLOperationType]string{
+		ACLOperationTypeUnknown:         "Unknown",
+		ACLOperationTypeAny:             "Any",
+		ACLOperationTypeAll:             "All",
+		ACLOperationTypeRead:            "Read",
+		ACLOperationTypeWrite:           "Write",
+		ACLOperationTypeCreate:          "Create",
+		ACLOperationTypeDelete:          "Delete",
+		ACLOperationTypeAlter:           "Alter",
+		ACLOperationTypeDescribe:        "Describe",
+		ACLOperationTypeClusterAction:   "ClusterAction",
+		ACLOperationTypeDescribeConfigs: "DescribeConfigs",
+		ACLOperationTypeAlterConfigs:    "AlterConfigs",
+		ACLOperationTypeIdempotentWrite: "IdempotentWrite",
+	}
+	s, ok := mapping[aot]
+	if !ok {
+		s = mapping[ACLOperationTypeUnknown]
+	}
+	return s
+}
+
+// MarshalText transforms an ACLOperationType into its string representation.
+func (aot ACLOperationType) MarshalText() ([]byte, error) {
+	return []byte(aot.String()), nil
+}
+
+// UnmarshalText takes a string representation of the resource type and converts it to an ACLPermissionType.
+func (aot *ACLOperationType) UnmarshalText(text []byte) error {
+	normalized := strings.ToLower(string(text))
+	mapping := map[string]ACLOperationType{
+		"unknown":         ACLOperationTypeUnknown,
+		"any":             ACLOperationTypeAny,
+		"all":             ACLOperationTypeAll,
+		"read":            ACLOperationTypeRead,
+		"write":           ACLOperationTypeWrite,
+		"create":          ACLOperationTypeCreate,
+		"delete":          ACLOperationTypeDelete,
+		"alter":           ACLOperationTypeAlter,
+		"describe":        ACLOperationTypeDescribe,
+		"clusteraction":   ACLOperationTypeClusterAction,
+		"describeconfigs": ACLOperationTypeDescribeConfigs,
+		"alterconfigs":    ACLOperationTypeAlterConfigs,
+		"idempotentwrite": ACLOperationTypeIdempotentWrite,
+	}
+	parsed, ok := mapping[normalized]
+	if !ok {
+		*aot = ACLOperationTypeUnknown
+		return fmt.Errorf("cannot parse %s as an ACLOperationType", normalized)
+	}
+	*aot = parsed
+	return nil
+
+}
+
+type ACLEntry struct {
+	ResourceType        ResourceType
+	ResourceName        string
+	ResourcePatternType PatternType
+	Principal           string
+	Host                string
+	Operation           ACLOperationType
+	PermissionType      ACLPermissionType
+}
+
+// CreateACLs sends ACLs creation request to a kafka broker and returns the
+// response.
+func (c *Client) CreateACLs(ctx context.Context, req *CreateACLsRequest) (*CreateACLsResponse, error) {
+	acls := make([]createacls.RequestACLs, 0, len(req.ACLs))
+
+	for _, acl := range req.ACLs {
+		acls = append(acls, createacls.RequestACLs{
+			ResourceType:        int8(acl.ResourceType),
+			ResourceName:        acl.ResourceName,
+			ResourcePatternType: int8(acl.ResourcePatternType),
+			Principal:           acl.Principal,
+			Host:                acl.Host,
+			Operation:           int8(acl.Operation),
+			PermissionType:      int8(acl.PermissionType),
+		})
+	}
+
+	m, err := c.roundTrip(ctx, req.Addr, &createacls.Request{
+		Creations: acls,
+	})
+	if err != nil {
+		return nil, fmt.Errorf("kafka.(*Client).CreateACLs: %w", err)
+	}
+
+	res := m.(*createacls.Response)
+	ret := &CreateACLsResponse{
+		Throttle: makeDuration(res.ThrottleTimeMs),
+		Errors:   make([]error, 0, len(res.Results)),
+	}
+
+	for _, t := range res.Results {
+		ret.Errors = append(ret.Errors, makeError(t.ErrorCode, t.ErrorMessage))
+	}
+
+	return ret, nil
+}
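
Because ACLOperationType and ACLPermissionType implement encoding.TextMarshaler and encoding.TextUnmarshaler, they round-trip through text-based formats such as JSON. A short illustrative sketch; the aclRule struct is hypothetical and errors are elided for brevity.

package main

import (
	"encoding/json"
	"fmt"

	kafka "github.com/segmentio/kafka-go"
)

type aclRule struct {
	Operation  kafka.ACLOperationType  `json:"operation"`
	Permission kafka.ACLPermissionType `json:"permission"`
}

func main() {
	out, _ := json.Marshal(aclRule{
		Operation:  kafka.ACLOperationTypeRead,
		Permission: kafka.ACLPermissionTypeAllow,
	})
	fmt.Println(string(out)) // {"operation":"Read","permission":"Allow"}

	// UnmarshalText lower-cases its input, so mixed-case spellings parse too.
	var in aclRule
	_ = json.Unmarshal([]byte(`{"operation":"write","permission":"deny"}`), &in)
	fmt.Println(in.Operation, in.Permission) // Write Deny
}
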
diff -pruN 0.2.1-1.1/createacls_test.go 0.4.49+ds1-1/createacls_test.go
--- 0.2.1-1.1/createacls_test.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/createacls_test.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,86 @@
+package kafka
+
+import (
+	"context"
+	"testing"
+
+	ktesting "github.com/segmentio/kafka-go/testing"
+)
+
+func TestClientCreateACLs(t *testing.T) {
+	if !ktesting.KafkaIsAtLeast("2.0.1") {
+		return
+	}
+
+	client, shutdown := newLocalClient()
+	defer shutdown()
+
+	topic := makeTopic()
+	group := makeGroupID()
+
+	createRes, err := client.CreateACLs(context.Background(), &CreateACLsRequest{
+		ACLs: []ACLEntry{
+			{
+				Principal:           "User:alice",
+				PermissionType:      ACLPermissionTypeAllow,
+				Operation:           ACLOperationTypeRead,
+				ResourceType:        ResourceTypeTopic,
+				ResourcePatternType: PatternTypeLiteral,
+				ResourceName:        topic,
+				Host:                "*",
+			},
+			{
+				Principal:           "User:bob",
+				PermissionType:      ACLPermissionTypeAllow,
+				Operation:           ACLOperationTypeRead,
+				ResourceType:        ResourceTypeGroup,
+				ResourcePatternType: PatternTypeLiteral,
+				ResourceName:        group,
+				Host:                "*",
+			},
+		},
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	for _, err := range createRes.Errors {
+		if err != nil {
+			t.Error(err)
+		}
+	}
+}
+
+func TestACLPermissionTypeMarshal(t *testing.T) {
+	for i := ACLPermissionTypeUnknown; i <= ACLPermissionTypeAllow; i++ {
+		text, err := i.MarshalText()
+		if err != nil {
+			t.Errorf("couldn't marshal %d to text: %s", i, err)
+		}
+		var got ACLPermissionType
+		err = got.UnmarshalText(text)
+		if err != nil {
+			t.Errorf("couldn't unmarshal %s to ACLPermissionType: %s", text, err)
+		}
+		if got != i {
+			t.Errorf("got %d, want %d", got, i)
+		}
+	}
+}
+
+func TestACLOperationTypeMarshal(t *testing.T) {
+	for i := ACLOperationTypeUnknown; i <= ACLOperationTypeIdempotentWrite; i++ {
+		text, err := i.MarshalText()
+		if err != nil {
+			t.Errorf("couldn't marshal %d to text: %s", i, err)
+		}
+		var got ACLOperationType
+		err = got.UnmarshalText(text)
+		if err != nil {
+			t.Errorf("couldn't unmarshal %s to ACLOperationType: %s", text, err)
+		}
+		if got != i {
+			t.Errorf("got %d, want %d", got, i)
+		}
+	}
+}
diff -pruN 0.2.1-1.1/createpartitions.go 0.4.49+ds1-1/createpartitions.go
--- 0.2.1-1.1/createpartitions.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/createpartitions.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,103 @@
+package kafka
+
+import (
+	"context"
+	"fmt"
+	"net"
+	"time"
+
+	"github.com/segmentio/kafka-go/protocol/createpartitions"
+)
+
+// CreatePartitionsRequest represents a request sent to a kafka broker to create
+// and update topic partitions.
+type CreatePartitionsRequest struct {
+	// Address of the kafka broker to send the request to.
+	Addr net.Addr
+
+	// List of topics to create partitions for and their configuration.
+	Topics []TopicPartitionsConfig
+
+	// When set to true, partitions are not created but the configuration is
+	// validated as if they were.
+	ValidateOnly bool
+}
+
+// CreatePartitionsResponse represents a response from a kafka broker to a partition
+// creation request.
+type CreatePartitionsResponse struct {
+	// The amount of time that the broker throttled the request.
+	Throttle time.Duration
+
+	// Mapping of topic names to errors that occurred while attempting to create
+	// the topics.
+	//
+	// The errors contain the kafka error code. Programs may use the standard
+	// errors.Is function to test the error against kafka error codes.
+	Errors map[string]error
+}
+
+// CreatePartitions sends a partitions creation request to a kafka broker and returns the
+// response.
+func (c *Client) CreatePartitions(ctx context.Context, req *CreatePartitionsRequest) (*CreatePartitionsResponse, error) {
+	topics := make([]createpartitions.RequestTopic, len(req.Topics))
+
+	for i, t := range req.Topics {
+		topics[i] = createpartitions.RequestTopic{
+			Name:        t.Name,
+			Count:       t.Count,
+			Assignments: t.assignments(),
+		}
+	}
+
+	m, err := c.roundTrip(ctx, req.Addr, &createpartitions.Request{
+		Topics:       topics,
+		TimeoutMs:    c.timeoutMs(ctx, defaultCreatePartitionsTimeout),
+		ValidateOnly: req.ValidateOnly,
+	})
+
+	if err != nil {
+		return nil, fmt.Errorf("kafka.(*Client).CreatePartitions: %w", err)
+	}
+
+	res := m.(*createpartitions.Response)
+	ret := &CreatePartitionsResponse{
+		Throttle: makeDuration(res.ThrottleTimeMs),
+		Errors:   make(map[string]error, len(res.Results)),
+	}
+
+	for _, t := range res.Results {
+		ret.Errors[t.Name] = makeError(t.ErrorCode, t.ErrorMessage)
+	}
+
+	return ret, nil
+}
+
+type TopicPartitionsConfig struct {
+	// Topic name
+	Name string
+
+	// Number of partitions for the topic.
+	Count int32
+
+	// TopicPartitionAssignments among kafka brokers for the topic's partitions.
+	TopicPartitionAssignments []TopicPartitionAssignment
+}
+
+func (t *TopicPartitionsConfig) assignments() []createpartitions.RequestAssignment {
+	if len(t.TopicPartitionAssignments) == 0 {
+		return nil
+	}
+	assignments := make([]createpartitions.RequestAssignment, len(t.TopicPartitionAssignments))
+	for i, a := range t.TopicPartitionAssignments {
+		assignments[i] = createpartitions.RequestAssignment{
+			BrokerIDs: a.BrokerIDs,
+		}
+	}
+	return assignments
+}
+
+type TopicPartitionAssignment struct {
+	// Broker IDs
+	BrokerIDs []int32
+}
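
As the CreatePartitionsResponse documentation above notes, the per-topic errors carry kafka error codes and can be tested with errors.Is. A hedged usage sketch, assuming the package's exported Client type and TCP address helper (not part of this excerpt); the broker address, topic name, and partition count are placeholders.

package main

import (
	"context"
	"errors"
	"log"

	kafka "github.com/segmentio/kafka-go"
)

func main() {
	client := &kafka.Client{Addr: kafka.TCP("localhost:9092")} // placeholder broker

	res, err := client.CreatePartitions(context.Background(), &kafka.CreatePartitionsRequest{
		Topics: []kafka.TopicPartitionsConfig{
			{Name: "example-topic", Count: 6}, // grow the topic to 6 partitions
		},
	})
	if err != nil {
		log.Fatal(err) // transport- or protocol-level failure
	}

	for topic, topicErr := range res.Errors {
		switch {
		case topicErr == nil:
			log.Printf("partitions created for %s", topic)
		case errors.Is(topicErr, kafka.UnknownTopicOrPartition):
			log.Printf("topic %s does not exist yet", topic)
		default:
			log.Printf("creating partitions for %s failed: %v", topic, topicErr)
		}
	}
}
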
diff -pruN 0.2.1-1.1/createpartitions_test.go 0.4.49+ds1-1/createpartitions_test.go
--- 0.2.1-1.1/createpartitions_test.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/createpartitions_test.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,73 @@
+package kafka
+
+import (
+	"context"
+	"testing"
+
+	ktesting "github.com/segmentio/kafka-go/testing"
+)
+
+func TestClientCreatePartitions(t *testing.T) {
+	if !ktesting.KafkaIsAtLeast("1.0.1") {
+		return
+	}
+
+	client, shutdown := newLocalClient()
+	defer shutdown()
+
+	topic := makeTopic()
+	createTopic(t, topic, 1)
+	defer deleteTopic(t, topic)
+
+	res, err := client.CreatePartitions(context.Background(), &CreatePartitionsRequest{
+		Topics: []TopicPartitionsConfig{
+			{
+				Name:  topic,
+				Count: 2,
+				TopicPartitionAssignments: []TopicPartitionAssignment{
+					{
+						BrokerIDs: []int32{1},
+					},
+				},
+			},
+		},
+		ValidateOnly: false,
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if err := res.Errors[topic]; err != nil {
+		t.Error(err)
+	}
+}
+
+func TestClientCreatePartitionsNoAssignments(t *testing.T) {
+	if !ktesting.KafkaIsAtLeast("1.0.1") {
+		return
+	}
+
+	client, shutdown := newLocalClient()
+	defer shutdown()
+
+	topic := makeTopic()
+	createTopic(t, topic, 1)
+	defer deleteTopic(t, topic)
+
+	res, err := client.CreatePartitions(context.Background(), &CreatePartitionsRequest{
+		Topics: []TopicPartitionsConfig{
+			{
+				Name:  topic,
+				Count: 2,
+			},
+		},
+		ValidateOnly: false,
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if err := res.Errors[topic]; err != nil {
+		t.Error(err)
+	}
+}
diff -pruN 0.2.1-1.1/createtopics.go 0.4.49+ds1-1/createtopics.go
--- 0.2.1-1.1/createtopics.go	2018-11-20 17:06:32.000000000 +0000
+++ 0.4.49+ds1-1/createtopics.go	2025-08-21 19:15:53.000000000 +0000
@@ -2,19 +2,92 @@ package kafka
 
 import (
 	"bufio"
+	"context"
+	"fmt"
+	"net"
 	"time"
+
+	"github.com/segmentio/kafka-go/protocol/createtopics"
 )
 
+// CreateTopicsRequest represents a request sent to a kafka broker to create
+// new topics.
+type CreateTopicsRequest struct {
+	// Address of the kafka broker to send the request to.
+	Addr net.Addr
+
+	// List of topics to create and their configuration.
+	Topics []TopicConfig
+
+	// When set to true, topics are not created but the configuration is
+	// validated as if they were.
+	//
+	// This field will be ignored if the kafka broker did not support the
+	// CreateTopics API in version 1 or above.
+	ValidateOnly bool
+}
+
+// CreateTopicsResponse represents a response from a kafka broker to a topic
+// creation request.
+type CreateTopicsResponse struct {
+	// The amount of time that the broker throttled the request.
+	//
+	// This field will be zero if the kafka broker did not support the
+	// CreateTopics API in version 2 or above.
+	Throttle time.Duration
+
+	// Mapping of topic names to errors that occurred while attempting to create
+	// the topics.
+	//
+	// The errors contain the kafka error code. Programs may use the standard
+	// errors.Is function to test the error against kafka error codes.
+	Errors map[string]error
+}
+
+// CreateTopics sends a topic creation request to a kafka broker and returns the
+// response.
+func (c *Client) CreateTopics(ctx context.Context, req *CreateTopicsRequest) (*CreateTopicsResponse, error) {
+	topics := make([]createtopics.RequestTopic, len(req.Topics))
+
+	for i, t := range req.Topics {
+		topics[i] = createtopics.RequestTopic{
+			Name:              t.Topic,
+			NumPartitions:     int32(t.NumPartitions),
+			ReplicationFactor: int16(t.ReplicationFactor),
+			Assignments:       t.assignments(),
+			Configs:           t.configs(),
+		}
+	}
+
+	m, err := c.roundTrip(ctx, req.Addr, &createtopics.Request{
+		Topics:       topics,
+		TimeoutMs:    c.timeoutMs(ctx, defaultCreateTopicsTimeout),
+		ValidateOnly: req.ValidateOnly,
+	})
+	if err != nil {
+		return nil, fmt.Errorf("kafka.(*Client).CreateTopics: %w", err)
+	}
+
+	res := m.(*createtopics.Response)
+	ret := &CreateTopicsResponse{
+		Throttle: makeDuration(res.ThrottleTimeMs),
+		Errors:   make(map[string]error, len(res.Topics)),
+	}
+
+	for _, t := range res.Topics {
+		ret.Errors[t.Name] = makeError(t.ErrorCode, t.ErrorMessage)
+	}
+
+	return ret, nil
+}
+
 type ConfigEntry struct {
 	ConfigName  string
 	ConfigValue string
 }
 
 func (c ConfigEntry) toCreateTopicsRequestV0ConfigEntry() createTopicsRequestV0ConfigEntry {
-	return createTopicsRequestV0ConfigEntry{
-		ConfigName:  c.ConfigName,
-		ConfigValue: c.ConfigValue,
-	}
+	return createTopicsRequestV0ConfigEntry(c)
 }
 
 type createTopicsRequestV0ConfigEntry struct {
@@ -27,36 +100,62 @@ func (t createTopicsRequestV0ConfigEntry
 		sizeofString(t.ConfigValue)
 }
 
-func (t createTopicsRequestV0ConfigEntry) writeTo(w *bufio.Writer) {
-	writeString(w, t.ConfigName)
-	writeString(w, t.ConfigValue)
+func (t createTopicsRequestV0ConfigEntry) writeTo(wb *writeBuffer) {
+	wb.writeString(t.ConfigName)
+	wb.writeString(t.ConfigValue)
 }
 
 type ReplicaAssignment struct {
 	Partition int
-	Replicas  int
+	// The list of brokers where the partition should be allocated. There must
+	// be as many entries in this list as there are replicas of the partition.
+	// The first entry represents the broker that will be the preferred leader
+	// for the partition.
+	//
+	// This field changed in 0.4 from `int` to `[]int`. It was invalid to pass
+	// a single integer as this is supposed to be a list. While this introduces
+	// a breaking change, it probably never worked before.
+	Replicas []int
+}
+
+func (a *ReplicaAssignment) partitionIndex() int32 {
+	return int32(a.Partition)
+}
+
+func (a *ReplicaAssignment) brokerIDs() []int32 {
+	if len(a.Replicas) == 0 {
+		return nil
+	}
+	replicas := make([]int32, len(a.Replicas))
+	for i, r := range a.Replicas {
+		replicas[i] = int32(r)
+	}
+	return replicas
 }
 
 func (a ReplicaAssignment) toCreateTopicsRequestV0ReplicaAssignment() createTopicsRequestV0ReplicaAssignment {
 	return createTopicsRequestV0ReplicaAssignment{
 		Partition: int32(a.Partition),
-		Replicas:  int32(a.Replicas),
+		Replicas:  a.brokerIDs(),
 	}
 }
 
 type createTopicsRequestV0ReplicaAssignment struct {
 	Partition int32
-	Replicas  int32
+	Replicas  []int32
 }
 
 func (t createTopicsRequestV0ReplicaAssignment) size() int32 {
 	return sizeofInt32(t.Partition) +
-		sizeofInt32(t.Replicas)
+		(int32(len(t.Replicas)+1) * sizeofInt32(0)) // N+1 because the array length is an int32
 }
 
-func (t createTopicsRequestV0ReplicaAssignment) writeTo(w *bufio.Writer) {
-	writeInt32(w, t.Partition)
-	writeInt32(w, t.Replicas)
+func (t createTopicsRequestV0ReplicaAssignment) writeTo(wb *writeBuffer) {
+	wb.writeInt32(t.Partition)
+	wb.writeInt32(int32(len(t.Replicas)))
+	for _, r := range t.Replicas {
+		wb.writeInt32(int32(r))
+	}
 }
 
 type TopicConfig struct {
@@ -77,14 +176,42 @@ type TopicConfig struct {
 	ConfigEntries []ConfigEntry
 }
 
+func (t *TopicConfig) assignments() []createtopics.RequestAssignment {
+	if len(t.ReplicaAssignments) == 0 {
+		return nil
+	}
+	assignments := make([]createtopics.RequestAssignment, len(t.ReplicaAssignments))
+	for i, a := range t.ReplicaAssignments {
+		assignments[i] = createtopics.RequestAssignment{
+			PartitionIndex: a.partitionIndex(),
+			BrokerIDs:      a.brokerIDs(),
+		}
+	}
+	return assignments
+}
+
+func (t *TopicConfig) configs() []createtopics.RequestConfig {
+	if len(t.ConfigEntries) == 0 {
+		return nil
+	}
+	configs := make([]createtopics.RequestConfig, len(t.ConfigEntries))
+	for i, c := range t.ConfigEntries {
+		configs[i] = createtopics.RequestConfig{
+			Name:  c.ConfigName,
+			Value: c.ConfigValue,
+		}
+	}
+	return configs
+}
+
 func (t TopicConfig) toCreateTopicsRequestV0Topic() createTopicsRequestV0Topic {
-	var requestV0ReplicaAssignments []createTopicsRequestV0ReplicaAssignment
+	requestV0ReplicaAssignments := make([]createTopicsRequestV0ReplicaAssignment, 0, len(t.ReplicaAssignments))
 	for _, a := range t.ReplicaAssignments {
 		requestV0ReplicaAssignments = append(
 			requestV0ReplicaAssignments,
 			a.toCreateTopicsRequestV0ReplicaAssignment())
 	}
-	var requestV0ConfigEntries []createTopicsRequestV0ConfigEntry
+	requestV0ConfigEntries := make([]createTopicsRequestV0ConfigEntry, 0, len(t.ConfigEntries))
 	for _, c := range t.ConfigEntries {
 		requestV0ConfigEntries = append(
 			requestV0ConfigEntries,
@@ -126,16 +253,18 @@ func (t createTopicsRequestV0Topic) size
 		sizeofArray(len(t.ConfigEntries), func(i int) int32 { return t.ConfigEntries[i].size() })
 }
 
-func (t createTopicsRequestV0Topic) writeTo(w *bufio.Writer) {
-	writeString(w, t.Topic)
-	writeInt32(w, t.NumPartitions)
-	writeInt16(w, t.ReplicationFactor)
-	writeArray(w, len(t.ReplicaAssignments), func(i int) { t.ReplicaAssignments[i].writeTo(w) })
-	writeArray(w, len(t.ConfigEntries), func(i int) { t.ConfigEntries[i].writeTo(w) })
+func (t createTopicsRequestV0Topic) writeTo(wb *writeBuffer) {
+	wb.writeString(t.Topic)
+	wb.writeInt32(t.NumPartitions)
+	wb.writeInt16(t.ReplicationFactor)
+	wb.writeArray(len(t.ReplicaAssignments), func(i int) { t.ReplicaAssignments[i].writeTo(wb) })
+	wb.writeArray(len(t.ConfigEntries), func(i int) { t.ConfigEntries[i].writeTo(wb) })
 }
 
 // See http://kafka.apache.org/protocol.html#The_Messages_CreateTopics
-type createTopicsRequestV0 struct {
+type createTopicsRequest struct {
+	v apiVersion // v0, v1, v2
+
 	// Topics contains an array of single topic creation requests. Cannot
 	// have multiple entries for the same topic.
 	Topics []createTopicsRequestV0Topic
@@ -143,86 +272,136 @@ type createTopicsRequestV0 struct {
 	// Timeout ms to wait for a topic to be completely created on the
 	// controller node. Values <= 0 will trigger topic creation and return immediately
 	Timeout int32
+
+	// If true, check that the topics can be created as specified, but don't create anything.
+	// Internal use only for Kafka 4.0 support.
+	ValidateOnly bool
 }
 
-func (t createTopicsRequestV0) size() int32 {
-	return sizeofArray(len(t.Topics), func(i int) int32 { return t.Topics[i].size() }) +
+func (t createTopicsRequest) size() int32 {
+	sz := sizeofArray(len(t.Topics), func(i int) int32 { return t.Topics[i].size() }) +
 		sizeofInt32(t.Timeout)
+	if t.v >= v1 {
+		sz += 1
+	}
+	return sz
 }
 
-func (t createTopicsRequestV0) writeTo(w *bufio.Writer) {
-	writeArray(w, len(t.Topics), func(i int) { t.Topics[i].writeTo(w) })
-	writeInt32(w, t.Timeout)
+func (t createTopicsRequest) writeTo(wb *writeBuffer) {
+	wb.writeArray(len(t.Topics), func(i int) { t.Topics[i].writeTo(wb) })
+	wb.writeInt32(t.Timeout)
+	if t.v >= v1 {
+		wb.writeBool(t.ValidateOnly)
+	}
 }
 
-type createTopicsResponseV0TopicError struct {
+type createTopicsResponseTopicError struct {
+	v apiVersion
+
 	// Topic name
 	Topic string
 
 	// ErrorCode holds response error code
 	ErrorCode int16
+
+	// ErrorMessage holds response error message string
+	ErrorMessage string
 }
 
-func (t createTopicsResponseV0TopicError) size() int32 {
-	return sizeofString(t.Topic) +
+func (t createTopicsResponseTopicError) size() int32 {
+	sz := sizeofString(t.Topic) +
 		sizeofInt16(t.ErrorCode)
+	if t.v >= v1 {
+		sz += sizeofString(t.ErrorMessage)
+	}
+	return sz
 }
 
-func (t createTopicsResponseV0TopicError) writeTo(w *bufio.Writer) {
-	writeString(w, t.Topic)
-	writeInt16(w, t.ErrorCode)
+func (t createTopicsResponseTopicError) writeTo(wb *writeBuffer) {
+	wb.writeString(t.Topic)
+	wb.writeInt16(t.ErrorCode)
+	if t.v >= v1 {
+		wb.writeString(t.ErrorMessage)
+	}
 }
 
-func (t *createTopicsResponseV0TopicError) readFrom(r *bufio.Reader, size int) (remain int, err error) {
+func (t *createTopicsResponseTopicError) readFrom(r *bufio.Reader, size int) (remain int, err error) {
 	if remain, err = readString(r, size, &t.Topic); err != nil {
 		return
 	}
 	if remain, err = readInt16(r, remain, &t.ErrorCode); err != nil {
 		return
 	}
+	if t.v >= v1 {
+		if remain, err = readString(r, remain, &t.ErrorMessage); err != nil {
+			return
+		}
+	}
 	return
 }
 
 // See http://kafka.apache.org/protocol.html#The_Messages_CreateTopics
-type createTopicsResponseV0 struct {
-	TopicErrors []createTopicsResponseV0TopicError
+type createTopicsResponse struct {
+	v apiVersion
+
+	ThrottleTime int32 // v2+
+	TopicErrors  []createTopicsResponseTopicError
 }
 
-func (t createTopicsResponseV0) size() int32 {
-	return sizeofArray(len(t.TopicErrors), func(i int) int32 { return t.TopicErrors[i].size() })
+func (t createTopicsResponse) size() int32 {
+	sz := sizeofArray(len(t.TopicErrors), func(i int) int32 { return t.TopicErrors[i].size() })
+	if t.v >= v2 {
+		sz += sizeofInt32(t.ThrottleTime)
+	}
+	return sz
 }
 
-func (t createTopicsResponseV0) writeTo(w *bufio.Writer) {
-	writeArray(w, len(t.TopicErrors), func(i int) { t.TopicErrors[i].writeTo(w) })
+func (t createTopicsResponse) writeTo(wb *writeBuffer) {
+	if t.v >= v2 {
+		wb.writeInt32(t.ThrottleTime)
+	}
+	wb.writeArray(len(t.TopicErrors), func(i int) { t.TopicErrors[i].writeTo(wb) })
 }
 
-func (t *createTopicsResponseV0) readFrom(r *bufio.Reader, size int) (remain int, err error) {
+func (t *createTopicsResponse) readFrom(r *bufio.Reader, size int) (remain int, err error) {
 	fn := func(r *bufio.Reader, size int) (fnRemain int, fnErr error) {
-		var topic createTopicsResponseV0TopicError
-		if fnRemain, fnErr = (&topic).readFrom(r, size); err != nil {
+		topic := createTopicsResponseTopicError{v: t.v}
+		if fnRemain, fnErr = (&topic).readFrom(r, size); fnErr != nil {
 			return
 		}
 		t.TopicErrors = append(t.TopicErrors, topic)
 		return
 	}
-	if remain, err = readArrayWith(r, size, fn); err != nil {
+	remain = size
+	if t.v >= v2 {
+		if remain, err = readInt32(r, size, &t.ThrottleTime); err != nil {
+			return
+		}
+	}
+	if remain, err = readArrayWith(r, remain, fn); err != nil {
 		return
 	}
 
 	return
 }
 
-func (c *Conn) createTopics(request createTopicsRequestV0) (createTopicsResponseV0, error) {
-	var response createTopicsResponseV0
+func (c *Conn) createTopics(request createTopicsRequest) (createTopicsResponse, error) {
+	version, err := c.negotiateVersion(createTopics, v0, v1, v2)
+	if err != nil {
+		return createTopicsResponse{}, err
+	}
 
-	err := c.writeOperation(
+	request.v = version
+	response := createTopicsResponse{v: version}
+
+	err = c.writeOperation(
 		func(deadline time.Time, id int32) error {
 			if request.Timeout == 0 {
 				now := time.Now()
 				deadline = adjustDeadlineForRTT(deadline, now, defaultRTT)
 				request.Timeout = milliseconds(deadlineToTimeout(deadline, now))
 			}
-			return c.writeRequest(createTopicsRequest, v0, id, request)
+			return c.writeRequest(createTopics, version, id, request)
 		},
 		func(deadline time.Time, size int) error {
 			return expectZeroSize(func() (remain int, err error) {
@@ -234,6 +413,9 @@ func (c *Conn) createTopics(request crea
 		return response, err
 	}
 	for _, tr := range response.TopicErrors {
+		if tr.ErrorCode == int16(TopicAlreadyExists) {
+			continue
+		}
 		if tr.ErrorCode != 0 {
 			return response, Error(tr.ErrorCode)
 		}
@@ -246,22 +428,15 @@ func (c *Conn) createTopics(request crea
 // operational semantics. In other words, if CreateTopics is invoked with a
 // configuration for an existing topic, it will have no effect.
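+//
+// A minimal usage sketch (illustrative only; the broker address and topic
+// name are assumptions, and in practice the request should be sent to the
+// cluster controller, as done in the tests):
+//
+//	conn, _ := Dial("tcp", "localhost:9092")
+//	defer conn.Close()
+//	err := conn.CreateTopics(TopicConfig{
+//		Topic:             "example-topic",
+//		NumPartitions:     1,
+//		ReplicationFactor: 1,
+//	})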
 func (c *Conn) CreateTopics(topics ...TopicConfig) error {
-	var requestV0Topics []createTopicsRequestV0Topic
+	requestV0Topics := make([]createTopicsRequestV0Topic, 0, len(topics))
 	for _, t := range topics {
 		requestV0Topics = append(
 			requestV0Topics,
 			t.toCreateTopicsRequestV0Topic())
 	}
 
-	_, err := c.createTopics(createTopicsRequestV0{
+	_, err := c.createTopics(createTopicsRequest{
 		Topics: requestV0Topics,
 	})
-
-	switch err {
-	case TopicAlreadyExists:
-		// ok
-		return nil
-	default:
-		return err
-	}
+	return err
 }
diff -pruN 0.2.1-1.1/createtopics_test.go 0.4.49+ds1-1/createtopics_test.go
--- 0.2.1-1.1/createtopics_test.go	2018-11-20 17:06:32.000000000 +0000
+++ 0.4.49+ds1-1/createtopics_test.go	2025-08-21 19:15:53.000000000 +0000
@@ -3,37 +3,194 @@ package kafka
 import (
 	"bufio"
 	"bytes"
+	"context"
+	"errors"
+	"net"
 	"reflect"
+	"strconv"
 	"testing"
 )
 
-func TestCreateTopicsResponseV0(t *testing.T) {
-	item := createTopicsResponseV0{
-		TopicErrors: []createTopicsResponseV0TopicError{
+func TestConnCreateTopics(t *testing.T) {
+	topic1 := makeTopic()
+	topic2 := makeTopic()
+
+	conn, err := DialContext(context.Background(), "tcp", "localhost:9092")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	defer func() {
+		err := conn.Close()
+		if err != nil {
+			t.Fatalf("failed to close connection: %v", err)
+		}
+	}()
+
+	controller, _ := conn.Controller()
+
+	controllerConn, err := Dial("tcp", net.JoinHostPort(controller.Host, strconv.Itoa(controller.Port)))
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer controllerConn.Close()
+
+	err = controllerConn.CreateTopics(TopicConfig{
+		Topic:             topic1,
+		NumPartitions:     1,
+		ReplicationFactor: 1,
+	})
+	if err != nil {
+		t.Fatalf("unexpected error creating topic: %s", err.Error())
+	}
+
+	err = controllerConn.CreateTopics(TopicConfig{
+		Topic:             topic1,
+		NumPartitions:     1,
+		ReplicationFactor: 1,
+	})
+
+	// Duplicate topic should not return an error
+	if err != nil {
+		t.Fatalf("unexpected error creating duplicate topic: %v", err)
+	}
+
+	err = controllerConn.CreateTopics(
+		TopicConfig{
+			Topic:             topic1,
+			NumPartitions:     1,
+			ReplicationFactor: 1,
+		},
+		TopicConfig{
+			Topic:             topic2,
+			NumPartitions:     1,
+			ReplicationFactor: 1,
+		},
+		TopicConfig{
+			Topic:             topic2,
+			NumPartitions:     1,
+			ReplicationFactor: 1,
+		},
+	)
+
+	if err == nil {
+		t.Fatal("CreateTopics should have returned an error for invalid requests")
+	}
+
+	if !errors.Is(err, InvalidRequest) {
+		t.Fatalf("expected invalid request: %v", err)
+	}
+
+	deleteTopic(t, topic1)
+}
+
+func TestClientCreateTopics(t *testing.T) {
+	const (
+		topic1 = "client-topic-1"
+		topic2 = "client-topic-2"
+		topic3 = "client-topic-3"
+	)
+
+	client, shutdown := newLocalClient()
+	defer shutdown()
+
+	config := []ConfigEntry{{
+		ConfigName:  "retention.ms",
+		ConfigValue: "3600000",
+	}}
+
+	res, err := client.CreateTopics(context.Background(), &CreateTopicsRequest{
+		Topics: []TopicConfig{
 			{
-				Topic:     "topic",
-				ErrorCode: 2,
+				Topic:             topic1,
+				NumPartitions:     -1,
+				ReplicationFactor: -1,
+				ReplicaAssignments: []ReplicaAssignment{
+					{
+						Partition: 0,
+						Replicas:  []int{1},
+					},
+					{
+						Partition: 1,
+						Replicas:  []int{1},
+					},
+					{
+						Partition: 2,
+						Replicas:  []int{1},
+					},
+				},
+				ConfigEntries: config,
+			},
+			{
+				Topic:             topic2,
+				NumPartitions:     2,
+				ReplicationFactor: 1,
+				ConfigEntries:     config,
+			},
+			{
+				Topic:             topic3,
+				NumPartitions:     1,
+				ReplicationFactor: 1,
+				ConfigEntries:     config,
 			},
 		},
+	})
+	if err != nil {
+		t.Fatal(err)
 	}
 
-	buf := bytes.NewBuffer(nil)
-	w := bufio.NewWriter(buf)
-	item.writeTo(w)
-	w.Flush()
+	defer deleteTopic(t, topic1, topic2, topic3)
 
-	var found createTopicsResponseV0
-	remain, err := (&found).readFrom(bufio.NewReader(buf), buf.Len())
-	if err != nil {
-		t.Error(err)
-		t.FailNow()
+	expectTopics := map[string]struct{}{
+		topic1: {},
+		topic2: {},
+		topic3: {},
+	}
+
+	for topic, topicError := range res.Errors {
+		delete(expectTopics, topic)
+
+		if topicError != nil {
+			t.Errorf("%s => %s", topic, topicError)
+		}
+	}
+
+	for topic := range expectTopics {
+		t.Errorf("topic missing in response: %s", topic)
 	}
-	if remain != 0 {
-		t.Errorf("expected 0 remain, got %v", remain)
-		t.FailNow()
-	}
-	if !reflect.DeepEqual(item, found) {
-		t.Error("expected item and found to be the same")
-		t.FailNow()
+}
+
+func TestCreateTopicsResponse(t *testing.T) {
+	supportedVersions := []apiVersion{v0, v1, v2}
+	for _, v := range supportedVersions {
+		item := createTopicsResponse{
+			v: v,
+			TopicErrors: []createTopicsResponseTopicError{
+				{
+					v:         v,
+					Topic:     "topic",
+					ErrorCode: 2,
+				},
+			},
+		}
+
+		b := bytes.NewBuffer(nil)
+		w := &writeBuffer{w: b}
+		item.writeTo(w)
+
+		found := createTopicsResponse{v: v}
+		remain, err := (&found).readFrom(bufio.NewReader(b), b.Len())
+		if err != nil {
+			t.Error(err)
+			t.FailNow()
+		}
+		if remain != 0 {
+			t.Errorf("expected 0 remain, got %v", remain)
+			t.FailNow()
+		}
+		if !reflect.DeepEqual(item, found) {
+			t.Error("expected item and found to be the same")
+			t.FailNow()
+		}
 	}
 }
diff -pruN 0.2.1-1.1/debian/changelog 0.4.49+ds1-1/debian/changelog
--- 0.2.1-1.1/debian/changelog	2021-01-02 01:16:11.000000000 +0000
+++ 0.4.49+ds1-1/debian/changelog	2025-09-21 22:22:34.000000000 +0000
@@ -1,9 +1,21 @@
-golang-github-segmentio-kafka-go (0.2.1-1.1) unstable; urgency=medium
+golang-github-segmentio-kafka-go (0.4.49+ds1-1) unstable; urgency=low
 
-  * Non maintainer upload by the Reproducible Builds team.
-  * No source change upload to rebuild on buildd with .buildinfo files.
+  [ Debian Janitor ]
+  * Set upstream metadata fields: Bug-Database, Bug-Submit, Repository,
+    Repository-Browse.
+  * Apply multi-arch hint.
 
- -- Holger Levsen <holger@debian.org>  Sat, 02 Jan 2021 02:16:11 +0100
+  [ Lena Voytek ]
+  * New upstream version 0.4.49
+  * Update uploader (Closes: #940419).
+  * Use dh-sequence-golang.
+  * Use default salsa-ci.
+  * Update copyright file excludes.
+  * Update build dependencies to match new upstream.
+  * Repack to remove vendored go-xerial-snappy dependency.
+  * Do not build examples in package.
+
+ -- Lena Voytek <lena@voytek.dev>  Sun, 21 Sep 2025 18:22:34 -0400
 
 golang-github-segmentio-kafka-go (0.2.1-1) unstable; urgency=medium
 
diff -pruN 0.2.1-1.1/debian/compat 0.4.49+ds1-1/debian/compat
--- 0.2.1-1.1/debian/compat	2018-11-20 17:06:41.000000000 +0000
+++ 0.4.49+ds1-1/debian/compat	1970-01-01 00:00:00.000000000 +0000
@@ -1 +0,0 @@
-11
diff -pruN 0.2.1-1.1/debian/control 0.4.49+ds1-1/debian/control
--- 0.2.1-1.1/debian/control	2018-11-20 17:06:41.000000000 +0000
+++ 0.4.49+ds1-1/debian/control	2025-09-21 22:22:34.000000000 +0000
@@ -2,14 +2,17 @@ Source: golang-github-segmentio-kafka-go
 Section: devel
 Priority: optional
 Maintainer: Debian Go Packaging Team <team+pkg-go@tracker.debian.org>
-Uploaders: Alexandre Viau <aviau@debian.org>
-Build-Depends: debhelper (>= 11),
-               dh-golang,
+Uploaders: Lena Voytek <lena@voytek.dev>
+Build-Depends: debhelper-compat (= 13),
+               dh-sequence-golang,
                golang-any,
+               golang-github-aws-aws-sdk-go-dev,
+               golang-github-aws-aws-sdk-go-v2-dev,
+               golang-github-klauspost-compress-dev,
                golang-github-pierrec-lz4-dev,
-               golang-golang-x-net-dev,
-               golang-snappy-go-dev
-Standards-Version: 4.2.1
+               golang-github-xdg-go-scram-dev,
+               golang-golang-x-net-dev
+Standards-Version: 4.7.2
 Homepage: https://github.com/segmentio/kafka-go
 Vcs-Browser: https://salsa.debian.org/go-team/packages/golang-github-segmentio-kafka-go
 Vcs-Git: https://salsa.debian.org/go-team/packages/golang-github-segmentio-kafka-go.git
@@ -20,8 +23,8 @@ Package: golang-github-segmentio-kafka-g
 Architecture: all
 Depends: ${misc:Depends},
          golang-github-pierrec-lz4-dev,
-         golang-golang-x-net-dev,
-         golang-snappy-go-dev
+         golang-golang-x-net-dev
+Multi-Arch: foreign
 Description: Kafka library in Go
  Package kafka provides low and high level APIs for interacting with Kafka,
  mirroring concepts and implementing interfaces of the Go standard library
diff -pruN 0.2.1-1.1/debian/copyright 0.4.49+ds1-1/debian/copyright
--- 0.2.1-1.1/debian/copyright	2018-11-20 17:06:41.000000000 +0000
+++ 0.4.49+ds1-1/debian/copyright	2025-09-21 22:22:34.000000000 +0000
@@ -2,7 +2,7 @@ Format: https://www.debian.org/doc/packa
 Upstream-Name: kafka-go
 Source: https://github.com/segmentio/kafka-go
 Files-Excluded:
-  Godeps/_workspace
+  compress/snappy/go-xerial-snappy
 
 Files: *
 Copyright: 2017 Segment
@@ -10,6 +10,7 @@ License: Expat
 
 Files: debian/*
 Copyright: 2018 Alexandre Viau <aviau@debian.org>
+           2025 Lena Voytek <lena@voytek.dev>
 License: Expat
 Comment: Debian packaging is licensed under the same terms as upstream
 
diff -pruN 0.2.1-1.1/debian/gitlab-ci.yml 0.4.49+ds1-1/debian/gitlab-ci.yml
--- 0.2.1-1.1/debian/gitlab-ci.yml	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/debian/gitlab-ci.yml	2025-09-21 22:22:34.000000000 +0000
@@ -0,0 +1,47 @@
+# DO NOT MODIFY
+# This file was automatically generated from the authoritative copy at:
+# https://salsa.debian.org/go-team/infra/pkg-go-tools/blob/master/config/gitlabciyml.go
+---
+stages:
+  - test
+  - package
+
+include:
+  - project: go-team/infra/pkg-go-tools
+    ref: master
+    file: pipeline/test-archive.yml
+    # Run the Go team CI only in the go-team project that has access to GitLab
+    # CI runners tagged 'go-ci'
+    rules:
+      - if: $CI_PROJECT_ROOT_NAMESPACE  == "go-team"
+
+Salsa CI:
+  stage: package
+  trigger:
+    include:
+      - project: salsa-ci-team/pipeline
+        ref: master
+        file: recipes/debian.yml
+    strategy: depend
+  rules:
+    # Do not create a pipeline for tags unless SALSA_CI_ENABLE_PIPELINE_ON_TAGS is set
+    - if: $CI_COMMIT_TAG != null && $SALSA_CI_ENABLE_PIPELINE_ON_TAGS !~ /^(1|yes|true)$/
+      when: never
+    # Avoid duplicated pipelines, do not run detached pipelines
+    - if: '$CI_PIPELINE_SOURCE == "merge_request_event"'
+      when: never
+    # Run Salsa CI only if the Play button is pressed on the pipeline
+    - if: $CI_PIPELINE_SOURCE == "push"
+      when: manual
+  variables:
+    SALSA_CI_DISABLE_REPROTEST: 1 # Disable to save CI runner resources
+
+# If Salsa CI is not running at
+# https://salsa.debian.org/%{project_path}/-/pipelines, ensure that
+# https://salsa.debian.org/%{project_path}/-/settings/ci_cd has in field "CI/CD
+# configuration file" the same filename as this file.
+#
+# If Salsa CI is running, but first job is stuck because the project doesn't
+# have any runners online assigned to it, ensure that
+# https://salsa.debian.org/%{project_path}/-/settings/ci_cd has under "Runners"
+# the setting for "Enable instance runners for this project" enabled.
diff -pruN 0.2.1-1.1/debian/rules 0.4.49+ds1-1/debian/rules
--- 0.2.1-1.1/debian/rules	2018-11-20 17:06:41.000000000 +0000
+++ 0.4.49+ds1-1/debian/rules	2025-09-21 22:22:34.000000000 +0000
@@ -1,7 +1,10 @@
 #!/usr/bin/make -f
 
+# Ignore mongo example, missing build dependency
+export DH_GOLANG_EXCLUDES := examples/
+
 %:
-	dh $@ --buildsystem=golang --with=golang
+	dh $@ --builddirectory=_build --buildsystem=golang
 
 override_dh_auto_test:
 	# Tests require kafka
diff -pruN 0.2.1-1.1/debian/upstream/metadata 0.4.49+ds1-1/debian/upstream/metadata
--- 0.2.1-1.1/debian/upstream/metadata	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/debian/upstream/metadata	2025-09-21 22:22:34.000000000 +0000
@@ -0,0 +1,5 @@
+---
+Bug-Database: https://github.com/segmentio/kafka-go/issues
+Bug-Submit: https://github.com/segmentio/kafka-go/issues/new
+Repository: https://github.com/segmentio/kafka-go.git
+Repository-Browse: https://github.com/segmentio/kafka-go
diff -pruN 0.2.1-1.1/debian/watch 0.4.49+ds1-1/debian/watch
--- 0.2.1-1.1/debian/watch	2018-11-20 17:06:41.000000000 +0000
+++ 0.4.49+ds1-1/debian/watch	2025-09-21 22:22:34.000000000 +0000
@@ -1,4 +1,5 @@
 version=4
 opts=filenamemangle=s/.+\/v?(\d\S*)\.tar\.gz/golang-github-segmentio-kafka-go-\$1\.tar\.gz/,\
-uversionmangle=s/(\d)[_\.\-\+]?(RC|rc|pre|dev|beta|alpha)[.]?(\d*)$/\$1~\$2\$3/ \
+uversionmangle=s/(\d)[_\.\-\+]?(RC|rc|pre|dev|beta|alpha)[.]?(\d*)$/\$1~\$2\$3/,\
+dversionmangle=s/\+ds\d*$//,repacksuffix=+ds1" \
   https://github.com/segmentio/kafka-go/tags .*/v?(\d\S*)\.tar\.gz
diff -pruN 0.2.1-1.1/deleteacls.go 0.4.49+ds1-1/deleteacls.go
--- 0.2.1-1.1/deleteacls.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/deleteacls.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,114 @@
+package kafka
+
+import (
+	"context"
+	"fmt"
+	"net"
+	"time"
+
+	"github.com/segmentio/kafka-go/protocol/deleteacls"
+)
+
+// DeleteACLsRequest represents a request sent to a kafka broker to delete
+// ACLs.
+type DeleteACLsRequest struct {
+	// Address of the kafka broker to send the request to.
+	Addr net.Addr
+
+	// List of ACL filters to use for deletion.
+	Filters []DeleteACLsFilter
+}
+
+type DeleteACLsFilter struct {
+	ResourceTypeFilter        ResourceType
+	ResourceNameFilter        string
+	ResourcePatternTypeFilter PatternType
+	PrincipalFilter           string
+	HostFilter                string
+	Operation                 ACLOperationType
+	PermissionType            ACLPermissionType
+}
+
+// DeleteACLsResponse represents a response from a kafka broker to an ACL
+// deletion request.
+type DeleteACLsResponse struct {
+	// The amount of time that the broker throttled the request.
+	Throttle time.Duration
+
+	// List of the results from the deletion request.
+	Results []DeleteACLsResult
+}
+
+type DeleteACLsResult struct {
+	Error        error
+	MatchingACLs []DeleteACLsMatchingACLs
+}
+
+type DeleteACLsMatchingACLs struct {
+	Error               error
+	ResourceType        ResourceType
+	ResourceName        string
+	ResourcePatternType PatternType
+	Principal           string
+	Host                string
+	Operation           ACLOperationType
+	PermissionType      ACLPermissionType
+}
+
+// DeleteACLs sends an ACL deletion request to a kafka broker and returns the
+// response.
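+//
+// A minimal usage sketch (illustrative only; the broker address, topic name,
+// and filter values are assumptions):
+//
+//	client := &Client{Addr: TCP("localhost:9092")}
+//	res, err := client.DeleteACLs(context.Background(), &DeleteACLsRequest{
+//		Filters: []DeleteACLsFilter{{
+//			ResourceTypeFilter:        ResourceTypeTopic,
+//			ResourceNameFilter:        "example-topic",
+//			ResourcePatternTypeFilter: PatternTypeLiteral,
+//			Operation:                 ACLOperationTypeRead,
+//			PermissionType:            ACLPermissionTypeAllow,
+//		}},
+//	})
+//	if err != nil {
+//		// handle transport error
+//	}
+//	for _, result := range res.Results {
+//		// result.Error and result.MatchingACLs describe what was deleted
+//	}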
+func (c *Client) DeleteACLs(ctx context.Context, req *DeleteACLsRequest) (*DeleteACLsResponse, error) {
+	filters := make([]deleteacls.RequestFilter, 0, len(req.Filters))
+
+	for _, filter := range req.Filters {
+		filters = append(filters, deleteacls.RequestFilter{
+			ResourceTypeFilter:        int8(filter.ResourceTypeFilter),
+			ResourceNameFilter:        filter.ResourceNameFilter,
+			ResourcePatternTypeFilter: int8(filter.ResourcePatternTypeFilter),
+			PrincipalFilter:           filter.PrincipalFilter,
+			HostFilter:                filter.HostFilter,
+			Operation:                 int8(filter.Operation),
+			PermissionType:            int8(filter.PermissionType),
+		})
+	}
+
+	m, err := c.roundTrip(ctx, req.Addr, &deleteacls.Request{
+		Filters: filters,
+	})
+	if err != nil {
+		return nil, fmt.Errorf("kafka.(*Client).DeleteACLs: %w", err)
+	}
+
+	res := m.(*deleteacls.Response)
+
+	results := make([]DeleteACLsResult, 0, len(res.FilterResults))
+
+	for _, result := range res.FilterResults {
+		matchingACLs := make([]DeleteACLsMatchingACLs, 0, len(result.MatchingACLs))
+
+		for _, matchingACL := range result.MatchingACLs {
+			matchingACLs = append(matchingACLs, DeleteACLsMatchingACLs{
+				Error:               makeError(matchingACL.ErrorCode, matchingACL.ErrorMessage),
+				ResourceType:        ResourceType(matchingACL.ResourceType),
+				ResourceName:        matchingACL.ResourceName,
+				ResourcePatternType: PatternType(matchingACL.ResourcePatternType),
+				Principal:           matchingACL.Principal,
+				Host:                matchingACL.Host,
+				Operation:           ACLOperationType(matchingACL.Operation),
+				PermissionType:      ACLPermissionType(matchingACL.PermissionType),
+			})
+		}
+
+		results = append(results, DeleteACLsResult{
+			Error:        makeError(result.ErrorCode, result.ErrorMessage),
+			MatchingACLs: matchingACLs,
+		})
+	}
+
+	ret := &DeleteACLsResponse{
+		Throttle: makeDuration(res.ThrottleTimeMs),
+		Results:  results,
+	}
+
+	return ret, nil
+}
diff -pruN 0.2.1-1.1/deleteacls_test.go 0.4.49+ds1-1/deleteacls_test.go
--- 0.2.1-1.1/deleteacls_test.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/deleteacls_test.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,112 @@
+package kafka
+
+import (
+	"context"
+	"testing"
+
+	ktesting "github.com/segmentio/kafka-go/testing"
+	"github.com/stretchr/testify/assert"
+)
+
+func TestClientDeleteACLs(t *testing.T) {
+	if !ktesting.KafkaIsAtLeast("2.0.1") {
+		return
+	}
+
+	client, shutdown := newLocalClient()
+	defer shutdown()
+
+	topic := makeTopic()
+	group := makeGroupID()
+
+	createRes, err := client.CreateACLs(context.Background(), &CreateACLsRequest{
+		ACLs: []ACLEntry{
+			{
+				Principal:           "User:alice",
+				PermissionType:      ACLPermissionTypeAllow,
+				Operation:           ACLOperationTypeRead,
+				ResourceType:        ResourceTypeTopic,
+				ResourcePatternType: PatternTypeLiteral,
+				ResourceName:        topic,
+				Host:                "*",
+			},
+			{
+				Principal:           "User:bob",
+				PermissionType:      ACLPermissionTypeAllow,
+				Operation:           ACLOperationTypeRead,
+				ResourceType:        ResourceTypeGroup,
+				ResourcePatternType: PatternTypeLiteral,
+				ResourceName:        group,
+				Host:                "*",
+			},
+		},
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	for _, err := range createRes.Errors {
+		if err != nil {
+			t.Error(err)
+		}
+	}
+
+	deleteResp, err := client.DeleteACLs(context.Background(), &DeleteACLsRequest{
+		Filters: []DeleteACLsFilter{
+			{
+				ResourceTypeFilter:        ResourceTypeTopic,
+				ResourceNameFilter:        topic,
+				ResourcePatternTypeFilter: PatternTypeLiteral,
+				Operation:                 ACLOperationTypeRead,
+				PermissionType:            ACLPermissionTypeAllow,
+			},
+		},
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	expectedDeleteResp := DeleteACLsResponse{
+		Throttle: 0,
+		Results: []DeleteACLsResult{
+			{
+				Error: makeError(0, ""),
+				MatchingACLs: []DeleteACLsMatchingACLs{
+					{
+						Error:               makeError(0, ""),
+						ResourceType:        ResourceTypeTopic,
+						ResourceName:        topic,
+						ResourcePatternType: PatternTypeLiteral,
+						Principal:           "User:alice",
+						Host:                "*",
+						Operation:           ACLOperationTypeRead,
+						PermissionType:      ACLPermissionTypeAllow,
+					},
+				},
+			},
+		},
+	}
+
+	assert.Equal(t, expectedDeleteResp, *deleteResp)
+
+	describeResp, err := client.DescribeACLs(context.Background(), &DescribeACLsRequest{
+		Filter: ACLFilter{
+			ResourceTypeFilter:        ResourceTypeTopic,
+			ResourceNameFilter:        topic,
+			ResourcePatternTypeFilter: PatternTypeLiteral,
+			Operation:                 ACLOperationTypeRead,
+			PermissionType:            ACLPermissionTypeAllow,
+		},
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	expectedDescribeResp := DescribeACLsResponse{
+		Throttle:  0,
+		Error:     makeError(0, ""),
+		Resources: []ACLResource{},
+	}
+
+	assert.Equal(t, expectedDescribeResp, *describeResp)
+}
diff -pruN 0.2.1-1.1/deletegroups.go 0.4.49+ds1-1/deletegroups.go
--- 0.2.1-1.1/deletegroups.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/deletegroups.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,60 @@
+package kafka
+
+import (
+	"context"
+	"fmt"
+	"net"
+	"time"
+
+	"github.com/segmentio/kafka-go/protocol/deletegroups"
+)
+
+// DeleteGroupsRequest represents a request sent to a kafka broker to delete
+// consumer groups.
+type DeleteGroupsRequest struct {
+	// Address of the kafka broker to send the request to.
+	Addr net.Addr
+
+	// Identifiers of groups to delete.
+	GroupIDs []string
+}
+
+// DeleteGroupsResponse represents a response from a kafka broker to a consumer group
+// deletion request.
+type DeleteGroupsResponse struct {
+	// The amount of time that the broker throttled the request.
+	Throttle time.Duration
+
+	// Mapping of group ids to errors that occurred while attempting to delete those groups.
+	//
+	// The errors contain the kafka error code. Programs may use the standard
+	// errors.Is function to test the error against kafka error codes.
+	Errors map[string]error
+}
+
+// DeleteGroups sends a delete groups request and returns the response. The request is sent to the group coordinator of the first group
+// of the request. All deleted groups must be managed by the same group coordinator.
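+//
+// A minimal usage sketch (illustrative only; the client setup and group ID
+// are assumptions):
+//
+//	client := &Client{Addr: TCP("localhost:9092")}
+//	res, err := client.DeleteGroups(context.Background(), &DeleteGroupsRequest{
+//		GroupIDs: []string{"example-group"},
+//	})
+//	if err != nil {
+//		// handle transport error
+//	}
+//	if groupErr := res.Errors["example-group"]; groupErr != nil {
+//		// per-group deletion error, e.g. NonEmptyGroup
+//	}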
+func (c *Client) DeleteGroups(
+	ctx context.Context,
+	req *DeleteGroupsRequest,
+) (*DeleteGroupsResponse, error) {
+	m, err := c.roundTrip(ctx, req.Addr, &deletegroups.Request{
+		GroupIDs: req.GroupIDs,
+	})
+	if err != nil {
+		return nil, fmt.Errorf("kafka.(*Client).DeleteGroups: %w", err)
+	}
+
+	r := m.(*deletegroups.Response)
+
+	ret := &DeleteGroupsResponse{
+		Throttle: makeDuration(r.ThrottleTimeMs),
+		Errors:   make(map[string]error, len(r.Responses)),
+	}
+
+	for _, t := range r.Responses {
+		ret.Errors[t.GroupID] = makeError(t.ErrorCode, "")
+	}
+
+	return ret, nil
+}
diff -pruN 0.2.1-1.1/deletegroups_test.go 0.4.49+ds1-1/deletegroups_test.go
--- 0.2.1-1.1/deletegroups_test.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/deletegroups_test.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,80 @@
+package kafka
+
+import (
+	"context"
+	"errors"
+	"testing"
+	"time"
+
+	ktesting "github.com/segmentio/kafka-go/testing"
+)
+
+func TestClientDeleteGroups(t *testing.T) {
+	if !ktesting.KafkaIsAtLeast("1.1.0") {
+		t.Skip("Skipping test because kafka version is not high enough.")
+	}
+
+	client, shutdown := newLocalClient()
+	defer shutdown()
+
+	topic := makeTopic()
+	createTopic(t, topic, 1)
+
+	groupID := makeGroupID()
+
+	group, err := NewConsumerGroup(ConsumerGroupConfig{
+		ID:                groupID,
+		Topics:            []string{topic},
+		Brokers:           []string{"localhost:9092"},
+		HeartbeatInterval: 2 * time.Second,
+		RebalanceTimeout:  2 * time.Second,
+		RetentionTime:     time.Hour,
+		Logger:            &testKafkaLogger{T: t},
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer group.Close()
+
+	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+	defer cancel()
+
+	gen, err := group.Next(ctx)
+	if gen == nil {
+		t.Fatalf("expected generation 1 not to be nil")
+	}
+	if err != nil {
+		t.Fatalf("expected no error, but got %+v", err)
+	}
+
+	// delete not empty group
+	res, err := client.DeleteGroups(ctx, &DeleteGroupsRequest{
+		GroupIDs: []string{groupID},
+	})
+
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if !errors.Is(res.Errors[groupID], NonEmptyGroup) {
+		t.Fatalf("expected NonEmptyGroup error, but got %+v", res.Errors[groupID])
+	}
+
+	err = group.Close()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// delete empty group
+	res, err = client.DeleteGroups(ctx, &DeleteGroupsRequest{
+		GroupIDs: []string{groupID},
+	})
+
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if err = res.Errors[groupID]; err != nil {
+		t.Error(err)
+	}
+}
diff -pruN 0.2.1-1.1/deletetopics.go 0.4.49+ds1-1/deletetopics.go
--- 0.2.1-1.1/deletetopics.go	2018-11-20 17:06:32.000000000 +0000
+++ 0.4.49+ds1-1/deletetopics.go	2025-08-21 19:15:53.000000000 +0000
@@ -2,11 +2,72 @@ package kafka
 
 import (
 	"bufio"
+	"context"
+	"fmt"
+	"net"
 	"time"
+
+	"github.com/segmentio/kafka-go/protocol/deletetopics"
 )
 
+// DeleteTopicsRequest represents a request sent to a kafka broker to delete
+// topics.
+type DeleteTopicsRequest struct {
+	// Address of the kafka broker to send the request to.
+	Addr net.Addr
+
+	// Names of topics to delete.
+	Topics []string
+}
+
+// DeleteTopicsResponse represents a response from a kafka broker to a topic
+// deletion request.
+type DeleteTopicsResponse struct {
+	// The amount of time that the broker throttled the request.
+	//
+	// This field will be zero if the kafka broker did not support the
+	// DeleteTopics API in version 1 or above.
+	Throttle time.Duration
+
+	// Mapping of topic names to errors that occurred while attempting to delete
+	// the topics.
+	//
+	// The errors contain the kafka error code. Programs may use the standard
+	// errors.Is function to test the error against kafka error codes.
+	Errors map[string]error
+}
+
+// DeleteTopics sends a topic deletion request to a kafka broker and returns the
+// response.
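+//
+// A minimal usage sketch (illustrative only; the client setup and topic name
+// are assumptions):
+//
+//	client := &Client{Addr: TCP("localhost:9092")}
+//	res, err := client.DeleteTopics(context.Background(), &DeleteTopicsRequest{
+//		Topics: []string{"example-topic"},
+//	})
+//	if err != nil {
+//		// handle transport error
+//	}
+//	if topicErr := res.Errors["example-topic"]; topicErr != nil {
+//		// per-topic deletion error
+//	}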
+func (c *Client) DeleteTopics(ctx context.Context, req *DeleteTopicsRequest) (*DeleteTopicsResponse, error) {
+	m, err := c.roundTrip(ctx, req.Addr, &deletetopics.Request{
+		TopicNames: req.Topics,
+		TimeoutMs:  c.timeoutMs(ctx, defaultDeleteTopicsTimeout),
+	})
+
+	if err != nil {
+		return nil, fmt.Errorf("kafka.(*Client).DeleteTopics: %w", err)
+	}
+
+	res := m.(*deletetopics.Response)
+	ret := &DeleteTopicsResponse{
+		Throttle: makeDuration(res.ThrottleTimeMs),
+		Errors:   make(map[string]error, len(res.Responses)),
+	}
+
+	for _, t := range res.Responses {
+		if t.ErrorCode == 0 {
+			ret.Errors[t.Name] = nil
+		} else {
+			ret.Errors[t.Name] = Error(t.ErrorCode)
+		}
+	}
+
+	return ret, nil
+}
+
 // See http://kafka.apache.org/protocol.html#The_Messages_DeleteTopics
-type deleteTopicsRequestV1 struct {
+type deleteTopicsRequest struct {
 	// Topics holds the topic names
 	Topics []string
 
@@ -16,55 +77,61 @@ type deleteTopicsRequestV1 struct {
 	Timeout int32
 }
 
-func (t deleteTopicsRequestV1) size() int32 {
+func (t deleteTopicsRequest) size() int32 {
 	return sizeofStringArray(t.Topics) +
 		sizeofInt32(t.Timeout)
 }
 
-func (t deleteTopicsRequestV1) writeTo(w *bufio.Writer) {
-	writeStringArray(w, t.Topics)
-	writeInt32(w, t.Timeout)
+func (t deleteTopicsRequest) writeTo(wb *writeBuffer) {
+	wb.writeStringArray(t.Topics)
+	wb.writeInt32(t.Timeout)
 }
 
-type deleteTopicsResponseV1 struct {
-	// ThrottleTimeMS holds the duration in milliseconds for which the request
-	// was throttled due to quota violation (Zero if the request did not violate
-	// any quota)
-	ThrottleTimeMS int32
+type deleteTopicsResponse struct {
+	v apiVersion // v0, v1
 
+	ThrottleTime int32
 	// TopicErrorCodes holds per topic error codes
-	TopicErrorCodes []deleteTopicsResponseV1TopicErrorCode
+	TopicErrorCodes []deleteTopicsResponseV0TopicErrorCode
 }
 
-func (t deleteTopicsResponseV1) size() int32 {
-	return sizeofInt32(t.ThrottleTimeMS) +
-		sizeofArray(len(t.TopicErrorCodes), func(i int) int32 { return t.TopicErrorCodes[i].size() })
+func (t deleteTopicsResponse) size() int32 {
+	sz := sizeofArray(len(t.TopicErrorCodes), func(i int) int32 { return t.TopicErrorCodes[i].size() })
+	if t.v >= v1 {
+		sz += sizeofInt32(t.ThrottleTime)
+	}
+	return sz
 }
 
-func (t *deleteTopicsResponseV1) readFrom(r *bufio.Reader, size int) (remain int, err error) {
-	if remain, err = readInt32(r, size, &t.ThrottleTimeMS); err != nil {
-		return
-	}
+func (t *deleteTopicsResponse) readFrom(r *bufio.Reader, size int) (remain int, err error) {
 	fn := func(withReader *bufio.Reader, withSize int) (fnRemain int, fnErr error) {
-		var item deleteTopicsResponseV1TopicErrorCode
-		if fnRemain, fnErr = (&item).readFrom(withReader, withSize); err != nil {
+		var item deleteTopicsResponseV0TopicErrorCode
+		if fnRemain, fnErr = (&item).readFrom(withReader, withSize); fnErr != nil {
 			return
 		}
 		t.TopicErrorCodes = append(t.TopicErrorCodes, item)
 		return
 	}
+	remain = size
+	if t.v >= v1 {
+		if remain, err = readInt32(r, size, &t.ThrottleTime); err != nil {
+			return
+		}
+	}
 	if remain, err = readArrayWith(r, remain, fn); err != nil {
 		return
 	}
 	return
 }
 
-func (t deleteTopicsResponseV1) writeTo(w *bufio.Writer) {
-	writeInt32(w, t.ThrottleTimeMS)
-	writeArray(w, len(t.TopicErrorCodes), func(i int) { t.TopicErrorCodes[i].writeTo(w) })
+func (t deleteTopicsResponse) writeTo(wb *writeBuffer) {
+	if t.v >= v1 {
+		wb.writeInt32(t.ThrottleTime)
+	}
+	wb.writeArray(len(t.TopicErrorCodes), func(i int) { t.TopicErrorCodes[i].writeTo(wb) })
 }
 
-type deleteTopicsResponseV1TopicErrorCode struct {
+type deleteTopicsResponseV0TopicErrorCode struct {
 	// Topic holds the topic name
 	Topic string
 
@@ -72,12 +139,12 @@ type deleteTopicsResponseV1TopicErrorCod
 	ErrorCode int16
 }
 
-func (t deleteTopicsResponseV1TopicErrorCode) size() int32 {
+func (t deleteTopicsResponseV0TopicErrorCode) size() int32 {
 	return sizeofString(t.Topic) +
 		sizeofInt16(t.ErrorCode)
 }
 
-func (t *deleteTopicsResponseV1TopicErrorCode) readFrom(r *bufio.Reader, size int) (remain int, err error) {
+func (t *deleteTopicsResponseV0TopicErrorCode) readFrom(r *bufio.Reader, size int) (remain int, err error) {
 	if remain, err = readString(r, size, &t.Topic); err != nil {
 		return
 	}
@@ -87,24 +154,32 @@ func (t *deleteTopicsResponseV1TopicErro
 	return
 }
 
-func (t deleteTopicsResponseV1TopicErrorCode) writeTo(w *bufio.Writer) {
-	writeString(w, t.Topic)
-	writeInt16(w, t.ErrorCode)
+func (t deleteTopicsResponseV0TopicErrorCode) writeTo(wb *writeBuffer) {
+	wb.writeString(t.Topic)
+	wb.writeInt16(t.ErrorCode)
 }
 
 // deleteTopics deletes the specified topics.
 //
 // See http://kafka.apache.org/protocol.html#The_Messages_DeleteTopics
-func (c *Conn) deleteTopics(request deleteTopicsRequestV1) (deleteTopicsResponseV1, error) {
-	var response deleteTopicsResponseV1
-	err := c.writeOperation(
+func (c *Conn) deleteTopics(request deleteTopicsRequest) (deleteTopicsResponse, error) {
+	version, err := c.negotiateVersion(deleteTopics, v0, v1)
+	if err != nil {
+		return deleteTopicsResponse{}, err
+	}
+
+	response := deleteTopicsResponse{
+		v: version,
+	}
+
+	err = c.writeOperation(
 		func(deadline time.Time, id int32) error {
 			if request.Timeout == 0 {
 				now := time.Now()
 				deadline = adjustDeadlineForRTT(deadline, now, defaultRTT)
 				request.Timeout = milliseconds(deadlineToTimeout(deadline, now))
 			}
-			return c.writeRequest(deleteTopicsRequest, v1, id, request)
+			return c.writeRequest(deleteTopics, version, id, request)
 		},
 		func(deadline time.Time, size int) error {
 			return expectZeroSize(func() (remain int, err error) {
@@ -113,7 +188,7 @@ func (c *Conn) deleteTopics(request dele
 		},
 	)
 	if err != nil {
-		return deleteTopicsResponseV1{}, err
+		return deleteTopicsResponse{}, err
 	}
 	for _, c := range response.TopicErrorCodes {
 		if c.ErrorCode != 0 {
diff -pruN 0.2.1-1.1/deletetopics_test.go 0.4.49+ds1-1/deletetopics_test.go
--- 0.2.1-1.1/deletetopics_test.go	2018-11-20 17:06:32.000000000 +0000
+++ 0.4.49+ds1-1/deletetopics_test.go	2025-08-21 19:15:53.000000000 +0000
@@ -3,14 +3,34 @@ package kafka
 import (
 	"bufio"
 	"bytes"
+	"context"
 	"reflect"
 	"testing"
 )
 
+func TestClientDeleteTopics(t *testing.T) {
+	client, shutdown := newLocalClient()
+	defer shutdown()
+
+	topic := makeTopic()
+	createTopic(t, topic, 1)
+
+	res, err := client.DeleteTopics(context.Background(), &DeleteTopicsRequest{
+		Topics: []string{topic},
+	})
+
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if err := res.Errors[topic]; err != nil {
+		t.Error(err)
+	}
+}
+
 func TestDeleteTopicsResponseV1(t *testing.T) {
-	item := deleteTopicsResponseV1{
-		ThrottleTimeMS: 123,
-		TopicErrorCodes: []deleteTopicsResponseV1TopicErrorCode{
+	item := deleteTopicsResponse{
+		TopicErrorCodes: []deleteTopicsResponseV0TopicErrorCode{
 			{
 				Topic:     "a",
 				ErrorCode: 7,
@@ -18,13 +38,12 @@ func TestDeleteTopicsResponseV1(t *testi
 		},
 	}
 
-	buf := bytes.NewBuffer(nil)
-	w := bufio.NewWriter(buf)
+	b := bytes.NewBuffer(nil)
+	w := &writeBuffer{w: b}
 	item.writeTo(w)
-	w.Flush()
 
-	var found deleteTopicsResponseV1
-	remain, err := (&found).readFrom(bufio.NewReader(buf), buf.Len())
+	var found deleteTopicsResponse
+	remain, err := (&found).readFrom(bufio.NewReader(b), b.Len())
 	if err != nil {
 		t.Fatal(err)
 	}
diff -pruN 0.2.1-1.1/describeacls.go 0.4.49+ds1-1/describeacls.go
--- 0.2.1-1.1/describeacls.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/describeacls.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,107 @@
+package kafka
+
+import (
+	"context"
+	"fmt"
+	"net"
+	"time"
+
+	"github.com/segmentio/kafka-go/protocol/describeacls"
+)
+
+// DescribeACLsRequest represents a request sent to a kafka broker to describe
+// existing ACLs.
+type DescribeACLsRequest struct {
+	// Address of the kafka broker to send the request to.
+	Addr net.Addr
+
+	// Filter to filter ACLs on.
+	Filter ACLFilter
+}
+
+type ACLFilter struct {
+	ResourceTypeFilter ResourceType
+	ResourceNameFilter string
+	// ResourcePatternTypeFilter was added in v1 and is not available prior to that.
+	ResourcePatternTypeFilter PatternType
+	PrincipalFilter           string
+	HostFilter                string
+	Operation                 ACLOperationType
+	PermissionType            ACLPermissionType
+}
+
+// DescribeACLsResponse represents a response from a kafka broker to an ACL
+// describe request.
+type DescribeACLsResponse struct {
+	// The amount of time that the broker throttled the request.
+	Throttle time.Duration
+
+	// Error that occurred while attempting to describe
+	// the ACLs.
+	Error error
+
+	// ACL resources returned from the describe request.
+	Resources []ACLResource
+}
+
+type ACLResource struct {
+	ResourceType ResourceType
+	ResourceName string
+	PatternType  PatternType
+	ACLs         []ACLDescription
+}
+
+type ACLDescription struct {
+	Principal      string
+	Host           string
+	Operation      ACLOperationType
+	PermissionType ACLPermissionType
+}
+
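+// DescribeACLs sends an ACL describe request to a kafka broker and returns
+// the response.
+//
+// A minimal usage sketch (illustrative only; the client setup and filter
+// values are assumptions):
+//
+//	client := &Client{Addr: TCP("localhost:9092")}
+//	res, err := client.DescribeACLs(context.Background(), &DescribeACLsRequest{
+//		Filter: ACLFilter{
+//			ResourceTypeFilter:        ResourceTypeTopic,
+//			ResourceNameFilter:        "example-topic",
+//			ResourcePatternTypeFilter: PatternTypeLiteral,
+//			Operation:                 ACLOperationTypeRead,
+//			PermissionType:            ACLPermissionTypeAllow,
+//		},
+//	})
+//	if err != nil {
+//		// handle transport error
+//	}
+//	for _, resource := range res.Resources {
+//		// resource.ACLs lists the matching ACL descriptions
+//	}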
+func (c *Client) DescribeACLs(ctx context.Context, req *DescribeACLsRequest) (*DescribeACLsResponse, error) {
+	m, err := c.roundTrip(ctx, req.Addr, &describeacls.Request{
+		Filter: describeacls.ACLFilter{
+			ResourceTypeFilter:        int8(req.Filter.ResourceTypeFilter),
+			ResourceNameFilter:        req.Filter.ResourceNameFilter,
+			ResourcePatternTypeFilter: int8(req.Filter.ResourcePatternTypeFilter),
+			PrincipalFilter:           req.Filter.PrincipalFilter,
+			HostFilter:                req.Filter.HostFilter,
+			Operation:                 int8(req.Filter.Operation),
+			PermissionType:            int8(req.Filter.PermissionType),
+		},
+	})
+	if err != nil {
+		return nil, fmt.Errorf("kafka.(*Client).DescribeACLs: %w", err)
+	}
+
+	res := m.(*describeacls.Response)
+	resources := make([]ACLResource, len(res.Resources))
+
+	for resourceIdx, respResource := range res.Resources {
+		descriptions := make([]ACLDescription, len(respResource.ACLs))
+
+		for descriptionIdx, respDescription := range respResource.ACLs {
+			descriptions[descriptionIdx] = ACLDescription{
+				Principal:      respDescription.Principal,
+				Host:           respDescription.Host,
+				Operation:      ACLOperationType(respDescription.Operation),
+				PermissionType: ACLPermissionType(respDescription.PermissionType),
+			}
+		}
+
+		resources[resourceIdx] = ACLResource{
+			ResourceType: ResourceType(respResource.ResourceType),
+			ResourceName: respResource.ResourceName,
+			PatternType:  PatternType(respResource.PatternType),
+			ACLs:         descriptions,
+		}
+	}
+
+	ret := &DescribeACLsResponse{
+		Throttle:  makeDuration(res.ThrottleTimeMs),
+		Error:     makeError(res.ErrorCode, res.ErrorMessage),
+		Resources: resources,
+	}
+
+	return ret, nil
+}
diff -pruN 0.2.1-1.1/describeacls_test.go 0.4.49+ds1-1/describeacls_test.go
--- 0.2.1-1.1/describeacls_test.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/describeacls_test.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,88 @@
+package kafka
+
+import (
+	"context"
+	"testing"
+
+	ktesting "github.com/segmentio/kafka-go/testing"
+	"github.com/stretchr/testify/assert"
+)
+
+func TestClientDescribeACLs(t *testing.T) {
+	if !ktesting.KafkaIsAtLeast("2.0.1") {
+		return
+	}
+
+	client, shutdown := newLocalClient()
+	defer shutdown()
+
+	topic := makeTopic()
+	group := makeGroupID()
+
+	createRes, err := client.CreateACLs(context.Background(), &CreateACLsRequest{
+		ACLs: []ACLEntry{
+			{
+				Principal:           "User:alice",
+				PermissionType:      ACLPermissionTypeAllow,
+				Operation:           ACLOperationTypeRead,
+				ResourceType:        ResourceTypeTopic,
+				ResourcePatternType: PatternTypeLiteral,
+				ResourceName:        topic,
+				Host:                "*",
+			},
+			{
+				Principal:           "User:bob",
+				PermissionType:      ACLPermissionTypeAllow,
+				Operation:           ACLOperationTypeRead,
+				ResourceType:        ResourceTypeGroup,
+				ResourcePatternType: PatternTypeLiteral,
+				ResourceName:        group,
+				Host:                "*",
+			},
+		},
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	for _, err := range createRes.Errors {
+		if err != nil {
+			t.Error(err)
+		}
+	}
+
+	describeResp, err := client.DescribeACLs(context.Background(), &DescribeACLsRequest{
+		Filter: ACLFilter{
+			ResourceTypeFilter:        ResourceTypeTopic,
+			ResourceNameFilter:        topic,
+			ResourcePatternTypeFilter: PatternTypeLiteral,
+			Operation:                 ACLOperationTypeRead,
+			PermissionType:            ACLPermissionTypeAllow,
+		},
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	expectedDescribeResp := DescribeACLsResponse{
+		Throttle: 0,
+		Error:    makeError(0, ""),
+		Resources: []ACLResource{
+			{
+				ResourceType: ResourceTypeTopic,
+				ResourceName: topic,
+				PatternType:  PatternTypeLiteral,
+				ACLs: []ACLDescription{
+					{
+						Principal:      "User:alice",
+						Host:           "*",
+						Operation:      ACLOperationTypeRead,
+						PermissionType: ACLPermissionTypeAllow,
+					},
+				},
+			},
+		},
+	}
+
+	assert.Equal(t, expectedDescribeResp, *describeResp)
+}
diff -pruN 0.2.1-1.1/describeclientquotas.go 0.4.49+ds1-1/describeclientquotas.go
--- 0.2.1-1.1/describeclientquotas.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/describeclientquotas.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,126 @@
+package kafka
+
+import (
+	"context"
+	"fmt"
+	"net"
+	"time"
+
+	"github.com/segmentio/kafka-go/protocol/describeclientquotas"
+)
+
+// DescribeClientQuotasRequest represents a request sent to a kafka broker to
+// describe client quotas.
+type DescribeClientQuotasRequest struct {
+	// Address of the kafka broker to send the request to
+	Addr net.Addr
+
+	// List of quota components to describe.
+	Components []DescribeClientQuotasRequestComponent
+
+	// Whether the match is strict, i.e. should exclude entities with
+	// unspecified entity types.
+	Strict bool
+}
+
+type DescribeClientQuotasRequestComponent struct {
+	// The entity type that the filter component applies to.
+	EntityType string
+
+	// How to match the entity (0 = exact name, 1 = default name,
+	// 2 = any specified name).
+	MatchType int8
+
+	// The string to match against, or null if unused for the match type.
+	Match string
+}
+
+// DescribeClientQuotasResponse represents a response from a kafka broker to a describe client quota request.
+type DescribeClientQuotasResponse struct {
+	// The amount of time that the broker throttled the request.
+	Throttle time.Duration
+
+	// Error is set to a non-nil value including the code and message if a top-level
+	// error was encountered when doing the update.
+	Error error
+
+	// List of describe client quota responses.
+	Entries []DescribeClientQuotasResponseQuotas
+}
+
+type DescribeClientQuotasEntity struct {
+	// The quota entity type.
+	EntityType string
+
+	// The name of the quota entity, or null if the default.
+	EntityName string
+}
+
+type DescribeClientQuotasValue struct {
+	// The quota configuration key.
+	Key string
+
+	// The quota configuration value.
+	Value float64
+}
+
+type DescribeClientQuotasResponseQuotas struct {
+	// List of client quota entities and their descriptions.
+	Entities []DescribeClientQuotasEntity
+
+	// The client quota configuration values.
+	Values []DescribeClientQuotasValue
+}
+
+// DescribeClientQuotas sends a describe client quotas request to a kafka broker and returns
+// the response.
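+//
+// A minimal usage sketch (illustrative only; the client setup, entity type,
+// and match value are assumptions):
+//
+//	client := &Client{Addr: TCP("localhost:9092")}
+//	res, err := client.DescribeClientQuotas(context.Background(), &DescribeClientQuotasRequest{
+//		Components: []DescribeClientQuotasRequestComponent{{
+//			EntityType: "client-id",
+//			MatchType:  0, // exact name
+//			Match:      "example-client",
+//		}},
+//	})
+//	if err != nil {
+//		// handle transport error
+//	}
+//	for _, entry := range res.Entries {
+//		// entry.Entities and entry.Values describe the matched quotas
+//	}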
+func (c *Client) DescribeClientQuotas(ctx context.Context, req *DescribeClientQuotasRequest) (*DescribeClientQuotasResponse, error) {
+	components := make([]describeclientquotas.Component, len(req.Components))
+
+	for componentIdx, component := range req.Components {
+		components[componentIdx] = describeclientquotas.Component{
+			EntityType: component.EntityType,
+			MatchType:  component.MatchType,
+			Match:      component.Match,
+		}
+	}
+
+	m, err := c.roundTrip(ctx, req.Addr, &describeclientquotas.Request{
+		Components: components,
+		Strict:     req.Strict,
+	})
+	if err != nil {
+		return nil, fmt.Errorf("kafka.(*Client).DescribeClientQuotas: %w", err)
+	}
+
+	res := m.(*describeclientquotas.Response)
+	responseEntries := make([]DescribeClientQuotasResponseQuotas, len(res.Entries))
+
+	for responseEntryIdx, responseEntry := range res.Entries {
+		responseEntities := make([]DescribeClientQuotasEntity, len(responseEntry.Entities))
+		for responseEntityIdx, responseEntity := range responseEntry.Entities {
+			responseEntities[responseEntityIdx] = DescribeClientQuotasEntity{
+				EntityType: responseEntity.EntityType,
+				EntityName: responseEntity.EntityName,
+			}
+		}
+
+		responseValues := make([]DescribeClientQuotasValue, len(responseEntry.Values))
+		for responseValueIdx, responseValue := range responseEntry.Values {
+			responseValues[responseValueIdx] = DescribeClientQuotasValue{
+				Key:   responseValue.Key,
+				Value: responseValue.Value,
+			}
+		}
+		responseEntries[responseEntryIdx] = DescribeClientQuotasResponseQuotas{
+			Entities: responseEntities,
+			Values:   responseValues,
+		}
+	}
+	ret := &DescribeClientQuotasResponse{
+		Throttle: makeDuration(res.ThrottleTimeMs),
+		Entries:  responseEntries,
+	}
+
+	return ret, nil
+}
diff -pruN 0.2.1-1.1/describeconfigs.go 0.4.49+ds1-1/describeconfigs.go
--- 0.2.1-1.1/describeconfigs.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/describeconfigs.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,162 @@
+package kafka
+
+import (
+	"context"
+	"fmt"
+	"net"
+	"time"
+
+	"github.com/segmentio/kafka-go/protocol/describeconfigs"
+)
+
+// DescribeConfigsRequest represents a request sent to a kafka broker to describe configs.
+type DescribeConfigsRequest struct {
+	// Address of the kafka broker to send the request to.
+	Addr net.Addr
+
+	// List of resources to get details for.
+	Resources []DescribeConfigRequestResource
+
+	// Ignored if API version is less than v1
+	IncludeSynonyms bool
+
+	// Ignored if API version is less than v3
+	IncludeDocumentation bool
+}
+
+type DescribeConfigRequestResource struct {
+	// Resource Type
+	ResourceType ResourceType
+
+	// Resource Name
+	ResourceName string
+
+	// ConfigNames is a list of configurations to update.
+	ConfigNames []string
+}
+
+// DescribeConfigsResponse represents a response from a kafka broker to a describe config request.
+type DescribeConfigsResponse struct {
+	// The amount of time that the broker throttled the request.
+	Throttle time.Duration
+
+	// Resources
+	Resources []DescribeConfigResponseResource
+}
+
+// DescribeConfigResponseResource.
+type DescribeConfigResponseResource struct {
+	// Resource Type
+	ResourceType int8
+
+	// Resource Name
+	ResourceName string
+
+	// Error
+	Error error
+
+	// ConfigEntries
+	ConfigEntries []DescribeConfigResponseConfigEntry
+}
+
+// DescribeConfigResponseConfigEntry.
+type DescribeConfigResponseConfigEntry struct {
+	ConfigName  string
+	ConfigValue string
+	ReadOnly    bool
+
+	// Ignored if API version is greater than v0
+	IsDefault bool
+
+	// Ignored if API version is less than v1
+	ConfigSource int8
+
+	IsSensitive bool
+
+	// Ignored if API version is less than v1
+	ConfigSynonyms []DescribeConfigResponseConfigSynonym
+
+	// Ignored if API version is less than v3
+	ConfigType int8
+
+	// Ignored if API version is less than v3
+	ConfigDocumentation string
+}
+
+// DescribeConfigResponseConfigSynonym.
+type DescribeConfigResponseConfigSynonym struct {
+	// Ignored if API version is less than v1
+	ConfigName string
+
+	// Ignored if API version is less than v1
+	ConfigValue string
+
+	// Ignored if API version is less than v1
+	ConfigSource int8
+}
+
+// DescribeConfigs sends a describe configs request to a kafka broker and returns the
+// response.
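+//
+// A minimal usage sketch (illustrative only; the client setup, topic name,
+// and config key are assumptions):
+//
+//	client := &Client{Addr: TCP("localhost:9092")}
+//	res, err := client.DescribeConfigs(context.Background(), &DescribeConfigsRequest{
+//		Resources: []DescribeConfigRequestResource{{
+//			ResourceType: ResourceTypeTopic,
+//			ResourceName: "example-topic",
+//			ConfigNames:  []string{"max.message.bytes"},
+//		}},
+//	})
+//	if err != nil {
+//		// handle transport error
+//	}
+//	for _, resource := range res.Resources {
+//		// resource.ConfigEntries holds the requested configuration values
+//	}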
+func (c *Client) DescribeConfigs(ctx context.Context, req *DescribeConfigsRequest) (*DescribeConfigsResponse, error) {
+	resources := make([]describeconfigs.RequestResource, len(req.Resources))
+
+	for i, t := range req.Resources {
+		resources[i] = describeconfigs.RequestResource{
+			ResourceType: int8(t.ResourceType),
+			ResourceName: t.ResourceName,
+			ConfigNames:  t.ConfigNames,
+		}
+	}
+
+	m, err := c.roundTrip(ctx, req.Addr, &describeconfigs.Request{
+		Resources:            resources,
+		IncludeSynonyms:      req.IncludeSynonyms,
+		IncludeDocumentation: req.IncludeDocumentation,
+	})
+	if err != nil {
+		return nil, fmt.Errorf("kafka.(*Client).DescribeConfigs: %w", err)
+	}
+
+	res := m.(*describeconfigs.Response)
+	ret := &DescribeConfigsResponse{
+		Throttle:  makeDuration(res.ThrottleTimeMs),
+		Resources: make([]DescribeConfigResponseResource, len(res.Resources)),
+	}
+
+	for i, t := range res.Resources {
+
+		configEntries := make([]DescribeConfigResponseConfigEntry, len(t.ConfigEntries))
+		for j, v := range t.ConfigEntries {
+
+			configSynonyms := make([]DescribeConfigResponseConfigSynonym, len(v.ConfigSynonyms))
+			for k, cs := range v.ConfigSynonyms {
+				configSynonyms[k] = DescribeConfigResponseConfigSynonym{
+					ConfigName:   cs.ConfigName,
+					ConfigValue:  cs.ConfigValue,
+					ConfigSource: cs.ConfigSource,
+				}
+			}
+
+			configEntries[j] = DescribeConfigResponseConfigEntry{
+				ConfigName:          v.ConfigName,
+				ConfigValue:         v.ConfigValue,
+				ReadOnly:            v.ReadOnly,
+				ConfigSource:        v.ConfigSource,
+				IsDefault:           v.IsDefault,
+				IsSensitive:         v.IsSensitive,
+				ConfigSynonyms:      configSynonyms,
+				ConfigType:          v.ConfigType,
+				ConfigDocumentation: v.ConfigDocumentation,
+			}
+		}
+
+		ret.Resources[i] = DescribeConfigResponseResource{
+			ResourceType:  t.ResourceType,
+			ResourceName:  t.ResourceName,
+			Error:         makeError(t.ErrorCode, t.ErrorMessage),
+			ConfigEntries: configEntries,
+		}
+	}
+
+	return ret, nil
+}
diff -pruN 0.2.1-1.1/describeconfigs_test.go 0.4.49+ds1-1/describeconfigs_test.go
--- 0.2.1-1.1/describeconfigs_test.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/describeconfigs_test.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,67 @@
+package kafka
+
+import (
+	"context"
+	"testing"
+
+	ktesting "github.com/segmentio/kafka-go/testing"
+	"github.com/stretchr/testify/assert"
+)
+
+func TestClientDescribeConfigs(t *testing.T) {
+	if !ktesting.KafkaIsAtLeast("0.11.0") {
+		return
+	}
+
+	const (
+		MaxMessageBytes      = "max.message.bytes"
+		MaxMessageBytesValue = "200000"
+	)
+
+	client, shutdown := newLocalClient()
+	defer shutdown()
+
+	topic := makeTopic()
+	createTopic(t, topic, 1)
+	defer deleteTopic(t, topic)
+
+	_, err := client.AlterConfigs(context.Background(), &AlterConfigsRequest{
+		Resources: []AlterConfigRequestResource{{
+			ResourceType: ResourceTypeTopic,
+			ResourceName: topic,
+			Configs: []AlterConfigRequestConfig{{
+				Name:  MaxMessageBytes,
+				Value: MaxMessageBytesValue,
+			},
+			},
+		}},
+	})
+
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	describeResp, err := client.DescribeConfigs(context.Background(), &DescribeConfigsRequest{
+		Resources: []DescribeConfigRequestResource{{
+			ResourceType: ResourceTypeTopic,
+			ResourceName: topic,
+			ConfigNames:  []string{MaxMessageBytes},
+		}},
+	})
+
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	maxMessageBytesValue := "0"
+	for _, resource := range describeResp.Resources {
+		if resource.ResourceType == int8(ResourceTypeTopic) && resource.ResourceName == topic {
+			for _, entry := range resource.ConfigEntries {
+				if entry.ConfigName == MaxMessageBytes {
+					maxMessageBytesValue = entry.ConfigValue
+				}
+			}
+		}
+	}
+	assert.Equal(t, maxMessageBytesValue, MaxMessageBytesValue)
+}
diff -pruN 0.2.1-1.1/describegroups.go 0.4.49+ds1-1/describegroups.go
--- 0.2.1-1.1/describegroups.go	2018-11-20 17:06:32.000000000 +0000
+++ 0.4.49+ds1-1/describegroups.go	2025-08-21 19:15:53.000000000 +0000
@@ -1,186 +1,298 @@
 package kafka
 
-import "bufio"
+import (
+	"bufio"
+	"bytes"
+	"context"
+	"fmt"
+	"net"
+
+	"github.com/segmentio/kafka-go/protocol/describegroups"
+)
+
+// DescribeGroupsRequest is a request to the DescribeGroups API.
+type DescribeGroupsRequest struct {
+	// Addr is the address of the kafka broker to send the request to.
+	Addr net.Addr
 
-// See http://kafka.apache.org/protocol.html#The_Messages_DescribeGroups
-type describeGroupsRequestV1 struct {
-	// List of groupIds to request metadata for (an empty groupId array
-	// will return empty group metadata).
+	// GroupIDs is a slice of groups to get details for.
 	GroupIDs []string
 }
 
-func (t describeGroupsRequestV1) size() int32 {
-	return sizeofStringArray(t.GroupIDs)
+// DescribeGroupsResponse is a response from the DescribeGroups API.
+type DescribeGroupsResponse struct {
+	// Groups is a slice of details for the requested groups.
+	Groups []DescribeGroupsResponseGroup
 }
 
-func (t describeGroupsRequestV1) writeTo(w *bufio.Writer) {
-	writeStringArray(w, t.GroupIDs)
+// DescribeGroupsResponseGroup contains the response details for a single group.
+type DescribeGroupsResponseGroup struct {
+	// Error is set to a non-nil value if there was an error fetching the details
+	// for this group.
+	Error error
+
+	// GroupID is the ID of the group.
+	GroupID string
+
+	// GroupState is a description of the group state.
+	GroupState string
+
+	// Members contains details about each member of the group.
+	Members []DescribeGroupsResponseMember
 }
 
-type describeGroupsResponseMemberV1 struct {
-	// MemberID assigned by the group coordinator
+// DescribeGroupsResponseMember represents the membership information for a single group member.
+type DescribeGroupsResponseMember struct {
+	// MemberID is the ID of the group member.
 	MemberID string
 
-	// ClientID used in the member's latest join group request
+	// ClientID is the ID of the client that the group member is using.
 	ClientID string
 
-	// ClientHost used in the request session corresponding to the member's
-	// join group.
+	// ClientHost is the host of the client that the group member is connecting from.
 	ClientHost string
 
-	// MemberMetadata the metadata corresponding to the current group protocol
-	// in use (will only be present if the group is stable).
-	MemberMetadata []byte
-
-	// MemberAssignments provided by the group leader (will only be present if
-	// the group is stable).
-	//
-	// See consumer groups section of https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol
-	MemberAssignments []byte
-}
-
-func (t describeGroupsResponseMemberV1) size() int32 {
-	return sizeofString(t.MemberID) +
-		sizeofString(t.ClientID) +
-		sizeofString(t.ClientHost) +
-		sizeofBytes(t.MemberMetadata) +
-		sizeofBytes(t.MemberAssignments)
-}
-
-func (t describeGroupsResponseMemberV1) writeTo(w *bufio.Writer) {
-	writeString(w, t.MemberID)
-	writeString(w, t.ClientID)
-	writeString(w, t.ClientHost)
-	writeBytes(w, t.MemberMetadata)
-	writeBytes(w, t.MemberAssignments)
-}
+	// MemberMetadata contains metadata about this group member.
+	MemberMetadata DescribeGroupsResponseMemberMetadata
 
-func (t *describeGroupsResponseMemberV1) readFrom(r *bufio.Reader, size int) (remain int, err error) {
-	if remain, err = readString(r, size, &t.MemberID); err != nil {
-		return
-	}
-	if remain, err = readString(r, remain, &t.ClientID); err != nil {
-		return
-	}
-	if remain, err = readString(r, remain, &t.ClientHost); err != nil {
-		return
-	}
-	if remain, err = readBytes(r, remain, &t.MemberMetadata); err != nil {
-		return
-	}
-	if remain, err = readBytes(r, remain, &t.MemberAssignments); err != nil {
-		return
-	}
-	return
+	// MemberAssignments contains the topic partitions that this member is assigned to.
+	MemberAssignments DescribeGroupsResponseAssignments
 }
 
-type describeGroupsResponseGroupV1 struct {
-	// ErrorCode holds response error code
-	ErrorCode int16
+// DescribeGroupsResponseMemberMetadata stores metadata associated with a group member.
+type DescribeGroupsResponseMemberMetadata struct {
+	// Version is the version of the metadata.
+	Version int
 
-	// GroupID holds the unique group identifier
-	GroupID string
+	// Topics is the list of topics that the member is assigned to.
+	Topics []string
 
-	// State holds current state of the group (one of: Dead, Stable, AwaitingSync,
-	// PreparingRebalance, or empty if there is no active group)
-	State string
+	// UserData is the user data for the member.
+	UserData []byte
 
-	// ProtocolType holds the current group protocol type (will be empty if there is
-	// no active group)
-	ProtocolType string
+	// OwnedPartitions contains the partitions owned by this group member; only set if
+	// consumers are using a cooperative rebalancing assignor protocol.
+	OwnedPartitions []DescribeGroupsResponseMemberMetadataOwnedPartition
+}
 
-	// Protocol holds the current group protocol (only provided if the group is Stable)
-	Protocol string
+type DescribeGroupsResponseMemberMetadataOwnedPartition struct {
+	// Topic is the name of the topic.
+	Topic string
 
-	// Members contains the current group members (only provided if the group is not Dead)
-	Members []describeGroupsResponseMemberV1
+	// Partitions is the partitions that are owned by the group in the topic.
+	Partitions []int
 }
 
-func (t describeGroupsResponseGroupV1) size() int32 {
-	return sizeofInt16(t.ErrorCode) +
-		sizeofString(t.GroupID) +
-		sizeofString(t.State) +
-		sizeofString(t.ProtocolType) +
-		sizeofString(t.Protocol) +
-		sizeofArray(len(t.Members), func(i int) int32 { return t.Members[i].size() })
+// DescribeGroupsResponseAssignments stores the topic partition assignment data for a group member.
+type DescribeGroupsResponseAssignments struct {
+	// Version is the version of the assignments data.
+	Version int
+
+	// Topics contains the details of the partition assignments for each topic.
+	Topics []GroupMemberTopic
+
+	// UserData is the user data for the member.
+	UserData []byte
 }
 
-func (t describeGroupsResponseGroupV1) writeTo(w *bufio.Writer) {
-	writeInt16(w, t.ErrorCode)
-	writeString(w, t.GroupID)
-	writeString(w, t.State)
-	writeString(w, t.ProtocolType)
-	writeString(w, t.Protocol)
-	writeArray(w, len(t.Members), func(i int) { t.Members[i].writeTo(w) })
+// GroupMemberTopic is a mapping from a topic to a list of partitions in the topic. It is used
+// to represent the topic partitions that have been assigned to a group member.
+type GroupMemberTopic struct {
+	// Topic is the name of the topic.
+	Topic string
+
+	// Partitions is a slice of partition IDs that this member is assigned to in the topic.
+	Partitions []int
 }
 
-func (t *describeGroupsResponseGroupV1) readFrom(r *bufio.Reader, size int) (remain int, err error) {
-	if remain, err = readInt16(r, size, &t.ErrorCode); err != nil {
-		return
+// DescribeGroups calls the Kafka DescribeGroups API to get information about one or more
+// consumer groups. See https://kafka.apache.org/protocol#The_Messages_DescribeGroups for
+// more information.
+func (c *Client) DescribeGroups(
+	ctx context.Context,
+	req *DescribeGroupsRequest,
+) (*DescribeGroupsResponse, error) {
+	protoResp, err := c.roundTrip(
+		ctx,
+		req.Addr,
+		&describegroups.Request{
+			Groups: req.GroupIDs,
+		},
+	)
+	if err != nil {
+		return nil, err
 	}
-	if remain, err = readString(r, remain, &t.GroupID); err != nil {
-		return
+	apiResp := protoResp.(*describegroups.Response)
+	resp := &DescribeGroupsResponse{}
+
+	for _, apiGroup := range apiResp.Groups {
+		group := DescribeGroupsResponseGroup{
+			Error:      makeError(apiGroup.ErrorCode, ""),
+			GroupID:    apiGroup.GroupID,
+			GroupState: apiGroup.GroupState,
+		}
+
+		for _, member := range apiGroup.Members {
+			decodedMetadata, err := decodeMemberMetadata(member.MemberMetadata)
+			if err != nil {
+				return nil, err
+			}
+			decodedAssignments, err := decodeMemberAssignments(member.MemberAssignment)
+			if err != nil {
+				return nil, err
+			}
+
+			group.Members = append(group.Members, DescribeGroupsResponseMember{
+				MemberID:          member.MemberID,
+				ClientID:          member.ClientID,
+				ClientHost:        member.ClientHost,
+				MemberAssignments: decodedAssignments,
+				MemberMetadata:    decodedMetadata,
+			})
+		}
+		resp.Groups = append(resp.Groups, group)
 	}
-	if remain, err = readString(r, remain, &t.State); err != nil {
-		return
+
+	return resp, nil
+}
+
+// decodeMemberMetadata converts raw metadata bytes to a
+// DescribeGroupsResponseMemberMetadata struct.
+//
+// See https://github.com/apache/kafka/blob/2.4/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerProtocol.java#L49
+// for protocol details.
+func decodeMemberMetadata(rawMetadata []byte) (DescribeGroupsResponseMemberMetadata, error) {
+	mm := DescribeGroupsResponseMemberMetadata{}
+
+	if len(rawMetadata) == 0 {
+		return mm, nil
 	}
-	if remain, err = readString(r, remain, &t.ProtocolType); err != nil {
-		return
+
+	buf := bytes.NewBuffer(rawMetadata)
+	bufReader := bufio.NewReader(buf)
+	remain := len(rawMetadata)
+
+	var err error
+	var version16 int16
+
+	if remain, err = readInt16(bufReader, remain, &version16); err != nil {
+		return mm, err
 	}
-	if remain, err = readString(r, remain, &t.Protocol); err != nil {
-		return
+	mm.Version = int(version16)
+
+	if remain, err = readStringArray(bufReader, remain, &mm.Topics); err != nil {
+		return mm, err
+	}
+	if remain, err = readBytes(bufReader, remain, &mm.UserData); err != nil {
+		return mm, err
 	}
 
-	fn := func(r *bufio.Reader, size int) (fnRemain int, fnErr error) {
-		item := describeGroupsResponseMemberV1{}
-		if fnRemain, fnErr = (&item).readFrom(r, size); err != nil {
+	if mm.Version == 1 && remain > 0 {
+		fn := func(r *bufio.Reader, size int) (fnRemain int, fnErr error) {
+			op := DescribeGroupsResponseMemberMetadataOwnedPartition{}
+			if fnRemain, fnErr = readString(r, size, &op.Topic); fnErr != nil {
+				return
+			}
+
+			ps := []int32{}
+			if fnRemain, fnErr = readInt32Array(r, fnRemain, &ps); fnErr != nil {
+				return
+			}
+
+			for _, p := range ps {
+				op.Partitions = append(op.Partitions, int(p))
+			}
+
+			mm.OwnedPartitions = append(mm.OwnedPartitions, op)
 			return
 		}
-		t.Members = append(t.Members, item)
-		return
+
+		if remain, err = readArrayWith(bufReader, remain, fn); err != nil {
+			return mm, err
+		}
 	}
-	if remain, err = readArrayWith(r, remain, fn); err != nil {
-		return
+
+	if remain != 0 {
+		return mm, fmt.Errorf("Got non-zero number of bytes remaining: %d", remain)
 	}
 
-	return
+	return mm, nil
 }
 
-type describeGroupsResponseV1 struct {
-	// Duration in milliseconds for which the request was throttled due
-	// to quota violation (Zero if the request did not violate any quota)
-	ThrottleTimeMS int32
+// decodeMemberAssignments converts raw assignment bytes to a DescribeGroupsResponseAssignments
+// struct.
+//
+// See https://github.com/apache/kafka/blob/2.4/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerProtocol.java#L49
+// for protocol details.
+func decodeMemberAssignments(rawAssignments []byte) (DescribeGroupsResponseAssignments, error) {
+	ma := DescribeGroupsResponseAssignments{}
 
-	// Groups holds selected group information
-	Groups []describeGroupsResponseGroupV1
-}
+	if len(rawAssignments) == 0 {
+		return ma, nil
+	}
 
-func (t describeGroupsResponseV1) size() int32 {
-	return sizeofInt32(t.ThrottleTimeMS) +
-		sizeofArray(len(t.Groups), func(i int) int32 { return t.Groups[i].size() })
-}
+	buf := bytes.NewBuffer(rawAssignments)
+	bufReader := bufio.NewReader(buf)
+	remain := len(rawAssignments)
 
-func (t describeGroupsResponseV1) writeTo(w *bufio.Writer) {
-	writeInt32(w, t.ThrottleTimeMS)
-	writeArray(w, len(t.Groups), func(i int) { t.Groups[i].writeTo(w) })
-}
+	var err error
+	var version16 int16
+
+	if remain, err = readInt16(bufReader, remain, &version16); err != nil {
+		return ma, err
+	}
+	ma.Version = int(version16)
+
+	fn := func(r *bufio.Reader, size int) (fnRemain int, fnErr error) {
+		item := GroupMemberTopic{}
+
+		if fnRemain, fnErr = readString(r, size, &item.Topic); fnErr != nil {
+			return
+		}
+
+		partitions := []int32{}
+
+		if fnRemain, fnErr = readInt32Array(r, fnRemain, &partitions); fnErr != nil {
+			return
+		}
+		for _, partition := range partitions {
+			item.Partitions = append(item.Partitions, int(partition))
+		}
 
-func (t *describeGroupsResponseV1) readFrom(r *bufio.Reader, size int) (remain int, err error) {
-	if remain, err = readInt32(r, size, &t.ThrottleTimeMS); err != nil {
+		ma.Topics = append(ma.Topics, item)
 		return
 	}
+	if remain, err = readArrayWith(bufReader, remain, fn); err != nil {
+		return ma, err
+	}
+
+	if remain, err = readBytes(bufReader, remain, &ma.UserData); err != nil {
+		return ma, err
+	}
+
+	if remain != 0 {
+		return ma, fmt.Errorf("Got non-zero number of bytes remaining: %d", remain)
+	}
+
+	return ma, nil
+}
 
+// readInt32Array reads an array of int32s. It's adapted from the implementation of
+// readStringArray.
+func readInt32Array(r *bufio.Reader, sz int, v *[]int32) (remain int, err error) {
+	var content []int32
 	fn := func(r *bufio.Reader, size int) (fnRemain int, fnErr error) {
-		item := describeGroupsResponseGroupV1{}
-		if fnRemain, fnErr = (&item).readFrom(r, size); fnErr != nil {
+		var value int32
+		if fnRemain, fnErr = readInt32(r, size, &value); fnErr != nil {
 			return
 		}
-		t.Groups = append(t.Groups, item)
+		content = append(content, value)
 		return
 	}
-	if remain, err = readArrayWith(r, remain, fn); err != nil {
+	if remain, err = readArrayWith(r, sz, fn); err != nil {
 		return
 	}
 
+	*v = content
 	return
 }
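The exported DescribeGroups API introduced above can be called directly on a Client. A minimal sketch, assuming a local broker and an existing consumer group (both names are placeholders):

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/segmentio/kafka-go"
)

func main() {
	// Assumed broker address.
	client := &kafka.Client{Addr: kafka.TCP("localhost:9092")}

	resp, err := client.DescribeGroups(context.Background(), &kafka.DescribeGroupsRequest{
		GroupIDs: []string{"my-consumer-group"}, // assumed group ID
	})
	if err != nil {
		log.Fatal(err)
	}

	for _, g := range resp.Groups {
		if g.Error != nil {
			log.Printf("group %s: %v", g.GroupID, g.Error)
			continue
		}
		fmt.Printf("group %s is %s with %d member(s)\n", g.GroupID, g.GroupState, len(g.Members))
		for _, m := range g.Members {
			// MemberAssignments carries the decoded topic/partition ownership for each member.
			for _, t := range m.MemberAssignments.Topics {
				fmt.Printf("  member %s owns partitions %v of topic %s\n", m.MemberID, t.Partitions, t.Topic)
			}
		}
	}
}
```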
diff -pruN 0.2.1-1.1/describegroups_test.go 0.4.49+ds1-1/describegroups_test.go
--- 0.2.1-1.1/describegroups_test.go	2018-11-20 17:06:32.000000000 +0000
+++ 0.4.49+ds1-1/describegroups_test.go	2025-08-21 19:15:53.000000000 +0000
@@ -1,52 +1,130 @@
 package kafka
 
 import (
-	"bufio"
-	"bytes"
+	"context"
+	"fmt"
+	"os"
 	"reflect"
+	"sort"
 	"testing"
+	"time"
 )
 
-func TestDescribeGroupsResponseV1(t *testing.T) {
-	item := describeGroupsResponseV1{
-		ThrottleTimeMS: 1,
-		Groups: []describeGroupsResponseGroupV1{
-			{
-				ErrorCode:    2,
-				GroupID:      "a",
-				State:        "b",
-				ProtocolType: "c",
-				Protocol:     "d",
-				Members: []describeGroupsResponseMemberV1{
-					{
-						MemberID:          "e",
-						ClientID:          "f",
-						ClientHost:        "g",
-						MemberMetadata:    []byte("h"),
-						MemberAssignments: []byte("i"),
-					},
-				},
-			},
+func TestClientDescribeGroups(t *testing.T) {
+	if os.Getenv("KAFKA_VERSION") == "2.3.1" {
+		// There's a bug in 2.3.1 that causes the MemberMetadata to be in the wrong format and thus
+		// leads to an error when decoding the DescribeGroupsResponse.
+		//
+		// See https://issues.apache.org/jira/browse/KAFKA-9150 for details.
+		t.Skip("Skipping because kafka version is 2.3.1")
+	}
+
+	client, shutdown := newLocalClient()
+	defer shutdown()
+
+	topic := makeTopic()
+	gid := fmt.Sprintf("%s-test-group", topic)
+
+	createTopic(t, topic, 2)
+	defer deleteTopic(t, topic)
+
+	w := newTestWriter(WriterConfig{
+		Topic: topic,
+	})
+
+	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+	defer cancel()
+
+	err := w.WriteMessages(
+		ctx,
+		Message{
+			Key:   []byte("key"),
+			Value: []byte("value"),
 		},
+	)
+
+	if err != nil {
+		t.Fatal(err)
 	}
 
-	buf := bytes.NewBuffer(nil)
-	w := bufio.NewWriter(buf)
-	item.writeTo(w)
-	w.Flush()
+	r := NewReader(ReaderConfig{
+		Brokers:  []string{"localhost:9092"},
+		Topic:    topic,
+		GroupID:  gid,
+		MinBytes: 10,
+		MaxBytes: 1000,
+	})
+	_, err = r.ReadMessage(ctx)
+	if err != nil {
+		t.Fatal(err)
+	}
 
-	var found describeGroupsResponseV1
-	remain, err := (&found).readFrom(bufio.NewReader(buf), buf.Len())
+	resp, err := client.DescribeGroups(
+		ctx,
+		&DescribeGroupsRequest{
+			GroupIDs: []string{gid},
+		},
+	)
 	if err != nil {
-		t.Error(err)
-		t.FailNow()
+		t.Fatal(err)
+	}
+	if len(resp.Groups) != 1 {
+		t.Fatal(
+			"Unexpected number of groups returned",
+			"expected", 1,
+			"got", len(resp.Groups),
+		)
+	}
+	g := resp.Groups[0]
+	if g.Error != nil {
+		t.Error(
+			"Wrong error in group response",
+			"expected", nil,
+			"got", g.Error,
+		)
 	}
-	if remain != 0 {
-		t.Errorf("expected 0 remain, got %v", remain)
-		t.FailNow()
-	}
-	if !reflect.DeepEqual(item, found) {
-		t.Error("expected item and found to be the same")
-		t.FailNow()
+
+	if g.GroupID != gid {
+		t.Error(
+			"Wrong groupID",
+			"expected", gid,
+			"got", g.GroupID,
+		)
+	}
+
+	if len(g.Members) != 1 {
+		t.Fatal(
+			"Wrong group members length",
+			"expected", 1,
+			"got", len(g.Members),
+		)
+	}
+	if len(g.Members[0].MemberAssignments.Topics) != 1 {
+		t.Fatal(
+			"Wrong topics length",
+			"expected", 1,
+			"got", len(g.Members[0].MemberAssignments.Topics),
+		)
+	}
+	mt := g.Members[0].MemberAssignments.Topics[0]
+	if mt.Topic != topic {
+		t.Error(
+			"Wrong member assignment topic",
+			"expected", topic,
+			"got", mt.Topic,
+		)
+	}
+
+	// Partitions can be in any order, sort them
+	sort.Slice(mt.Partitions, func(a, b int) bool {
+		return mt.Partitions[a] < mt.Partitions[b]
+	})
+
+	if !reflect.DeepEqual([]int{0, 1}, mt.Partitions) {
+		t.Error(
+			"Wrong member assignment partitions",
+			"expected", []int{0, 1},
+			"got", mt.Partitions,
+		)
 	}
 }
diff -pruN 0.2.1-1.1/describeuserscramcredentials.go 0.4.49+ds1-1/describeuserscramcredentials.go
--- 0.2.1-1.1/describeuserscramcredentials.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/describeuserscramcredentials.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,97 @@
+package kafka
+
+import (
+	"context"
+	"fmt"
+	"net"
+	"time"
+
+	"github.com/segmentio/kafka-go/protocol/describeuserscramcredentials"
+)
+
+// DescribeUserScramCredentialsRequest represents a request sent to a kafka broker to
+// describe user scram credentials.
+type DescribeUserScramCredentialsRequest struct {
+	// Address of the kafka broker to send the request to.
+	Addr net.Addr
+
+	// List of Scram users to describe
+	Users []UserScramCredentialsUser
+}
+
+type UserScramCredentialsUser struct {
+	Name string
+}
+
+// DescribeUserScramCredentialsResponse represents a response from a kafka broker to a
+// describe user scram credentials request.
+type DescribeUserScramCredentialsResponse struct {
+	// The amount of time that the broker throttled the request.
+	Throttle time.Duration
+
+	// Top level error that occurred while attempting to describe
+	// the user scram credentials.
+	//
+	// The errors contain the kafka error code. Programs may use the standard
+	// errors.Is function to test the error against kafka error codes.
+	Error error
+
+	// List of described user scram credentials.
+	Results []DescribeUserScramCredentialsResponseResult
+}
+
+type DescribeUserScramCredentialsResponseResult struct {
+	User            string
+	CredentialInfos []DescribeUserScramCredentialsCredentialInfo
+	Error           error
+}
+
+type DescribeUserScramCredentialsCredentialInfo struct {
+	Mechanism  ScramMechanism
+	Iterations int
+}
+
+// DescribeUserScramCredentials sends a user scram credentials describe request to a kafka broker and returns
+// the response.
+func (c *Client) DescribeUserScramCredentials(ctx context.Context, req *DescribeUserScramCredentialsRequest) (*DescribeUserScramCredentialsResponse, error) {
+	users := make([]describeuserscramcredentials.RequestUser, len(req.Users))
+
+	for userIdx, user := range req.Users {
+		users[userIdx] = describeuserscramcredentials.RequestUser{
+			Name: user.Name,
+		}
+	}
+
+	m, err := c.roundTrip(ctx, req.Addr, &describeuserscramcredentials.Request{
+		Users: users,
+	})
+	if err != nil {
+		return nil, fmt.Errorf("kafka.(*Client).DescribeUserScramCredentials: %w", err)
+	}
+
+	res := m.(*describeuserscramcredentials.Response)
+	responseResults := make([]DescribeUserScramCredentialsResponseResult, len(res.Results))
+
+	for responseIdx, responseResult := range res.Results {
+		credentialInfos := make([]DescribeUserScramCredentialsCredentialInfo, len(responseResult.CredentialInfos))
+
+		for credentialInfoIdx, credentialInfo := range responseResult.CredentialInfos {
+			credentialInfos[credentialInfoIdx] = DescribeUserScramCredentialsCredentialInfo{
+				Mechanism:  ScramMechanism(credentialInfo.Mechanism),
+				Iterations: int(credentialInfo.Iterations),
+			}
+		}
+		responseResults[responseIdx] = DescribeUserScramCredentialsResponseResult{
+			User:            responseResult.User,
+			CredentialInfos: credentialInfos,
+			Error:           makeError(responseResult.ErrorCode, responseResult.ErrorMessage),
+		}
+	}
+	ret := &DescribeUserScramCredentialsResponse{
+		Throttle: makeDuration(res.ThrottleTimeMs),
+		Error:    makeError(res.ErrorCode, res.ErrorMessage),
+		Results:  responseResults,
+	}
+
+	return ret, nil
+}
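A minimal sketch of calling the new DescribeUserScramCredentials API; the broker address and user name are assumptions, and the credentials are expected to have been created beforehand (for example with AlterUserScramCredentials):

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/segmentio/kafka-go"
)

func main() {
	// Assumed broker address.
	client := &kafka.Client{Addr: kafka.TCP("localhost:9092")}

	resp, err := client.DescribeUserScramCredentials(context.Background(), &kafka.DescribeUserScramCredentialsRequest{
		Users: []kafka.UserScramCredentialsUser{{Name: "adminscram"}}, // assumed user name
	})
	if err != nil {
		log.Fatal(err)
	}
	if resp.Error != nil {
		log.Fatal(resp.Error)
	}

	for _, result := range resp.Results {
		if result.Error != nil {
			log.Printf("user %s: %v", result.User, result.Error)
			continue
		}
		for _, info := range result.CredentialInfos {
			fmt.Printf("user %s: mechanism=%v iterations=%d\n", result.User, info.Mechanism, info.Iterations)
		}
	}
}
```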
diff -pruN 0.2.1-1.1/describeuserscramcredentials_test.go 0.4.49+ds1-1/describeuserscramcredentials_test.go
--- 0.2.1-1.1/describeuserscramcredentials_test.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/describeuserscramcredentials_test.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,140 @@
+package kafka
+
+import (
+	"context"
+	"errors"
+	"testing"
+
+	ktesting "github.com/segmentio/kafka-go/testing"
+	"github.com/stretchr/testify/assert"
+)
+
+func TestDescribeUserScramCredentials(t *testing.T) {
+	// https://issues.apache.org/jira/browse/KAFKA-10259
+	if !ktesting.KafkaIsAtLeast("2.7.0") {
+		return
+	}
+
+	client, shutdown := newLocalClient()
+	defer shutdown()
+
+	name := makeTopic()
+
+	createRes, err := client.AlterUserScramCredentials(context.Background(), &AlterUserScramCredentialsRequest{
+		Upsertions: []UserScramCredentialsUpsertion{
+			{
+				Name:           name,
+				Mechanism:      ScramMechanismSha512,
+				Iterations:     15000,
+				Salt:           []byte("my-salt"),
+				SaltedPassword: []byte("my-salted-password"),
+			},
+		},
+	})
+
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if len(createRes.Results) != 1 {
+		t.Fatalf("expected 1 createResult; got %d", len(createRes.Results))
+	}
+
+	if createRes.Results[0].User != name {
+		t.Fatalf("expected createResult with user: %s, got %s", name, createRes.Results[0].User)
+	}
+
+	if createRes.Results[0].Error != nil {
+		t.Fatalf("didn't expect an error in createResult, got %v", createRes.Results[0].Error)
+	}
+
+	describeCreationRes, err := client.DescribeUserScramCredentials(context.Background(), &DescribeUserScramCredentialsRequest{
+		Users: []UserScramCredentialsUser{
+			{
+				Name: name,
+			},
+		},
+	})
+
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	expectedCreation := DescribeUserScramCredentialsResponse{
+		Throttle: makeDuration(0),
+		Error:    makeError(0, ""),
+		Results: []DescribeUserScramCredentialsResponseResult{
+			{
+				User: name,
+				CredentialInfos: []DescribeUserScramCredentialsCredentialInfo{
+					{
+						Mechanism:  ScramMechanismSha512,
+						Iterations: 15000,
+					},
+				},
+				Error: makeError(0, ""),
+			},
+		},
+	}
+
+	assert.Equal(t, expectedCreation, *describeCreationRes)
+
+	deleteRes, err := client.AlterUserScramCredentials(context.Background(), &AlterUserScramCredentialsRequest{
+		Deletions: []UserScramCredentialsDeletion{
+			{
+				Name:      name,
+				Mechanism: ScramMechanismSha512,
+			},
+		},
+	})
+
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if len(deleteRes.Results) != 1 {
+		t.Fatalf("expected 1 deleteResult; got %d", len(deleteRes.Results))
+	}
+
+	if deleteRes.Results[0].User != name {
+		t.Fatalf("expected deleteResult with user: %s, got %s", name, deleteRes.Results[0].User)
+	}
+
+	if deleteRes.Results[0].Error != nil {
+		t.Fatalf("didn't expect an error in deleteResult, got %v", deleteRes.Results[0].Error)
+	}
+
+	describeDeletionRes, err := client.DescribeUserScramCredentials(context.Background(), &DescribeUserScramCredentialsRequest{
+		Users: []UserScramCredentialsUser{
+			{
+				Name: name,
+			},
+		},
+	})
+
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if !errors.Is(describeDeletionRes.Error, makeError(0, "")) {
+		t.Fatalf("didn't expect a top level error on describe results after deletion, got %v", describeDeletionRes.Error)
+	}
+
+	if len(describeDeletionRes.Results) != 1 {
+		t.Fatalf("expected one describe results after deletion, got %d describe results", len(describeDeletionRes.Results))
+	}
+
+	result := describeDeletionRes.Results[0]
+
+	if result.User != name {
+		t.Fatalf("expected describeResult with user: %s, got %s", name, result.User)
+	}
+
+	if len(result.CredentialInfos) != 0 {
+		t.Fatalf("didn't expect describeResult credential infos, got %v", result.CredentialInfos)
+	}
+
+	if !errors.Is(result.Error, ResourceNotFound) {
+		t.Fatalf("expected describeResult resourcenotfound error, got %s", result.Error)
+	}
+}
diff -pruN 0.2.1-1.1/dialer.go 0.4.49+ds1-1/dialer.go
--- 0.2.1-1.1/dialer.go	2018-11-20 17:06:32.000000000 +0000
+++ 0.4.49+ds1-1/dialer.go	2025-08-21 19:15:53.000000000 +0000
@@ -3,9 +3,15 @@ package kafka
 import (
 	"context"
 	"crypto/tls"
+	"errors"
+	"fmt"
+	"io"
 	"net"
 	"strconv"
+	"strings"
 	"time"
+
+	"github.com/segmentio/kafka-go/sasl"
 )
 
 // The Dialer type mirrors the net.Dialer API but is designed to open kafka
@@ -14,6 +20,13 @@ type Dialer struct {
 	// Unique identifier for client connections established by this Dialer.
 	ClientID string
 
+	// Optionally specifies the function that the dialer uses to establish
+	// network connections. If nil, net.(*Dialer).DialContext is used instead.
+	//
+	// When DialFunc is set, LocalAddr, DualStack, FallbackDelay, and KeepAlive
+	// are ignored.
+	DialFunc func(ctx context.Context, network string, address string) (net.Conn, error)
+
 	// Timeout is the maximum amount of time a dial will wait for a connect to
 	// complete. If Deadline is also set, it may fail earlier.
 	//
@@ -54,12 +67,28 @@ type Dialer struct {
 	// support keep-alives ignore this field.
 	KeepAlive time.Duration
 
-	// Resolver optionally specifies an alternate resolver to use.
+	// Resolver optionally gives a hook to convert the broker address into an
+	// alternate host or IP address which is useful for custom service discovery.
+	// If a custom resolver returns any possible hosts, the first one will be
+	// used and the original discarded. If a port number is included with the
+	// resolved host, it will only be used if a port number was not previously
+	// specified. If no port is specified or resolved, the default of 9092 will be
+	// used.
 	Resolver Resolver
 
 	// TLS enables Dialer to open secure connections.  If nil, standard net.Conn
 	// will be used.
 	TLS *tls.Config
+
+	// SASLMechanism configures the Dialer to use SASL authentication.  If nil,
+	// no authentication will be performed.
+	SASLMechanism sasl.Mechanism
+
+	// The transactional id to use for transactional delivery. Idempotent
+	// delivery should be enabled if a transactional id is configured.
+	// For more details, see the transactional.id description here: http://kafka.apache.org/documentation.html#producerconfigs
+	// An empty string means that the connection will be non-transactional.
+	TransactionalID string
 }
 
 // Dial connects to the address on the named network.
@@ -81,23 +110,15 @@ func (d *Dialer) Dial(network string, ad
 // 1 minute, the connect to each single address will be given 15 seconds to
 // complete before trying the next one.
 func (d *Dialer) DialContext(ctx context.Context, network string, address string) (*Conn, error) {
-	if d.Timeout != 0 {
-		var cancel context.CancelFunc
-		ctx, cancel = context.WithTimeout(ctx, d.Timeout)
-		defer cancel()
-	}
-
-	if !d.Deadline.IsZero() {
-		var cancel context.CancelFunc
-		ctx, cancel = context.WithDeadline(ctx, d.Deadline)
-		defer cancel()
-	}
-
-	c, err := d.dialContext(ctx, network, address)
-	if err != nil {
-		return nil, err
-	}
-	return NewConnWith(c, ConnConfig{ClientID: d.ClientID}), nil
+	return d.connect(
+		ctx,
+		network,
+		address,
+		ConnConfig{
+			ClientID:        d.ClientID,
+			TransactionalID: d.TransactionalID,
+		},
+	)
 }
 
 // DialLeader opens a connection to the leader of the partition for a given
@@ -120,16 +141,14 @@ func (d *Dialer) DialLeader(ctx context.
 // descriptor. It's strongly advised to use descriptor of the partition that comes out of
 // functions LookupPartition or LookupPartitions.
 func (d *Dialer) DialPartition(ctx context.Context, network string, address string, partition Partition) (*Conn, error) {
-	c, err := d.dialContext(ctx, network, net.JoinHostPort(partition.Leader.Host, strconv.Itoa(partition.Leader.Port)))
-	if err != nil {
-		return nil, err
-	}
-
-	return NewConnWith(c, ConnConfig{
-		ClientID:  d.ClientID,
-		Topic:     partition.Topic,
-		Partition: partition.ID,
-	}), nil
+	return d.connect(ctx, network, net.JoinHostPort(partition.Leader.Host, strconv.Itoa(partition.Leader.Port)), ConnConfig{
+		ClientID:        d.ClientID,
+		Topic:           partition.Topic,
+		Partition:       partition.ID,
+		Broker:          partition.Leader.ID,
+		Rack:            partition.Leader.Rack,
+		TransactionalID: d.TransactionalID,
+	})
 }
 
 // LookupLeader searches for the kafka broker that is the leader of the
@@ -153,7 +172,10 @@ func (d *Dialer) LookupPartition(ctx con
 	go func() {
 		for attempt := 0; true; attempt++ {
 			if attempt != 0 {
-				sleep(ctx, backoff(attempt, 100*time.Millisecond, 10*time.Second))
+				if !sleep(ctx, backoff(attempt, 100*time.Millisecond, 10*time.Second)) {
+					errch <- ctx.Err()
+					return
+				}
 			}
 
 			partitions, err := c.ReadPartitions(topic)
@@ -215,9 +237,9 @@ func (d *Dialer) LookupPartitions(ctx co
 	return prt, err
 }
 
-// connectTLS returns a tls.Conn that has already completed the Handshake
-func (d *Dialer) connectTLS(ctx context.Context, conn net.Conn) (tlsConn *tls.Conn, err error) {
-	tlsConn = tls.Client(conn, d.TLS)
+// connectTLS returns a tls.Conn that has already completed the Handshake.
+func (d *Dialer) connectTLS(ctx context.Context, conn net.Conn, config *tls.Config) (tlsConn *tls.Conn, err error) {
+	tlsConn = tls.Client(conn, config)
 	errch := make(chan error)
 
 	go func() {
@@ -238,34 +260,120 @@ func (d *Dialer) connectTLS(ctx context.
 	return
 }
 
-func (d *Dialer) dialContext(ctx context.Context, network string, address string) (net.Conn, error) {
-	if r := d.Resolver; r != nil {
-		host, port := splitHostPort(address)
-		addrs, err := r.LookupHost(ctx, host)
+// connect opens a socket connection to the broker, wraps it to create a
+// kafka connection, and performs SASL authentication if configured to do so.
+func (d *Dialer) connect(ctx context.Context, network, address string, connCfg ConnConfig) (*Conn, error) {
+	if d.Timeout != 0 {
+		var cancel context.CancelFunc
+		ctx, cancel = context.WithTimeout(ctx, d.Timeout)
+		defer cancel()
+	}
+
+	if !d.Deadline.IsZero() {
+		var cancel context.CancelFunc
+		ctx, cancel = context.WithDeadline(ctx, d.Deadline)
+		defer cancel()
+	}
+
+	c, err := d.dialContext(ctx, network, address)
+	if err != nil {
+		return nil, fmt.Errorf("failed to dial: %w", err)
+	}
+
+	conn := NewConnWith(c, connCfg)
+
+	if d.SASLMechanism != nil {
+		host, port, err := splitHostPortNumber(address)
 		if err != nil {
-			return nil, err
+			return nil, fmt.Errorf("could not determine host/port for SASL authentication: %w", err)
 		}
-		if len(addrs) != 0 {
-			address = addrs[0]
+		metadata := &sasl.Metadata{
+			Host: host,
+			Port: port,
 		}
-		if len(port) != 0 {
-			address, _ = splitHostPort(address)
-			address = net.JoinHostPort(address, port)
+		if err := d.authenticateSASL(sasl.WithMetadata(ctx, metadata), conn); err != nil {
+			_ = conn.Close()
+			return nil, fmt.Errorf("could not successfully authenticate to %s:%d with SASL: %w", host, port, err)
 		}
 	}
 
-	conn, err := (&net.Dialer{
-		LocalAddr:     d.LocalAddr,
-		DualStack:     d.DualStack,
-		FallbackDelay: d.FallbackDelay,
-		KeepAlive:     d.KeepAlive,
-	}).DialContext(ctx, network, address)
+	return conn, nil
+}
+
+// authenticateSASL performs all of the required requests to authenticate this
+// connection.  If any step fails, this function returns with an error.  A nil
+// error indicates successful authentication.
+//
+// In case of error, this function *does not* close the connection.  That is the
+// responsibility of the caller.
+func (d *Dialer) authenticateSASL(ctx context.Context, conn *Conn) error {
+	if err := conn.saslHandshake(d.SASLMechanism.Name()); err != nil {
+		return fmt.Errorf("SASL handshake failed: %w", err)
+	}
+
+	sess, state, err := d.SASLMechanism.Start(ctx)
 	if err != nil {
-		return nil, err
+		return fmt.Errorf("SASL authentication process could not be started: %w", err)
+	}
+
+	for completed := false; !completed; {
+		challenge, err := conn.saslAuthenticate(state)
+		switch {
+		case err == nil:
+		case errors.Is(err, io.EOF):
+			// the broker may communicate a failed exchange by closing the
+			// connection (esp. in the case where we're passing opaque sasl
+			// data over the wire since there's no protocol info).
+			return SASLAuthenticationFailed
+		default:
+			return err
+		}
+
+		completed, state, err = sess.Next(ctx, challenge)
+		if err != nil {
+			return fmt.Errorf("SASL authentication process has failed: %w", err)
+		}
+	}
+
+	return nil
+}
+
+func (d *Dialer) dialContext(ctx context.Context, network string, addr string) (net.Conn, error) {
+	address, err := lookupHost(ctx, addr, d.Resolver)
+	if err != nil {
+		return nil, fmt.Errorf("failed to resolve host: %w", err)
+	}
+
+	dial := d.DialFunc
+	if dial == nil {
+		dial = (&net.Dialer{
+			LocalAddr:     d.LocalAddr,
+			DualStack:     d.DualStack,
+			FallbackDelay: d.FallbackDelay,
+			KeepAlive:     d.KeepAlive,
+		}).DialContext
+	}
+
+	conn, err := dial(ctx, network, address)
+	if err != nil {
+		return nil, fmt.Errorf("failed to open connection to %s: %w", address, err)
 	}
 
 	if d.TLS != nil {
-		return d.connectTLS(ctx, conn)
+		c := d.TLS
+		// If no ServerName is set, infer the ServerName
+		// from the hostname we're connecting to.
+		if c.ServerName == "" {
+			c = d.TLS.Clone()
+			// Copied from tls.go in the standard library.
+			colonPos := strings.LastIndex(address, ":")
+			if colonPos == -1 {
+				colonPos = len(address)
+			}
+			hostname := address[:colonPos]
+			c.ServerName = hostname
+		}
+		return d.connectTLS(ctx, conn, c)
 	}
 
 	return conn, nil
@@ -307,14 +415,6 @@ func LookupPartitions(ctx context.Contex
 	return DefaultDialer.LookupPartitions(ctx, network, address, topic)
 }
 
-// The Resolver interface is used as an abstraction to provide service discovery
-// of the hosts of a kafka cluster.
-type Resolver interface {
-	// LookupHost looks up the given host using the local resolver.
-	// It returns a slice of that host's addresses.
-	LookupHost(ctx context.Context, host string) (addrs []string, err error)
-}
-
 func sleep(ctx context.Context, duration time.Duration) bool {
 	if duration == 0 {
 		select {
@@ -342,10 +442,52 @@ func backoff(attempt int, min time.Durat
 	return d
 }
 
+func canonicalAddress(s string) string {
+	return net.JoinHostPort(splitHostPort(s))
+}
+
 func splitHostPort(s string) (host string, port string) {
 	host, port, _ = net.SplitHostPort(s)
 	if len(host) == 0 && len(port) == 0 {
 		host = s
+		port = "9092"
 	}
 	return
 }
+
+func splitHostPortNumber(s string) (host string, portNumber int, err error) {
+	host, port := splitHostPort(s)
+	portNumber, err = strconv.Atoi(port)
+	if err != nil {
+		return host, 0, fmt.Errorf("%s: %w", s, err)
+	}
+	return host, portNumber, nil
+}
+
+func lookupHost(ctx context.Context, address string, resolver Resolver) (string, error) {
+	host, port := splitHostPort(address)
+
+	if resolver != nil {
+		resolved, err := resolver.LookupHost(ctx, host)
+		if err != nil {
+			return "", fmt.Errorf("failed to resolve host %s: %w", host, err)
+		}
+
+		// if the resolver doesn't return anything, we'll fall back on the provided
+		// address instead
+		if len(resolved) > 0 {
+			resolvedHost, resolvedPort := splitHostPort(resolved[0])
+
+			// we'll always prefer the resolved host
+			host = resolvedHost
+
+			// in the case of port though, the provided address takes priority, and we
+			// only use the resolved address to set the port when not specified
+			if port == "" {
+				port = resolvedPort
+			}
+		}
+	}
+
+	return net.JoinHostPort(host, port), nil
+}
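A hedged sketch tying the new Dialer options together (custom Resolver, SASL, timeouts); the broker address, the SASL/PLAIN credentials, and the static host mapping are assumptions for illustration:

```go
package main

import (
	"context"
	"log"
	"time"

	"github.com/segmentio/kafka-go"
	"github.com/segmentio/kafka-go/sasl/plain"
)

// staticResolver maps a broker hostname to a fixed set of addresses, mimicking the
// custom service discovery hook described by the Resolver field.
type staticResolver struct{ addrs map[string][]string }

func (r staticResolver) LookupHost(ctx context.Context, host string) ([]string, error) {
	return r.addrs[host], nil
}

func main() {
	dialer := &kafka.Dialer{
		Timeout:   10 * time.Second,
		DualStack: true,
		// SASL/PLAIN credentials are assumptions; the broker must have a matching user.
		SASLMechanism: plain.Mechanism{Username: "adminplain", Password: "admin-secret"},
		// "kafka.internal" resolves to 127.0.0.1; since the resolved address carries no
		// port, the port from the dialed address is kept.
		Resolver: staticResolver{addrs: map[string][]string{"kafka.internal": {"127.0.0.1"}}},
	}

	conn, err := dialer.DialContext(context.Background(), "tcp", "kafka.internal:9093")
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	partitions, err := conn.ReadPartitions()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("connected; cluster reports %d partition(s)", len(partitions))
}
```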
diff -pruN 0.2.1-1.1/dialer_test.go 0.4.49+ds1-1/dialer_test.go
--- 0.2.1-1.1/dialer_test.go	2018-11-20 17:06:32.000000000 +0000
+++ 0.4.49+ds1-1/dialer_test.go	2025-08-21 19:15:53.000000000 +0000
@@ -4,6 +4,8 @@ import (
 	"context"
 	"crypto/tls"
 	"crypto/x509"
+	"errors"
+	"fmt"
 	"io"
 	"net"
 	"reflect"
@@ -26,8 +28,6 @@ func TestDialer(t *testing.T) {
 	for _, test := range tests {
 		testFunc := test.function
 		t.Run(test.scenario, func(t *testing.T) {
-			t.Parallel()
-
 			ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
 			defer cancel()
 
@@ -37,20 +37,19 @@ func TestDialer(t *testing.T) {
 }
 
 func testDialerLookupPartitions(t *testing.T, ctx context.Context, d *Dialer) {
-	const topic = "test-dialer-LookupPartitions"
-
-	createTopic(t, topic, 1)
+	client, topic, shutdown := newLocalClientAndTopic()
+	defer shutdown()
 
 	// Write a message to ensure the partition gets created.
-	w := NewWriter(WriterConfig{
-		Brokers: []string{"localhost:9092"},
-		Topic:   topic,
-	})
+	w := &Writer{
+		Addr:      TCP("localhost:9092"),
+		Topic:     topic,
+		Transport: client.Transport,
+	}
 	w.WriteMessages(ctx, Message{})
 	w.Close()
 
 	partitions, err := d.LookupPartitions(ctx, "tcp", "localhost:9092", topic)
-
 	if err != nil {
 		t.Error(err)
 		return
@@ -62,11 +61,12 @@ func testDialerLookupPartitions(t *testi
 
 	want := []Partition{
 		{
-			Topic:    "test-dialer-LookupPartitions",
-			Leader:   Broker{Host: "localhost", Port: 9092, ID: 1},
-			Replicas: []Broker{{Host: "localhost", Port: 9092, ID: 1}},
-			Isr:      []Broker{{Host: "localhost", Port: 9092, ID: 1}},
-			ID:       0,
+			Topic:           topic,
+			Leader:          Broker{Host: "localhost", Port: 9092, ID: 1},
+			Replicas:        []Broker{{Host: "localhost", Port: 9092, ID: 1}},
+			Isr:             []Broker{{Host: "localhost", Port: 9092, ID: 1}},
+			OfflineReplicas: []Broker{},
+			ID:              0,
 		},
 	}
 	if !reflect.DeepEqual(partitions, want) {
@@ -171,17 +171,15 @@ wE3YmpC3Q0g9r44nEbz4Bw==
 }
 
 func TestDialerTLS(t *testing.T) {
-	t.Parallel()
-
-	const topic = "test-dialer-LookupPartitions"
-
-	createTopic(t, topic, 1)
+	client, topic, shutdown := newLocalClientAndTopic()
+	defer shutdown()
 
 	// Write a message to ensure the partition gets created.
-	w := NewWriter(WriterConfig{
-		Brokers: []string{"localhost:9092"},
-		Topic:   topic,
-	})
+	w := &Writer{
+		Addr:      TCP("localhost:9092"),
+		Topic:     topic,
+		Transport: client.Transport,
+	}
 	w.WriteMessages(context.Background(), Message{})
 	w.Close()
 
@@ -233,11 +231,12 @@ func TestDialerTLS(t *testing.T) {
 
 	want := []Partition{
 		{
-			Topic:    topic,
-			Leader:   Broker{Host: "localhost", Port: 9092, ID: 1},
-			Replicas: []Broker{{Host: "localhost", Port: 9092, ID: 1}},
-			Isr:      []Broker{{Host: "localhost", Port: 9092, ID: 1}},
-			ID:       0,
+			Topic:           topic,
+			Leader:          Broker{Host: "localhost", Port: 9092, ID: 1},
+			Replicas:        []Broker{{Host: "localhost", Port: 9092, ID: 1}},
+			Isr:             []Broker{{Host: "localhost", Port: 9092, ID: 1}},
+			OfflineReplicas: []Broker{},
+			ID:              0,
 		},
 	}
 	if !reflect.DeepEqual(partitions, want) {
@@ -297,9 +296,112 @@ func TestDialerConnectTLSHonorsContext(t
 	ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond*25)
 	defer cancel()
 
-	_, err := d.connectTLS(ctx, conn)
-	if context.DeadlineExceeded != err {
+	_, err := d.connectTLS(ctx, conn, d.TLS)
+	if !errors.Is(err, context.DeadlineExceeded) {
 		t.Errorf("expected err to be %v; got %v", context.DeadlineExceeded, err)
 		t.FailNow()
 	}
 }
+
+func TestDialerResolver(t *testing.T) {
+	ctx := context.TODO()
+
+	tests := []struct {
+		scenario string
+		address  string
+		resolver map[string][]string
+	}{
+		{
+			scenario: "resolve domain to ip",
+			address:  "example.com",
+			resolver: map[string][]string{
+				"example.com": {"127.0.0.1"},
+			},
+		},
+		{
+			scenario: "resolve domain to ip and port",
+			address:  "example.com",
+			resolver: map[string][]string{
+				"example.com": {"127.0.0.1:9092"},
+			},
+		},
+		{
+			scenario: "resolve domain with port to ip",
+			address:  "example.com:9092",
+			resolver: map[string][]string{
+				"example.com": {"127.0.0.1:9092"},
+			},
+		},
+		{
+			scenario: "resolve domain with port to ip with different port",
+			address:  "example.com:9092",
+			resolver: map[string][]string{
+				"example.com": {"127.0.0.1:80"},
+			},
+		},
+		{
+			scenario: "resolve domain with port to ip",
+			address:  "example.com:9092",
+			resolver: map[string][]string{
+				"example.com": {"127.0.0.1"},
+			},
+		},
+	}
+
+	for _, test := range tests {
+		t.Run(test.scenario, func(t *testing.T) {
+			topic := makeTopic()
+			createTopic(t, topic, 1)
+			defer deleteTopic(t, topic)
+
+			d := Dialer{
+				Resolver: &mockResolver{addrs: test.resolver},
+			}
+
+			// Write a message to ensure the partition gets created.
+			w := NewWriter(WriterConfig{
+				Brokers: []string{"localhost:9092"},
+				Topic:   topic,
+				Dialer:  &d,
+			})
+			w.WriteMessages(context.Background(), Message{})
+			w.Close()
+
+			partitions, err := d.LookupPartitions(ctx, "tcp", test.address, topic)
+			if err != nil {
+				t.Error(err)
+				return
+			}
+
+			sort.Slice(partitions, func(i int, j int) bool {
+				return partitions[i].ID < partitions[j].ID
+			})
+
+			want := []Partition{
+				{
+					Topic:           topic,
+					Leader:          Broker{Host: "localhost", Port: 9092, ID: 1},
+					Replicas:        []Broker{{Host: "localhost", Port: 9092, ID: 1}},
+					Isr:             []Broker{{Host: "localhost", Port: 9092, ID: 1}},
+					OfflineReplicas: []Broker{},
+					ID:              0,
+				},
+			}
+			if !reflect.DeepEqual(partitions, want) {
+				t.Errorf("bad partitions:\ngot:  %+v\nwant: %+v", partitions, want)
+			}
+		})
+	}
+}
+
+type mockResolver struct {
+	addrs map[string][]string
+}
+
+func (mr *mockResolver) LookupHost(ctx context.Context, host string) ([]string, error) {
+	if addrs, ok := mr.addrs[host]; !ok {
+		return nil, fmt.Errorf("unrecognized host %s", host)
+	} else {
+		return addrs, nil
+	}
+}
diff -pruN 0.2.1-1.1/discard.go 0.4.49+ds1-1/discard.go
--- 0.2.1-1.1/discard.go	2018-11-20 17:06:32.000000000 +0000
+++ 0.4.49+ds1-1/discard.go	2025-08-21 19:15:53.000000000 +0000
@@ -15,22 +15,10 @@ func discardN(r *bufio.Reader, sz int, n
 	return sz - n, err
 }
 
-func discardInt8(r *bufio.Reader, sz int) (int, error) {
-	return discardN(r, sz, 1)
-}
-
-func discardInt16(r *bufio.Reader, sz int) (int, error) {
-	return discardN(r, sz, 2)
-}
-
 func discardInt32(r *bufio.Reader, sz int) (int, error) {
 	return discardN(r, sz, 4)
 }
 
-func discardInt64(r *bufio.Reader, sz int) (int, error) {
-	return discardN(r, sz, 8)
-}
-
 func discardString(r *bufio.Reader, sz int) (int, error) {
 	return readStringWith(r, sz, func(r *bufio.Reader, sz int, n int) (int, error) {
 		if n < 0 {
diff -pruN 0.2.1-1.1/discard_test.go 0.4.49+ds1-1/discard_test.go
--- 0.2.1-1.1/discard_test.go	2018-11-20 17:06:32.000000000 +0000
+++ 0.4.49+ds1-1/discard_test.go	2025-08-21 19:15:53.000000000 +0000
@@ -3,6 +3,7 @@ package kafka
 import (
 	"bufio"
 	"bytes"
+	"errors"
 	"io"
 	"testing"
 )
@@ -52,7 +53,7 @@ func TestDiscardN(t *testing.T) {
 			scenario: "discard more than available",
 			function: func(t *testing.T, r *bufio.Reader, sz int) {
 				remain, err := discardN(r, sz, sz+1)
-				if err != errShortRead {
+				if !errors.Is(err, errShortRead) {
 					t.Errorf("Expected errShortRead, got %v", err)
 				}
 				if remain != 0 {
@@ -64,7 +65,7 @@ func TestDiscardN(t *testing.T) {
 			scenario: "discard returns error",
 			function: func(t *testing.T, r *bufio.Reader, sz int) {
 				remain, err := discardN(r, sz+2, sz+1)
-				if err != io.EOF {
+				if !errors.Is(err, io.EOF) {
 					t.Errorf("Expected EOF, got %v", err)
 				}
 				if remain != 2 {
@@ -76,7 +77,7 @@ func TestDiscardN(t *testing.T) {
 			scenario: "errShortRead doesn't mask error",
 			function: func(t *testing.T, r *bufio.Reader, sz int) {
 				remain, err := discardN(r, sz+1, sz+2)
-				if err != io.EOF {
+				if !errors.Is(err, io.EOF) {
 					t.Errorf("Expected EOF, got %v", err)
 				}
 				if remain != 1 {
diff -pruN 0.2.1-1.1/docker-compose.yml 0.4.49+ds1-1/docker-compose.yml
--- 0.2.1-1.1/docker-compose.yml	2018-11-20 17:06:32.000000000 +0000
+++ 0.4.49+ds1-1/docker-compose.yml	2025-08-21 19:15:53.000000000 +0000
@@ -1,24 +1,39 @@
-version: "3"
+# See https://hub.docker.com/r/bitnami/kafka/tags for the complete list.
+version: '3'
 services:
+  zookeeper:
+    container_name: zookeeper
+    hostname: zookeeper
+    image: bitnamilegacy/zookeeper:latest
+    ports:
+      - 2181:2181
+    environment:
+      ALLOW_ANONYMOUS_LOGIN: yes
   kafka:
-    image: wurstmeister/kafka:0.11.0.1
+    container_name: kafka
+    image: bitnamilegacy/kafka:3.7.0
     restart: on-failure:3
     links:
       - zookeeper
     ports:
-      - "9092:9092"
+      - 9092:9092
+      - 9093:9093
     environment:
-      KAFKA_VERSION: '0.11.0.1'
-      KAFKA_BROKER_ID: 1
-      KAFKA_CREATE_TOPICS: 'test-writer-0:3:1,test-writer-1:3:1'
-      KAFKA_DELETE_TOPIC_ENABLE: 'true'
-      KAFKA_ADVERTISED_HOST_NAME: 'localhost'
-      KAFKA_ADVERTISED_PORT: '9092'
-      KAFKA_ZOOKEEPER_CONNECT: 'zookeeper:2181'
-      KAFKA_AUTO_CREATE_TOPICS_ENABLE: 'true'
-      KAFKA_MESSAGE_MAX_BYTES: 200000000
-
-  zookeeper:
-    image: wurstmeister/zookeeper
-    ports:
-      - "2181:2181"
+      KAFKA_CFG_BROKER_ID: 1
+      KAFKA_CFG_DELETE_TOPIC_ENABLE: 'true'
+      KAFKA_CFG_ADVERTISED_HOST_NAME: 'localhost'
+      KAFKA_CFG_ADVERTISED_PORT: '9092'
+      KAFKA_CFG_ZOOKEEPER_CONNECT: zookeeper:2181
+      KAFKA_CFG_AUTO_CREATE_TOPICS_ENABLE: 'true'
+      KAFKA_CFG_MESSAGE_MAX_BYTES: '200000000'
+      KAFKA_CFG_LISTENERS: 'PLAINTEXT://:9092,SASL_PLAINTEXT://:9093'
+      KAFKA_CFG_ADVERTISED_LISTENERS: 'PLAINTEXT://localhost:9092,SASL_PLAINTEXT://localhost:9093'
+      KAFKA_CFG_SASL_ENABLED_MECHANISMS: 'PLAIN,SCRAM-SHA-256,SCRAM-SHA-512'
+      KAFKA_CFG_AUTHORIZER_CLASS_NAME: 'kafka.security.authorizer.AclAuthorizer'
+      KAFKA_CFG_ALLOW_EVERYONE_IF_NO_ACL_FOUND: 'true'
+      KAFKA_OPTS: "-Djava.security.auth.login.config=/opt/bitnami/kafka/config/kafka_jaas.conf"
+      ALLOW_PLAINTEXT_LISTENER: yes
+    entrypoint:
+      - "/bin/bash"
+      - "-c"
+      - echo -e 'KafkaServer {\norg.apache.kafka.common.security.scram.ScramLoginModule required\n username="adminscram"\n password="admin-secret";\n org.apache.kafka.common.security.plain.PlainLoginModule required\n username="adminplain"\n password="admin-secret"\n user_adminplain="admin-secret";\n  };' > /opt/bitnami/kafka/config/kafka_jaas.conf; /opt/bitnami/kafka/bin/kafka-configs.sh --zookeeper zookeeper:2181 --alter --add-config "SCRAM-SHA-256=[password=admin-secret-256],SCRAM-SHA-512=[password=admin-secret-512]" --entity-type users --entity-name adminscram; exec /entrypoint.sh /run.sh
diff -pruN 0.2.1-1.1/docker_compose_versions/README.md 0.4.49+ds1-1/docker_compose_versions/README.md
--- 0.2.1-1.1/docker_compose_versions/README.md	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/docker_compose_versions/README.md	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,152 @@
+# Bitnami Kafka
+
+This document outlines how to create a docker-compose file for a specific Bitnami Kafka version.
+
+
+## Steps to create docker-compose
+
+- Refer to [docker-hub Bitnami Kafka tags](https://hub.docker.com/r/bitnamilegacy/kafka/tags) and sort by NEWEST to locate the preferred image, for example `2.7.0`.
+- There is documentation in the [main branch](https://github.com/bitnami/containers/blob/main/bitnami/kafka/README.md) for environment config setup information. Refer to the `Notable Changes` section.
+- Sometimes there is a need to understand how the setup is done. To locate the appropriate Kafka release in the repo [bitnami/containers](https://github.com/bitnami/containers), go through the [kafka commit history](https://github.com/bitnami/containers/commits/main/bitnami/kafka).
+- Once a commit is located, refer to the README.md, Dockerfile, entrypoint, and various init scripts to understand how environment variables map to server.properties configs. Alternatively, you can spin up the required Kafka image and inspect the mapping inside the container.
+- Ensure you follow the environment variable conventions in your docker-compose. Without proper environment variables, the Kafka cluster cannot start, or it can start with undesired configs. For example, since Kafka version 2.3, all server.properties configs are set through docker-compose environment variables named `KAFKA_CFG_<config_with_underscore>` (see the mapping sketch after this list).
+- Older versions of Bitnami Kafka follow different conventions and expose only a limited set of docker-compose environment variables for the configs needed in server.properties.
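+
+As a rough illustration of that convention (the entries below are examples taken from the compose files in this repo), a server.properties entry maps to a docker-compose environment variable by upper-casing it, replacing dots with underscores, and prefixing `KAFKA_CFG_`:
+
+```
+# server.properties entry                                    ->  docker-compose environment variable
+auto.create.topics.enable=true                               ->  KAFKA_CFG_AUTO_CREATE_TOPICS_ENABLE: 'true'
+message.max.bytes=200000000                                  ->  KAFKA_CFG_MESSAGE_MAX_BYTES: '200000000'
+sasl.enabled.mechanisms=PLAIN,SCRAM-SHA-256,SCRAM-SHA-512    ->  KAFKA_CFG_SASL_ENABLED_MECHANISMS: 'PLAIN,SCRAM-SHA-256,SCRAM-SHA-512'
+```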
+
+
+In kafka-go, for all the test cases to succeed, the Kafka cluster should have the following server.properties along with a relevant kafka_jaas.conf referenced in KAFKA_OPTS. The goal is to ensure that the docker-compose file generates the server.properties shown below.
+
+
+server.properties
+```
+advertised.host.name=localhost
+advertised.listeners=PLAINTEXT://localhost:9092,SASL_PLAINTEXT://localhost:9093
+advertised.port=9092
+auto.create.topics.enable=true
+broker.id=1
+delete.topic.enable=true
+group.initial.rebalance.delay.ms=0
+listeners=PLAINTEXT://:9092,SASL_PLAINTEXT://:9093
+log.dirs=/kafka/kafka-logs-1d5951569d78
+log.retention.check.interval.ms=300000
+log.retention.hours=168
+log.segment.bytes=1073741824
+message.max.bytes=200000000
+num.io.threads=8
+num.network.threads=3
+num.partitions=1
+num.recovery.threads.per.data.dir=1
+offsets.topic.replication.factor=1
+port=9092
+sasl.enabled.mechanisms=PLAIN,SCRAM-SHA-256,SCRAM-SHA-512
+socket.receive.buffer.bytes=102400
+socket.request.max.bytes=104857600
+socket.send.buffer.bytes=102400
+transaction.state.log.min.isr=1
+transaction.state.log.replication.factor=1
+zookeeper.connect=zookeeper:2181
+zookeeper.connection.timeout.ms=6000
+```
+
+
+## Run docker-compose and test cases
+
+Run docker-compose:
+```
+# docker-compose -f ./docker_compose_versions/docker-compose-<kafka_version>.yml up -d
+```
+
+
+Run the test cases:
+```
+# go clean -cache; KAFKA_SKIP_NETTEST=1 KAFKA_VERSION=<a.b.c> go test -race -cover ./...;
+```
+
+
+## Various Bitnami Kafka version issues observed in CircleCI
+
+
+### Kafka v101, v111, v201, v211 and v221
+
+
+In the kafka-go repo, all the tests require the Kafka cluster to have sasl.enabled.mechanisms set to PLAIN,SCRAM-SHA-256,SCRAM-SHA-512.
+
+
+It has been observed that Kafka v101, v111, v201, v211, and v221, which are used in the CircleCI build, have issues with SCRAM.
+
+
+There is no way to override the sasl.enabled.mechanisms config, causing the Kafka cluster to start up with PLAIN only.
+
+
+Several attempts have been made to override sasl.enabled.mechanisms:
+- Modified the entrypoint in docker-compose to append the relevant sasl.enabled.mechanisms config to server.properties before running entrypoint.sh. This resulted in failures for Kafka v101, v111, v201, v211, and v221: once the Kafka server starts, server.properties gets appended with the default value of sasl.enabled.mechanisms, so the cluster does not start with PLAIN,SCRAM-SHA-256,SCRAM-SHA-512.
+- Mounted a docker-compose volume for server.properties. However, this also resulted in failures for Kafka v101, v111, v201, v211, and v221: once the Kafka server starts, server.properties gets appended with the default value of sasl.enabled.mechanisms, so the cluster does not start with PLAIN,SCRAM-SHA-256,SCRAM-SHA-512.
+
+
+NOTE:
+- Kafka v101, v111, v201, v211, and v221 have no docker-compose files since we need SCRAM for the kafka-go test cases to succeed.
+- There is no Bitnami Kafka image for v222, hence testing has been performed on v221.
+
+
+### Kafka v231
+
+In Bitnami Kafka v2.3, all server.properties docker-compose environment configs start with `KAFKA_CFG_<config_with_underscore>`. However, it does not pick up the custom kafka_jaas.conf.
+
+
+After a lot of debugging, it was noticed that there aren't enough privileges to create kafka_jaas.conf. Hence, the environment variables below need to be added to docker-compose to generate kafka_jaas.conf. This issue is not seen after Kafka v2.3.
+
+
+```
+KAFKA_INTER_BROKER_USER: adminplain
+KAFKA_INTER_BROKER_PASSWORD: admin-secret
+KAFKA_BROKER_USER: adminplain
+KAFKA_BROKER_PASSWORD: admin-secret
+```
+
+There is a docker-compose file `docker-compose-231.yml` in the folder `kafka-go/docker_compose_versions` for reference.
+
+
+## References
+
+
+For reference, below are some of the older Kafka version commits from the [kafka commit history](https://github.com/bitnami/containers/commits/main/bitnami/kafka). For Kafka versions with no commit history, data is populated with the latest version available for the tag.
+
+
+### Kafka v010: docker-compose reference: `kafka-go/docker_compose_versions/docker-compose-010.yml`
+- [tag](https://hub.docker.com/r/bitnamilegacy/kafka/tags?page=1&ordering=last_updated&name=0.10.2.1)
+- [kafka commit](https://github.com/bitnami/containers/tree/c4240f0525916a418245c7ef46d9534a7a212c92/bitnami/kafka)
+
+
+### Kafka v011: docker-compose reference: `kafka-go/docker_compose_versions/docker-compose-011.yml`
+- [tag](https://hub.docker.com/r/bitnamilegacy/kafka/tags?page=1&ordering=last_updated&name=0.11.0)
+- [kafka commit](https://github.com/bitnami/containers/tree/7724adf655e4ca9aac69d606d41ad329ef31eeca/bitnami/kafka)
+
+
+### Kafka v101: docker-compose reference: N/A
+- [tag](https://hub.docker.com/r/bitnamilegacy/kafka/tags?page=1&ordering=last_updated&name=1.0.1)
+- [kafka commit](https://github.com/bitnami/containers/tree/44cc8f4c43ead6edebd3758c8df878f4f9da82c2/bitnami/kafka)
+
+
+### Kafka v111: docker-compose reference: N/A
+- [tag](https://hub.docker.com/r/bitnamilegacy/kafka/tags?page=1&ordering=last_updated&name=1.1.1)
+- [kafka commit](https://github.com/bitnami/containers/tree/cb593dc98c2eb7a39f2792641e741d395dbe50e7/bitnami/kafka)
+
+
+### Kafka v201: docker-compose reference: N/A
+- [tag](https://hub.docker.com/r/bitnamilegacy/kafka/tags?page=1&ordering=last_updated&name=2.0.1)
+- [kafka commit](https://github.com/bitnami/containers/tree/9ff8763df265c87c8b59f8d7ff0cf69299d636c9/bitnami/kafka)
+
+
+### Kafka v211: docker-compose reference: N/A
+- [tag](https://hub.docker.com/r/bitnamilegacy/kafka/tags?page=1&ordering=last_updated&name=2.1.1)
+- [kafka commit](https://github.com/bitnami/containers/tree/d3a9d40afc2b7e7de53486538a63084c1a565d43/bitnami/kafka)
+
+
+### Kafka v221: docker-compose reference: N/A
+- [tag](https://hub.docker.com/r/bitnamilegacy/kafka/tags?page=1&ordering=last_updated&name=2.2.1)
+- [kafka commit](https://github.com/bitnami/containers/tree/f132ef830d1ba9b78392ec4619174b4640c276c9/bitnami/kafka)
+
+
+### Kafka v231: docker-compose reference: `kafka-go/docker_compose_versions/docker-compose-231.yml`
+- [tag](https://hub.docker.com/r/bitnamilegacy/kafka/tags?page=1&ordering=last_updated&name=2.3.1)
+- [kafka commit](https://github.com/bitnami/containers/tree/ae572036b5281456b0086345fec0bdb74f7cf3a3/bitnami/kafka)
+
diff -pruN 0.2.1-1.1/docker_compose_versions/docker-compose-270.yml 0.4.49+ds1-1/docker_compose_versions/docker-compose-270.yml
--- 0.2.1-1.1/docker_compose_versions/docker-compose-270.yml	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/docker_compose_versions/docker-compose-270.yml	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,39 @@
+# See https://hub.docker.com/r/bitnamilegacy/kafka/tags for the complete list.
+version: '3'
+services:
+  zookeeper:
+    container_name: zookeeper
+    hostname: zookeeper
+    image: bitnamilegacy/zookeeper:latest
+    ports:
+      - 2181:2181
+    environment:
+      ALLOW_ANONYMOUS_LOGIN: yes
+  kafka:
+    container_name: kafka
+    image: bitnamilegacy/kafka:2.7.0
+    restart: on-failure:3
+    links:
+      - zookeeper
+    ports:
+      - 9092:9092
+      - 9093:9093
+    environment:
+      KAFKA_CFG_BROKER_ID: 1
+      KAFKA_CFG_DELETE_TOPIC_ENABLE: 'true'
+      KAFKA_CFG_ADVERTISED_HOST_NAME: 'localhost'
+      KAFKA_CFG_ADVERTISED_PORT: '9092'
+      KAFKA_CFG_ZOOKEEPER_CONNECT: zookeeper:2181
+      KAFKA_CFG_AUTO_CREATE_TOPICS_ENABLE: 'true'
+      KAFKA_CFG_MESSAGE_MAX_BYTES: '200000000'
+      KAFKA_CFG_LISTENERS: 'PLAINTEXT://:9092,SASL_PLAINTEXT://:9093'
+      KAFKA_CFG_ADVERTISED_LISTENERS: 'PLAINTEXT://localhost:9092,SASL_PLAINTEXT://localhost:9093'
+      KAFKA_CFG_SASL_ENABLED_MECHANISMS: 'PLAIN,SCRAM-SHA-256,SCRAM-SHA-512'
+      KAFKA_CFG_AUTHORIZER_CLASS_NAME: 'kafka.security.auth.SimpleAclAuthorizer'
+      KAFKA_CFG_ALLOW_EVERYONE_IF_NO_ACL_FOUND: 'true'
+      KAFKA_OPTS: "-Djava.security.auth.login.config=/opt/bitnami/kafka/config/kafka_jaas.conf"
+      ALLOW_PLAINTEXT_LISTENER: yes
+    entrypoint:
+      - "/bin/bash"
+      - "-c"
+      - echo -e 'KafkaServer {\norg.apache.kafka.common.security.scram.ScramLoginModule required\n username="adminscram"\n password="admin-secret";\n org.apache.kafka.common.security.plain.PlainLoginModule required\n username="adminplain"\n password="admin-secret"\n user_adminplain="admin-secret";\n  };' > /opt/bitnami/kafka/config/kafka_jaas.conf; /opt/bitnami/kafka/bin/kafka-configs.sh --zookeeper zookeeper:2181 --alter --add-config "SCRAM-SHA-256=[password=admin-secret-256],SCRAM-SHA-512=[password=admin-secret-512]" --entity-type users --entity-name adminscram; exec /entrypoint.sh /run.sh
diff -pruN 0.2.1-1.1/docker_compose_versions/docker-compose-370.yml 0.4.49+ds1-1/docker_compose_versions/docker-compose-370.yml
--- 0.2.1-1.1/docker_compose_versions/docker-compose-370.yml	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/docker_compose_versions/docker-compose-370.yml	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,39 @@
+# See https://hub.docker.com/r/bitnamilegacy/kafka/tags for the complete list.
+version: '3'
+services:
+  zookeeper:
+    container_name: zookeeper
+    hostname: zookeeper
+    image: bitnamilegacy/zookeeper:latest
+    ports:
+      - 2181:2181
+    environment:
+      ALLOW_ANONYMOUS_LOGIN: yes
+  kafka:
+    container_name: kafka
+    image: bitnamilegacy/kafka:3.7.0
+    restart: on-failure:3
+    links:
+      - zookeeper
+    ports:
+      - 9092:9092
+      - 9093:9093
+    environment:
+      KAFKA_CFG_BROKER_ID: 1
+      KAFKA_CFG_DELETE_TOPIC_ENABLE: 'true'
+      KAFKA_CFG_ADVERTISED_HOST_NAME: 'localhost'
+      KAFKA_CFG_ADVERTISED_PORT: '9092'
+      KAFKA_CFG_ZOOKEEPER_CONNECT: zookeeper:2181
+      KAFKA_CFG_AUTO_CREATE_TOPICS_ENABLE: 'true'
+      KAFKA_CFG_MESSAGE_MAX_BYTES: '200000000'
+      KAFKA_CFG_LISTENERS: 'PLAINTEXT://:9092,SASL_PLAINTEXT://:9093'
+      KAFKA_CFG_ADVERTISED_LISTENERS: 'PLAINTEXT://localhost:9092,SASL_PLAINTEXT://localhost:9093'
+      KAFKA_CFG_SASL_ENABLED_MECHANISMS: 'PLAIN,SCRAM-SHA-256,SCRAM-SHA-512'
+      KAFKA_CFG_AUTHORIZER_CLASS_NAME: 'kafka.security.authorizer.AclAuthorizer'
+      KAFKA_CFG_ALLOW_EVERYONE_IF_NO_ACL_FOUND: 'true'
+      KAFKA_OPTS: "-Djava.security.auth.login.config=/opt/bitnami/kafka/config/kafka_jaas.conf"
+      ALLOW_PLAINTEXT_LISTENER: yes
+    entrypoint:
+      - "/bin/bash"
+      - "-c"
+      - echo -e 'KafkaServer {\norg.apache.kafka.common.security.scram.ScramLoginModule required\n username="adminscram"\n password="admin-secret";\n org.apache.kafka.common.security.plain.PlainLoginModule required\n username="adminplain"\n password="admin-secret"\n user_adminplain="admin-secret";\n  };' > /opt/bitnami/kafka/config/kafka_jaas.conf; /opt/bitnami/kafka/bin/kafka-configs.sh --zookeeper zookeeper:2181 --alter --add-config "SCRAM-SHA-256=[password=admin-secret-256],SCRAM-SHA-512=[password=admin-secret-512]" --entity-type users --entity-name adminscram; exec /entrypoint.sh /run.sh
diff -pruN 0.2.1-1.1/docker_compose_versions/docker-compose-400.yml 0.4.49+ds1-1/docker_compose_versions/docker-compose-400.yml
--- 0.2.1-1.1/docker_compose_versions/docker-compose-400.yml	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/docker_compose_versions/docker-compose-400.yml	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,40 @@
+# See https://hub.docker.com/r/bitnamilegacy/kafka/tags for the complete list.
+version: '3'
+services:
+  kafka:
+    container_name: kafka
+    image: bitnamilegacy/kafka:4.0.0
+    restart: on-failure:3
+    ports:
+      - 9092:9092
+      - 9093:9093
+    environment:
+      KAFKA_CFG_NODE_ID: 1
+      KAFKA_CFG_BROKER_ID: 1
+      KAFKA_CFG_PROCESS_ROLES: broker,controller
+      KAFKA_CFG_ADVERTISED_HOST_NAME: 'localhost'
+      KAFKA_CFG_CONTROLLER_LISTENER_NAMES: CONTROLLER
+      KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP: CONTROLLER:PLAINTEXT,PLAIN:PLAINTEXT,SASL:SASL_PLAINTEXT
+      KAFKA_CFG_LISTENERS: CONTROLLER://:9094,PLAIN://:9092,SASL://:9093
+      KAFKA_CFG_ADVERTISED_LISTENERS: PLAIN://localhost:9092,SASL://localhost:9093
+      KAFKA_CFG_INTER_BROKER_LISTENER_NAME: PLAIN
+      KAFKA_CFG_SASL_ENABLED_MECHANISMS: 'PLAIN,SCRAM-SHA-256,SCRAM-SHA-512'
+      KAFKA_CFG_CONTROLLER_QUORUM_VOTERS: 1@localhost:9094
+      ALLOW_PLAINTEXT_LISTENER: yes
+      KAFKA_CFG_ALLOW_EVERYONE_IF_NO_ACL_FOUND: 'true'
+      KAFKA_OPTS: "-Djava.security.auth.login.config=/opt/bitnami/kafka/config/kafka_jaas.conf"
+      KAFKA_CFG_AUTO_CREATE_TOPICS_ENABLE: 'true'
+      KAFKA_CFG_DELETE_TOPIC_ENABLE: 'true'
+      KAFKA_CFG_MESSAGE_MAX_BYTES: '200000000'
+      KAFKA_CFG_AUTHORIZER_CLASS_NAME: 'org.apache.kafka.metadata.authorizer.StandardAuthorizer'
+      KAFKA_CFG_SUPER_USERS: User:adminscram256;User:adminscram512;User:adminplain
+      KAFKA_CLIENT_USERS: adminscram256,adminscram512,adminplain
+      KAFKA_CLIENT_PASSWORDS: admin-secret-256,admin-secret-512,admin-secret
+      KAFKA_CLIENT_SASL_MECHANISMS: SCRAM-SHA-256,SCRAM-SHA-512,PLAIN
+      KAFKA_INTER_BROKER_USER: adminscram512
+      KAFKA_INTER_BROKER_PASSWORD: admin-secret-512
+      KAFKA_CFG_SASL_MECHANISM_INTER_BROKER_PROTOCOL: SCRAM-SHA-512
+      # Note: you will need to increase this to at least 4GB of memory for the tests to pass
+      # https://github.com/segmentio/kafka-go/issues/1360#issuecomment-2858935900
+      KAFKA_HEAP_OPTS: '-Xmx1000m -Xms1000m'
+      KAFKA_JVM_OPTS: '-XX:+UseG1GC'
\ No newline at end of file
diff -pruN 0.2.1-1.1/electleaders.go 0.4.49+ds1-1/electleaders.go
--- 0.2.1-1.1/electleaders.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/electleaders.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,89 @@
+package kafka
+
+import (
+	"context"
+	"net"
+	"time"
+
+	"github.com/segmentio/kafka-go/protocol/electleaders"
+)
+
+// ElectLeadersRequest is a request to the ElectLeaders API.
+type ElectLeadersRequest struct {
+	// Addr is the address of the kafka broker to send the request to.
+	Addr net.Addr
+
+	// Topic is the name of the topic to do the leader elections in.
+	Topic string
+
+	// Partitions is the list of partitions to run leader elections for.
+	Partitions []int
+
+	// Timeout is the amount of time to wait for the election to run.
+	Timeout time.Duration
+}
+
+// ElectLeadersResponse is a response from the ElectLeaders API.
+type ElectLeadersResponse struct {
+	// Error is set to a non-nil value if a top-level error occurred.
+	Error error
+
+	// PartitionResults contains the results for each partition leader election.
+	PartitionResults []ElectLeadersResponsePartitionResult
+}
+
+// ElectLeadersResponsePartitionResult contains the response details for a single partition.
+type ElectLeadersResponsePartitionResult struct {
+	// Partition is the ID of the partition.
+	Partition int
+
+	// Error is set to a non-nil value if an error occurred electing leaders
+	// for this partition.
+	Error error
+}
+
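+// ElectLeaders triggers leader elections for the requested partitions of the
+// topic named in the request, on the broker referenced by req.Addr.
+//
+// A minimal usage sketch (assuming an existing *kafka.Client named client and
+// a context ctx; names and values are illustrative only):
+//
+//	resp, err := client.ElectLeaders(ctx, &kafka.ElectLeadersRequest{
+//		Topic:      "my-topic",
+//		Partitions: []int{0, 1},
+//	})
+//	if err != nil {
+//		// handle the transport error
+//	}
+//	for _, result := range resp.PartitionResults {
+//		// result.Error reports the outcome of each partition's election
+//	}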
+func (c *Client) ElectLeaders(
+	ctx context.Context,
+	req *ElectLeadersRequest,
+) (*ElectLeadersResponse, error) {
+	partitions32 := []int32{}
+	for _, partition := range req.Partitions {
+		partitions32 = append(partitions32, int32(partition))
+	}
+
+	protoResp, err := c.roundTrip(
+		ctx,
+		req.Addr,
+		&electleaders.Request{
+			TopicPartitions: []electleaders.RequestTopicPartitions{
+				{
+					Topic:        req.Topic,
+					PartitionIDs: partitions32,
+				},
+			},
+			TimeoutMs: int32(req.Timeout.Milliseconds()),
+		},
+	)
+	if err != nil {
+		return nil, err
+	}
+	apiResp := protoResp.(*electleaders.Response)
+
+	resp := &ElectLeadersResponse{
+		Error: makeError(apiResp.ErrorCode, ""),
+	}
+
+	for _, topicResult := range apiResp.ReplicaElectionResults {
+		for _, partitionResult := range topicResult.PartitionResults {
+			resp.PartitionResults = append(
+				resp.PartitionResults,
+				ElectLeadersResponsePartitionResult{
+					Partition: int(partitionResult.PartitionID),
+					Error:     makeError(partitionResult.ErrorCode, partitionResult.ErrorMessage),
+				},
+			)
+		}
+	}
+
+	return resp, nil
+}
diff -pruN 0.2.1-1.1/electleaders_test.go 0.4.49+ds1-1/electleaders_test.go
--- 0.2.1-1.1/electleaders_test.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/electleaders_test.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,49 @@
+package kafka
+
+import (
+	"context"
+	"testing"
+
+	ktesting "github.com/segmentio/kafka-go/testing"
+)
+
+func TestClientElectLeaders(t *testing.T) {
+	if !ktesting.KafkaIsAtLeast("2.4.0") {
+		return
+	}
+
+	ctx := context.Background()
+	client, shutdown := newLocalClient()
+	defer shutdown()
+
+	topic := makeTopic()
+	createTopic(t, topic, 2)
+	defer deleteTopic(t, topic)
+
+	// Local kafka only has 1 broker, so leader elections are no-ops.
+	resp, err := client.ElectLeaders(
+		ctx,
+		&ElectLeadersRequest{
+			Topic:      topic,
+			Partitions: []int{0, 1},
+		},
+	)
+
+	if err != nil {
+		t.Fatal(err)
+	}
+	if resp.Error != nil {
+		t.Error(
+			"Unexpected error in response",
+			"expected", nil,
+			"got", resp.Error,
+		)
+	}
+	if len(resp.PartitionResults) != 2 {
+		t.Error(
+			"Unexpected length of partition results",
+			"expected", 2,
+			"got", len(resp.PartitionResults),
+		)
+	}
+}
diff -pruN 0.2.1-1.1/endtxn.go 0.4.49+ds1-1/endtxn.go
--- 0.2.1-1.1/endtxn.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/endtxn.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,61 @@
+package kafka
+
+import (
+	"context"
+	"fmt"
+	"net"
+	"time"
+
+	"github.com/segmentio/kafka-go/protocol/endtxn"
+)
+
+// EndTxnRequest represents a request sent to a kafka broker to end a transaction.
+type EndTxnRequest struct {
+	// Address of the kafka broker to send the request to.
+	Addr net.Addr
+
+	// The transactional id key.
+	TransactionalID string
+
+	// The Producer ID (PID) for the current producer session
+	ProducerID int
+
+	// The epoch associated with the current producer session for the given PID
+	ProducerEpoch int
+
+	// Committed should be set to true if the transaction was committed, false otherwise.
+	Committed bool
+}
+
+// EndTxnResponse represents a response from a kafka broker to an end transaction request.
+type EndTxnResponse struct {
+	// The amount of time that the broker throttled the request.
+	Throttle time.Duration
+
+	// Error is non-nil if an error occurred and contains the kafka error code.
+	// Programs may use the standard errors.Is function to test the error
+	// against kafka error codes.
+	Error error
+}
+
+// EndTxn sends an EndTxn request to a kafka broker and returns its response.
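+//
+// A minimal usage sketch (assuming pid and epoch hold the producer ID and
+// epoch of the current producer session; names and values are illustrative only):
+//
+//	resp, err := client.EndTxn(ctx, &kafka.EndTxnRequest{
+//		TransactionalID: "my-transactional-id",
+//		ProducerID:      pid,
+//		ProducerEpoch:   epoch,
+//		Committed:       true, // false aborts the transaction instead
+//	})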
+func (c *Client) EndTxn(ctx context.Context, req *EndTxnRequest) (*EndTxnResponse, error) {
+	m, err := c.roundTrip(ctx, req.Addr, &endtxn.Request{
+		TransactionalID: req.TransactionalID,
+		ProducerID:      int64(req.ProducerID),
+		ProducerEpoch:   int16(req.ProducerEpoch),
+		Committed:       req.Committed,
+	})
+	if err != nil {
+		return nil, fmt.Errorf("kafka.(*Client).EndTxn: %w", err)
+	}
+
+	r := m.(*endtxn.Response)
+
+	res := &EndTxnResponse{
+		Throttle: makeDuration(r.ThrottleTimeMs),
+		Error:    makeError(r.ErrorCode, ""),
+	}
+
+	return res, nil
+}
diff -pruN 0.2.1-1.1/error.go 0.4.49+ds1-1/error.go
--- 0.2.1-1.1/error.go	2018-11-20 17:06:32.000000000 +0000
+++ 0.4.49+ds1-1/error.go	2025-08-21 19:15:53.000000000 +0000
@@ -1,11 +1,14 @@
 package kafka
 
 import (
+	"errors"
 	"fmt"
 	"io"
+	"syscall"
 )
 
 // Error represents the different error codes that may be returned by kafka.
+// https://kafka.apache.org/protocol#protocol_error_codes
 type Error int
 
 const (
@@ -22,6 +25,7 @@ const (
 	MessageSizeTooLarge                Error = 10
 	StaleControllerEpoch               Error = 11
 	OffsetMetadataTooLarge             Error = 12
+	NetworkException                   Error = 13
 	GroupLoadInProgress                Error = 14
 	GroupCoordinatorNotAvailable       Error = 15
 	NotCoordinatorForGroup             Error = 16
@@ -64,6 +68,57 @@ const (
 	TransactionalIDAuthorizationFailed Error = 53
 	SecurityDisabled                   Error = 54
 	BrokerAuthorizationFailed          Error = 55
+	KafkaStorageError                  Error = 56
+	LogDirNotFound                     Error = 57
+	SASLAuthenticationFailed           Error = 58
+	UnknownProducerId                  Error = 59
+	ReassignmentInProgress             Error = 60
+	DelegationTokenAuthDisabled        Error = 61
+	DelegationTokenNotFound            Error = 62
+	DelegationTokenOwnerMismatch       Error = 63
+	DelegationTokenRequestNotAllowed   Error = 64
+	DelegationTokenAuthorizationFailed Error = 65
+	DelegationTokenExpired             Error = 66
+	InvalidPrincipalType               Error = 67
+	NonEmptyGroup                      Error = 68
+	GroupIdNotFound                    Error = 69
+	FetchSessionIDNotFound             Error = 70
+	InvalidFetchSessionEpoch           Error = 71
+	ListenerNotFound                   Error = 72
+	TopicDeletionDisabled              Error = 73
+	FencedLeaderEpoch                  Error = 74
+	UnknownLeaderEpoch                 Error = 75
+	UnsupportedCompressionType         Error = 76
+	StaleBrokerEpoch                   Error = 77
+	OffsetNotAvailable                 Error = 78
+	MemberIDRequired                   Error = 79
+	PreferredLeaderNotAvailable        Error = 80
+	GroupMaxSizeReached                Error = 81
+	FencedInstanceID                   Error = 82
+	EligibleLeadersNotAvailable        Error = 83
+	ElectionNotNeeded                  Error = 84
+	NoReassignmentInProgress           Error = 85
+	GroupSubscribedToTopic             Error = 86
+	InvalidRecord                      Error = 87
+	UnstableOffsetCommit               Error = 88
+	ThrottlingQuotaExceeded            Error = 89
+	ProducerFenced                     Error = 90
+	ResourceNotFound                   Error = 91
+	DuplicateResource                  Error = 92
+	UnacceptableCredential             Error = 93
+	InconsistentVoterSet               Error = 94
+	InvalidUpdateVersion               Error = 95
+	FeatureUpdateFailed                Error = 96
+	PrincipalDeserializationFailure    Error = 97
+	SnapshotNotFound                   Error = 98
+	PositionOutOfRange                 Error = 99
+	UnknownTopicID                     Error = 100
+	DuplicateBrokerRegistration        Error = 101
+	BrokerIDNotRegistered              Error = 102
+	InconsistentTopicID                Error = 103
+	InconsistentClusterID              Error = 104
+	TransactionalIDNotFound            Error = 105
+	FetchSessionTopicIDError           Error = 106
 )
 
 // Error satisfies the error interface.
@@ -78,14 +133,43 @@ func (e Error) Timeout() bool {
 
 // Temporary returns true if the operation that generated the error may succeed
 // if retried at a later time.
+// Kafka error documentation specifies these as "retriable"
+// https://kafka.apache.org/protocol#protocol_error_codes
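+//
+// Callers that want to retry on such errors can test for them with errors.As,
+// for example (a sketch, not prescriptive):
+//
+//	var kafkaErr kafka.Error
+//	if errors.As(err, &kafkaErr) && kafkaErr.Temporary() {
+//		// back off and retry the request
+//	}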
 func (e Error) Temporary() bool {
-	return e == LeaderNotAvailable ||
-		e == BrokerNotAvailable ||
-		e == ReplicaNotAvailable ||
-		e == GroupLoadInProgress ||
-		e == GroupCoordinatorNotAvailable ||
-		e == RebalanceInProgress ||
-		e.Timeout()
+	switch e {
+	case InvalidMessage,
+		UnknownTopicOrPartition,
+		LeaderNotAvailable,
+		NotLeaderForPartition,
+		RequestTimedOut,
+		NetworkException,
+		GroupLoadInProgress,
+		GroupCoordinatorNotAvailable,
+		NotCoordinatorForGroup,
+		NotEnoughReplicas,
+		NotEnoughReplicasAfterAppend,
+		NotController,
+		KafkaStorageError,
+		FetchSessionIDNotFound,
+		InvalidFetchSessionEpoch,
+		ListenerNotFound,
+		FencedLeaderEpoch,
+		UnknownLeaderEpoch,
+		OffsetNotAvailable,
+		PreferredLeaderNotAvailable,
+		EligibleLeadersNotAvailable,
+		ElectionNotNeeded,
+		NoReassignmentInProgress,
+		GroupSubscribedToTopic,
+		UnstableOffsetCommit,
+		ThrottlingQuotaExceeded,
+		UnknownTopicID,
+		InconsistentTopicID,
+		FetchSessionTopicIDError:
+		return true
+	default:
+		return false
+	}
 }
 
 // Title returns a human readable title for the error.
@@ -201,6 +285,100 @@ func (e Error) Title() string {
 		return "Security Disabled"
 	case BrokerAuthorizationFailed:
 		return "Broker Authorization Failed"
+	case KafkaStorageError:
+		return "Kafka Storage Error"
+	case LogDirNotFound:
+		return "Log Dir Not Found"
+	case SASLAuthenticationFailed:
+		return "SASL Authentication Failed"
+	case UnknownProducerId:
+		return "Unknown Producer ID"
+	case ReassignmentInProgress:
+		return "Reassignment In Progress"
+	case DelegationTokenAuthDisabled:
+		return "Delegation Token Auth Disabled"
+	case DelegationTokenNotFound:
+		return "Delegation Token Not Found"
+	case DelegationTokenOwnerMismatch:
+		return "Delegation Token Owner Mismatch"
+	case DelegationTokenRequestNotAllowed:
+		return "Delegation Token Request Not Allowed"
+	case DelegationTokenAuthorizationFailed:
+		return "Delegation Token Authorization Failed"
+	case DelegationTokenExpired:
+		return "Delegation Token Expired"
+	case InvalidPrincipalType:
+		return "Invalid Principal Type"
+	case NonEmptyGroup:
+		return "Non Empty Group"
+	case GroupIdNotFound:
+		return "Group ID Not Found"
+	case FetchSessionIDNotFound:
+		return "Fetch Session ID Not Found"
+	case InvalidFetchSessionEpoch:
+		return "Invalid Fetch Session Epoch"
+	case ListenerNotFound:
+		return "Listener Not Found"
+	case TopicDeletionDisabled:
+		return "Topic Deletion Disabled"
+	case FencedLeaderEpoch:
+		return "Fenced Leader Epoch"
+	case UnknownLeaderEpoch:
+		return "Unknown Leader Epoch"
+	case UnsupportedCompressionType:
+		return "Unsupported Compression Type"
+	case MemberIDRequired:
+		return "Member ID Required"
+	case FencedInstanceID:
+		return "Fenced Instance ID"
+	case EligibleLeadersNotAvailable:
+		return "Eligible Leader Not Available"
+	case ElectionNotNeeded:
+		return "Election Not Needed"
+	case NoReassignmentInProgress:
+		return "No Reassignment In Progress"
+	case GroupSubscribedToTopic:
+		return "Group Subscribed To Topic"
+	case InvalidRecord:
+		return "Invalid Record"
+	case UnstableOffsetCommit:
+		return "Unstable Offset Commit"
+	case ThrottlingQuotaExceeded:
+		return "Throttling Quota Exceeded"
+	case ProducerFenced:
+		return "Producer Fenced"
+	case ResourceNotFound:
+		return "Resource Not Found"
+	case DuplicateResource:
+		return "Duplicate Resource"
+	case UnacceptableCredential:
+		return "Unacceptable Credential"
+	case InconsistentVoterSet:
+		return "Inconsistent Voter Set"
+	case InvalidUpdateVersion:
+		return "Invalid Update Version"
+	case FeatureUpdateFailed:
+		return "Feature Update Failed"
+	case PrincipalDeserializationFailure:
+		return "Principal Deserialization Failure"
+	case SnapshotNotFound:
+		return "Snapshot Not Found"
+	case PositionOutOfRange:
+		return "Position Out Of Range"
+	case UnknownTopicID:
+		return "Unknown Topic ID"
+	case DuplicateBrokerRegistration:
+		return "Duplicate Broker Registration"
+	case BrokerIDNotRegistered:
+		return "Broker ID Not Registered"
+	case InconsistentTopicID:
+		return "Inconsistent Topic ID"
+	case InconsistentClusterID:
+		return "Inconsistent Cluster ID"
+	case TransactionalIDNotFound:
+		return "Transactional ID Not Found"
+	case FetchSessionTopicIDError:
+		return "Fetch Session Topic ID Error"
 	}
 	return ""
 }
@@ -318,34 +496,137 @@ func (e Error) Description() string {
 		return "the security features are disabled"
 	case BrokerAuthorizationFailed:
 		return "the broker authorization failed"
+	case KafkaStorageError:
+		return "disk error when trying to access log file on the disk"
+	case LogDirNotFound:
+		return "the user-specified log directory is not found in the broker config"
+	case SASLAuthenticationFailed:
+		return "SASL Authentication failed"
+	case UnknownProducerId:
+		return "the broker could not locate the producer metadata associated with the producer ID"
+	case ReassignmentInProgress:
+		return "a partition reassignment is in progress"
+	case DelegationTokenAuthDisabled:
+		return "delegation token feature is not enabled"
+	case DelegationTokenNotFound:
+		return "delegation token is not found on server"
+	case DelegationTokenOwnerMismatch:
+		return "specified principal is not valid owner/renewer"
+	case DelegationTokenRequestNotAllowed:
+		return "delegation token requests are not allowed on plaintext/1-way ssl channels and on delegation token authenticated channels"
+	case DelegationTokenAuthorizationFailed:
+		return "delegation token authorization failed"
+	case DelegationTokenExpired:
+		return "delegation token is expired"
+	case InvalidPrincipalType:
+		return "supplied principaltype is not supported"
+	case NonEmptyGroup:
+		return "the group is not empty"
+	case GroupIdNotFound:
+		return "the group ID does not exist"
+	case FetchSessionIDNotFound:
+		return "the fetch session ID was not found"
+	case InvalidFetchSessionEpoch:
+		return "the fetch session epoch is invalid"
+	case ListenerNotFound:
+		return "there is no listener on the leader broker that matches the listener on which metadata request was processed"
+	case TopicDeletionDisabled:
+		return "topic deletion is disabled"
+	case FencedLeaderEpoch:
+		return "the leader epoch in the request is older than the epoch on the broker"
+	case UnknownLeaderEpoch:
+		return "the leader epoch in the request is newer than the epoch on the broker"
+	case UnsupportedCompressionType:
+		return "the requesting client does not support the compression type of given partition"
+	case MemberIDRequired:
+		return "the group member needs to have a valid member id before actually entering a consumer group"
+	case FencedInstanceID:
+		return "the broker rejected this static consumer since another consumer with the same group.instance.id has registered with a different member.id"
+	case EligibleLeadersNotAvailable:
+		return "eligible topic partition leaders are not available"
+	case ElectionNotNeeded:
+		return "leader election not needed for topic partition"
+	case NoReassignmentInProgress:
+		return "no partition reassignment is in progress"
+	case GroupSubscribedToTopic:
+		return "deleting offsets of a topic is forbidden while the consumer group is actively subscribed to it"
+	case InvalidRecord:
+		return "this record has failed the validation on broker and hence be rejected"
+	case UnstableOffsetCommit:
+		return "there are unstable offsets that need to be cleared"
+	case ThrottlingQuotaExceeded:
+		return "The throttling quota has been exceeded"
+	case ProducerFenced:
+		return "There is a newer producer with the same transactionalId which fences the current one"
+	case ResourceNotFound:
+		return "A request illegally referred to a resource that does not exist"
+	case DuplicateResource:
+		return "A request illegally referred to the same resource twice"
+	case UnacceptableCredential:
+		return "Requested credential would not meet criteria for acceptability"
+	case InconsistentVoterSet:
+		return "Indicates that the either the sender or recipient of a voter-only request is not one of the expected voters"
+	case InvalidUpdateVersion:
+		return "The given update version was invalid"
+	case FeatureUpdateFailed:
+		return "Unable to update finalized features due to an unexpected server error"
+	case PrincipalDeserializationFailure:
+		return "Request principal deserialization failed during forwarding. This indicates an internal error on the broker cluster security setup"
+	case SnapshotNotFound:
+		return "Requested snapshot was not found"
+	case PositionOutOfRange:
+		return "Requested position is not greater than or equal to zero, and less than the size of the snapshot"
+	case UnknownTopicID:
+		return "This server does not host this topic ID"
+	case DuplicateBrokerRegistration:
+		return "This broker ID is already in use"
+	case BrokerIDNotRegistered:
+		return "The given broker ID was not registered"
+	case InconsistentTopicID:
+		return "The log's topic ID did not match the topic ID in the request"
+	case InconsistentClusterID:
+		return "The clusterId in the request does not match that found on the server"
+	case TransactionalIDNotFound:
+		return "The transactionalId could not be found"
+	case FetchSessionTopicIDError:
+		return "The fetch session encountered inconsistent topic ID usage"
 	}
 	return ""
 }
 
 func isTimeout(err error) bool {
-	e, ok := err.(interface {
-		Timeout() bool
-	})
-	return ok && e.Timeout()
+	var timeoutError interface{ Timeout() bool }
+	if errors.As(err, &timeoutError) {
+		return timeoutError.Timeout()
+	}
+	return false
 }
 
 func isTemporary(err error) bool {
-	e, ok := err.(interface {
-		Temporary() bool
-	})
-	return ok && e.Temporary()
+	var tempError interface{ Temporary() bool }
+	if errors.As(err, &tempError) {
+		return tempError.Temporary()
+	}
+	return false
+}
+
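+// isTransientNetworkError reports whether err is a low-level network error
+// (unexpected EOF, connection refused/reset, or broken pipe) that is usually
+// transient and safe to retry.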
+func isTransientNetworkError(err error) bool {
+	return errors.Is(err, io.ErrUnexpectedEOF) ||
+		errors.Is(err, syscall.ECONNREFUSED) ||
+		errors.Is(err, syscall.ECONNRESET) ||
+		errors.Is(err, syscall.EPIPE)
 }
 
 func silentEOF(err error) error {
-	if err == io.EOF {
+	if errors.Is(err, io.EOF) {
 		err = nil
 	}
 	return err
 }
 
 func dontExpectEOF(err error) error {
-	if err == io.EOF {
-		err = io.ErrUnexpectedEOF
+	if errors.Is(err, io.EOF) {
+		return io.ErrUnexpectedEOF
 	}
 	return err
 }
@@ -358,3 +639,83 @@ func coalesceErrors(errs ...error) error
 	}
 	return nil
 }
+
+// MessageTooLargeError is returned when a message is too large to fit within the allowed size.
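+//
+// Programs can detect it with errors.As and retry the messages that were not
+// rejected, for example (a sketch; mtl is an illustrative variable name):
+//
+//	var mtl kafka.MessageTooLargeError
+//	if errors.As(err, &mtl) {
+//		// mtl.Message was rejected; mtl.Remaining can be written again.
+//	}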
+type MessageTooLargeError struct {
+	Message   Message
+	Remaining []Message
+}
+
+func messageTooLarge(msgs []Message, i int) MessageTooLargeError {
+	remain := make([]Message, 0, len(msgs)-1)
+	remain = append(remain, msgs[:i]...)
+	remain = append(remain, msgs[i+1:]...)
+	return MessageTooLargeError{
+		Message:   msgs[i],
+		Remaining: remain,
+	}
+}
+
+func (e MessageTooLargeError) Error() string {
+	return MessageSizeTooLarge.Error()
+}
+
+func (e MessageTooLargeError) Unwrap() error {
+	return MessageSizeTooLarge
+}
+
+func makeError(code int16, message string) error {
+	if code == 0 {
+		return nil
+	}
+	if message == "" {
+		return Error(code)
+	}
+	return fmt.Errorf("%w: %s", Error(code), message)
+}
+
+// WriteError is returned by kafka.(*Writer).WriteMessages when the writer is
+// not configured to write messages asynchronously. WriteError values contain
+// a list of errors where each entry matches the position of a message in the
+// WriteMessages call. The program can determine the status of each message by
+// looping over the error:
+//
+//	switch err := w.WriteMessages(ctx, msgs...).(type) {
+//	case nil:
+//	case kafka.WriteErrors:
+//		for i := range msgs {
+//			if err[i] != nil {
+//				// handle the error writing msgs[i]
+//				...
+//			}
+//		}
+//	default:
+//		// handle other errors
+//		...
+//	}
+type WriteErrors []error
+
+// Count counts the number of non-nil errors in err.
+func (err WriteErrors) Count() int {
+	n := 0
+
+	for _, e := range err {
+		if e != nil {
+			n++
+		}
+	}
+
+	return n
+}
+
+func (err WriteErrors) Error() string {
+	errCount := err.Count()
+	errors := make([]string, 0, errCount)
+	for _, writeError := range err {
+		if writeError == nil {
+			continue
+		}
+		errors = append(errors, writeError.Error())
+	}
+	return fmt.Sprintf("Kafka write errors (%d/%d), errors: %v", errCount, len(err), errors)
+}
diff -pruN 0.2.1-1.1/error_test.go 0.4.49+ds1-1/error_test.go
--- 0.2.1-1.1/error_test.go	2018-11-20 17:06:32.000000000 +0000
+++ 0.4.49+ds1-1/error_test.go	2025-08-21 19:15:53.000000000 +0000
@@ -3,11 +3,11 @@ package kafka
 import (
 	"fmt"
 	"testing"
+
+	"github.com/stretchr/testify/assert"
 )
 
 func TestError(t *testing.T) {
-	t.Parallel()
-
 	errorCodes := []Error{
 		Unknown,
 		OffsetOutOfRange,
@@ -64,6 +64,27 @@ func TestError(t *testing.T) {
 		TransactionalIDAuthorizationFailed,
 		SecurityDisabled,
 		BrokerAuthorizationFailed,
+		KafkaStorageError,
+		LogDirNotFound,
+		SASLAuthenticationFailed,
+		UnknownProducerId,
+		ReassignmentInProgress,
+		DelegationTokenAuthDisabled,
+		DelegationTokenNotFound,
+		DelegationTokenOwnerMismatch,
+		DelegationTokenRequestNotAllowed,
+		DelegationTokenAuthorizationFailed,
+		DelegationTokenExpired,
+		InvalidPrincipalType,
+		NonEmptyGroup,
+		GroupIdNotFound,
+		FetchSessionIDNotFound,
+		InvalidFetchSessionEpoch,
+		ListenerNotFound,
+		TopicDeletionDisabled,
+		FencedLeaderEpoch,
+		UnknownLeaderEpoch,
+		UnsupportedCompressionType,
 	}
 
 	for _, err := range errorCodes {
@@ -91,4 +112,16 @@ func TestError(t *testing.T) {
 			t.Error("non-empty description:", s)
 		}
 	})
+
+	t.Run("MessageTooLargeError error.Is satisfaction", func(t *testing.T) {
+		err := MessageSizeTooLarge
+		msg := []Message{
+			{Key: []byte("key"), Value: []byte("value")},
+			{Key: []byte("key"), Value: make([]byte, 8)},
+		}
+		msgTooLarge := messageTooLarge(msg, 1)
+		assert.NotErrorIs(t, err, msgTooLarge)
+		assert.Contains(t, msgTooLarge.Error(), MessageSizeTooLarge.Error())
+		assert.ErrorIs(t, msgTooLarge, MessageSizeTooLarge)
+	})
 }
diff -pruN 0.2.1-1.1/example_consumergroup_test.go 0.4.49+ds1-1/example_consumergroup_test.go
--- 0.2.1-1.1/example_consumergroup_test.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/example_consumergroup_test.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,94 @@
+package kafka_test
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"os"
+
+	"github.com/segmentio/kafka-go"
+)
+
+func ExampleGeneration_Start_consumerGroupParallelReaders() {
+	group, err := kafka.NewConsumerGroup(kafka.ConsumerGroupConfig{
+		ID:      "my-group",
+		Brokers: []string{"kafka:9092"},
+		Topics:  []string{"my-topic"},
+	})
+	if err != nil {
+		fmt.Printf("error creating consumer group: %+v\n", err)
+		os.Exit(1)
+	}
+	defer group.Close()
+
+	for {
+		gen, err := group.Next(context.TODO())
+		if err != nil {
+			break
+		}
+
+		assignments := gen.Assignments["my-topic"]
+		for _, assignment := range assignments {
+			partition, offset := assignment.ID, assignment.Offset
+			gen.Start(func(ctx context.Context) {
+				// create reader for this partition.
+				reader := kafka.NewReader(kafka.ReaderConfig{
+					Brokers:   []string{"127.0.0.1:9092"},
+					Topic:     "my-topic",
+					Partition: partition,
+				})
+				defer reader.Close()
+
+				// seek to the last committed offset for this partition.
+				reader.SetOffset(offset)
+				for {
+					msg, err := reader.ReadMessage(ctx)
+					if err != nil {
+						if errors.Is(err, kafka.ErrGenerationEnded) {
+							// generation has ended.  commit offsets.  in a real app,
+							// offsets would be committed periodically.
+							gen.CommitOffsets(map[string]map[int]int64{"my-topic": {partition: offset + 1}})
+							return
+						}
+
+						fmt.Printf("error reading message: %+v\n", err)
+						return
+					}
+
+					fmt.Printf("received message %s/%d/%d : %s\n", msg.Topic, msg.Partition, msg.Offset, string(msg.Value))
+					offset = msg.Offset
+				}
+			})
+		}
+	}
+}
+
+func ExampleGeneration_CommitOffsets_overwriteOffsets() {
+	group, err := kafka.NewConsumerGroup(kafka.ConsumerGroupConfig{
+		ID:      "my-group",
+		Brokers: []string{"kafka:9092"},
+		Topics:  []string{"my-topic"},
+	})
+	if err != nil {
+		fmt.Printf("error creating consumer group: %+v\n", err)
+		os.Exit(1)
+	}
+	defer group.Close()
+
+	gen, err := group.Next(context.TODO())
+	if err != nil {
+		fmt.Printf("error getting next generation: %+v\n", err)
+		os.Exit(1)
+	}
+	err = gen.CommitOffsets(map[string]map[int]int64{
+		"my-topic": {
+			0: 123,
+			1: 456,
+			3: 789,
+		},
+	})
+	if err != nil {
+		fmt.Printf("error committing offsets next generation: %+v\n", err)
+		os.Exit(1)
+	}
+}
diff -pruN 0.2.1-1.1/example_groupbalancer_test.go 0.4.49+ds1-1/example_groupbalancer_test.go
--- 0.2.1-1.1/example_groupbalancer_test.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/example_groupbalancer_test.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,121 @@
+package kafka
+
+import (
+	"context"
+	"encoding/json"
+	"io/ioutil"
+	"net/http"
+	"os"
+	"strings"
+	"time"
+)
+
+// ExampleNewReader_rackAffinity shows how the RackAffinityGroupBalancer can be
+// used to pair up consumers with brokers in the same AWS availability zone.
+// This code assumes that each broker's rack is configured to be the name of the
+// AZ in which it is running.
+func ExampleNewReader_rackAffinity() {
+	r := NewReader(ReaderConfig{
+		Brokers: []string{"kafka:9092"},
+		GroupID: "my-group",
+		Topic:   "my-topic",
+		GroupBalancers: []GroupBalancer{
+			RackAffinityGroupBalancer{Rack: findRack()},
+			RangeGroupBalancer{},
+		},
+	})
+
+	r.ReadMessage(context.Background())
+
+	r.Close()
+}
+
+// findRack is the basic rack resolver strategy for use in AWS.  It supports
+//  * ECS with the task metadata endpoint enabled (returns the container
+//    instance's availability zone)
+//  * Linux EC2 (returns the instance's availability zone)
+func findRack() string {
+	switch whereAmI() {
+	case "ecs":
+		return ecsAvailabilityZone()
+	case "ec2":
+		return ec2AvailabilityZone()
+	}
+	return ""
+}
+
+const ecsContainerMetadataURI = "ECS_CONTAINER_METADATA_URI"
+
+// whereAmI determines which strategy the rack resolver should use.
+func whereAmI() string {
+	// https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-metadata-endpoint.html
+	if os.Getenv(ecsContainerMetadataURI) != "" {
+		return "ecs"
+	}
+	// https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/identify_ec2_instances.html
+	for _, path := range [...]string{
+		"/sys/devices/virtual/dmi/id/product_uuid",
+		"/sys/hypervisor/uuid",
+	} {
+		b, err := ioutil.ReadFile(path)
+		if err != nil {
+			continue
+		}
+		s := string(b)
+		switch {
+		case strings.HasPrefix(s, "EC2"), strings.HasPrefix(s, "ec2"):
+			return "ec2"
+		}
+	}
+	return "somewhere"
+}
+
+// ecsAvailabilityZone queries the task endpoint for the metadata URI that ECS
+// injects into the ECS_CONTAINER_METADATA_URI variable in order to retrieve
+// the availability zone where the task is running.
+func ecsAvailabilityZone() string {
+	client := http.Client{
+		Timeout: time.Second,
+		Transport: &http.Transport{
+			DisableCompression: true,
+			DisableKeepAlives:  true,
+		},
+	}
+	r, err := client.Get(os.Getenv(ecsContainerMetadataURI) + "/task")
+	if err != nil {
+		return ""
+	}
+	defer r.Body.Close()
+
+	var md struct {
+		AvailabilityZone string
+	}
+	if err := json.NewDecoder(r.Body).Decode(&md); err != nil {
+		return ""
+	}
+	return md.AvailabilityZone
+}
+
+// ec2AvailabilityZone queries the metadata endpoint to discover the
+// availability zone where this code is running.  We avoid calling this function
+// unless we know we're in EC2; otherwise, in other environments, we would need
+// to wait for the request to 169.254.169.254 to time out before proceeding.
+func ec2AvailabilityZone() string {
+	client := http.Client{
+		Timeout: time.Second,
+		Transport: &http.Transport{
+			DisableCompression: true,
+			DisableKeepAlives:  true,
+		},
+	}
+	r, err := client.Get("http://169.254.169.254/latest/meta-data/placement/availability-zone")
+	if err != nil {
+		return ""
+	}
+	defer r.Body.Close()
+	b, err := ioutil.ReadAll(r.Body)
+	if err != nil {
+		return ""
+	}
+	return string(b)
+}
diff -pruN 0.2.1-1.1/example_writer_test.go 0.4.49+ds1-1/example_writer_test.go
--- 0.2.1-1.1/example_writer_test.go	2018-11-20 17:06:32.000000000 +0000
+++ 0.4.49+ds1-1/example_writer_test.go	2025-08-21 19:15:53.000000000 +0000
@@ -7,10 +7,10 @@ import (
 )
 
 func ExampleWriter() {
-	w := kafka.NewWriter(kafka.WriterConfig{
-		Brokers: []string{"localhost:9092"},
-		Topic:   "Topic-1",
-	})
+	w := &kafka.Writer{
+		Addr:  kafka.TCP("localhost:9092"),
+		Topic: "Topic-1",
+	}
 
 	w.WriteMessages(context.Background(),
 		kafka.Message{
diff -pruN 0.2.1-1.1/examples/.gitignore 0.4.49+ds1-1/examples/.gitignore
--- 0.2.1-1.1/examples/.gitignore	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/examples/.gitignore	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1 @@
+kafka-go
\ No newline at end of file
diff -pruN 0.2.1-1.1/examples/consumer-logger/Dockerfile 0.4.49+ds1-1/examples/consumer-logger/Dockerfile
--- 0.2.1-1.1/examples/consumer-logger/Dockerfile	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/examples/consumer-logger/Dockerfile	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,31 @@
+#####################################
+#   STEP 1 build executable binary  #
+#####################################
+FROM golang:alpine AS builder
+
+# Install git.
+# Git is required for fetching the dependencies.
+RUN apk update && apk add --no-cache git
+
+WORKDIR /app
+
+COPY go.mod .
+COPY go.sum .
+
+RUN go mod download
+
+COPY . .
+
+# Build the binary.
+RUN CGO_ENABLED=0 GOOS=linux go build -o main
+
+#####################################
+#   STEP 2 build a small image      #
+#####################################
+FROM scratch
+
+# Copy our static executable.
+COPY --from=builder /app/main /app/main
+
+# Run the hello binary.
+ENTRYPOINT ["/app/main"]
\ No newline at end of file
diff -pruN 0.2.1-1.1/examples/consumer-logger/go.mod 0.4.49+ds1-1/examples/consumer-logger/go.mod
--- 0.2.1-1.1/examples/consumer-logger/go.mod	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/examples/consumer-logger/go.mod	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,8 @@
+module github.com/segmentio/kafka-go/example/consumer-logger
+
+go 1.15
+
+require (
+	github.com/klauspost/compress v1.12.2 // indirect
+	github.com/segmentio/kafka-go v0.4.28
+)
diff -pruN 0.2.1-1.1/examples/consumer-logger/go.sum 0.4.49+ds1-1/examples/consumer-logger/go.sum
--- 0.2.1-1.1/examples/consumer-logger/go.sum	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/examples/consumer-logger/go.sum	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,47 @@
+github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21 h1:YEetp8/yCZMuEPMUDHG0CW/brkkEp8mzqk2+ODEitlw=
+github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU=
+github.com/frankban/quicktest v1.11.3 h1:8sXhOn0uLys67V8EsXLc6eszDs8VXWxL3iRvebPhedY=
+github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k=
+github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/golang/snappy v0.0.3 h1:fHPg5GQYlCeLIPB9BZqMVR5nR9A+IM5zcgeTdjMYmLA=
+github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/google/go-cmp v0.5.4 h1:L8R9j+yAqZuZjsqh/z+F1NCffTKKLShY6zXTItVIZ8M=
+github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/klauspost/compress v1.9.8/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
+github.com/klauspost/compress v1.12.2 h1:2KCfW3I9M7nSc5wOqXAlW2v2U6v+w6cbjvbfp+OykW8=
+github.com/klauspost/compress v1.12.2/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
+github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI=
+github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
+github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
+github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
+github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
+github.com/pierrec/lz4 v2.6.0+incompatible h1:Ix9yFKn1nSPBLFl/yZknTp8TU5G4Ps0JDmguYK6iH1A=
+github.com/pierrec/lz4 v2.6.0+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/segmentio/kafka-go v0.4.28 h1:ATYbyenAlsoFxnV+VpIJMF87bvRuRsX7fezHNfpwkdM=
+github.com/segmentio/kafka-go v0.4.28/go.mod h1:XzMcoMjSzDGHcIwpWUI7GB43iKZ2fTVmryPSGLf/MPg=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0=
+github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c h1:u40Z8hqBAAQyv+vATcGgV0YCnDjqSL7/q/JyPhhJSPk=
+github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I=
+github.com/xdg/stringprep v1.0.0 h1:d9X0esnoa3dFsV0FG35rAT0RIhYFlPq7MiP+DW89La0=
+github.com/xdg/stringprep v1.0.0/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20190506204251-e1dfcc566284 h1:rlLehGeYg6jfoyz/eDqDU1iRXLKfR42nnNh57ytKEWo=
+golang.org/x/crypto v0.0.0-20190506204251-e1dfcc566284/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3 h1:0GoQqolDA55aaLxZyTzK/Y2ePZzZTUrRacwib7cNsYQ=
+golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
+gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
diff -pruN 0.2.1-1.1/examples/consumer-logger/main.go 0.4.49+ds1-1/examples/consumer-logger/main.go
--- 0.2.1-1.1/examples/consumer-logger/main.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/examples/consumer-logger/main.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,42 @@
+package main
+
+import (
+	"context"
+	"fmt"
+	"log"
+	"os"
+	"strings"
+
+	kafka "github.com/segmentio/kafka-go"
+)
+
+func getKafkaReader(kafkaURL, topic, groupID string) *kafka.Reader {
+	brokers := strings.Split(kafkaURL, ",")
+	return kafka.NewReader(kafka.ReaderConfig{
+		Brokers:  brokers,
+		GroupID:  groupID,
+		Topic:    topic,
+		MinBytes: 10e3, // 10KB
+		MaxBytes: 10e6, // 10MB
+	})
+}
+
+func main() {
+	// get kafka reader using environment variables.
+	kafkaURL := os.Getenv("kafkaURL")
+	topic := os.Getenv("topic")
+	groupID := os.Getenv("groupID")
+
+	reader := getKafkaReader(kafkaURL, topic, groupID)
+
+	defer reader.Close()
+
+	fmt.Println("start consuming ... !!")
+	for {
+		m, err := reader.ReadMessage(context.Background())
+		if err != nil {
+			log.Fatalln(err)
+		}
+		fmt.Printf("message at topic:%v partition:%v offset:%v	%s = %s\n", m.Topic, m.Partition, m.Offset, string(m.Key), string(m.Value))
+	}
+}
diff -pruN 0.2.1-1.1/examples/consumer-mongo-db/Dockerfile 0.4.49+ds1-1/examples/consumer-mongo-db/Dockerfile
--- 0.2.1-1.1/examples/consumer-mongo-db/Dockerfile	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/examples/consumer-mongo-db/Dockerfile	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,31 @@
+#####################################
+#   STEP 1 build executable binary  #
+#####################################
+FROM golang:alpine AS builder
+
+# Install git.
+# Git is required for fetching the dependencies.
+RUN apk update && apk add --no-cache git
+
+WORKDIR /app
+
+COPY go.mod .
+COPY go.sum .
+
+RUN go mod download
+
+COPY . .
+
+# Build the binary.
+RUN CGO_ENABLED=0 GOOS=linux go build -o main
+
+#####################################
+#   STEP 2 build a small image      #
+#####################################
+FROM scratch
+
+# Copy our static executable.
+COPY --from=builder /app/main /app/main
+
+# Run the hello binary.
+ENTRYPOINT ["/app/main"]
\ No newline at end of file
diff -pruN 0.2.1-1.1/examples/consumer-mongo-db/go.mod 0.4.49+ds1-1/examples/consumer-mongo-db/go.mod
--- 0.2.1-1.1/examples/consumer-mongo-db/go.mod	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/examples/consumer-mongo-db/go.mod	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,12 @@
+module github.com/segmentio/kafka-go/example/consumer-mongo-db
+
+go 1.15
+
+require (
+	github.com/go-stack/stack v1.8.0 // indirect
+	github.com/klauspost/compress v1.12.2 // indirect
+	github.com/mongodb/mongo-go-driver v0.3.0
+	github.com/segmentio/kafka-go v0.4.28
+	github.com/tidwall/pretty v1.1.0 // indirect
+	golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4 // indirect
+)
diff -pruN 0.2.1-1.1/examples/consumer-mongo-db/go.sum 0.4.49+ds1-1/examples/consumer-mongo-db/go.sum
--- 0.2.1-1.1/examples/consumer-mongo-db/go.sum	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/examples/consumer-mongo-db/go.sum	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,55 @@
+github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21 h1:YEetp8/yCZMuEPMUDHG0CW/brkkEp8mzqk2+ODEitlw=
+github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU=
+github.com/frankban/quicktest v1.11.3 h1:8sXhOn0uLys67V8EsXLc6eszDs8VXWxL3iRvebPhedY=
+github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k=
+github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk=
+github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
+github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/golang/snappy v0.0.3 h1:fHPg5GQYlCeLIPB9BZqMVR5nR9A+IM5zcgeTdjMYmLA=
+github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/google/go-cmp v0.5.4 h1:L8R9j+yAqZuZjsqh/z+F1NCffTKKLShY6zXTItVIZ8M=
+github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/klauspost/compress v1.9.8/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
+github.com/klauspost/compress v1.12.2 h1:2KCfW3I9M7nSc5wOqXAlW2v2U6v+w6cbjvbfp+OykW8=
+github.com/klauspost/compress v1.12.2/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
+github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI=
+github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
+github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
+github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
+github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
+github.com/mongodb/mongo-go-driver v0.3.0 h1:00tKWMrabkVU1e57/TTP4ZBIfhn/wmjlSiRnIM9d0T8=
+github.com/mongodb/mongo-go-driver v0.3.0/go.mod h1:NK/HWDIIZkaYsnYa0hmtP443T5ELr0KDecmIioVuuyU=
+github.com/pierrec/lz4 v2.6.0+incompatible h1:Ix9yFKn1nSPBLFl/yZknTp8TU5G4Ps0JDmguYK6iH1A=
+github.com/pierrec/lz4 v2.6.0+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/segmentio/kafka-go v0.4.28 h1:ATYbyenAlsoFxnV+VpIJMF87bvRuRsX7fezHNfpwkdM=
+github.com/segmentio/kafka-go v0.4.28/go.mod h1:XzMcoMjSzDGHcIwpWUI7GB43iKZ2fTVmryPSGLf/MPg=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0=
+github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/tidwall/pretty v1.1.0 h1:K3hMW5epkdAVwibsQEfR/7Zj0Qgt4DxtNumTq/VloO8=
+github.com/tidwall/pretty v1.1.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
+github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c h1:u40Z8hqBAAQyv+vATcGgV0YCnDjqSL7/q/JyPhhJSPk=
+github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I=
+github.com/xdg/stringprep v1.0.0 h1:d9X0esnoa3dFsV0FG35rAT0RIhYFlPq7MiP+DW89La0=
+github.com/xdg/stringprep v1.0.0/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20190506204251-e1dfcc566284 h1:rlLehGeYg6jfoyz/eDqDU1iRXLKfR42nnNh57ytKEWo=
+golang.org/x/crypto v0.0.0-20190506204251-e1dfcc566284/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3 h1:0GoQqolDA55aaLxZyTzK/Y2ePZzZTUrRacwib7cNsYQ=
+golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4 h1:YUO/7uOKsKeq9UokNS62b8FYywz3ker1l1vDZRCRefw=
+golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
+gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
diff -pruN 0.2.1-1.1/examples/consumer-mongo-db/main.go 0.4.49+ds1-1/examples/consumer-mongo-db/main.go
--- 0.2.1-1.1/examples/consumer-mongo-db/main.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/examples/consumer-mongo-db/main.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,72 @@
+package main
+
+import (
+	"context"
+	"fmt"
+	"log"
+	"os"
+
+	"github.com/mongodb/mongo-go-driver/mongo"
+	kafka "github.com/segmentio/kafka-go"
+)
+
+func getMongoCollection(mongoURL, dbName, collectionName string) *mongo.Collection {
+	client, err := mongo.Connect(context.Background(), mongoURL)
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	// Check the connection
+	err = client.Ping(context.Background(), nil)
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	fmt.Println("Connected to MongoDB ... !!")
+
+	db := client.Database(dbName)
+	collection := db.Collection(collectionName)
+	return collection
+}
+
+func getKafkaReader(kafkaURL, topic, groupID string) *kafka.Reader {
+	return kafka.NewReader(kafka.ReaderConfig{
+		Brokers:  []string{kafkaURL},
+		GroupID:  groupID,
+		Topic:    topic,
+		MinBytes: 10e3, // 10KB
+		MaxBytes: 10e6, // 10MB
+	})
+}
+
+func main() {
+
+	// get Mongo db Collection using environment variables.
+	mongoURL := os.Getenv("mongoURL")
+	dbName := os.Getenv("dbName")
+	collectionName := os.Getenv("collectionName")
+	collection := getMongoCollection(mongoURL, dbName, collectionName)
+
+	// get kafka reader using environment variables.
+	kafkaURL := os.Getenv("kafkaURL")
+	topic := os.Getenv("topic")
+	groupID := os.Getenv("groupID")
+	reader := getKafkaReader(kafkaURL, topic, groupID)
+
+	defer reader.Close()
+
+	fmt.Println("start consuming ... !!")
+
+	for {
+		msg, err := reader.ReadMessage(context.Background())
+		if err != nil {
+			log.Fatal(err)
+		}
+		insertResult, err := collection.InsertOne(context.Background(), msg)
+		if err != nil {
+			log.Fatal(err)
+		}
+
+		fmt.Println("Inserted a single document: ", insertResult.InsertedID)
+	}
+}
diff -pruN 0.2.1-1.1/examples/docker-compose.yaml 0.4.49+ds1-1/examples/docker-compose.yaml
--- 0.2.1-1.1/examples/docker-compose.yaml	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/examples/docker-compose.yaml	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,91 @@
+version: '2.3'
+services:
+
+  zookeeper:
+    hostname: zookeeper
+    image: bitnamilegacy/zookeeper:latest
+    restart: always
+    expose:
+    - "2181"
+    ports:
+    - "2181:2181"
+    environment:
+      ALLOW_ANONYMOUS_LOGIN: yes
+  
+  kafka:
+    hostname: kafka
+    image: bitnamilegacy/kafka:2.7.0
+    restart: always
+    env_file:
+    - kafka/kafka-variables.env
+    depends_on:
+    - zookeeper
+    expose:
+    - "9092"
+    - "8082"
+    - "8083"
+    ports:
+    - '9092:9092'
+    - '8082:8082'
+    - '8083:8083'
+
+  mongo-db:
+    image: mongo:4.0
+    restart: always
+    expose:
+    - "27017"
+    ports:
+    - "27017:27017"
+    environment:
+      MONGO_DATA_DIR: /data/db
+      MONGO_LOG_DIR: /dev/null
+
+  consumer-mongo-db:
+    build:
+      context: consumer-mongo-db
+    environment:
+      mongoURL: mongodb://mongo-db:27017
+      dbName: example_db
+      collectionName: example_coll
+      kafkaURL: kafka:9092
+      topic: topic1
+      groupID: mongo-group
+    depends_on: 
+    - kafka
+    - mongo-db
+    restart: always
+
+  consumer-logger:
+    build:
+      context: consumer-logger
+    environment:
+      kafkaURL: kafka:9092
+      topic: topic1
+      groupID: logger-group
+    depends_on: 
+    - kafka
+    restart: always
+
+  producer-random:
+    build:
+      context: producer-random
+    environment:
+      kafkaURL: kafka:9092
+      topic: topic1
+    depends_on: 
+    - kafka
+    restart: always
+
+  producer-api:
+    build:
+      context: producer-api
+    environment:
+      kafkaURL: kafka:9092
+      topic: topic1
+    expose:
+    - "8080"
+    ports:
+    - "8080:8080"
+    depends_on: 
+    - kafka
+    restart: always
diff -pruN 0.2.1-1.1/examples/kafka/kafka-variables.env 0.4.49+ds1-1/examples/kafka/kafka-variables.env
--- 0.2.1-1.1/examples/kafka/kafka-variables.env	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/examples/kafka/kafka-variables.env	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,23 @@
+
+KAFKA_CFG_ADVERTISED_HOST_NAME=kafka
+KAFKA_CFG_ZOOKEEPER_CONNECT=zookeeper:2181
+KAFKA_CFG_CONNECT_BOOTSTRAP_SERVERS=localhost:9092
+
+KAFKA_CFG_CONNECT_REST_PORT=8082
+KAFKA_CFG_CONNECT_REST_ADVERTISED_HOST_NAME="localhost"
+
+KAFKA_CFG_CONNECT_KEY_CONVERTER="org.apache.kafka.connect.json.JsonConverter"
+KAFKA_CFG_CONNECT_VALUE_CONVERTER="org.apache.kafka.connect.json.JsonConverter"
+KAFKA_CFG_CONNECT_KEY_CONVERTER_SCHEMAS_ENABLE=0
+KAFKA_CFG_CONNECT_VALUE_CONVERTER_SCHEMAS_ENABLE=0
+
+KAFKA_CFG_CONNECT_INTERNAL_KEY_CONVERTER="org.apache.kafka.connect.json.JsonConverter"
+KAFKA_CFG_CONNECT_INTERNAL_VALUE_CONVERTER="org.apache.kafka.connect.json.JsonConverter"
+KAFKA_CFG_CONNECT_INTERNAL_KEY_CONVERTER_SCHEMAS_ENABLE=0
+KAFKA_CFG_CONNECT_INTERNAL_VALUE_CONVERTER_SCHEMAS_ENABLE=0
+
+KAFKA_CFG_CONNECT_OFFSET_STORAGE_FILE_FILENAME="/tmp/connect.offsets"
+# Flush much faster than normal, which is useful for testing/debugging
+KAFKA_CFG_CONNECT_OFFSET_FLUSH_INTERVAL_MS=10000
+
+ALLOW_PLAINTEXT_LISTENER=yes
diff -pruN 0.2.1-1.1/examples/producer-api/Dockerfile 0.4.49+ds1-1/examples/producer-api/Dockerfile
--- 0.2.1-1.1/examples/producer-api/Dockerfile	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/examples/producer-api/Dockerfile	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,31 @@
+#####################################
+#   STEP 1 build executable binary  #
+#####################################
+FROM golang:alpine AS builder
+
+# Install git.
+# Git is required for fetching the dependencies.
+RUN apk update && apk add --no-cache git
+
+WORKDIR /app
+
+COPY go.mod .
+COPY go.sum .
+
+RUN go mod download
+
+COPY . .
+
+# Build the binary.
+RUN CGO_ENABLED=0 GOOS=linux go build -o main
+
+#####################################
+#   STEP 2 build a small image      #
+#####################################
+FROM scratch
+
+# Copy our static executable.
+COPY --from=builder /app/main /app/main
+
+# Run the hello binary.
+ENTRYPOINT ["/app/main"]
\ No newline at end of file
diff -pruN 0.2.1-1.1/examples/producer-api/go.mod 0.4.49+ds1-1/examples/producer-api/go.mod
--- 0.2.1-1.1/examples/producer-api/go.mod	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/examples/producer-api/go.mod	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,8 @@
+module github.com/segmentio/kafka-go/example/producer-api
+
+go 1.15
+
+require (
+	github.com/klauspost/compress v1.12.2 // indirect
+	github.com/segmentio/kafka-go v0.4.28
+)
diff -pruN 0.2.1-1.1/examples/producer-api/go.sum 0.4.49+ds1-1/examples/producer-api/go.sum
--- 0.2.1-1.1/examples/producer-api/go.sum	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/examples/producer-api/go.sum	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,47 @@
+github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21 h1:YEetp8/yCZMuEPMUDHG0CW/brkkEp8mzqk2+ODEitlw=
+github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU=
+github.com/frankban/quicktest v1.11.3 h1:8sXhOn0uLys67V8EsXLc6eszDs8VXWxL3iRvebPhedY=
+github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k=
+github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/golang/snappy v0.0.3 h1:fHPg5GQYlCeLIPB9BZqMVR5nR9A+IM5zcgeTdjMYmLA=
+github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/google/go-cmp v0.5.4 h1:L8R9j+yAqZuZjsqh/z+F1NCffTKKLShY6zXTItVIZ8M=
+github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/klauspost/compress v1.9.8/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
+github.com/klauspost/compress v1.12.2 h1:2KCfW3I9M7nSc5wOqXAlW2v2U6v+w6cbjvbfp+OykW8=
+github.com/klauspost/compress v1.12.2/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
+github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI=
+github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
+github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
+github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
+github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
+github.com/pierrec/lz4 v2.6.0+incompatible h1:Ix9yFKn1nSPBLFl/yZknTp8TU5G4Ps0JDmguYK6iH1A=
+github.com/pierrec/lz4 v2.6.0+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/segmentio/kafka-go v0.4.28 h1:ATYbyenAlsoFxnV+VpIJMF87bvRuRsX7fezHNfpwkdM=
+github.com/segmentio/kafka-go v0.4.28/go.mod h1:XzMcoMjSzDGHcIwpWUI7GB43iKZ2fTVmryPSGLf/MPg=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0=
+github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c h1:u40Z8hqBAAQyv+vATcGgV0YCnDjqSL7/q/JyPhhJSPk=
+github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I=
+github.com/xdg/stringprep v1.0.0 h1:d9X0esnoa3dFsV0FG35rAT0RIhYFlPq7MiP+DW89La0=
+github.com/xdg/stringprep v1.0.0/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20190506204251-e1dfcc566284 h1:rlLehGeYg6jfoyz/eDqDU1iRXLKfR42nnNh57ytKEWo=
+golang.org/x/crypto v0.0.0-20190506204251-e1dfcc566284/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3 h1:0GoQqolDA55aaLxZyTzK/Y2ePZzZTUrRacwib7cNsYQ=
+golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
+gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
diff -pruN 0.2.1-1.1/examples/producer-api/main.go 0.4.49+ds1-1/examples/producer-api/main.go
--- 0.2.1-1.1/examples/producer-api/main.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/examples/producer-api/main.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,54 @@
+package main
+
+import (
+	"fmt"
+	"io/ioutil"
+	"log"
+	"net/http"
+	"os"
+
+	kafka "github.com/segmentio/kafka-go"
+)
+
+func producerHandler(kafkaWriter *kafka.Writer) func(http.ResponseWriter, *http.Request) {
+	return http.HandlerFunc(func(wrt http.ResponseWriter, req *http.Request) {
+		body, err := ioutil.ReadAll(req.Body)
+		if err != nil {
+			log.Fatalln(err)
+		}
+		msg := kafka.Message{
+			Key:   []byte(fmt.Sprintf("address-%s", req.RemoteAddr)),
+			Value: body,
+		}
+		err = kafkaWriter.WriteMessages(req.Context(), msg)
+
+		if err != nil {
+			wrt.Write([]byte(err.Error()))
+			log.Fatalln(err)
+		}
+	})
+}
+
+func getKafkaWriter(kafkaURL, topic string) *kafka.Writer {
+	return &kafka.Writer{
+		Addr:     kafka.TCP(kafkaURL),
+		Topic:    topic,
+		Balancer: &kafka.LeastBytes{},
+	}
+}
+
+func main() {
+	// get kafka writer using environment variables.
+	kafkaURL := os.Getenv("kafkaURL")
+	topic := os.Getenv("topic")
+	kafkaWriter := getKafkaWriter(kafkaURL, topic)
+
+	defer kafkaWriter.Close()
+
+	// Add handle func for producer.
+	http.HandleFunc("/", producerHandler(kafkaWriter))
+
+	// Run the web server.
+	fmt.Println("start producer-api ... !!")
+	log.Fatal(http.ListenAndServe(":8080", nil))
+}
diff -pruN 0.2.1-1.1/examples/producer-api/test.http 0.4.49+ds1-1/examples/producer-api/test.http
--- 0.2.1-1.1/examples/producer-api/test.http	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/examples/producer-api/test.http	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,13 @@
+### send data text
+POST http://localhost:8080
+Content-Type: text/plain
+
+"Hello-api"
+
+### send data json
+POST http://localhost:8080
+Content-Type: application/json
+
+{
+    "data":"Hello-api"
+}
\ No newline at end of file
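The test.http requests above can also be issued programmatically. The following Go
snippet is an illustrative sketch only (it is not part of the diff); it assumes the
producer-api service from the example above is listening on localhost:8080 and
reuses the JSON body from test.http.

package main

import (
	"bytes"
	"fmt"
	"log"
	"net/http"
)

func main() {
	// Post a JSON payload to the producer-api service, which forwards it to Kafka.
	body := bytes.NewBufferString(`{"data":"Hello-api"}`)
	resp, err := http.Post("http://localhost:8080", "application/json", body)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.Status)
}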
diff -pruN 0.2.1-1.1/examples/producer-random/Dockerfile 0.4.49+ds1-1/examples/producer-random/Dockerfile
--- 0.2.1-1.1/examples/producer-random/Dockerfile	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/examples/producer-random/Dockerfile	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,31 @@
+#####################################
+#   STEP 1 build executable binary  #
+#####################################
+FROM golang:alpine AS builder
+
+# Install git.
+# Git is required for fetching the dependencies.
+RUN apk update && apk add --no-cache git
+
+WORKDIR /app
+
+COPY go.mod .
+COPY go.sum .
+
+RUN go mod download
+
+COPY . .
+
+# Build the binary.
+RUN CGO_ENABLED=0 GOOS=linux go build -o main
+
+#####################################
+#   STEP 2 build a small image      #
+#####################################
+FROM scratch
+
+# Copy our static executable.
+COPY --from=builder /app/main /app/main
+
+# Run the producer-random binary.
+ENTRYPOINT ["/app/main"]
\ No newline at end of file
diff -pruN 0.2.1-1.1/examples/producer-random/go.mod 0.4.49+ds1-1/examples/producer-random/go.mod
--- 0.2.1-1.1/examples/producer-random/go.mod	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/examples/producer-random/go.mod	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,10 @@
+module github.com/segmentio/kafka-go/example/producer-random
+
+go 1.15
+
+require (
+	github.com/davecgh/go-spew v1.1.1 // indirect
+	github.com/google/uuid v1.1.0
+	github.com/klauspost/compress v1.12.2 // indirect
+	github.com/segmentio/kafka-go v0.4.28
+)
diff -pruN 0.2.1-1.1/examples/producer-random/go.sum 0.4.49+ds1-1/examples/producer-random/go.sum
--- 0.2.1-1.1/examples/producer-random/go.sum	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/examples/producer-random/go.sum	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,50 @@
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21 h1:YEetp8/yCZMuEPMUDHG0CW/brkkEp8mzqk2+ODEitlw=
+github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU=
+github.com/frankban/quicktest v1.11.3 h1:8sXhOn0uLys67V8EsXLc6eszDs8VXWxL3iRvebPhedY=
+github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k=
+github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/golang/snappy v0.0.3 h1:fHPg5GQYlCeLIPB9BZqMVR5nR9A+IM5zcgeTdjMYmLA=
+github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/google/go-cmp v0.5.4 h1:L8R9j+yAqZuZjsqh/z+F1NCffTKKLShY6zXTItVIZ8M=
+github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/uuid v1.1.0 h1:Jf4mxPC/ziBnoPIdpQdPJ9OeiomAUHLvxmPRSPH9m4s=
+github.com/google/uuid v1.1.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/klauspost/compress v1.9.8/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
+github.com/klauspost/compress v1.12.2 h1:2KCfW3I9M7nSc5wOqXAlW2v2U6v+w6cbjvbfp+OykW8=
+github.com/klauspost/compress v1.12.2/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
+github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI=
+github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
+github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
+github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
+github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
+github.com/pierrec/lz4 v2.6.0+incompatible h1:Ix9yFKn1nSPBLFl/yZknTp8TU5G4Ps0JDmguYK6iH1A=
+github.com/pierrec/lz4 v2.6.0+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/segmentio/kafka-go v0.4.28 h1:ATYbyenAlsoFxnV+VpIJMF87bvRuRsX7fezHNfpwkdM=
+github.com/segmentio/kafka-go v0.4.28/go.mod h1:XzMcoMjSzDGHcIwpWUI7GB43iKZ2fTVmryPSGLf/MPg=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0=
+github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c h1:u40Z8hqBAAQyv+vATcGgV0YCnDjqSL7/q/JyPhhJSPk=
+github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I=
+github.com/xdg/stringprep v1.0.0 h1:d9X0esnoa3dFsV0FG35rAT0RIhYFlPq7MiP+DW89La0=
+github.com/xdg/stringprep v1.0.0/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20190506204251-e1dfcc566284 h1:rlLehGeYg6jfoyz/eDqDU1iRXLKfR42nnNh57ytKEWo=
+golang.org/x/crypto v0.0.0-20190506204251-e1dfcc566284/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3 h1:0GoQqolDA55aaLxZyTzK/Y2ePZzZTUrRacwib7cNsYQ=
+golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
+gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
diff -pruN 0.2.1-1.1/examples/producer-random/main.go 0.4.49+ds1-1/examples/producer-random/main.go
--- 0.2.1-1.1/examples/producer-random/main.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/examples/producer-random/main.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,42 @@
+package main
+
+import (
+	"context"
+	"fmt"
+	"os"
+	"time"
+
+	"github.com/google/uuid"
+	kafka "github.com/segmentio/kafka-go"
+)
+
+func newKafkaWriter(kafkaURL, topic string) *kafka.Writer {
+	return &kafka.Writer{
+		Addr:     kafka.TCP(kafkaURL),
+		Topic:    topic,
+		Balancer: &kafka.LeastBytes{},
+	}
+}
+
+func main() {
+	// get kafka writer using environment variables.
+	kafkaURL := os.Getenv("kafkaURL")
+	topic := os.Getenv("topic")
+	writer := newKafkaWriter(kafkaURL, topic)
+	defer writer.Close()
+	fmt.Println("start producing ... !!")
+	for i := 0; ; i++ {
+		key := fmt.Sprintf("Key-%d", i)
+		msg := kafka.Message{
+			Key:   []byte(key),
+			Value: []byte(fmt.Sprint(uuid.New())),
+		}
+		err := writer.WriteMessages(context.Background(), msg)
+		if err != nil {
+			fmt.Println(err)
+		} else {
+			fmt.Println("produced", key)
+		}
+		time.Sleep(1 * time.Second)
+	}
+}
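The producer-random example only writes messages. For context, a minimal consumer
sketch that pairs with it is shown below; it is illustrative only (not part of the
diff), and the group ID "example-consumer" and the kafkaURL/topic environment
variables are assumptions mirroring the producer's configuration.

package main

import (
	"context"
	"fmt"
	"log"
	"os"

	kafka "github.com/segmentio/kafka-go"
)

func main() {
	// The reader joins a consumer group and reads the messages written by the producer.
	reader := kafka.NewReader(kafka.ReaderConfig{
		Brokers: []string{os.Getenv("kafkaURL")},
		GroupID: "example-consumer",
		Topic:   os.Getenv("topic"),
	})
	defer reader.Close()

	for {
		m, err := reader.ReadMessage(context.Background())
		if err != nil {
			log.Fatal(err)
		}
		fmt.Printf("consumed %s = %s\n", m.Key, m.Value)
	}
}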
diff -pruN 0.2.1-1.1/export_test.go 0.4.49+ds1-1/export_test.go
--- 0.2.1-1.1/export_test.go	2018-11-20 17:06:32.000000000 +0000
+++ 0.4.49+ds1-1/export_test.go	1970-01-01 00:00:00.000000000 +0000
@@ -1,9 +0,0 @@
-package kafka
-
-import "testing"
-
-func CreateTopic(t *testing.T, partitions int) string {
-	topic := makeTopic()
-	createTopic(t, topic, partitions)
-	return topic
-}
diff -pruN 0.2.1-1.1/fetch.go 0.4.49+ds1-1/fetch.go
--- 0.2.1-1.1/fetch.go	2018-11-20 17:06:32.000000000 +0000
+++ 0.4.49+ds1-1/fetch.go	2025-08-21 19:15:53.000000000 +0000
@@ -1,6 +1,194 @@
 package kafka
 
-import "bufio"
+import (
+	"context"
+	"fmt"
+	"math"
+	"net"
+	"time"
+
+	"github.com/segmentio/kafka-go/protocol"
+	fetchAPI "github.com/segmentio/kafka-go/protocol/fetch"
+)
+
+// FetchRequest represents a request sent to a kafka broker to retrieve records
+// from a topic partition.
+type FetchRequest struct {
+	// Address of the kafka broker to send the request to.
+	Addr net.Addr
+
+	// Topic, partition, and offset to retrieve records from. The offset may be
+	// one of the special FirstOffset or LastOffset constants, in which case the
+	// request will automatically discover the first or last offset of the
+	// partition and submit the request for these.
+	Topic     string
+	Partition int
+	Offset    int64
+
+	// Size and time limits of the response returned by the broker.
+	MinBytes int64
+	MaxBytes int64
+	MaxWait  time.Duration
+
+	// The isolation level for the request.
+	//
+	// Defaults to ReadUncommitted.
+	//
+	// This field requires the kafka broker to support the Fetch API in version
+	// 4 or above (otherwise the value is ignored).
+	IsolationLevel IsolationLevel
+}
+
+// FetchResponse represents a response from a kafka broker to a fetch request.
+type FetchResponse struct {
+	// The amount of time that the broker throttled the request.
+	Throttle time.Duration
+
+	// The topic and partition that the response came for (will match the values
+	// in the request).
+	Topic     string
+	Partition int
+
+	// Information about the topic partition layout returned from the broker.
+	//
+	// LastStableOffset requires the kafka broker to support the Fetch API in
+	// version 4 or above (otherwise the value is zero).
+	//
+	// LogStartOffset requires the kafka broker to support the Fetch API in
+	// version 5 or above (otherwise the value is zero).
+	HighWatermark    int64
+	LastStableOffset int64
+	LogStartOffset   int64
+
+	// An error that may have occurred while attempting to fetch the records.
+	//
+	// The error contains both the kafka error code, and an error message
+	// returned by the kafka broker. Programs may use the standard errors.Is
+	// function to test the error against kafka error codes.
+	Error error
+
+	// The set of records returned in the response.
+	//
+	// The program is expected to call the RecordSet's Close method when it
+	// has finished reading the records.
+	//
+	// Note that kafka may return record batches that start at an offset before
+	// the one that was requested. It is the program's responsibility to skip
+	// the offsets that it is not interested in.
+	Records RecordReader
+}
+
+// Fetch sends a fetch request to a kafka broker and returns the response.
+//
+// If the broker returned an invalid response with no topics, an error wrapping
+// protocol.ErrNoTopic is returned.
+//
+// If the broker returned an invalid response with no partitions, an error
+// wrapping protocol.ErrNoPartition is returned.
+func (c *Client) Fetch(ctx context.Context, req *FetchRequest) (*FetchResponse, error) {
+	timeout := c.timeout(ctx, math.MaxInt64)
+	maxWait := req.maxWait()
+
+	if maxWait < timeout {
+		timeout = maxWait
+	}
+
+	offset := req.Offset
+	switch offset {
+	case FirstOffset, LastOffset:
+		topic, partition := req.Topic, req.Partition
+
+		r, err := c.ListOffsets(ctx, &ListOffsetsRequest{
+			Addr: req.Addr,
+			Topics: map[string][]OffsetRequest{
+				topic: {{
+					Partition: partition,
+					Timestamp: offset,
+				}},
+			},
+		})
+		if err != nil {
+			return nil, fmt.Errorf("kafka.(*Client).Fetch: %w", err)
+		}
+
+		for _, p := range r.Topics[topic] {
+			if p.Partition == partition {
+				if p.Error != nil {
+					return nil, fmt.Errorf("kafka.(*Client).Fetch: %w", p.Error)
+				}
+				switch offset {
+				case FirstOffset:
+					offset = p.FirstOffset
+				case LastOffset:
+					offset = p.LastOffset
+				}
+				break
+			}
+		}
+	}
+
+	m, err := c.roundTrip(ctx, req.Addr, &fetchAPI.Request{
+		ReplicaID:      -1,
+		MaxWaitTime:    milliseconds(timeout),
+		MinBytes:       int32(req.MinBytes),
+		MaxBytes:       int32(req.MaxBytes),
+		IsolationLevel: int8(req.IsolationLevel),
+		SessionID:      -1,
+		SessionEpoch:   -1,
+		Topics: []fetchAPI.RequestTopic{{
+			Topic: req.Topic,
+			Partitions: []fetchAPI.RequestPartition{{
+				Partition:          int32(req.Partition),
+				CurrentLeaderEpoch: -1,
+				FetchOffset:        offset,
+				LogStartOffset:     -1,
+				PartitionMaxBytes:  int32(req.MaxBytes),
+			}},
+		}},
+	})
+
+	if err != nil {
+		return nil, fmt.Errorf("kafka.(*Client).Fetch: %w", err)
+	}
+
+	res := m.(*fetchAPI.Response)
+	if len(res.Topics) == 0 {
+		return nil, fmt.Errorf("kafka.(*Client).Fetch: %w", protocol.ErrNoTopic)
+	}
+	topic := &res.Topics[0]
+	if len(topic.Partitions) == 0 {
+		return nil, fmt.Errorf("kafka.(*Client).Fetch: %w", protocol.ErrNoPartition)
+	}
+	partition := &topic.Partitions[0]
+
+	ret := &FetchResponse{
+		Throttle:         makeDuration(res.ThrottleTimeMs),
+		Topic:            topic.Topic,
+		Partition:        int(partition.Partition),
+		Error:            makeError(res.ErrorCode, ""),
+		HighWatermark:    partition.HighWatermark,
+		LastStableOffset: partition.LastStableOffset,
+		LogStartOffset:   partition.LogStartOffset,
+		Records:          partition.RecordSet.Records,
+	}
+
+	if partition.ErrorCode != 0 {
+		ret.Error = makeError(partition.ErrorCode, "")
+	}
+
+	if ret.Records == nil {
+		ret.Records = NewRecordReader()
+	}
+
+	return ret, nil
+}
+
+func (req *FetchRequest) maxWait() time.Duration {
+	if req.MaxWait > 0 {
+		return req.MaxWait
+	}
+	return defaultMaxWait
+}
 
 type fetchRequestV2 struct {
 	ReplicaID   int32
@@ -13,11 +201,11 @@ func (r fetchRequestV2) size() int32 {
 	return 4 + 4 + 4 + sizeofArray(len(r.Topics), func(i int) int32 { return r.Topics[i].size() })
 }
 
-func (r fetchRequestV2) writeTo(w *bufio.Writer) {
-	writeInt32(w, r.ReplicaID)
-	writeInt32(w, r.MaxWaitTime)
-	writeInt32(w, r.MinBytes)
-	writeArray(w, len(r.Topics), func(i int) { r.Topics[i].writeTo(w) })
+func (r fetchRequestV2) writeTo(wb *writeBuffer) {
+	wb.writeInt32(r.ReplicaID)
+	wb.writeInt32(r.MaxWaitTime)
+	wb.writeInt32(r.MinBytes)
+	wb.writeArray(len(r.Topics), func(i int) { r.Topics[i].writeTo(wb) })
 }
 
 type fetchRequestTopicV2 struct {
@@ -30,9 +218,9 @@ func (t fetchRequestTopicV2) size() int3
 		sizeofArray(len(t.Partitions), func(i int) int32 { return t.Partitions[i].size() })
 }
 
-func (t fetchRequestTopicV2) writeTo(w *bufio.Writer) {
-	writeString(w, t.TopicName)
-	writeArray(w, len(t.Partitions), func(i int) { t.Partitions[i].writeTo(w) })
+func (t fetchRequestTopicV2) writeTo(wb *writeBuffer) {
+	wb.writeString(t.TopicName)
+	wb.writeArray(len(t.Partitions), func(i int) { t.Partitions[i].writeTo(wb) })
 }
 
 type fetchRequestPartitionV2 struct {
@@ -45,10 +233,10 @@ func (p fetchRequestPartitionV2) size()
 	return 4 + 8 + 4
 }
 
-func (p fetchRequestPartitionV2) writeTo(w *bufio.Writer) {
-	writeInt32(w, p.Partition)
-	writeInt64(w, p.FetchOffset)
-	writeInt32(w, p.MaxBytes)
+func (p fetchRequestPartitionV2) writeTo(wb *writeBuffer) {
+	wb.writeInt32(p.Partition)
+	wb.writeInt64(p.FetchOffset)
+	wb.writeInt32(p.MaxBytes)
 }
 
 type fetchResponseV2 struct {
@@ -60,9 +248,9 @@ func (r fetchResponseV2) size() int32 {
 	return 4 + sizeofArray(len(r.Topics), func(i int) int32 { return r.Topics[i].size() })
 }
 
-func (r fetchResponseV2) writeTo(w *bufio.Writer) {
-	writeInt32(w, r.ThrottleTime)
-	writeArray(w, len(r.Topics), func(i int) { r.Topics[i].writeTo(w) })
+func (r fetchResponseV2) writeTo(wb *writeBuffer) {
+	wb.writeInt32(r.ThrottleTime)
+	wb.writeArray(len(r.Topics), func(i int) { r.Topics[i].writeTo(wb) })
 }
 
 type fetchResponseTopicV2 struct {
@@ -75,9 +263,9 @@ func (t fetchResponseTopicV2) size() int
 		sizeofArray(len(t.Partitions), func(i int) int32 { return t.Partitions[i].size() })
 }
 
-func (t fetchResponseTopicV2) writeTo(w *bufio.Writer) {
-	writeString(w, t.TopicName)
-	writeArray(w, len(t.Partitions), func(i int) { t.Partitions[i].writeTo(w) })
+func (t fetchResponseTopicV2) writeTo(wb *writeBuffer) {
+	wb.writeString(t.TopicName)
+	wb.writeArray(len(t.Partitions), func(i int) { t.Partitions[i].writeTo(wb) })
 }
 
 type fetchResponsePartitionV2 struct {
@@ -92,10 +280,10 @@ func (p fetchResponsePartitionV2) size()
 	return 4 + 2 + 8 + 4 + p.MessageSet.size()
 }
 
-func (p fetchResponsePartitionV2) writeTo(w *bufio.Writer) {
-	writeInt32(w, p.Partition)
-	writeInt16(w, p.ErrorCode)
-	writeInt64(w, p.HighwaterMarkOffset)
-	writeInt32(w, p.MessageSetSize)
-	p.MessageSet.writeTo(w)
+func (p fetchResponsePartitionV2) writeTo(wb *writeBuffer) {
+	wb.writeInt32(p.Partition)
+	wb.writeInt16(p.ErrorCode)
+	wb.writeInt64(p.HighwaterMarkOffset)
+	wb.writeInt32(p.MessageSetSize)
+	p.MessageSet.writeTo(wb)
 }
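The FetchRequest/FetchResponse types and the Client.Fetch method added above can be
used directly by client programs. The sketch below is illustrative only (not part of
the diff); the broker address localhost:9092 and the topic name "my-topic" are
assumptions, and records are drained until io.EOF, mirroring fetch_test.go below.

package main

import (
	"context"
	"errors"
	"fmt"
	"io"
	"log"
	"time"

	kafka "github.com/segmentio/kafka-go"
)

func main() {
	// Addr is used as the default target for requests that do not set their own Addr.
	client := &kafka.Client{Addr: kafka.TCP("localhost:9092")}

	res, err := client.Fetch(context.Background(), &kafka.FetchRequest{
		Topic:     "my-topic",
		Partition: 0,
		Offset:    kafka.FirstOffset,
		MinBytes:  1,
		MaxBytes:  64 * 1024,
		MaxWait:   500 * time.Millisecond,
	})
	if err != nil {
		log.Fatal(err)
	}
	if res.Error != nil {
		log.Fatal(res.Error)
	}

	for {
		rec, err := res.Records.ReadRecord()
		if err != nil {
			if errors.Is(err, io.EOF) {
				break
			}
			log.Fatal(err)
		}
		fmt.Println("offset:", rec.Offset)
		// Key and Value are readers owned by the caller and should be closed.
		if rec.Key != nil {
			rec.Key.Close()
		}
		if rec.Value != nil {
			rec.Value.Close()
		}
	}
}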
diff -pruN 0.2.1-1.1/fetch_test.go 0.4.49+ds1-1/fetch_test.go
--- 0.2.1-1.1/fetch_test.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/fetch_test.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,285 @@
+package kafka
+
+import (
+	"context"
+	"errors"
+	"io"
+	"io/ioutil"
+	"net"
+	"reflect"
+	"testing"
+	"time"
+
+	"github.com/segmentio/kafka-go/compress"
+)
+
+func produceRecords(t *testing.T, n int, addr net.Addr, topic string, compression compress.Codec) []Record {
+	conn, err := (&Dialer{
+		Resolver: &net.Resolver{},
+	}).DialLeader(context.Background(), addr.Network(), addr.String(), topic, 0)
+
+	if err != nil {
+		t.Fatal("failed to open a new kafka connection:", err)
+	}
+	defer conn.Close()
+
+	msgs := makeTestSequence(n)
+	if compression == nil {
+		_, err = conn.WriteMessages(msgs...)
+	} else {
+		_, err = conn.WriteCompressedMessages(compression, msgs...)
+	}
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	records := make([]Record, len(msgs))
+	for offset, msg := range msgs {
+		records[offset] = Record{
+			Offset:  int64(offset),
+			Key:     NewBytes(msg.Key),
+			Value:   NewBytes(msg.Value),
+			Headers: msg.Headers,
+		}
+	}
+
+	return records
+}
+
+func TestClientFetch(t *testing.T) {
+	client, topic, shutdown := newLocalClientAndTopic()
+	defer shutdown()
+
+	records := produceRecords(t, 10, client.Addr, topic, nil)
+
+	res, err := client.Fetch(context.Background(), &FetchRequest{
+		Topic:     topic,
+		Partition: 0,
+		Offset:    0,
+		MinBytes:  1,
+		MaxBytes:  64 * 1024,
+		MaxWait:   100 * time.Millisecond,
+	})
+
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	assertFetchResponse(t, res, &FetchResponse{
+		Topic:         topic,
+		Partition:     0,
+		HighWatermark: 10,
+		Records:       NewRecordReader(records...),
+	})
+}
+
+func TestClientFetchCompressed(t *testing.T) {
+	client, topic, shutdown := newLocalClientAndTopic()
+	defer shutdown()
+
+	records := produceRecords(t, 10, client.Addr, topic, &compress.GzipCodec)
+
+	res, err := client.Fetch(context.Background(), &FetchRequest{
+		Topic:     topic,
+		Partition: 0,
+		Offset:    0,
+		MinBytes:  1,
+		MaxBytes:  64 * 1024,
+		MaxWait:   100 * time.Millisecond,
+	})
+
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	assertFetchResponse(t, res, &FetchResponse{
+		Topic:         topic,
+		Partition:     0,
+		HighWatermark: 10,
+		Records:       NewRecordReader(records...),
+	})
+}
+
+func assertFetchResponse(t *testing.T, found, expected *FetchResponse) {
+	t.Helper()
+
+	if found.Topic != expected.Topic {
+		t.Error("invalid topic found in response:", found.Topic)
+	}
+
+	if found.Partition != expected.Partition {
+		t.Error("invalid partition found in response:", found.Partition)
+	}
+
+	if found.HighWatermark != expected.HighWatermark {
+		t.Error("invalid high watermark found in response:", found.HighWatermark)
+	}
+
+	if found.Error != nil {
+		t.Error("unexpected error found in response:", found.Error)
+	}
+
+	records1, err := readRecords(found.Records)
+	if err != nil {
+		t.Error("error reading records:", err)
+	}
+
+	records2, err := readRecords(expected.Records)
+	if err != nil {
+		t.Error("error reading records:", err)
+	}
+
+	assertRecords(t, records1, records2)
+}
+
+type memoryRecord struct {
+	offset  int64
+	key     []byte
+	value   []byte
+	headers []Header
+}
+
+func assertRecords(t *testing.T, found, expected []memoryRecord) {
+	t.Helper()
+	i := 0
+
+	for i < len(found) && i < len(expected) {
+		r1 := found[i]
+		r2 := expected[i]
+
+		if !reflect.DeepEqual(r1, r2) {
+			t.Errorf("records at index %d don't match", i)
+			t.Logf("expected:\n%#v", r2)
+			t.Logf("found:\n%#v", r1)
+		}
+
+		i++
+	}
+
+	for i < len(found) {
+		t.Errorf("unexpected record at index %d:\n%+v", i, found[i])
+		i++
+	}
+
+	for i < len(expected) {
+		t.Errorf("missing record at index %d:\n%+v", i, expected[i])
+		i++
+	}
+}
+
+func readRecords(records RecordReader) ([]memoryRecord, error) {
+	list := []memoryRecord{}
+
+	for {
+		rec, err := records.ReadRecord()
+
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				return list, nil
+			}
+			return nil, err
+		}
+
+		var (
+			offset      = rec.Offset
+			key         = rec.Key
+			value       = rec.Value
+			headers     = rec.Headers
+			bytesKey    []byte
+			bytesValues []byte
+		)
+
+		if key != nil {
+			bytesKey, _ = ioutil.ReadAll(key)
+		}
+
+		if value != nil {
+			bytesValues, _ = ioutil.ReadAll(value)
+		}
+
+		list = append(list, memoryRecord{
+			offset:  offset,
+			key:     bytesKey,
+			value:   bytesValues,
+			headers: headers,
+		})
+	}
+}
+
+func TestClientPipeline(t *testing.T) {
+	client, topic, shutdown := newLocalClientAndTopic()
+	defer shutdown()
+
+	const numBatches = 100
+	const recordsPerBatch = 30
+
+	unixEpoch := time.Unix(0, 0)
+	records := make([]Record, recordsPerBatch)
+	content := []byte("1234567890")
+
+	for i := 0; i < numBatches; i++ {
+		for j := range records {
+			records[j] = Record{Value: NewBytes(content)}
+		}
+
+		_, err := client.Produce(context.Background(), &ProduceRequest{
+			Topic:        topic,
+			RequiredAcks: -1,
+			Records:      NewRecordReader(records...),
+			Compression:  Snappy,
+		})
+		if err != nil {
+			t.Fatal(err)
+		}
+	}
+
+	offset := int64(0)
+
+	for i := 0; i < (numBatches * recordsPerBatch); {
+		req := &FetchRequest{
+			Topic:    topic,
+			Offset:   offset,
+			MinBytes: 1,
+			MaxBytes: 8192,
+			MaxWait:  500 * time.Millisecond,
+		}
+
+		res, err := client.Fetch(context.Background(), req)
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		if res.Error != nil {
+			t.Fatal(res.Error)
+		}
+
+		for {
+			r, err := res.Records.ReadRecord()
+			if err != nil {
+				if errors.Is(err, io.EOF) {
+					break
+				}
+				t.Fatal(err)
+			}
+
+			if r.Key != nil {
+				r.Key.Close()
+			}
+
+			if r.Value != nil {
+				r.Value.Close()
+			}
+
+			if r.Offset != offset {
+				t.Errorf("record at index %d has mismatching offset, want %d but got %d", i, offset, r.Offset)
+			}
+
+			if r.Time.IsZero() || r.Time.Equal(unixEpoch) {
+				t.Errorf("record at index %d with offset %d has no timestamp", i, r.Offset)
+			}
+
+			offset = r.Offset + 1
+			i++
+		}
+	}
+}
diff -pruN 0.2.1-1.1/findcoordinator.go 0.4.49+ds1-1/findcoordinator.go
--- 0.2.1-1.1/findcoordinator.go	2018-11-20 17:06:32.000000000 +0000
+++ 0.4.49+ds1-1/findcoordinator.go	2025-08-21 19:15:53.000000000 +0000
@@ -2,8 +2,92 @@ package kafka
 
 import (
 	"bufio"
+	"context"
+	"fmt"
+	"net"
+	"time"
+
+	"github.com/segmentio/kafka-go/protocol/findcoordinator"
+)
+
+// CoordinatorKeyType is used to specify the type of coordinator to look for.
+type CoordinatorKeyType int8
+
+const (
+	// CoordinatorKeyTypeConsumer type is used when looking for a Group coordinator.
+	CoordinatorKeyTypeConsumer CoordinatorKeyType = 0
+
+	// CoordinatorKeyTypeTransaction type is used when looking for a Transaction coordinator.
+	CoordinatorKeyTypeTransaction CoordinatorKeyType = 1
 )
 
+// FindCoordinatorRequest is the request structure for the FindCoordinator function.
+type FindCoordinatorRequest struct {
+	// Address of the kafka broker to send the request to.
+	Addr net.Addr
+
+	// The coordinator key.
+	Key string
+
+	// The coordinator key type. (Group, transaction, etc.)
+	KeyType CoordinatorKeyType
+}
+
+// FindCoordinatorResponseCoordinator contains details about the found coordinator.
+type FindCoordinatorResponseCoordinator struct {
+	// NodeID holds the broker id.
+	NodeID int
+
+	// Host of the broker.
+	Host string
+
+	// Port on which the broker accepts requests.
+	Port int
+}
+
+// FindCoordinatorResponse is the response structure for the FindCoordinator function.
+type FindCoordinatorResponse struct {
+	// The Transaction/Group Coordinator details
+	Coordinator *FindCoordinatorResponseCoordinator
+
+	// The amount of time that the broker throttled the request.
+	Throttle time.Duration
+
+	// An error that may have occurred while attempting to retrieve the coordinator.
+	//
+	// The error contains both the kafka error code, and an error message
+	// returned by the kafka broker.
+	Error error
+}
+
+// FindCoordinator sends a findCoordinator request to a kafka broker and returns the
+// response.
+func (c *Client) FindCoordinator(ctx context.Context, req *FindCoordinatorRequest) (*FindCoordinatorResponse, error) {
+
+	m, err := c.roundTrip(ctx, req.Addr, &findcoordinator.Request{
+		Key:     req.Key,
+		KeyType: int8(req.KeyType),
+	})
+
+	if err != nil {
+		return nil, fmt.Errorf("kafka.(*Client).FindCoordinator: %w", err)
+	}
+
+	res := m.(*findcoordinator.Response)
+	coordinator := &FindCoordinatorResponseCoordinator{
+		NodeID: int(res.NodeID),
+		Host:   res.Host,
+		Port:   int(res.Port),
+	}
+	ret := &FindCoordinatorResponse{
+		Throttle:    makeDuration(res.ThrottleTimeMs),
+		Error:       makeError(res.ErrorCode, res.ErrorMessage),
+		Coordinator: coordinator,
+	}
+
+	return ret, nil
+}
+
 // FindCoordinatorRequestV0 requests the coordinator for the specified group or transaction
 //
 // See http://kafka.apache.org/protocol.html#The_Messages_FindCoordinator
@@ -17,8 +101,8 @@ func (t findCoordinatorRequestV0) size()
 	return sizeofString(t.CoordinatorKey)
 }
 
-func (t findCoordinatorRequestV0) writeTo(w *bufio.Writer) {
-	writeString(w, t.CoordinatorKey)
+func (t findCoordinatorRequestV0) writeTo(wb *writeBuffer) {
+	wb.writeString(t.CoordinatorKey)
 }
 
 type findCoordinatorResponseCoordinatorV0 struct {
@@ -38,10 +122,10 @@ func (t findCoordinatorResponseCoordinat
 		sizeofInt32(t.Port)
 }
 
-func (t findCoordinatorResponseCoordinatorV0) writeTo(w *bufio.Writer) {
-	writeInt32(w, t.NodeID)
-	writeString(w, t.Host)
-	writeInt32(w, t.Port)
+func (t findCoordinatorResponseCoordinatorV0) writeTo(wb *writeBuffer) {
+	wb.writeInt32(t.NodeID)
+	wb.writeString(t.Host)
+	wb.writeInt32(t.Port)
 }
 
 func (t *findCoordinatorResponseCoordinatorV0) readFrom(r *bufio.Reader, size int) (remain int, err error) {
@@ -70,9 +154,9 @@ func (t findCoordinatorResponseV0) size(
 		t.Coordinator.size()
 }
 
-func (t findCoordinatorResponseV0) writeTo(w *bufio.Writer) {
-	writeInt16(w, t.ErrorCode)
-	t.Coordinator.writeTo(w)
+func (t findCoordinatorResponseV0) writeTo(wb *writeBuffer) {
+	wb.writeInt16(t.ErrorCode)
+	t.Coordinator.writeTo(wb)
 }
 
 func (t *findCoordinatorResponseV0) readFrom(r *bufio.Reader, size int) (remain int, err error) {
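A minimal sketch of the Client.FindCoordinator API added above; it is illustrative
only (not part of the diff), and the broker address localhost:9092 and the group ID
"my-consumer-group" are assumptions.

package main

import (
	"context"
	"fmt"
	"log"

	kafka "github.com/segmentio/kafka-go"
)

func main() {
	client := &kafka.Client{Addr: kafka.TCP("localhost:9092")}

	// Look up the group coordinator for a consumer group.
	resp, err := client.FindCoordinator(context.Background(), &kafka.FindCoordinatorRequest{
		Addr:    client.Addr,
		Key:     "my-consumer-group",
		KeyType: kafka.CoordinatorKeyTypeConsumer,
	})
	if err != nil {
		log.Fatal(err)
	}
	if resp.Error != nil {
		log.Fatal(resp.Error)
	}
	fmt.Printf("coordinator: %s:%d (node %d)\n",
		resp.Coordinator.Host, resp.Coordinator.Port, resp.Coordinator.NodeID)
}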
diff -pruN 0.2.1-1.1/findcoordinator_test.go 0.4.49+ds1-1/findcoordinator_test.go
--- 0.2.1-1.1/findcoordinator_test.go	2018-11-20 17:06:32.000000000 +0000
+++ 0.4.49+ds1-1/findcoordinator_test.go	2025-08-21 19:15:53.000000000 +0000
@@ -3,8 +3,12 @@ package kafka
 import (
 	"bufio"
 	"bytes"
+	"context"
+	"errors"
 	"reflect"
+	"strings"
 	"testing"
+	"time"
 )
 
 func TestFindCoordinatorResponseV0(t *testing.T) {
@@ -17,13 +21,12 @@ func TestFindCoordinatorResponseV0(t *te
 		},
 	}
 
-	buf := bytes.NewBuffer(nil)
-	w := bufio.NewWriter(buf)
+	b := bytes.NewBuffer(nil)
+	w := &writeBuffer{w: b}
 	item.writeTo(w)
-	w.Flush()
 
 	var found findCoordinatorResponseV0
-	remain, err := (&found).readFrom(bufio.NewReader(buf), buf.Len())
+	remain, err := (&found).readFrom(bufio.NewReader(b), b.Len())
 	if err != nil {
 		t.Error(err)
 		t.FailNow()
@@ -37,3 +40,47 @@ func TestFindCoordinatorResponseV0(t *te
 		t.FailNow()
 	}
 }
+
+func TestClientFindCoordinator(t *testing.T) {
+	client, shutdown := newLocalClient()
+	defer shutdown()
+
+	ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
+	defer cancel()
+	resp, err := waitForCoordinatorIndefinitely(ctx, client, &FindCoordinatorRequest{
+		Addr:    client.Addr,
+		Key:     "TransactionalID-1",
+		KeyType: CoordinatorKeyTypeTransaction,
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if resp.Coordinator.Host != "localhost" {
+		t.Fatal("Coordinator should be found @ localhost")
+	}
+}
+
+// waitForCoordinatorIndefinitely blocks until a coordinator is found or the context is cancelled.
+func waitForCoordinatorIndefinitely(ctx context.Context, c *Client, req *FindCoordinatorRequest) (*FindCoordinatorResponse, error) {
+	resp, err := c.FindCoordinator(ctx, req)
+
+	for shouldRetryfindingCoordinator(resp, err) && ctx.Err() == nil {
+		time.Sleep(1 * time.Second)
+		resp, err = c.FindCoordinator(ctx, req)
+	}
+	return resp, err
+}
+
+// shouldRetryfindingCoordinator reports whether the coordinator lookup should be
+// retried, which is the case while the test Kafka broker is still setting up.
+func shouldRetryfindingCoordinator(resp *FindCoordinatorResponse, err error) bool {
+	brokerSetupIncomplete := err != nil &&
+		strings.Contains(
+			strings.ToLower(err.Error()),
+			strings.ToLower("unexpected EOF"))
+	coordinatorNotFound := resp != nil &&
+		resp.Error != nil &&
+		errors.Is(resp.Error, GroupCoordinatorNotAvailable)
+	return brokerSetupIncomplete || coordinatorNotFound
+}
diff -pruN 0.2.1-1.1/fixtures/v1-v1.hex 0.4.49+ds1-1/fixtures/v1-v1.hex
--- 0.2.1-1.1/fixtures/v1-v1.hex	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/fixtures/v1-v1.hex	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1 @@
+000001660000000a00000000000015c79861000000010009746573742d6564677900000001000000000000000000000000000400000000000000040000000000000000ffffffff0000011f00000000000000000000003ca293717501000000017c4f08dc7f00000005616c706861000000217b22636f756e74223a302c2266696c6c6572223a2261616161616161616161227d00000000000000010000003b3d4abab001000000017c4f08dc970000000462657461000000217b22636f756e74223a302c2266696c6c6572223a2262626262626262626262227d00000000000000020000003cbcad5cde01000000017c4f09b16d0000000567616d6d61000000217b22636f756e74223a302c2266696c6c6572223a2263636363636363636363227d00000000000000030000003c8585230b01000000017c4f09b6b20000000564656c7461000000217b22636f756e74223a302c2266696c6c6572223a2264646464646464646464227d
\ No newline at end of file
Binary files 0.2.1-1.1/fixtures/v1-v1.pcapng and 0.4.49+ds1-1/fixtures/v1-v1.pcapng differ
diff -pruN 0.2.1-1.1/fixtures/v1-v1c-v2-v2c-v2b-v2b-v2b-v2bc-v1b-v1bc.hex 0.4.49+ds1-1/fixtures/v1-v1c-v2-v2c-v2b-v2b-v2b-v2bc-v1b-v1bc.hex
--- 0.2.1-1.1/fixtures/v1-v1c-v2-v2c-v2b-v2b-v2b-v2bc-v1b-v1bc.hex	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/fixtures/v1-v1c-v2-v2c-v2b-v2b-v2b-v2bc-v1b-v1bc.hex	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1 @@
+000006b40000000a00000000000021f08796000000010007746573742d383800000001000000000000000000000000001400000000000000140000000000000000ffffffff0000066f00000000000000000000003c42f0d0f101000000017c477ab6a500000005616c706861000000217b22636f756e74223a302c2266696c6c6572223a2261616161616161616161227d00000000000000010000003bf4f7a99e01000000017c477abb610000000462657461000000217b22636f756e74223a302c2266696c6c6572223a2262626262626262626262227d00000000000000020000005fd3cf85ff01010000017c477b3bcbffffffff000000491f8b0800000000000000636080039bba2d51db18810cc61af76aebd340066b624e41462290a158ad949c5f9a57a26465a0a394969993935aa464a59408074ab5001b5f3ee14800000000000000000000030000005e5d1733a801010000017c477b408fffffffff000000481f8b080000000000000063608003eb95673d5f3002198c35eed50efd40064b526a49229056ac564ace2fcd2b51b232d0514acbccc9492d52b2524a8203a55a002737831e4700000000000000000000040000005e00000000020ab23c660000000000000000017c477d995f0000017c477d995fffffffffffffffffffffffffffff00000001580000000a67616d6d61427b22636f756e74223a302c2266696c6c6572223a2263636363636363636363227d0000000000000000050000005e000000000238c0553f0000000000000000017c477d9ec80000017c477d9ec8ffffffffffffffffffffffffffff00000001580000000a64656c7461427b22636f756e74223a302c2266696c6c6572223a2264646464646464646464227d0000000000000000060000006a0000000002188627120001000000000000017c477dd0b70000017c477dd0b7ffffffffffffffffffffffffffff000000011f8b08000000000000008b606060e04a4fcccd4d74aa564ace2fcd2b51b232d0514acbccc9492d52b2524a8603a55a0600788108a12d00000000000000000000070000006a0000000002b08e2b720001000000000000017c477dd7ef0000017c477dd7efffffffffffffffffffffffffffff000000011f8b08000000000000008b606060e04a49cd294974aa564ace2fcd2b51b232d0514acbccc9492d52b2524a8103a55a0600496dfe822d00000000000000000000080000008d00000000023cc016270000000000010000017c4784fe490000017c47850044ffffffffffffffffffffffffffff000000025c0000000e657073696c6f6e427b22636f756e74223a302c2266696c6c6572223a2265656565656565656565227d005800f60702087a657461427b22636f756e74223a302c2266696c6c6572223a2266666666666666666666227d00000000000000000a0000007d00000000026e844d550001000000010000017c4785514b0000017c47855423ffffffffffffffffffffffffffff000000021f8b08000000000000008b616060e04b2d28ceccc9cf73aa564ace2fcd2b51b232d0514acbccc9492d52b2524a8503a55a8608860ddc4c1c55a92589d815a7c1015031003ebb53a15c000000000000000000000c0000008a0000000002e5dfd9e20000000000010000017c4c8f1e1c0000017c4c8f20e8ffffffffffffffffffffffffffff000000025400000006657461427b22636f756e74223a302c2266696c6c6572223a2267676767676767676767227d005a00980b020a7468657461427b22636f756e74223a302c2266696c6c6572223a2268686868686868686868227d00000000000000000e0000007700000000020f80521f0001000000010000017c4c8f4da50000017c4c8f4fb8ffffffffffffffffffffffffffff000000021f8b08000000000000000b616060604b2d4974aa564ace2fcd2b51b232d0514acbccc9492d52b2524a8703a55a862886651c4c5c2519385567c001503500b01e3aa95900000000000000000000100000003a3b6d4cf601000000017c4cadbd7300000003657461000000217b22636f756e74223a302c2266696c6c6572223a2267676767676767676767227d00000000000000110000003c857f5cd501000000017c4cadbd99000000057468657461000000217b22636f756e74223a302c2266696c6c6572223a2268686868686868686868227d000000000000001300000076dbf0a20a01010000017c4cadf305ffffffff000000601f8b080000000000000063608003ab0d4959758c4006638dcfda8fcf810ce6d4924420a558ad949c5f9a57a26465a0a394969993935aa464a5940e074ab55013409a6d9412cf95c14cf9cc0a64b09664e03327030e946a01e34da7538e000000
\ No newline at end of file
Binary files 0.2.1-1.1/fixtures/v1-v1c-v2-v2c-v2b-v2b-v2b-v2bc-v1b-v1bc.pcapng and 0.4.49+ds1-1/fixtures/v1-v1c-v2-v2c-v2b-v2b-v2b-v2bc-v1b-v1bc.pcapng differ
diff -pruN 0.2.1-1.1/fixtures/v1c-v1-v1c.hex 0.4.49+ds1-1/fixtures/v1c-v1-v1c.hex
--- 0.2.1-1.1/fixtures/v1c-v1-v1c.hex	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/fixtures/v1c-v1-v1c.hex	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1 @@
+000002350000000a0000000000003d15acfe00000001000b746573742d627265657a7900000001000000000000000000000000000600000000000000060000000000000000ffffffff000001ec000000000000000100000079779afa8b01010000017c4f11cdc9ffffffff000000631f8b0800000000000000636080039bf9617b7418810cc61a7fc1b32b810cd6c49c828c442043b15a2939bf34af44c9ca4047292d332727b548c94a29110e946aa16680b45b5b967f780937e72490c192945a82db98243850aa05001ea2107b8f00000000000000000000020000003cda0e410e01000000017c4f1212630000000567616d6d61000000217b22636f756e74223a302c2266696c6c6572223a2263636363636363636363227d00000000000000030000003c0470399301000000017c4f12154e0000000564656c7461000000217b22636f756e74223a302c2266696c6c6572223a2264646464646464646464227d0000000000000004000000613b0e4db601010000017c4f124947ffffffff0000004b1f8b080000000000000063608003bb67b39e743302198c35fe429eee40067b6a4171664e7e1e90a958ad949c5f9a57a26465a0a394969993935aa464a5940a074ab5007d95b7894a00000000000000000000050000005edb50180901010000017c4f124fd0ffffffff000000481f8b080000000000000063608003ebfbf2b32c18810cc61a7f21ff0b40064b556a49229056ac564ace2fcd2b51b232d0514acbccc9492d52b2524a8303a55a005ec594df47000000
\ No newline at end of file
Binary files 0.2.1-1.1/fixtures/v1c-v1-v1c.pcapng and 0.4.49+ds1-1/fixtures/v1c-v1-v1c.pcapng differ
diff -pruN 0.2.1-1.1/fixtures/v1c-v1c.hex 0.4.49+ds1-1/fixtures/v1c-v1c.hex
--- 0.2.1-1.1/fixtures/v1c-v1c.hex	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/fixtures/v1c-v1c.hex	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1 @@
+000001a20000000a0000000000001abffa5700000001000a746573742d677574737900000001000000000000000000000000000400000000000000040000000000000000ffffffff0000015a0000000000000001000000789125e5e201010000017c4f0ee474ffffffff000000621f8b0800000000000000636080039bfcfd51598c4006638d3fdf131f20833531a7202311c850ac564ace2fcd2b51b232d0514acbccc9492d52b2524a8403a55aa81920edd67a221c2e70734a800c96a4d412dcc624c181522d001d8564f48f00000000000000000000020000005f66e75d9b01010000017c4f0f55f5ffffffff000000491f8b0800000000000000636080039bfd2566fe8c4006638d3f7fe8572083353d31373711c850ac564ace2fcd2b51b232d0514acbccc9492d52b2524a8603a55a008ef7186d4800000000000000000000030000005f3cff26a901010000017c4f0f5d5cffffffff000000491f8b0800000000000000636080031b6f8db3d18c4006638d3f7f6c0c90c19a929a5392086428562b25e797e695285919e828a565e6e4a416295929a5c081522d00dd1f6ff148000000
\ No newline at end of file
Binary files 0.2.1-1.1/fixtures/v1c-v1c.pcapng and 0.4.49+ds1-1/fixtures/v1c-v1c.pcapng differ
diff -pruN 0.2.1-1.1/fixtures/v2-v2.hex 0.4.49+ds1-1/fixtures/v2-v2.hex
--- 0.2.1-1.1/fixtures/v2-v2.hex	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/fixtures/v2-v2.hex	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1 @@
+000001760000000a0000000000001163921100000001000a746573742d6c7563696400000001000000000000000000000000000400000000000000040000000000000000ffffffff0000012e00000000000000000000008a00000000023978fc3b0000000000010000017c4f173eb90000017c4f173ed2ffffffffffffffffffffffffffff00000002580000000a616c706861427b22636f756e74223a302c2266696c6c6572223a2261616161616161616161227d00560032020862657461427b22636f756e74223a302c2266696c6c6572223a2262626262626262626262227d0000000000000000020000008c0000000002fa7514ab0000000000010000017c4f175fa00000017c4f17631fffffffffffffffffffffffffffff00000002580000000a67616d6d61427b22636f756e74223a302c2266696c6c6572223a2263636363636363636363227d005a00fe0d020a64656c7461427b22636f756e74223a302c2266696c6c6572223a2264646464646464646464227d00
\ No newline at end of file
Binary files 0.2.1-1.1/fixtures/v2-v2.pcapng and 0.4.49+ds1-1/fixtures/v2-v2.pcapng differ
diff -pruN 0.2.1-1.1/fixtures/v2b-v1.hex 0.4.49+ds1-1/fixtures/v2b-v1.hex
--- 0.2.1-1.1/fixtures/v2b-v1.hex	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/fixtures/v2b-v1.hex	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1 @@
+0000016e0000000a00000000000023f24a1a00000001000b746573742d66656973747900000001000000000000000000000000000400000000000000040000000000000000ffffffff0000012500000000000000000000008a000000000267762fd10000000000010000017c4e71efe10000017c4e71effdffffffffffffffffffffffffffff00000002580000000a616c706861427b22636f756e74223a302c2266696c6c6572223a2261616161616161616161227d00560038020862657461427b22636f756e74223a302c2266696c6c6572223a2262626262626262626262227d0000000000000000020000003c0d5ba69301000000017c4e743d2100000005616c706861000000217b22636f756e74223a302c2266696c6c6572223a2261616161616161616161227d00000000000000030000003be6e3d42501000000017c4e743d410000000462657461000000217b22636f756e74223a302c2266696c6c6572223a2262626262626262626262227d
\ No newline at end of file
Binary files 0.2.1-1.1/fixtures/v2b-v1.pcapng and 0.4.49+ds1-1/fixtures/v2b-v1.pcapng differ
diff -pruN 0.2.1-1.1/fixtures/v2bc-v1-v1c.hex 0.4.49+ds1-1/fixtures/v2bc-v1-v1c.hex
--- 0.2.1-1.1/fixtures/v2bc-v1-v1c.hex	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/fixtures/v2bc-v1-v1c.hex	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1 @@
+000001e60000000a000000000000530076a100000001000a746573742d686172647900000001000000000000000000000000000600000000000000060000000000000000ffffffff0000019e000000000000000000000079000000000214d2dc1d0001000000010000017c4ead43a90000017c4ead43c3ffffffffffffffffffffffffffff000000021f8b08000000000000008b606060e04acc29c84874aa564ace2fcd2b51b232d0514acbccc9492d52b2524a8403a55a86300613268ea4d4121c6a93e000a81600538562275900000000000000000000020000003c48deb52601000000017c4eae54050000000567616d6d61000000217b22636f756e74223a302c2266696c6c6572223a2263636363636363636363227d00000000000000030000003ca2ba5edc01000000017c4eae5cff0000000564656c7461000000217b22636f756e74223a302c2266696c6c6572223a2264646464646464646464227d00000000000000050000007d2e0ea95201010000017c4eb07250ffffffff000000671f8b080000000000000063608003bb5b69b1958c4006638ddf86fcef40067b6a4171664e7e1e90a958ad949c5f9a57a26465a0a394969993935aa464a5940a074ab550534006587fdc55d00633a92800c860a94a2d49c4694c1a1c28d5020087e0fa5d91000000
\ No newline at end of file
Binary files 0.2.1-1.1/fixtures/v2bc-v1-v1c.pcapng and 0.4.49+ds1-1/fixtures/v2bc-v1-v1c.pcapng differ
diff -pruN 0.2.1-1.1/fixtures/v2bc-v1.hex 0.4.49+ds1-1/fixtures/v2bc-v1.hex
--- 0.2.1-1.1/fixtures/v2bc-v1.hex	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/fixtures/v2bc-v1.hex	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1 @@
+0000015d0000000a0000000000006d36526200000001000a746573742d686172647900000001000000000000000000000000000400000000000000040000000000000000ffffffff00000115000000000000000000000079000000000214d2dc1d0001000000010000017c4ead43a90000017c4ead43c3ffffffffffffffffffffffffffff000000021f8b08000000000000008b606060e04acc29c84874aa564ace2fcd2b51b232d0514acbccc9492d52b2524a8403a55a86300613268ea4d4121c6a93e000a81600538562275900000000000000000000020000003c48deb52601000000017c4eae54050000000567616d6d61000000217b22636f756e74223a302c2266696c6c6572223a2263636363636363636363227d00000000000000030000003ca2ba5edc01000000017c4eae5cff0000000564656c7461000000217b22636f756e74223a302c2266696c6c6572223a2264646464646464646464227d
\ No newline at end of file
Binary files 0.2.1-1.1/fixtures/v2bc-v1.pcapng and 0.4.49+ds1-1/fixtures/v2bc-v1.pcapng differ
diff -pruN 0.2.1-1.1/fixtures/v2bc-v1c.hex 0.4.49+ds1-1/fixtures/v2bc-v1c.hex
--- 0.2.1-1.1/fixtures/v2bc-v1c.hex	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/fixtures/v2bc-v1c.hex	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1 @@
+000001520000000a0000000000004aa4215500000001000b746573742d6b61726d696300000001000000000000000000000000000400000000000000040000000000000000ffffffff00000109000000000000000000000079000000000218f2e1220001000000010000017c4e8edde60000017c4e8eddffffffffffffffffffffffffffffff000000021f8b08000000000000008b606060e04acc29c84874aa564ace2fcd2b51b232d0514acbccc9492d52b2524a8403a55a86300623268ea4d4121c6a93e000a81600b5931557590000000000000000000003000000785a33562401010000017c4e8f57f1ffffffff000000621f8b0800000000000000636080031b93ef814f19810cc61abffef0ab40066b624e41462290a158ad949c5f9a57a26465a0a394969993935aa464a59408074ab5503340daad5d459b6ec0cdf90864b024a596e03626090e946a016143eac78f000000
\ No newline at end of file
Binary files 0.2.1-1.1/fixtures/v2bc-v1c.pcapng and 0.4.49+ds1-1/fixtures/v2bc-v1c.pcapng differ
diff -pruN 0.2.1-1.1/fixtures/v2c-v2-v2c.hex 0.4.49+ds1-1/fixtures/v2c-v2-v2c.hex
--- 0.2.1-1.1/fixtures/v2c-v2-v2c.hex	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/fixtures/v2c-v2-v2c.hex	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1 @@
+000001ee0000000a000000000000670352ac00000001000a746573742d6e6174747900000001000000000000000000000000000600000000000000060000000000000000ffffffff000001a600000000000000000000007900000000025da9bf740001000000010000017c4f1eea730000017c4f1eea8dffffffffffffffffffffffffffff000000021f8b08000000000000008b606060e04acc29c84874aa564ace2fcd2b51b232d0514acbccc9492d52b2524a8403a55a86300613268ea4d4121c6a93e000a81600538562275900000000000000000000020000008c0000000002f53e2b600000000000010000017c4f1f1a600000017c4f1f1c65ffffffffffffffffffffffffffff00000002580000000a67616d6d61427b22636f756e74223a302c2266696c6c6572223a2263636363636363636363227d005a008a08020a64656c7461427b22636f756e74223a302c2266696c6c6572223a2264646464646464646464227d0000000000000000040000007d000000000268a8ca640001000000010000017c4f1f49f90000017c4f1f4db4ffffffffffffffffffffffffffff000000021f8b08000000000000008b616060e04b2d28ceccc9cf73aa564ace2fcd2b51b232d0514acbccc9492d52b2524a8503a55a8608866f7c4c1c55a92589d815a7c101503100ebf4f0655c000000
\ No newline at end of file
Binary files 0.2.1-1.1/fixtures/v2c-v2-v2c.pcapng and 0.4.49+ds1-1/fixtures/v2c-v2-v2c.pcapng differ
diff -pruN 0.2.1-1.1/fixtures/v2c-v2c.hex 0.4.49+ds1-1/fixtures/v2c-v2c.hex
--- 0.2.1-1.1/fixtures/v2c-v2c.hex	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/fixtures/v2c-v2c.hex	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1 @@
+000001560000000a0000000000005698dc5100000001000c746573742d6f6e656972696300000001000000000000000000000000000400000000000000040000000000000000ffffffff0000010c00000000000000000000007900000000021ad503db0001000000010000017c4f1a1f540000017c4f1a1f70ffffffffffffffffffffffffffff000000021f8b08000000000000008b606060e04acc29c84874aa564ace2fcd2b51b232d0514acbccc9492d52b2524a8403a55a8630060b268ea4d4121c6a93e000a816009fa88cc75900000000000000000000020000007b0000000002d346070a0001000000010000017c4f1a46110000017c4f1a48d0ffffffffffffffffffffffffffff000000021f8b08000000000000008b606060e04a4fcccd4d74aa564ace2fcd2b51b232d0514acbccc9492d52b2524a8603a55a8628867f5c4c5c29a939253854a7c0015035000f1406dd5b000000
\ No newline at end of file
Binary files 0.2.1-1.1/fixtures/v2c-v2c.pcapng and 0.4.49+ds1-1/fixtures/v2c-v2c.pcapng differ
diff -pruN 0.2.1-1.1/go.mod 0.4.49+ds1-1/go.mod
--- 0.2.1-1.1/go.mod	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/go.mod	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,22 @@
+module github.com/segmentio/kafka-go
+
+go 1.23
+
+require (
+	github.com/klauspost/compress v1.15.9
+	github.com/pierrec/lz4/v4 v4.1.15
+	github.com/stretchr/testify v1.8.0
+	github.com/xdg-go/scram v1.1.2
+	golang.org/x/net v0.38.0
+)
+
+require (
+	github.com/davecgh/go-spew v1.1.1 // indirect
+	github.com/pmezard/go-difflib v1.0.0 // indirect
+	github.com/xdg-go/pbkdf2 v1.0.0 // indirect
+	github.com/xdg-go/stringprep v1.0.4 // indirect
+	golang.org/x/text v0.23.0 // indirect
+	gopkg.in/yaml.v3 v3.0.1 // indirect
+)
+
+retract [v0.4.36, v0.4.37]
diff -pruN 0.2.1-1.1/go.sum 0.4.49+ds1-1/go.sum
--- 0.2.1-1.1/go.sum	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/go.sum	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,53 @@
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/klauspost/compress v1.15.9 h1:wKRjX6JRtDdrE9qwa4b/Cip7ACOshUI4smpCQanqjSY=
+github.com/klauspost/compress v1.15.9/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU=
+github.com/pierrec/lz4/v4 v4.1.15 h1:MO0/ucJhngq7299dKLwIMtgTfbkoSPF6AoMYDd8Q4q0=
+github.com/pierrec/lz4/v4 v4.1.15/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
+github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk=
+github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
+github.com/xdg-go/pbkdf2 v1.0.0 h1:Su7DPu48wXMwC3bs7MCNG+z4FhcyEuz5dlvchbq0B0c=
+github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI=
+github.com/xdg-go/scram v1.1.2 h1:FHX5I5B4i4hKRVRBCFRxq1iQRej7WO3hhBuJf+UUySY=
+github.com/xdg-go/scram v1.1.2/go.mod h1:RT/sEzTbU5y00aCK8UOx6R7YryM0iF1N2MOmC3kKLN4=
+github.com/xdg-go/stringprep v1.0.4 h1:XLI/Ng3O1Atzq0oBs3TWm+5ZVgkq2aqdlvP9JtoZ6c8=
+github.com/xdg-go/stringprep v1.0.4/go.mod h1:mPGuuIYwz7CmR2bT9j4GbQqutWS1zV24gijq1dTyGkM=
+github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
+golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
+golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
+golang.org/x/net v0.38.0 h1:vRMAPTMaeGqVhG5QyLJHqNDwecKTomGeqbnfZyKlBI8=
+golang.org/x/net v0.38.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8=
+golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
+golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
+golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
+golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY=
+golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4=
+golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
+golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
+gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
diff -pruN 0.2.1-1.1/groupbalancer.go 0.4.49+ds1-1/groupbalancer.go
--- 0.2.1-1.1/groupbalancer.go	2018-11-20 17:06:32.000000000 +0000
+++ 0.4.49+ds1-1/groupbalancer.go	2025-08-21 19:15:53.000000000 +0000
@@ -1,6 +1,8 @@
 package kafka
 
-import "sort"
+import (
+	"sort"
+)
 
 // GroupMember describes a single participant in a consumer group.
 type GroupMember struct {
@@ -15,10 +17,10 @@ type GroupMember struct {
 	UserData []byte
 }
 
-// GroupMemberAssignments holds MemberID => topic => partitions
+// GroupMemberAssignments holds MemberID => topic => partitions.
 type GroupMemberAssignments map[string]map[string][]int
 
-// GroupBalancer encapsulates the client side rebalancing logic
+// GroupBalancer encapsulates the client side rebalancing logic.
 type GroupBalancer interface {
 	// ProtocolName of the GroupBalancer
 	ProtocolName() string
@@ -133,8 +135,158 @@ func (r RoundRobinGroupBalancer) AssignG
 	return groupAssignments
 }
 
+// RackAffinityGroupBalancer makes a best effort to pair up consumers with
+// partitions whose leader is in the same rack.  This strategy can have
+// performance benefits by minimizing round trip latency between the consumer
+// and the broker.  In environments where network traffic across racks incurs
+// charges (such as cross AZ data transfer in AWS), this strategy is also a cost
+// optimization measure because it keeps network traffic within the local rack
+// where possible.
+//
+// The primary objective is to spread partitions evenly across consumers with a
+// secondary focus on maximizing the number of partitions where the leader and
+// the consumer are in the same rack.  For best affinity, it's recommended to
+// have a balanced spread of consumers and partition leaders across racks.
+//
+// This balancer requires Kafka version 0.10.0.0 or later.  Earlier versions do
+// not return the brokers' racks in the metadata request.
+type RackAffinityGroupBalancer struct {
+	// Rack is the name of the rack where this consumer is running.  It will be
+	// communicated to the consumer group leader via the UserData so that
+	// assignments can be made with affinity to the partition leader.
+	Rack string
+}
+
+func (r RackAffinityGroupBalancer) ProtocolName() string {
+	return "rack-affinity"
+}
+
+func (r RackAffinityGroupBalancer) AssignGroups(members []GroupMember, partitions []Partition) GroupMemberAssignments {
+	membersByTopic := make(map[string][]GroupMember)
+	for _, m := range members {
+		for _, t := range m.Topics {
+			membersByTopic[t] = append(membersByTopic[t], m)
+		}
+	}
+
+	partitionsByTopic := make(map[string][]Partition)
+	for _, p := range partitions {
+		partitionsByTopic[p.Topic] = append(partitionsByTopic[p.Topic], p)
+	}
+
+	assignments := GroupMemberAssignments{}
+	for topic := range membersByTopic {
+		topicAssignments := r.assignTopic(membersByTopic[topic], partitionsByTopic[topic])
+		for member, parts := range topicAssignments {
+			memberAssignments, ok := assignments[member]
+			if !ok {
+				memberAssignments = make(map[string][]int)
+				assignments[member] = memberAssignments
+			}
+			memberAssignments[topic] = parts
+		}
+	}
+	return assignments
+}
+
+func (r RackAffinityGroupBalancer) UserData() ([]byte, error) {
+	return []byte(r.Rack), nil
+}
+
+func (r *RackAffinityGroupBalancer) assignTopic(members []GroupMember, partitions []Partition) map[string][]int {
+	zonedPartitions := make(map[string][]int)
+	for _, part := range partitions {
+		zone := part.Leader.Rack
+		zonedPartitions[zone] = append(zonedPartitions[zone], part.ID)
+	}
+
+	zonedConsumers := make(map[string][]string)
+	for _, member := range members {
+		zone := string(member.UserData)
+		zonedConsumers[zone] = append(zonedConsumers[zone], member.ID)
+	}
+
+	targetPerMember := len(partitions) / len(members)
+	remainder := len(partitions) % len(members)
+	assignments := make(map[string][]int)
+
+	// assign as many as possible in zone.  this will assign up to partsPerMember
+	// to each consumer.  it will also prefer to allocate remainder partitions
+	// in zone if possible.
+	for zone, parts := range zonedPartitions {
+		consumers := zonedConsumers[zone]
+		if len(consumers) == 0 {
+			continue
+		}
+
+		// don't over-allocate.  cap partition assignments at the calculated
+		// target.
+		partsPerMember := len(parts) / len(consumers)
+		if partsPerMember > targetPerMember {
+			partsPerMember = targetPerMember
+		}
+
+		for _, consumer := range consumers {
+			assignments[consumer] = append(assignments[consumer], parts[:partsPerMember]...)
+			parts = parts[partsPerMember:]
+		}
+
+		// if we had enough partitions for each consumer in this zone to hit its
+		// target, attempt to use any leftover partitions to satisfy the total
+		// remainder by adding at most 1 partition per consumer.
+		leftover := len(parts)
+		if partsPerMember == targetPerMember {
+			if leftover > remainder {
+				leftover = remainder
+			}
+			if leftover > len(consumers) {
+				leftover = len(consumers)
+			}
+			remainder -= leftover
+		}
+
+		// this loop covers the case where we're assigning extra partitions or
+		// if there weren't enough to satisfy the targetPerMember and the zoned
+		// partitions didn't divide evenly.
+		for i := 0; i < leftover; i++ {
+			assignments[consumers[i]] = append(assignments[consumers[i]], parts[i])
+		}
+		parts = parts[leftover:]
+
+		if len(parts) == 0 {
+			delete(zonedPartitions, zone)
+		} else {
+			zonedPartitions[zone] = parts
+		}
+	}
+
+	// assign out remainders regardless of zone.
+	var remaining []int
+	for _, partitions := range zonedPartitions {
+		remaining = append(remaining, partitions...)
+	}
+
+	for _, member := range members {
+		assigned := assignments[member.ID]
+		delta := targetPerMember - len(assigned)
+		// if it were possible to assign the remainder in zone, it's been taken
+		// care of already.  now we will portion out any remainder to a member
+		// that can take it.
+		if delta >= 0 && remainder > 0 {
+			delta++
+			remainder--
+		}
+		if delta > 0 {
+			assignments[member.ID] = append(assigned, remaining[:delta]...)
+			remaining = remaining[delta:]
+		}
+	}
+
+	return assignments
+}
+
 // findPartitions extracts the partition ids associated with the topic from the
-// list of Partitions provided
+// list of Partitions provided.
 func findPartitions(topic string, partitions []Partition) []int {
 	var ids []int
 	for _, partition := range partitions {
@@ -145,7 +297,7 @@ func findPartitions(topic string, partit
 	return ids
 }
 
-// findMembersByTopic groups the memberGroupMetadata by topic
+// findMembersByTopic groups the memberGroupMetadata by topic.
 func findMembersByTopic(members []GroupMember) map[string][]GroupMember {
 	membersByTopic := map[string][]GroupMember{}
 	for _, member := range members {
@@ -176,7 +328,7 @@ func findMembersByTopic(members []GroupM
 }
 
 // findGroupBalancer returns the GroupBalancer with the specified protocolName
-// from the slice provided
+// from the slice provided.
 func findGroupBalancer(protocolName string, balancers []GroupBalancer) (GroupBalancer, bool) {
 	for _, balancer := range balancers {
 		if balancer.ProtocolName() == protocolName {
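
A usage sketch (illustrative only, not part of the diff): the new RackAffinityGroupBalancer is normally wired into a consumer through the reader configuration; the broker address, topic, and group names below are placeholders, and the GroupBalancers field is assumed from the v0.4 ReaderConfig API.

package main

import "github.com/segmentio/kafka-go"

// newRackAwareReader returns a consumer-group reader that prefers partitions
// whose leader runs in the same rack (or availability zone) as this consumer.
func newRackAwareReader(rack string) *kafka.Reader {
	return kafka.NewReader(kafka.ReaderConfig{
		Brokers: []string{"localhost:9092"}, // placeholder broker
		GroupID: "example-group",            // placeholder group
		Topic:   "example-topic",            // placeholder topic
		GroupBalancers: []kafka.GroupBalancer{
			kafka.RackAffinityGroupBalancer{Rack: rack},
		},
	})
}
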
diff -pruN 0.2.1-1.1/groupbalancer_test.go 0.4.49+ds1-1/groupbalancer_test.go
--- 0.2.1-1.1/groupbalancer_test.go	2018-11-20 17:06:32.000000000 +0000
+++ 0.4.49+ds1-1/groupbalancer_test.go	2025-08-21 19:15:53.000000000 +0000
@@ -375,3 +375,316 @@ func TestFindMembersByTopicSortsByMember
 		})
 	}
 }
+
+func TestRackAffinityGroupBalancer(t *testing.T) {
+	t.Run("User Data", func(t *testing.T) {
+		t.Run("unknown zone", func(t *testing.T) {
+			b := RackAffinityGroupBalancer{}
+			zone, err := b.UserData()
+			if err != nil {
+				t.Fatal(err)
+			}
+			if string(zone) != "" {
+				t.Fatalf("expected empty zone but got %s", zone)
+			}
+		})
+
+		t.Run("configure zone", func(t *testing.T) {
+			b := RackAffinityGroupBalancer{Rack: "zone1"}
+			zone, err := b.UserData()
+			if err != nil {
+				t.Fatal(err)
+			}
+			if string(zone) != "zone1" {
+				t.Fatalf("expected zone1 az but got %s", zone)
+			}
+		})
+	})
+
+	t.Run("Balance", func(t *testing.T) {
+		b := RackAffinityGroupBalancer{}
+
+		brokers := map[string]Broker{
+			"z1": {ID: 1, Rack: "z1"},
+			"z2": {ID: 2, Rack: "z2"},
+			"z3": {ID: 2, Rack: "z3"},
+			"":   {},
+		}
+
+		tests := []struct {
+			name            string
+			memberCounts    map[string]int
+			partitionCounts map[string]int
+			result          map[string]map[string]int
+		}{
+			{
+				name: "unknown and known zones",
+				memberCounts: map[string]int{
+					"":   1,
+					"z1": 1,
+					"z2": 1,
+				},
+				partitionCounts: map[string]int{
+					"z1": 5,
+					"z2": 4,
+					"":   9,
+				},
+				result: map[string]map[string]int{
+					"z1": {"": 1, "z1": 5},
+					"z2": {"": 2, "z2": 4},
+					"":   {"": 6},
+				},
+			},
+			{
+				name: "all unknown",
+				memberCounts: map[string]int{
+					"": 5,
+				},
+				partitionCounts: map[string]int{
+					"": 103,
+				},
+				result: map[string]map[string]int{
+					"": {"": 103},
+				},
+			},
+			{
+				name: "remainder stays local",
+				memberCounts: map[string]int{
+					"z1": 3,
+					"z2": 3,
+					"z3": 3,
+				},
+				partitionCounts: map[string]int{
+					"z1": 20,
+					"z2": 19,
+					"z3": 20,
+				},
+				result: map[string]map[string]int{
+					"z1": {"z1": 20},
+					"z2": {"z2": 19},
+					"z3": {"z3": 20},
+				},
+			},
+			{
+				name: "imbalanced partitions",
+				memberCounts: map[string]int{
+					"z1": 1,
+					"z2": 1,
+					"z3": 1,
+				},
+				partitionCounts: map[string]int{
+					"z1": 7,
+					"z2": 0,
+					"z3": 7,
+				},
+				result: map[string]map[string]int{
+					"z1": {"z1": 5},
+					"z2": {"z1": 2, "z3": 2},
+					"z3": {"z3": 5},
+				},
+			},
+			{
+				name: "imbalanced members",
+				memberCounts: map[string]int{
+					"z1": 5,
+					"z2": 3,
+					"z3": 1,
+				},
+				partitionCounts: map[string]int{
+					"z1": 9,
+					"z2": 9,
+					"z3": 9,
+				},
+				result: map[string]map[string]int{
+					"z1": {"z1": 9, "z3": 6},
+					"z2": {"z2": 9},
+					"z3": {"z3": 3},
+				},
+			},
+			{
+				name: "no consumers in zone",
+				memberCounts: map[string]int{
+					"z2": 10,
+				},
+				partitionCounts: map[string]int{
+					"z1": 20,
+					"z3": 19,
+				},
+				result: map[string]map[string]int{
+					"z2": {"z1": 20, "z3": 19},
+				},
+			},
+		}
+
+		for _, tt := range tests {
+			t.Run(tt.name, func(t *testing.T) {
+
+				// create members per the distribution in the test case.
+				var members []GroupMember
+				for zone, count := range tt.memberCounts {
+					for i := 0; i < count; i++ {
+						members = append(members, GroupMember{
+							ID:       zone + ":" + strconv.Itoa(len(members)+1),
+							Topics:   []string{"test"},
+							UserData: []byte(zone),
+						})
+					}
+				}
+
+				// create partitions per the distribution in the test case.
+				var partitions []Partition
+				for zone, count := range tt.partitionCounts {
+					for i := 0; i < count; i++ {
+						partitions = append(partitions, Partition{
+							ID:     len(partitions),
+							Topic:  "test",
+							Leader: brokers[zone],
+						})
+					}
+				}
+
+				res := b.AssignGroups(members, partitions)
+
+				// verification #1...all members must be assigned and with the
+				// correct load.
+				minLoad := len(partitions) / len(members)
+				maxLoad := minLoad
+				if len(partitions)%len(members) != 0 {
+					maxLoad++
+				}
+				for _, member := range members {
+					assignments := res[member.ID]["test"]
+					if len(assignments) < minLoad || len(assignments) > maxLoad {
+						t.Errorf("expected between %d and %d partitions for member %s", minLoad, maxLoad, member.ID)
+					}
+				}
+
+				// verification #2...all partitions are assigned, and the distribution
+				// per source zone matches.
+				partsPerZone := make(map[string]map[string]int)
+				uniqueParts := make(map[int]struct{})
+				for id, topicToPartitions := range res {
+
+					for topic, assignments := range topicToPartitions {
+						if topic != "test" {
+							t.Fatalf("wrong topic...expected test but got %s", topic)
+						}
+
+						var member GroupMember
+						for _, m := range members {
+							if id == m.ID {
+								member = m
+								break
+							}
+						}
+						if member.ID == "" {
+							t.Fatal("empty member ID returned")
+						}
+
+						var partition Partition
+						for _, id := range assignments {
+
+							uniqueParts[id] = struct{}{}
+
+							for _, p := range partitions {
+								if p.ID == int(id) {
+									partition = p
+									break
+								}
+							}
+							if partition.Topic == "" {
+								t.Fatal("empty topic ID returned")
+							}
+							counts, ok := partsPerZone[string(member.UserData)]
+							if !ok {
+								counts = make(map[string]int)
+								partsPerZone[string(member.UserData)] = counts
+							}
+							counts[partition.Leader.Rack]++
+						}
+					}
+				}
+
+				if len(partitions) != len(uniqueParts) {
+					t.Error("not all partitions were assigned")
+				}
+				if !reflect.DeepEqual(tt.result, partsPerZone) {
+					t.Errorf("wrong balanced zones.  expected %v but got %v", tt.result, partsPerZone)
+				}
+			})
+		}
+	})
+
+	t.Run("Multi Topic", func(t *testing.T) {
+		b := RackAffinityGroupBalancer{}
+
+		brokers := map[string]Broker{
+			"z1": {ID: 1, Rack: "z1"},
+			"z2": {ID: 2, Rack: "z2"},
+			"z3": {ID: 2, Rack: "z3"},
+			"":   {},
+		}
+
+		members := []GroupMember{
+			{
+				ID:       "z1",
+				Topics:   []string{"topic1", "topic2"},
+				UserData: []byte("z1"),
+			},
+			{
+				ID:       "z2",
+				Topics:   []string{"topic2", "topic3"},
+				UserData: []byte("z2"),
+			},
+			{
+				ID:       "z3",
+				Topics:   []string{"topic3", "topic1"},
+				UserData: []byte("z3"),
+			},
+		}
+
+		partitions := []Partition{
+			{
+				ID:     1,
+				Topic:  "topic1",
+				Leader: brokers["z1"],
+			},
+			{
+				ID:     2,
+				Topic:  "topic1",
+				Leader: brokers["z3"],
+			},
+			{
+				ID:     1,
+				Topic:  "topic2",
+				Leader: brokers["z1"],
+			},
+			{
+				ID:     2,
+				Topic:  "topic2",
+				Leader: brokers["z2"],
+			},
+			{
+				ID:     1,
+				Topic:  "topic3",
+				Leader: brokers["z3"],
+			},
+			{
+				ID:     2,
+				Topic:  "topic3",
+				Leader: brokers["z2"],
+			},
+		}
+
+		expected := GroupMemberAssignments{
+			"z1": {"topic1": []int{1}, "topic2": []int{1}},
+			"z2": {"topic2": []int{2}, "topic3": []int{2}},
+			"z3": {"topic3": []int{1}, "topic1": []int{2}},
+		}
+
+		res := b.AssignGroups(members, partitions)
+		if !reflect.DeepEqual(expected, res) {
+			t.Fatalf("incorrect group assignment.  expected %v but got %v", expected, res)
+		}
+	})
+}
diff -pruN 0.2.1-1.1/gzip/gzip.go 0.4.49+ds1-1/gzip/gzip.go
--- 0.2.1-1.1/gzip/gzip.go	2018-11-20 17:06:32.000000000 +0000
+++ 0.4.49+ds1-1/gzip/gzip.go	2025-08-21 19:15:53.000000000 +0000
@@ -1,64 +1,24 @@
+// Package gzip does nothing; it's kept for backward compatibility to avoid
+// breaking the majority of programs that imported it to install the compression
+// codec, which is now always included.
 package gzip
 
 import (
-	"bytes"
-	"compress/gzip"
-	"io/ioutil"
-
-	"github.com/segmentio/kafka-go"
+	gz "github.com/klauspost/compress/gzip"
+	"github.com/segmentio/kafka-go/compress/gzip"
 )
 
-func init() {
-	kafka.RegisterCompressionCodec(func() kafka.CompressionCodec {
-		return NewCompressionCodec()
-	})
-}
-
-type CompressionCodec struct {
-	// CompressionLevel is the level of compression to use on messages.
-	CompressionLevel int
-}
-
-const Code = 1
-
-func NewCompressionCodec() CompressionCodec {
-	return NewCompressionCodecWith(kafka.DefaultCompressionLevel)
-}
-
-func NewCompressionCodecWith(level int) CompressionCodec {
-	return CompressionCodec{
-		CompressionLevel: level,
-	}
-}
-
-// Code implements the kafka.CompressionCodec interface.
-func (c CompressionCodec) Code() int8 {
-	return Code
-}
-
-// Encode implements the kafka.CompressionCodec interface.
-func (c CompressionCodec) Encode(src []byte) ([]byte, error) {
-	buf := bytes.Buffer{}
-	buf.Grow(len(src)) // guess a size to avoid repeat allocations.
-	writer := gzip.NewWriter(&buf)
+const (
+	Code                    = 1
+	DefaultCompressionLevel = gz.DefaultCompression
+)
 
-	_, err := writer.Write(src)
-	if err != nil {
-		return nil, err
-	}
+type CompressionCodec = gzip.Codec
 
-	err = writer.Close()
-	if err != nil {
-		return nil, err
-	}
-	return buf.Bytes(), err
+func NewCompressionCodec() *CompressionCodec {
+	return NewCompressionCodecLevel(DefaultCompressionLevel)
 }
 
-// Decode implements the kafka.CompressionCodec interface.
-func (c CompressionCodec) Decode(src []byte) ([]byte, error) {
-	reader, err := gzip.NewReader(bytes.NewReader(src))
-	if err != nil {
-		return nil, err
-	}
-	return ioutil.ReadAll(reader)
+func NewCompressionCodecLevel(level int) *CompressionCodec {
+	return &CompressionCodec{Level: level}
 }
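
Since the codec is now registered unconditionally, programs no longer need a blank import of this package. A minimal sketch of the replacement pattern, assuming the v0.4 Writer API and its Compression field (broker and topic names are placeholders):

package main

import (
	"context"

	"github.com/segmentio/kafka-go"
)

// writeCompressed produces a gzip-compressed message; no side-effect import
// of github.com/segmentio/kafka-go/gzip is required anymore.
func writeCompressed(ctx context.Context) error {
	w := &kafka.Writer{
		Addr:        kafka.TCP("localhost:9092"), // placeholder broker
		Topic:       "example-topic",             // placeholder topic
		Compression: kafka.Gzip,
	}
	defer w.Close()
	return w.WriteMessages(ctx, kafka.Message{Value: []byte("hello")})
}
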
diff -pruN 0.2.1-1.1/heartbeat.go 0.4.49+ds1-1/heartbeat.go
--- 0.2.1-1.1/heartbeat.go	2018-11-20 17:06:32.000000000 +0000
+++ 0.4.49+ds1-1/heartbeat.go	2025-08-21 19:15:53.000000000 +0000
@@ -1,6 +1,44 @@
 package kafka
 
-import "bufio"
+import (
+	"bufio"
+	"context"
+	"fmt"
+	"net"
+	"time"
+
+	heartbeatAPI "github.com/segmentio/kafka-go/protocol/heartbeat"
+)
+
+// HeartbeatRequest represents a heartbeat sent to kafka to indicate consumer liveness.
+type HeartbeatRequest struct {
+	// Address of the kafka broker to send the request to.
+	Addr net.Addr
+
+	// GroupID is the ID of the group.
+	GroupID string
+
+	// GenerationID is the current generation for the group.
+	GenerationID int32
+
+	// MemberID is the ID of the group member.
+	MemberID string
+
+	// GroupInstanceID is a unique identifier for the consumer.
+	GroupInstanceID string
+}
+
+// HeartbeatResponse represents a response from a heartbeat request.
+type HeartbeatResponse struct {
+	// Error is set to non-nil if an error occurred.
+	Error error
+
+	// The amount of time that the broker throttled the request.
+	//
+	// This field will be zero if the kafka broker did not support the
+	// Heartbeat API in version 1 or above.
+	Throttle time.Duration
+}
 
 type heartbeatRequestV0 struct {
 	// GroupID holds the unique group identifier
@@ -13,16 +51,41 @@ type heartbeatRequestV0 struct {
 	MemberID string
 }
 
+// Heartbeat sends a heartbeat request to a kafka broker and returns the response.
+func (c *Client) Heartbeat(ctx context.Context, req *HeartbeatRequest) (*HeartbeatResponse, error) {
+	m, err := c.roundTrip(ctx, req.Addr, &heartbeatAPI.Request{
+		GroupID:         req.GroupID,
+		GenerationID:    req.GenerationID,
+		MemberID:        req.MemberID,
+		GroupInstanceID: req.GroupInstanceID,
+	})
+	if err != nil {
+		return nil, fmt.Errorf("kafka.(*Client).Heartbeat: %w", err)
+	}
+
+	res := m.(*heartbeatAPI.Response)
+
+	ret := &HeartbeatResponse{
+		Throttle: makeDuration(res.ThrottleTimeMs),
+	}
+
+	if res.ErrorCode != 0 {
+		ret.Error = Error(res.ErrorCode)
+	}
+
+	return ret, nil
+}
+
 func (t heartbeatRequestV0) size() int32 {
 	return sizeofString(t.GroupID) +
 		sizeofInt32(t.GenerationID) +
 		sizeofString(t.MemberID)
 }
 
-func (t heartbeatRequestV0) writeTo(w *bufio.Writer) {
-	writeString(w, t.GroupID)
-	writeInt32(w, t.GenerationID)
-	writeString(w, t.MemberID)
+func (t heartbeatRequestV0) writeTo(wb *writeBuffer) {
+	wb.writeString(t.GroupID)
+	wb.writeInt32(t.GenerationID)
+	wb.writeString(t.MemberID)
 }
 
 type heartbeatResponseV0 struct {
@@ -34,8 +97,8 @@ func (t heartbeatResponseV0) size() int3
 	return sizeofInt16(t.ErrorCode)
 }
 
-func (t heartbeatResponseV0) writeTo(w *bufio.Writer) {
-	writeInt16(w, t.ErrorCode)
+func (t heartbeatResponseV0) writeTo(wb *writeBuffer) {
+	wb.writeInt16(t.ErrorCode)
 }
 
 func (t *heartbeatResponseV0) readFrom(r *bufio.Reader, sz int) (remain int, err error) {
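
A minimal sketch of the new Client.Heartbeat API, assuming the group ID, member ID, and generation ID were obtained from an earlier JoinGroup exchange (all values are placeholders supplied by the caller):

package main

import (
	"context"
	"fmt"

	"github.com/segmentio/kafka-go"
)

// sendHeartbeat signals liveness for a group member; resp.Error carries the
// kafka error code when the broker rejects the heartbeat.
func sendHeartbeat(ctx context.Context, client *kafka.Client, groupID, memberID string, generationID int32) error {
	resp, err := client.Heartbeat(ctx, &kafka.HeartbeatRequest{
		GroupID:      groupID,
		GenerationID: generationID,
		MemberID:     memberID,
	})
	if err != nil {
		return err // transport or protocol failure
	}
	if resp.Error != nil {
		return fmt.Errorf("heartbeat rejected: %w", resp.Error)
	}
	return nil
}
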
diff -pruN 0.2.1-1.1/heartbeat_test.go 0.4.49+ds1-1/heartbeat_test.go
--- 0.2.1-1.1/heartbeat_test.go	2018-11-20 17:06:32.000000000 +0000
+++ 0.4.49+ds1-1/heartbeat_test.go	2025-08-21 19:15:53.000000000 +0000
@@ -3,22 +3,70 @@ package kafka
 import (
 	"bufio"
 	"bytes"
+	"context"
+	"log"
+	"os"
 	"reflect"
 	"testing"
+	"time"
 )
 
+func TestClientHeartbeat(t *testing.T) {
+	client, topic, shutdown := newLocalClientAndTopic()
+	defer shutdown()
+
+	ctx, cancel := context.WithTimeout(context.Background(), time.Second*30)
+	defer cancel()
+
+	groupID := makeGroupID()
+
+	group, err := NewConsumerGroup(ConsumerGroupConfig{
+		ID:                groupID,
+		Topics:            []string{topic},
+		Brokers:           []string{"localhost:9092"},
+		HeartbeatInterval: 2 * time.Second,
+		RebalanceTimeout:  2 * time.Second,
+		RetentionTime:     time.Hour,
+		Logger:            log.New(os.Stdout, "cg-test: ", 0),
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer group.Close()
+
+	gen, err := group.Next(ctx)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	ctx, cancel = context.WithTimeout(context.Background(), time.Second*30)
+	defer cancel()
+
+	resp, err := client.Heartbeat(ctx, &HeartbeatRequest{
+		GroupID:      groupID,
+		GenerationID: gen.ID,
+		MemberID:     gen.MemberID,
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if resp.Error != nil {
+		t.Error(resp.Error)
+	}
+}
+
 func TestHeartbeatRequestV0(t *testing.T) {
 	item := heartbeatResponseV0{
 		ErrorCode: 2,
 	}
 
-	buf := bytes.NewBuffer(nil)
-	w := bufio.NewWriter(buf)
+	b := bytes.NewBuffer(nil)
+	w := &writeBuffer{w: b}
 	item.writeTo(w)
-	w.Flush()
 
 	var found heartbeatResponseV0
-	remain, err := (&found).readFrom(bufio.NewReader(buf), buf.Len())
+	remain, err := (&found).readFrom(bufio.NewReader(b), b.Len())
 	if err != nil {
 		t.Error(err)
 		t.FailNow()
diff -pruN 0.2.1-1.1/incrementalalterconfigs.go 0.4.49+ds1-1/incrementalalterconfigs.go
--- 0.2.1-1.1/incrementalalterconfigs.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/incrementalalterconfigs.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,133 @@
+package kafka
+
+import (
+	"context"
+	"net"
+
+	"github.com/segmentio/kafka-go/protocol/incrementalalterconfigs"
+)
+
+type ConfigOperation int8
+
+const (
+	ConfigOperationSet      ConfigOperation = 0
+	ConfigOperationDelete   ConfigOperation = 1
+	ConfigOperationAppend   ConfigOperation = 2
+	ConfigOperationSubtract ConfigOperation = 3
+)
+
+// IncrementalAlterConfigsRequest is a request to the IncrementalAlterConfigs API.
+type IncrementalAlterConfigsRequest struct {
+	// Addr is the address of the kafka broker to send the request to.
+	Addr net.Addr
+
+	// Resources contains the list of resources to update configs for.
+	Resources []IncrementalAlterConfigsRequestResource
+
+	// ValidateOnly indicates whether Kafka should validate the changes without actually
+	// applying them.
+	ValidateOnly bool
+}
+
+// IncrementalAlterConfigsRequestResource contains the details of a single resource type whose
+// configs should be altered.
+type IncrementalAlterConfigsRequestResource struct {
+	// ResourceType is the type of resource to update.
+	ResourceType ResourceType
+
+	// ResourceName is the name of the resource to update (i.e., topic name or broker ID).
+	ResourceName string
+
+	// Configs contains the list of config key/values to update.
+	Configs []IncrementalAlterConfigsRequestConfig
+}
+
+// IncrementalAlterConfigsRequestConfig describes a single config key/value pair that should
+// be altered.
+type IncrementalAlterConfigsRequestConfig struct {
+	// Name is the name of the config.
+	Name string
+
+	// Value is the value to set for this config.
+	Value string
+
+	// ConfigOperation indicates how this config should be updated (e.g., add, delete, etc.).
+	ConfigOperation ConfigOperation
+}
+
+// IncrementalAlterConfigsResponse is a response from the IncrementalAlterConfigs API.
+type IncrementalAlterConfigsResponse struct {
+	// Resources contains details of each resource config that was updated.
+	Resources []IncrementalAlterConfigsResponseResource
+}
+
+// IncrementalAlterConfigsResponseResource contains the response details for a single resource
+// whose configs were updated.
+type IncrementalAlterConfigsResponseResource struct {
+	// Error is set to a non-nil value if an error occurred while updating this specific
+	// config.
+	Error error
+
+	// ResourceType is the type of resource that was updated.
+	ResourceType ResourceType
+
+	// ResourceName is the name of the resource that was updated.
+	ResourceName string
+}
+
+func (c *Client) IncrementalAlterConfigs(
+	ctx context.Context,
+	req *IncrementalAlterConfigsRequest,
+) (*IncrementalAlterConfigsResponse, error) {
+	apiReq := &incrementalalterconfigs.Request{
+		ValidateOnly: req.ValidateOnly,
+	}
+
+	for _, res := range req.Resources {
+		apiRes := incrementalalterconfigs.RequestResource{
+			ResourceType: int8(res.ResourceType),
+			ResourceName: res.ResourceName,
+		}
+
+		for _, config := range res.Configs {
+			apiRes.Configs = append(
+				apiRes.Configs,
+				incrementalalterconfigs.RequestConfig{
+					Name:            config.Name,
+					Value:           config.Value,
+					ConfigOperation: int8(config.ConfigOperation),
+				},
+			)
+		}
+
+		apiReq.Resources = append(
+			apiReq.Resources,
+			apiRes,
+		)
+	}
+
+	protoResp, err := c.roundTrip(
+		ctx,
+		req.Addr,
+		apiReq,
+	)
+	if err != nil {
+		return nil, err
+	}
+
+	resp := &IncrementalAlterConfigsResponse{}
+
+	apiResp := protoResp.(*incrementalalterconfigs.Response)
+	for _, res := range apiResp.Responses {
+		resp.Resources = append(
+			resp.Resources,
+			IncrementalAlterConfigsResponseResource{
+				Error:        makeError(res.ErrorCode, res.ErrorMessage),
+				ResourceType: ResourceType(res.ResourceType),
+				ResourceName: res.ResourceName,
+			},
+		)
+	}
+
+	return resp, nil
+}
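
As an illustration only (the config name and value are supplied by the caller), a single topic-level config can be updated in place with ConfigOperationSet while leaving the other configs untouched:

package main

import (
	"context"

	"github.com/segmentio/kafka-go"
)

// setTopicConfig alters one topic config incrementally.
func setTopicConfig(ctx context.Context, client *kafka.Client, topic, name, value string) error {
	resp, err := client.IncrementalAlterConfigs(ctx, &kafka.IncrementalAlterConfigsRequest{
		Resources: []kafka.IncrementalAlterConfigsRequestResource{{
			ResourceType: kafka.ResourceTypeTopic,
			ResourceName: topic,
			Configs: []kafka.IncrementalAlterConfigsRequestConfig{{
				Name:            name,
				Value:           value,
				ConfigOperation: kafka.ConfigOperationSet,
			}},
		}},
	})
	if err != nil {
		return err
	}
	for _, res := range resp.Resources {
		if res.Error != nil {
			return res.Error
		}
	}
	return nil
}
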
diff -pruN 0.2.1-1.1/incrementalalterconfigs_test.go 0.4.49+ds1-1/incrementalalterconfigs_test.go
--- 0.2.1-1.1/incrementalalterconfigs_test.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/incrementalalterconfigs_test.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,94 @@
+package kafka
+
+import (
+	"context"
+	"reflect"
+	"testing"
+
+	ktesting "github.com/segmentio/kafka-go/testing"
+)
+
+func TestClientIncrementalAlterConfigs(t *testing.T) {
+	if !ktesting.KafkaIsAtLeast("2.4.0") {
+		return
+	}
+
+	const (
+		configKey   = "max.message.bytes"
+		configValue = "200000"
+	)
+
+	ctx := context.Background()
+	client, shutdown := newLocalClient()
+	defer shutdown()
+
+	topic := makeTopic()
+	createTopic(t, topic, 1)
+	defer deleteTopic(t, topic)
+
+	resp, err := client.IncrementalAlterConfigs(
+		ctx,
+		&IncrementalAlterConfigsRequest{
+			Resources: []IncrementalAlterConfigsRequestResource{
+				{
+					ResourceName: topic,
+					ResourceType: ResourceTypeTopic,
+					Configs: []IncrementalAlterConfigsRequestConfig{
+						{
+							Name:            configKey,
+							Value:           configValue,
+							ConfigOperation: ConfigOperationSet,
+						},
+					},
+				},
+			},
+		},
+	)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	expRes := []IncrementalAlterConfigsResponseResource{
+		{
+			ResourceType: ResourceTypeTopic,
+			ResourceName: topic,
+		},
+	}
+	if !reflect.DeepEqual(expRes, resp.Resources) {
+		t.Error(
+			"Wrong response resources",
+			"expected", expRes,
+			"got", resp.Resources,
+		)
+	}
+
+	dResp, err := client.DescribeConfigs(
+		ctx,
+		&DescribeConfigsRequest{
+			Resources: []DescribeConfigRequestResource{
+				{
+					ResourceType: ResourceTypeTopic,
+					ResourceName: topic,
+					ConfigNames: []string{
+						"max.message.bytes",
+					},
+				},
+			},
+		},
+	)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if len(dResp.Resources) != 1 || len(dResp.Resources[0].ConfigEntries) != 1 {
+		t.Fatal("Invalid structure for DescribeResourcesResponse")
+	}
+
+	v := dResp.Resources[0].ConfigEntries[0].ConfigValue
+	if v != configValue {
+		t.Error(
+			"Wrong altered value for max.message.bytes",
+			"expected", configValue,
+			"got", v,
+		)
+	}
+}
diff -pruN 0.2.1-1.1/initproducerid.go 0.4.49+ds1-1/initproducerid.go
--- 0.2.1-1.1/initproducerid.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/initproducerid.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,82 @@
+package kafka
+
+import (
+	"context"
+	"fmt"
+	"net"
+	"time"
+
+	"github.com/segmentio/kafka-go/protocol/initproducerid"
+)
+
+// InitProducerIDRequest is the request structure for the InitProducerId function.
+type InitProducerIDRequest struct {
+	// Address of the kafka broker to send the request to.
+	Addr net.Addr
+
+	// The transactional id key.
+	TransactionalID string
+
+	// Time after which a transaction should time out
+	TransactionTimeoutMs int
+
+	// The Producer ID (PID).
+	// This is used to disambiguate requests if a transactional id is reused following its expiration.
+	// Only supported in version >=3 of the request; it will be ignored otherwise.
+	ProducerID int
+
+	// The producer's current epoch.
+	// This will be checked against the producer epoch on the broker,
+	// and the request will return an error if they do not match.
+	// Only supported in version >=3 of the request; it will be ignored otherwise.
+	ProducerEpoch int
+}
+
+// ProducerSession contains useful information about the producer session from the broker's response.
+type ProducerSession struct {
+	// The Producer ID (PID) for the current producer session
+	ProducerID int
+
+	// The epoch associated with the current producer session for the given PID
+	ProducerEpoch int
+}
+
+// InitProducerIDResponse is the response structure for the InitProducerId function.
+type InitProducerIDResponse struct {
+	// The producer session details returned by the broker.
+	Producer *ProducerSession
+
+	// The amount of time that the broker throttled the request.
+	Throttle time.Duration
+
+	// An error that may have occurred while attempting to retrieve initProducerId
+	//
+	// The error contains both the kafka error code, and an error message
+	// returned by the kafka broker.
+	Error error
+}
+
+// InitProducerID sends a initProducerId request to a kafka broker and returns the
+// response.
+func (c *Client) InitProducerID(ctx context.Context, req *InitProducerIDRequest) (*InitProducerIDResponse, error) {
+	m, err := c.roundTrip(ctx, req.Addr, &initproducerid.Request{
+		TransactionalID:      req.TransactionalID,
+		TransactionTimeoutMs: int32(req.TransactionTimeoutMs),
+		ProducerID:           int64(req.ProducerID),
+		ProducerEpoch:        int16(req.ProducerEpoch),
+	})
+	if err != nil {
+		return nil, fmt.Errorf("kafka.(*Client).InitProducerId: %w", err)
+	}
+
+	res := m.(*initproducerid.Response)
+
+	return &InitProducerIDResponse{
+		Producer: &ProducerSession{
+			ProducerID:    int(res.ProducerID),
+			ProducerEpoch: int(res.ProducerEpoch),
+		},
+		Throttle: makeDuration(res.ThrottleTimeMs),
+		Error:    makeError(res.ErrorCode, ""),
+	}, nil
+}
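
A short sketch of the call, assuming the address of the transaction coordinator has already been resolved (for example via FindCoordinator, as the test below does); the timeout value is a placeholder:

package main

import (
	"context"
	"net"

	"github.com/segmentio/kafka-go"
)

// initProducer starts (or resumes) a producer session for a transactional ID.
func initProducer(ctx context.Context, client *kafka.Client, coordinator net.Addr, transactionalID string) (*kafka.ProducerSession, error) {
	resp, err := client.InitProducerID(ctx, &kafka.InitProducerIDRequest{
		Addr:                 coordinator,
		TransactionalID:      transactionalID,
		TransactionTimeoutMs: 30000, // placeholder; must not exceed transaction.max.timeout.ms
	})
	if err != nil {
		return nil, err
	}
	if resp.Error != nil {
		return nil, resp.Error
	}
	return resp.Producer, nil
}
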
diff -pruN 0.2.1-1.1/initproducerid_test.go 0.4.49+ds1-1/initproducerid_test.go
--- 0.2.1-1.1/initproducerid_test.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/initproducerid_test.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,100 @@
+package kafka
+
+import (
+	"context"
+	"errors"
+	"net"
+	"strconv"
+	"testing"
+	"time"
+
+	ktesting "github.com/segmentio/kafka-go/testing"
+)
+
+func TestClientInitProducerId(t *testing.T) {
+	if !ktesting.KafkaIsAtLeast("0.11.0") {
+		return
+	}
+
+	// TODO: look into why this test fails on Kafka 3.0.0 and higher when transactional support
+	// work is revisited.
+	if ktesting.KafkaIsAtLeast("3.0.0") {
+		t.Skip("Skipping test because it fails on Kafka version 3.0.0 or higher.")
+	}
+
+	client, shutdown := newLocalClient()
+	defer shutdown()
+
+	tid := makeTransactionalID()
+	// Wait for kafka setup and Coordinator to be available.
+	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+	defer cancel()
+	respc, err := waitForCoordinatorIndefinitely(ctx, client, &FindCoordinatorRequest{
+		Addr:    client.Addr,
+		Key:     tid,
+		KeyType: CoordinatorKeyTypeTransaction,
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Now establish a connection with the transaction coordinator
+	transactionCoordinator := TCP(net.JoinHostPort(respc.Coordinator.Host, strconv.Itoa(int(respc.Coordinator.Port))))
+	client, shutdown = newClient(transactionCoordinator)
+	defer shutdown()
+
+	// Check if producer epoch increases and PID remains the same when producer is
+	// initialized again with the same transactionalID
+	resp, err := client.InitProducerID(context.Background(), &InitProducerIDRequest{
+		Addr:                 transactionCoordinator,
+		TransactionalID:      tid,
+		TransactionTimeoutMs: 30000,
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if resp.Error != nil {
+		t.Fatal(resp.Error)
+	}
+
+	epoch1 := resp.Producer.ProducerEpoch
+	pid1 := resp.Producer.ProducerID
+
+	resp, err = client.InitProducerID(context.Background(), &InitProducerIDRequest{
+		Addr:                 transactionCoordinator,
+		TransactionalID:      tid,
+		TransactionTimeoutMs: 30000,
+		ProducerID:           pid1,
+		ProducerEpoch:        epoch1,
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if resp.Error != nil {
+		t.Fatal(resp.Error)
+	}
+
+	epoch2 := resp.Producer.ProducerEpoch
+	pid2 := resp.Producer.ProducerID
+
+	if pid1 != pid2 {
+		t.Fatalf("PID should stay the same across producer sessions; expected: %v got: %v", pid1, pid2)
+	}
+
+	if epoch2-epoch1 <= 0 {
+		t.Fatal("Epoch should increase when producer is initialized again with the same transactionID")
+	}
+
+	// Checks if transaction timeout is too high
+	// Transaction timeout should never be higher than broker config `transaction.max.timeout.ms`
+	resp, _ = client.InitProducerID(context.Background(), &InitProducerIDRequest{
+		Addr:                 client.Addr,
+		TransactionalID:      tid,
+		TransactionTimeoutMs: 30000000,
+	})
+	if !errors.Is(resp.Error, InvalidTransactionTimeout) {
+		t.Fatal("Should have errored with: Transaction timeout specified is higher than `transaction.max.timeout.ms`")
+	}
+}
diff -pruN 0.2.1-1.1/joingroup.go 0.4.49+ds1-1/joingroup.go
--- 0.2.1-1.1/joingroup.go	2018-11-20 17:06:32.000000000 +0000
+++ 0.4.49+ds1-1/joingroup.go	2025-08-21 19:15:53.000000000 +0000
@@ -3,13 +3,190 @@ package kafka
 import (
 	"bufio"
 	"bytes"
+	"context"
+	"fmt"
+	"net"
+	"time"
+
+	"github.com/segmentio/kafka-go/protocol"
+	"github.com/segmentio/kafka-go/protocol/consumer"
+	"github.com/segmentio/kafka-go/protocol/joingroup"
 )
 
-type memberGroupMetadata struct {
-	// MemberID assigned by the group coordinator or null if joining for the
-	// first time.
+// JoinGroupRequest is the request structure for the JoinGroup function.
+type JoinGroupRequest struct {
+	// Address of the kafka broker to send the request to.
+	Addr net.Addr
+
+	// GroupID of the group to join.
+	GroupID string
+
+	// The duration after which the coordinator considers the consumer dead
+	// if it has not received a heartbeat.
+	SessionTimeout time.Duration
+
+	// The duration the coordinator will wait for each member to rejoin when rebalancing the group.
+	RebalanceTimeout time.Duration
+
+	// The ID assigned by the group coordinator.
 	MemberID string
-	Metadata groupMetadata
+
+	// The unique identifier for the consumer instance.
+	GroupInstanceID string
+
+	// The name for the class of protocols implemented by the group being joined.
+	ProtocolType string
+
+	// The list of protocols the member supports.
+	Protocols []GroupProtocol
+}
+
+// GroupProtocol represents a consumer group protocol.
+type GroupProtocol struct {
+	// The protocol name.
+	Name string
+
+	// The protocol metadata.
+	Metadata GroupProtocolSubscription
+}
+
+type GroupProtocolSubscription struct {
+	// The Topics to subscribe to.
+	Topics []string
+
+	// UserData associated with the subscription for the given protocol.
+	UserData []byte
+
+	// Partitions owned by this consumer.
+	OwnedPartitions map[string][]int
+}
+
+// JoinGroupResponse is the response structure for the JoinGroup function.
+type JoinGroupResponse struct {
+	// An error that may have occurred when attempting to join the group.
+	//
+	// The error contains the kafka error code. Programs may use the standard
+	// errors.Is function to test the error against kafka error codes.
+	Error error
+
+	// The amount of time that the broker throttled the request.
+	Throttle time.Duration
+
+	// The generation ID of the group.
+	GenerationID int
+
+	// The group protocol selected by the coordinator.
+	ProtocolName string
+
+	// The group protocol type.
+	ProtocolType string
+
+	// The leader of the group.
+	LeaderID string
+
+	// The group member ID.
+	MemberID string
+
+	// The members of the group.
+	Members []JoinGroupResponseMember
+}
+
+// JoinGroupResponseMember represents a group member in a response to a JoinGroup request.
+type JoinGroupResponseMember struct {
+	// The group member ID.
+	ID string
+
+	// The unique identifier of the consumer instance.
+	GroupInstanceID string
+
+	// The group member metadata.
+	Metadata GroupProtocolSubscription
+}
+
+// JoinGroup sends a join group request to the coordinator and returns the response.
+func (c *Client) JoinGroup(ctx context.Context, req *JoinGroupRequest) (*JoinGroupResponse, error) {
+	joinGroup := joingroup.Request{
+		GroupID:            req.GroupID,
+		SessionTimeoutMS:   int32(req.SessionTimeout.Milliseconds()),
+		RebalanceTimeoutMS: int32(req.RebalanceTimeout.Milliseconds()),
+		MemberID:           req.MemberID,
+		GroupInstanceID:    req.GroupInstanceID,
+		ProtocolType:       req.ProtocolType,
+		Protocols:          make([]joingroup.RequestProtocol, 0, len(req.Protocols)),
+	}
+
+	for _, proto := range req.Protocols {
+		protoMeta := consumer.Subscription{
+			Version:         consumer.MaxVersionSupported,
+			Topics:          proto.Metadata.Topics,
+			UserData:        proto.Metadata.UserData,
+			OwnedPartitions: make([]consumer.TopicPartition, 0, len(proto.Metadata.OwnedPartitions)),
+		}
+		for topic, partitions := range proto.Metadata.OwnedPartitions {
+			tp := consumer.TopicPartition{
+				Topic:      topic,
+				Partitions: make([]int32, 0, len(partitions)),
+			}
+			for _, partition := range partitions {
+				tp.Partitions = append(tp.Partitions, int32(partition))
+			}
+			protoMeta.OwnedPartitions = append(protoMeta.OwnedPartitions, tp)
+		}
+
+		metaBytes, err := protocol.Marshal(consumer.MaxVersionSupported, protoMeta)
+		if err != nil {
+			return nil, fmt.Errorf("kafka.(*Client).JoinGroup: %w", err)
+		}
+
+		joinGroup.Protocols = append(joinGroup.Protocols, joingroup.RequestProtocol{
+			Name:     proto.Name,
+			Metadata: metaBytes,
+		})
+	}
+
+	m, err := c.roundTrip(ctx, req.Addr, &joinGroup)
+	if err != nil {
+		return nil, fmt.Errorf("kafka.(*Client).JoinGroup: %w", err)
+	}
+
+	r := m.(*joingroup.Response)
+
+	res := &JoinGroupResponse{
+		Error:        makeError(r.ErrorCode, ""),
+		Throttle:     makeDuration(r.ThrottleTimeMS),
+		GenerationID: int(r.GenerationID),
+		ProtocolName: r.ProtocolName,
+		ProtocolType: r.ProtocolType,
+		LeaderID:     r.LeaderID,
+		MemberID:     r.MemberID,
+		Members:      make([]JoinGroupResponseMember, 0, len(r.Members)),
+	}
+
+	for _, member := range r.Members {
+		var meta consumer.Subscription
+		err = protocol.Unmarshal(member.Metadata, consumer.MaxVersionSupported, &meta)
+		if err != nil {
+			return nil, fmt.Errorf("kafka.(*Client).JoinGroup: %w", err)
+		}
+		subscription := GroupProtocolSubscription{
+			Topics:          meta.Topics,
+			UserData:        meta.UserData,
+			OwnedPartitions: make(map[string][]int, len(meta.OwnedPartitions)),
+		}
+		for _, owned := range meta.OwnedPartitions {
+			subscription.OwnedPartitions[owned.Topic] = make([]int, 0, len(owned.Partitions))
+			for _, partition := range owned.Partitions {
+				subscription.OwnedPartitions[owned.Topic] = append(subscription.OwnedPartitions[owned.Topic], int(partition))
+			}
+		}
+		res.Members = append(res.Members, JoinGroupResponseMember{
+			ID:              member.MemberID,
+			GroupInstanceID: member.GroupInstanceID,
+			Metadata:        subscription,
+		})
+	}
+
+	return res, nil
 }
 
 type groupMetadata struct {
@@ -24,17 +201,15 @@ func (t groupMetadata) size() int32 {
 		sizeofBytes(t.UserData)
 }
 
-func (t groupMetadata) writeTo(w *bufio.Writer) {
-	writeInt16(w, t.Version)
-	writeStringArray(w, t.Topics)
-	writeBytes(w, t.UserData)
+func (t groupMetadata) writeTo(wb *writeBuffer) {
+	wb.writeInt16(t.Version)
+	wb.writeStringArray(t.Topics)
+	wb.writeBytes(t.UserData)
 }
 
 func (t groupMetadata) bytes() []byte {
 	buf := bytes.NewBuffer(nil)
-	w := bufio.NewWriter(buf)
-	t.writeTo(w)
-	w.Flush()
+	t.writeTo(&writeBuffer{w: buf})
 	return buf.Bytes()
 }
 
@@ -61,12 +236,12 @@ func (t joinGroupRequestGroupProtocolV1)
 		sizeofBytes(t.ProtocolMetadata)
 }
 
-func (t joinGroupRequestGroupProtocolV1) writeTo(w *bufio.Writer) {
-	writeString(w, t.ProtocolName)
-	writeBytes(w, t.ProtocolMetadata)
+func (t joinGroupRequestGroupProtocolV1) writeTo(wb *writeBuffer) {
+	wb.writeString(t.ProtocolName)
+	wb.writeBytes(t.ProtocolMetadata)
 }
 
-type joinGroupRequestV1 struct {
+type joinGroupRequest struct {
 	// GroupID holds the unique group identifier
 	GroupID string
 
@@ -89,7 +264,7 @@ type joinGroupRequestV1 struct {
 	GroupProtocols []joinGroupRequestGroupProtocolV1
 }
 
-func (t joinGroupRequestV1) size() int32 {
+func (t joinGroupRequest) size() int32 {
 	return sizeofString(t.GroupID) +
 		sizeofInt32(t.SessionTimeout) +
 		sizeofInt32(t.RebalanceTimeout) +
@@ -98,32 +273,32 @@ func (t joinGroupRequestV1) size() int32
 		sizeofArray(len(t.GroupProtocols), func(i int) int32 { return t.GroupProtocols[i].size() })
 }
 
-func (t joinGroupRequestV1) writeTo(w *bufio.Writer) {
-	writeString(w, t.GroupID)
-	writeInt32(w, t.SessionTimeout)
-	writeInt32(w, t.RebalanceTimeout)
-	writeString(w, t.MemberID)
-	writeString(w, t.ProtocolType)
-	writeArray(w, len(t.GroupProtocols), func(i int) { t.GroupProtocols[i].writeTo(w) })
+func (t joinGroupRequest) writeTo(wb *writeBuffer) {
+	wb.writeString(t.GroupID)
+	wb.writeInt32(t.SessionTimeout)
+	wb.writeInt32(t.RebalanceTimeout)
+	wb.writeString(t.MemberID)
+	wb.writeString(t.ProtocolType)
+	wb.writeArray(len(t.GroupProtocols), func(i int) { t.GroupProtocols[i].writeTo(wb) })
 }
 
-type joinGroupResponseMemberV1 struct {
+type joinGroupResponseMember struct {
 	// MemberID assigned by the group coordinator
 	MemberID       string
 	MemberMetadata []byte
 }
 
-func (t joinGroupResponseMemberV1) size() int32 {
+func (t joinGroupResponseMember) size() int32 {
 	return sizeofString(t.MemberID) +
 		sizeofBytes(t.MemberMetadata)
 }
 
-func (t joinGroupResponseMemberV1) writeTo(w *bufio.Writer) {
-	writeString(w, t.MemberID)
-	writeBytes(w, t.MemberMetadata)
+func (t joinGroupResponseMember) writeTo(wb *writeBuffer) {
+	wb.writeString(t.MemberID)
+	wb.writeBytes(t.MemberMetadata)
 }
 
-func (t *joinGroupResponseMemberV1) readFrom(r *bufio.Reader, size int) (remain int, err error) {
+func (t *joinGroupResponseMember) readFrom(r *bufio.Reader, size int) (remain int, err error) {
 	if remain, err = readString(r, size, &t.MemberID); err != nil {
 		return
 	}
@@ -133,7 +308,11 @@ func (t *joinGroupResponseMemberV1) read
 	return
 }
 
-type joinGroupResponseV1 struct {
+type joinGroupResponse struct {
+	v apiVersion // v1, v2
+
+	ThrottleTime int32
+
 	// ErrorCode holds response error code
 	ErrorCode int16
 
@@ -148,29 +327,42 @@ type joinGroupResponseV1 struct {
 
 	// MemberID assigned by the group coordinator
 	MemberID string
-	Members  []joinGroupResponseMemberV1
+	Members  []joinGroupResponseMember
 }
 
-func (t joinGroupResponseV1) size() int32 {
-	return sizeofInt16(t.ErrorCode) +
+func (t joinGroupResponse) size() int32 {
+	sz := sizeofInt16(t.ErrorCode) +
 		sizeofInt32(t.GenerationID) +
 		sizeofString(t.GroupProtocol) +
 		sizeofString(t.LeaderID) +
 		sizeofString(t.MemberID) +
 		sizeofArray(len(t.MemberID), func(i int) int32 { return t.Members[i].size() })
+	if t.v >= v2 {
+		sz += sizeofInt32(t.ThrottleTime)
+	}
+	return sz
 }
 
-func (t joinGroupResponseV1) writeTo(w *bufio.Writer) {
-	writeInt16(w, t.ErrorCode)
-	writeInt32(w, t.GenerationID)
-	writeString(w, t.GroupProtocol)
-	writeString(w, t.LeaderID)
-	writeString(w, t.MemberID)
-	writeArray(w, len(t.Members), func(i int) { t.Members[i].writeTo(w) })
+func (t joinGroupResponse) writeTo(wb *writeBuffer) {
+	if t.v >= v2 {
+		wb.writeInt32(t.ThrottleTime)
+	}
+	wb.writeInt16(t.ErrorCode)
+	wb.writeInt32(t.GenerationID)
+	wb.writeString(t.GroupProtocol)
+	wb.writeString(t.LeaderID)
+	wb.writeString(t.MemberID)
+	wb.writeArray(len(t.Members), func(i int) { t.Members[i].writeTo(wb) })
 }
 
-func (t *joinGroupResponseV1) readFrom(r *bufio.Reader, size int) (remain int, err error) {
-	if remain, err = readInt16(r, size, &t.ErrorCode); err != nil {
+func (t *joinGroupResponse) readFrom(r *bufio.Reader, size int) (remain int, err error) {
+	remain = size
+	if t.v >= v2 {
+		if remain, err = readInt32(r, remain, &t.ThrottleTime); err != nil {
+			return
+		}
+	}
+	if remain, err = readInt16(r, remain, &t.ErrorCode); err != nil {
 		return
 	}
 	if remain, err = readInt32(r, remain, &t.GenerationID); err != nil {
@@ -187,7 +379,7 @@ func (t *joinGroupResponseV1) readFrom(r
 	}
 
 	fn := func(r *bufio.Reader, size int) (fnRemain int, fnErr error) {
-		var item joinGroupResponseMemberV1
+		var item joinGroupResponseMember
 		if fnRemain, fnErr = (&item).readFrom(r, size); fnErr != nil {
 			return
 		}
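
A compact sketch of joining a group with the new high-level API, using the round-robin protocol; timeouts and names are placeholders, and production code must also handle the MemberIDRequired retry shown in the test below:

package main

import (
	"context"
	"time"

	"github.com/segmentio/kafka-go"
)

// joinConsumerGroup issues a single JoinGroup round trip for one topic.
func joinConsumerGroup(ctx context.Context, client *kafka.Client, groupID, topic string) (*kafka.JoinGroupResponse, error) {
	return client.JoinGroup(ctx, &kafka.JoinGroupRequest{
		GroupID:          groupID,
		ProtocolType:     "consumer",
		SessionTimeout:   30 * time.Second,
		RebalanceTimeout: 30 * time.Second,
		Protocols: []kafka.GroupProtocol{{
			Name: kafka.RoundRobinGroupBalancer{}.ProtocolName(),
			Metadata: kafka.GroupProtocolSubscription{
				Topics: []string{topic},
			},
		}},
	})
}
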
diff -pruN 0.2.1-1.1/joingroup_test.go 0.4.49+ds1-1/joingroup_test.go
--- 0.2.1-1.1/joingroup_test.go	2018-11-20 17:06:33.000000000 +0000
+++ 0.4.49+ds1-1/joingroup_test.go	2025-08-21 19:15:53.000000000 +0000
@@ -3,10 +3,127 @@ package kafka
 import (
 	"bufio"
 	"bytes"
+	"context"
+	"errors"
 	"reflect"
 	"testing"
+	"time"
+
+	ktesting "github.com/segmentio/kafka-go/testing"
 )
 
+func TestClientJoinGroup(t *testing.T) {
+	topic := makeTopic()
+	client, shutdown := newLocalClient()
+	defer shutdown()
+
+	err := clientCreateTopic(client, topic, 3)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	groupID := makeGroupID()
+
+	ctx, cancel := context.WithTimeout(context.Background(), time.Second*30)
+	defer cancel()
+	respc, err := waitForCoordinatorIndefinitely(ctx, client, &FindCoordinatorRequest{
+		Addr:    client.Addr,
+		Key:     groupID,
+		KeyType: CoordinatorKeyTypeConsumer,
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if respc.Error != nil {
+		t.Fatal(err)
+	}
+
+	groupInstanceID := "group-instance-id"
+	if !ktesting.KafkaIsAtLeast("2.4.1") {
+		groupInstanceID = ""
+	}
+	const userData = "user-data"
+
+	req := &JoinGroupRequest{
+		GroupID:          groupID,
+		GroupInstanceID:  groupInstanceID,
+		ProtocolType:     "consumer",
+		SessionTimeout:   time.Minute,
+		RebalanceTimeout: time.Minute,
+		Protocols: []GroupProtocol{
+			{
+				Name: RoundRobinGroupBalancer{}.ProtocolName(),
+				Metadata: GroupProtocolSubscription{
+					Topics:   []string{topic},
+					UserData: []byte(userData),
+					OwnedPartitions: map[string][]int{
+						topic: {0, 1, 2},
+					},
+				},
+			},
+		},
+	}
+
+	var resp *JoinGroupResponse
+
+	for {
+		resp, err = client.JoinGroup(ctx, req)
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		if errors.Is(resp.Error, MemberIDRequired) {
+			req.MemberID = resp.MemberID
+			time.Sleep(time.Second)
+			continue
+		}
+
+		if resp.Error != nil {
+			t.Fatal(resp.Error)
+		}
+		break
+	}
+
+	if resp.GenerationID != 1 {
+		t.Fatalf("expected generation ID to be 1 but got %v", resp.GenerationID)
+	}
+
+	if resp.MemberID == "" {
+		t.Fatal("expected a member ID in response")
+	}
+
+	if resp.LeaderID != resp.MemberID {
+		t.Fatalf("expected to be group leader but got %v", resp.LeaderID)
+	}
+
+	if len(resp.Members) != 1 {
+		t.Fatalf("expected 1 member got %v", resp.Members)
+	}
+
+	member := resp.Members[0]
+
+	if member.ID != resp.MemberID {
+		t.Fatal("expected to be the only group memmber")
+	}
+
+	if member.GroupInstanceID != groupInstanceID {
+		t.Fatalf("expected the group instance ID to be %v, got %v", groupInstanceID, member.GroupInstanceID)
+	}
+
+	expectedMetadata := GroupProtocolSubscription{
+		Topics:   []string{topic},
+		UserData: []byte(userData),
+		OwnedPartitions: map[string][]int{
+			topic: {0, 1, 2},
+		},
+	}
+
+	if !reflect.DeepEqual(member.Metadata, expectedMetadata) {
+		t.Fatalf("\nexpected assignment to be \n%v\nbut got\n%v", expectedMetadata, member.Metadata)
+	}
+}
+
 func TestSaramaCompatibility(t *testing.T) {
 	var (
 		// sample data from github.com/Shopify/sarama
@@ -80,13 +197,12 @@ func TestMemberMetadata(t *testing.T) {
 		UserData: []byte(`blah`),
 	}
 
-	buf := bytes.NewBuffer(nil)
-	w := bufio.NewWriter(buf)
+	b := bytes.NewBuffer(nil)
+	w := &writeBuffer{w: b}
 	item.writeTo(w)
-	w.Flush()
 
 	var found groupMetadata
-	remain, err := (&found).readFrom(bufio.NewReader(buf), buf.Len())
+	remain, err := (&found).readFrom(bufio.NewReader(b), b.Len())
 	if err != nil {
 		t.Error(err)
 		t.FailNow()
@@ -101,38 +217,41 @@ func TestMemberMetadata(t *testing.T) {
 	}
 }
 
-func TestJoinGroupResponseV1(t *testing.T) {
-	item := joinGroupResponseV1{
-		ErrorCode:     2,
-		GenerationID:  3,
-		GroupProtocol: "a",
-		LeaderID:      "b",
-		MemberID:      "c",
-		Members: []joinGroupResponseMemberV1{
-			{
-				MemberID:       "d",
-				MemberMetadata: []byte("blah"),
+func TestJoinGroupResponse(t *testing.T) {
+	supportedVersions := []apiVersion{v1, v2}
+	for _, v := range supportedVersions {
+		item := joinGroupResponse{
+			v:             v,
+			ErrorCode:     2,
+			GenerationID:  3,
+			GroupProtocol: "a",
+			LeaderID:      "b",
+			MemberID:      "c",
+			Members: []joinGroupResponseMember{
+				{
+					MemberID:       "d",
+					MemberMetadata: []byte("blah"),
+				},
 			},
-		},
-	}
+		}
 
-	buf := bytes.NewBuffer(nil)
-	w := bufio.NewWriter(buf)
-	item.writeTo(w)
-	w.Flush()
+		b := bytes.NewBuffer(nil)
+		w := &writeBuffer{w: b}
+		item.writeTo(w)
 
-	var found joinGroupResponseV1
-	remain, err := (&found).readFrom(bufio.NewReader(buf), buf.Len())
-	if err != nil {
-		t.Error(err)
-		t.FailNow()
-	}
-	if remain != 0 {
-		t.Errorf("expected 0 remain, got %v", remain)
-		t.FailNow()
-	}
-	if !reflect.DeepEqual(item, found) {
-		t.Error("expected item and found to be the same")
-		t.FailNow()
+		found := joinGroupResponse{v: v}
+		remain, err := (&found).readFrom(bufio.NewReader(b), b.Len())
+		if err != nil {
+			t.Error(err)
+			t.FailNow()
+		}
+		if remain != 0 {
+			t.Errorf("expected 0 remain, got %v", remain)
+			t.FailNow()
+		}
+		if !reflect.DeepEqual(item, found) {
+			t.Error("expected item and found to be the same")
+			t.FailNow()
+		}
 	}
 }
diff -pruN 0.2.1-1.1/kafka.go 0.4.49+ds1-1/kafka.go
--- 0.2.1-1.1/kafka.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/kafka.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,100 @@
+package kafka
+
+import "github.com/segmentio/kafka-go/protocol"
+
+// Broker represents a kafka broker in a kafka cluster.
+type Broker struct {
+	Host string
+	Port int
+	ID   int
+	Rack string
+}
+
+// Topic represents a topic in a kafka cluster.
+type Topic struct {
+	// Name of the topic.
+	Name string
+
+	// True if the topic is internal.
+	Internal bool
+
+	// The list of partitions currently available on this topic.
+	Partitions []Partition
+
+	// An error that may have occurred while attempting to read the topic
+	// metadata.
+	//
+	// The error contains both the kafka error code, and an error message
+	// returned by the kafka broker. Programs may use the standard errors.Is
+	// function to test the error against kafka error codes.
+	Error error
+}
+
+// Partition carries the metadata associated with a kafka partition.
+type Partition struct {
+	// Name of the topic that the partition belongs to, and its index in the
+	// topic.
+	Topic string
+	ID    int
+
+	// Leader, replicas, and ISR for the partition.
+	//
+	// When no physical host is known to be running a broker, the Host and Port
+	// fields will be set to the zero values. The logical broker ID is always
+	// set to the value known to the kafka cluster, even if the broker is not
+	// currently backed by a physical host.
+	Leader   Broker
+	Replicas []Broker
+	Isr      []Broker
+
+	// Available only with metadata API level >= 6:
+	OfflineReplicas []Broker
+
+	// An error that may have occurred while attempting to read the partition
+	// metadata.
+	//
+	// The error contains both the kafka error code, and an error message
+	// returned by the kafka broker. Programs may use the standard errors.Is
+	// function to test the error against kafka error codes.
+	Error error
+}
+
+// Marshal encodes v into a binary representation of the value in the kafka data
+// format.
+//
+// If v is, or contains, a struct type, the kafka struct tags are interpreted
+// and may contain one of these values:
+//
+//	nullable  valid on bytes and strings, encodes as a nullable value
+//	compact   valid on strings, encodes as a compact string
+//
+// The kafka struct tags should not contain min and max versions. If you need to
+// encode types based on specific versions of kafka APIs, use the Version type
+// instead.
+func Marshal(v interface{}) ([]byte, error) {
+	return protocol.Marshal(-1, v)
+}
+
+// Unmarshal decodes a binary representation from b into v.
+//
+// See Marshal for details.
+func Unmarshal(b []byte, v interface{}) error {
+	return protocol.Unmarshal(b, -1, v)
+}
+
+// Version represents a version number for kafka APIs.
+type Version int16
+
+// Marshal is like the top-level Marshal function, but will only encode struct
+// fields for which n falls within the min and max versions specified on the
+// struct tag.
+func (n Version) Marshal(v interface{}) ([]byte, error) {
+	return protocol.Marshal(int16(n), v)
+}
+
+// Unmarshal is like the top-level Unmarshal function, but will only decode
+// struct fields for which n falls within the min and max versions specified on
+// the struct tag.
+func (n Version) Unmarshal(b []byte, v interface{}) error {
+	return protocol.Unmarshal(b, int16(n), v)
+}
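
The version-gated struct tags work roughly as in the sketch below (the struct and field names are illustrative); fields whose min/max range excludes the selected version are simply skipped during encoding and decoding:

package main

import "github.com/segmentio/kafka-go"

type example struct {
	A int32  `kafka:"min=v0,max=v2"`
	B string `kafka:"min=v1,max=v2,nullable"`
}

// roundTrip encodes and decodes the struct at API version 1, so both A and B
// are included; at version 0 only A would be written.
func roundTrip() (example, error) {
	v := kafka.Version(1)
	b, err := v.Marshal(example{A: 42, B: "hello"})
	if err != nil {
		return example{}, err
	}
	var out example
	err = v.Unmarshal(b, &out)
	return out, err
}
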
diff -pruN 0.2.1-1.1/kafka_test.go 0.4.49+ds1-1/kafka_test.go
--- 0.2.1-1.1/kafka_test.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/kafka_test.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,191 @@
+package kafka
+
+import (
+	"fmt"
+	"math"
+	"reflect"
+	"strconv"
+	"testing"
+)
+
+func TestMarshalUnmarshal(t *testing.T) {
+	values := []interface{}{
+		true,
+		false,
+
+		int8(0),
+		int8(1),
+		int8(math.MinInt8),
+		int8(math.MaxInt8),
+
+		int16(0),
+		int16(1),
+		int16(math.MinInt16),
+		int16(math.MaxInt16),
+
+		int32(0),
+		int32(1),
+		int32(math.MinInt32),
+		int32(math.MaxInt32),
+
+		int64(0),
+		int64(1),
+		int64(math.MinInt64),
+		int64(math.MaxInt64),
+
+		"",
+		"hello world!",
+
+		([]byte)(nil),
+		[]byte(""),
+		[]byte("hello world!"),
+
+		([]int32)(nil),
+		[]int32{},
+		[]int32{0, 1, 2, 3, 4},
+
+		struct{}{},
+		struct {
+			A int32
+			B string
+			C []byte
+		}{A: 1, B: "42", C: []byte{}},
+	}
+
+	for _, v := range values {
+		t.Run(fmt.Sprintf("%+v", v), func(t *testing.T) {
+			b, err := Marshal(v)
+			if err != nil {
+				t.Fatal("marshal error:", err)
+			}
+
+			x := reflect.New(reflect.TypeOf(v))
+
+			if err := Unmarshal(b, x.Interface()); err != nil {
+				t.Fatal("unmarshal error:", err)
+			}
+
+			if !reflect.DeepEqual(v, x.Elem().Interface()) {
+				t.Fatalf("values mismatch:\nexpected: %#v\nfound:   %#v\n", v, x.Elem().Interface())
+			}
+		})
+	}
+}
+
+func TestVersionMarshalUnmarshal(t *testing.T) {
+	type T struct {
+		A int32  `kafka:"min=v0,max=v1"`
+		B string `kafka:"min=v1,max=v2"`
+		C []byte `kafka:"min=v2,max=v2,nullable"`
+	}
+
+	tests := []struct {
+		out T
+		ver Version
+	}{
+		{
+			out: T{A: 42},
+			ver: Version(0),
+		},
+	}
+
+	in := T{
+		A: 42,
+		B: "Hello World!",
+		C: []byte("question?"),
+	}
+
+	for _, test := range tests {
+		t.Run(strconv.Itoa(int(test.ver)), func(t *testing.T) {
+			b, err := test.ver.Marshal(in)
+			if err != nil {
+				t.Fatal("marshal error:", err)
+			}
+
+			x1 := test.out
+			x2 := T{}
+
+			if err := test.ver.Unmarshal(b, &x2); err != nil {
+				t.Fatal("unmarshal error:", err)
+			}
+
+			if !reflect.DeepEqual(x1, x2) {
+				t.Fatalf("values mismatch:\nexpected: %#v\nfound:   %#v\n", x1, x2)
+			}
+		})
+	}
+
+}
+
+type Struct struct {
+	A int32
+	B int32
+	C int32
+}
+
+var benchmarkValues = []interface{}{
+	true,
+	int8(1),
+	int16(1),
+	int32(1),
+	int64(1),
+	"Hello World!",
+	[]byte("Hello World!"),
+	[]int32{1, 2, 3},
+	Struct{A: 1, B: 2, C: 3},
+}
+
+func BenchmarkMarshal(b *testing.B) {
+	for _, v := range benchmarkValues {
+		b.Run(fmt.Sprintf("%T", v), func(b *testing.B) {
+			for i := 0; i < b.N; i++ {
+				_, err := Marshal(v)
+				if err != nil {
+					b.Fatal(err)
+				}
+			}
+		})
+	}
+}
+
+func BenchmarkUnmarshal(b *testing.B) {
+	for _, v := range benchmarkValues {
+		b.Run(fmt.Sprintf("%T", v), func(b *testing.B) {
+			data, err := Marshal(v)
+
+			if err != nil {
+				b.Fatal(err)
+			}
+
+			value := reflect.New(reflect.TypeOf(v))
+			ptr := value.Interface()
+			elem := value.Elem()
+			zero := reflect.Zero(reflect.TypeOf(v))
+
+			for i := 0; i < b.N; i++ {
+				if err := Unmarshal(data, ptr); err != nil {
+					b.Fatal(err)
+				}
+				elem.Set(zero)
+			}
+		})
+	}
+}
+
+type testKafkaLogger struct {
+	Prefix string
+	T      *testing.T
+}
+
+func newTestKafkaLogger(t *testing.T, prefix string) Logger {
+	return &testKafkaLogger{Prefix: prefix, T: t}
+}
+
+func (l *testKafkaLogger) Printf(msg string, args ...interface{}) {
+	l.T.Helper()
+	if l.Prefix != "" {
+		l.T.Logf(l.Prefix+" "+msg, args...)
+	} else {
+		l.T.Logf(msg, args...)
+	}
+}
diff -pruN 0.2.1-1.1/leavegroup.go 0.4.49+ds1-1/leavegroup.go
--- 0.2.1-1.1/leavegroup.go	2018-11-20 17:06:33.000000000 +0000
+++ 0.4.49+ds1-1/leavegroup.go	2025-08-21 19:15:53.000000000 +0000
@@ -1,6 +1,114 @@
 package kafka
 
-import "bufio"
+import (
+	"bufio"
+	"context"
+	"fmt"
+	"net"
+	"time"
+
+	"github.com/segmentio/kafka-go/protocol/leavegroup"
+)
+
+// LeaveGroupRequest is the request structure for the LeaveGroup function.
+type LeaveGroupRequest struct {
+	// Address of the kafka broker to send the request to.
+	Addr net.Addr
+
+	// GroupID of the group to leave.
+	GroupID string
+
+	// List of leaving member identities.
+	Members []LeaveGroupRequestMember
+}
+
+// LeaveGroupRequestMember represents the identity of a member leaving a group.
+type LeaveGroupRequestMember struct {
+	// The member ID to remove from the group.
+	ID string
+
+	// The group instance ID to remove from the group.
+	GroupInstanceID string
+}
+
+// LeaveGroupResponse is the response structure for the LeaveGroup function.
+type LeaveGroupResponse struct {
+	// An error that may have occurred when attempting to leave the group.
+	//
+	// The errors contain the kafka error code. Programs may use the standard
+	// errors.Is function to test the error against kafka error codes.
+	Error error
+
+	// The amount of time that the broker throttled the request.
+	Throttle time.Duration
+
+	// List of leaving member responses.
+	Members []LeaveGroupResponseMember
+}
+
+// LeaveGroupResponseMember represents a member leaving the group.
+type LeaveGroupResponseMember struct {
+	// The member ID of the member leaving the group.
+	ID string
+
+	// The group instance ID to remove from the group.
+	GroupInstanceID string
+
+	// An error that may have occurred when attempting to remove the member from the group.
+	//
+	// The errors contain the kafka error code. Programs may use the standard
+	// errors.Is function to test the error against kafka error codes.
+	Error error
+}
+
+func (c *Client) LeaveGroup(ctx context.Context, req *LeaveGroupRequest) (*LeaveGroupResponse, error) {
+	leaveGroup := leavegroup.Request{
+		GroupID: req.GroupID,
+		Members: make([]leavegroup.RequestMember, 0, len(req.Members)),
+	}
+
+	for _, member := range req.Members {
+		leaveGroup.Members = append(leaveGroup.Members, leavegroup.RequestMember{
+			MemberID:        member.ID,
+			GroupInstanceID: member.GroupInstanceID,
+		})
+	}
+
+	m, err := c.roundTrip(ctx, req.Addr, &leaveGroup)
+	if err != nil {
+		return nil, fmt.Errorf("kafka.(*Client).LeaveGroup: %w", err)
+	}
+
+	r := m.(*leavegroup.Response)
+
+	res := &LeaveGroupResponse{
+		Error:    makeError(r.ErrorCode, ""),
+		Throttle: makeDuration(r.ThrottleTimeMS),
+	}
+
+	if len(r.Members) == 0 {
+		// If we're using a version of the api without the
+		// members array in the response, just add a member
+		// so the api is consistent across versions.
+		r.Members = []leavegroup.ResponseMember{
+			{
+				MemberID:        req.Members[0].ID,
+				GroupInstanceID: req.Members[0].GroupInstanceID,
+			},
+		}
+	}
+
+	res.Members = make([]LeaveGroupResponseMember, 0, len(r.Members))
+	for _, member := range r.Members {
+		res.Members = append(res.Members, LeaveGroupResponseMember{
+			ID:              member.MemberID,
+			GroupInstanceID: member.GroupInstanceID,
+			Error:           makeError(member.ErrorCode, ""),
+		})
+	}
+
+	return res, nil
+}
 
 type leaveGroupRequestV0 struct {
 	// GroupID holds the unique group identifier
@@ -12,13 +120,12 @@ type leaveGroupRequestV0 struct {
 }
 
 func (t leaveGroupRequestV0) size() int32 {
-	return sizeofString(t.GroupID) +
-		sizeofString(t.MemberID)
+	return sizeofString(t.GroupID) + sizeofString(t.MemberID)
 }
 
-func (t leaveGroupRequestV0) writeTo(w *bufio.Writer) {
-	writeString(w, t.GroupID)
-	writeString(w, t.MemberID)
+func (t leaveGroupRequestV0) writeTo(wb *writeBuffer) {
+	wb.writeString(t.GroupID)
+	wb.writeString(t.MemberID)
 }
 
 type leaveGroupResponseV0 struct {
@@ -30,13 +137,11 @@ func (t leaveGroupResponseV0) size() int
 	return sizeofInt16(t.ErrorCode)
 }
 
-func (t leaveGroupResponseV0) writeTo(w *bufio.Writer) {
-	writeInt16(w, t.ErrorCode)
+func (t leaveGroupResponseV0) writeTo(wb *writeBuffer) {
+	wb.writeInt16(t.ErrorCode)
 }
 
 func (t *leaveGroupResponseV0) readFrom(r *bufio.Reader, size int) (remain int, err error) {
-	if remain, err = readInt16(r, size, &t.ErrorCode); err != nil {
-		return
-	}
+	remain, err = readInt16(r, size, &t.ErrorCode)
 	return
 }
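
A hedged usage sketch for the LeaveGroup API added above (broker address, group ID, and member ID are placeholders; a real member ID comes from a prior JoinGroup call, as the test below demonstrates):

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/segmentio/kafka-go"
)

func main() {
	client := &kafka.Client{Addr: kafka.TCP("localhost:9092")} // placeholder broker

	resp, err := client.LeaveGroup(context.Background(), &kafka.LeaveGroupRequest{
		GroupID: "my-group", // placeholder group ID
		Members: []kafka.LeaveGroupRequestMember{
			{ID: "member-id-from-join-group"}, // obtained from a JoinGroup response
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	if resp.Error != nil {
		log.Fatal(resp.Error)
	}
	for _, member := range resp.Members {
		fmt.Println(member.ID, member.Error)
	}
}
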
diff -pruN 0.2.1-1.1/leavegroup_test.go 0.4.49+ds1-1/leavegroup_test.go
--- 0.2.1-1.1/leavegroup_test.go	2018-11-20 17:06:33.000000000 +0000
+++ 0.4.49+ds1-1/leavegroup_test.go	2025-08-21 19:15:53.000000000 +0000
@@ -3,22 +3,204 @@ package kafka
 import (
 	"bufio"
 	"bytes"
+	"context"
+	"errors"
 	"reflect"
 	"testing"
+	"time"
 )
 
+func TestClientLeaveGroup(t *testing.T) {
+	// In order to get to a leave group call we need to first
+	// join a group then sync the group.
+	topic := makeTopic()
+	client, shutdown := newLocalClient()
+	client.Timeout = time.Minute
+	// Although ClientID is nullable at higher API versions, the SyncGroup
+	// API call errors when ClientID is null for some reason.
+	// The Java Kafka Consumer generates a ClientID if one is not
+	// present or if the provided ClientID is empty.
+	client.Transport.(*Transport).ClientID = "test-client"
+	defer shutdown()
+
+	err := clientCreateTopic(client, topic, 3)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	groupID := makeGroupID()
+
+	ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
+	defer cancel()
+	respc, err := waitForCoordinatorIndefinitely(ctx, client, &FindCoordinatorRequest{
+		Addr:    client.Addr,
+		Key:     groupID,
+		KeyType: CoordinatorKeyTypeConsumer,
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if respc.Error != nil {
+		t.Fatal(respc.Error)
+	}
+
+	groupInstanceID := "group-instance-id"
+	userData := "user-data"
+
+	var rrGroupBalancer RoundRobinGroupBalancer
+
+	req := &JoinGroupRequest{
+		GroupID:          groupID,
+		GroupInstanceID:  groupInstanceID,
+		ProtocolType:     "consumer",
+		SessionTimeout:   time.Minute,
+		RebalanceTimeout: time.Minute,
+		Protocols: []GroupProtocol{
+			{
+				Name: rrGroupBalancer.ProtocolName(),
+				Metadata: GroupProtocolSubscription{
+					Topics:   []string{topic},
+					UserData: []byte(userData),
+					OwnedPartitions: map[string][]int{
+						topic: {0, 1, 2},
+					},
+				},
+			},
+		},
+	}
+
+	var resp *JoinGroupResponse
+
+	for {
+		resp, err = client.JoinGroup(ctx, req)
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		if errors.Is(resp.Error, MemberIDRequired) {
+			req.MemberID = resp.MemberID
+			time.Sleep(time.Second)
+			continue
+		}
+
+		if resp.Error != nil {
+			t.Fatal(resp.Error)
+		}
+		break
+	}
+
+	if resp.MemberID != resp.LeaderID {
+		t.Fatal("expected to be group leader")
+	}
+
+	groupMembers := make([]GroupMember, 0, len(resp.Members))
+	groupUserDataLookup := make(map[string]GroupMember)
+	for _, member := range resp.Members {
+		gm := GroupMember{
+			ID:       member.ID,
+			Topics:   member.Metadata.Topics,
+			UserData: member.Metadata.UserData,
+		}
+		groupMembers = append(groupMembers, gm)
+		groupUserDataLookup[member.ID] = gm
+	}
+
+	metaResp, err := client.Metadata(ctx, &MetadataRequest{
+		Topics: []string{topic},
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	assignments := rrGroupBalancer.AssignGroups(groupMembers, metaResp.Topics[0].Partitions)
+
+	sgRequest := &SyncGroupRequest{
+		GroupID:         groupID,
+		GenerationID:    resp.GenerationID,
+		MemberID:        resp.MemberID,
+		GroupInstanceID: groupInstanceID,
+		ProtocolType:    "consumer",
+		ProtocolName:    rrGroupBalancer.ProtocolName(),
+	}
+
+	for member, assignment := range assignments {
+		sgRequest.Assignments = append(sgRequest.Assignments, SyncGroupRequestAssignment{
+			MemberID: member,
+			Assignment: GroupProtocolAssignment{
+				AssignedPartitions: assignment,
+				UserData:           groupUserDataLookup[member].UserData,
+			},
+		})
+	}
+	sgResp, err := client.SyncGroup(ctx, sgRequest)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if sgResp.Error != nil {
+		t.Fatal(sgResp.Error)
+	}
+
+	expectedAssignment := GroupProtocolAssignment{
+		AssignedPartitions: map[string][]int{
+			topic: {0, 1, 2},
+		},
+		UserData: []byte(userData),
+	}
+
+	if !reflect.DeepEqual(sgResp.Assignment, expectedAssignment) {
+		t.Fatalf("\nexpected assignment to be \n%#v \ngot\n%#v", expectedAssignment, sgResp.Assignment)
+	}
+
+	lgResp, err := client.LeaveGroup(ctx, &LeaveGroupRequest{
+		GroupID: groupID,
+		Members: []LeaveGroupRequestMember{
+			{
+				ID:              resp.MemberID,
+				GroupInstanceID: groupInstanceID,
+			},
+		},
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if lgResp.Error != nil {
+		t.Fatal(lgResp.Error)
+	}
+
+	if len(lgResp.Members) != 1 {
+		t.Fatalf("expected 1 member in response, got %#v", lgResp.Members)
+	}
+
+	member := lgResp.Members[0]
+
+	if member.Error != nil {
+		t.Fatalf("unexpected member error %v", member.Error)
+	}
+
+	if member.GroupInstanceID != groupInstanceID {
+		t.Fatalf("expected group instance id to be %s got %s", groupInstanceID, member.GroupInstanceID)
+	}
+
+	if member.ID != resp.MemberID {
+		t.Fatalf("expected member id to be %s got %s", resp.MemberID, member.ID)
+	}
+}
+
 func TestLeaveGroupResponseV0(t *testing.T) {
 	item := leaveGroupResponseV0{
 		ErrorCode: 2,
 	}
 
-	buf := bytes.NewBuffer(nil)
-	w := bufio.NewWriter(buf)
+	b := bytes.NewBuffer(nil)
+	w := &writeBuffer{w: b}
 	item.writeTo(w)
-	w.Flush()
 
 	var found leaveGroupResponseV0
-	remain, err := (&found).readFrom(bufio.NewReader(buf), buf.Len())
+	remain, err := (&found).readFrom(bufio.NewReader(b), b.Len())
 	if err != nil {
 		t.Error(err)
 		t.FailNow()
diff -pruN 0.2.1-1.1/listgroups.go 0.4.49+ds1-1/listgroups.go
--- 0.2.1-1.1/listgroups.go	2018-11-20 17:06:33.000000000 +0000
+++ 0.4.49+ds1-1/listgroups.go	2025-08-21 19:15:53.000000000 +0000
@@ -2,8 +2,65 @@ package kafka
 
 import (
 	"bufio"
+	"context"
+	"net"
+
+	"github.com/segmentio/kafka-go/protocol/listgroups"
 )
 
+// ListGroupsRequest is a request to the ListGroups API.
+type ListGroupsRequest struct {
+	// Addr is the address of the kafka broker to send the request to.
+	Addr net.Addr
+}
+
+// ListGroupsResponse is a response from the ListGroups API.
+type ListGroupsResponse struct {
+	// Error is set to a non-nil value if a top-level error occurred while fetching
+	// groups.
+	Error error
+
+	// Groups contains the list of groups.
+	Groups []ListGroupsResponseGroup
+}
+
+// ListGroupsResponseGroup contains the response details for a single group.
+type ListGroupsResponseGroup struct {
+	// GroupID is the ID of the group.
+	GroupID string
+
+	// Coordinator is the ID of the coordinator broker for the group.
+	Coordinator int
+
+	// The group protocol type (e.g. "consumer", "connect").
+	ProtocolType string
+}
+
+func (c *Client) ListGroups(
+	ctx context.Context,
+	req *ListGroupsRequest,
+) (*ListGroupsResponse, error) {
+	protoResp, err := c.roundTrip(ctx, req.Addr, &listgroups.Request{})
+	if err != nil {
+		return nil, err
+	}
+	apiResp := protoResp.(*listgroups.Response)
+	resp := &ListGroupsResponse{
+		Error: makeError(apiResp.ErrorCode, ""),
+	}
+
+	for _, apiGroupInfo := range apiResp.Groups {
+		resp.Groups = append(resp.Groups, ListGroupsResponseGroup{
+			GroupID:      apiGroupInfo.GroupID,
+			Coordinator:  int(apiGroupInfo.BrokerID),
+			ProtocolType: apiGroupInfo.ProtocolType,
+		})
+	}
+
+	return resp, nil
+}
+
+// TODO: Remove everything below and use protocol-based version above everywhere.
 type listGroupsRequestV1 struct {
 }
 
@@ -11,26 +68,25 @@ func (t listGroupsRequestV1) size() int3
 	return 0
 }
 
-func (t listGroupsRequestV1) writeTo(w *bufio.Writer) {
+func (t listGroupsRequestV1) writeTo(wb *writeBuffer) {
 }
 
-type ListGroupsResponseGroupV1 struct {
+type listGroupsResponseGroupV1 struct {
 	// GroupID holds the unique group identifier
 	GroupID      string
 	ProtocolType string
 }
 
-func (t ListGroupsResponseGroupV1) size() int32 {
-	return sizeofString(t.GroupID) +
-		sizeofString(t.ProtocolType)
+func (t listGroupsResponseGroupV1) size() int32 {
+	return sizeofString(t.GroupID) + sizeofString(t.ProtocolType)
 }
 
-func (t ListGroupsResponseGroupV1) writeTo(w *bufio.Writer) {
-	writeString(w, t.GroupID)
-	writeString(w, t.ProtocolType)
+func (t listGroupsResponseGroupV1) writeTo(wb *writeBuffer) {
+	wb.writeString(t.GroupID)
+	wb.writeString(t.ProtocolType)
 }
 
-func (t *ListGroupsResponseGroupV1) readFrom(r *bufio.Reader, size int) (remain int, err error) {
+func (t *listGroupsResponseGroupV1) readFrom(r *bufio.Reader, size int) (remain int, err error) {
 	if remain, err = readString(r, size, &t.GroupID); err != nil {
 		return
 	}
@@ -48,7 +104,7 @@ type listGroupsResponseV1 struct {
 
 	// ErrorCode holds response error code
 	ErrorCode int16
-	Groups    []ListGroupsResponseGroupV1
+	Groups    []listGroupsResponseGroupV1
 }
 
 func (t listGroupsResponseV1) size() int32 {
@@ -57,10 +113,10 @@ func (t listGroupsResponseV1) size() int
 		sizeofArray(len(t.Groups), func(i int) int32 { return t.Groups[i].size() })
 }
 
-func (t listGroupsResponseV1) writeTo(w *bufio.Writer) {
-	writeInt32(w, t.ThrottleTimeMS)
-	writeInt16(w, t.ErrorCode)
-	writeArray(w, len(t.Groups), func(i int) { t.Groups[i].writeTo(w) })
+func (t listGroupsResponseV1) writeTo(wb *writeBuffer) {
+	wb.writeInt32(t.ThrottleTimeMS)
+	wb.writeInt16(t.ErrorCode)
+	wb.writeArray(len(t.Groups), func(i int) { t.Groups[i].writeTo(wb) })
 }
 
 func (t *listGroupsResponseV1) readFrom(r *bufio.Reader, size int) (remain int, err error) {
@@ -72,8 +128,8 @@ func (t *listGroupsResponseV1) readFrom(
 	}
 
 	fn := func(withReader *bufio.Reader, withSize int) (fnRemain int, fnErr error) {
-		var item ListGroupsResponseGroupV1
-		if fnRemain, fnErr = (&item).readFrom(withReader, withSize); err != nil {
+		var item listGroupsResponseGroupV1
+		if fnRemain, fnErr = (&item).readFrom(withReader, withSize); fnErr != nil {
 			return
 		}
 		t.Groups = append(t.Groups, item)
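
A minimal sketch of the protocol-based ListGroups call defined above (the broker address is a placeholder; leaving the request's Addr unset defers to the client's own address, as the test below does):

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/segmentio/kafka-go"
)

func main() {
	client := &kafka.Client{Addr: kafka.TCP("localhost:9092")} // placeholder broker

	resp, err := client.ListGroups(context.Background(), &kafka.ListGroupsRequest{})
	if err != nil {
		log.Fatal(err)
	}
	if resp.Error != nil {
		log.Fatal(resp.Error)
	}
	for _, group := range resp.Groups {
		fmt.Printf("group=%s coordinator=%d type=%s\n",
			group.GroupID, group.Coordinator, group.ProtocolType)
	}
}
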
diff -pruN 0.2.1-1.1/listgroups_test.go 0.4.49+ds1-1/listgroups_test.go
--- 0.2.1-1.1/listgroups_test.go	2018-11-20 17:06:33.000000000 +0000
+++ 0.4.49+ds1-1/listgroups_test.go	2025-08-21 19:15:53.000000000 +0000
@@ -3,14 +3,17 @@ package kafka
 import (
 	"bufio"
 	"bytes"
+	"context"
+	"fmt"
 	"reflect"
 	"testing"
+	"time"
 )
 
 func TestListGroupsResponseV1(t *testing.T) {
 	item := listGroupsResponseV1{
 		ErrorCode: 2,
-		Groups: []ListGroupsResponseGroupV1{
+		Groups: []listGroupsResponseGroupV1{
 			{
 				GroupID:      "a",
 				ProtocolType: "b",
@@ -18,13 +21,12 @@ func TestListGroupsResponseV1(t *testing
 		},
 	}
 
-	buf := bytes.NewBuffer(nil)
-	w := bufio.NewWriter(buf)
+	b := bytes.NewBuffer(nil)
+	w := &writeBuffer{w: b}
 	item.writeTo(w)
-	w.Flush()
 
 	var found listGroupsResponseV1
-	remain, err := (&found).readFrom(bufio.NewReader(buf), buf.Len())
+	remain, err := (&found).readFrom(bufio.NewReader(b), b.Len())
 	if err != nil {
 		t.Error(err)
 		t.FailNow()
@@ -38,3 +40,77 @@ func TestListGroupsResponseV1(t *testing
 		t.FailNow()
 	}
 }
+
+func TestClientListGroups(t *testing.T) {
+	client, shutdown := newLocalClient()
+	defer shutdown()
+
+	topic := makeTopic()
+	gid := fmt.Sprintf("%s-test-group", topic)
+
+	createTopic(t, topic, 1)
+	defer deleteTopic(t, topic)
+
+	w := newTestWriter(WriterConfig{
+		Topic: topic,
+	})
+
+	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+	defer cancel()
+
+	err := w.WriteMessages(
+		ctx,
+		Message{
+			Key:   []byte("key"),
+			Value: []byte("value"),
+		},
+	)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	r := NewReader(ReaderConfig{
+		Brokers:  []string{"localhost:9092"},
+		Topic:    topic,
+		GroupID:  gid,
+		MinBytes: 10,
+		MaxBytes: 1000,
+	})
+	_, err = r.ReadMessage(ctx)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	resp, err := client.ListGroups(
+		ctx,
+		&ListGroupsRequest{},
+	)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if resp.Error != nil {
+		t.Error(
+			"Unexpected error in response",
+			"expected", nil,
+			"got", resp.Error,
+		)
+	}
+	hasGroup := false
+	hasProtocol := false
+	for _, group := range resp.Groups {
+		if group.GroupID == gid {
+			hasGroup = true
+			if group.ProtocolType == "consumer" {
+				hasProtocol = true
+			}
+			break
+		}
+	}
+
+	if !hasGroup {
+		t.Error("Group not found in list")
+	}
+	if !hasProtocol {
+		t.Error("Group does not have expected protocol type")
+	}
+}
diff -pruN 0.2.1-1.1/listoffset.go 0.4.49+ds1-1/listoffset.go
--- 0.2.1-1.1/listoffset.go	2018-11-20 17:06:33.000000000 +0000
+++ 0.4.49+ds1-1/listoffset.go	2025-08-21 19:15:53.000000000 +0000
@@ -1,6 +1,187 @@
 package kafka
 
-import "bufio"
+import (
+	"bufio"
+	"context"
+	"fmt"
+	"net"
+	"time"
+
+	"github.com/segmentio/kafka-go/protocol/listoffsets"
+)
+
+// OffsetRequest represents a request to retrieve a single partition offset.
+type OffsetRequest struct {
+	Partition int
+	Timestamp int64
+}
+
+// FirstOffsetOf constructs an OffsetRequest which asks for the first offset of
+// the partition given as argument.
+func FirstOffsetOf(partition int) OffsetRequest {
+	return OffsetRequest{Partition: partition, Timestamp: FirstOffset}
+}
+
+// LastOffsetOf constructs an OffsetRequest which asks for the last offset of
+// the partition given as argument.
+func LastOffsetOf(partition int) OffsetRequest {
+	return OffsetRequest{Partition: partition, Timestamp: LastOffset}
+}
+
+// TimeOffsetOf constructs an OffsetRequest which asks for a partition offset
+// at a given time.
+func TimeOffsetOf(partition int, at time.Time) OffsetRequest {
+	return OffsetRequest{Partition: partition, Timestamp: timestamp(at)}
+}
+
+// PartitionOffsets carries information about offsets available in a topic
+// partition.
+type PartitionOffsets struct {
+	Partition   int
+	FirstOffset int64
+	LastOffset  int64
+	Offsets     map[int64]time.Time
+	Error       error
+}
+
+// ListOffsetsRequest represents a request sent to a kafka broker to list the
+// offsets of topic partitions.
+type ListOffsetsRequest struct {
+	// Address of the kafka broker to send the request to.
+	Addr net.Addr
+
+	// A mapping of topic names to the lists of partitions that the program
+	// wishes to get the offsets for.
+	Topics map[string][]OffsetRequest
+
+	// The isolation level for the request.
+	//
+	// Defaults to ReadUncommitted.
+	//
+	// This field requires the kafka broker to support the ListOffsets API in
+	// version 2 or above (otherwise the value is ignored).
+	IsolationLevel IsolationLevel
+}
+
+// ListOffsetsResponse represents a response from a kafka broker to an offset
+// listing request.
+type ListOffsetsResponse struct {
+	// The amount of time that the broker throttled the request.
+	Throttle time.Duration
+
+	// Mapping of topic names to partition offsets; there will be one entry
+	// for each topic in the request.
+	Topics map[string][]PartitionOffsets
+}
+
+// ListOffsets sends an offset request to a kafka broker and returns the
+// response.
+func (c *Client) ListOffsets(ctx context.Context, req *ListOffsetsRequest) (*ListOffsetsResponse, error) {
+	type topicPartition struct {
+		topic     string
+		partition int
+	}
+
+	partitionOffsets := make(map[topicPartition]PartitionOffsets)
+
+	for topicName, requests := range req.Topics {
+		for _, r := range requests {
+			key := topicPartition{
+				topic:     topicName,
+				partition: r.Partition,
+			}
+
+			partition, ok := partitionOffsets[key]
+			if !ok {
+				partition = PartitionOffsets{
+					Partition:   r.Partition,
+					FirstOffset: -1,
+					LastOffset:  -1,
+					Offsets:     make(map[int64]time.Time),
+				}
+			}
+
+			switch r.Timestamp {
+			case FirstOffset:
+				partition.FirstOffset = 0
+			case LastOffset:
+				partition.LastOffset = 0
+			}
+
+			partitionOffsets[topicPartition{
+				topic:     topicName,
+				partition: r.Partition,
+			}] = partition
+		}
+	}
+
+	topics := make([]listoffsets.RequestTopic, 0, len(req.Topics))
+
+	for topicName, requests := range req.Topics {
+		partitions := make([]listoffsets.RequestPartition, len(requests))
+
+		for i, r := range requests {
+			partitions[i] = listoffsets.RequestPartition{
+				Partition:          int32(r.Partition),
+				CurrentLeaderEpoch: -1,
+				Timestamp:          r.Timestamp,
+			}
+		}
+
+		topics = append(topics, listoffsets.RequestTopic{
+			Topic:      topicName,
+			Partitions: partitions,
+		})
+	}
+
+	m, err := c.roundTrip(ctx, req.Addr, &listoffsets.Request{
+		ReplicaID:      -1,
+		IsolationLevel: int8(req.IsolationLevel),
+		Topics:         topics,
+	})
+
+	if err != nil {
+		return nil, fmt.Errorf("kafka.(*Client).ListOffsets: %w", err)
+	}
+
+	res := m.(*listoffsets.Response)
+	ret := &ListOffsetsResponse{
+		Throttle: makeDuration(res.ThrottleTimeMs),
+		Topics:   make(map[string][]PartitionOffsets, len(res.Topics)),
+	}
+
+	for _, t := range res.Topics {
+		for _, p := range t.Partitions {
+			key := topicPartition{
+				topic:     t.Topic,
+				partition: int(p.Partition),
+			}
+
+			partition := partitionOffsets[key]
+
+			switch p.Timestamp {
+			case FirstOffset:
+				partition.FirstOffset = p.Offset
+			case LastOffset:
+				partition.LastOffset = p.Offset
+			default:
+				partition.Offsets[p.Offset] = makeTime(p.Timestamp)
+			}
+
+			if p.ErrorCode != 0 {
+				partition.Error = Error(p.ErrorCode)
+			}
+
+			partitionOffsets[key] = partition
+		}
+	}
+
+	for key, partition := range partitionOffsets {
+		ret.Topics[key.topic] = append(ret.Topics[key.topic], partition)
+	}
+
+	return ret, nil
+}
 
 type listOffsetRequestV1 struct {
 	ReplicaID int32
@@ -11,9 +192,9 @@ func (r listOffsetRequestV1) size() int3
 	return 4 + sizeofArray(len(r.Topics), func(i int) int32 { return r.Topics[i].size() })
 }
 
-func (r listOffsetRequestV1) writeTo(w *bufio.Writer) {
-	writeInt32(w, r.ReplicaID)
-	writeArray(w, len(r.Topics), func(i int) { r.Topics[i].writeTo(w) })
+func (r listOffsetRequestV1) writeTo(wb *writeBuffer) {
+	wb.writeInt32(r.ReplicaID)
+	wb.writeArray(len(r.Topics), func(i int) { r.Topics[i].writeTo(wb) })
 }
 
 type listOffsetRequestTopicV1 struct {
@@ -26,9 +207,9 @@ func (t listOffsetRequestTopicV1) size()
 		sizeofArray(len(t.Partitions), func(i int) int32 { return t.Partitions[i].size() })
 }
 
-func (t listOffsetRequestTopicV1) writeTo(w *bufio.Writer) {
-	writeString(w, t.TopicName)
-	writeArray(w, len(t.Partitions), func(i int) { t.Partitions[i].writeTo(w) })
+func (t listOffsetRequestTopicV1) writeTo(wb *writeBuffer) {
+	wb.writeString(t.TopicName)
+	wb.writeArray(len(t.Partitions), func(i int) { t.Partitions[i].writeTo(wb) })
 }
 
 type listOffsetRequestPartitionV1 struct {
@@ -40,9 +221,9 @@ func (p listOffsetRequestPartitionV1) si
 	return 4 + 8
 }
 
-func (p listOffsetRequestPartitionV1) writeTo(w *bufio.Writer) {
-	writeInt32(w, p.Partition)
-	writeInt64(w, p.Time)
+func (p listOffsetRequestPartitionV1) writeTo(wb *writeBuffer) {
+	wb.writeInt32(p.Partition)
+	wb.writeInt64(p.Time)
 }
 
 type listOffsetResponseV1 []listOffsetResponseTopicV1
@@ -51,8 +232,8 @@ func (r listOffsetResponseV1) size() int
 	return sizeofArray(len(r), func(i int) int32 { return r[i].size() })
 }
 
-func (r listOffsetResponseV1) writeTo(w *bufio.Writer) {
-	writeArray(w, len(r), func(i int) { r[i].writeTo(w) })
+func (r listOffsetResponseV1) writeTo(wb *writeBuffer) {
+	wb.writeArray(len(r), func(i int) { r[i].writeTo(wb) })
 }
 
 type listOffsetResponseTopicV1 struct {
@@ -65,9 +246,9 @@ func (t listOffsetResponseTopicV1) size(
 		sizeofArray(len(t.PartitionOffsets), func(i int) int32 { return t.PartitionOffsets[i].size() })
 }
 
-func (t listOffsetResponseTopicV1) writeTo(w *bufio.Writer) {
-	writeString(w, t.TopicName)
-	writeArray(w, len(t.PartitionOffsets), func(i int) { t.PartitionOffsets[i].writeTo(w) })
+func (t listOffsetResponseTopicV1) writeTo(wb *writeBuffer) {
+	wb.writeString(t.TopicName)
+	wb.writeArray(len(t.PartitionOffsets), func(i int) { t.PartitionOffsets[i].writeTo(wb) })
 }
 
 type partitionOffsetV1 struct {
@@ -81,11 +262,11 @@ func (p partitionOffsetV1) size() int32
 	return 4 + 2 + 8 + 8
 }
 
-func (p partitionOffsetV1) writeTo(w *bufio.Writer) {
-	writeInt32(w, p.Partition)
-	writeInt16(w, p.ErrorCode)
-	writeInt64(w, p.Timestamp)
-	writeInt64(w, p.Offset)
+func (p partitionOffsetV1) writeTo(wb *writeBuffer) {
+	wb.writeInt32(p.Partition)
+	wb.writeInt16(p.ErrorCode)
+	wb.writeInt64(p.Timestamp)
+	wb.writeInt64(p.Offset)
 }
 
 func (p *partitionOffsetV1) readFrom(r *bufio.Reader, sz int) (remain int, err error) {
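
A minimal sketch of querying partition offsets with the OffsetRequest constructors defined above (broker address and topic name are placeholders; FirstOffsetOf and LastOffsetOf are also exercised by the test below):

package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"github.com/segmentio/kafka-go"
)

func main() {
	client := &kafka.Client{Addr: kafka.TCP("localhost:9092")} // placeholder broker

	resp, err := client.ListOffsets(context.Background(), &kafka.ListOffsetsRequest{
		Topics: map[string][]kafka.OffsetRequest{
			"my-topic": { // placeholder topic
				kafka.FirstOffsetOf(0),
				kafka.LastOffsetOf(0),
				// offset of the first record written after a point in time
				kafka.TimeOffsetOf(0, time.Now().Add(-time.Hour)),
			},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, p := range resp.Topics["my-topic"] {
		fmt.Printf("partition=%d first=%d last=%d err=%v\n",
			p.Partition, p.FirstOffset, p.LastOffset, p.Error)
	}
}
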
diff -pruN 0.2.1-1.1/listoffset_test.go 0.4.49+ds1-1/listoffset_test.go
--- 0.2.1-1.1/listoffset_test.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/listoffset_test.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,76 @@
+package kafka
+
+import (
+	"context"
+	"testing"
+	"time"
+)
+
+func TestClientListOffsets(t *testing.T) {
+	client, topic, shutdown := newLocalClientAndTopic()
+	defer shutdown()
+
+	now := time.Now()
+
+	_, err := client.Produce(context.Background(), &ProduceRequest{
+		Topic:        topic,
+		Partition:    0,
+		RequiredAcks: -1,
+		Records: NewRecordReader(
+			Record{Time: now, Value: NewBytes([]byte(`hello-1`))},
+			Record{Time: now, Value: NewBytes([]byte(`hello-2`))},
+			Record{Time: now, Value: NewBytes([]byte(`hello-3`))},
+		),
+	})
+
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	res, err := client.ListOffsets(context.Background(), &ListOffsetsRequest{
+		Topics: map[string][]OffsetRequest{
+			topic: {FirstOffsetOf(0), LastOffsetOf(0)},
+		},
+	})
+
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if len(res.Topics) != 1 {
+		t.Fatal("invalid number of topics found in list offsets response:", len(res.Topics))
+	}
+
+	partitions, ok := res.Topics[topic]
+	if !ok {
+		t.Fatal("missing topic in the list offsets response:", topic)
+	}
+	if len(partitions) != 1 {
+		t.Fatal("invalid number of partitions found in list offsets response:", len(partitions))
+	}
+	partition := partitions[0]
+
+	if partition.Partition != 0 {
+		t.Error("invalid partition id found in list offsets response:", partition.Partition)
+	}
+
+	if partition.FirstOffset != 0 {
+		t.Error("invalid first offset found in list offsets response:", partition.FirstOffset)
+	}
+
+	if partition.LastOffset != 3 {
+		t.Error("invalid last offset found in list offsets response:", partition.LastOffset)
+	}
+
+	if firstOffsetTime := partition.Offsets[partition.FirstOffset]; !firstOffsetTime.IsZero() {
+		t.Error("unexpected first offset time in list offsets response:", partition.Offsets)
+	}
+
+	if lastOffsetTime := partition.Offsets[partition.LastOffset]; !lastOffsetTime.IsZero() {
+		t.Error("unexpected last offset time in list offsets response:", partition.Offsets)
+	}
+
+	if partition.Error != nil {
+		t.Error("unexpected error in list offsets response:", partition.Error)
+	}
+}
diff -pruN 0.2.1-1.1/listpartitionreassignments.go 0.4.49+ds1-1/listpartitionreassignments.go
--- 0.2.1-1.1/listpartitionreassignments.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/listpartitionreassignments.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,135 @@
+package kafka
+
+import (
+	"context"
+	"net"
+	"time"
+
+	"github.com/segmentio/kafka-go/protocol/listpartitionreassignments"
+)
+
+// ListPartitionReassignmentsRequest is a request to the ListPartitionReassignments API.
+type ListPartitionReassignmentsRequest struct {
+	// Address of the kafka broker to send the request to.
+	Addr net.Addr
+
+	// Topics we want reassignments for, mapped by their name, or nil to list everything.
+	Topics map[string]ListPartitionReassignmentsRequestTopic
+
+	// Timeout is the amount of time to wait for the request to complete.
+	Timeout time.Duration
+}
+
+// ListPartitionReassignmentsRequestTopic contains the requested partitions for a single
+// topic.
+type ListPartitionReassignmentsRequestTopic struct {
+	// The partitions to list partition reassignments for.
+	PartitionIndexes []int
+}
+
+// ListPartitionReassignmentsResponse is a response from the ListPartitionReassignments API.
+type ListPartitionReassignmentsResponse struct {
+	// Error is set to a non-nil value including the code and message if a top-level
+	// error was encountered.
+	Error error
+
+	// Topics contains results for each topic, mapped by their name.
+	Topics map[string]ListPartitionReassignmentsResponseTopic
+}
+
+// ListPartitionReassignmentsResponseTopic contains the detailed result of
+// ongoing reassignments for a topic.
+type ListPartitionReassignmentsResponseTopic struct {
+	// Partitions contains result for topic partitions.
+	Partitions []ListPartitionReassignmentsResponsePartition
+}
+
+// ListPartitionReassignmentsResponsePartition contains the detailed result of
+// ongoing reassignments for a single partition.
+type ListPartitionReassignmentsResponsePartition struct {
+	// PartitionIndex contains index of the partition.
+	PartitionIndex int
+
+	// Replicas contains the current replica set.
+	Replicas []int
+
+	// AddingReplicas contains the set of replicas we are currently adding.
+	AddingReplicas []int
+
+	// RemovingReplicas contains the set of replicas we are currently removing.
+	RemovingReplicas []int
+}
+
+func (c *Client) ListPartitionReassignments(
+	ctx context.Context,
+	req *ListPartitionReassignmentsRequest,
+) (*ListPartitionReassignmentsResponse, error) {
+	apiReq := &listpartitionreassignments.Request{
+		TimeoutMs: int32(req.Timeout.Milliseconds()),
+	}
+
+	for topicName, topicReq := range req.Topics {
+		apiReq.Topics = append(
+			apiReq.Topics,
+			listpartitionreassignments.RequestTopic{
+				Name:             topicName,
+				PartitionIndexes: intToInt32Array(topicReq.PartitionIndexes),
+			},
+		)
+	}
+
+	protoResp, err := c.roundTrip(
+		ctx,
+		req.Addr,
+		apiReq,
+	)
+	if err != nil {
+		return nil, err
+	}
+	apiResp := protoResp.(*listpartitionreassignments.Response)
+
+	resp := &ListPartitionReassignmentsResponse{
+		Error:  makeError(apiResp.ErrorCode, apiResp.ErrorMessage),
+		Topics: make(map[string]ListPartitionReassignmentsResponseTopic),
+	}
+
+	for _, topicResult := range apiResp.Topics {
+		respTopic := ListPartitionReassignmentsResponseTopic{}
+		for _, partitionResult := range topicResult.Partitions {
+			respTopic.Partitions = append(
+				respTopic.Partitions,
+				ListPartitionReassignmentsResponsePartition{
+					PartitionIndex:   int(partitionResult.PartitionIndex),
+					Replicas:         int32ToIntArray(partitionResult.Replicas),
+					AddingReplicas:   int32ToIntArray(partitionResult.AddingReplicas),
+					RemovingReplicas: int32ToIntArray(partitionResult.RemovingReplicas),
+				},
+			)
+		}
+		resp.Topics[topicResult.Name] = respTopic
+	}
+
+	return resp, nil
+}
+
+func intToInt32Array(arr []int) []int32 {
+	if arr == nil {
+		return nil
+	}
+	res := make([]int32, len(arr))
+	for i := range arr {
+		res[i] = int32(arr[i])
+	}
+	return res
+}
+
+func int32ToIntArray(arr []int32) []int {
+	if arr == nil {
+		return nil
+	}
+	res := make([]int, len(arr))
+	for i := range arr {
+		res[i] = int(arr[i])
+	}
+	return res
+}
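
A hedged sketch of listing ongoing reassignments for a topic's partitions (broker and topic are placeholders; with no reassignment in progress the Topics map comes back empty, which is what the test below expects):

package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"github.com/segmentio/kafka-go"
)

func main() {
	client := &kafka.Client{Addr: kafka.TCP("localhost:9092")} // placeholder broker

	resp, err := client.ListPartitionReassignments(context.Background(),
		&kafka.ListPartitionReassignmentsRequest{
			Topics: map[string]kafka.ListPartitionReassignmentsRequestTopic{
				"my-topic": {PartitionIndexes: []int{0, 1}}, // placeholder topic
			},
			Timeout: 10 * time.Second,
		})
	if err != nil {
		log.Fatal(err)
	}
	if resp.Error != nil {
		log.Fatal(resp.Error)
	}
	for topic, t := range resp.Topics {
		for _, p := range t.Partitions {
			fmt.Printf("%s[%d]: replicas=%v adding=%v removing=%v\n",
				topic, p.PartitionIndex, p.Replicas, p.AddingReplicas, p.RemovingReplicas)
		}
	}
}
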
diff -pruN 0.2.1-1.1/listpartitionreassignments_test.go 0.4.49+ds1-1/listpartitionreassignments_test.go
--- 0.2.1-1.1/listpartitionreassignments_test.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/listpartitionreassignments_test.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,50 @@
+package kafka
+
+import (
+	"context"
+	"testing"
+
+	ktesting "github.com/segmentio/kafka-go/testing"
+)
+
+func TestClientListPartitionReassignments(t *testing.T) {
+	if !ktesting.KafkaIsAtLeast("2.4.0") {
+		return
+	}
+
+	ctx := context.Background()
+	client, shutdown := newLocalClient()
+	defer shutdown()
+
+	topic := makeTopic()
+	createTopic(t, topic, 2)
+	defer deleteTopic(t, topic)
+
+	// Can't really get an ongoing partition reassignment with local Kafka, so just do a superficial test here.
+	resp, err := client.ListPartitionReassignments(
+		ctx,
+		&ListPartitionReassignmentsRequest{
+			Topics: map[string]ListPartitionReassignmentsRequestTopic{
+				topic: {PartitionIndexes: []int{0, 1}},
+			},
+		},
+	)
+
+	if err != nil {
+		t.Fatal(err)
+	}
+	if resp.Error != nil {
+		t.Error(
+			"Unexpected error in response",
+			"expected", nil,
+			"got", resp.Error,
+		)
+	}
+	if len(resp.Topics) != 0 {
+		t.Error(
+			"Unexpected length of topic results",
+			"expected", 0,
+			"got", len(resp.Topics),
+		)
+	}
+}
diff -pruN 0.2.1-1.1/logger.go 0.4.49+ds1-1/logger.go
--- 0.2.1-1.1/logger.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/logger.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,17 @@
+package kafka
+
+// Logger interface API for log.Logger.
+type Logger interface {
+	Printf(string, ...interface{})
+}
+
+// LoggerFunc is a bridge between Logger and any third party logger
+// Usage:
+//   l := NewLogger() // some logger
+//   r := kafka.NewReader(kafka.ReaderConfig{
+//     Logger:      kafka.LoggerFunc(l.Infof),
+//     ErrorLogger: kafka.LoggerFunc(l.Errorf),
+//   })
+type LoggerFunc func(string, ...interface{})
+
+func (f LoggerFunc) Printf(msg string, args ...interface{}) { f(msg, args...) }
diff -pruN 0.2.1-1.1/lz4/lz4.go 0.4.49+ds1-1/lz4/lz4.go
--- 0.2.1-1.1/lz4/lz4.go	2018-11-20 17:06:33.000000000 +0000
+++ 0.4.49+ds1-1/lz4/lz4.go	2025-08-21 19:15:53.000000000 +0000
@@ -1,53 +1,16 @@
+// Package lz4 does nothing; it is kept for backward compatibility to avoid
+// breaking the majority of programs that imported it to install the compression
+// codec, which is now always included.
 package lz4
 
-import (
-	"bytes"
-	"io/ioutil"
+import "github.com/segmentio/kafka-go/compress/lz4"
 
-	"github.com/pierrec/lz4"
-	"github.com/segmentio/kafka-go"
+const (
+	Code = 3
 )
 
-func init() {
-	kafka.RegisterCompressionCodec(func() kafka.CompressionCodec {
-		return NewCompressionCodec()
-	})
-}
-
-type CompressionCodec struct{}
-
-const Code = 3
-
-func NewCompressionCodec() CompressionCodec {
-	return CompressionCodec{}
-}
-
-// Code implements the kafka.CompressionCodec interface.
-func (c CompressionCodec) Code() int8 {
-	return Code
-}
-
-// Encode implements the kafka.CompressionCodec interface.
-func (c CompressionCodec) Encode(src []byte) ([]byte, error) {
-	buf := bytes.Buffer{}
-	buf.Grow(len(src)) // guess a size to avoid repeat allocations.
-	writer := lz4.NewWriter(&buf)
-
-	_, err := writer.Write(src)
-	if err != nil {
-		return nil, err
-	}
-
-	err = writer.Close()
-	if err != nil {
-		return nil, err
-	}
-
-	return buf.Bytes(), err
-}
+type CompressionCodec = lz4.Codec
 
-// Decode implements the kafka.CompressionCodec interface.
-func (c CompressionCodec) Decode(src []byte) ([]byte, error) {
-	reader := lz4.NewReader(bytes.NewReader(src))
-	return ioutil.ReadAll(reader)
+func NewCompressionCodec() *CompressionCodec {
+	return &CompressionCodec{}
 }
diff -pruN 0.2.1-1.1/message.go 0.4.49+ds1-1/message.go
--- 0.2.1-1.1/message.go	2018-11-20 17:06:33.000000000 +0000
+++ 0.4.49+ds1-1/message.go	2025-08-21 19:15:53.000000000 +0000
@@ -1,47 +1,65 @@
 package kafka
 
 import (
-	"bufio"
-	"bytes"
 	"time"
 )
 
 // Message is a data structure representing kafka messages.
 type Message struct {
-	// Topic is reads only and MUST NOT be set when writing messages
+	// Topic indicates which topic this message was consumed from via Reader.
+	//
+	// When being used with Writer, this can be used to configure the topic if
+	// not already specified on the writer itself.
 	Topic string
 
-	// Partition is reads only and MUST NOT be set when writing messages
-	Partition int
-	Offset    int64
-	Key       []byte
-	Value     []byte
+	// Partition is read-only and MUST NOT be set when writing messages
+	Partition     int
+	Offset        int64
+	HighWaterMark int64
+	Key           []byte
+	Value         []byte
+	Headers       []Header
+
+	// WriterData holds arbitrary application data attached to the message.
+	// It is passed back through the Writer's `Completion` callback so the
+	// application can perform per-message post-processing.
+	WriterData interface{}
 
 	// If not set at the creation, Time will be automatically set when
 	// writing the message.
 	Time time.Time
 }
 
-func (msg Message) item() messageSetItem {
-	item := messageSetItem{
-		Offset:  msg.Offset,
-		Message: msg.message(),
-	}
-	item.MessageSize = item.Message.size()
-	return item
-}
-
-func (msg Message) message() message {
+func (msg Message) message(cw *crc32Writer) message {
 	m := message{
 		MagicByte: 1,
 		Key:       msg.Key,
 		Value:     msg.Value,
 		Timestamp: timestamp(msg.Time),
 	}
-	m.CRC = m.crc32()
+	if cw != nil {
+		m.CRC = m.crc32(cw)
+	}
 	return m
 }
 
+const timestampSize = 8
+
+func (msg *Message) size() int32 {
+	return 4 + 1 + 1 + sizeofBytes(msg.Key) + sizeofBytes(msg.Value) + timestampSize
+}
+
+func (msg *Message) headerSize() int {
+	return varArrayLen(len(msg.Headers), func(i int) int {
+		h := &msg.Headers[i]
+		return varStringLen(h.Key) + varBytesLen(h.Value)
+	})
+}
+
+func (msg *Message) totalSize() int32 {
+	return int32(msg.headerSize()) + msg.size()
+}
+
 type message struct {
 	CRC        int32
 	MagicByte  int8
@@ -51,27 +69,35 @@ type message struct {
 	Value      []byte
 }
 
-func (m message) crc32() int32 {
-	return int32(crc32OfMessage(m.MagicByte, m.Attributes, m.Timestamp, m.Key, m.Value))
+func (m message) crc32(cw *crc32Writer) int32 {
+	cw.crc32 = 0
+	cw.writeInt8(m.MagicByte)
+	cw.writeInt8(m.Attributes)
+	if m.MagicByte != 0 {
+		cw.writeInt64(m.Timestamp)
+	}
+	cw.writeBytes(m.Key)
+	cw.writeBytes(m.Value)
+	return int32(cw.crc32)
 }
 
 func (m message) size() int32 {
 	size := 4 + 1 + 1 + sizeofBytes(m.Key) + sizeofBytes(m.Value)
 	if m.MagicByte != 0 {
-		size += 8 // Timestamp
+		size += timestampSize
 	}
 	return size
 }
 
-func (m message) writeTo(w *bufio.Writer) {
-	writeInt32(w, m.CRC)
-	writeInt8(w, m.MagicByte)
-	writeInt8(w, m.Attributes)
+func (m message) writeTo(wb *writeBuffer) {
+	wb.writeInt32(m.CRC)
+	wb.writeInt8(m.MagicByte)
+	wb.writeInt8(m.Attributes)
 	if m.MagicByte != 0 {
-		writeInt64(w, m.Timestamp)
+		wb.writeInt64(m.Timestamp)
 	}
-	writeBytes(w, m.Key)
-	writeBytes(w, m.Value)
+	wb.writeBytes(m.Key)
+	wb.writeBytes(m.Value)
 }
 
 type messageSetItem struct {
@@ -84,10 +110,10 @@ func (m messageSetItem) size() int32 {
 	return 8 + 4 + m.Message.size()
 }
 
-func (m messageSetItem) writeTo(w *bufio.Writer) {
-	writeInt64(w, m.Offset)
-	writeInt32(w, m.MessageSize)
-	m.Message.writeTo(w)
+func (m messageSetItem) writeTo(wb *writeBuffer) {
+	wb.writeInt64(m.Offset)
+	wb.writeInt32(m.MessageSize)
+	m.Message.writeTo(wb)
 }
 
 type messageSet []messageSetItem
@@ -99,155 +125,8 @@ func (s messageSet) size() (size int32)
 	return
 }
 
-func (s messageSet) writeTo(w *bufio.Writer) {
+func (s messageSet) writeTo(wb *writeBuffer) {
 	for _, m := range s {
-		m.writeTo(w)
-	}
-}
-
-type messageSetReader struct {
-	*readerStack
-}
-
-type readerStack struct {
-	reader *bufio.Reader
-	remain int
-	base   int64
-	parent *readerStack
-}
-
-func newMessageSetReader(reader *bufio.Reader, remain int) *messageSetReader {
-	return &messageSetReader{&readerStack{
-		reader: reader,
-		remain: remain,
-	}}
-}
-
-func (r *messageSetReader) readMessage(min int64,
-	key func(*bufio.Reader, int, int) (int, error),
-	val func(*bufio.Reader, int, int) (int, error),
-) (offset int64, timestamp int64, err error) {
-	for r.readerStack != nil {
-		if r.remain == 0 {
-			r.readerStack = r.parent
-			continue
-		}
-
-		var attributes int8
-		if offset, attributes, timestamp, r.remain, err = readMessageHeader(r.reader, r.remain); err != nil {
-			return
-		}
-
-		// if the message is compressed, decompress it and push a new reader
-		// onto the stack.
-		code := attributes & compressionCodecMask
-		if code != 0 {
-			var codec CompressionCodec
-			if codec, err = resolveCodec(attributes); err != nil {
-				return
-			}
-
-			// discard next four bytes...will be -1 to indicate null key
-			if r.remain, err = discardN(r.reader, r.remain, 4); err != nil {
-				return
-			}
-
-			// read and decompress the contained message set.
-			var decompressed []byte
-			if r.remain, err = readBytesWith(r.reader, r.remain, func(r *bufio.Reader, sz, n int) (remain int, err error) {
-				var value []byte
-				if value, remain, err = readNewBytes(r, sz, n); err != nil {
-					return
-				}
-				decompressed, err = codec.Decode(value)
-				return
-			}); err != nil {
-				return
-			}
-
-			// the compressed message's offset will be equal to the offset of
-			// the last message in the set.  within the compressed set, the
-			// offsets will be relative, so we have to scan through them to
-			// get the base offset.  for example, if there are four compressed
-			// messages at offsets 10-13, then the container message will have
-			// offset 13 and the contained messages will be 0,1,2,3.  the base
-			// offset for the container, then is 13-3=10.
-			if offset, err = extractOffset(offset, decompressed); err != nil {
-				return
-			}
-
-			r.readerStack = &readerStack{
-				reader: bufio.NewReader(bytes.NewReader(decompressed)),
-				remain: len(decompressed),
-				base:   offset,
-				parent: r.readerStack,
-			}
-			continue
-		}
-
-		// adjust the offset in case we're reading compressed messages.  the
-		// base will be zero otherwise.
-		offset += r.base
-
-		// When the messages are compressed kafka may return messages at an
-		// earlier offset than the one that was requested, it's the client's
-		// responsibility to ignore those.
-		if offset < min {
-			if r.remain, err = discardBytes(r.reader, r.remain); err != nil {
-				return
-			}
-			if r.remain, err = discardBytes(r.reader, r.remain); err != nil {
-				return
-			}
-			continue
-		}
-
-		if r.remain, err = readBytesWith(r.reader, r.remain, key); err != nil {
-			return
-		}
-		r.remain, err = readBytesWith(r.reader, r.remain, val)
-		return
-	}
-
-	err = errShortRead
-	return
-}
-
-func (r *messageSetReader) remaining() (remain int) {
-	for s := r.readerStack; s != nil; s = s.parent {
-		remain += s.remain
-	}
-	return
-}
-
-func (r *messageSetReader) discard() (err error) {
-	if r.readerStack == nil {
-		return
-	}
-	// rewind up to the top-most reader b/c it's the only one that's doing
-	// actual i/o.  the rest are byte buffers that have been pushed on the stack
-	// while reading compressed message sets.
-	for r.parent != nil {
-		r.readerStack = r.parent
-	}
-	r.remain, err = discardN(r.reader, r.remain, r.remain)
-	return
-}
-
-func extractOffset(base int64, msgSet []byte) (offset int64, err error) {
-	r, remain := bufio.NewReader(bytes.NewReader(msgSet)), len(msgSet)
-	for remain > 0 {
-		if remain, err = readInt64(r, remain, &offset); err != nil {
-			return
-		}
-		var sz int32
-		if remain, err = readInt32(r, remain, &sz); err != nil {
-			return
-		}
-		if remain, err = discardN(r, remain, int(sz)); err != nil {
-			return
-		}
+		m.writeTo(wb)
 	}
-	offset = base - offset
-	return
 }
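
The WriterData field documented in the Message struct above is meant to flow through to the Writer's Completion callback; a hedged sketch (broker, topic, and the tracking value are illustrative, and the Completion signature is assumed from the kafka-go Writer API):

package main

import (
	"context"
	"log"

	"github.com/segmentio/kafka-go"
)

func main() {
	w := &kafka.Writer{
		Addr:  kafka.TCP("localhost:9092"), // placeholder broker
		Topic: "my-topic",                  // placeholder topic
		// Completion is invoked for each completed batch; WriterData travels
		// with every message, so per-message context is available here.
		Completion: func(messages []kafka.Message, err error) {
			for _, m := range messages {
				if id, ok := m.WriterData.(string); ok {
					log.Printf("delivered %s (err=%v)", id, err)
				}
			}
		},
	}
	defer w.Close()

	err := w.WriteMessages(context.Background(), kafka.Message{
		Value:      []byte("hello"),
		WriterData: "request-1234", // arbitrary application data
	})
	if err != nil {
		log.Fatal(err)
	}
}
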
diff -pruN 0.2.1-1.1/message_reader.go 0.4.49+ds1-1/message_reader.go
--- 0.2.1-1.1/message_reader.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/message_reader.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,555 @@
+package kafka
+
+import (
+	"bufio"
+	"bytes"
+	"fmt"
+	"io"
+	"log"
+)
+
+type readBytesFunc func(*bufio.Reader, int, int) (int, error)
+
+// messageSetReader processes the messages encoded into a fetch response.
+// The response may contain a mix of Record Batches (newer format) and Messages
+// (older format).
+type messageSetReader struct {
+	*readerStack      // used for decompressing compressed messages and record batches
+	empty        bool // if true, short circuits messageSetReader methods
+	debug        bool // enable debug log messages
+	// How many bytes are expected to remain in the response.
+	//
+	// This is used to detect truncation of the response.
+	lengthRemain int
+
+	decompressed *bytes.Buffer
+}
+
+type readerStack struct {
+	reader *bufio.Reader
+	remain int
+	base   int64
+	parent *readerStack
+	count  int            // how many messages left in the current message set
+	header messagesHeader // the current header for a subset of messages within the set.
+}
+
+// messagesHeader describes a set of records. There may be many messagesHeader values in a message set.
+type messagesHeader struct {
+	firstOffset int64
+	length      int32
+	crc         int32
+	magic       int8
+	// v1 composes attributes specific to v0 and v1 message headers
+	v1 struct {
+		attributes int8
+		timestamp  int64
+	}
+	// v2 composes attributes specific to v2 message headers
+	v2 struct {
+		leaderEpoch     int32
+		attributes      int16
+		lastOffsetDelta int32
+		firstTimestamp  int64
+		lastTimestamp   int64
+		producerID      int64
+		producerEpoch   int16
+		baseSequence    int32
+		count           int32
+	}
+}
+
+func (h messagesHeader) compression() (codec CompressionCodec, err error) {
+	const compressionCodecMask = 0x07
+	var code int8
+	switch h.magic {
+	case 0, 1:
+		code = h.v1.attributes & compressionCodecMask
+	case 2:
+		code = int8(h.v2.attributes & compressionCodecMask)
+	default:
+		err = h.badMagic()
+		return
+	}
+	if code != 0 {
+		codec, err = resolveCodec(code)
+	}
+	return
+}
+
+func (h messagesHeader) badMagic() error {
+	return fmt.Errorf("unsupported magic byte %d in header", h.magic)
+}
+
+func newMessageSetReader(reader *bufio.Reader, remain int) (*messageSetReader, error) {
+	res := &messageSetReader{
+		readerStack: &readerStack{
+			reader: reader,
+			remain: remain,
+		},
+		decompressed: acquireBuffer(),
+	}
+	err := res.readHeader()
+	return res, err
+}
+
+func (r *messageSetReader) remaining() (remain int) {
+	if r.empty {
+		return 0
+	}
+	for s := r.readerStack; s != nil; s = s.parent {
+		remain += s.remain
+	}
+	return
+}
+
+func (r *messageSetReader) discard() (err error) {
+	switch {
+	case r.empty:
+	case r.readerStack == nil:
+	default:
+		// rewind up to the top-most reader b/c it's the only one that's doing
+		// actual i/o.  the rest are byte buffers that have been pushed on the stack
+		// while reading compressed message sets.
+		for r.parent != nil {
+			r.readerStack = r.parent
+		}
+		err = r.discardN(r.remain)
+	}
+	return
+}
+
+func (r *messageSetReader) readMessage(min int64, key readBytesFunc, val readBytesFunc) (
+	offset int64, lastOffset int64, timestamp int64, headers []Header, err error) {
+
+	if r.empty {
+		err = RequestTimedOut
+		return
+	}
+	if err = r.readHeader(); err != nil {
+		return
+	}
+	switch r.header.magic {
+	case 0, 1:
+		offset, timestamp, headers, err = r.readMessageV1(min, key, val)
+		// Set an invalid value so that it can be ignored
+		lastOffset = -1
+	case 2:
+		offset, lastOffset, timestamp, headers, err = r.readMessageV2(min, key, val)
+	default:
+		err = r.header.badMagic()
+	}
+	return
+}
+
+func (r *messageSetReader) readMessageV1(min int64, key readBytesFunc, val readBytesFunc) (
+	offset int64, timestamp int64, headers []Header, err error) {
+
+	for r.readerStack != nil {
+		if r.remain == 0 {
+			r.readerStack = r.parent
+			continue
+		}
+		if err = r.readHeader(); err != nil {
+			return
+		}
+		offset = r.header.firstOffset
+		timestamp = r.header.v1.timestamp
+		var codec CompressionCodec
+		if codec, err = r.header.compression(); err != nil {
+			return
+		}
+		if r.debug {
+			r.log("Reading with codec=%T", codec)
+		}
+		if codec != nil {
+			// discard next four bytes...will be -1 to indicate null key
+			if err = r.discardN(4); err != nil {
+				return
+			}
+
+			// read and decompress the contained message set.
+			r.decompressed.Reset()
+			if err = r.readBytesWith(func(br *bufio.Reader, sz int, n int) (remain int, err error) {
+				// x4 as a guess that the average compression ratio is near 75%
+				r.decompressed.Grow(4 * n)
+				limitReader := io.LimitedReader{R: br, N: int64(n)}
+				codecReader := codec.NewReader(&limitReader)
+				_, err = r.decompressed.ReadFrom(codecReader)
+				remain = sz - (n - int(limitReader.N))
+				codecReader.Close()
+				return
+			}); err != nil {
+				return
+			}
+
+			// the compressed message's offset will be equal to the offset of
+			// the last message in the set.  within the compressed set, the
+			// offsets will be relative, so we have to scan through them to
+			// get the base offset.  for example, if there are four compressed
+			// messages at offsets 10-13, then the container message will have
+			// offset 13 and the contained messages will be 0,1,2,3.  the base
+			// offset for the container, then is 13-3=10.
+			if offset, err = extractOffset(offset, r.decompressed.Bytes()); err != nil {
+				return
+			}
+
+			// mark the outer message as being read
+			r.markRead()
+
+			// then push the decompressed bytes onto the stack.
+			r.readerStack = &readerStack{
+				// Request a buffer of size 0, which bufio bumps up to its
+				// 16-byte minimum. We are already reading buffered data here,
+				// no need to reserve another 4KB buffer.
+				reader: bufio.NewReaderSize(r.decompressed, 0),
+				remain: r.decompressed.Len(),
+				base:   offset,
+				parent: r.readerStack,
+			}
+			continue
+		}
+
+		// adjust the offset in case we're reading compressed messages.  the
+		// base will be zero otherwise.
+		offset += r.base
+
+		// When the messages are compressed kafka may return messages at an
+		// earlier offset than the one that was requested, it's the client's
+		// responsibility to ignore those.
+		//
+		// At this point, the message header has been read, so discarding
+		// the rest of the message means we have to discard the key, and then
+		// the value. Each of those are preceded by a 4-byte length. Discarding
+		// them is then reading that length variable and then discarding that
+		// amount.
+		if offset < min {
+			// discard the key
+			if err = r.discardBytes(); err != nil {
+				return
+			}
+			// discard the value
+			if err = r.discardBytes(); err != nil {
+				return
+			}
+			// since we have fully consumed the message, mark as read
+			r.markRead()
+			continue
+		}
+		if err = r.readBytesWith(key); err != nil {
+			return
+		}
+		if err = r.readBytesWith(val); err != nil {
+			return
+		}
+		r.markRead()
+		return
+	}
+	err = errShortRead
+	return
+}
+
+func (r *messageSetReader) readMessageV2(_ int64, key readBytesFunc, val readBytesFunc) (
+	offset int64, lastOffset int64, timestamp int64, headers []Header, err error) {
+	if err = r.readHeader(); err != nil {
+		return
+	}
+	if r.count == int(r.header.v2.count) { // first time reading this set, so check for compression headers.
+		var codec CompressionCodec
+		if codec, err = r.header.compression(); err != nil {
+			return
+		}
+		if codec != nil {
+			batchRemain := int(r.header.length - 49) // 49 = the v2 batch header bytes that readHeader consumes after the length field
+			if batchRemain > r.remain {
+				err = errShortRead
+				return
+			}
+			if batchRemain < 0 {
+				err = fmt.Errorf("batch remain < 0 (%d)", batchRemain)
+				return
+			}
+			r.decompressed.Reset()
+			// x4 as a guess that the average compression ratio is near 75%
+			r.decompressed.Grow(4 * batchRemain)
+			limitReader := io.LimitedReader{R: r.reader, N: int64(batchRemain)}
+			codecReader := codec.NewReader(&limitReader)
+			_, err = r.decompressed.ReadFrom(codecReader)
+			codecReader.Close()
+			if err != nil {
+				return
+			}
+			r.remain -= batchRemain - int(limitReader.N)
+			r.readerStack = &readerStack{
+				reader: bufio.NewReaderSize(r.decompressed, 0), // the new stack reads from the decompressed buffer
+				remain: r.decompressed.Len(),
+				base:   -1, // base is unused here
+				parent: r.readerStack,
+				header: r.header,
+				count:  r.count,
+			}
+			// all of the messages in this set are in the decompressed set just pushed onto the reader
+			// stack. here we set the parent count to 0 so that when the child set is exhausted, the
+			// reader will then try to read the header of the next message set
+			r.readerStack.parent.count = 0
+		}
+	}
+	remainBefore := r.remain
+	var length int64
+	if err = r.readVarInt(&length); err != nil {
+		return
+	}
+	lengthOfLength := remainBefore - r.remain
+	var attrs int8
+	if err = r.readInt8(&attrs); err != nil {
+		return
+	}
+	var timestampDelta int64
+	if err = r.readVarInt(&timestampDelta); err != nil {
+		return
+	}
+	timestamp = r.header.v2.firstTimestamp + timestampDelta
+	var offsetDelta int64
+	if err = r.readVarInt(&offsetDelta); err != nil {
+		return
+	}
+	offset = r.header.firstOffset + offsetDelta
+	if err = r.runFunc(key); err != nil {
+		return
+	}
+	if err = r.runFunc(val); err != nil {
+		return
+	}
+	var headerCount int64
+	if err = r.readVarInt(&headerCount); err != nil {
+		return
+	}
+	if headerCount > 0 {
+		headers = make([]Header, headerCount)
+		for i := range headers {
+			if err = r.readMessageHeader(&headers[i]); err != nil {
+				return
+			}
+		}
+	}
+	lastOffset = r.header.firstOffset + int64(r.header.v2.lastOffsetDelta)
+	r.lengthRemain -= int(length) + lengthOfLength
+	r.markRead()
+	return
+}
+
+func (r *messageSetReader) discardBytes() (err error) {
+	r.remain, err = discardBytes(r.reader, r.remain)
+	return
+}
+
+func (r *messageSetReader) discardN(sz int) (err error) {
+	r.remain, err = discardN(r.reader, r.remain, sz)
+	return
+}
+
+func (r *messageSetReader) markRead() {
+	if r.count == 0 {
+		panic("markRead: negative count")
+	}
+	r.count--
+	r.unwindStack()
+	if r.debug {
+		r.log("Mark read remain=%d", r.remain)
+	}
+}
+
+func (r *messageSetReader) unwindStack() {
+	for r.count == 0 {
+		if r.remain == 0 {
+			if r.parent != nil {
+				if r.debug {
+					r.log("Popped reader stack")
+				}
+				r.readerStack = r.parent
+				continue
+			}
+		}
+		break
+	}
+}
+
+func (r *messageSetReader) readMessageHeader(header *Header) (err error) {
+	var keyLen int64
+	if err = r.readVarInt(&keyLen); err != nil {
+		return
+	}
+	if header.Key, err = r.readNewString(int(keyLen)); err != nil {
+		return
+	}
+	var valLen int64
+	if err = r.readVarInt(&valLen); err != nil {
+		return
+	}
+	if header.Value, err = r.readNewBytes(int(valLen)); err != nil {
+		return
+	}
+	return nil
+}
+
+func (r *messageSetReader) runFunc(rbFunc readBytesFunc) (err error) {
+	var length int64
+	if err = r.readVarInt(&length); err != nil {
+		return
+	}
+	if r.remain, err = rbFunc(r.reader, r.remain, int(length)); err != nil {
+		return
+	}
+	return
+}
+
+func (r *messageSetReader) readHeader() (err error) {
+	if r.count > 0 {
+		// currently reading a set of messages, no need to read a header until they are exhausted.
+		return
+	}
+	r.header = messagesHeader{}
+	if err = r.readInt64(&r.header.firstOffset); err != nil {
+		return
+	}
+	if err = r.readInt32(&r.header.length); err != nil {
+		return
+	}
+	var crcOrLeaderEpoch int32
+	if err = r.readInt32(&crcOrLeaderEpoch); err != nil {
+		return
+	}
+	if err = r.readInt8(&r.header.magic); err != nil {
+		return
+	}
+	switch r.header.magic {
+	case 0:
+		r.header.crc = crcOrLeaderEpoch
+		if err = r.readInt8(&r.header.v1.attributes); err != nil {
+			return
+		}
+		r.count = 1
+		// Set arbitrary non-zero length so that we always assume the
+		// message is truncated since bytes remain.
+		r.lengthRemain = 1
+		if r.debug {
+			r.log("Read v0 header with offset=%d len=%d magic=%d attributes=%d", r.header.firstOffset, r.header.length, r.header.magic, r.header.v1.attributes)
+		}
+	case 1:
+		r.header.crc = crcOrLeaderEpoch
+		if err = r.readInt8(&r.header.v1.attributes); err != nil {
+			return
+		}
+		if err = r.readInt64(&r.header.v1.timestamp); err != nil {
+			return
+		}
+		r.count = 1
+		// Set arbitrary non-zero length so that we always assume the
+		// message is truncated since bytes remain.
+		r.lengthRemain = 1
+		if r.debug {
+			r.log("Read v1 header with remain=%d offset=%d magic=%d and attributes=%d", r.remain, r.header.firstOffset, r.header.magic, r.header.v1.attributes)
+		}
+	case 2:
+		r.header.v2.leaderEpoch = crcOrLeaderEpoch
+		if err = r.readInt32(&r.header.crc); err != nil {
+			return
+		}
+		if err = r.readInt16(&r.header.v2.attributes); err != nil {
+			return
+		}
+		if err = r.readInt32(&r.header.v2.lastOffsetDelta); err != nil {
+			return
+		}
+		if err = r.readInt64(&r.header.v2.firstTimestamp); err != nil {
+			return
+		}
+		if err = r.readInt64(&r.header.v2.lastTimestamp); err != nil {
+			return
+		}
+		if err = r.readInt64(&r.header.v2.producerID); err != nil {
+			return
+		}
+		if err = r.readInt16(&r.header.v2.producerEpoch); err != nil {
+			return
+		}
+		if err = r.readInt32(&r.header.v2.baseSequence); err != nil {
+			return
+		}
+		if err = r.readInt32(&r.header.v2.count); err != nil {
+			return
+		}
+		r.count = int(r.header.v2.count)
+		// Subtract the 49 bytes of record batch header that follow the length
+		// field (leader epoch through record count) so that lengthRemain only
+		// tracks the bytes of the records themselves.
+		r.lengthRemain = int(r.header.length) - 49
+		if r.debug {
+			r.log("Read v2 header with count=%d offset=%d len=%d magic=%d attributes=%d", r.count, r.header.firstOffset, r.header.length, r.header.magic, r.header.v2.attributes)
+		}
+	default:
+		err = r.header.badMagic()
+		return
+	}
+	return
+}
+
+func (r *messageSetReader) readNewBytes(len int) (res []byte, err error) {
+	res, r.remain, err = readNewBytes(r.reader, r.remain, len)
+	return
+}
+
+func (r *messageSetReader) readNewString(len int) (res string, err error) {
+	res, r.remain, err = readNewString(r.reader, r.remain, len)
+	return
+}
+
+func (r *messageSetReader) readInt8(val *int8) (err error) {
+	r.remain, err = readInt8(r.reader, r.remain, val)
+	return
+}
+
+func (r *messageSetReader) readInt16(val *int16) (err error) {
+	r.remain, err = readInt16(r.reader, r.remain, val)
+	return
+}
+
+func (r *messageSetReader) readInt32(val *int32) (err error) {
+	r.remain, err = readInt32(r.reader, r.remain, val)
+	return
+}
+
+func (r *messageSetReader) readInt64(val *int64) (err error) {
+	r.remain, err = readInt64(r.reader, r.remain, val)
+	return
+}
+
+func (r *messageSetReader) readVarInt(val *int64) (err error) {
+	r.remain, err = readVarInt(r.reader, r.remain, val)
+	return
+}
+
+func (r *messageSetReader) readBytesWith(fn readBytesFunc) (err error) {
+	r.remain, err = readBytesWith(r.reader, r.remain, fn)
+	return
+}
+
+func (r *messageSetReader) log(msg string, args ...interface{}) {
+	log.Printf("[DEBUG] "+msg, args...)
+}
+
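+// extractOffset walks a decompressed v0/v1 inner message set to find the last
+// (relative) offset it contains, then derives the base offset of the set from
+// the offset of the enclosing outer message.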
+func extractOffset(base int64, msgSet []byte) (offset int64, err error) {
+	r, remain := bufio.NewReader(bytes.NewReader(msgSet)), len(msgSet)
+	for remain > 0 {
+		if remain, err = readInt64(r, remain, &offset); err != nil {
+			return
+		}
+		var sz int32
+		if remain, err = readInt32(r, remain, &sz); err != nil {
+			return
+		}
+		if remain, err = discardN(r, remain, int(sz)); err != nil {
+			return
+		}
+	}
+	offset = base - offset
+	return
+}
diff -pruN 0.2.1-1.1/message_test.go 0.4.49+ds1-1/message_test.go
--- 0.2.1-1.1/message_test.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/message_test.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,756 @@
+package kafka
+
+import (
+	"bufio"
+	"bytes"
+	"encoding/hex"
+	"errors"
+	"fmt"
+	"io"
+	"math/rand"
+	"os"
+	"testing"
+	"time"
+
+	"github.com/segmentio/kafka-go/compress/gzip"
+	"github.com/segmentio/kafka-go/compress/lz4"
+	"github.com/segmentio/kafka-go/compress/snappy"
+	"github.com/segmentio/kafka-go/compress/zstd"
+	"github.com/stretchr/testify/require"
+)
+
+// This regression test covers reading messages using offsets that
+// are at the beginning and in the middle of compressed and uncompressed
+// v1 message sets.
+func TestV1BatchOffsets(t *testing.T) {
+	const highWatermark = 5000
+	const topic = "test-topic"
+	var (
+		msg0 = Message{
+			Offset: 0,
+			Key:    []byte("msg-0"),
+			Value:  []byte("key-0"),
+		}
+		msg1 = Message{
+			Offset: 1,
+			Key:    []byte("msg-1"),
+			Value:  []byte("key-1"),
+		}
+		msg2 = Message{
+			Offset: 2,
+			Key:    []byte("msg-2"),
+			Value:  []byte("key-2"),
+		}
+	)
+
+	for _, tc := range []struct {
+		name     string
+		builder  fetchResponseBuilder
+		offset   int64
+		expected []Message
+		debug    bool
+	}{
+		{
+			name:   "num=1 off=0",
+			offset: 0,
+			builder: fetchResponseBuilder{
+				header: fetchResponseHeader{
+					highWatermarkOffset: highWatermark,
+					lastStableOffset:    highWatermark,
+					topic:               topic,
+				},
+				msgSets: []messageSetBuilder{
+					v1MessageSetBuilder{
+						msgs: []Message{msg0},
+					},
+				},
+			},
+			expected: []Message{msg0},
+		},
+		{
+			name:   "num=1 off=0 compressed",
+			offset: 0,
+			builder: fetchResponseBuilder{
+				header: fetchResponseHeader{
+					highWatermarkOffset: highWatermark,
+					lastStableOffset:    highWatermark,
+					topic:               topic,
+				},
+				msgSets: []messageSetBuilder{
+					v1MessageSetBuilder{
+						codec: new(gzip.Codec),
+						msgs:  []Message{msg0},
+					},
+				},
+			},
+			expected: []Message{msg0},
+		},
+		{
+			name:   "num=1 off=1",
+			offset: 1,
+			builder: fetchResponseBuilder{
+				header: fetchResponseHeader{
+					highWatermarkOffset: highWatermark,
+					lastStableOffset:    highWatermark,
+					topic:               topic,
+				},
+				msgSets: []messageSetBuilder{
+					v1MessageSetBuilder{
+						msgs: []Message{msg1},
+					},
+				},
+			},
+			expected: []Message{msg1},
+		},
+		{
+			name:   "num=1 off=1 compressed",
+			offset: 1,
+			builder: fetchResponseBuilder{
+				header: fetchResponseHeader{
+					highWatermarkOffset: highWatermark,
+					lastStableOffset:    highWatermark,
+					topic:               topic,
+				},
+				msgSets: []messageSetBuilder{
+					v1MessageSetBuilder{
+						codec: new(gzip.Codec),
+						msgs:  []Message{msg1},
+					},
+				},
+			},
+			expected: []Message{msg1},
+		},
+		{
+			name:   "num=3 off=0",
+			offset: 0,
+			builder: fetchResponseBuilder{
+				header: fetchResponseHeader{
+					highWatermarkOffset: highWatermark,
+					lastStableOffset:    highWatermark,
+					topic:               topic,
+				},
+				msgSets: []messageSetBuilder{
+					v1MessageSetBuilder{
+						msgs: []Message{msg0, msg1, msg2},
+					},
+				},
+			},
+			expected: []Message{msg0, msg1, msg2},
+		},
+		{
+			name:   "num=3 off=0 compressed",
+			offset: 0,
+			builder: fetchResponseBuilder{
+				header: fetchResponseHeader{
+					highWatermarkOffset: highWatermark,
+					lastStableOffset:    highWatermark,
+					topic:               topic,
+				},
+				msgSets: []messageSetBuilder{
+					v1MessageSetBuilder{
+						codec: new(gzip.Codec),
+						msgs:  []Message{msg0, msg1, msg2},
+					},
+				},
+			},
+			expected: []Message{msg0, msg1, msg2},
+		},
+		{
+			name:   "num=3 off=1",
+			offset: 1,
+			debug:  true,
+			builder: fetchResponseBuilder{
+				header: fetchResponseHeader{
+					highWatermarkOffset: highWatermark,
+					lastStableOffset:    highWatermark,
+					topic:               topic,
+				},
+				msgSets: []messageSetBuilder{
+					v1MessageSetBuilder{
+						msgs: []Message{msg0, msg1, msg2},
+					},
+				},
+			},
+			expected: []Message{msg1, msg2},
+		},
+		{
+			name:   "num=3 off=1 compressed",
+			offset: 1,
+			debug:  true,
+			builder: fetchResponseBuilder{
+				header: fetchResponseHeader{
+					highWatermarkOffset: highWatermark,
+					lastStableOffset:    highWatermark,
+					topic:               topic,
+				},
+				msgSets: []messageSetBuilder{
+					v1MessageSetBuilder{
+						codec: new(gzip.Codec),
+						msgs:  []Message{msg0, msg1, msg2},
+					},
+				},
+			},
+			expected: []Message{msg1, msg2},
+		},
+		{
+			name:   "num=3 off=2 compressed",
+			offset: 2,
+			debug:  true,
+			builder: fetchResponseBuilder{
+				header: fetchResponseHeader{
+					highWatermarkOffset: highWatermark,
+					lastStableOffset:    highWatermark,
+					topic:               topic,
+				},
+				msgSets: []messageSetBuilder{
+					v1MessageSetBuilder{
+						codec: new(gzip.Codec),
+						msgs:  []Message{msg0, msg1, msg2},
+					},
+				},
+			},
+			expected: []Message{msg2},
+		},
+	} {
+		t.Run(tc.name, func(t *testing.T) {
+			bs := tc.builder.bytes()
+			r, err := newReaderHelper(t, bs)
+			require.NoError(t, err)
+			r.offset = tc.offset
+			r.debug = tc.debug
+			filter := func(msg Message) (res Message) {
+				res.Offset = msg.Offset
+				res.Key = msg.Key
+				res.Value = msg.Value
+				return res
+			}
+			for _, expected := range tc.expected {
+				msg := filter(r.readMessage())
+				require.EqualValues(t, expected, msg)
+			}
+			// finally, verify no more bytes remain
+			require.EqualValues(t, 0, r.remain)
+			_, err = r.readMessageErr()
+			require.EqualError(t, err, errShortRead.Error())
+		})
+	}
+}
+
+func TestMessageSetReader(t *testing.T) {
+	const startOffset = 1000
+	const highWatermark = 5000
+	const topic = "test-topic"
+	msgs := make([]Message, 100)
+	for i := 0; i < 100; i++ {
+		msgs[i] = Message{
+			Time:   time.Now(),
+			Offset: int64(i + startOffset),
+			Key:    []byte(fmt.Sprintf("key-%d", i)),
+			Value:  []byte(fmt.Sprintf("val-%d", i)),
+			Headers: []Header{
+				{
+					Key:   fmt.Sprintf("header-key-%d", i),
+					Value: []byte(fmt.Sprintf("header-value-%d", i)),
+				},
+			},
+		}
+	}
+	defaultHeader := fetchResponseHeader{
+		highWatermarkOffset: highWatermark,
+		lastStableOffset:    highWatermark,
+		topic:               topic,
+	}
+	for _, tc := range []struct {
+		name    string
+		builder fetchResponseBuilder
+		err     error
+		debug   bool
+	}{
+		{
+			name: "empty",
+			builder: fetchResponseBuilder{
+				header: defaultHeader,
+			},
+			err: errShortRead,
+		},
+		{
+			name: "v0",
+			builder: fetchResponseBuilder{
+				header: defaultHeader,
+				msgSets: []messageSetBuilder{
+					v0MessageSetBuilder{
+						msgs: []Message{msgs[0]},
+					},
+				},
+			},
+		},
+		{
+			name: "v0 compressed",
+			builder: fetchResponseBuilder{
+				header: defaultHeader,
+				msgSets: []messageSetBuilder{
+					v0MessageSetBuilder{
+						codec: new(gzip.Codec),
+						msgs:  []Message{msgs[0]},
+					},
+				},
+			},
+		},
+		{
+			name: "v1",
+			builder: fetchResponseBuilder{
+				header: defaultHeader,
+				msgSets: []messageSetBuilder{
+					v1MessageSetBuilder{
+						msgs: []Message{msgs[0]},
+					},
+				},
+			},
+		},
+		{
+			name: "v1 compressed",
+			builder: fetchResponseBuilder{
+				header: defaultHeader,
+				msgSets: []messageSetBuilder{
+					v1MessageSetBuilder{
+						codec: new(gzip.Codec),
+						msgs:  []Message{msgs[0]},
+					},
+				},
+			},
+		},
+		{
+			name: "v2",
+			builder: fetchResponseBuilder{
+				header: defaultHeader,
+				msgSets: []messageSetBuilder{
+					v2MessageSetBuilder{
+						msgs: []Message{msgs[0]},
+					},
+				},
+			},
+		},
+		{
+			name: "v2 compressed",
+			builder: fetchResponseBuilder{
+				header: defaultHeader,
+				msgSets: []messageSetBuilder{
+					v2MessageSetBuilder{
+						codec: new(zstd.Codec),
+						msgs:  []Message{msgs[0]},
+					},
+				},
+			},
+		},
+		{
+			name: "v2 multiple messages",
+			builder: fetchResponseBuilder{
+				header: defaultHeader,
+				msgSets: []messageSetBuilder{
+					v2MessageSetBuilder{
+						msgs: []Message{msgs[0], msgs[1], msgs[2], msgs[3], msgs[4]},
+					},
+				},
+			},
+		},
+		{
+			name: "v2 multiple messages compressed",
+			builder: fetchResponseBuilder{
+				header: defaultHeader,
+				msgSets: []messageSetBuilder{
+					v2MessageSetBuilder{
+						codec: new(snappy.Codec),
+						msgs:  []Message{msgs[0], msgs[1], msgs[2], msgs[3], msgs[4]},
+					},
+				},
+			},
+		},
+		{
+			name: "v2 mix of compressed and uncompressed message sets",
+			builder: fetchResponseBuilder{
+				header: defaultHeader,
+				msgSets: []messageSetBuilder{
+					v2MessageSetBuilder{
+						codec: new(snappy.Codec),
+						msgs:  []Message{msgs[0], msgs[1], msgs[2], msgs[3], msgs[4]},
+					},
+					v2MessageSetBuilder{
+						msgs: []Message{msgs[5], msgs[6], msgs[7], msgs[8], msgs[9]},
+					},
+					v2MessageSetBuilder{
+						codec: new(snappy.Codec),
+						msgs:  []Message{msgs[10], msgs[11], msgs[12], msgs[13], msgs[14]},
+					},
+					v2MessageSetBuilder{
+						msgs: []Message{msgs[15], msgs[16], msgs[17], msgs[18], msgs[19]},
+					},
+				},
+			},
+		},
+		{
+			name: "v0 v2 v1 v2 v1 v1 v0 v2",
+			builder: fetchResponseBuilder{
+				header: defaultHeader,
+				msgSets: []messageSetBuilder{
+					v0MessageSetBuilder{
+						msgs: []Message{msgs[0]},
+					},
+					v2MessageSetBuilder{
+						msgs: []Message{msgs[1], msgs[2]},
+					},
+					v1MessageSetBuilder{
+						msgs: []Message{msgs[3]},
+					},
+					v2MessageSetBuilder{
+						msgs: []Message{msgs[4], msgs[5]},
+					},
+					v1MessageSetBuilder{
+						msgs: []Message{msgs[6]},
+					},
+					v1MessageSetBuilder{
+						msgs: []Message{msgs[7]},
+					},
+					v0MessageSetBuilder{
+						msgs: []Message{msgs[8]},
+					},
+					v2MessageSetBuilder{
+						msgs: []Message{msgs[9], msgs[10]},
+					},
+				},
+			},
+		},
+		{
+			name: "v0 v2 v1 v2 v1 v1 v0 v2 mixed compression",
+			builder: fetchResponseBuilder{
+				header: defaultHeader,
+				msgSets: []messageSetBuilder{
+					v0MessageSetBuilder{
+						codec: new(gzip.Codec),
+						msgs:  []Message{msgs[0]},
+					},
+					v2MessageSetBuilder{
+						codec: new(zstd.Codec),
+						msgs:  []Message{msgs[1], msgs[2]},
+					},
+					v1MessageSetBuilder{
+						codec: new(snappy.Codec),
+						msgs:  []Message{msgs[3]},
+					},
+					v2MessageSetBuilder{
+						codec: new(lz4.Codec),
+						msgs:  []Message{msgs[4], msgs[5]},
+					},
+					v1MessageSetBuilder{
+						codec: new(gzip.Codec),
+						msgs:  []Message{msgs[6]},
+					},
+					v1MessageSetBuilder{
+						codec: new(zstd.Codec),
+						msgs:  []Message{msgs[7]},
+					},
+					v0MessageSetBuilder{
+						codec: new(snappy.Codec),
+						msgs:  []Message{msgs[8]},
+					},
+					v2MessageSetBuilder{
+						codec: new(lz4.Codec),
+						msgs:  []Message{msgs[9], msgs[10]},
+					},
+				},
+			},
+		},
+		{
+			name: "v0 v2 v1 v2 v1 v1 v0 v2 mixed compression with non-compressed",
+			builder: fetchResponseBuilder{
+				header: defaultHeader,
+				msgSets: []messageSetBuilder{
+					v0MessageSetBuilder{
+						codec: new(gzip.Codec),
+						msgs:  []Message{msgs[0]},
+					},
+					v2MessageSetBuilder{
+						msgs: []Message{msgs[1], msgs[2]},
+					},
+					v1MessageSetBuilder{
+						codec: new(snappy.Codec),
+						msgs:  []Message{msgs[3]},
+					},
+					v2MessageSetBuilder{
+						msgs: []Message{msgs[4], msgs[5]},
+					},
+					v1MessageSetBuilder{
+						msgs: []Message{msgs[6]},
+					},
+					v1MessageSetBuilder{
+						codec: new(zstd.Codec),
+						msgs:  []Message{msgs[7]},
+					},
+					v0MessageSetBuilder{
+						msgs: []Message{msgs[8]},
+					},
+					v2MessageSetBuilder{
+						codec: new(lz4.Codec),
+						msgs:  []Message{msgs[9], msgs[10]},
+					},
+				},
+			},
+		},
+	} {
+		t.Run(tc.name, func(t *testing.T) {
+			rh, err := newReaderHelper(t, tc.builder.bytes())
+			require.Equal(t, tc.err, err)
+			if tc.err != nil {
+				return
+			}
+			rh.debug = tc.debug
+			for _, messageSet := range tc.builder.msgSets {
+				for _, expected := range messageSet.messages() {
+					msg := rh.readMessage()
+					require.Equal(t, expected.Offset, msg.Offset)
+					require.Equal(t, string(expected.Key), string(msg.Key))
+					require.Equal(t, string(expected.Value), string(msg.Value))
+					switch messageSet.(type) {
+					case v0MessageSetBuilder, v1MessageSetBuilder:
+						// v0 and v1 message sets do not have headers
+						require.Len(t, msg.Headers, 0)
+					case v2MessageSetBuilder:
+						// v2 message sets can have headers
+						require.EqualValues(t, expected.Headers, msg.Headers)
+					default:
+						t.Fatalf("unknown builder: %T", messageSet)
+					}
+					require.Equal(t, expected.Offset, msg.Offset)
+				}
+			}
+			// verify the reader stack is empty
+			require.EqualValues(t, 0, rh.remain)
+			require.EqualValues(t, 0, rh.count)
+			require.EqualValues(t, 0, rh.remaining())
+			require.Nil(t, rh.readerStack.parent)
+			// any further message is a short read
+			_, err = rh.readMessageErr()
+			require.EqualError(t, err, errShortRead.Error())
+		})
+	}
+
+}
+
+func TestMessageSetReaderEmpty(t *testing.T) {
+	m := messageSetReader{empty: true}
+
+	noop := func(*bufio.Reader, int, int) (int, error) {
+		return 0, nil
+	}
+
+	offset, _, timestamp, headers, err := m.readMessage(0, noop, noop)
+	if offset != 0 {
+		t.Errorf("expected offset of 0, got %d", offset)
+	}
+	if timestamp != 0 {
+		t.Errorf("expected timestamp of 0, got %d", timestamp)
+	}
+	if headers != nil {
+		t.Errorf("expected nil headers, got %v", headers)
+	}
+	if !errors.Is(err, RequestTimedOut) {
+		t.Errorf("expected RequestTimedOut, got %v", err)
+	}
+
+	if m.remaining() != 0 {
+		t.Errorf("expected 0 remaining, got %d", m.remaining())
+	}
+
+	if m.discard() != nil {
+		t.Errorf("unexpected error from discard(): %v", m.discard())
+	}
+}
+
+func TestMessageFixtures(t *testing.T) {
+	type fixtureMessage struct {
+		key   string
+		value string
+	}
+	var fixtureMessages = map[string]fixtureMessage{
+		"a": {key: "alpha", value: `{"count":0,"filler":"aaaaaaaaaa"}`},
+		"b": {key: "beta", value: `{"count":0,"filler":"bbbbbbbbbb"}`},
+		"c": {key: "gamma", value: `{"count":0,"filler":"cccccccccc"}`},
+		"d": {key: "delta", value: `{"count":0,"filler":"dddddddddd"}`},
+		"e": {key: "epsilon", value: `{"count":0,"filler":"eeeeeeeeee"}`},
+		"f": {key: "zeta", value: `{"count":0,"filler":"ffffffffff"}`},
+		"g": {key: "eta", value: `{"count":0,"filler":"gggggggggg"}`},
+		"h": {key: "theta", value: `{"count":0,"filler":"hhhhhhhhhh"}`},
+	}
+
+	for _, tc := range []struct {
+		name     string
+		file     string
+		messages []string
+	}{
+		{
+			name:     "v2 followed by v1",
+			file:     "fixtures/v2b-v1.hex",
+			messages: []string{"a", "b", "a", "b"},
+		},
+		{
+			name:     "v2 compressed followed by v1 compressed",
+			file:     "fixtures/v2bc-v1c.hex",
+			messages: []string{"a", "b", "a", "b"},
+		},
+		{
+			name:     "v2 compressed followed by v1 uncompressed",
+			file:     "fixtures/v2bc-v1.hex",
+			messages: []string{"a", "b", "c", "d"},
+		},
+		{
+			name:     "v2 compressed followed by v1 uncompressed then v1 compressed",
+			file:     "fixtures/v2bc-v1-v1c.hex",
+			messages: []string{"a", "b", "c", "d", "e", "f"},
+		},
+		{
+			name:     "v2 compressed followed by v1 uncompressed then v1 compressed",
+			file:     "fixtures/v2bc-v1-v1c.hex",
+			messages: []string{"a", "b", "c", "d", "e", "f"},
+		},
+		{
+			name:     "v1 followed by v1",
+			file:     "fixtures/v1-v1.hex",
+			messages: []string{"a", "b", "c", "d"},
+		},
+		{
+			name:     "v1 compressed followed by v1 compressed",
+			file:     "fixtures/v1c-v1c.hex",
+			messages: []string{"a", "b", "c", "d"},
+		},
+		{
+			name:     "v1 compressed followed by v1 uncompressed then v1 compressed",
+			file:     "fixtures/v1c-v1-v1c.hex",
+			messages: []string{"a", "b", "c", "d", "e", "f"},
+		},
+		{
+			name:     "v2 followed by v2",
+			file:     "fixtures/v2-v2.hex",
+			messages: []string{"a", "b", "c", "d"},
+		},
+		{
+			name:     "v2 compressed followed by v2 compressed",
+			file:     "fixtures/v2c-v2c.hex",
+			messages: []string{"a", "b", "c", "d"},
+		},
+		{
+			name:     "v2 compressed followed by v2 uncompressed then v2 compressed",
+			file:     "fixtures/v2c-v2-v2c.hex",
+			messages: []string{"a", "b", "c", "d", "e", "f"},
+		},
+		{
+			name:     "v1 followed by v2 followed by v1 with mixture of compressed and uncompressed",
+			file:     "fixtures/v1-v1c-v2-v2c-v2b-v2b-v2b-v2bc-v1b-v1bc.hex",
+			messages: []string{"a", "b", "a", "b", "c", "d", "c", "d", "e", "f", "e", "f", "g", "h", "g", "h", "g", "h", "g", "h"},
+		},
+	} {
+		t.Run(tc.name, func(t *testing.T) {
+			bs, err := os.ReadFile(tc.file)
+			require.NoError(t, err)
+			buf := new(bytes.Buffer)
+			_, err = io.Copy(buf, hex.NewDecoder(bytes.NewReader(bs)))
+			require.NoError(t, err)
+
+			// discard 4 byte len and 4 byte correlation id
+			bs = make([]byte, 8)
+			buf.Read(bs)
+
+			rh, err := newReaderHelper(t, buf.Bytes())
+			require.NoError(t, err)
+			messageCount := 0
+			expectedMessageCount := len(tc.messages)
+			for _, expectedMessageId := range tc.messages {
+				expectedMessage := fixtureMessages[expectedMessageId]
+				msg := rh.readMessage()
+				messageCount++
+				require.Equal(t, expectedMessage.key, string(msg.Key))
+				require.Equal(t, expectedMessage.value, string(msg.Value))
+				t.Logf("Message %d key & value are what we expected: %s -> %s\n",
+					messageCount, string(msg.Key), string(msg.Value))
+			}
+			require.Equal(t, expectedMessageCount, messageCount)
+		})
+	}
+}
+
+func TestMessageSize(t *testing.T) {
+	rand.Seed(time.Now().UnixNano())
+	for i := 0; i < 20; i++ {
+		t.Run("Run", func(t *testing.T) {
+			msg := Message{
+				Key:   make([]byte, rand.Intn(200)),
+				Value: make([]byte, rand.Intn(200)),
+				Time:  randate(),
+			}
+			expSize := msg.message(nil).size()
+			gotSize := msg.size()
+			if expSize != gotSize {
+				t.Errorf("Expected size %d, but got size %d", expSize, gotSize)
+			}
+		})
+	}
+
+}
+
+// https://stackoverflow.com/questions/43495745/how-to-generate-random-date-in-go-lang/43497333#43497333
+func randate() time.Time {
+	min := time.Date(1970, 1, 0, 0, 0, 0, 0, time.UTC).Unix()
+	max := time.Date(2070, 1, 0, 0, 0, 0, 0, time.UTC).Unix()
+	delta := max - min
+
+	sec := rand.Int63n(delta) + min
+	return time.Unix(sec, 0)
+}
+
+// readerHelper composes a messageSetReader to provide convenience methods to read
+// messages.
+type readerHelper struct {
+	t *testing.T
+	*messageSetReader
+	offset int64
+}
+
+func newReaderHelper(t *testing.T, bs []byte) (r *readerHelper, err error) {
+	bufReader := bufio.NewReader(bytes.NewReader(bs))
+	_, _, remain, err := readFetchResponseHeaderV10(bufReader, len(bs))
+	require.NoError(t, err)
+	var msgs *messageSetReader
+	msgs, err = newMessageSetReader(bufReader, remain)
+	if err != nil {
+		return
+	}
+	r = &readerHelper{t: t, messageSetReader: msgs}
+	require.Truef(t, msgs.remaining() > 0, "remaining should be > 0 but was %d", msgs.remaining())
+	return
+}
+
+func (r *readerHelper) readMessageErr() (msg Message, err error) {
+	keyFunc := func(r *bufio.Reader, size int, nbytes int) (remain int, err error) {
+		msg.Key, remain, err = readNewBytes(r, size, nbytes)
+		return
+	}
+	valueFunc := func(r *bufio.Reader, size int, nbytes int) (remain int, err error) {
+		msg.Value, remain, err = readNewBytes(r, size, nbytes)
+		return
+	}
+	var timestamp int64
+	var headers []Header
+	r.offset, _, timestamp, headers, err = r.messageSetReader.readMessage(r.offset, keyFunc, valueFunc)
+	if err != nil {
+		return
+	}
+	msg.Offset = r.offset
+	msg.Time = time.Unix(timestamp/1000, (timestamp%1000)*1000000)
+	msg.Headers = headers
+	return
+}
+
+func (r *readerHelper) readMessage() (msg Message) {
+	var err error
+	msg, err = r.readMessageErr()
+	require.NoError(r.t, err)
+	return
+}
diff -pruN 0.2.1-1.1/metadata.go 0.4.49+ds1-1/metadata.go
--- 0.2.1-1.1/metadata.go	2018-11-20 17:06:33.000000000 +0000
+++ 0.4.49+ds1-1/metadata.go	2025-08-21 19:15:53.000000000 +0000
@@ -1,6 +1,111 @@
 package kafka
 
-import "bufio"
+import (
+	"context"
+	"fmt"
+	"net"
+	"time"
+
+	metadataAPI "github.com/segmentio/kafka-go/protocol/metadata"
+)
+
+// MetadataRequest represents a request sent to a kafka broker to retrieve its
+// cluster metadata.
+type MetadataRequest struct {
+	// Address of the kafka broker to send the request to.
+	Addr net.Addr
+
+	// The list of topics to retrieve metadata for.
+	Topics []string
+}
+
+// MetadataResponse represents a response from a kafka broker to a metadata
+// request.
+type MetadataResponse struct {
+	// The amount of time that the broker throttled the request.
+	Throttle time.Duration
+
+	// Name of the kafka cluster that the client retrieved metadata from.
+	ClusterID string
+
+	// The broker which is currently the controller for the cluster.
+	Controller Broker
+
+	// The list of brokers registered to the cluster.
+	Brokers []Broker
+
+	// The list of topics available on the cluster.
+	Topics []Topic
+}
+
+// Metadata sends a metadata request to a kafka broker and returns the response.
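+//
+// A minimal usage sketch (the broker address and topic name are placeholders):
+//
+//	client := &Client{Addr: TCP("localhost:9092")}
+//	resp, err := client.Metadata(ctx, &MetadataRequest{Topics: []string{"my-topic"}})
+//	if err == nil {
+//		for _, t := range resp.Topics {
+//			log.Printf("%s has %d partitions", t.Name, len(t.Partitions))
+//		}
+//	}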
+func (c *Client) Metadata(ctx context.Context, req *MetadataRequest) (*MetadataResponse, error) {
+	m, err := c.roundTrip(ctx, req.Addr, &metadataAPI.Request{
+		TopicNames: req.Topics,
+	})
+
+	if err != nil {
+		return nil, fmt.Errorf("kafka.(*Client).Metadata: %w", err)
+	}
+
+	res := m.(*metadataAPI.Response)
+	ret := &MetadataResponse{
+		Throttle:  makeDuration(res.ThrottleTimeMs),
+		Brokers:   make([]Broker, len(res.Brokers)),
+		Topics:    make([]Topic, len(res.Topics)),
+		ClusterID: res.ClusterID,
+	}
+
+	brokers := make(map[int32]Broker, len(res.Brokers))
+
+	for i, b := range res.Brokers {
+		broker := Broker{
+			Host: b.Host,
+			Port: int(b.Port),
+			ID:   int(b.NodeID),
+			Rack: b.Rack,
+		}
+
+		ret.Brokers[i] = broker
+		brokers[b.NodeID] = broker
+
+		if b.NodeID == res.ControllerID {
+			ret.Controller = broker
+		}
+	}
+
+	for i, t := range res.Topics {
+		ret.Topics[i] = Topic{
+			Name:       t.Name,
+			Internal:   t.IsInternal,
+			Partitions: make([]Partition, len(t.Partitions)),
+			Error:      makeError(t.ErrorCode, ""),
+		}
+
+		for j, p := range t.Partitions {
+			partition := Partition{
+				Topic:    t.Name,
+				ID:       int(p.PartitionIndex),
+				Leader:   brokers[p.LeaderID],
+				Replicas: make([]Broker, len(p.ReplicaNodes)),
+				Isr:      make([]Broker, len(p.IsrNodes)),
+				Error:    makeError(p.ErrorCode, ""),
+			}
+
+			for i, id := range p.ReplicaNodes {
+				partition.Replicas[i] = brokers[id]
+			}
+
+			for i, id := range p.IsrNodes {
+				partition.Isr[i] = brokers[id]
+			}
+
+			ret.Topics[i].Partitions[j] = partition
+		}
+	}
+
+	return ret, nil
+}
 
 type topicMetadataRequestV1 []string
 
@@ -8,8 +113,15 @@ func (r topicMetadataRequestV1) size() i
 	return sizeofStringArray([]string(r))
 }
 
-func (r topicMetadataRequestV1) writeTo(w *bufio.Writer) {
-	writeStringArray(w, []string(r))
+func (r topicMetadataRequestV1) writeTo(wb *writeBuffer) {
+	// communicate nil-ness to the broker by passing -1 as the array length.
+	// for this particular request, the broker interprets a zero-length array
+	// as a request for no topics, whereas a nil array requests all topics.
+	if r == nil {
+		wb.writeArrayLen(-1)
+	} else {
+		wb.writeStringArray([]string(r))
+	}
 }
 
 type metadataResponseV1 struct {
@@ -24,10 +136,10 @@ func (r metadataResponseV1) size() int32
 	return 4 + n1 + n2
 }
 
-func (r metadataResponseV1) writeTo(w *bufio.Writer) {
-	writeArray(w, len(r.Brokers), func(i int) { r.Brokers[i].writeTo(w) })
-	writeInt32(w, r.ControllerID)
-	writeArray(w, len(r.Topics), func(i int) { r.Topics[i].writeTo(w) })
+func (r metadataResponseV1) writeTo(wb *writeBuffer) {
+	wb.writeArray(len(r.Brokers), func(i int) { r.Brokers[i].writeTo(wb) })
+	wb.writeInt32(r.ControllerID)
+	wb.writeArray(len(r.Topics), func(i int) { r.Topics[i].writeTo(wb) })
 }
 
 type brokerMetadataV1 struct {
@@ -41,11 +153,11 @@ func (b brokerMetadataV1) size() int32 {
 	return 4 + 4 + sizeofString(b.Host) + sizeofString(b.Rack)
 }
 
-func (b brokerMetadataV1) writeTo(w *bufio.Writer) {
-	writeInt32(w, b.NodeID)
-	writeString(w, b.Host)
-	writeInt32(w, b.Port)
-	writeString(w, b.Rack)
+func (b brokerMetadataV1) writeTo(wb *writeBuffer) {
+	wb.writeInt32(b.NodeID)
+	wb.writeString(b.Host)
+	wb.writeInt32(b.Port)
+	wb.writeString(b.Rack)
 }
 
 type topicMetadataV1 struct {
@@ -61,11 +173,11 @@ func (t topicMetadataV1) size() int32 {
 		sizeofArray(len(t.Partitions), func(i int) int32 { return t.Partitions[i].size() })
 }
 
-func (t topicMetadataV1) writeTo(w *bufio.Writer) {
-	writeInt16(w, t.TopicErrorCode)
-	writeString(w, t.TopicName)
-	writeBool(w, t.Internal)
-	writeArray(w, len(t.Partitions), func(i int) { t.Partitions[i].writeTo(w) })
+func (t topicMetadataV1) writeTo(wb *writeBuffer) {
+	wb.writeInt16(t.TopicErrorCode)
+	wb.writeString(t.TopicName)
+	wb.writeBool(t.Internal)
+	wb.writeArray(len(t.Partitions), func(i int) { t.Partitions[i].writeTo(wb) })
 }
 
 type partitionMetadataV1 struct {
@@ -80,10 +192,96 @@ func (p partitionMetadataV1) size() int3
 	return 2 + 4 + 4 + sizeofInt32Array(p.Replicas) + sizeofInt32Array(p.Isr)
 }
 
-func (p partitionMetadataV1) writeTo(w *bufio.Writer) {
-	writeInt16(w, p.PartitionErrorCode)
-	writeInt32(w, p.PartitionID)
-	writeInt32(w, p.Leader)
-	writeInt32Array(w, p.Replicas)
-	writeInt32Array(w, p.Isr)
+func (p partitionMetadataV1) writeTo(wb *writeBuffer) {
+	wb.writeInt16(p.PartitionErrorCode)
+	wb.writeInt32(p.PartitionID)
+	wb.writeInt32(p.Leader)
+	wb.writeInt32Array(p.Replicas)
+	wb.writeInt32Array(p.Isr)
+}
+
+type topicMetadataRequestV6 struct {
+	Topics                 []string
+	AllowAutoTopicCreation bool
+}
+
+func (r topicMetadataRequestV6) size() int32 {
+	return sizeofStringArray([]string(r.Topics)) + 1
+}
+
+func (r topicMetadataRequestV6) writeTo(wb *writeBuffer) {
+	// communicate nil-ness to the broker by passing -1 as the array length.
+	// for this particular request, the broker interprets a zero-length array
+	// as a request for no topics, whereas a nil array requests all topics.
+	if r.Topics == nil {
+		wb.writeArrayLen(-1)
+	} else {
+		wb.writeStringArray([]string(r.Topics))
+	}
+	wb.writeBool(r.AllowAutoTopicCreation)
+}
+
+type metadataResponseV6 struct {
+	ThrottleTimeMs int32
+	Brokers        []brokerMetadataV1
+	ClusterId      string
+	ControllerID   int32
+	Topics         []topicMetadataV6
+}
+
+func (r metadataResponseV6) size() int32 {
+	n1 := sizeofArray(len(r.Brokers), func(i int) int32 { return r.Brokers[i].size() })
+	n2 := sizeofNullableString(&r.ClusterId)
+	n3 := sizeofArray(len(r.Topics), func(i int) int32 { return r.Topics[i].size() })
+	return 4 + 4 + n1 + n2 + n3
+}
+
+func (r metadataResponseV6) writeTo(wb *writeBuffer) {
+	wb.writeInt32(r.ThrottleTimeMs)
+	wb.writeArray(len(r.Brokers), func(i int) { r.Brokers[i].writeTo(wb) })
+	wb.writeString(r.ClusterId)
+	wb.writeInt32(r.ControllerID)
+	wb.writeArray(len(r.Topics), func(i int) { r.Topics[i].writeTo(wb) })
+}
+
+type topicMetadataV6 struct {
+	TopicErrorCode int16
+	TopicName      string
+	Internal       bool
+	Partitions     []partitionMetadataV6
+}
+
+func (t topicMetadataV6) size() int32 {
+	return 2 + 1 +
+		sizeofString(t.TopicName) +
+		sizeofArray(len(t.Partitions), func(i int) int32 { return t.Partitions[i].size() })
+}
+
+func (t topicMetadataV6) writeTo(wb *writeBuffer) {
+	wb.writeInt16(t.TopicErrorCode)
+	wb.writeString(t.TopicName)
+	wb.writeBool(t.Internal)
+	wb.writeArray(len(t.Partitions), func(i int) { t.Partitions[i].writeTo(wb) })
+}
+
+type partitionMetadataV6 struct {
+	PartitionErrorCode int16
+	PartitionID        int32
+	Leader             int32
+	Replicas           []int32
+	Isr                []int32
+	OfflineReplicas    []int32
+}
+
+func (p partitionMetadataV6) size() int32 {
+	return 2 + 4 + 4 + sizeofInt32Array(p.Replicas) + sizeofInt32Array(p.Isr) + sizeofInt32Array(p.OfflineReplicas)
+}
+
+func (p partitionMetadataV6) writeTo(wb *writeBuffer) {
+	wb.writeInt16(p.PartitionErrorCode)
+	wb.writeInt32(p.PartitionID)
+	wb.writeInt32(p.Leader)
+	wb.writeInt32Array(p.Replicas)
+	wb.writeInt32Array(p.Isr)
+	wb.writeInt32Array(p.OfflineReplicas)
 }
diff -pruN 0.2.1-1.1/metadata_test.go 0.4.49+ds1-1/metadata_test.go
--- 0.2.1-1.1/metadata_test.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/metadata_test.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,75 @@
+package kafka
+
+import (
+	"context"
+	"testing"
+)
+
+func TestClientMetadata(t *testing.T) {
+	client, topic, shutdown := newLocalClientAndTopic()
+	defer shutdown()
+
+	metadata, err := client.Metadata(context.Background(), &MetadataRequest{
+		Topics: []string{topic},
+	})
+
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if len(metadata.Brokers) == 0 {
+		t.Error("no brokers were returned in the metadata response")
+	}
+
+	for _, b := range metadata.Brokers {
+		if b == (Broker{}) {
+			t.Error("unexpected broker with zero-value in metadata response")
+		}
+	}
+
+	if len(metadata.Topics) == 0 {
+		t.Error("no topics were returned in the metadata response")
+	} else {
+		topicMetadata := metadata.Topics[0]
+
+		if topicMetadata.Name != topic {
+			t.Error("invalid topic name:", topicMetadata.Name)
+		}
+
+		if len(topicMetadata.Partitions) == 0 {
+			t.Error("no partitions were returned in the topic metadata response")
+		} else {
+			partitionMetadata := topicMetadata.Partitions[0]
+
+			if partitionMetadata.Topic != topic {
+				t.Error("invalid partition topic name:", partitionMetadata.Topic)
+			}
+
+			if partitionMetadata.ID != 0 {
+				t.Error("invalid partition index:", partitionMetadata.ID)
+			}
+
+			if partitionMetadata.Leader == (Broker{}) {
+				t.Error("no partition leader was returned in the partition metadata response")
+			}
+
+			if partitionMetadata.Error != nil {
+				t.Error("unexpected error found in the partition metadata response:", partitionMetadata.Error)
+			}
+
+			// assume newLocalClientAndTopic creates the topic with one
+			// partition
+			if len(topicMetadata.Partitions) > 1 {
+				t.Error("too many partitions were returned in the topic metadata response")
+			}
+		}
+
+		if topicMetadata.Error != nil {
+			t.Error("unexpected error found in the topic metadata response:", topicMetadata.Error)
+		}
+
+		if len(metadata.Topics) > 1 {
+			t.Error("too many topics were returned in the metadata response")
+		}
+	}
+}
diff -pruN 0.2.1-1.1/offsetcommit.go 0.4.49+ds1-1/offsetcommit.go
--- 0.2.1-1.1/offsetcommit.go	2018-11-20 17:06:33.000000000 +0000
+++ 0.4.49+ds1-1/offsetcommit.go	2025-08-21 19:15:53.000000000 +0000
@@ -1,6 +1,141 @@
 package kafka
 
-import "bufio"
+import (
+	"bufio"
+	"context"
+	"fmt"
+	"net"
+	"time"
+
+	"github.com/segmentio/kafka-go/protocol/offsetcommit"
+)
+
+// OffsetCommit represents the commit of an offset to a partition.
+//
+// The extra metadata is opaque to the kafka protocol; it is intended to hold
+// information like an identifier for the process that committed the offset,
+// or the time at which the commit was made.
+type OffsetCommit struct {
+	Partition int
+	Offset    int64
+	Metadata  string
+}
+
+// OffsetCommitRequest represents a request sent to a kafka broker to commit
+// offsets for a partition.
+type OffsetCommitRequest struct {
+	// Address of the kafka broker to send the request to.
+	Addr net.Addr
+
+	// ID of the consumer group to publish the offsets for.
+	GroupID string
+
+	// ID of the consumer group generation.
+	GenerationID int
+
+	// ID of the group member submitting the offsets.
+	MemberID string
+
+	// ID of the group instance.
+	InstanceID string
+
+	// Set of topic partitions to publish the offsets for.
+	//
+	// Note that offset commits need to be submitted to the broker acting as the
+	// group coordinator. This will be automatically resolved by the transport.
+	Topics map[string][]OffsetCommit
+}
+
+// OffsetCommitResponse represents a response from a kafka broker to an offset
+// commit request.
+type OffsetCommitResponse struct {
+	// The amount of time that the broker throttled the request.
+	Throttle time.Duration
+
+	// Set of topic partitions that the kafka broker has accepted offset commits
+	// for.
+	Topics map[string][]OffsetCommitPartition
+}
+
+// OffsetCommitPartition represents the state of a single partition in responses
+// to committing offsets.
+type OffsetCommitPartition struct {
+	// ID of the partition.
+	Partition int
+
+	// An error that may have occurred while attempting to publish consumer
+	// group offsets for this partition.
+	//
+	// The error contains both the kafka error code, and an error message
+	// returned by the kafka broker. Programs may use the standard errors.Is
+	// function to test the error against kafka error codes.
+	Error error
+}
+
+// OffsetCommit sends an offset commit request to a kafka broker and returns the
+// response.
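+//
+// A brief sketch of committing offsets for a group (the group, member, and
+// offset values are placeholders):
+//
+//	resp, err := client.OffsetCommit(ctx, &OffsetCommitRequest{
+//		GroupID:      "my-group",
+//		GenerationID: generationID,
+//		MemberID:     memberID,
+//		Topics:       map[string][]OffsetCommit{"my-topic": {{Partition: 0, Offset: 42}}},
+//	})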
+func (c *Client) OffsetCommit(ctx context.Context, req *OffsetCommitRequest) (*OffsetCommitResponse, error) {
+	now := time.Now().UnixNano() / int64(time.Millisecond)
+	topics := make([]offsetcommit.RequestTopic, 0, len(req.Topics))
+
+	for topicName, commits := range req.Topics {
+		partitions := make([]offsetcommit.RequestPartition, len(commits))
+
+		for i, c := range commits {
+			partitions[i] = offsetcommit.RequestPartition{
+				PartitionIndex:    int32(c.Partition),
+				CommittedOffset:   c.Offset,
+				CommittedMetadata: c.Metadata,
+				// This field existed in v1 of the OffsetCommit API; setting it
+				// to the current timestamp is probably a safe thing to do, but
+				// it is hard to tell for certain.
+				CommitTimestamp: now,
+			}
+		}
+
+		topics = append(topics, offsetcommit.RequestTopic{
+			Name:       topicName,
+			Partitions: partitions,
+		})
+	}
+
+	m, err := c.roundTrip(ctx, req.Addr, &offsetcommit.Request{
+		GroupID:         req.GroupID,
+		GenerationID:    int32(req.GenerationID),
+		MemberID:        req.MemberID,
+		GroupInstanceID: req.InstanceID,
+		Topics:          topics,
+		// Hardcoded retention: this field existed between v2 and v4 of the
+		// OffsetCommit API. Supporting it in the request object would require
+		// giving the client control over which API version is used.
+		RetentionTimeMs: int64((24 * time.Hour) / time.Millisecond),
+	})
+	if err != nil {
+		return nil, fmt.Errorf("kafka.(*Client).OffsetCommit: %w", err)
+	}
+	r := m.(*offsetcommit.Response)
+
+	res := &OffsetCommitResponse{
+		Throttle: makeDuration(r.ThrottleTimeMs),
+		Topics:   make(map[string][]OffsetCommitPartition, len(r.Topics)),
+	}
+
+	for _, topic := range r.Topics {
+		partitions := make([]OffsetCommitPartition, len(topic.Partitions))
+
+		for i, p := range topic.Partitions {
+			partitions[i] = OffsetCommitPartition{
+				Partition: int(p.PartitionIndex),
+				Error:     makeError(p.ErrorCode, ""),
+			}
+		}
+
+		res.Topics[topic.Name] = partitions
+	}
+
+	return res, nil
+}
 
 type offsetCommitRequestV2Partition struct {
 	// Partition ID
@@ -19,10 +154,10 @@ func (t offsetCommitRequestV2Partition)
 		sizeofString(t.Metadata)
 }
 
-func (t offsetCommitRequestV2Partition) writeTo(w *bufio.Writer) {
-	writeInt32(w, t.Partition)
-	writeInt64(w, t.Offset)
-	writeString(w, t.Metadata)
+func (t offsetCommitRequestV2Partition) writeTo(wb *writeBuffer) {
+	wb.writeInt32(t.Partition)
+	wb.writeInt64(t.Offset)
+	wb.writeString(t.Metadata)
 }
 
 type offsetCommitRequestV2Topic struct {
@@ -38,9 +173,9 @@ func (t offsetCommitRequestV2Topic) size
 		sizeofArray(len(t.Partitions), func(i int) int32 { return t.Partitions[i].size() })
 }
 
-func (t offsetCommitRequestV2Topic) writeTo(w *bufio.Writer) {
-	writeString(w, t.Topic)
-	writeArray(w, len(t.Partitions), func(i int) { t.Partitions[i].writeTo(w) })
+func (t offsetCommitRequestV2Topic) writeTo(wb *writeBuffer) {
+	wb.writeString(t.Topic)
+	wb.writeArray(len(t.Partitions), func(i int) { t.Partitions[i].writeTo(wb) })
 }
 
 type offsetCommitRequestV2 struct {
@@ -68,12 +203,12 @@ func (t offsetCommitRequestV2) size() in
 		sizeofArray(len(t.Topics), func(i int) int32 { return t.Topics[i].size() })
 }
 
-func (t offsetCommitRequestV2) writeTo(w *bufio.Writer) {
-	writeString(w, t.GroupID)
-	writeInt32(w, t.GenerationID)
-	writeString(w, t.MemberID)
-	writeInt64(w, t.RetentionTime)
-	writeArray(w, len(t.Topics), func(i int) { t.Topics[i].writeTo(w) })
+func (t offsetCommitRequestV2) writeTo(wb *writeBuffer) {
+	wb.writeString(t.GroupID)
+	wb.writeInt32(t.GenerationID)
+	wb.writeString(t.MemberID)
+	wb.writeInt64(t.RetentionTime)
+	wb.writeArray(len(t.Topics), func(i int) { t.Topics[i].writeTo(wb) })
 }
 
 type offsetCommitResponseV2PartitionResponse struct {
@@ -88,9 +223,9 @@ func (t offsetCommitResponseV2PartitionR
 		sizeofInt16(t.ErrorCode)
 }
 
-func (t offsetCommitResponseV2PartitionResponse) writeTo(w *bufio.Writer) {
-	writeInt32(w, t.Partition)
-	writeInt16(w, t.ErrorCode)
+func (t offsetCommitResponseV2PartitionResponse) writeTo(wb *writeBuffer) {
+	wb.writeInt32(t.Partition)
+	wb.writeInt16(t.ErrorCode)
 }
 
 func (t *offsetCommitResponseV2PartitionResponse) readFrom(r *bufio.Reader, size int) (remain int, err error) {
@@ -113,9 +248,9 @@ func (t offsetCommitResponseV2Response)
 		sizeofArray(len(t.PartitionResponses), func(i int) int32 { return t.PartitionResponses[i].size() })
 }
 
-func (t offsetCommitResponseV2Response) writeTo(w *bufio.Writer) {
-	writeString(w, t.Topic)
-	writeArray(w, len(t.PartitionResponses), func(i int) { t.PartitionResponses[i].writeTo(w) })
+func (t offsetCommitResponseV2Response) writeTo(wb *writeBuffer) {
+	wb.writeString(t.Topic)
+	wb.writeArray(len(t.PartitionResponses), func(i int) { t.PartitionResponses[i].writeTo(wb) })
 }
 
 func (t *offsetCommitResponseV2Response) readFrom(r *bufio.Reader, size int) (remain int, err error) {
@@ -146,8 +281,8 @@ func (t offsetCommitResponseV2) size() i
 	return sizeofArray(len(t.Responses), func(i int) int32 { return t.Responses[i].size() })
 }
 
-func (t offsetCommitResponseV2) writeTo(w *bufio.Writer) {
-	writeArray(w, len(t.Responses), func(i int) { t.Responses[i].writeTo(w) })
+func (t offsetCommitResponseV2) writeTo(wb *writeBuffer) {
+	wb.writeArray(len(t.Responses), func(i int) { t.Responses[i].writeTo(wb) })
 }
 
 func (t *offsetCommitResponseV2) readFrom(r *bufio.Reader, size int) (remain int, err error) {
diff -pruN 0.2.1-1.1/offsetcommit_test.go 0.4.49+ds1-1/offsetcommit_test.go
--- 0.2.1-1.1/offsetcommit_test.go	2018-11-20 17:06:33.000000000 +0000
+++ 0.4.49+ds1-1/offsetcommit_test.go	2025-08-21 19:15:53.000000000 +0000
@@ -3,8 +3,13 @@ package kafka
 import (
 	"bufio"
 	"bytes"
+	"context"
+	"log"
+	"os"
 	"reflect"
+	"strconv"
 	"testing"
+	"time"
 )
 
 func TestOffsetCommitResponseV2(t *testing.T) {
@@ -22,13 +27,12 @@ func TestOffsetCommitResponseV2(t *testi
 		},
 	}
 
-	buf := bytes.NewBuffer(nil)
-	w := bufio.NewWriter(buf)
+	b := bytes.NewBuffer(nil)
+	w := &writeBuffer{w: b}
 	item.writeTo(w)
-	w.Flush()
 
 	var found offsetCommitResponseV2
-	remain, err := (&found).readFrom(bufio.NewReader(buf), buf.Len())
+	remain, err := (&found).readFrom(bufio.NewReader(b), b.Len())
 	if err != nil {
 		t.Error(err)
 		t.FailNow()
@@ -42,3 +46,114 @@ func TestOffsetCommitResponseV2(t *testi
 		t.FailNow()
 	}
 }
+
+func TestClientOffsetCommit(t *testing.T) {
+	topic := makeTopic()
+	client, shutdown := newLocalClientWithTopic(topic, 3)
+	defer shutdown()
+	now := time.Now()
+
+	const N = 10 * 3
+	records := make([]Record, 0, N)
+	for i := 0; i < N; i++ {
+		records = append(records, Record{
+			Time:  now,
+			Value: NewBytes([]byte("test-message-" + strconv.Itoa(i))),
+		})
+	}
+	ctx, cancel := context.WithTimeout(context.Background(), time.Second*30)
+	defer cancel()
+	res, err := client.Produce(ctx, &ProduceRequest{
+		Topic:        topic,
+		RequiredAcks: RequireAll,
+		Records:      NewRecordReader(records...),
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if res.Error != nil {
+		t.Error(res.Error)
+	}
+
+	for index, err := range res.RecordErrors {
+		t.Fatalf("record at index %d produced an error: %v", index, err)
+	}
+	ctx, cancel = context.WithTimeout(context.Background(), time.Second*30)
+	defer cancel()
+	groupID := makeGroupID()
+
+	group, err := NewConsumerGroup(ConsumerGroupConfig{
+		ID:                groupID,
+		Topics:            []string{topic},
+		Brokers:           []string{"localhost:9092"},
+		HeartbeatInterval: 2 * time.Second,
+		RebalanceTimeout:  2 * time.Second,
+		RetentionTime:     time.Hour,
+		Logger:            log.New(os.Stdout, "cg-test: ", 0),
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer group.Close()
+
+	gen, err := group.Next(ctx)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	ocr, err := client.OffsetCommit(ctx, &OffsetCommitRequest{
+		Addr:         nil,
+		GroupID:      groupID,
+		GenerationID: int(gen.ID),
+		MemberID:     gen.MemberID,
+		Topics: map[string][]OffsetCommit{
+			topic: {
+				{Partition: 0, Offset: 10},
+				{Partition: 1, Offset: 10},
+				{Partition: 2, Offset: 10},
+			},
+		},
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	resps := ocr.Topics[topic]
+	if len(resps) != 3 {
+		t.Fatalf("expected 3 offsetcommitpartition responses; got %d", len(resps))
+	}
+
+	for _, resp := range resps {
+		if resp.Error != nil {
+			t.Fatal(resp.Error)
+		}
+	}
+
+	ofr, err := client.OffsetFetch(ctx, &OffsetFetchRequest{
+		GroupID: groupID,
+		Topics:  map[string][]int{topic: {0, 1, 2}},
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if ofr.Error != nil {
+		t.Error(ofr.Error)
+	}
+
+	fetresps := ofr.Topics[topic]
+	if len(fetresps) != 3 {
+		t.Fatalf("expected 3 offsetfetchpartition responses; got %d", len(fetresps))
+	}
+
+	for _, r := range fetresps {
+		if r.Error != nil {
+			t.Fatal(r.Error)
+		}
+
+		if r.CommittedOffset != 10 {
+			t.Fatalf("expected committed offset to be 10; got: %v for partition: %v", r.CommittedOffset, r.Partition)
+		}
+	}
+}
diff -pruN 0.2.1-1.1/offsetdelete.go 0.4.49+ds1-1/offsetdelete.go
--- 0.2.1-1.1/offsetdelete.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/offsetdelete.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,106 @@
+package kafka
+
+import (
+	"context"
+	"fmt"
+	"net"
+	"time"
+
+	"github.com/segmentio/kafka-go/protocol/offsetdelete"
+)
+
+// OffsetDelete identifies a committed offset to delete for a consumer group
+// on a particular topic and partition.
+type OffsetDelete struct {
+	Topic     string
+	Partition int
+}
+
+// OffsetDeleteRequest represents a request sent to a kafka broker to delete
+// the offsets for a partition on a given topic associated with a consumer group.
+type OffsetDeleteRequest struct {
+	// Address of the kafka broker to send the request to.
+	Addr net.Addr
+
+	// ID of the consumer group to delete the offsets for.
+	GroupID string
+
+	// Set of topic partitions to delete offsets for.
+	Topics map[string][]int
+}
+
+// OffsetDeleteResponse represents a response from a kafka broker to a delete
+// offset request.
+type OffsetDeleteResponse struct {
+	// An error that may have occurred while attempting to delete an offset
+	Error error
+
+	// The amount of time that the broker throttled the request.
+	Throttle time.Duration
+
+	// Set of topic partitions for which the kafka broker returned additional
+	// information, such as per-partition errors.
+	Topics map[string][]OffsetDeletePartition
+}
+
+// OffsetDeletePartition represents the status of a single partition in
+// responses to deleting offsets.
+type OffsetDeletePartition struct {
+	// ID of the partition.
+	Partition int
+
+	// An error that may have occurred while attempting to delete an offset for
+	// this partition.
+	Error error
+}
+
+// OffsetDelete sends a delete offset request to a kafka broker and returns the
+// response.
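+//
+// A brief sketch of deleting the committed offsets of a group (the group and
+// topic names are placeholders):
+//
+//	resp, err := client.OffsetDelete(ctx, &OffsetDeleteRequest{
+//		GroupID: "my-group",
+//		Topics:  map[string][]int{"my-topic": {0, 1, 2}},
+//	})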
+func (c *Client) OffsetDelete(ctx context.Context, req *OffsetDeleteRequest) (*OffsetDeleteResponse, error) {
+	topics := make([]offsetdelete.RequestTopic, 0, len(req.Topics))
+
+	for topicName, partitionIndexes := range req.Topics {
+		partitions := make([]offsetdelete.RequestPartition, len(partitionIndexes))
+
+		for i, c := range partitionIndexes {
+			partitions[i] = offsetdelete.RequestPartition{
+				PartitionIndex: int32(c),
+			}
+		}
+
+		topics = append(topics, offsetdelete.RequestTopic{
+			Name:       topicName,
+			Partitions: partitions,
+		})
+	}
+
+	m, err := c.roundTrip(ctx, req.Addr, &offsetdelete.Request{
+		GroupID: req.GroupID,
+		Topics:  topics,
+	})
+	if err != nil {
+		return nil, fmt.Errorf("kafka.(*Client).OffsetDelete: %w", err)
+	}
+	r := m.(*offsetdelete.Response)
+
+	res := &OffsetDeleteResponse{
+		Error:    makeError(r.ErrorCode, ""),
+		Throttle: makeDuration(r.ThrottleTimeMs),
+		Topics:   make(map[string][]OffsetDeletePartition, len(r.Topics)),
+	}
+
+	for _, topic := range r.Topics {
+		partitions := make([]OffsetDeletePartition, len(topic.Partitions))
+
+		for i, p := range topic.Partitions {
+			partitions[i] = OffsetDeletePartition{
+				Partition: int(p.PartitionIndex),
+				Error:     makeError(p.ErrorCode, ""),
+			}
+		}
+
+		res.Topics[topic.Name] = partitions
+	}
+
+	return res, nil
+}
diff -pruN 0.2.1-1.1/offsetdelete_test.go 0.4.49+ds1-1/offsetdelete_test.go
--- 0.2.1-1.1/offsetdelete_test.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/offsetdelete_test.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,160 @@
+package kafka
+
+import (
+	"context"
+	"log"
+	"os"
+	"strconv"
+	"testing"
+	"time"
+
+	ktesting "github.com/segmentio/kafka-go/testing"
+)
+
+func TestClientDeleteOffset(t *testing.T) {
+	if !ktesting.KafkaIsAtLeast("2.4.0") {
+		return
+	}
+
+	topic := makeTopic()
+	client, shutdown := newLocalClientWithTopic(topic, 3)
+	defer shutdown()
+	now := time.Now()
+
+	const N = 10 * 3
+	records := make([]Record, 0, N)
+	for i := 0; i < N; i++ {
+		records = append(records, Record{
+			Time:  now,
+			Value: NewBytes([]byte("test-message-" + strconv.Itoa(i))),
+		})
+	}
+	ctx, cancel := context.WithTimeout(context.Background(), time.Second*30)
+	defer cancel()
+	res, err := client.Produce(ctx, &ProduceRequest{
+		Topic:        topic,
+		RequiredAcks: RequireAll,
+		Records:      NewRecordReader(records...),
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if res.Error != nil {
+		t.Error(res.Error)
+	}
+
+	for index, err := range res.RecordErrors {
+		t.Fatalf("record at index %d produced an error: %v", index, err)
+	}
+	ctx, cancel = context.WithTimeout(context.Background(), time.Second*30)
+	defer cancel()
+	groupID := makeGroupID()
+
+	group, err := NewConsumerGroup(ConsumerGroupConfig{
+		ID:                groupID,
+		Topics:            []string{topic},
+		Brokers:           []string{"localhost:9092"},
+		HeartbeatInterval: 2 * time.Second,
+		RebalanceTimeout:  2 * time.Second,
+		RetentionTime:     time.Hour,
+		Logger:            log.New(os.Stdout, "cg-test: ", 0),
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	gen, err := group.Next(ctx)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	ocr, err := client.OffsetCommit(ctx, &OffsetCommitRequest{
+		Addr:         nil,
+		GroupID:      groupID,
+		GenerationID: int(gen.ID),
+		MemberID:     gen.MemberID,
+		Topics: map[string][]OffsetCommit{
+			topic: {
+				{Partition: 0, Offset: 10},
+				{Partition: 1, Offset: 10},
+				{Partition: 2, Offset: 10},
+			},
+		},
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	group.Close()
+
+	resps := ocr.Topics[topic]
+	if len(resps) != 3 {
+		t.Fatalf("expected 3 offsetcommitpartition responses; got %d", len(resps))
+	}
+
+	for _, resp := range resps {
+		if resp.Error != nil {
+			t.Fatal(resp.Error)
+		}
+	}
+
+	ofr, err := client.OffsetFetch(ctx, &OffsetFetchRequest{
+		GroupID: groupID,
+		Topics:  map[string][]int{topic: {0, 1, 2}},
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if ofr.Error != nil {
+		t.Error(ofr.Error)
+	}
+
+	fetresps := ofr.Topics[topic]
+	if len(fetresps) != 3 {
+		t.Fatalf("expected 3 offsetfetchpartition responses; got %d", len(fetresps))
+	}
+
+	for _, r := range fetresps {
+		if r.Error != nil {
+			t.Fatal(r.Error)
+		}
+
+		if r.CommittedOffset != 10 {
+			t.Fatalf("expected committed offset to be 10; got: %v for partition: %v", r.CommittedOffset, r.Partition)
+		}
+	}
+
+	// Remove offsets
+	odr, err := client.OffsetDelete(ctx, &OffsetDeleteRequest{
+		GroupID: groupID,
+		Topics:  map[string][]int{topic: {0, 1, 2}},
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if odr.Error != nil {
+		t.Error(odr.Error)
+	}
+
+	// Fetch the offsets again
+	ofr, err = client.OffsetFetch(ctx, &OffsetFetchRequest{
+		GroupID: groupID,
+		Topics:  map[string][]int{topic: {0, 1, 2}},
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if ofr.Error != nil {
+		t.Error(ofr.Error)
+	}
+
+	for _, r := range ofr.Topics[topic] {
+		if r.CommittedOffset != -1 {
+			t.Fatalf("expected committed offset to be -1; got: %v for partition: %v", r.CommittedOffset, r.Partition)
+		}
+	}
+}
diff -pruN 0.2.1-1.1/offsetfetch.go 0.4.49+ds1-1/offsetfetch.go
--- 0.2.1-1.1/offsetfetch.go	2018-11-20 17:06:33.000000000 +0000
+++ 0.4.49+ds1-1/offsetfetch.go	2025-08-21 19:15:53.000000000 +0000
@@ -2,8 +2,128 @@ package kafka
 
 import (
 	"bufio"
+	"context"
+	"fmt"
+	"net"
+	"time"
+
+	"github.com/segmentio/kafka-go/protocol/offsetfetch"
 )
 
+// OffsetFetchRequest represents a request sent to a kafka broker to read the
+// currently committed offsets of topic partitions.
+type OffsetFetchRequest struct {
+	// Address of the kafka broker to send the request to.
+	Addr net.Addr
+
+	// ID of the consumer group to retrieve the offsets for.
+	GroupID string
+
+	// Set of topic partitions to retrieve the offsets for.
+	Topics map[string][]int
+}
+
+// OffsetFetchResponse represents a response from a kafka broker to an offset
+// fetch request.
+type OffsetFetchResponse struct {
+	// The amount of time that the broker throttled the request.
+	Throttle time.Duration
+
+	// Set of topic partitions that the kafka broker has returned offsets for.
+	Topics map[string][]OffsetFetchPartition
+
+	// An error that may have occurred while attempting to retrieve consumer
+	// group offsets.
+	//
+	// The error contains both the kafka error code, and an error message
+	// returned by the kafka broker. Programs may use the standard errors.Is
+	// function to test the error against kafka error codes.
+	Error error
+}
+
+// OffsetFetchPartition represents the state of a single partition in a consumer
+// group.
+type OffsetFetchPartition struct {
+	// ID of the partition.
+	Partition int
+
+	// Last committed offsets on the partition when the request was served by
+	// the kafka broker.
+	CommittedOffset int64
+
+	// Consumer group metadata for this partition.
+	Metadata string
+
+	// An error that may have occurred while attempting to retrieve consumer
+	// group offsets for this partition.
+	//
+	// The error contains both the kafka error code, and an error message
+	// returned by the kafka broker. Programs may use the standard errors.Is
+	// function to test the error against kafka error codes.
+	Error error
+}
+
+// OffsetFetch sends an offset fetch request to a kafka broker and returns the
+// response.
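+//
+// A brief sketch of fetching the committed offsets of a group (names are
+// placeholders; a nil Topics map requests all topics for the group on brokers
+// that support it):
+//
+//	resp, err := client.OffsetFetch(ctx, &OffsetFetchRequest{
+//		GroupID: "my-group",
+//		Topics:  map[string][]int{"my-topic": {0, 1, 2}},
+//	})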
+func (c *Client) OffsetFetch(ctx context.Context, req *OffsetFetchRequest) (*OffsetFetchResponse, error) {
+
+	// Kafka versions 0.10.2 and above allow a null Topics map for the OffsetFetch
+	// API, which returns the result for all topics of the given consumer group:
+	// https://kafka.apache.org/0102/protocol.html#The_Messages_OffsetFetch
+	// For Kafka versions below 0.10.2 this call will result in an error.
+	var topics []offsetfetch.RequestTopic
+
+	if len(req.Topics) > 0 {
+		topics = make([]offsetfetch.RequestTopic, 0, len(req.Topics))
+
+		for topicName, partitions := range req.Topics {
+			indexes := make([]int32, len(partitions))
+
+			for i, p := range partitions {
+				indexes[i] = int32(p)
+			}
+
+			topics = append(topics, offsetfetch.RequestTopic{
+				Name:             topicName,
+				PartitionIndexes: indexes,
+			})
+		}
+	}
+
+	m, err := c.roundTrip(ctx, req.Addr, &offsetfetch.Request{
+		GroupID: req.GroupID,
+		Topics:  topics,
+	})
+
+	if err != nil {
+		return nil, fmt.Errorf("kafka.(*Client).OffsetFetch: %w", err)
+	}
+
+	res := m.(*offsetfetch.Response)
+	ret := &OffsetFetchResponse{
+		Throttle: makeDuration(res.ThrottleTimeMs),
+		Topics:   make(map[string][]OffsetFetchPartition, len(res.Topics)),
+		Error:    makeError(res.ErrorCode, ""),
+	}
+
+	for _, t := range res.Topics {
+		partitions := make([]OffsetFetchPartition, len(t.Partitions))
+
+		for i, p := range t.Partitions {
+			partitions[i] = OffsetFetchPartition{
+				Partition:       int(p.PartitionIndex),
+				CommittedOffset: p.CommittedOffset,
+				Metadata:        p.Metadata,
+				Error:           makeError(p.ErrorCode, ""),
+			}
+		}
+
+		ret.Topics[t.Name] = partitions
+	}
+
+	return ret, nil
+}
+
 type offsetFetchRequestV1Topic struct {
 	// Topic name
 	Topic string
@@ -17,9 +137,9 @@ func (t offsetFetchRequestV1Topic) size(
 		sizeofInt32Array(t.Partitions)
 }
 
-func (t offsetFetchRequestV1Topic) writeTo(w *bufio.Writer) {
-	writeString(w, t.Topic)
-	writeInt32Array(w, t.Partitions)
+func (t offsetFetchRequestV1Topic) writeTo(wb *writeBuffer) {
+	wb.writeString(t.Topic)
+	wb.writeInt32Array(t.Partitions)
 }
 
 type offsetFetchRequestV1 struct {
@@ -35,9 +155,9 @@ func (t offsetFetchRequestV1) size() int
 		sizeofArray(len(t.Topics), func(i int) int32 { return t.Topics[i].size() })
 }
 
-func (t offsetFetchRequestV1) writeTo(w *bufio.Writer) {
-	writeString(w, t.GroupID)
-	writeArray(w, len(t.Topics), func(i int) { t.Topics[i].writeTo(w) })
+func (t offsetFetchRequestV1) writeTo(wb *writeBuffer) {
+	wb.writeString(t.GroupID)
+	wb.writeArray(len(t.Topics), func(i int) { t.Topics[i].writeTo(wb) })
 }
 
 type offsetFetchResponseV1PartitionResponse struct {
@@ -61,11 +181,11 @@ func (t offsetFetchResponseV1PartitionRe
 		sizeofInt16(t.ErrorCode)
 }
 
-func (t offsetFetchResponseV1PartitionResponse) writeTo(w *bufio.Writer) {
-	writeInt32(w, t.Partition)
-	writeInt64(w, t.Offset)
-	writeString(w, t.Metadata)
-	writeInt16(w, t.ErrorCode)
+func (t offsetFetchResponseV1PartitionResponse) writeTo(wb *writeBuffer) {
+	wb.writeInt32(t.Partition)
+	wb.writeInt64(t.Offset)
+	wb.writeString(t.Metadata)
+	wb.writeInt16(t.ErrorCode)
 }
 
 func (t *offsetFetchResponseV1PartitionResponse) readFrom(r *bufio.Reader, size int) (remain int, err error) {
@@ -97,9 +217,9 @@ func (t offsetFetchResponseV1Response) s
 		sizeofArray(len(t.PartitionResponses), func(i int) int32 { return t.PartitionResponses[i].size() })
 }
 
-func (t offsetFetchResponseV1Response) writeTo(w *bufio.Writer) {
-	writeString(w, t.Topic)
-	writeArray(w, len(t.PartitionResponses), func(i int) { t.PartitionResponses[i].writeTo(w) })
+func (t offsetFetchResponseV1Response) writeTo(wb *writeBuffer) {
+	wb.writeString(t.Topic)
+	wb.writeArray(len(t.PartitionResponses), func(i int) { t.PartitionResponses[i].writeTo(wb) })
 }
 
 func (t *offsetFetchResponseV1Response) readFrom(r *bufio.Reader, size int) (remain int, err error) {
@@ -109,7 +229,7 @@ func (t *offsetFetchResponseV1Response)
 
 	fn := func(r *bufio.Reader, size int) (fnRemain int, fnErr error) {
 		item := offsetFetchResponseV1PartitionResponse{}
-		if fnRemain, fnErr = (&item).readFrom(r, size); err != nil {
+		if fnRemain, fnErr = (&item).readFrom(r, size); fnErr != nil {
 			return
 		}
 		t.PartitionResponses = append(t.PartitionResponses, item)
@@ -131,8 +251,8 @@ func (t offsetFetchResponseV1) size() in
 	return sizeofArray(len(t.Responses), func(i int) int32 { return t.Responses[i].size() })
 }
 
-func (t offsetFetchResponseV1) writeTo(w *bufio.Writer) {
-	writeArray(w, len(t.Responses), func(i int) { t.Responses[i].writeTo(w) })
+func (t offsetFetchResponseV1) writeTo(wb *writeBuffer) {
+	wb.writeArray(len(t.Responses), func(i int) { t.Responses[i].writeTo(wb) })
 }
 
 func (t *offsetFetchResponseV1) readFrom(r *bufio.Reader, size int) (remain int, err error) {
@@ -150,19 +270,3 @@ func (t *offsetFetchResponseV1) readFrom
 
 	return
 }
-
-func findOffset(topic string, partition int32, response offsetFetchResponseV1) (int64, bool) {
-	for _, r := range response.Responses {
-		if r.Topic != topic {
-			continue
-		}
-
-		for _, pr := range r.PartitionResponses {
-			if pr.Partition == partition {
-				return pr.Offset, true
-			}
-		}
-	}
-
-	return 0, false
-}
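For orientation, here is a minimal usage sketch of the Client.OffsetFetch API added above; the broker address and group id are placeholders, and partition-level errors can be matched against kafka error codes with errors.Is, as the doc comments note.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/segmentio/kafka-go"
)

func main() {
	client := &kafka.Client{Addr: kafka.TCP("localhost:9092")}

	// A nil Topics map asks the broker for offsets of every topic the group
	// has committed to (supported by Kafka 0.10.2 and above).
	res, err := client.OffsetFetch(context.Background(), &kafka.OffsetFetchRequest{
		GroupID: "example-group",
	})
	if err != nil {
		log.Fatal(err)
	}
	if res.Error != nil {
		log.Fatal(res.Error)
	}

	for topic, partitions := range res.Topics {
		for _, p := range partitions {
			if p.Error != nil {
				fmt.Printf("%s[%d]: %v\n", topic, p.Partition, p.Error)
				continue
			}
			fmt.Printf("%s[%d]: committed offset %d\n", topic, p.Partition, p.CommittedOffset)
		}
	}
}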
diff -pruN 0.2.1-1.1/offsetfetch_test.go 0.4.49+ds1-1/offsetfetch_test.go
--- 0.2.1-1.1/offsetfetch_test.go	2018-11-20 17:06:33.000000000 +0000
+++ 0.4.49+ds1-1/offsetfetch_test.go	2025-08-21 19:15:53.000000000 +0000
@@ -3,8 +3,12 @@ package kafka
 import (
 	"bufio"
 	"bytes"
+	"context"
 	"reflect"
 	"testing"
+	"time"
+
+	ktesting "github.com/segmentio/kafka-go/testing"
 )
 
 func TestOffsetFetchResponseV1(t *testing.T) {
@@ -24,13 +28,12 @@ func TestOffsetFetchResponseV1(t *testin
 		},
 	}
 
-	buf := bytes.NewBuffer(nil)
-	w := bufio.NewWriter(buf)
+	b := bytes.NewBuffer(nil)
+	w := &writeBuffer{w: b}
 	item.writeTo(w)
-	w.Flush()
 
 	var found offsetFetchResponseV1
-	remain, err := (&found).readFrom(bufio.NewReader(buf), buf.Len())
+	remain, err := (&found).readFrom(bufio.NewReader(b), b.Len())
 	if err != nil {
 		t.Error(err)
 		t.FailNow()
@@ -44,3 +47,121 @@ func TestOffsetFetchResponseV1(t *testin
 		t.FailNow()
 	}
 }
+
+func TestOffsetFetchRequestWithNoTopic(t *testing.T) {
+	if !ktesting.KafkaIsAtLeast("0.10.2.0") {
+		t.Logf("Test %s is not applicable for kafka versions below 0.10.2.0", t.Name())
+		t.SkipNow()
+	}
+	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+	topic1 := makeTopic()
+	defer deleteTopic(t, topic1)
+	topic2 := makeTopic()
+	defer deleteTopic(t, topic2)
+	consumeGroup := makeGroupID()
+	numMsgs := 50
+	defer cancel()
+	r1 := NewReader(ReaderConfig{
+		Brokers:  []string{"localhost:9092"},
+		Topic:    topic1,
+		GroupID:  consumeGroup,
+		MinBytes: 1,
+		MaxBytes: 100,
+		MaxWait:  100 * time.Millisecond,
+	})
+	defer r1.Close()
+	prepareReader(t, ctx, r1, makeTestSequence(numMsgs)...)
+	r2 := NewReader(ReaderConfig{
+		Brokers:  []string{"localhost:9092"},
+		Topic:    topic2,
+		GroupID:  consumeGroup,
+		MinBytes: 1,
+		MaxBytes: 100,
+		MaxWait:  100 * time.Millisecond,
+	})
+	defer r2.Close()
+	prepareReader(t, ctx, r2, makeTestSequence(numMsgs)...)
+
+	for i := 0; i < numMsgs; i++ {
+		if _, err := r1.ReadMessage(ctx); err != nil {
+			t.Fatal(err)
+		}
+	}
+	for i := 0; i < numMsgs; i++ {
+		if _, err := r2.ReadMessage(ctx); err != nil {
+			t.Fatal(err)
+		}
+	}
+
+	client := Client{Addr: TCP("localhost:9092")}
+
+	topicOffsets, err := client.OffsetFetch(ctx, &OffsetFetchRequest{GroupID: consumeGroup})
+
+	if err != nil {
+		t.Error(err)
+		t.FailNow()
+	}
+
+	if len(topicOffsets.Topics) != 2 {
+		t.Errorf("expected offsets for 2 topics, got %d", len(topicOffsets.Topics))
+		t.FailNow()
+	}
+
+}
+
+func TestOffsetFetchRequestWithOneTopic(t *testing.T) {
+	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+	topic1 := makeTopic()
+	defer deleteTopic(t, topic1)
+	topic2 := makeTopic()
+	defer deleteTopic(t, topic2)
+	consumeGroup := makeGroupID()
+	numMsgs := 50
+	defer cancel()
+	r1 := NewReader(ReaderConfig{
+		Brokers:  []string{"localhost:9092"},
+		Topic:    topic1,
+		GroupID:  consumeGroup,
+		MinBytes: 1,
+		MaxBytes: 100,
+		MaxWait:  100 * time.Millisecond,
+	})
+	defer r1.Close()
+	prepareReader(t, ctx, r1, makeTestSequence(numMsgs)...)
+	r2 := NewReader(ReaderConfig{
+		Brokers:  []string{"localhost:9092"},
+		Topic:    topic2,
+		GroupID:  consumeGroup,
+		MinBytes: 1,
+		MaxBytes: 100,
+		MaxWait:  100 * time.Millisecond,
+	})
+	defer r2.Close()
+	prepareReader(t, ctx, r2, makeTestSequence(numMsgs)...)
+
+	for i := 0; i < numMsgs; i++ {
+		if _, err := r1.ReadMessage(ctx); err != nil {
+			t.Fatal(err)
+		}
+	}
+	for i := 0; i < numMsgs; i++ {
+		if _, err := r2.ReadMessage(ctx); err != nil {
+			t.Fatal(err)
+		}
+	}
+
+	client := Client{Addr: TCP("localhost:9092")}
+	topicOffsets, err := client.OffsetFetch(ctx, &OffsetFetchRequest{GroupID: consumeGroup, Topics: map[string][]int{
+		topic1: {0},
+	}})
+
+	if err != nil {
+		t.Error(err)
+		t.FailNow()
+	}
+
+	if len(topicOffsets.Topics) != 1 {
+		t.Errorf("expected offsets for 1 topic, got %d", len(topicOffsets.Topics))
+		t.FailNow()
+	}
+}
diff -pruN 0.2.1-1.1/produce.go 0.4.49+ds1-1/produce.go
--- 0.2.1-1.1/produce.go	2018-11-20 17:06:33.000000000 +0000
+++ 0.4.49+ds1-1/produce.go	2025-08-21 19:15:53.000000000 +0000
@@ -1,6 +1,206 @@
 package kafka
 
-import "bufio"
+import (
+	"bufio"
+	"context"
+	"encoding"
+	"errors"
+	"fmt"
+	"net"
+	"strconv"
+	"time"
+
+	"github.com/segmentio/kafka-go/protocol"
+	produceAPI "github.com/segmentio/kafka-go/protocol/produce"
+)
+
+type RequiredAcks int
+
+const (
+	RequireNone RequiredAcks = 0
+	RequireOne  RequiredAcks = 1
+	RequireAll  RequiredAcks = -1
+)
+
+func (acks RequiredAcks) String() string {
+	switch acks {
+	case RequireNone:
+		return "none"
+	case RequireOne:
+		return "one"
+	case RequireAll:
+		return "all"
+	default:
+		return "unknown"
+	}
+}
+
+func (acks RequiredAcks) MarshalText() ([]byte, error) {
+	return []byte(acks.String()), nil
+}
+
+func (acks *RequiredAcks) UnmarshalText(b []byte) error {
+	switch string(b) {
+	case "none":
+		*acks = RequireNone
+	case "one":
+		*acks = RequireOne
+	case "all":
+		*acks = RequireAll
+	default:
+		x, err := strconv.ParseInt(string(b), 10, 64)
+		parsed := RequiredAcks(x)
+		if err != nil || (parsed != RequireNone && parsed != RequireOne && parsed != RequireAll) {
+			return fmt.Errorf("required acks must be one of none, one, or all, not %q", b)
+		}
+		*acks = parsed
+	}
+	return nil
+}
+
+var (
+	_ encoding.TextMarshaler   = RequiredAcks(0)
+	_ encoding.TextUnmarshaler = (*RequiredAcks)(nil)
+)
+
+// ProduceRequest represents a request sent to a kafka broker to produce records
+// to a topic partition.
+type ProduceRequest struct {
+	// Address of the kafka broker to send the request to.
+	Addr net.Addr
+
+	// The topic to produce the records to.
+	Topic string
+
+	// The partition to produce the records to.
+	Partition int
+
+	// The level of required acknowledgements to ask the kafka broker for.
+	RequiredAcks RequiredAcks
+
+	// The message format version used when encoding the records.
+	//
+	// By default, the client automatically determines which version should be
+	// used based on the version of the Produce API supported by the server.
+	MessageVersion int
+
+	// An optional transactional id to set when the records produced to the
+	// kafka broker are part of a transaction.
+	TransactionalID string
+
+	// The sequence of records to produce to the topic partition.
+	Records RecordReader
+
+	// An optional compression algorithm to apply to the batch of records sent
+	// to the kafka broker.
+	Compression Compression
+}
+
+// ProduceResponse represents a response from a kafka broker to a produce
+// request.
+type ProduceResponse struct {
+	// The amount of time that the broker throttled the request.
+	Throttle time.Duration
+
+	// An error that may have occurred while attempting to produce the records.
+	//
+	// The error contains both the kafka error code, and an error message
+	// returned by the kafka broker. Programs may use the standard errors.Is
+	// function to test the error against kafka error codes.
+	Error error
+
+	// Offset of the first record that was written to the topic partition.
+	//
+	// This field will be zero if the kafka broker did not support Produce API
+	// version 3 or above.
+	BaseOffset int64
+
+	// Time at which the broker wrote the records to the topic partition.
+	//
+	// This field will be zero if the kafka broker did not support Produce API
+	// version 2 or above.
+	LogAppendTime time.Time
+
+	// First offset in the topic partition that the records were written to.
+	//
+	// This field will be zero if the kafka broker did not support Produce
+	// API version 5 or above (or if the first offset is zero).
+	LogStartOffset int64
+
+	// If errors occurred writing specific records, they will be reported in
+	// this map.
+	//
+	// This field will always be empty if the kafka broker did not support the
+	// Produce API in version 8 or above.
+	RecordErrors map[int]error
+}
+
+// Produce sends a produce request to a kafka broker and returns the response.
+//
+// If the request contains no records, the call is a no-op: an empty response
+// and a nil error are returned (see the ErrNoRecord branch below).
+//
+// When the request is configured with RequiredAcks=none, both the response and
+// the error will be nil on success.
+func (c *Client) Produce(ctx context.Context, req *ProduceRequest) (*ProduceResponse, error) {
+	attributes := protocol.Attributes(req.Compression) & 0x7
+
+	m, err := c.roundTrip(ctx, req.Addr, &produceAPI.Request{
+		TransactionalID: req.TransactionalID,
+		Acks:            int16(req.RequiredAcks),
+		Timeout:         c.timeoutMs(ctx, defaultProduceTimeout),
+		Topics: []produceAPI.RequestTopic{{
+			Topic: req.Topic,
+			Partitions: []produceAPI.RequestPartition{{
+				Partition: int32(req.Partition),
+				RecordSet: protocol.RecordSet{
+					Attributes: attributes,
+					Records:    req.Records,
+				},
+			}},
+		}},
+	})
+
+	switch {
+	case err == nil:
+	case errors.Is(err, protocol.ErrNoRecord):
+		return new(ProduceResponse), nil
+	default:
+		return nil, fmt.Errorf("kafka.(*Client).Produce: %w", err)
+	}
+
+	if req.RequiredAcks == RequireNone {
+		return nil, nil
+	}
+
+	res := m.(*produceAPI.Response)
+	if len(res.Topics) == 0 {
+		return nil, fmt.Errorf("kafka.(*Client).Produce: %w", protocol.ErrNoTopic)
+	}
+	topic := &res.Topics[0]
+	if len(topic.Partitions) == 0 {
+		return nil, fmt.Errorf("kafka.(*Client).Produce: %w", protocol.ErrNoPartition)
+	}
+	partition := &topic.Partitions[0]
+
+	ret := &ProduceResponse{
+		Throttle:       makeDuration(res.ThrottleTimeMs),
+		Error:          makeError(partition.ErrorCode, partition.ErrorMessage),
+		BaseOffset:     partition.BaseOffset,
+		LogAppendTime:  makeTime(partition.LogAppendTime),
+		LogStartOffset: partition.LogStartOffset,
+	}
+
+	if len(partition.RecordErrors) != 0 {
+		ret.RecordErrors = make(map[int]error, len(partition.RecordErrors))
+
+		for _, recErr := range partition.RecordErrors {
+			ret.RecordErrors[int(recErr.BatchIndex)] = errors.New(recErr.BatchIndexErrorMessage)
+		}
+	}
+
+	return ret, nil
+}
 
 type produceRequestV2 struct {
 	RequiredAcks int16
@@ -12,10 +212,10 @@ func (r produceRequestV2) size() int32 {
 	return 2 + 4 + sizeofArray(len(r.Topics), func(i int) int32 { return r.Topics[i].size() })
 }
 
-func (r produceRequestV2) writeTo(w *bufio.Writer) {
-	writeInt16(w, r.RequiredAcks)
-	writeInt32(w, r.Timeout)
-	writeArray(w, len(r.Topics), func(i int) { r.Topics[i].writeTo(w) })
+func (r produceRequestV2) writeTo(wb *writeBuffer) {
+	wb.writeInt16(r.RequiredAcks)
+	wb.writeInt32(r.Timeout)
+	wb.writeArray(len(r.Topics), func(i int) { r.Topics[i].writeTo(wb) })
 }
 
 type produceRequestTopicV2 struct {
@@ -28,9 +228,9 @@ func (t produceRequestTopicV2) size() in
 		sizeofArray(len(t.Partitions), func(i int) int32 { return t.Partitions[i].size() })
 }
 
-func (t produceRequestTopicV2) writeTo(w *bufio.Writer) {
-	writeString(w, t.TopicName)
-	writeArray(w, len(t.Partitions), func(i int) { t.Partitions[i].writeTo(w) })
+func (t produceRequestTopicV2) writeTo(wb *writeBuffer) {
+	wb.writeString(t.TopicName)
+	wb.writeArray(len(t.Partitions), func(i int) { t.Partitions[i].writeTo(wb) })
 }
 
 type produceRequestPartitionV2 struct {
@@ -43,39 +243,10 @@ func (p produceRequestPartitionV2) size(
 	return 4 + 4 + p.MessageSet.size()
 }
 
-func (p produceRequestPartitionV2) writeTo(w *bufio.Writer) {
-	writeInt32(w, p.Partition)
-	writeInt32(w, p.MessageSetSize)
-	p.MessageSet.writeTo(w)
-}
-
-type produceResponseV2 struct {
-	ThrottleTime int32
-	Topics       []produceResponseTopicV2
-}
-
-func (r produceResponseV2) size() int32 {
-	return 4 + sizeofArray(len(r.Topics), func(i int) int32 { return r.Topics[i].size() })
-}
-
-func (r produceResponseV2) writeTo(w *bufio.Writer) {
-	writeInt32(w, r.ThrottleTime)
-	writeArray(w, len(r.Topics), func(i int) { r.Topics[i].writeTo(w) })
-}
-
-type produceResponseTopicV2 struct {
-	TopicName  string
-	Partitions []produceResponsePartitionV2
-}
-
-func (t produceResponseTopicV2) size() int32 {
-	return sizeofString(t.TopicName) +
-		sizeofArray(len(t.Partitions), func(i int) int32 { return t.Partitions[i].size() })
-}
-
-func (t produceResponseTopicV2) writeTo(w *bufio.Writer) {
-	writeString(w, t.TopicName)
-	writeArray(w, len(t.Partitions), func(i int) { t.Partitions[i].writeTo(w) })
+func (p produceRequestPartitionV2) writeTo(wb *writeBuffer) {
+	wb.writeInt32(p.Partition)
+	wb.writeInt32(p.MessageSetSize)
+	p.MessageSet.writeTo(wb)
 }
 
 type produceResponsePartitionV2 struct {
@@ -89,11 +260,11 @@ func (p produceResponsePartitionV2) size
 	return 4 + 2 + 8 + 8
 }
 
-func (p produceResponsePartitionV2) writeTo(w *bufio.Writer) {
-	writeInt32(w, p.Partition)
-	writeInt16(w, p.ErrorCode)
-	writeInt64(w, p.Offset)
-	writeInt64(w, p.Timestamp)
+func (p produceResponsePartitionV2) writeTo(wb *writeBuffer) {
+	wb.writeInt32(p.Partition)
+	wb.writeInt16(p.ErrorCode)
+	wb.writeInt64(p.Offset)
+	wb.writeInt64(p.Timestamp)
 }
 
 func (p *produceResponsePartitionV2) readFrom(r *bufio.Reader, sz int) (remain int, err error) {
@@ -110,4 +281,43 @@ func (p *produceResponsePartitionV2) rea
 		return
 	}
 	return
+}
+
+type produceResponsePartitionV7 struct {
+	Partition   int32
+	ErrorCode   int16
+	Offset      int64
+	Timestamp   int64
+	StartOffset int64
+}
+
+func (p produceResponsePartitionV7) size() int32 {
+	return 4 + 2 + 8 + 8 + 8
+}
+
+func (p produceResponsePartitionV7) writeTo(wb *writeBuffer) {
+	wb.writeInt32(p.Partition)
+	wb.writeInt16(p.ErrorCode)
+	wb.writeInt64(p.Offset)
+	wb.writeInt64(p.Timestamp)
+	wb.writeInt64(p.StartOffset)
+}
+
+func (p *produceResponsePartitionV7) readFrom(r *bufio.Reader, sz int) (remain int, err error) {
+	if remain, err = readInt32(r, sz, &p.Partition); err != nil {
+		return
+	}
+	if remain, err = readInt16(r, remain, &p.ErrorCode); err != nil {
+		return
+	}
+	if remain, err = readInt64(r, remain, &p.Offset); err != nil {
+		return
+	}
+	if remain, err = readInt64(r, remain, &p.Timestamp); err != nil {
+		return
+	}
+	if remain, err = readInt64(r, remain, &p.StartOffset); err != nil {
+		return
+	}
+	return
 }
diff -pruN 0.2.1-1.1/produce_test.go 0.4.49+ds1-1/produce_test.go
--- 0.2.1-1.1/produce_test.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/produce_test.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,135 @@
+package kafka
+
+import (
+	"context"
+	"strconv"
+	"testing"
+	"time"
+
+	"github.com/segmentio/kafka-go/compress"
+)
+
+func TestRequiredAcks(t *testing.T) {
+	for _, acks := range []RequiredAcks{
+		RequireNone,
+		RequireOne,
+		RequireAll,
+	} {
+		t.Run(acks.String(), func(t *testing.T) {
+			a := strconv.Itoa(int(acks))
+			x := RequiredAcks(-2)
+			y := RequiredAcks(-2)
+			b, err := acks.MarshalText()
+			if err != nil {
+				t.Fatal(err)
+			}
+
+			if err := x.UnmarshalText([]byte(a)); err != nil {
+				t.Fatal(err)
+			}
+			if err := y.UnmarshalText(b); err != nil {
+				t.Fatal(err)
+			}
+
+			if x != acks {
+				t.Errorf("required acks mismatch after marshal/unmarshal text: want=%s got=%s", acks, x)
+			}
+			if y != acks {
+				t.Errorf("required acks mismatch after marshal/unmarshal value: want=%s got=%s", acks, y)
+			}
+		})
+	}
+}
+
+func TestClientProduce(t *testing.T) {
+	client, topic, shutdown := newLocalClientAndTopic()
+	defer shutdown()
+
+	now := time.Now()
+
+	res, err := client.Produce(context.Background(), &ProduceRequest{
+		Topic:        topic,
+		Partition:    0,
+		RequiredAcks: -1,
+		Records: NewRecordReader(
+			Record{Time: now, Value: NewBytes([]byte(`hello-1`))},
+			Record{Time: now, Value: NewBytes([]byte(`hello-2`))},
+			Record{Time: now, Value: NewBytes([]byte(`hello-3`))},
+		),
+	})
+
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if res.Error != nil {
+		t.Error(res.Error)
+	}
+
+	for index, err := range res.RecordErrors {
+		t.Errorf("record at index %d produced an error: %v", index, err)
+	}
+}
+
+func TestClientProduceCompressed(t *testing.T) {
+	client, topic, shutdown := newLocalClientAndTopic()
+	defer shutdown()
+
+	now := time.Now()
+
+	res, err := client.Produce(context.Background(), &ProduceRequest{
+		Topic:        topic,
+		Partition:    0,
+		RequiredAcks: -1,
+		Compression:  compress.Gzip,
+		Records: NewRecordReader(
+			Record{Time: now, Value: NewBytes([]byte(`hello-1`))},
+			Record{Time: now, Value: NewBytes([]byte(`hello-2`))},
+			Record{Time: now, Value: NewBytes([]byte(`hello-3`))},
+		),
+	})
+
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if res.Error != nil {
+		t.Error(res.Error)
+	}
+
+	for index, err := range res.RecordErrors {
+		t.Errorf("record at index %d produced an error: %v", index, err)
+	}
+}
+
+func TestClientProduceNilRecords(t *testing.T) {
+	client, topic, shutdown := newLocalClientAndTopic()
+	defer shutdown()
+
+	_, err := client.Produce(context.Background(), &ProduceRequest{
+		Topic:        topic,
+		Partition:    0,
+		RequiredAcks: -1,
+		Records:      nil,
+	})
+
+	if err != nil {
+		t.Fatal(err)
+	}
+}
+
+func TestClientProduceEmptyRecords(t *testing.T) {
+	client, topic, shutdown := newLocalClientAndTopic()
+	defer shutdown()
+
+	_, err := client.Produce(context.Background(), &ProduceRequest{
+		Topic:        topic,
+		Partition:    0,
+		RequiredAcks: -1,
+		Records:      NewRecordReader(),
+	})
+
+	if err != nil {
+		t.Fatal(err)
+	}
+}
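Because RequiredAcks implements encoding.TextMarshaler and encoding.TextUnmarshaler, it can be decoded straight from textual configuration. A small sketch assuming a JSON config (encoding/json consults UnmarshalText for string values); the config struct is hypothetical.

package main

import (
	"encoding/json"
	"fmt"
	"log"

	"github.com/segmentio/kafka-go"
)

type producerConfig struct {
	Acks kafka.RequiredAcks `json:"acks"`
}

func main() {
	var cfg producerConfig
	// "none", "one", "all", or a numeric value (0, 1, -1) are accepted by
	// RequiredAcks.UnmarshalText.
	if err := json.Unmarshal([]byte(`{"acks":"all"}`), &cfg); err != nil {
		log.Fatal(err)
	}
	fmt.Println(cfg.Acks) // all
}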
diff -pruN 0.2.1-1.1/protocol/addoffsetstotxn/addoffsetstotxn.go 0.4.49+ds1-1/protocol/addoffsetstotxn/addoffsetstotxn.go
--- 0.2.1-1.1/protocol/addoffsetstotxn/addoffsetstotxn.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/protocol/addoffsetstotxn/addoffsetstotxn.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,35 @@
+package addoffsetstotxn
+
+import "github.com/segmentio/kafka-go/protocol"
+
+func init() {
+	protocol.Register(&Request{}, &Response{})
+}
+
+type Request struct {
+	// We need at least one tagged field to indicate that this is a "flexible" message
+	// type.
+	_ struct{} `kafka:"min=v3,max=v3,tag"`
+
+	TransactionalID string `kafka:"min=v0,max=v2|min=v3,max=v3,compact"`
+	ProducerID      int64  `kafka:"min=v0,max=v3"`
+	ProducerEpoch   int16  `kafka:"min=v0,max=v3"`
+	GroupID         string `kafka:"min=v0,max=v3|min=v3,max=v3,compact"`
+}
+
+func (r *Request) ApiKey() protocol.ApiKey { return protocol.AddOffsetsToTxn }
+
+func (r *Request) Transaction() string { return r.TransactionalID }
+
+var _ protocol.TransactionalMessage = (*Request)(nil)
+
+type Response struct {
+	// We need at least one tagged field to indicate that this is a "flexible" message
+	// type.
+	_ struct{} `kafka:"min=v3,max=v3,tag"`
+
+	ThrottleTimeMs int32 `kafka:"min=v0,max=v3"`
+	ErrorCode      int16 `kafka:"min=v0,max=v3"`
+}
+
+func (r *Response) ApiKey() protocol.ApiKey { return protocol.AddOffsetsToTxn }
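The kafka struct tags on these protocol types drive the wire format: each field declares the version range it appears in, alternatives separated by | pick a different encoding per range, and a zero-width field tagged "tag" marks the versions that use the flexible (tagged-field) encoding. A hypothetical struct, not part of the library, spelling out the pattern:

package exampleproto

// ExampleRequest exists only to illustrate how the kafka tags are read.
type ExampleRequest struct {
	// Marks v3 as a "flexible" version carrying tagged fields (KIP-482).
	_ struct{} `kafka:"min=v3,max=v3,tag"`

	// Encoded as a regular STRING in v0-v2 and as a COMPACT_STRING in v3.
	Name string `kafka:"min=v0,max=v2|min=v3,max=v3,compact"`

	// Present in all versions v0-v3 with a fixed-width encoding.
	ID int64 `kafka:"min=v0,max=v3"`
}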
diff -pruN 0.2.1-1.1/protocol/addoffsetstotxn/addoffsetstotxn_test.go 0.4.49+ds1-1/protocol/addoffsetstotxn/addoffsetstotxn_test.go
--- 0.2.1-1.1/protocol/addoffsetstotxn/addoffsetstotxn_test.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/protocol/addoffsetstotxn/addoffsetstotxn_test.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,28 @@
+package addoffsetstotxn_test
+
+import (
+	"testing"
+
+	"github.com/segmentio/kafka-go/protocol/addoffsetstotxn"
+	"github.com/segmentio/kafka-go/protocol/prototest"
+)
+
+func TestAddOffsetsToTxnRequest(t *testing.T) {
+	for _, version := range []int16{0, 1, 2, 3} {
+		prototest.TestRequest(t, version, &addoffsetstotxn.Request{
+			TransactionalID: "transactional-id-0",
+			ProducerID:      1,
+			ProducerEpoch:   10,
+			GroupID:         "group-id-0",
+		})
+	}
+}
+
+func TestAddOffsetsToTxnResponse(t *testing.T) {
+	for _, version := range []int16{0, 1, 2, 3} {
+		prototest.TestResponse(t, version, &addoffsetstotxn.Response{
+			ThrottleTimeMs: 10,
+			ErrorCode:      1,
+		})
+	}
+}
diff -pruN 0.2.1-1.1/protocol/addpartitionstotxn/addpartitionstotxn.go 0.4.49+ds1-1/protocol/addpartitionstotxn/addpartitionstotxn.go
--- 0.2.1-1.1/protocol/addpartitionstotxn/addpartitionstotxn.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/protocol/addpartitionstotxn/addpartitionstotxn.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,62 @@
+package addpartitionstotxn
+
+import "github.com/segmentio/kafka-go/protocol"
+
+func init() {
+	protocol.Register(&Request{}, &Response{})
+}
+
+type Request struct {
+	// We need at least one tagged field to indicate that this is a "flexible" message
+	// type.
+	_ struct{} `kafka:"min=v3,max=v3,tag"`
+
+	TransactionalID string         `kafka:"min=v0,max=v2|min=v3,max=v3,compact"`
+	ProducerID      int64          `kafka:"min=v0,max=v3"`
+	ProducerEpoch   int16          `kafka:"min=v0,max=v3"`
+	Topics          []RequestTopic `kafka:"min=v0,max=v3"`
+}
+
+type RequestTopic struct {
+	// We need at least one tagged field to indicate that this is a "flexible" message
+	// type.
+	_ struct{} `kafka:"min=v3,max=v3,tag"`
+
+	Name       string  `kafka:"min=v0,max=v2|min=v3,max=v3,compact"`
+	Partitions []int32 `kafka:"min=v0,max=v3"`
+}
+
+func (r *Request) ApiKey() protocol.ApiKey { return protocol.AddPartitionsToTxn }
+
+func (r *Request) Transaction() string { return r.TransactionalID }
+
+var _ protocol.TransactionalMessage = (*Request)(nil)
+
+type Response struct {
+	// We need at least one tagged field to indicate that this is a "flexible" message
+	// type.
+	_ struct{} `kafka:"min=v3,max=v3,tag"`
+
+	ThrottleTimeMs int32            `kafka:"min=v0,max=v3"`
+	Results        []ResponseResult `kafka:"min=v0,max=v3"`
+}
+
+type ResponseResult struct {
+	// We need at least one tagged field to indicate that this is a "flexible" message
+	// type.
+	_ struct{} `kafka:"min=v3,max=v3,tag"`
+
+	Name    string              `kafka:"min=v0,max=v2|min=v3,max=v3,compact"`
+	Results []ResponsePartition `kafka:"min=v0,max=v3"`
+}
+
+type ResponsePartition struct {
+	// We need at least one tagged field to indicate that this is a "flexible" message
+	// type.
+	_ struct{} `kafka:"min=v3,max=v3,tag"`
+
+	PartitionIndex int32 `kafka:"min=v0,max=v3"`
+	ErrorCode      int16 `kafka:"min=v0,max=v3"`
+}
+
+func (r *Response) ApiKey() protocol.ApiKey { return protocol.AddPartitionsToTxn }
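AddPartitionsToTxn and AddOffsetsToTxn are two steps of Kafka's transactional produce flow. A rough outline of where they sit, using only API key constants that appear in this change set; the surrounding steps are named in comments to avoid assuming constant names, and this is a sketch of the broker protocol sequence, not an API exposed by these packages.

package txnflow

import "github.com/segmentio/kafka-go/protocol"

// transactionalFlow sketches the request order for a transactional producer
// that also commits consumer offsets as part of the transaction.
var transactionalFlow = []protocol.ApiKey{
	// InitProducerId: obtain a producer id and epoch (separate protocol package).
	protocol.AddPartitionsToTxn, // declare topic partitions before producing to them
	protocol.Produce,            // write the transactional records
	protocol.AddOffsetsToTxn,    // attach the consumer group to the transaction
	// TxnOffsetCommit: commit the consumed offsets inside the transaction.
	// EndTxn: commit or abort the whole transaction.
}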
diff -pruN 0.2.1-1.1/protocol/addpartitionstotxn/addpartitionstotxn_test.go 0.4.49+ds1-1/protocol/addpartitionstotxn/addpartitionstotxn_test.go
--- 0.2.1-1.1/protocol/addpartitionstotxn/addpartitionstotxn_test.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/protocol/addpartitionstotxn/addpartitionstotxn_test.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,60 @@
+package addpartitionstotxn_test
+
+import (
+	"testing"
+
+	"github.com/segmentio/kafka-go/protocol/addpartitionstotxn"
+	"github.com/segmentio/kafka-go/protocol/prototest"
+)
+
+func TestAddPartitionsToTxnRequest(t *testing.T) {
+	for _, version := range []int16{0, 1, 2, 3} {
+		prototest.TestRequest(t, version, &addpartitionstotxn.Request{
+			TransactionalID: "transaction-id-0",
+			ProducerID:      10,
+			ProducerEpoch:   100,
+			Topics: []addpartitionstotxn.RequestTopic{
+				{
+					Name:       "topic-1",
+					Partitions: []int32{0, 1, 2, 3},
+				},
+				{
+					Name:       "topic-2",
+					Partitions: []int32{0, 1, 2},
+				},
+			},
+		})
+	}
+}
+
+func TestAddPartitionsToTxnResponse(t *testing.T) {
+	for _, version := range []int16{0, 1, 2, 3} {
+		prototest.TestResponse(t, version, &addpartitionstotxn.Response{
+			ThrottleTimeMs: 20,
+			Results: []addpartitionstotxn.ResponseResult{
+				{
+					Name: "topic-1",
+					Results: []addpartitionstotxn.ResponsePartition{
+						{
+							PartitionIndex: 0,
+							ErrorCode:      19,
+						},
+						{
+							PartitionIndex: 1,
+							ErrorCode:      0,
+						},
+					},
+				},
+				{
+					Name: "topic-2",
+					Results: []addpartitionstotxn.ResponsePartition{
+						{
+							PartitionIndex: 0,
+							ErrorCode:      0,
+						},
+					},
+				},
+			},
+		})
+	}
+}
diff -pruN 0.2.1-1.1/protocol/alterclientquotas/alterclientquotas.go 0.4.49+ds1-1/protocol/alterclientquotas/alterclientquotas.go
--- 0.2.1-1.1/protocol/alterclientquotas/alterclientquotas.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/protocol/alterclientquotas/alterclientquotas.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,68 @@
+package alterclientquotas
+
+import "github.com/segmentio/kafka-go/protocol"
+
+func init() {
+	protocol.Register(&Request{}, &Response{})
+}
+
+// Detailed API definition: https://kafka.apache.org/protocol#The_Messages_AlterClientQuotas
+type Request struct {
+	// We need at least one tagged field to indicate that this is a "flexible" message
+	// type.
+	_            struct{} `kafka:"min=v1,max=v1,tag"`
+	Entries      []Entry  `kafka:"min=v0,max=v1"`
+	ValidateOnly bool     `kafka:"min=v0,max=v1"`
+}
+
+func (r *Request) ApiKey() protocol.ApiKey { return protocol.AlterClientQuotas }
+
+func (r *Request) Broker(cluster protocol.Cluster) (protocol.Broker, error) {
+	return cluster.Brokers[cluster.Controller], nil
+}
+
+type Entry struct {
+	// We need at least one tagged field to indicate that this is a "flexible" message
+	// type.
+	_        struct{} `kafka:"min=v1,max=v1,tag"`
+	Entities []Entity `kafka:"min=v0,max=v1"`
+	Ops      []Ops    `kafka:"min=v0,max=v1"`
+}
+
+type Entity struct {
+	// We need at least one tagged field to indicate that this is a "flexible" message
+	// type.
+	_          struct{} `kafka:"min=v1,max=v1,tag"`
+	EntityType string   `kafka:"min=v0,max=v0|min=v1,max=v1,compact"`
+	EntityName string   `kafka:"min=v0,max=v0,nullable|min=v1,max=v1,nullable,compact"`
+}
+
+type Ops struct {
+	// We need at least one tagged field to indicate that this is a "flexible" message
+	// type.
+	_      struct{} `kafka:"min=v1,max=v1,tag"`
+	Key    string   `kafka:"min=v0,max=v0|min=v1,max=v1,compact"`
+	Value  float64  `kafka:"min=v0,max=v1"`
+	Remove bool     `kafka:"min=v0,max=v1"`
+}
+
+type Response struct {
+	// We need at least one tagged field to indicate that this is a "flexible" message
+	// type.
+	_              struct{}         `kafka:"min=v1,max=v1,tag"`
+	ThrottleTimeMs int32            `kafka:"min=v0,max=v1"`
+	Results        []ResponseQuotas `kafka:"min=v0,max=v1"`
+}
+
+func (r *Response) ApiKey() protocol.ApiKey { return protocol.AlterClientQuotas }
+
+type ResponseQuotas struct {
+	// We need at least one tagged field to indicate that this is a "flexible" message
+	// type.
+	_            struct{} `kafka:"min=v1,max=v1,tag"`
+	ErrorCode    int16    `kafka:"min=v0,max=v1"`
+	ErrorMessage string   `kafka:"min=v0,max=v1,nullable"`
+	Entities     []Entity `kafka:"min=v0,max=v1"`
+}
+
+var _ protocol.BrokerMessage = (*Request)(nil)
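A short sketch of an AlterClientQuotas request body; the entity type and quota key strings follow the Kafka quota configuration names ("client-id", "producer_byte_rate") also used in the tests below, and the Remove flag clears an existing quota instead of assigning a value.

package quotasexample

import "github.com/segmentio/kafka-go/protocol/alterclientquotas"

// clearProducerQuota builds a request that removes the producer byte-rate
// quota for a given client id (ValidateOnly is left false so it takes effect).
func clearProducerQuota(clientID string) *alterclientquotas.Request {
	return &alterclientquotas.Request{
		Entries: []alterclientquotas.Entry{{
			Entities: []alterclientquotas.Entity{{
				EntityType: "client-id",
				EntityName: clientID,
			}},
			Ops: []alterclientquotas.Ops{{
				Key:    "producer_byte_rate",
				Remove: true, // drop the quota rather than setting a value
			}},
		}},
	}
}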
diff -pruN 0.2.1-1.1/protocol/alterclientquotas/alterclientquotas_test.go 0.4.49+ds1-1/protocol/alterclientquotas/alterclientquotas_test.go
--- 0.2.1-1.1/protocol/alterclientquotas/alterclientquotas_test.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/protocol/alterclientquotas/alterclientquotas_test.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,91 @@
+package alterclientquotas_test
+
+import (
+	"testing"
+
+	"github.com/segmentio/kafka-go/protocol/alterclientquotas"
+	"github.com/segmentio/kafka-go/protocol/prototest"
+)
+
+const (
+	v0 = 0
+	v1 = 1
+)
+
+func TestAlterClientQuotasRequest(t *testing.T) {
+	prototest.TestRequest(t, v0, &alterclientquotas.Request{
+		ValidateOnly: true,
+		Entries: []alterclientquotas.Entry{
+			{
+				Entities: []alterclientquotas.Entity{
+					{
+						EntityType: "client-id",
+						EntityName: "my-client-id",
+					},
+				},
+				Ops: []alterclientquotas.Ops{
+					{
+						Key:    "producer_byte_rate",
+						Value:  1.0,
+						Remove: false,
+					},
+				},
+			},
+		},
+	})
+
+	prototest.TestRequest(t, v1, &alterclientquotas.Request{
+		ValidateOnly: true,
+		Entries: []alterclientquotas.Entry{
+			{
+				Entities: []alterclientquotas.Entity{
+					{
+						EntityType: "client-id",
+						EntityName: "my-client-id",
+					},
+				},
+				Ops: []alterclientquotas.Ops{
+					{
+						Key:    "producer_byte_rate",
+						Value:  1.0,
+						Remove: false,
+					},
+				},
+			},
+		},
+	})
+}
+
+func TestAlterClientQuotasResponse(t *testing.T) {
+	prototest.TestResponse(t, v0, &alterclientquotas.Response{
+		ThrottleTimeMs: 500,
+		Results: []alterclientquotas.ResponseQuotas{
+			{
+				ErrorCode:    1,
+				ErrorMessage: "foo",
+				Entities: []alterclientquotas.Entity{
+					{
+						EntityType: "client-id",
+						EntityName: "my-client-id",
+					},
+				},
+			},
+		},
+	})
+
+	prototest.TestResponse(t, v1, &alterclientquotas.Response{
+		ThrottleTimeMs: 500,
+		Results: []alterclientquotas.ResponseQuotas{
+			{
+				ErrorCode:    1,
+				ErrorMessage: "foo",
+				Entities: []alterclientquotas.Entity{
+					{
+						EntityType: "client-id",
+						EntityName: "my-client-id",
+					},
+				},
+			},
+		},
+	})
+}
diff -pruN 0.2.1-1.1/protocol/alterconfigs/alterconfigs.go 0.4.49+ds1-1/protocol/alterconfigs/alterconfigs.go
--- 0.2.1-1.1/protocol/alterconfigs/alterconfigs.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/protocol/alterconfigs/alterconfigs.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,48 @@
+package alterconfigs
+
+import "github.com/segmentio/kafka-go/protocol"
+
+func init() {
+	protocol.Register(&Request{}, &Response{})
+}
+
+// Detailed API definition: https://kafka.apache.org/protocol#The_Messages_AlterConfigs
+type Request struct {
+	Resources    []RequestResources `kafka:"min=v0,max=v1"`
+	ValidateOnly bool               `kafka:"min=v0,max=v1"`
+}
+
+func (r *Request) ApiKey() protocol.ApiKey { return protocol.AlterConfigs }
+
+func (r *Request) Broker(cluster protocol.Cluster) (protocol.Broker, error) {
+	return cluster.Brokers[cluster.Controller], nil
+}
+
+type RequestResources struct {
+	ResourceType int8            `kafka:"min=v0,max=v1"`
+	ResourceName string          `kafka:"min=v0,max=v1"`
+	Configs      []RequestConfig `kafka:"min=v0,max=v1"`
+}
+
+type RequestConfig struct {
+	Name  string `kafka:"min=v0,max=v1"`
+	Value string `kafka:"min=v0,max=v1,nullable"`
+}
+
+type Response struct {
+	ThrottleTimeMs int32               `kafka:"min=v0,max=v1"`
+	Responses      []ResponseResponses `kafka:"min=v0,max=v1"`
+}
+
+func (r *Response) ApiKey() protocol.ApiKey { return protocol.AlterConfigs }
+
+type ResponseResponses struct {
+	ErrorCode    int16  `kafka:"min=v0,max=v1"`
+	ErrorMessage string `kafka:"min=v0,max=v1,nullable"`
+	ResourceType int8   `kafka:"min=v0,max=v1"`
+	ResourceName string `kafka:"min=v0,max=v1"`
+}
+
+var (
+	_ protocol.BrokerMessage = (*Request)(nil)
+)
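A sketch of an AlterConfigs request that sets a topic configuration value. ResourceType 2 is assumed here to denote a topic, matching Kafka's ConfigResource types, and note that the non-incremental AlterConfigs API replaces the resource's dynamic config set rather than merging into it.

package alterconfigsexample

import "github.com/segmentio/kafka-go/protocol/alterconfigs"

// setTopicRetention builds a request that sets retention.ms on a topic.
// ResourceType 2 is assumed to mean "topic" (Kafka ConfigResource type).
func setTopicRetention(topic, retentionMs string) *alterconfigs.Request {
	return &alterconfigs.Request{
		Resources: []alterconfigs.RequestResources{{
			ResourceType: 2,
			ResourceName: topic,
			Configs: []alterconfigs.RequestConfig{{
				Name:  "retention.ms",
				Value: retentionMs,
			}},
		}},
	}
}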
diff -pruN 0.2.1-1.1/protocol/alterconfigs/alterconfigs_test.go 0.4.49+ds1-1/protocol/alterconfigs/alterconfigs_test.go
--- 0.2.1-1.1/protocol/alterconfigs/alterconfigs_test.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/protocol/alterconfigs/alterconfigs_test.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,73 @@
+package alterconfigs_test
+
+import (
+	"testing"
+
+	"github.com/segmentio/kafka-go/protocol/alterconfigs"
+	"github.com/segmentio/kafka-go/protocol/prototest"
+)
+
+const (
+	v0 = 0
+	v1 = 1
+)
+
+func TestAlterConfigsRequest(t *testing.T) {
+	prototest.TestRequest(t, v0, &alterconfigs.Request{
+		ValidateOnly: true,
+		Resources: []alterconfigs.RequestResources{
+			{
+				ResourceType: 1,
+				ResourceName: "foo",
+				Configs: []alterconfigs.RequestConfig{
+					{
+						Name:  "foo",
+						Value: "foo",
+					},
+				},
+			},
+		},
+	})
+
+	prototest.TestRequest(t, v1, &alterconfigs.Request{
+		ValidateOnly: true,
+		Resources: []alterconfigs.RequestResources{
+			{
+				ResourceType: 1,
+				ResourceName: "foo",
+				Configs: []alterconfigs.RequestConfig{
+					{
+						Name:  "foo",
+						Value: "foo",
+					},
+				},
+			},
+		},
+	})
+}
+
+func TestAlterConfigsResponse(t *testing.T) {
+	prototest.TestResponse(t, v0, &alterconfigs.Response{
+		ThrottleTimeMs: 500,
+		Responses: []alterconfigs.ResponseResponses{
+			{
+				ErrorCode:    1,
+				ErrorMessage: "foo",
+				ResourceType: 1,
+				ResourceName: "foo",
+			},
+		},
+	})
+
+	prototest.TestResponse(t, v1, &alterconfigs.Response{
+		ThrottleTimeMs: 500,
+		Responses: []alterconfigs.ResponseResponses{
+			{
+				ErrorCode:    1,
+				ErrorMessage: "foo",
+				ResourceType: 1,
+				ResourceName: "foo",
+			},
+		},
+	})
+}
diff -pruN 0.2.1-1.1/protocol/alterpartitionreassignments/alterpartitionreassignments.go 0.4.49+ds1-1/protocol/alterpartitionreassignments/alterpartitionreassignments.go
--- 0.2.1-1.1/protocol/alterpartitionreassignments/alterpartitionreassignments.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/protocol/alterpartitionreassignments/alterpartitionreassignments.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,61 @@
+package alterpartitionreassignments
+
+import "github.com/segmentio/kafka-go/protocol"
+
+func init() {
+	protocol.Register(&Request{}, &Response{})
+}
+
+// Detailed API definition: https://kafka.apache.org/protocol#The_Messages_AlterPartitionReassignments
+type Request struct {
+	// We need at least one tagged field to indicate that this is a "flexible" message
+	// type.
+	_ struct{} `kafka:"min=v0,max=v0,tag"`
+
+	TimeoutMs int32          `kafka:"min=v0,max=v0"`
+	Topics    []RequestTopic `kafka:"min=v0,max=v0"`
+}
+
+type RequestTopic struct {
+	Name       string             `kafka:"min=v0,max=v0"`
+	Partitions []RequestPartition `kafka:"min=v0,max=v0"`
+}
+
+type RequestPartition struct {
+	PartitionIndex int32   `kafka:"min=v0,max=v0"`
+	Replicas       []int32 `kafka:"min=v0,max=v0,nullable"`
+}
+
+func (r *Request) ApiKey() protocol.ApiKey {
+	return protocol.AlterPartitionReassignments
+}
+
+func (r *Request) Broker(cluster protocol.Cluster) (protocol.Broker, error) {
+	return cluster.Brokers[cluster.Controller], nil
+}
+
+type Response struct {
+	// We need at least one tagged field to indicate that this is a "flexible" message
+	// type.
+	_ struct{} `kafka:"min=v0,max=v0,tag"`
+
+	ThrottleTimeMs int32            `kafka:"min=v0,max=v0"`
+	ErrorCode      int16            `kafka:"min=v0,max=v0"`
+	ErrorMessage   string           `kafka:"min=v0,max=v0,nullable"`
+	Results        []ResponseResult `kafka:"min=v0,max=v0"`
+}
+
+type ResponseResult struct {
+	Name       string              `kafka:"min=v0,max=v0"`
+	Partitions []ResponsePartition `kafka:"min=v0,max=v0"`
+}
+
+type ResponsePartition struct {
+	PartitionIndex int32  `kafka:"min=v0,max=v0"`
+	ErrorCode      int16  `kafka:"min=v0,max=v0"`
+	ErrorMessage   string `kafka:"min=v0,max=v0,nullable"`
+}
+
+func (r *Response) ApiKey() protocol.ApiKey {
+	return protocol.AlterPartitionReassignments
+}
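The nullable Replicas field mirrors the Kafka API, where a null replica list for a partition is assumed to cancel a pending reassignment rather than start one. A sketch under that assumption:

package reassignexample

import "github.com/segmentio/kafka-go/protocol/alterpartitionreassignments"

// moveAndCancel reassigns partition 0 of a topic to the given replicas and,
// in the same request, cancels any pending reassignment of partition 1 by
// leaving its Replicas nil (assumed to encode as a null list on the wire).
func moveAndCancel(topic string, replicas []int32) *alterpartitionreassignments.Request {
	return &alterpartitionreassignments.Request{
		TimeoutMs: 5000,
		Topics: []alterpartitionreassignments.RequestTopic{{
			Name: topic,
			Partitions: []alterpartitionreassignments.RequestPartition{
				{PartitionIndex: 0, Replicas: replicas},
				{PartitionIndex: 1}, // nil Replicas: cancel pending reassignment
			},
		}},
	}
}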
diff -pruN 0.2.1-1.1/protocol/alterpartitionreassignments/alterpartitionreassignments_test.go 0.4.49+ds1-1/protocol/alterpartitionreassignments/alterpartitionreassignments_test.go
--- 0.2.1-1.1/protocol/alterpartitionreassignments/alterpartitionreassignments_test.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/protocol/alterpartitionreassignments/alterpartitionreassignments_test.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,55 @@
+package alterpartitionreassignments_test
+
+import (
+	"testing"
+
+	"github.com/segmentio/kafka-go/protocol/alterpartitionreassignments"
+	"github.com/segmentio/kafka-go/protocol/prototest"
+)
+
+const (
+	v0 = 0
+)
+
+func TestAlterPartitionReassignmentsRequest(t *testing.T) {
+	prototest.TestRequest(t, v0, &alterpartitionreassignments.Request{
+		TimeoutMs: 1,
+		Topics: []alterpartitionreassignments.RequestTopic{
+			{
+				Name: "topic-1",
+				Partitions: []alterpartitionreassignments.RequestPartition{
+					{
+						PartitionIndex: 1,
+						Replicas:       []int32{1, 2, 3},
+					},
+					{
+						PartitionIndex: 2,
+					},
+				},
+			},
+		},
+	})
+}
+
+func TestAlterPartitionReassignmentsResponse(t *testing.T) {
+	prototest.TestResponse(t, v0, &alterpartitionreassignments.Response{
+		ErrorCode:      1,
+		ErrorMessage:   "error",
+		ThrottleTimeMs: 1,
+		Results: []alterpartitionreassignments.ResponseResult{
+			{
+				Name: "topic-1",
+				Partitions: []alterpartitionreassignments.ResponsePartition{
+					{
+						PartitionIndex: 1,
+						ErrorMessage:   "error",
+						ErrorCode:      1,
+					},
+					{
+						PartitionIndex: 2,
+					},
+				},
+			},
+		},
+	})
+}
diff -pruN 0.2.1-1.1/protocol/alteruserscramcredentials/alteruserscramcredentials.go 0.4.49+ds1-1/protocol/alteruserscramcredentials/alteruserscramcredentials.go
--- 0.2.1-1.1/protocol/alteruserscramcredentials/alteruserscramcredentials.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/protocol/alteruserscramcredentials/alteruserscramcredentials.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,66 @@
+package alteruserscramcredentials
+
+import "github.com/segmentio/kafka-go/protocol"
+
+func init() {
+	protocol.Register(&Request{}, &Response{})
+}
+
+type Request struct {
+	// We need at least one tagged field to indicate that this is a "flexible"
+	// message type.
+	_ struct{} `kafka:"min=v0,max=v0,tag"`
+
+	Deletions  []RequestUserScramCredentialsDeletion  `kafka:"min=v0,max=v0"`
+	Upsertions []RequestUserScramCredentialsUpsertion `kafka:"min=v0,max=v0"`
+}
+
+func (r *Request) ApiKey() protocol.ApiKey { return protocol.AlterUserScramCredentials }
+
+func (r *Request) Broker(cluster protocol.Cluster) (protocol.Broker, error) {
+	return cluster.Brokers[cluster.Controller], nil
+}
+
+type RequestUserScramCredentialsDeletion struct {
+	// We need at least one tagged field to indicate that this is a "flexible"
+	// message type.
+	_ struct{} `kafka:"min=v0,max=v0,tag"`
+
+	Name      string `kafka:"min=v0,max=v0,compact"`
+	Mechanism int8   `kafka:"min=v0,max=v0"`
+}
+
+type RequestUserScramCredentialsUpsertion struct {
+	// We need at least one tagged field to indicate that this is a "flexible"
+	// message type.
+	_ struct{} `kafka:"min=v0,max=v0,tag"`
+
+	Name           string `kafka:"min=v0,max=v0,compact"`
+	Mechanism      int8   `kafka:"min=v0,max=v0"`
+	Iterations     int32  `kafka:"min=v0,max=v0"`
+	Salt           []byte `kafka:"min=v0,max=v0,compact"`
+	SaltedPassword []byte `kafka:"min=v0,max=v0,compact"`
+}
+
+type Response struct {
+	// We need at least one tagged field to indicate that this is a "flexible"
+	// message type.
+	_ struct{} `kafka:"min=v0,max=v0,tag"`
+
+	ThrottleTimeMs int32                          `kafka:"min=v0,max=v0"`
+	Results        []ResponseUserScramCredentials `kafka:"min=v0,max=v0"`
+}
+
+func (r *Response) ApiKey() protocol.ApiKey { return protocol.AlterUserScramCredentials }
+
+type ResponseUserScramCredentials struct {
+	// We need at least one tagged field to indicate that this is a "flexible"
+	// message type.
+	_ struct{} `kafka:"min=v0,max=v0,tag"`
+
+	User         string `kafka:"min=v0,max=v0,compact"`
+	ErrorCode    int16  `kafka:"min=v0,max=v0"`
+	ErrorMessage string `kafka:"min=v0,max=v0,nullable"`
+}
+
+var _ protocol.BrokerMessage = (*Request)(nil)
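The upsertion carries the already-salted password, not the cleartext: per RFC 5802 the salted password is PBKDF2 over the cleartext with the given salt and iteration count. A sketch assuming golang.org/x/crypto/pbkdf2 and assuming mechanism value 1 means SCRAM-SHA-256 (2 for SCRAM-SHA-512); the helper name is hypothetical.

package scramexample

import (
	"crypto/rand"
	"crypto/sha256"

	"golang.org/x/crypto/pbkdf2"

	"github.com/segmentio/kafka-go/protocol/alteruserscramcredentials"
)

// upsertion builds a SCRAM-SHA-256 credential upsertion for a user, salting
// the password client-side as the broker expects.
func upsertion(user, password string) (alteruserscramcredentials.RequestUserScramCredentialsUpsertion, error) {
	salt := make([]byte, 16)
	if _, err := rand.Read(salt); err != nil {
		return alteruserscramcredentials.RequestUserScramCredentialsUpsertion{}, err
	}
	const iterations = 8192
	salted := pbkdf2.Key([]byte(password), salt, iterations, sha256.Size, sha256.New)
	return alteruserscramcredentials.RequestUserScramCredentialsUpsertion{
		Name:           user,
		Mechanism:      1, // assumed: 1 = SCRAM-SHA-256, 2 = SCRAM-SHA-512
		Iterations:     iterations,
		Salt:           salt,
		SaltedPassword: salted,
	}, nil
}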
diff -pruN 0.2.1-1.1/protocol/alteruserscramcredentials/alteruserscramcredentials_test.go 0.4.49+ds1-1/protocol/alteruserscramcredentials/alteruserscramcredentials_test.go
--- 0.2.1-1.1/protocol/alteruserscramcredentials/alteruserscramcredentials_test.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/protocol/alteruserscramcredentials/alteruserscramcredentials_test.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,45 @@
+package alteruserscramcredentials_test
+
+import (
+	"testing"
+
+	"github.com/segmentio/kafka-go/protocol/alteruserscramcredentials"
+	"github.com/segmentio/kafka-go/protocol/prototest"
+)
+
+const (
+	v0 = 0
+)
+
+func TestAlterUserScramCredentialsRequest(t *testing.T) {
+	prototest.TestRequest(t, v0, &alteruserscramcredentials.Request{
+		Deletions: []alteruserscramcredentials.RequestUserScramCredentialsDeletion{
+			{
+				Name:      "foo-1",
+				Mechanism: 1,
+			},
+		},
+		Upsertions: []alteruserscramcredentials.RequestUserScramCredentialsUpsertion{
+			{
+				Name:           "foo-2",
+				Mechanism:      2,
+				Iterations:     15000,
+				Salt:           []byte("my-salt"),
+				SaltedPassword: []byte("my-salted-password"),
+			},
+		},
+	})
+}
+
+func TestAlterUserScramCredentialsResponse(t *testing.T) {
+	prototest.TestResponse(t, v0, &alteruserscramcredentials.Response{
+		ThrottleTimeMs: 500,
+		Results: []alteruserscramcredentials.ResponseUserScramCredentials{
+			{
+				User:         "foo",
+				ErrorCode:    1,
+				ErrorMessage: "foo-error",
+			},
+		},
+	})
+}
diff -pruN 0.2.1-1.1/protocol/apiversions/apiversions.go 0.4.49+ds1-1/protocol/apiversions/apiversions.go
--- 0.2.1-1.1/protocol/apiversions/apiversions.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/protocol/apiversions/apiversions.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,27 @@
+package apiversions
+
+import "github.com/segmentio/kafka-go/protocol"
+
+func init() {
+	protocol.Register(&Request{}, &Response{})
+}
+
+type Request struct {
+	_ struct{} `kafka:"min=v0,max=v2"`
+}
+
+func (r *Request) ApiKey() protocol.ApiKey { return protocol.ApiVersions }
+
+type Response struct {
+	ErrorCode      int16            `kafka:"min=v0,max=v2"`
+	ApiKeys        []ApiKeyResponse `kafka:"min=v0,max=v2"`
+	ThrottleTimeMs int32            `kafka:"min=v1,max=v2"`
+}
+
+func (r *Response) ApiKey() protocol.ApiKey { return protocol.ApiVersions }
+
+type ApiKeyResponse struct {
+	ApiKey     int16 `kafka:"min=v0,max=v2"`
+	MinVersion int16 `kafka:"min=v0,max=v2"`
+	MaxVersion int16 `kafka:"min=v0,max=v2"`
+}
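The ApiVersions response is what clients use to negotiate a protocol version per API key. A small helper sketch (not part of the library) picking the highest version supported by both sides:

package apiversionsexample

import "github.com/segmentio/kafka-go/protocol/apiversions"

// negotiate returns the highest API version supported by both the client's
// range and the broker's ApiVersions entry; ok is false when the ranges do
// not overlap.
func negotiate(clientMin, clientMax int16, broker apiversions.ApiKeyResponse) (version int16, ok bool) {
	lo, hi := clientMin, clientMax
	if broker.MinVersion > lo {
		lo = broker.MinVersion
	}
	if broker.MaxVersion < hi {
		hi = broker.MaxVersion
	}
	return hi, hi >= lo
}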
diff -pruN 0.2.1-1.1/protocol/apiversions/apiversions_test.go 0.4.49+ds1-1/protocol/apiversions/apiversions_test.go
--- 0.2.1-1.1/protocol/apiversions/apiversions_test.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/protocol/apiversions/apiversions_test.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,59 @@
+package apiversions_test
+
+import (
+	"testing"
+
+	"github.com/segmentio/kafka-go/protocol/apiversions"
+	"github.com/segmentio/kafka-go/protocol/prototest"
+)
+
+const (
+	v0 = 0
+	v1 = 1
+	v2 = 2
+)
+
+func TestApiversionsRequest(t *testing.T) {
+	prototest.TestRequest(t, v0, &apiversions.Request{})
+
+	prototest.TestRequest(t, v1, &apiversions.Request{})
+
+	prototest.TestRequest(t, v2, &apiversions.Request{})
+}
+
+func TestApiversionsResponse(t *testing.T) {
+	prototest.TestResponse(t, v0, &apiversions.Response{
+		ErrorCode: 0,
+		ApiKeys: []apiversions.ApiKeyResponse{
+			{
+				ApiKey:     0,
+				MinVersion: 0,
+				MaxVersion: 2,
+			},
+		},
+	})
+
+	prototest.TestResponse(t, v1, &apiversions.Response{
+		ErrorCode: 0,
+		ApiKeys: []apiversions.ApiKeyResponse{
+			{
+				ApiKey:     0,
+				MinVersion: 0,
+				MaxVersion: 2,
+			},
+		},
+		ThrottleTimeMs: 10,
+	})
+
+	prototest.TestResponse(t, v2, &apiversions.Response{
+		ErrorCode: 0,
+		ApiKeys: []apiversions.ApiKeyResponse{
+			{
+				ApiKey:     0,
+				MinVersion: 0,
+				MaxVersion: 2,
+			},
+		},
+		ThrottleTimeMs: 50,
+	})
+}
diff -pruN 0.2.1-1.1/protocol/buffer.go 0.4.49+ds1-1/protocol/buffer.go
--- 0.2.1-1.1/protocol/buffer.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/protocol/buffer.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,634 @@
+package protocol
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"io"
+	"math"
+	"sync"
+	"sync/atomic"
+)
+
+// Bytes is an interface implemented by types that represent immutable
+// sequences of bytes.
+//
+// Bytes values are used to abstract the location where record keys and
+// values are read from (e.g. in-memory buffers, network sockets, files).
+//
+// The Close method should be called to release resources held by the object
+// when the program is done with it.
+//
+// Bytes values are generally not safe to use concurrently from multiple
+// goroutines.
+type Bytes interface {
+	io.ReadCloser
+	// Returns the number of bytes remaining to be read from the payload.
+	Len() int
+}
+
+// NewBytes constructs a Bytes value from b.
+//
+// The returned value references b, it does not make a copy of the backing
+// array.
+//
+// If b is nil, nil is returned to represent a null BYTES value in the kafka
+// protocol.
+func NewBytes(b []byte) Bytes {
+	if b == nil {
+		return nil
+	}
+	r := new(bytesReader)
+	r.Reset(b)
+	return r
+}
+
+// ReadAll is similar to ioutil.ReadAll, but it takes advantage of knowing the
+// length of b to minimize the memory footprint.
+//
+// The function returns a nil slice if b is nil.
+func ReadAll(b Bytes) ([]byte, error) {
+	if b == nil {
+		return nil, nil
+	}
+	s := make([]byte, b.Len())
+	_, err := io.ReadFull(b, s)
+	return s, err
+}
+
+type bytesReader struct{ bytes.Reader }
+
+func (*bytesReader) Close() error { return nil }
+
+type refCount uintptr
+
+func (rc *refCount) ref() { atomic.AddUintptr((*uintptr)(rc), 1) }
+
+func (rc *refCount) unref(onZero func()) {
+	if atomic.AddUintptr((*uintptr)(rc), ^uintptr(0)) == 0 {
+		onZero()
+	}
+}
+
+const (
+	// Size of the memory buffer for a single page. We use a farily
+	// large size here (64 KiB) because batches exchanged with kafka
+	// tend to be multiple kilobytes in size, sometimes hundreds.
+	// Using large pages amortizes the overhead of the page metadata
+	// and algorithms to manage the pages.
+	pageSize = 65536
+)
+
+type page struct {
+	refc   refCount
+	offset int64
+	length int
+	buffer *[pageSize]byte
+}
+
+func newPage(offset int64) *page {
+	p, _ := pagePool.Get().(*page)
+	if p != nil {
+		p.offset = offset
+		p.length = 0
+		p.ref()
+	} else {
+		p = &page{
+			refc:   1,
+			offset: offset,
+			buffer: &[pageSize]byte{},
+		}
+	}
+	return p
+}
+
+func (p *page) ref() { p.refc.ref() }
+
+func (p *page) unref() { p.refc.unref(func() { pagePool.Put(p) }) }
+
+func (p *page) slice(begin, end int64) []byte {
+	i, j := begin-p.offset, end-p.offset
+
+	if i < 0 {
+		i = 0
+	} else if i > pageSize {
+		i = pageSize
+	}
+
+	if j < 0 {
+		j = 0
+	} else if j > pageSize {
+		j = pageSize
+	}
+
+	if i < j {
+		return p.buffer[i:j]
+	}
+
+	return nil
+}
+
+func (p *page) Cap() int { return pageSize }
+
+func (p *page) Len() int { return p.length }
+
+func (p *page) Size() int64 { return int64(p.length) }
+
+func (p *page) Truncate(n int) {
+	if n < p.length {
+		p.length = n
+	}
+}
+
+func (p *page) ReadAt(b []byte, off int64) (int, error) {
+	if off -= p.offset; off < 0 || off > pageSize {
+		panic("offset out of range")
+	}
+	if off > int64(p.length) {
+		return 0, nil
+	}
+	return copy(b, p.buffer[off:p.length]), nil
+}
+
+func (p *page) ReadFrom(r io.Reader) (int64, error) {
+	n, err := io.ReadFull(r, p.buffer[p.length:])
+	if errors.Is(err, io.EOF) || errors.Is(err, io.ErrUnexpectedEOF) {
+		err = nil
+	}
+	p.length += n
+	return int64(n), err
+}
+
+func (p *page) WriteAt(b []byte, off int64) (int, error) {
+	if off -= p.offset; off < 0 || off > pageSize {
+		panic("offset out of range")
+	}
+	n := copy(p.buffer[off:], b)
+	if end := int(off) + n; end > p.length {
+		p.length = end
+	}
+	return n, nil
+}
+
+func (p *page) Write(b []byte) (int, error) {
+	return p.WriteAt(b, p.offset+int64(p.length))
+}
+
+var (
+	_ io.ReaderAt   = (*page)(nil)
+	_ io.ReaderFrom = (*page)(nil)
+	_ io.Writer     = (*page)(nil)
+	_ io.WriterAt   = (*page)(nil)
+)
+
+type pageBuffer struct {
+	refc   refCount
+	pages  contiguousPages
+	length int
+	cursor int
+}
+
+func newPageBuffer() *pageBuffer {
+	b, _ := pageBufferPool.Get().(*pageBuffer)
+	if b != nil {
+		b.cursor = 0
+		b.refc.ref()
+	} else {
+		b = &pageBuffer{
+			refc:  1,
+			pages: make(contiguousPages, 0, 16),
+		}
+	}
+	return b
+}
+
+func (pb *pageBuffer) refTo(ref *pageRef, begin, end int64) {
+	length := end - begin
+
+	if length > math.MaxUint32 {
+		panic("reference to contiguous buffer pages exceeds the maximum size of 4 GB")
+	}
+
+	ref.pages = append(ref.buffer[:0], pb.pages.slice(begin, end)...)
+	ref.pages.ref()
+	ref.offset = begin
+	ref.length = uint32(length)
+}
+
+func (pb *pageBuffer) ref(begin, end int64) *pageRef {
+	ref := new(pageRef)
+	pb.refTo(ref, begin, end)
+	return ref
+}
+
+func (pb *pageBuffer) unref() {
+	pb.refc.unref(func() {
+		pb.pages.unref()
+		pb.pages.clear()
+		pb.pages = pb.pages[:0]
+		pb.length = 0
+		pageBufferPool.Put(pb)
+	})
+}
+
+func (pb *pageBuffer) newPage() *page {
+	return newPage(int64(pb.length))
+}
+
+func (pb *pageBuffer) Close() error {
+	return nil
+}
+
+func (pb *pageBuffer) Len() int {
+	return pb.length - pb.cursor
+}
+
+func (pb *pageBuffer) Size() int64 {
+	return int64(pb.length)
+}
+
+func (pb *pageBuffer) Discard(n int) (int, error) {
+	remain := pb.length - pb.cursor
+	if remain < n {
+		n = remain
+	}
+	pb.cursor += n
+	return n, nil
+}
+
+func (pb *pageBuffer) Truncate(n int) {
+	if n < pb.length {
+		pb.length = n
+
+		if n < pb.cursor {
+			pb.cursor = n
+		}
+
+		for i := range pb.pages {
+			if p := pb.pages[i]; p.length <= n {
+				n -= p.length
+			} else {
+				if n > 0 {
+					pb.pages[i].Truncate(n)
+					i++
+				}
+				pb.pages[i:].unref()
+				pb.pages[i:].clear()
+				pb.pages = pb.pages[:i]
+				break
+			}
+		}
+	}
+}
+
+func (pb *pageBuffer) Seek(offset int64, whence int) (int64, error) {
+	c, err := seek(int64(pb.cursor), int64(pb.length), offset, whence)
+	if err != nil {
+		return -1, err
+	}
+	pb.cursor = int(c)
+	return c, nil
+}
+
+func (pb *pageBuffer) ReadByte() (byte, error) {
+	b := [1]byte{}
+	_, err := pb.Read(b[:])
+	return b[0], err
+}
+
+func (pb *pageBuffer) Read(b []byte) (int, error) {
+	if pb.cursor >= pb.length {
+		return 0, io.EOF
+	}
+	n, err := pb.ReadAt(b, int64(pb.cursor))
+	pb.cursor += n
+	return n, err
+}
+
+func (pb *pageBuffer) ReadAt(b []byte, off int64) (int, error) {
+	return pb.pages.ReadAt(b, off)
+}
+
+func (pb *pageBuffer) ReadFrom(r io.Reader) (int64, error) {
+	if len(pb.pages) == 0 {
+		pb.pages = append(pb.pages, pb.newPage())
+	}
+
+	rn := int64(0)
+
+	for {
+		tail := pb.pages[len(pb.pages)-1]
+		free := tail.Cap() - tail.Len()
+
+		if free == 0 {
+			tail = pb.newPage()
+			free = pageSize
+			pb.pages = append(pb.pages, tail)
+		}
+
+		n, err := tail.ReadFrom(r)
+		pb.length += int(n)
+		rn += n
+		if n < int64(free) {
+			return rn, err
+		}
+	}
+}
+
+func (pb *pageBuffer) WriteString(s string) (int, error) {
+	return pb.Write([]byte(s))
+}
+
+func (pb *pageBuffer) Write(b []byte) (int, error) {
+	wn := len(b)
+	if wn == 0 {
+		return 0, nil
+	}
+
+	if len(pb.pages) == 0 {
+		pb.pages = append(pb.pages, pb.newPage())
+	}
+
+	for len(b) != 0 {
+		tail := pb.pages[len(pb.pages)-1]
+		free := tail.Cap() - tail.Len()
+
+		if len(b) <= free {
+			tail.Write(b)
+			pb.length += len(b)
+			break
+		}
+
+		tail.Write(b[:free])
+		b = b[free:]
+
+		pb.length += free
+		pb.pages = append(pb.pages, pb.newPage())
+	}
+
+	return wn, nil
+}
+
+func (pb *pageBuffer) WriteAt(b []byte, off int64) (int, error) {
+	n, err := pb.pages.WriteAt(b, off)
+	if err != nil {
+		return n, err
+	}
+	if n < len(b) {
+		pb.Write(b[n:])
+	}
+	return len(b), nil
+}
+
+func (pb *pageBuffer) WriteTo(w io.Writer) (int64, error) {
+	var wn int
+	var err error
+	pb.pages.scan(int64(pb.cursor), int64(pb.length), func(b []byte) bool {
+		var n int
+		n, err = w.Write(b)
+		wn += n
+		return err == nil
+	})
+	pb.cursor += wn
+	return int64(wn), err
+}
+
+var (
+	_ io.ReaderAt     = (*pageBuffer)(nil)
+	_ io.ReaderFrom   = (*pageBuffer)(nil)
+	_ io.StringWriter = (*pageBuffer)(nil)
+	_ io.Writer       = (*pageBuffer)(nil)
+	_ io.WriterAt     = (*pageBuffer)(nil)
+	_ io.WriterTo     = (*pageBuffer)(nil)
+
+	pagePool       sync.Pool
+	pageBufferPool sync.Pool
+)
+
+type contiguousPages []*page
+
+func (pages contiguousPages) ref() {
+	for _, p := range pages {
+		p.ref()
+	}
+}
+
+func (pages contiguousPages) unref() {
+	for _, p := range pages {
+		p.unref()
+	}
+}
+
+func (pages contiguousPages) clear() {
+	for i := range pages {
+		pages[i] = nil
+	}
+}
+
+func (pages contiguousPages) ReadAt(b []byte, off int64) (int, error) {
+	rn := 0
+
+	for _, p := range pages.slice(off, off+int64(len(b))) {
+		n, _ := p.ReadAt(b, off)
+		b = b[n:]
+		rn += n
+		off += int64(n)
+	}
+
+	return rn, nil
+}
+
+func (pages contiguousPages) WriteAt(b []byte, off int64) (int, error) {
+	wn := 0
+
+	for _, p := range pages.slice(off, off+int64(len(b))) {
+		n, _ := p.WriteAt(b, off)
+		b = b[n:]
+		wn += n
+		off += int64(n)
+	}
+
+	return wn, nil
+}
+
+func (pages contiguousPages) slice(begin, end int64) contiguousPages {
+	i := pages.indexOf(begin)
+	j := pages.indexOf(end)
+	if j < len(pages) {
+		j++
+	}
+	return pages[i:j]
+}
+
+func (pages contiguousPages) indexOf(offset int64) int {
+	if len(pages) == 0 {
+		return 0
+	}
+	return int((offset - pages[0].offset) / pageSize)
+}
+
+func (pages contiguousPages) scan(begin, end int64, f func([]byte) bool) {
+	for _, p := range pages.slice(begin, end) {
+		if !f(p.slice(begin, end)) {
+			break
+		}
+	}
+}
+
+var (
+	_ io.ReaderAt = contiguousPages{}
+	_ io.WriterAt = contiguousPages{}
+)
+
+type pageRef struct {
+	buffer [2]*page
+	pages  contiguousPages
+	offset int64
+	cursor int64
+	length uint32
+	once   uint32
+}
+
+func (ref *pageRef) unref() {
+	if atomic.CompareAndSwapUint32(&ref.once, 0, 1) {
+		ref.pages.unref()
+		ref.pages.clear()
+		ref.pages = nil
+		ref.offset = 0
+		ref.cursor = 0
+		ref.length = 0
+	}
+}
+
+func (ref *pageRef) Len() int { return int(ref.Size() - ref.cursor) }
+
+func (ref *pageRef) Size() int64 { return int64(ref.length) }
+
+func (ref *pageRef) Close() error { ref.unref(); return nil }
+
+func (ref *pageRef) String() string {
+	return fmt.Sprintf("[offset=%d cursor=%d length=%d]", ref.offset, ref.cursor, ref.length)
+}
+
+func (ref *pageRef) Seek(offset int64, whence int) (int64, error) {
+	c, err := seek(ref.cursor, int64(ref.length), offset, whence)
+	if err != nil {
+		return -1, err
+	}
+	ref.cursor = c
+	return c, nil
+}
+
+func (ref *pageRef) ReadByte() (byte, error) {
+	var c byte
+	var ok bool
+	ref.scan(ref.cursor, func(b []byte) bool {
+		c, ok = b[0], true
+		return false
+	})
+	if ok {
+		ref.cursor++
+	} else {
+		return 0, io.EOF
+	}
+	return c, nil
+}
+
+func (ref *pageRef) Read(b []byte) (int, error) {
+	if ref.cursor >= int64(ref.length) {
+		return 0, io.EOF
+	}
+	n, err := ref.ReadAt(b, ref.cursor)
+	ref.cursor += int64(n)
+	return n, err
+}
+
+func (ref *pageRef) ReadAt(b []byte, off int64) (int, error) {
+	limit := ref.offset + int64(ref.length)
+	off += ref.offset
+
+	if off >= limit {
+		return 0, io.EOF
+	}
+
+	if off+int64(len(b)) > limit {
+		b = b[:limit-off]
+	}
+
+	if len(b) == 0 {
+		return 0, nil
+	}
+
+	n, err := ref.pages.ReadAt(b, off)
+	if n == 0 && err == nil {
+		err = io.EOF
+	}
+	return n, err
+}
+
+func (ref *pageRef) WriteTo(w io.Writer) (wn int64, err error) {
+	ref.scan(ref.cursor, func(b []byte) bool {
+		var n int
+		n, err = w.Write(b)
+		wn += int64(n)
+		return err == nil
+	})
+	ref.cursor += wn
+	return
+}
+
+func (ref *pageRef) scan(off int64, f func([]byte) bool) {
+	begin := ref.offset + off
+	end := ref.offset + int64(ref.length)
+	ref.pages.scan(begin, end, f)
+}
+
+var (
+	_ io.Closer   = (*pageRef)(nil)
+	_ io.Seeker   = (*pageRef)(nil)
+	_ io.Reader   = (*pageRef)(nil)
+	_ io.ReaderAt = (*pageRef)(nil)
+	_ io.WriterTo = (*pageRef)(nil)
+)
+
+type pageRefAllocator struct {
+	refs []pageRef
+	head int
+	size int
+}
+
+func (a *pageRefAllocator) newPageRef() *pageRef {
+	if a.head == len(a.refs) {
+		a.refs = make([]pageRef, a.size)
+		a.head = 0
+	}
+	ref := &a.refs[a.head]
+	a.head++
+	return ref
+}
+
+func seek(cursor, limit, offset int64, whence int) (int64, error) {
+	switch whence {
+	case io.SeekStart:
+		// absolute offset
+	case io.SeekCurrent:
+		offset = cursor + offset
+	case io.SeekEnd:
+		offset = limit - offset
+	default:
+		return -1, fmt.Errorf("seek: invalid whence value: %d", whence)
+	}
+	if offset < 0 {
+		offset = 0
+	}
+	if offset > limit {
+		offset = limit
+	}
+	return offset, nil
+}
+
+func closeBytes(b Bytes) {
+	if b != nil {
+		b.Close()
+	}
+}
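Note on the seek helper above: io.SeekCurrent is taken relative to the current cursor, io.SeekEnd is computed as limit - offset, and out-of-range results are clamped to [0, limit] instead of producing an error. A minimal in-package test sketch illustrating that behaviour (the test name and the concrete offsets are illustrative assumptions, not part of this patch):

package protocol

import (
	"io"
	"testing"
)

// TestSeekClamping is a hypothetical test illustrating the whence handling and
// clamping of the unexported seek helper defined in buffer.go.
func TestSeekClamping(t *testing.T) {
	const limit = 10

	// io.SeekStart treats the offset as an absolute position.
	if off, err := seek(0, limit, 4, io.SeekStart); err != nil || off != 4 {
		t.Fatalf("SeekStart: off=%d err=%v", off, err)
	}

	// io.SeekCurrent is relative to the current cursor (here 4).
	if off, err := seek(4, limit, 3, io.SeekCurrent); err != nil || off != 7 {
		t.Fatalf("SeekCurrent: off=%d err=%v", off, err)
	}

	// io.SeekEnd computes limit-offset, so 2 bytes before the end is 8.
	if off, err := seek(0, limit, 2, io.SeekEnd); err != nil || off != 8 {
		t.Fatalf("SeekEnd: off=%d err=%v", off, err)
	}

	// Out-of-range offsets are clamped to [0, limit] rather than rejected.
	if off, _ := seek(0, limit, 100, io.SeekStart); off != limit {
		t.Fatalf("expected clamp to limit, got %d", off)
	}
	if off, _ := seek(0, limit, -5, io.SeekStart); off != 0 {
		t.Fatalf("expected clamp to 0, got %d", off)
	}
}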
diff -pruN 0.2.1-1.1/protocol/buffer_test.go 0.4.49+ds1-1/protocol/buffer_test.go
--- 0.2.1-1.1/protocol/buffer_test.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/protocol/buffer_test.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,108 @@
+package protocol
+
+import (
+	"bytes"
+	"io"
+	"io/ioutil"
+	"testing"
+)
+
+func TestPageBufferWriteReadSeek(t *testing.T) {
+	buffer := newPageBuffer()
+	defer buffer.unref()
+
+	io.WriteString(buffer, "Hello World!")
+
+	if n := buffer.Size(); n != 12 {
+		t.Fatal("invalid size:", n)
+	}
+
+	for i := 0; i < 3; i++ {
+		if n := buffer.Len(); n != 12 {
+			t.Fatal("invalid length before read:", n)
+		}
+
+		b, err := ioutil.ReadAll(buffer)
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		if n := buffer.Len(); n != 0 {
+			t.Fatal("invalid length after read:", n)
+		}
+
+		if string(b) != "Hello World!" {
+			t.Fatalf("invalid content after read #%d: %q", i, b)
+		}
+
+		offset, err := buffer.Seek(0, io.SeekStart)
+		if err != nil {
+			t.Fatal(err)
+		}
+		if offset != 0 {
+			t.Fatalf("invalid offset after seek #%d: %d", i, offset)
+		}
+	}
+}
+
+func TestPageRefWriteReadSeek(t *testing.T) {
+	buffer := newPageBuffer()
+	defer buffer.unref()
+
+	io.WriteString(buffer, "Hello World!")
+
+	ref := buffer.ref(1, 11)
+	defer ref.unref()
+
+	if n := ref.Size(); n != 10 {
+		t.Fatal("invalid size:", n)
+	}
+
+	for i := 0; i < 3; i++ {
+		if n := ref.Len(); n != 10 {
+			t.Fatal("invalid length before read:", n)
+		}
+
+		b, err := ioutil.ReadAll(ref)
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		if n := ref.Len(); n != 0 {
+			t.Fatal("invalid length after read:", n)
+		}
+
+		if string(b) != "ello World" {
+			t.Fatalf("invalid content after read #%d: %q", i, b)
+		}
+
+		offset, err := ref.Seek(0, io.SeekStart)
+		if err != nil {
+			t.Fatal(err)
+		}
+		if offset != 0 {
+			t.Fatalf("invalid offset after seek #%d: %d", i, offset)
+		}
+	}
+}
+
+func TestPageRefReadByte(t *testing.T) {
+	buffer := newPageBuffer()
+	defer buffer.unref()
+
+	content := bytes.Repeat([]byte("1234567890"), 10e3)
+	buffer.Write(content)
+
+	ref := buffer.ref(0, buffer.Size())
+	defer ref.unref()
+
+	for i, c := range content {
+		b, err := ref.ReadByte()
+		if err != nil {
+			t.Fatal(err)
+		}
+		if b != c {
+			t.Fatalf("byte at offset %d mismatch, expected '%c' but got '%c'", i, c, b)
+		}
+	}
+}
diff -pruN 0.2.1-1.1/protocol/cluster.go 0.4.49+ds1-1/protocol/cluster.go
--- 0.2.1-1.1/protocol/cluster.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/protocol/cluster.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,143 @@
+package protocol
+
+import (
+	"fmt"
+	"sort"
+	"strings"
+	"text/tabwriter"
+)
+
+type Cluster struct {
+	ClusterID  string
+	Controller int32
+	Brokers    map[int32]Broker
+	Topics     map[string]Topic
+}
+
+func (c Cluster) BrokerIDs() []int32 {
+	brokerIDs := make([]int32, 0, len(c.Brokers))
+	for id := range c.Brokers {
+		brokerIDs = append(brokerIDs, id)
+	}
+	sort.Slice(brokerIDs, func(i, j int) bool {
+		return brokerIDs[i] < brokerIDs[j]
+	})
+	return brokerIDs
+}
+
+func (c Cluster) TopicNames() []string {
+	topicNames := make([]string, 0, len(c.Topics))
+	for name := range c.Topics {
+		topicNames = append(topicNames, name)
+	}
+	sort.Strings(topicNames)
+	return topicNames
+}
+
+func (c Cluster) IsZero() bool {
+	return c.ClusterID == "" && c.Controller == 0 && len(c.Brokers) == 0 && len(c.Topics) == 0
+}
+
+func (c Cluster) Format(w fmt.State, _ rune) {
+	tw := new(tabwriter.Writer)
+	fmt.Fprintf(w, "CLUSTER: %q\n\n", c.ClusterID)
+
+	tw.Init(w, 0, 8, 2, ' ', 0)
+	fmt.Fprint(tw, "  BROKER\tHOST\tPORT\tRACK\tCONTROLLER\n")
+
+	for _, id := range c.BrokerIDs() {
+		broker := c.Brokers[id]
+		fmt.Fprintf(tw, "  %d\t%s\t%d\t%s\t%t\n", broker.ID, broker.Host, broker.Port, broker.Rack, broker.ID == c.Controller)
+	}
+
+	tw.Flush()
+	fmt.Fprintln(w)
+
+	tw.Init(w, 0, 8, 2, ' ', 0)
+	fmt.Fprint(tw, "  TOPIC\tPARTITIONS\tBROKERS\n")
+	topicNames := c.TopicNames()
+	brokers := make(map[int32]struct{}, len(c.Brokers))
+	brokerIDs := make([]int32, 0, len(c.Brokers))
+
+	for _, name := range topicNames {
+		topic := c.Topics[name]
+
+		for _, p := range topic.Partitions {
+			for _, id := range p.Replicas {
+				brokers[id] = struct{}{}
+			}
+		}
+
+		for id := range brokers {
+			brokerIDs = append(brokerIDs, id)
+		}
+
+		fmt.Fprintf(tw, "  %s\t%d\t%s\n", topic.Name, len(topic.Partitions), formatBrokerIDs(brokerIDs, -1))
+
+		for id := range brokers {
+			delete(brokers, id)
+		}
+
+		brokerIDs = brokerIDs[:0]
+	}
+
+	tw.Flush()
+	fmt.Fprintln(w)
+
+	if w.Flag('+') {
+		for _, name := range topicNames {
+			fmt.Fprintf(w, "  TOPIC: %q\n\n", name)
+
+			tw.Init(w, 0, 8, 2, ' ', 0)
+			fmt.Fprint(tw, "    PARTITION\tREPLICAS\tISR\tOFFLINE\n")
+
+			for _, p := range c.Topics[name].Partitions {
+				fmt.Fprintf(tw, "    %d\t%s\t%s\t%s\n", p.ID,
+					formatBrokerIDs(p.Replicas, -1),
+					formatBrokerIDs(p.ISR, p.Leader),
+					formatBrokerIDs(p.Offline, -1),
+				)
+			}
+
+			tw.Flush()
+			fmt.Fprintln(w)
+		}
+	}
+}
+
+func formatBrokerIDs(brokerIDs []int32, leader int32) string {
+	if len(brokerIDs) == 0 {
+		return ""
+	}
+
+	if len(brokerIDs) == 1 {
+		return itoa(brokerIDs[0])
+	}
+
+	sort.Slice(brokerIDs, func(i, j int) bool {
+		id1 := brokerIDs[i]
+		id2 := brokerIDs[j]
+
+		if id1 == leader {
+			return true
+		}
+
+		if id2 == leader {
+			return false
+		}
+
+		return id1 < id2
+	})
+
+	brokerNames := make([]string, len(brokerIDs))
+
+	for i, id := range brokerIDs {
+		brokerNames[i] = itoa(id)
+	}
+
+	return strings.Join(brokerNames, ",")
+}
+
+var (
+	_ fmt.Formatter = Cluster{}
+)
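The formatBrokerIDs helper above orders broker IDs ascending but moves the leader, when one is given, to the front; that is what keeps the ISR column of Cluster.Format readable. A hedged, in-package test sketch (not part of the patch) showing the expected ordering:

package protocol

import "testing"

// TestFormatBrokerIDs is an illustrative test showing the ordering produced by
// formatBrokerIDs: the leader first, then the remaining IDs ascending.
func TestFormatBrokerIDs(t *testing.T) {
	// No leader highlighted: plain ascending order.
	if s := formatBrokerIDs([]int32{3, 1, 2}, -1); s != "1,2,3" {
		t.Fatalf("unexpected ordering: %q", s)
	}

	// Broker 2 is the leader, so it is listed first.
	if s := formatBrokerIDs([]int32{3, 1, 2}, 2); s != "2,1,3" {
		t.Fatalf("unexpected ordering with leader: %q", s)
	}

	// Single-element and empty inputs are returned without joining.
	if s := formatBrokerIDs([]int32{7}, -1); s != "7" {
		t.Fatalf("unexpected single-element result: %q", s)
	}
	if s := formatBrokerIDs(nil, -1); s != "" {
		t.Fatalf("expected empty string for no brokers, got %q", s)
	}
}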
diff -pruN 0.2.1-1.1/protocol/conn.go 0.4.49+ds1-1/protocol/conn.go
--- 0.2.1-1.1/protocol/conn.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/protocol/conn.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,100 @@
+package protocol
+
+import (
+	"bufio"
+	"fmt"
+	"net"
+	"sync/atomic"
+	"time"
+)
+
+type Conn struct {
+	buffer   *bufio.Reader
+	conn     net.Conn
+	clientID string
+	idgen    int32
+	versions atomic.Value // map[ApiKey]int16
+}
+
+func NewConn(conn net.Conn, clientID string) *Conn {
+	return &Conn{
+		buffer:   bufio.NewReader(conn),
+		conn:     conn,
+		clientID: clientID,
+	}
+}
+
+func (c *Conn) String() string {
+	return fmt.Sprintf("kafka://%s@%s->%s", c.clientID, c.LocalAddr(), c.RemoteAddr())
+}
+
+func (c *Conn) Close() error {
+	return c.conn.Close()
+}
+
+func (c *Conn) Discard(n int) (int, error) {
+	return c.buffer.Discard(n)
+}
+
+func (c *Conn) Peek(n int) ([]byte, error) {
+	return c.buffer.Peek(n)
+}
+
+func (c *Conn) Read(b []byte) (int, error) {
+	return c.buffer.Read(b)
+}
+
+func (c *Conn) Write(b []byte) (int, error) {
+	return c.conn.Write(b)
+}
+
+func (c *Conn) LocalAddr() net.Addr {
+	return c.conn.LocalAddr()
+}
+
+func (c *Conn) RemoteAddr() net.Addr {
+	return c.conn.RemoteAddr()
+}
+
+func (c *Conn) SetDeadline(t time.Time) error {
+	return c.conn.SetDeadline(t)
+}
+
+func (c *Conn) SetReadDeadline(t time.Time) error {
+	return c.conn.SetReadDeadline(t)
+}
+
+func (c *Conn) SetWriteDeadline(t time.Time) error {
+	return c.conn.SetWriteDeadline(t)
+}
+
+func (c *Conn) SetVersions(versions map[ApiKey]int16) {
+	connVersions := make(map[ApiKey]int16, len(versions))
+
+	for k, v := range versions {
+		connVersions[k] = v
+	}
+
+	c.versions.Store(connVersions)
+}
+
+func (c *Conn) RoundTrip(msg Message) (Message, error) {
+	correlationID := atomic.AddInt32(&c.idgen, +1)
+	versions, _ := c.versions.Load().(map[ApiKey]int16)
+	apiVersion := versions[msg.ApiKey()]
+
+	if p, _ := msg.(PreparedMessage); p != nil {
+		p.Prepare(apiVersion)
+	}
+
+	if raw, ok := msg.(RawExchanger); ok && raw.Required(versions) {
+		return raw.RawExchange(c)
+	}
+
+	return RoundTrip(c, apiVersion, correlationID, c.clientID, msg)
+}
+
+var (
+	_ net.Conn       = (*Conn)(nil)
+	_ bufferedReader = (*Conn)(nil)
+)
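Conn wraps a net.Conn with a buffered reader, a correlation-ID counter and the per-API versions negotiated for the connection; RoundTrip encodes each request with the version stored for its ApiKey (version 0 when none was set), giving PreparedMessage and RawExchanger implementations a chance to intervene first. A hedged usage sketch follows: the broker address, client ID and hard-coded version map are placeholder assumptions, and error handling is reduced to panics for brevity.

package main

import (
	"fmt"
	"net"

	"github.com/segmentio/kafka-go/protocol"
	"github.com/segmentio/kafka-go/protocol/deletetopics"
)

func main() {
	// Placeholder address; a real program would make this configurable.
	nc, err := net.Dial("tcp", "localhost:9092")
	if err != nil {
		panic(err)
	}

	pc := protocol.NewConn(nc, "example-client")
	defer pc.Close()

	// Normally these versions come from an ApiVersions exchange; the value
	// below is an assumption made for the sake of the example.
	pc.SetVersions(map[protocol.ApiKey]int16{
		protocol.DeleteTopics: 3,
	})

	res, err := pc.RoundTrip(&deletetopics.Request{
		TopicNames: []string{"example-topic"},
		TimeoutMs:  500,
	})
	if err != nil {
		panic(err)
	}
	fmt.Printf("%T\n", res)
}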
diff -pruN 0.2.1-1.1/protocol/consumer/consumer.go 0.4.49+ds1-1/protocol/consumer/consumer.go
--- 0.2.1-1.1/protocol/consumer/consumer.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/protocol/consumer/consumer.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,21 @@
+package consumer
+
+const MaxVersionSupported = 1
+
+type Subscription struct {
+	Version         int16            `kafka:"min=v0,max=v1"`
+	Topics          []string         `kafka:"min=v0,max=v1"`
+	UserData        []byte           `kafka:"min=v0,max=v1,nullable"`
+	OwnedPartitions []TopicPartition `kafka:"min=v1,max=v1"`
+}
+
+type Assignment struct {
+	Version            int16            `kafka:"min=v0,max=v1"`
+	AssignedPartitions []TopicPartition `kafka:"min=v0,max=v1"`
+	UserData           []byte           `kafka:"min=v0,max=v1,nullable"`
+}
+
+type TopicPartition struct {
+	Topic      string  `kafka:"min=v0,max=v1"`
+	Partitions []int32 `kafka:"min=v0,max=v1"`
+}
diff -pruN 0.2.1-1.1/protocol/consumer/consumer_test.go 0.4.49+ds1-1/protocol/consumer/consumer_test.go
--- 0.2.1-1.1/protocol/consumer/consumer_test.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/protocol/consumer/consumer_test.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,40 @@
+package consumer_test
+
+import (
+	"reflect"
+	"testing"
+
+	"github.com/segmentio/kafka-go/protocol"
+	"github.com/segmentio/kafka-go/protocol/consumer"
+)
+
+func TestSubscription(t *testing.T) {
+	subscription := consumer.Subscription{
+		Topics:   []string{"topic-1", "topic-2"},
+		UserData: []byte("user-data"),
+		OwnedPartitions: []consumer.TopicPartition{
+			{
+				Topic:      "topic-1",
+				Partitions: []int32{1, 2, 3},
+			},
+		},
+	}
+
+	for _, version := range []int16{1, 0} {
+		if version == 0 {
+			subscription.OwnedPartitions = nil
+		}
+		data, err := protocol.Marshal(version, subscription)
+		if err != nil {
+			t.Fatal(err)
+		}
+		var gotSubscription consumer.Subscription
+		err = protocol.Unmarshal(data, version, &gotSubscription)
+		if err != nil {
+			t.Fatal(err)
+		}
+		if !reflect.DeepEqual(subscription, gotSubscription) {
+			t.Fatalf("unexpected result after marshal/unmarshal \nexpected\n %#v\ngot\n %#v", subscription, gotSubscription)
+		}
+	}
+}
diff -pruN 0.2.1-1.1/protocol/createacls/createacls.go 0.4.49+ds1-1/protocol/createacls/createacls.go
--- 0.2.1-1.1/protocol/createacls/createacls.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/protocol/createacls/createacls.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,57 @@
+package createacls
+
+import "github.com/segmentio/kafka-go/protocol"
+
+func init() {
+	protocol.Register(&Request{}, &Response{})
+}
+
+type Request struct {
+	// We need at least one tagged field to indicate that v2+ uses "flexible"
+	// messages.
+	_ struct{} `kafka:"min=v2,max=v3,tag"`
+
+	Creations []RequestACLs `kafka:"min=v0,max=v3"`
+}
+
+func (r *Request) ApiKey() protocol.ApiKey { return protocol.CreateAcls }
+
+func (r *Request) Broker(cluster protocol.Cluster) (protocol.Broker, error) {
+	return cluster.Brokers[cluster.Controller], nil
+}
+
+type RequestACLs struct {
+	// We need at least one tagged field to indicate that v2+ uses "flexible"
+	// messages.
+	_ struct{} `kafka:"min=v2,max=v3,tag"`
+
+	ResourceType        int8   `kafka:"min=v0,max=v3"`
+	ResourceName        string `kafka:"min=v0,max=v1|min=v2,max=v3,compact"`
+	ResourcePatternType int8   `kafka:"min=v1,max=v3"`
+	Principal           string `kafka:"min=v0,max=v1|min=v2,max=v3,compact"`
+	Host                string `kafka:"min=v0,max=v1|min=v2,max=v3,compact"`
+	Operation           int8   `kafka:"min=v0,max=v3"`
+	PermissionType      int8   `kafka:"min=v0,max=v3"`
+}
+
+type Response struct {
+	// We need at least one tagged field to indicate that v2+ uses "flexible"
+	// messages.
+	_ struct{} `kafka:"min=v2,max=v3,tag"`
+
+	ThrottleTimeMs int32          `kafka:"min=v0,max=v3"`
+	Results        []ResponseACLs `kafka:"min=v0,max=v3"`
+}
+
+func (r *Response) ApiKey() protocol.ApiKey { return protocol.CreateAcls }
+
+type ResponseACLs struct {
+	// We need at least one tagged field to indicate that v2+ uses "flexible"
+	// messages.
+	_ struct{} `kafka:"min=v2,max=v3,tag"`
+
+	ErrorCode    int16  `kafka:"min=v0,max=v3"`
+	ErrorMessage string `kafka:"min=v0,max=v1,nullable|min=v2,max=v3,nullable,compact"`
+}
+
+var _ protocol.BrokerMessage = (*Request)(nil)
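The kafka struct tags used throughout these message definitions describe, per field, the protocol versions in which the field exists and how it is encoded: min/max bound the versions, nullable and compact select the wire encoding, a pipe separates alternative encodings for different version ranges, and a blank struct{} field marked tag switches the message to the tagged ("flexible") format from that version on. An illustrative definition combining those pieces (invented for this note, not taken from the patch):

// Illustrative only: a message definition showing the tag notation used by the
// protocol package. Field names and version ranges are invented.
type ExampleRequest struct {
	// Blank tagged field: versions v2 and above use the "flexible" wire
	// format with a trailing tag buffer.
	_ struct{} `kafka:"min=v2,max=v3,tag"`

	// Present in every version, always encoded the same way.
	Name string `kafka:"min=v0,max=v3"`

	// Classic nullable string in v0-v1, compact nullable string in v2-v3.
	Note string `kafka:"min=v0,max=v1,nullable|min=v2,max=v3,nullable,compact"`

	// Field that only exists from v1 onwards.
	Flags int8 `kafka:"min=v1,max=v3"`
}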
diff -pruN 0.2.1-1.1/protocol/createacls/createacls_test.go 0.4.49+ds1-1/protocol/createacls/createacls_test.go
--- 0.2.1-1.1/protocol/createacls/createacls_test.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/protocol/createacls/createacls_test.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,115 @@
+package createacls_test
+
+import (
+	"testing"
+
+	"github.com/segmentio/kafka-go/protocol/createacls"
+	"github.com/segmentio/kafka-go/protocol/prototest"
+)
+
+const (
+	v0 = 0
+	v1 = 1
+	v2 = 2
+	v3 = 3
+)
+
+func TestCreateACLsRequest(t *testing.T) {
+	prototest.TestRequest(t, v0, &createacls.Request{
+		Creations: []createacls.RequestACLs{
+			{
+				Principal:      "User:alice",
+				PermissionType: 3,
+				Operation:      3,
+				ResourceType:   2,
+				ResourceName:   "fake-topic-for-alice",
+				Host:           "*",
+			},
+		},
+	})
+
+	prototest.TestRequest(t, v1, &createacls.Request{
+		Creations: []createacls.RequestACLs{
+			{
+				Principal:           "User:alice",
+				PermissionType:      3,
+				Operation:           3,
+				ResourceType:        2,
+				ResourcePatternType: 3,
+				ResourceName:        "fake-topic-for-alice",
+				Host:                "*",
+			},
+		},
+	})
+
+	prototest.TestRequest(t, v2, &createacls.Request{
+		Creations: []createacls.RequestACLs{
+			{
+				Principal:           "User:alice",
+				PermissionType:      3,
+				Operation:           3,
+				ResourceType:        2,
+				ResourcePatternType: 3,
+				ResourceName:        "fake-topic-for-alice",
+				Host:                "*",
+			},
+		},
+	})
+
+	prototest.TestRequest(t, v3, &createacls.Request{
+		Creations: []createacls.RequestACLs{
+			{
+				Principal:           "User:alice",
+				PermissionType:      3,
+				Operation:           3,
+				ResourceType:        2,
+				ResourcePatternType: 3,
+				ResourceName:        "fake-topic-for-alice",
+				Host:                "*",
+			},
+		},
+	})
+}
+
+func TestCreateACLsResponse(t *testing.T) {
+	prototest.TestResponse(t, v0, &createacls.Response{
+		ThrottleTimeMs: 1,
+		Results: []createacls.ResponseACLs{
+			{
+				ErrorCode:    1,
+				ErrorMessage: "foo",
+			},
+		},
+	})
+
+	prototest.TestResponse(t, v1, &createacls.Response{
+		ThrottleTimeMs: 1,
+		Results: []createacls.ResponseACLs{
+			{
+				ErrorCode:    1,
+				ErrorMessage: "foo",
+			},
+		},
+	})
+
+	prototest.TestResponse(t, v2, &createacls.Response{
+		ThrottleTimeMs: 1,
+		Results: []createacls.ResponseACLs{
+			{
+				ErrorCode:    1,
+				ErrorMessage: "foo",
+			},
+		},
+	})
+
+	prototest.TestResponse(t, v3, &createacls.Response{
+		ThrottleTimeMs: 1,
+		Results: []createacls.ResponseACLs{
+			{
+				ErrorCode:    1,
+				ErrorMessage: "foo",
+			},
+		},
+	})
+
+}
diff -pruN 0.2.1-1.1/protocol/createpartitions/createpartitions.go 0.4.49+ds1-1/protocol/createpartitions/createpartitions.go
--- 0.2.1-1.1/protocol/createpartitions/createpartitions.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/protocol/createpartitions/createpartitions.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,46 @@
+package createpartitions
+
+import "github.com/segmentio/kafka-go/protocol"
+
+func init() {
+	protocol.Register(&Request{}, &Response{})
+}
+
+// Detailed API definition: https://kafka.apache.org/protocol#The_Messages_CreatePartitions.
+// TODO: Support version 2.
+type Request struct {
+	Topics       []RequestTopic `kafka:"min=v0,max=v1"`
+	TimeoutMs    int32          `kafka:"min=v0,max=v1"`
+	ValidateOnly bool           `kafka:"min=v0,max=v1"`
+}
+
+func (r *Request) ApiKey() protocol.ApiKey { return protocol.CreatePartitions }
+
+func (r *Request) Broker(cluster protocol.Cluster) (protocol.Broker, error) {
+	return cluster.Brokers[cluster.Controller], nil
+}
+
+type RequestTopic struct {
+	Name        string              `kafka:"min=v0,max=v1"`
+	Count       int32               `kafka:"min=v0,max=v1"`
+	Assignments []RequestAssignment `kafka:"min=v0,max=v1,nullable"`
+}
+
+type RequestAssignment struct {
+	BrokerIDs []int32 `kafka:"min=v0,max=v1"`
+}
+
+type Response struct {
+	ThrottleTimeMs int32            `kafka:"min=v0,max=v1"`
+	Results        []ResponseResult `kafka:"min=v0,max=v1"`
+}
+
+func (r *Response) ApiKey() protocol.ApiKey { return protocol.CreatePartitions }
+
+type ResponseResult struct {
+	Name         string `kafka:"min=v0,max=v1"`
+	ErrorCode    int16  `kafka:"min=v0,max=v1"`
+	ErrorMessage string `kafka:"min=v0,max=v1,nullable"`
+}
+
+var _ protocol.BrokerMessage = (*Request)(nil)
diff -pruN 0.2.1-1.1/protocol/createpartitions/createpartitions_test.go 0.4.49+ds1-1/protocol/createpartitions/createpartitions_test.go
--- 0.2.1-1.1/protocol/createpartitions/createpartitions_test.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/protocol/createpartitions/createpartitions_test.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,71 @@
+package createpartitions_test
+
+import (
+	"testing"
+
+	"github.com/segmentio/kafka-go/protocol/createpartitions"
+	"github.com/segmentio/kafka-go/protocol/prototest"
+)
+
+const (
+	v0 = 0
+	v1 = 1
+)
+
+func TestCreatePartitionsRequest(t *testing.T) {
+	prototest.TestRequest(t, v0, &createpartitions.Request{
+		Topics: []createpartitions.RequestTopic{
+			{
+				Name:  "foo",
+				Count: 1,
+				Assignments: []createpartitions.RequestAssignment{
+					{
+						BrokerIDs: []int32{1, 2, 3},
+					},
+				},
+			},
+		},
+		TimeoutMs:    500,
+		ValidateOnly: false,
+	})
+
+	prototest.TestRequest(t, v1, &createpartitions.Request{
+		Topics: []createpartitions.RequestTopic{
+			{
+				Name:  "foo",
+				Count: 1,
+				Assignments: []createpartitions.RequestAssignment{
+					{
+						BrokerIDs: []int32{1, 2, 3},
+					},
+				},
+			},
+		},
+		TimeoutMs:    500,
+		ValidateOnly: false,
+	})
+}
+
+func TestCreatePartitionsResponse(t *testing.T) {
+	prototest.TestResponse(t, v0, &createpartitions.Response{
+		ThrottleTimeMs: 500,
+		Results: []createpartitions.ResponseResult{
+			{
+				Name:         "foo",
+				ErrorCode:    1,
+				ErrorMessage: "foo",
+			},
+		},
+	})
+
+	prototest.TestResponse(t, v1, &createpartitions.Response{
+		ThrottleTimeMs: 500,
+		Results: []createpartitions.ResponseResult{
+			{
+				Name:         "foo",
+				ErrorCode:    1,
+				ErrorMessage: "foo",
+			},
+		},
+	})
+}
diff -pruN 0.2.1-1.1/protocol/createtopics/createtopics.go 0.4.49+ds1-1/protocol/createtopics/createtopics.go
--- 0.2.1-1.1/protocol/createtopics/createtopics.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/protocol/createtopics/createtopics.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,74 @@
+package createtopics
+
+import "github.com/segmentio/kafka-go/protocol"
+
+func init() {
+	protocol.Register(&Request{}, &Response{})
+}
+
+type Request struct {
+	// We need at least one tagged field to indicate that v5+ uses "flexible"
+	// messages.
+	_ struct{} `kafka:"min=v5,max=v5,tag"`
+
+	Topics       []RequestTopic `kafka:"min=v0,max=v5"`
+	TimeoutMs    int32          `kafka:"min=v0,max=v5"`
+	ValidateOnly bool           `kafka:"min=v1,max=v5"`
+}
+
+func (r *Request) ApiKey() protocol.ApiKey { return protocol.CreateTopics }
+
+func (r *Request) Broker(cluster protocol.Cluster) (protocol.Broker, error) {
+	return cluster.Brokers[cluster.Controller], nil
+}
+
+type RequestTopic struct {
+	Name              string              `kafka:"min=v0,max=v5"`
+	NumPartitions     int32               `kafka:"min=v0,max=v5"`
+	ReplicationFactor int16               `kafka:"min=v0,max=v5"`
+	Assignments       []RequestAssignment `kafka:"min=v0,max=v5"`
+	Configs           []RequestConfig     `kafka:"min=v0,max=v5"`
+}
+
+type RequestAssignment struct {
+	PartitionIndex int32   `kafka:"min=v0,max=v5"`
+	BrokerIDs      []int32 `kafka:"min=v0,max=v5"`
+}
+
+type RequestConfig struct {
+	Name  string `kafka:"min=v0,max=v5"`
+	Value string `kafka:"min=v0,max=v5,nullable"`
+}
+
+type Response struct {
+	// We need at least one tagged field to indicate that v5+ uses "flexible"
+	// messages.
+	_ struct{} `kafka:"min=v5,max=v5,tag"`
+
+	ThrottleTimeMs int32           `kafka:"min=v2,max=v5"`
+	Topics         []ResponseTopic `kafka:"min=v0,max=v5"`
+}
+
+func (r *Response) ApiKey() protocol.ApiKey { return protocol.CreateTopics }
+
+type ResponseTopic struct {
+	Name              string `kafka:"min=v0,max=v5"`
+	ErrorCode         int16  `kafka:"min=v0,max=v5"`
+	ErrorMessage      string `kafka:"min=v1,max=v5,nullable"`
+	NumPartitions     int32  `kafka:"min=v5,max=v5"`
+	ReplicationFactor int16  `kafka:"min=v5,max=v5"`
+
+	Configs []ResponseTopicConfig `kafka:"min=v5,max=v5"`
+}
+
+type ResponseTopicConfig struct {
+	Name         string `kafka:"min=v5,max=v5"`
+	Value        string `kafka:"min=v5,max=v5,nullable"`
+	ReadOnly     bool   `kafka:"min=v5,max=v5"`
+	ConfigSource int8   `kafka:"min=v5,max=v5"`
+	IsSensitive  bool   `kafka:"min=v5,max=v5"`
+}
+
+var (
+	_ protocol.BrokerMessage = (*Request)(nil)
+)
diff -pruN 0.2.1-1.1/protocol/decode.go 0.4.49+ds1-1/protocol/decode.go
--- 0.2.1-1.1/protocol/decode.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/protocol/decode.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,537 @@
+package protocol
+
+import (
+	"bytes"
+	"encoding/binary"
+	"fmt"
+	"hash/crc32"
+	"io"
+	"io/ioutil"
+	"math"
+	"reflect"
+	"sync"
+	"sync/atomic"
+)
+
+type discarder interface {
+	Discard(int) (int, error)
+}
+
+type decoder struct {
+	reader io.Reader
+	remain int
+	buffer [8]byte
+	err    error
+	table  *crc32.Table
+	crc32  uint32
+}
+
+func (d *decoder) Reset(r io.Reader, n int) {
+	d.reader = r
+	d.remain = n
+	d.buffer = [8]byte{}
+	d.err = nil
+	d.table = nil
+	d.crc32 = 0
+}
+
+func (d *decoder) Read(b []byte) (int, error) {
+	if d.err != nil {
+		return 0, d.err
+	}
+	if d.remain == 0 {
+		return 0, io.EOF
+	}
+	if len(b) > d.remain {
+		b = b[:d.remain]
+	}
+	n, err := d.reader.Read(b)
+	if n > 0 && d.table != nil {
+		d.crc32 = crc32.Update(d.crc32, d.table, b[:n])
+	}
+	d.remain -= n
+	return n, err
+}
+
+func (d *decoder) ReadByte() (byte, error) {
+	c := d.readByte()
+	return c, d.err
+}
+
+func (d *decoder) done() bool {
+	return d.remain == 0 || d.err != nil
+}
+
+func (d *decoder) setCRC(table *crc32.Table) {
+	d.table, d.crc32 = table, 0
+}
+
+func (d *decoder) decodeBool(v value) {
+	v.setBool(d.readBool())
+}
+
+func (d *decoder) decodeInt8(v value) {
+	v.setInt8(d.readInt8())
+}
+
+func (d *decoder) decodeInt16(v value) {
+	v.setInt16(d.readInt16())
+}
+
+func (d *decoder) decodeInt32(v value) {
+	v.setInt32(d.readInt32())
+}
+
+func (d *decoder) decodeInt64(v value) {
+	v.setInt64(d.readInt64())
+}
+
+func (d *decoder) decodeFloat64(v value) {
+	v.setFloat64(d.readFloat64())
+}
+
+func (d *decoder) decodeString(v value) {
+	v.setString(d.readString())
+}
+
+func (d *decoder) decodeCompactString(v value) {
+	v.setString(d.readCompactString())
+}
+
+func (d *decoder) decodeBytes(v value) {
+	v.setBytes(d.readBytes())
+}
+
+func (d *decoder) decodeCompactBytes(v value) {
+	v.setBytes(d.readCompactBytes())
+}
+
+func (d *decoder) decodeArray(v value, elemType reflect.Type, decodeElem decodeFunc) {
+	if n := d.readInt32(); n < 0 {
+		v.setArray(array{})
+	} else {
+		a := makeArray(elemType, int(n))
+		for i := 0; i < int(n) && d.remain > 0; i++ {
+			decodeElem(d, a.index(i))
+		}
+		v.setArray(a)
+	}
+}
+
+func (d *decoder) decodeCompactArray(v value, elemType reflect.Type, decodeElem decodeFunc) {
+	if n := d.readUnsignedVarInt(); n < 1 {
+		v.setArray(array{})
+	} else {
+		a := makeArray(elemType, int(n-1))
+		for i := 0; i < int(n-1) && d.remain > 0; i++ {
+			decodeElem(d, a.index(i))
+		}
+		v.setArray(a)
+	}
+}
+
+func (d *decoder) discardAll() {
+	d.discard(d.remain)
+}
+
+func (d *decoder) discard(n int) {
+	if n > d.remain {
+		n = d.remain
+	}
+	var err error
+	if r, _ := d.reader.(discarder); r != nil {
+		n, err = r.Discard(n)
+		d.remain -= n
+	} else {
+		_, err = io.Copy(ioutil.Discard, d)
+	}
+	d.setError(err)
+}
+
+func (d *decoder) read(n int) []byte {
+	b := make([]byte, n)
+	n, err := io.ReadFull(d, b)
+	b = b[:n]
+	d.setError(err)
+	return b
+}
+
+func (d *decoder) writeTo(w io.Writer, n int) {
+	limit := d.remain
+	if n < limit {
+		d.remain = n
+	}
+	c, err := io.Copy(w, d)
+	if int(c) < n && err == nil {
+		err = io.ErrUnexpectedEOF
+	}
+	d.remain = limit - int(c)
+	d.setError(err)
+}
+
+func (d *decoder) setError(err error) {
+	if d.err == nil && err != nil {
+		d.err = err
+		d.discardAll()
+	}
+}
+
+func (d *decoder) readFull(b []byte) bool {
+	n, err := io.ReadFull(d, b)
+	d.setError(err)
+	return n == len(b)
+}
+
+func (d *decoder) readByte() byte {
+	if d.readFull(d.buffer[:1]) {
+		return d.buffer[0]
+	}
+	return 0
+}
+
+func (d *decoder) readBool() bool {
+	return d.readByte() != 0
+}
+
+func (d *decoder) readInt8() int8 {
+	if d.readFull(d.buffer[:1]) {
+		return readInt8(d.buffer[:1])
+	}
+	return 0
+}
+
+func (d *decoder) readInt16() int16 {
+	if d.readFull(d.buffer[:2]) {
+		return readInt16(d.buffer[:2])
+	}
+	return 0
+}
+
+func (d *decoder) readInt32() int32 {
+	if d.readFull(d.buffer[:4]) {
+		return readInt32(d.buffer[:4])
+	}
+	return 0
+}
+
+func (d *decoder) readInt64() int64 {
+	if d.readFull(d.buffer[:8]) {
+		return readInt64(d.buffer[:8])
+	}
+	return 0
+}
+
+func (d *decoder) readFloat64() float64 {
+	if d.readFull(d.buffer[:8]) {
+		return readFloat64(d.buffer[:8])
+	}
+	return 0
+}
+
+func (d *decoder) readString() string {
+	if n := d.readInt16(); n < 0 {
+		return ""
+	} else {
+		return bytesToString(d.read(int(n)))
+	}
+}
+
+func (d *decoder) readVarString() string {
+	if n := d.readVarInt(); n < 0 {
+		return ""
+	} else {
+		return bytesToString(d.read(int(n)))
+	}
+}
+
+func (d *decoder) readCompactString() string {
+	if n := d.readUnsignedVarInt(); n < 1 {
+		return ""
+	} else {
+		return bytesToString(d.read(int(n - 1)))
+	}
+}
+
+func (d *decoder) readBytes() []byte {
+	if n := d.readInt32(); n < 0 {
+		return nil
+	} else {
+		return d.read(int(n))
+	}
+}
+
+func (d *decoder) readVarBytes() []byte {
+	if n := d.readVarInt(); n < 0 {
+		return nil
+	} else {
+		return d.read(int(n))
+	}
+}
+
+func (d *decoder) readCompactBytes() []byte {
+	if n := d.readUnsignedVarInt(); n < 1 {
+		return nil
+	} else {
+		return d.read(int(n - 1))
+	}
+}
+
+func (d *decoder) readVarInt() int64 {
+	n := 11 // varints are at most 11 bytes
+
+	if n > d.remain {
+		n = d.remain
+	}
+
+	x := uint64(0)
+	s := uint(0)
+
+	for n > 0 {
+		b := d.readByte()
+
+		if (b & 0x80) == 0 {
+			x |= uint64(b) << s
+			return int64(x>>1) ^ -(int64(x) & 1)
+		}
+
+		x |= uint64(b&0x7f) << s
+		s += 7
+		n--
+	}
+
+	d.setError(fmt.Errorf("cannot decode varint from input stream"))
+	return 0
+}
+
+func (d *decoder) readUnsignedVarInt() uint64 {
+	n := 11 // varints are at most 11 bytes
+
+	if n > d.remain {
+		n = d.remain
+	}
+
+	x := uint64(0)
+	s := uint(0)
+
+	for n > 0 {
+		b := d.readByte()
+
+		if (b & 0x80) == 0 {
+			x |= uint64(b) << s
+			return x
+		}
+
+		x |= uint64(b&0x7f) << s
+		s += 7
+		n--
+	}
+
+	d.setError(fmt.Errorf("cannot decode unsigned varint from input stream"))
+	return 0
+}
+
+type decodeFunc func(*decoder, value)
+
+var (
+	_ io.Reader     = (*decoder)(nil)
+	_ io.ByteReader = (*decoder)(nil)
+
+	readerFrom = reflect.TypeOf((*io.ReaderFrom)(nil)).Elem()
+)
+
+func decodeFuncOf(typ reflect.Type, version int16, flexible bool, tag structTag) decodeFunc {
+	if reflect.PtrTo(typ).Implements(readerFrom) {
+		return readerDecodeFuncOf(typ)
+	}
+	switch typ.Kind() {
+	case reflect.Bool:
+		return (*decoder).decodeBool
+	case reflect.Int8:
+		return (*decoder).decodeInt8
+	case reflect.Int16:
+		return (*decoder).decodeInt16
+	case reflect.Int32:
+		return (*decoder).decodeInt32
+	case reflect.Int64:
+		return (*decoder).decodeInt64
+	case reflect.Float64:
+		return (*decoder).decodeFloat64
+	case reflect.String:
+		return stringDecodeFuncOf(flexible, tag)
+	case reflect.Struct:
+		return structDecodeFuncOf(typ, version, flexible)
+	case reflect.Slice:
+		if typ.Elem().Kind() == reflect.Uint8 { // []byte
+			return bytesDecodeFuncOf(flexible, tag)
+		}
+		return arrayDecodeFuncOf(typ, version, flexible, tag)
+	default:
+		panic("unsupported type: " + typ.String())
+	}
+}
+
+func stringDecodeFuncOf(flexible bool, tag structTag) decodeFunc {
+	if flexible {
+		// In flexible messages, all strings are compact
+		return (*decoder).decodeCompactString
+	}
+	return (*decoder).decodeString
+}
+
+func bytesDecodeFuncOf(flexible bool, tag structTag) decodeFunc {
+	if flexible {
+		// In flexible messages, all arrays are compact
+		return (*decoder).decodeCompactBytes
+	}
+	return (*decoder).decodeBytes
+}
+
+func structDecodeFuncOf(typ reflect.Type, version int16, flexible bool) decodeFunc {
+	type field struct {
+		decode decodeFunc
+		index  index
+		tagID  int
+	}
+
+	var fields []field
+	taggedFields := map[int]*field{}
+
+	forEachStructField(typ, func(typ reflect.Type, index index, tag string) {
+		forEachStructTag(tag, func(tag structTag) bool {
+			if tag.MinVersion <= version && version <= tag.MaxVersion {
+				f := field{
+					decode: decodeFuncOf(typ, version, flexible, tag),
+					index:  index,
+					tagID:  tag.TagID,
+				}
+
+				if tag.TagID < -1 {
+					// Normal required field
+					fields = append(fields, f)
+				} else {
+					// Optional tagged field (flexible messages only)
+					taggedFields[tag.TagID] = &f
+				}
+				return false
+			}
+			return true
+		})
+	})
+
+	return func(d *decoder, v value) {
+		for i := range fields {
+			f := &fields[i]
+			f.decode(d, v.fieldByIndex(f.index))
+		}
+
+		if flexible {
+			// See https://cwiki.apache.org/confluence/display/KAFKA/KIP-482%3A+The+Kafka+Protocol+should+Support+Optional+Tagged+Fields
+			// for details of tag buffers in "flexible" messages.
+			n := int(d.readUnsignedVarInt())
+
+			for i := 0; i < n; i++ {
+				tagID := int(d.readUnsignedVarInt())
+				size := int(d.readUnsignedVarInt())
+
+				f, ok := taggedFields[tagID]
+				if ok {
+					f.decode(d, v.fieldByIndex(f.index))
+				} else {
+					d.read(size)
+				}
+			}
+		}
+	}
+}
+
+func arrayDecodeFuncOf(typ reflect.Type, version int16, flexible bool, tag structTag) decodeFunc {
+	elemType := typ.Elem()
+	elemFunc := decodeFuncOf(elemType, version, flexible, tag)
+	if flexible {
+		// In flexible messages, all arrays are compact
+		return func(d *decoder, v value) { d.decodeCompactArray(v, elemType, elemFunc) }
+	}
+
+	return func(d *decoder, v value) { d.decodeArray(v, elemType, elemFunc) }
+}
+
+func readerDecodeFuncOf(typ reflect.Type) decodeFunc {
+	typ = reflect.PtrTo(typ)
+	return func(d *decoder, v value) {
+		if d.err == nil {
+			_, err := v.iface(typ).(io.ReaderFrom).ReadFrom(d)
+			if err != nil {
+				d.setError(err)
+			}
+		}
+	}
+}
+
+func readInt8(b []byte) int8 {
+	return int8(b[0])
+}
+
+func readInt16(b []byte) int16 {
+	return int16(binary.BigEndian.Uint16(b))
+}
+
+func readInt32(b []byte) int32 {
+	return int32(binary.BigEndian.Uint32(b))
+}
+
+func readInt64(b []byte) int64 {
+	return int64(binary.BigEndian.Uint64(b))
+}
+
+func readFloat64(b []byte) float64 {
+	return math.Float64frombits(binary.BigEndian.Uint64(b))
+}
+
+func Unmarshal(data []byte, version int16, value interface{}) error {
+	typ := elemTypeOf(value)
+	cache, _ := unmarshalers.Load().(map[versionedType]decodeFunc)
+	key := versionedType{typ: typ, version: version}
+	decode := cache[key]
+
+	if decode == nil {
+		decode = decodeFuncOf(reflect.TypeOf(value).Elem(), version, false, structTag{
+			MinVersion: -1,
+			MaxVersion: -1,
+			TagID:      -2,
+			Compact:    true,
+			Nullable:   true,
+		})
+
+		newCache := make(map[versionedType]decodeFunc, len(cache)+1)
+		newCache[key] = decode
+
+		for typ, fun := range cache {
+			newCache[typ] = fun
+		}
+
+		unmarshalers.Store(newCache)
+	}
+
+	d, _ := decoders.Get().(*decoder)
+	if d == nil {
+		d = &decoder{reader: bytes.NewReader(nil)}
+	}
+
+	d.remain = len(data)
+	r, _ := d.reader.(*bytes.Reader)
+	r.Reset(data)
+
+	defer func() {
+		r.Reset(nil)
+		d.Reset(r, 0)
+		decoders.Put(d)
+	}()
+
+	decode(d, valueOf(value))
+	return dontExpectEOF(d.err)
+}
+
+var (
+	decoders     sync.Pool    // *decoder
+	unmarshalers atomic.Value // map[versionedType]decodeFunc
+)
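readVarInt above decodes Kafka's zig-zag varints: each byte contributes seven low-order bits (least significant group first, the high bit marking continuation), and the accumulated unsigned value x is mapped back to a signed integer with int64(x>>1) ^ -(int64(x)&1). A small worked example as a hypothetical in-package test (the byte sequences are assumptions chosen for illustration):

package protocol

import (
	"bytes"
	"testing"
)

// TestReadVarIntZigZag is an illustrative test showing how readVarInt maps
// zig-zag encoded bytes back to signed integers.
func TestReadVarIntZigZag(t *testing.T) {
	tests := []struct {
		input []byte
		want  int64
	}{
		{[]byte{0x00}, 0},        // zig-zag 0 -> 0
		{[]byte{0x01}, -1},       // zig-zag 1 -> -1
		{[]byte{0x02}, 1},        // zig-zag 2 -> 1
		{[]byte{0x96, 0x01}, 75}, // two bytes: 22 | 1<<7 = 150 -> 75
	}

	for _, test := range tests {
		d := &decoder{reader: bytes.NewReader(test.input), remain: len(test.input)}
		if got := d.readVarInt(); got != test.want || d.err != nil {
			t.Fatalf("readVarInt(% x) = %d, err=%v; want %d", test.input, got, d.err, test.want)
		}
	}
}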
diff -pruN 0.2.1-1.1/protocol/deleteacls/deleteacls.go 0.4.49+ds1-1/protocol/deleteacls/deleteacls.go
--- 0.2.1-1.1/protocol/deleteacls/deleteacls.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/protocol/deleteacls/deleteacls.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,74 @@
+package deleteacls
+
+import "github.com/segmentio/kafka-go/protocol"
+
+func init() {
+	protocol.Register(&Request{}, &Response{})
+}
+
+type Request struct {
+	// We need at least one tagged field to indicate that v2+ uses "flexible"
+	// messages.
+	_ struct{} `kafka:"min=v2,max=v3,tag"`
+
+	Filters []RequestFilter `kafka:"min=v0,max=v3"`
+}
+
+func (r *Request) ApiKey() protocol.ApiKey { return protocol.DeleteAcls }
+
+func (r *Request) Broker(cluster protocol.Cluster) (protocol.Broker, error) {
+	return cluster.Brokers[cluster.Controller], nil
+}
+
+type RequestFilter struct {
+	// We need at least one tagged field to indicate that v2+ uses "flexible"
+	// messages.
+	_ struct{} `kafka:"min=v2,max=v3,tag"`
+
+	ResourceTypeFilter        int8   `kafka:"min=v0,max=v3"`
+	ResourceNameFilter        string `kafka:"min=v0,max=v1,nullable|min=v2,max=v3,nullable,compact"`
+	ResourcePatternTypeFilter int8   `kafka:"min=v1,max=v3"`
+	PrincipalFilter           string `kafka:"min=v0,max=v1,nullable|min=v2,max=v3,nullable,compact"`
+	HostFilter                string `kafka:"min=v0,max=v1,nullable|min=v2,max=v3,nullable,compact"`
+	Operation                 int8   `kafka:"min=v0,max=v3"`
+	PermissionType            int8   `kafka:"min=v0,max=v3"`
+}
+
+type Response struct {
+	// We need at least one tagged field to indicate that v2+ uses "flexible"
+	// messages.
+	_ struct{} `kafka:"min=v2,max=v3,tag"`
+
+	ThrottleTimeMs int32          `kafka:"min=v0,max=v3"`
+	FilterResults  []FilterResult `kafka:"min=v0,max=v3"`
+}
+
+func (r *Response) ApiKey() protocol.ApiKey { return protocol.DeleteAcls }
+
+type FilterResult struct {
+	// We need at least one tagged field to indicate that v2+ uses "flexible"
+	// messages.
+	_ struct{} `kafka:"min=v2,max=v3,tag"`
+
+	ErrorCode    int16         `kafka:"min=v0,max=v3"`
+	ErrorMessage string        `kafka:"min=v0,max=v1,nullable|min=v2,max=v3,nullable,compact"`
+	MatchingACLs []MatchingACL `kafka:"min=v0,max=v3"`
+}
+
+type MatchingACL struct {
+	// We need at least one tagged field to indicate that v2+ uses "flexible"
+	// messages.
+	_ struct{} `kafka:"min=v2,max=v3,tag"`
+
+	ErrorCode           int16  `kafka:"min=v0,max=v3"`
+	ErrorMessage        string `kafka:"min=v0,max=v1,nullable|min=v2,max=v3,nullable,compact"`
+	ResourceType        int8   `kafka:"min=v0,max=v3"`
+	ResourceName        string `kafka:"min=v0,max=v1|min=v2,max=v3,compact"`
+	ResourcePatternType int8   `kafka:"min=v1,max=v3"`
+	Principal           string `kafka:"min=v0,max=v1|min=v2,max=v3,compact"`
+	Host                string `kafka:"min=v0,max=v1|min=v2,max=v3,compact"`
+	Operation           int8   `kafka:"min=v0,max=v3"`
+	PermissionType      int8   `kafka:"min=v0,max=v3"`
+}
+
+var _ protocol.BrokerMessage = (*Request)(nil)
diff -pruN 0.2.1-1.1/protocol/deleteacls/deleteacls_test.go 0.4.49+ds1-1/protocol/deleteacls/deleteacls_test.go
--- 0.2.1-1.1/protocol/deleteacls/deleteacls_test.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/protocol/deleteacls/deleteacls_test.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,165 @@
+package deleteacls_test
+
+import (
+	"testing"
+
+	"github.com/segmentio/kafka-go/protocol/deleteacls"
+	"github.com/segmentio/kafka-go/protocol/prototest"
+)
+
+const (
+	v0 = 0
+	v1 = 1
+	v2 = 2
+	v3 = 3
+)
+
+func TestDeleteACLsRequest(t *testing.T) {
+	prototest.TestRequest(t, v0, &deleteacls.Request{
+		Filters: []deleteacls.RequestFilter{
+			{
+				ResourceTypeFilter: 2,
+				ResourceNameFilter: "fake-topic-for-alice",
+				PrincipalFilter:    "User:alice",
+				HostFilter:         "*",
+				Operation:          3,
+				PermissionType:     3,
+			},
+		},
+	})
+
+	prototest.TestRequest(t, v1, &deleteacls.Request{
+		Filters: []deleteacls.RequestFilter{
+			{
+				ResourceTypeFilter:        2,
+				ResourceNameFilter:        "fake-topic-for-alice",
+				ResourcePatternTypeFilter: 0,
+				PrincipalFilter:           "User:alice",
+				HostFilter:                "*",
+				Operation:                 3,
+				PermissionType:            3,
+			},
+		},
+	})
+
+	prototest.TestRequest(t, v2, &deleteacls.Request{
+		Filters: []deleteacls.RequestFilter{
+			{
+				ResourceTypeFilter:        2,
+				ResourceNameFilter:        "fake-topic-for-alice",
+				ResourcePatternTypeFilter: 0,
+				PrincipalFilter:           "User:alice",
+				HostFilter:                "*",
+				Operation:                 3,
+				PermissionType:            3,
+			},
+		},
+	})
+
+	prototest.TestRequest(t, v3, &deleteacls.Request{
+		Filters: []deleteacls.RequestFilter{
+			{
+				ResourceTypeFilter:        2,
+				ResourceNameFilter:        "fake-topic-for-alice",
+				ResourcePatternTypeFilter: 0,
+				PrincipalFilter:           "User:alice",
+				HostFilter:                "*",
+				Operation:                 3,
+				PermissionType:            3,
+			},
+		},
+	})
+}
+
+func TestDeleteACLsResponse(t *testing.T) {
+	prototest.TestResponse(t, v0, &deleteacls.Response{
+		ThrottleTimeMs: 1,
+		FilterResults: []deleteacls.FilterResult{
+			{
+				ErrorCode:    1,
+				ErrorMessage: "foo",
+				MatchingACLs: []deleteacls.MatchingACL{
+					{
+						ErrorCode:      1,
+						ErrorMessage:   "bar",
+						ResourceType:   2,
+						ResourceName:   "fake-topic-for-alice",
+						Principal:      "User:alice",
+						Host:           "*",
+						Operation:      3,
+						PermissionType: 3,
+					},
+				},
+			},
+		},
+	})
+
+	prototest.TestResponse(t, v1, &deleteacls.Response{
+		ThrottleTimeMs: 1,
+		FilterResults: []deleteacls.FilterResult{
+			{
+				ErrorCode:    1,
+				ErrorMessage: "foo",
+				MatchingACLs: []deleteacls.MatchingACL{
+					{
+						ErrorCode:           1,
+						ErrorMessage:        "bar",
+						ResourceType:        2,
+						ResourceName:        "fake-topic-for-alice",
+						ResourcePatternType: 0,
+						Principal:           "User:alice",
+						Host:                "*",
+						Operation:           3,
+						PermissionType:      3,
+					},
+				},
+			},
+		},
+	})
+
+	prototest.TestResponse(t, v2, &deleteacls.Response{
+		ThrottleTimeMs: 1,
+		FilterResults: []deleteacls.FilterResult{
+			{
+				ErrorCode:    1,
+				ErrorMessage: "foo",
+				MatchingACLs: []deleteacls.MatchingACL{
+					{
+						ErrorCode:           1,
+						ErrorMessage:        "bar",
+						ResourceType:        2,
+						ResourceName:        "fake-topic-for-alice",
+						ResourcePatternType: 0,
+						Principal:           "User:alice",
+						Host:                "*",
+						Operation:           3,
+						PermissionType:      3,
+					},
+				},
+			},
+		},
+	})
+
+	prototest.TestResponse(t, v3, &deleteacls.Response{
+		ThrottleTimeMs: 1,
+		FilterResults: []deleteacls.FilterResult{
+			{
+				ErrorCode:    1,
+				ErrorMessage: "foo",
+				MatchingACLs: []deleteacls.MatchingACL{
+					{
+						ErrorCode:           1,
+						ErrorMessage:        "bar",
+						ResourceType:        2,
+						ResourceName:        "fake-topic-for-alice",
+						ResourcePatternType: 0,
+						Principal:           "User:alice",
+						Host:                "*",
+						Operation:           3,
+						PermissionType:      3,
+					},
+				},
+			},
+		},
+	})
+}
diff -pruN 0.2.1-1.1/protocol/deletegroups/deletegroups.go 0.4.49+ds1-1/protocol/deletegroups/deletegroups.go
--- 0.2.1-1.1/protocol/deletegroups/deletegroups.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/protocol/deletegroups/deletegroups.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,45 @@
+package deletegroups
+
+import "github.com/segmentio/kafka-go/protocol"
+
+func init() {
+	protocol.Register(&Request{}, &Response{})
+}
+
+type Request struct {
+	// We need at least one tagged field to indicate that this is a "flexible" message
+	// type.
+	_ struct{} `kafka:"min=v2,max=v2,tag"`
+
+	GroupIDs []string `kafka:"min=v0,max=v2"`
+}
+
+func (r *Request) Group() string {
+	// Use the first group ID to determine the group coordinator.
+	if len(r.GroupIDs) > 0 {
+		return r.GroupIDs[0]
+	}
+	return ""
+}
+
+func (r *Request) ApiKey() protocol.ApiKey { return protocol.DeleteGroups }
+
+var (
+	_ protocol.GroupMessage = (*Request)(nil)
+)
+
+type Response struct {
+	// We need at least one tagged field to indicate that this is a "flexible" message
+	// type.
+	_ struct{} `kafka:"min=v2,max=v2,tag"`
+
+	ThrottleTimeMs int32           `kafka:"min=v0,max=v2"`
+	Responses      []ResponseGroup `kafka:"min=v0,max=v2"`
+}
+
+func (r *Response) ApiKey() protocol.ApiKey { return protocol.DeleteGroups }
+
+type ResponseGroup struct {
+	GroupID   string `kafka:"min=v0,max=v2"`
+	ErrorCode int16  `kafka:"min=v0,max=v2"`
+}
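Like most request types in this package, broker-scoped requests implement protocol.BrokerMessage and are routed directly to a broker (usually the controller), whereas group-scoped requests such as this one implement protocol.GroupMessage and must first have the coordinator of Group() resolved. A hedged sketch of how a dispatcher might branch on those interfaces; the routeHint helper is illustrative and not part of the library:

package main

import (
	"fmt"

	"github.com/segmentio/kafka-go/protocol"
	"github.com/segmentio/kafka-go/protocol/deletegroups"
)

// routeHint reports either the broker a message should be sent to or the
// group whose coordinator still has to be looked up.
func routeHint(cluster protocol.Cluster, msg protocol.Message) (string, error) {
	switch m := msg.(type) {
	case protocol.BrokerMessage:
		// e.g. createtopics.Request: routed straight to a broker, usually
		// the controller.
		broker, err := m.Broker(cluster)
		if err != nil {
			return "", err
		}
		return fmt.Sprintf("broker %d (%s:%d)", broker.ID, broker.Host, broker.Port), nil
	case protocol.GroupMessage:
		// e.g. deletegroups.Request: the caller still needs a
		// FindCoordinator exchange to resolve the group to a broker.
		return fmt.Sprintf("coordinator of group %q", m.Group()), nil
	default:
		return "any broker", nil
	}
}

func main() {
	hint, _ := routeHint(protocol.Cluster{}, &deletegroups.Request{GroupIDs: []string{"group1"}})
	fmt.Println(hint) // coordinator of group "group1"
}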
diff -pruN 0.2.1-1.1/protocol/deletegroups/deletegroups_test.go 0.4.49+ds1-1/protocol/deletegroups/deletegroups_test.go
--- 0.2.1-1.1/protocol/deletegroups/deletegroups_test.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/protocol/deletegroups/deletegroups_test.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,33 @@
+package deletegroups_test
+
+import (
+	"testing"
+
+	"github.com/segmentio/kafka-go/protocol/deletegroups"
+	"github.com/segmentio/kafka-go/protocol/prototest"
+)
+
+func TestDeleteGroupsRequest(t *testing.T) {
+	for _, version := range []int16{0, 1, 2} {
+		prototest.TestRequest(t, version, &deletegroups.Request{
+			GroupIDs: []string{"group1", "group2"},
+		})
+	}
+}
+
+func TestDeleteGroupsResponse(t *testing.T) {
+	for _, version := range []int16{0, 1, 2} {
+		prototest.TestResponse(t, version, &deletegroups.Response{
+			Responses: []deletegroups.ResponseGroup{
+				{
+					GroupID:   "group1",
+					ErrorCode: 0,
+				},
+				{
+					GroupID:   "group2",
+					ErrorCode: 1,
+				},
+			},
+		})
+	}
+}
diff -pruN 0.2.1-1.1/protocol/deletetopics/deletetopics.go 0.4.49+ds1-1/protocol/deletetopics/deletetopics.go
--- 0.2.1-1.1/protocol/deletetopics/deletetopics.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/protocol/deletetopics/deletetopics.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,34 @@
+package deletetopics
+
+import "github.com/segmentio/kafka-go/protocol"
+
+func init() {
+	protocol.Register(&Request{}, &Response{})
+}
+
+type Request struct {
+	TopicNames []string `kafka:"min=v0,max=v3"`
+	TimeoutMs  int32    `kafka:"min=v0,max=v3"`
+}
+
+func (r *Request) ApiKey() protocol.ApiKey { return protocol.DeleteTopics }
+
+func (r *Request) Broker(cluster protocol.Cluster) (protocol.Broker, error) {
+	return cluster.Brokers[cluster.Controller], nil
+}
+
+type Response struct {
+	ThrottleTimeMs int32           `kafka:"min=v1,max=v3"`
+	Responses      []ResponseTopic `kafka:"min=v0,max=v3"`
+}
+
+func (r *Response) ApiKey() protocol.ApiKey { return protocol.DeleteTopics }
+
+type ResponseTopic struct {
+	Name      string `kafka:"min=v0,max=v3"`
+	ErrorCode int16  `kafka:"min=v0,max=v3"`
+}
+
+var (
+	_ protocol.BrokerMessage = (*Request)(nil)
+)
diff -pruN 0.2.1-1.1/protocol/deletetopics/deletetopics_test.go 0.4.49+ds1-1/protocol/deletetopics/deletetopics_test.go
--- 0.2.1-1.1/protocol/deletetopics/deletetopics_test.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/protocol/deletetopics/deletetopics_test.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,74 @@
+package deletetopics_test
+
+import (
+	"testing"
+
+	"github.com/segmentio/kafka-go/protocol/deletetopics"
+	"github.com/segmentio/kafka-go/protocol/prototest"
+)
+
+const (
+	v0 = 0
+	v1 = 1
+	v3 = 3
+)
+
+func TestDeleteTopicsRequest(t *testing.T) {
+	prototest.TestRequest(t, v0, &deletetopics.Request{
+		TopicNames: []string{"foo", "bar"},
+		TimeoutMs:  500,
+	})
+
+	prototest.TestRequest(t, v1, &deletetopics.Request{
+		TopicNames: []string{"foo", "bar"},
+		TimeoutMs:  500,
+	})
+
+	prototest.TestRequest(t, v3, &deletetopics.Request{
+		TopicNames: []string{"foo", "bar"},
+		TimeoutMs:  500,
+	})
+}
+
+func TestDeleteTopicsResponse(t *testing.T) {
+	prototest.TestResponse(t, v0, &deletetopics.Response{
+		Responses: []deletetopics.ResponseTopic{
+			{
+				Name:      "foo",
+				ErrorCode: 1,
+			},
+			{
+				Name:      "bar",
+				ErrorCode: 1,
+			},
+		},
+	})
+
+	prototest.TestResponse(t, v1, &deletetopics.Response{
+		ThrottleTimeMs: 500,
+		Responses: []deletetopics.ResponseTopic{
+			{
+				Name:      "foo",
+				ErrorCode: 1,
+			},
+			{
+				Name:      "bar",
+				ErrorCode: 1,
+			},
+		},
+	})
+
+	prototest.TestResponse(t, v3, &deletetopics.Response{
+		ThrottleTimeMs: 500,
+		Responses: []deletetopics.ResponseTopic{
+			{
+				Name:      "foo",
+				ErrorCode: 1,
+			},
+			{
+				Name:      "bar",
+				ErrorCode: 1,
+			},
+		},
+	})
+}
diff -pruN 0.2.1-1.1/protocol/describeacls/describeacls.go 0.4.49+ds1-1/protocol/describeacls/describeacls.go
--- 0.2.1-1.1/protocol/describeacls/describeacls.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/protocol/describeacls/describeacls.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,72 @@
+package describeacls
+
+import "github.com/segmentio/kafka-go/protocol"
+
+func init() {
+	protocol.Register(&Request{}, &Response{})
+}
+
+type Request struct {
+	// We need at least one tagged field to indicate that v2+ uses "flexible"
+	// messages.
+	_ struct{} `kafka:"min=v2,max=v3,tag"`
+
+	Filter ACLFilter `kafka:"min=v0,max=v3"`
+}
+
+func (r *Request) ApiKey() protocol.ApiKey { return protocol.DescribeAcls }
+
+func (r *Request) Broker(cluster protocol.Cluster) (protocol.Broker, error) {
+	return cluster.Brokers[cluster.Controller], nil
+}
+
+type ACLFilter struct {
+	// We need at least one tagged field to indicate that v2+ uses "flexible"
+	// messages.
+	_ struct{} `kafka:"min=v2,max=v3,tag"`
+
+	ResourceTypeFilter        int8   `kafka:"min=v0,max=v3"`
+	ResourceNameFilter        string `kafka:"min=v0,max=v1,nullable|min=v2,max=v3,nullable,compact"`
+	ResourcePatternTypeFilter int8   `kafka:"min=v1,max=v3"`
+	PrincipalFilter           string `kafka:"min=v0,max=v1,nullable|min=v2,max=v3,nullable,compact"`
+	HostFilter                string `kafka:"min=v0,max=v1,nullable|min=v2,max=v3,nullable,compact"`
+	Operation                 int8   `kafka:"min=v0,max=v3"`
+	PermissionType            int8   `kafka:"min=v0,max=v3"`
+}
+
+type Response struct {
+	// We need at least one tagged field to indicate that v2+ uses "flexible"
+	// messages.
+	_ struct{} `kafka:"min=v2,max=v3,tag"`
+
+	ThrottleTimeMs int32      `kafka:"min=v0,max=v3"`
+	ErrorCode      int16      `kafka:"min=v0,max=v3"`
+	ErrorMessage   string     `kafka:"min=v0,max=v1,nullable|min=v2,max=v3,nullable,compact"`
+	Resources      []Resource `kafka:"min=v0,max=v3"`
+}
+
+func (r *Response) ApiKey() protocol.ApiKey { return protocol.DescribeAcls }
+
+type Resource struct {
+	// We need at least one tagged field to indicate that v2+ uses "flexible"
+	// messages.
+	_ struct{} `kafka:"min=v2,max=v3,tag"`
+
+	ResourceType int8          `kafka:"min=v0,max=v3"`
+	ResourceName string        `kafka:"min=v0,max=v1|min=v2,max=v3,compact"`
+	PatternType  int8          `kafka:"min=v1,max=v3"`
+	ACLs         []ResponseACL `kafka:"min=v0,max=v3"`
+}
+
+type ResponseACL struct {
+	// We need at least one tagged field to indicate that v2+ uses "flexible"
+	// messages.
+	_ struct{} `kafka:"min=v2,max=v3,tag"`
+
+	Principal      string `kafka:"min=v0,max=v1|min=v2,max=v3,compact"`
+	Host           string `kafka:"min=v0,max=v1|min=v2,max=v3,compact"`
+	Operation      int8   `kafka:"min=v0,max=v3"`
+	PermissionType int8   `kafka:"min=v0,max=v3"`
+}
+
+var _ protocol.BrokerMessage = (*Request)(nil)
diff -pruN 0.2.1-1.1/protocol/describeacls/describeacls_test.go 0.4.49+ds1-1/protocol/describeacls/describeacls_test.go
--- 0.2.1-1.1/protocol/describeacls/describeacls_test.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/protocol/describeacls/describeacls_test.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,149 @@
+package describeacls_test
+
+import (
+	"testing"
+
+	"github.com/segmentio/kafka-go/protocol/describeacls"
+	"github.com/segmentio/kafka-go/protocol/prototest"
+)
+
+const (
+	v0 = 0
+	v1 = 1
+	v2 = 2
+	v3 = 3
+)
+
+func TestDescribeACLsRequest(t *testing.T) {
+	prototest.TestRequest(t, v0, &describeacls.Request{
+		Filter: describeacls.ACLFilter{
+			ResourceTypeFilter: 2,
+			ResourceNameFilter: "fake-topic-for-alice",
+			PrincipalFilter:    "User:alice",
+			HostFilter:         "*",
+			Operation:          3,
+			PermissionType:     3,
+		},
+	})
+
+	prototest.TestRequest(t, v1, &describeacls.Request{
+		Filter: describeacls.ACLFilter{
+			ResourceTypeFilter:        2,
+			ResourceNameFilter:        "fake-topic-for-alice",
+			ResourcePatternTypeFilter: 0,
+			PrincipalFilter:           "User:alice",
+			HostFilter:                "*",
+			Operation:                 3,
+			PermissionType:            3,
+		},
+	})
+
+	prototest.TestRequest(t, v2, &describeacls.Request{
+		Filter: describeacls.ACLFilter{
+			ResourceTypeFilter:        2,
+			ResourceNameFilter:        "fake-topic-for-alice",
+			ResourcePatternTypeFilter: 0,
+			PrincipalFilter:           "User:alice",
+			HostFilter:                "*",
+			Operation:                 3,
+			PermissionType:            3,
+		},
+	})
+
+	prototest.TestRequest(t, v3, &describeacls.Request{
+		Filter: describeacls.ACLFilter{
+			ResourceTypeFilter:        2,
+			ResourceNameFilter:        "fake-topic-for-alice",
+			ResourcePatternTypeFilter: 0,
+			PrincipalFilter:           "User:alice",
+			HostFilter:                "*",
+			Operation:                 3,
+			PermissionType:            3,
+		},
+	})
+}
+
+func TestDescribeACLsResponse(t *testing.T) {
+	prototest.TestResponse(t, v0, &describeacls.Response{
+		ThrottleTimeMs: 1,
+		ErrorCode:      1,
+		ErrorMessage:   "foo",
+		Resources: []describeacls.Resource{
+			{
+				ResourceType: 2,
+				ResourceName: "fake-topic-for-alice",
+				ACLs: []describeacls.ResponseACL{
+					{
+						Principal:      "User:alice",
+						Host:           "*",
+						Operation:      3,
+						PermissionType: 3,
+					},
+				},
+			},
+		},
+	})
+
+	prototest.TestResponse(t, v1, &describeacls.Response{
+		ThrottleTimeMs: 1,
+		ErrorCode:      1,
+		ErrorMessage:   "foo",
+		Resources: []describeacls.Resource{
+			{
+				ResourceType: 2,
+				ResourceName: "fake-topic-for-alice",
+				PatternType:  3,
+				ACLs: []describeacls.ResponseACL{
+					{
+						Principal:      "User:alice",
+						Host:           "*",
+						Operation:      3,
+						PermissionType: 3,
+					},
+				},
+			},
+		},
+	})
+
+	prototest.TestResponse(t, v2, &describeacls.Response{
+		ThrottleTimeMs: 1,
+		ErrorCode:      1,
+		ErrorMessage:   "foo",
+		Resources: []describeacls.Resource{
+			{
+				ResourceType: 2,
+				ResourceName: "fake-topic-for-alice",
+				PatternType:  3,
+				ACLs: []describeacls.ResponseACL{
+					{
+						Principal:      "User:alice",
+						Host:           "*",
+						Operation:      3,
+						PermissionType: 3,
+					},
+				},
+			},
+		},
+	})
+
+	prototest.TestResponse(t, v3, &describeacls.Response{
+		ThrottleTimeMs: 1,
+		ErrorCode:      1,
+		ErrorMessage:   "foo",
+		Resources: []describeacls.Resource{
+			{
+				ResourceType: 2,
+				ResourceName: "fake-topic-for-alice",
+				PatternType:  3,
+				ACLs: []describeacls.ResponseACL{
+					{
+						Principal:      "User:alice",
+						Host:           "*",
+						Operation:      3,
+						PermissionType: 3,
+					},
+				},
+			},
+		},
+	})
+}
diff -pruN 0.2.1-1.1/protocol/describeclientquotas/describeclientquotas.go 0.4.49+ds1-1/protocol/describeclientquotas/describeclientquotas.go
--- 0.2.1-1.1/protocol/describeclientquotas/describeclientquotas.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/protocol/describeclientquotas/describeclientquotas.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,68 @@
+package describeclientquotas
+
+import "github.com/segmentio/kafka-go/protocol"
+
+func init() {
+	protocol.Register(&Request{}, &Response{})
+}
+
+type Request struct {
+	// We need at least one tagged field to indicate that this is a "flexible" message
+	// type.
+	_          struct{}    `kafka:"min=v1,max=v1,tag"`
+	Components []Component `kafka:"min=v0,max=v1"`
+	Strict     bool        `kafka:"min=v0,max=v1"`
+}
+
+func (r *Request) ApiKey() protocol.ApiKey { return protocol.DescribeClientQuotas }
+
+func (r *Request) Broker(cluster protocol.Cluster) (protocol.Broker, error) {
+	return cluster.Brokers[cluster.Controller], nil
+}
+
+type Component struct {
+	// We need at least one tagged field to indicate that this is a "flexible" message
+	// type.
+	_          struct{} `kafka:"min=v1,max=v1,tag"`
+	EntityType string   `kafka:"min=v0,max=v1"`
+	MatchType  int8     `kafka:"min=v0,max=v1"`
+	Match      string   `kafka:"min=v0,max=v1,nullable"`
+}
+
+type Response struct {
+	// We need at least one tagged field to indicate that this is a "flexible" message
+	// type.
+	_              struct{}         `kafka:"min=v1,max=v1,tag"`
+	ThrottleTimeMs int32            `kafka:"min=v0,max=v1"`
+	ErrorCode      int16            `kafka:"min=v0,max=v1"`
+	ErrorMessage   string           `kafka:"min=v0,max=v0,nullable|min=v1,max=v1,nullable,compact"`
+	Entries        []ResponseQuotas `kafka:"min=v0,max=v1"`
+}
+
+func (r *Response) ApiKey() protocol.ApiKey { return protocol.DescribeClientQuotas }
+
+type Entity struct {
+	// We need at least one tagged field to indicate that this is a "flexible" message
+	// type.
+	_          struct{} `kafka:"min=v1,max=v1,tag"`
+	EntityType string   `kafka:"min=v0,max=v0|min=v1,max=v1,compact"`
+	EntityName string   `kafka:"min=v0,max=v0,nullable|min=v1,max=v1,nullable,compact"`
+}
+
+type Value struct {
+	// We need at least one tagged field to indicate that this is a "flexible" message
+	// type.
+	_     struct{} `kafka:"min=v1,max=v1,tag"`
+	Key   string   `kafka:"min=v0,max=v0|min=v1,max=v1,compact"`
+	Value float64  `kafka:"min=v0,max=v1"`
+}
+
+type ResponseQuotas struct {
+	// We need at least one tagged field to indicate that this is a "flexible" message
+	// type.
+	_        struct{} `kafka:"min=v1,max=v1,tag"`
+	Entities []Entity `kafka:"min=v0,max=v1"`
+	Values   []Value  `kafka:"min=v0,max=v1"`
+}
+
+var _ protocol.BrokerMessage = (*Request)(nil)
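
The Broker method above always routes DescribeClientQuotas to the controller. A minimal sketch of that routing decision, using a hand-built cluster value (the broker IDs, controller choice, and client-id match below are hypothetical and only for illustration, not part of this diff):

```go
package main

import (
	"fmt"
	"log"

	"github.com/segmentio/kafka-go/protocol"
	"github.com/segmentio/kafka-go/protocol/describeclientquotas"
)

func main() {
	// Hypothetical two-broker cluster in which broker 2 is the controller.
	cluster := protocol.Cluster{
		Controller: 2,
		Brokers: map[int32]protocol.Broker{
			1: {ID: 1},
			2: {ID: 2},
		},
	}

	req := &describeclientquotas.Request{
		Components: []describeclientquotas.Component{
			{EntityType: "client-id", Match: "my-client-id"},
		},
	}

	// Quota descriptions are not partition-scoped, so the request always
	// resolves to the controller broker.
	b, err := req.Broker(cluster)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(b.ID) // 2
}
```

The same controller routing appears in several of the admin-style requests in this diff (electleaders, describeuserscramcredentials, and incrementalalterconfigs when no broker resource is present).
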
diff -pruN 0.2.1-1.1/protocol/describeclientquotas/describeclientquotas_test.go 0.4.49+ds1-1/protocol/describeclientquotas/describeclientquotas_test.go
--- 0.2.1-1.1/protocol/describeclientquotas/describeclientquotas_test.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/protocol/describeclientquotas/describeclientquotas_test.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,83 @@
+package describeclientquotas_test
+
+import (
+	"testing"
+
+	"github.com/segmentio/kafka-go/protocol/describeclientquotas"
+	"github.com/segmentio/kafka-go/protocol/prototest"
+)
+
+const (
+	v0 = 0
+	v1 = 1
+)
+
+func TestDescribeClientQuotasRequest(t *testing.T) {
+	prototest.TestRequest(t, v0, &describeclientquotas.Request{
+		Strict: true,
+		Components: []describeclientquotas.Component{
+			{
+				EntityType: "client-id",
+				MatchType:  0,
+				Match:      "my-client-id",
+			},
+		},
+	})
+
+	prototest.TestRequest(t, v1, &describeclientquotas.Request{
+		Strict: true,
+		Components: []describeclientquotas.Component{
+			{
+				EntityType: "client-id",
+				MatchType:  0,
+				Match:      "my-client-id",
+			},
+		},
+	})
+}
+
+func TestDescribeClientQuotasResponse(t *testing.T) {
+	prototest.TestResponse(t, v0, &describeclientquotas.Response{
+		ThrottleTimeMs: 1,
+		ErrorCode:      1,
+		ErrorMessage:   "foo",
+		Entries: []describeclientquotas.ResponseQuotas{
+			{
+				Entities: []describeclientquotas.Entity{
+					{
+						EntityType: "client-id",
+						EntityName: "my-client-id",
+					},
+				},
+				Values: []describeclientquotas.Value{
+					{
+						Key:   "foo",
+						Value: 1.0,
+					},
+				},
+			},
+		},
+	})
+
+	prototest.TestResponse(t, v1, &describeclientquotas.Response{
+		ThrottleTimeMs: 1,
+		ErrorCode:      1,
+		ErrorMessage:   "foo",
+		Entries: []describeclientquotas.ResponseQuotas{
+			{
+				Entities: []describeclientquotas.Entity{
+					{
+						EntityType: "client-id",
+						EntityName: "my-client-id",
+					},
+				},
+				Values: []describeclientquotas.Value{
+					{
+						Key:   "foo",
+						Value: 1.0,
+					},
+				},
+			},
+		},
+	})
+}
diff -pruN 0.2.1-1.1/protocol/describeconfigs/describeconfigs.go 0.4.49+ds1-1/protocol/describeconfigs/describeconfigs.go
--- 0.2.1-1.1/protocol/describeconfigs/describeconfigs.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/protocol/describeconfigs/describeconfigs.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,129 @@
+package describeconfigs
+
+import (
+	"strconv"
+
+	"github.com/segmentio/kafka-go/protocol"
+)
+
+const (
+	resourceTypeBroker int8 = 4
+)
+
+func init() {
+	protocol.Register(&Request{}, &Response{})
+}
+
+// Detailed API definition: https://kafka.apache.org/protocol#The_Messages_DescribeConfigs
+type Request struct {
+	Resources            []RequestResource `kafka:"min=v0,max=v3"`
+	IncludeSynonyms      bool              `kafka:"min=v1,max=v3"`
+	IncludeDocumentation bool              `kafka:"min=v3,max=v3"`
+}
+
+func (r *Request) ApiKey() protocol.ApiKey { return protocol.DescribeConfigs }
+
+func (r *Request) Broker(cluster protocol.Cluster) (protocol.Broker, error) {
+	// Broker metadata requests must be sent to the associated broker
+	for _, resource := range r.Resources {
+		if resource.ResourceType == resourceTypeBroker {
+			brokerID, err := strconv.Atoi(resource.ResourceName)
+			if err != nil {
+				return protocol.Broker{}, err
+			}
+
+			return cluster.Brokers[int32(brokerID)], nil
+		}
+	}
+
+	return cluster.Brokers[cluster.Controller], nil
+}
+
+func (r *Request) Split(cluster protocol.Cluster) (
+	[]protocol.Message,
+	protocol.Merger,
+	error,
+) {
+	messages := []protocol.Message{}
+	topicsMessage := Request{}
+
+	for _, resource := range r.Resources {
+		// Split out broker requests to separate brokers
+		if resource.ResourceType == resourceTypeBroker {
+			messages = append(messages, &Request{
+				Resources: []RequestResource{resource},
+			})
+		} else {
+			topicsMessage.Resources = append(
+				topicsMessage.Resources, resource,
+			)
+		}
+	}
+
+	if len(topicsMessage.Resources) > 0 {
+		messages = append(messages, &topicsMessage)
+	}
+
+	return messages, new(Response), nil
+}
+
+type RequestResource struct {
+	ResourceType int8     `kafka:"min=v0,max=v3"`
+	ResourceName string   `kafka:"min=v0,max=v3"`
+	ConfigNames  []string `kafka:"min=v0,max=v3,nullable"`
+}
+
+type Response struct {
+	ThrottleTimeMs int32              `kafka:"min=v0,max=v3"`
+	Resources      []ResponseResource `kafka:"min=v0,max=v3"`
+}
+
+func (r *Response) ApiKey() protocol.ApiKey { return protocol.DescribeConfigs }
+
+func (r *Response) Merge(requests []protocol.Message, results []interface{}) (
+	protocol.Message,
+	error,
+) {
+	response := &Response{}
+
+	for _, result := range results {
+		m, err := protocol.Result(result)
+		if err != nil {
+			return nil, err
+		}
+		response.Resources = append(
+			response.Resources,
+			m.(*Response).Resources...,
+		)
+	}
+
+	return response, nil
+}
+
+type ResponseResource struct {
+	ErrorCode     int16                 `kafka:"min=v0,max=v3"`
+	ErrorMessage  string                `kafka:"min=v0,max=v3,nullable"`
+	ResourceType  int8                  `kafka:"min=v0,max=v3"`
+	ResourceName  string                `kafka:"min=v0,max=v3"`
+	ConfigEntries []ResponseConfigEntry `kafka:"min=v0,max=v3"`
+}
+
+type ResponseConfigEntry struct {
+	ConfigName          string                  `kafka:"min=v0,max=v3"`
+	ConfigValue         string                  `kafka:"min=v0,max=v3,nullable"`
+	ReadOnly            bool                    `kafka:"min=v0,max=v3"`
+	IsDefault           bool                    `kafka:"min=v0,max=v0"`
+	ConfigSource        int8                    `kafka:"min=v1,max=v3"`
+	IsSensitive         bool                    `kafka:"min=v0,max=v3"`
+	ConfigSynonyms      []ResponseConfigSynonym `kafka:"min=v1,max=v3"`
+	ConfigType          int8                    `kafka:"min=v3,max=v3"`
+	ConfigDocumentation string                  `kafka:"min=v3,max=v3,nullable"`
+}
+
+type ResponseConfigSynonym struct {
+	ConfigName   string `kafka:"min=v1,max=v3"`
+	ConfigValue  string `kafka:"min=v1,max=v3,nullable"`
+	ConfigSource int8   `kafka:"min=v1,max=v3"`
+}
+
+var _ protocol.BrokerMessage = (*Request)(nil)
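
Split above fans broker-scoped resources out to their own requests (each must reach its own broker) while batching the remaining topic resources into a single message, and Response.Merge concatenates the results on the way back. A minimal sketch of the fan-out, with hypothetical resource names; the resource-type constants mirror the ones used in this file (4 = broker, 2 = topic):

```go
package main

import (
	"fmt"
	"log"

	"github.com/segmentio/kafka-go/protocol"
	"github.com/segmentio/kafka-go/protocol/describeconfigs"
)

func main() {
	req := &describeconfigs.Request{
		Resources: []describeconfigs.RequestResource{
			{ResourceType: 4, ResourceName: "1"},       // broker resource: gets its own message
			{ResourceType: 2, ResourceName: "topic-a"}, // topic resources: batched together
			{ResourceType: 2, ResourceName: "topic-b"},
		},
	}

	// Split does not consult the cluster metadata for this API, so an empty
	// value is enough for the sketch.
	msgs, merger, err := req.Split(protocol.Cluster{})
	if err != nil {
		log.Fatal(err)
	}

	// Two messages: one holding only the broker resource, one holding both
	// topic resources. The merger (a *describeconfigs.Response) later glues
	// the per-broker responses back into a single Response.
	fmt.Println(len(msgs), merger != nil) // 2 true
}
```
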
diff -pruN 0.2.1-1.1/protocol/describeconfigs/describeconfigs_test.go 0.4.49+ds1-1/protocol/describeconfigs/describeconfigs_test.go
--- 0.2.1-1.1/protocol/describeconfigs/describeconfigs_test.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/protocol/describeconfigs/describeconfigs_test.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,77 @@
+package describeconfigs
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"reflect"
+	"testing"
+
+	"github.com/segmentio/kafka-go/protocol"
+	"github.com/stretchr/testify/require"
+)
+
+func TestResponse_Merge(t *testing.T) {
+	t.Run("happy path", func(t *testing.T) {
+		r := &Response{}
+
+		r1 := &Response{
+			Resources: []ResponseResource{
+				{ResourceName: "r1"},
+			},
+		}
+		r2 := &Response{
+			Resources: []ResponseResource{
+				{ResourceName: "r2"},
+			},
+		}
+
+		got, err := r.Merge([]protocol.Message{&Request{}}, []interface{}{r1, r2})
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		want := &Response{
+			Resources: []ResponseResource{
+				{ResourceName: "r1"},
+				{ResourceName: "r2"},
+			},
+		}
+
+		if !reflect.DeepEqual(want, got) {
+			t.Fatalf("wanted response: \n%+v, got \n%+v", want, got)
+		}
+	})
+
+	t.Run("with errors", func(t *testing.T) {
+		r := &Response{}
+
+		r1 := &Response{
+			Resources: []ResponseResource{
+				{ResourceName: "r1"},
+			},
+		}
+
+		_, err := r.Merge([]protocol.Message{&Request{}}, []interface{}{r1, io.EOF})
+		if !errors.Is(err, io.EOF) {
+			t.Fatalf("wanted err io.EOF, got %v", err)
+		}
+	})
+
+	t.Run("panic with unexpected type", func(t *testing.T) {
+		defer func() {
+			msg := recover()
+			require.Equal(t, "BUG: result must be a message or an error but not string", fmt.Sprintf("%s", msg))
+		}()
+		r := &Response{}
+
+		r1 := &Response{
+			Resources: []ResponseResource{
+				{ResourceName: "r1"},
+			},
+		}
+
+		_, _ = r.Merge([]protocol.Message{&Request{}}, []interface{}{r1, "how did a string got here"})
+		t.Fatal("did not panic")
+	})
+}
diff -pruN 0.2.1-1.1/protocol/describegroups/describegroups.go 0.4.49+ds1-1/protocol/describegroups/describegroups.go
--- 0.2.1-1.1/protocol/describegroups/describegroups.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/protocol/describegroups/describegroups.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,85 @@
+package describegroups
+
+import (
+	"github.com/segmentio/kafka-go/protocol"
+)
+
+func init() {
+	protocol.Register(&Request{}, &Response{})
+}
+
+// Detailed API definition: https://kafka.apache.org/protocol#The_Messages_DescribeGroups
+type Request struct {
+	Groups                      []string `kafka:"min=v0,max=v4"`
+	IncludeAuthorizedOperations bool     `kafka:"min=v3,max=v4"`
+}
+
+func (r *Request) ApiKey() protocol.ApiKey { return protocol.DescribeGroups }
+
+func (r *Request) Group() string {
+	return r.Groups[0]
+}
+
+func (r *Request) Split(cluster protocol.Cluster) (
+	[]protocol.Message,
+	protocol.Merger,
+	error,
+) {
+	messages := []protocol.Message{}
+
+	// Split requests by group since they'll need to go to different coordinators.
+	for _, group := range r.Groups {
+		messages = append(
+			messages,
+			&Request{
+				Groups:                      []string{group},
+				IncludeAuthorizedOperations: r.IncludeAuthorizedOperations,
+			},
+		)
+	}
+
+	return messages, new(Response), nil
+}
+
+type Response struct {
+	ThrottleTimeMs int32           `kafka:"min=v1,max=v4"`
+	Groups         []ResponseGroup `kafka:"min=v0,max=v4"`
+}
+
+type ResponseGroup struct {
+	ErrorCode            int16                 `kafka:"min=v0,max=v4"`
+	GroupID              string                `kafka:"min=v0,max=v4"`
+	GroupState           string                `kafka:"min=v0,max=v4"`
+	ProtocolType         string                `kafka:"min=v0,max=v4"`
+	ProtocolData         string                `kafka:"min=v0,max=v4"`
+	Members              []ResponseGroupMember `kafka:"min=v0,max=v4"`
+	AuthorizedOperations int32                 `kafka:"min=v3,max=v4"`
+}
+
+type ResponseGroupMember struct {
+	MemberID         string `kafka:"min=v0,max=v4"`
+	GroupInstanceID  string `kafka:"min=v4,max=v4,nullable"`
+	ClientID         string `kafka:"min=v0,max=v4"`
+	ClientHost       string `kafka:"min=v0,max=v4"`
+	MemberMetadata   []byte `kafka:"min=v0,max=v4"`
+	MemberAssignment []byte `kafka:"min=v0,max=v4"`
+}
+
+func (r *Response) ApiKey() protocol.ApiKey { return protocol.DescribeGroups }
+
+func (r *Response) Merge(requests []protocol.Message, results []interface{}) (
+	protocol.Message,
+	error,
+) {
+	response := &Response{}
+
+	for _, result := range results {
+		m, err := protocol.Result(result)
+		if err != nil {
+			return nil, err
+		}
+		response.Groups = append(response.Groups, m.(*Response).Groups...)
+	}
+
+	return response, nil
+}
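
Because every consumer group may live on a different coordinator, Split above emits one request per group and Merge concatenates the per-group responses. A minimal sketch of that round trip with hypothetical group names and hand-built responses (none of these values come from the diff):

```go
package main

import (
	"fmt"
	"log"

	"github.com/segmentio/kafka-go/protocol"
	"github.com/segmentio/kafka-go/protocol/describegroups"
)

func main() {
	req := &describegroups.Request{Groups: []string{"group-a", "group-b"}}

	// One message per group, since each group may have its own coordinator.
	msgs, merger, err := req.Split(protocol.Cluster{})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(len(msgs)) // 2

	// Pretend each split request came back with its own response; Merge
	// concatenates the Groups slices into one Response.
	results := []interface{}{
		&describegroups.Response{Groups: []describegroups.ResponseGroup{{GroupID: "group-a"}}},
		&describegroups.Response{Groups: []describegroups.ResponseGroup{{GroupID: "group-b"}}},
	}
	merged, err := merger.Merge(msgs, results)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(len(merged.(*describegroups.Response).Groups)) // 2
}
```
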
diff -pruN 0.2.1-1.1/protocol/describeuserscramcredentials/describeuserscramcredentials.go 0.4.49+ds1-1/protocol/describeuserscramcredentials/describeuserscramcredentials.go
--- 0.2.1-1.1/protocol/describeuserscramcredentials/describeuserscramcredentials.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/protocol/describeuserscramcredentials/describeuserscramcredentials.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,64 @@
+package describeuserscramcredentials
+
+import "github.com/segmentio/kafka-go/protocol"
+
+func init() {
+	protocol.Register(&Request{}, &Response{})
+}
+
+type Request struct {
+	// We need at least one tagged field to indicate that v2+ uses "flexible"
+	// messages.
+	_ struct{} `kafka:"min=v0,max=v0,tag"`
+
+	Users []RequestUser `kafka:"min=v0,max=v0"`
+}
+
+func (r *Request) ApiKey() protocol.ApiKey { return protocol.DescribeUserScramCredentials }
+
+func (r *Request) Broker(cluster protocol.Cluster) (protocol.Broker, error) {
+	return cluster.Brokers[cluster.Controller], nil
+}
+
+type RequestUser struct {
+	// We need at least one tagged field to indicate that v2+ uses "flexible"
+	// messages.
+	_ struct{} `kafka:"min=v0,max=v0,tag"`
+
+	Name string `kafka:"min=v0,max=v0,compact"`
+}
+
+type Response struct {
+	// We need at least one tagged field to indicate that v2+ uses "flexible"
+	// messages.
+	_ struct{} `kafka:"min=v0,max=v0,tag"`
+
+	ThrottleTimeMs int32            `kafka:"min=v0,max=v0"`
+	ErrorCode      int16            `kafka:"min=v0,max=v0"`
+	ErrorMessage   string           `kafka:"min=v0,max=v0,nullable"`
+	Results        []ResponseResult `kafka:"min=v0,max=v0"`
+}
+
+func (r *Response) ApiKey() protocol.ApiKey { return protocol.DescribeUserScramCredentials }
+
+type ResponseResult struct {
+	// We need at least one tagged field to indicate that v2+ uses "flexible"
+	// messages.
+	_ struct{} `kafka:"min=v0,max=v0,tag"`
+
+	User            string           `kafka:"min=v0,max=v0,compact"`
+	ErrorCode       int16            `kafka:"min=v0,max=v0"`
+	ErrorMessage    string           `kafka:"min=v0,max=v0,nullable"`
+	CredentialInfos []CredentialInfo `kafka:"min=v0,max=v0"`
+}
+
+type CredentialInfo struct {
+	// We need at least one tagged field to indicate that v2+ uses "flexible"
+	// messages.
+	_ struct{} `kafka:"min=v0,max=v0,tag"`
+
+	Mechanism  int8  `kafka:"min=v0,max=v0"`
+	Iterations int32 `kafka:"min=v0,max=v0"`
+}
+
+var _ protocol.BrokerMessage = (*Request)(nil)
diff -pruN 0.2.1-1.1/protocol/describeuserscramcredentials/describeuserscramcredentials_test.go 0.4.49+ds1-1/protocol/describeuserscramcredentials/describeuserscramcredentials_test.go
--- 0.2.1-1.1/protocol/describeuserscramcredentials/describeuserscramcredentials_test.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/protocol/describeuserscramcredentials/describeuserscramcredentials_test.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,41 @@
+package describeuserscramcredentials_test
+
+import (
+	"testing"
+
+	"github.com/segmentio/kafka-go/protocol/describeuserscramcredentials"
+	"github.com/segmentio/kafka-go/protocol/prototest"
+)
+
+const (
+	v0 = 0
+)
+
+func TestDescribeUserScramCredentialsRequest(t *testing.T) {
+	prototest.TestRequest(t, v0, &describeuserscramcredentials.Request{
+		Users: []describeuserscramcredentials.RequestUser{
+			{
+				Name: "foo-1",
+			},
+		},
+	})
+}
+
+func TestDescribeUserScramCredentialsResponse(t *testing.T) {
+	prototest.TestResponse(t, v0, &describeuserscramcredentials.Response{
+		ThrottleTimeMs: 500,
+		Results: []describeuserscramcredentials.ResponseResult{
+			{
+				User:         "foo",
+				ErrorCode:    1,
+				ErrorMessage: "foo-error",
+				CredentialInfos: []describeuserscramcredentials.CredentialInfo{
+					{
+						Mechanism:  2,
+						Iterations: 15000,
+					},
+				},
+			},
+		},
+	})
+}
diff -pruN 0.2.1-1.1/protocol/electleaders/electleaders.go 0.4.49+ds1-1/protocol/electleaders/electleaders.go
--- 0.2.1-1.1/protocol/electleaders/electleaders.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/protocol/electleaders/electleaders.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,44 @@
+package electleaders
+
+import "github.com/segmentio/kafka-go/protocol"
+
+func init() {
+	protocol.Register(&Request{}, &Response{})
+}
+
+// Detailed API definition: https://kafka.apache.org/protocol#The_Messages_ElectLeaders
+type Request struct {
+	ElectionType    int8                     `kafka:"min=v1,max=v1"`
+	TopicPartitions []RequestTopicPartitions `kafka:"min=v0,max=v1"`
+	TimeoutMs       int32                    `kafka:"min=v0,max=v1"`
+}
+
+type RequestTopicPartitions struct {
+	Topic        string  `kafka:"min=v0,max=v1"`
+	PartitionIDs []int32 `kafka:"min=v0,max=v1"`
+}
+
+func (r *Request) ApiKey() protocol.ApiKey { return protocol.ElectLeaders }
+
+func (r *Request) Broker(cluster protocol.Cluster) (protocol.Broker, error) {
+	return cluster.Brokers[cluster.Controller], nil
+}
+
+type Response struct {
+	ThrottleTime           int32                           `kafka:"min=v0,max=v1"`
+	ErrorCode              int16                           `kafka:"min=v1,max=v1"`
+	ReplicaElectionResults []ResponseReplicaElectionResult `kafka:"min=v0,max=v1"`
+}
+
+type ResponseReplicaElectionResult struct {
+	Topic            string                    `kafka:"min=v0,max=v1"`
+	PartitionResults []ResponsePartitionResult `kafka:"min=v0,max=v1"`
+}
+
+type ResponsePartitionResult struct {
+	PartitionID  int32  `kafka:"min=v0,max=v1"`
+	ErrorCode    int16  `kafka:"min=v0,max=v1"`
+	ErrorMessage string `kafka:"min=v0,max=v1,nullable"`
+}
+
+func (r *Response) ApiKey() protocol.ApiKey { return protocol.ElectLeaders }
diff -pruN 0.2.1-1.1/protocol/electleaders/electleaders_test.go 0.4.49+ds1-1/protocol/electleaders/electleaders_test.go
--- 0.2.1-1.1/protocol/electleaders/electleaders_test.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/protocol/electleaders/electleaders_test.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,67 @@
+package electleaders_test
+
+import (
+	"testing"
+
+	"github.com/segmentio/kafka-go/protocol/electleaders"
+	"github.com/segmentio/kafka-go/protocol/prototest"
+)
+
+const (
+	v0 = 0
+	v1 = 1
+)
+
+func TestElectLeadersRequest(t *testing.T) {
+	prototest.TestRequest(t, v0, &electleaders.Request{
+		TimeoutMs: 500,
+		TopicPartitions: []electleaders.RequestTopicPartitions{
+			{
+				Topic:        "foo",
+				PartitionIDs: []int32{100, 101, 102},
+			},
+		},
+	})
+
+	prototest.TestRequest(t, v1, &electleaders.Request{
+		ElectionType: 1,
+		TimeoutMs:    500,
+		TopicPartitions: []electleaders.RequestTopicPartitions{
+			{
+				Topic:        "foo",
+				PartitionIDs: []int32{100, 101, 102},
+			},
+		},
+	})
+}
+
+func TestElectLeadersResponse(t *testing.T) {
+	prototest.TestResponse(t, v0, &electleaders.Response{
+		ThrottleTime: 500,
+		ReplicaElectionResults: []electleaders.ResponseReplicaElectionResult{
+			{
+				Topic: "foo",
+				PartitionResults: []electleaders.ResponsePartitionResult{
+					{PartitionID: 100, ErrorCode: 0, ErrorMessage: ""},
+					{PartitionID: 101, ErrorCode: 0, ErrorMessage: ""},
+					{PartitionID: 102, ErrorCode: 0, ErrorMessage: ""},
+				},
+			},
+		},
+	})
+
+	prototest.TestResponse(t, v1, &electleaders.Response{
+		ThrottleTime: 500,
+		ErrorCode:    1,
+		ReplicaElectionResults: []electleaders.ResponseReplicaElectionResult{
+			{
+				Topic: "foo",
+				PartitionResults: []electleaders.ResponsePartitionResult{
+					{PartitionID: 100, ErrorCode: 0, ErrorMessage: ""},
+					{PartitionID: 101, ErrorCode: 0, ErrorMessage: ""},
+					{PartitionID: 102, ErrorCode: 0, ErrorMessage: ""},
+				},
+			},
+		},
+	})
+}
diff -pruN 0.2.1-1.1/protocol/encode.go 0.4.49+ds1-1/protocol/encode.go
--- 0.2.1-1.1/protocol/encode.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/protocol/encode.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,606 @@
+package protocol
+
+import (
+	"bytes"
+	"encoding/binary"
+	"fmt"
+	"hash/crc32"
+	"io"
+	"math"
+	"reflect"
+	"sync"
+	"sync/atomic"
+)
+
+type encoder struct {
+	writer io.Writer
+	err    error
+	table  *crc32.Table
+	crc32  uint32
+	buffer [32]byte
+}
+
+type encoderChecksum struct {
+	reader  io.Reader
+	encoder *encoder
+}
+
+func (e *encoderChecksum) Read(b []byte) (int, error) {
+	n, err := e.reader.Read(b)
+	if n > 0 {
+		e.encoder.update(b[:n])
+	}
+	return n, err
+}
+
+func (e *encoder) Reset(w io.Writer) {
+	e.writer = w
+	e.err = nil
+	e.table = nil
+	e.crc32 = 0
+	e.buffer = [32]byte{}
+}
+
+func (e *encoder) ReadFrom(r io.Reader) (int64, error) {
+	if e.table != nil {
+		r = &encoderChecksum{
+			reader:  r,
+			encoder: e,
+		}
+	}
+	return io.Copy(e.writer, r)
+}
+
+func (e *encoder) Write(b []byte) (int, error) {
+	if e.err != nil {
+		return 0, e.err
+	}
+	n, err := e.writer.Write(b)
+	if n > 0 {
+		e.update(b[:n])
+	}
+	if err != nil {
+		e.err = err
+	}
+	return n, err
+}
+
+func (e *encoder) WriteByte(b byte) error {
+	e.buffer[0] = b
+	_, err := e.Write(e.buffer[:1])
+	return err
+}
+
+func (e *encoder) WriteString(s string) (int, error) {
+	// This implementation is an optimization to avoid the heap allocation that
+	// would occur when converting the string to a []byte to call crc32.Update.
+	//
+	// Strings are rarely long in the kafka protocol, so the use of a 32 byte
+	// buffer is a good compromise between keeping the encoder value small and
+	// limiting the number of calls to Write.
+	//
+	// We introduced this optimization because memory profiles on the benchmarks
+	// showed that most heap allocations were caused by this code path.
+	n := 0
+
+	for len(s) != 0 {
+		c := copy(e.buffer[:], s)
+		w, err := e.Write(e.buffer[:c])
+		n += w
+		if err != nil {
+			return n, err
+		}
+		s = s[c:]
+	}
+
+	return n, nil
+}
+
+func (e *encoder) setCRC(table *crc32.Table) {
+	e.table, e.crc32 = table, 0
+}
+
+func (e *encoder) update(b []byte) {
+	if e.table != nil {
+		e.crc32 = crc32.Update(e.crc32, e.table, b)
+	}
+}
+
+func (e *encoder) encodeBool(v value) {
+	b := int8(0)
+	if v.bool() {
+		b = 1
+	}
+	e.writeInt8(b)
+}
+
+func (e *encoder) encodeInt8(v value) {
+	e.writeInt8(v.int8())
+}
+
+func (e *encoder) encodeInt16(v value) {
+	e.writeInt16(v.int16())
+}
+
+func (e *encoder) encodeInt32(v value) {
+	e.writeInt32(v.int32())
+}
+
+func (e *encoder) encodeInt64(v value) {
+	e.writeInt64(v.int64())
+}
+
+func (e *encoder) encodeFloat64(v value) {
+	e.writeFloat64(v.float64())
+}
+
+func (e *encoder) encodeString(v value) {
+	e.writeString(v.string())
+}
+
+func (e *encoder) encodeCompactString(v value) {
+	e.writeCompactString(v.string())
+}
+
+func (e *encoder) encodeNullString(v value) {
+	e.writeNullString(v.string())
+}
+
+func (e *encoder) encodeCompactNullString(v value) {
+	e.writeCompactNullString(v.string())
+}
+
+func (e *encoder) encodeBytes(v value) {
+	e.writeBytes(v.bytes())
+}
+
+func (e *encoder) encodeCompactBytes(v value) {
+	e.writeCompactBytes(v.bytes())
+}
+
+func (e *encoder) encodeNullBytes(v value) {
+	e.writeNullBytes(v.bytes())
+}
+
+func (e *encoder) encodeCompactNullBytes(v value) {
+	e.writeCompactNullBytes(v.bytes())
+}
+
+func (e *encoder) encodeArray(v value, elemType reflect.Type, encodeElem encodeFunc) {
+	a := v.array(elemType)
+	n := a.length()
+	e.writeInt32(int32(n))
+
+	for i := 0; i < n; i++ {
+		encodeElem(e, a.index(i))
+	}
+}
+
+func (e *encoder) encodeCompactArray(v value, elemType reflect.Type, encodeElem encodeFunc) {
+	a := v.array(elemType)
+	n := a.length()
+	e.writeUnsignedVarInt(uint64(n + 1))
+
+	for i := 0; i < n; i++ {
+		encodeElem(e, a.index(i))
+	}
+}
+
+func (e *encoder) encodeNullArray(v value, elemType reflect.Type, encodeElem encodeFunc) {
+	a := v.array(elemType)
+	if a.isNil() {
+		e.writeInt32(-1)
+		return
+	}
+
+	n := a.length()
+	e.writeInt32(int32(n))
+
+	for i := 0; i < n; i++ {
+		encodeElem(e, a.index(i))
+	}
+}
+
+func (e *encoder) encodeCompactNullArray(v value, elemType reflect.Type, encodeElem encodeFunc) {
+	a := v.array(elemType)
+	if a.isNil() {
+		e.writeUnsignedVarInt(0)
+		return
+	}
+
+	n := a.length()
+	e.writeUnsignedVarInt(uint64(n + 1))
+	for i := 0; i < n; i++ {
+		encodeElem(e, a.index(i))
+	}
+}
+
+func (e *encoder) writeInt8(i int8) {
+	writeInt8(e.buffer[:1], i)
+	e.Write(e.buffer[:1])
+}
+
+func (e *encoder) writeInt16(i int16) {
+	writeInt16(e.buffer[:2], i)
+	e.Write(e.buffer[:2])
+}
+
+func (e *encoder) writeInt32(i int32) {
+	writeInt32(e.buffer[:4], i)
+	e.Write(e.buffer[:4])
+}
+
+func (e *encoder) writeInt64(i int64) {
+	writeInt64(e.buffer[:8], i)
+	e.Write(e.buffer[:8])
+}
+
+func (e *encoder) writeFloat64(f float64) {
+	writeFloat64(e.buffer[:8], f)
+	e.Write(e.buffer[:8])
+}
+
+func (e *encoder) writeString(s string) {
+	e.writeInt16(int16(len(s)))
+	e.WriteString(s)
+}
+
+func (e *encoder) writeVarString(s string) {
+	e.writeVarInt(int64(len(s)))
+	e.WriteString(s)
+}
+
+func (e *encoder) writeCompactString(s string) {
+	e.writeUnsignedVarInt(uint64(len(s)) + 1)
+	e.WriteString(s)
+}
+
+func (e *encoder) writeNullString(s string) {
+	if s == "" {
+		e.writeInt16(-1)
+	} else {
+		e.writeInt16(int16(len(s)))
+		e.WriteString(s)
+	}
+}
+
+func (e *encoder) writeCompactNullString(s string) {
+	if s == "" {
+		e.writeUnsignedVarInt(0)
+	} else {
+		e.writeUnsignedVarInt(uint64(len(s)) + 1)
+		e.WriteString(s)
+	}
+}
+
+func (e *encoder) writeBytes(b []byte) {
+	e.writeInt32(int32(len(b)))
+	e.Write(b)
+}
+
+func (e *encoder) writeCompactBytes(b []byte) {
+	e.writeUnsignedVarInt(uint64(len(b)) + 1)
+	e.Write(b)
+}
+
+func (e *encoder) writeNullBytes(b []byte) {
+	if b == nil {
+		e.writeInt32(-1)
+	} else {
+		e.writeInt32(int32(len(b)))
+		e.Write(b)
+	}
+}
+
+func (e *encoder) writeVarNullBytes(b []byte) {
+	if b == nil {
+		e.writeVarInt(-1)
+	} else {
+		e.writeVarInt(int64(len(b)))
+		e.Write(b)
+	}
+}
+
+func (e *encoder) writeCompactNullBytes(b []byte) {
+	if b == nil {
+		e.writeUnsignedVarInt(0)
+	} else {
+		e.writeUnsignedVarInt(uint64(len(b)) + 1)
+		e.Write(b)
+	}
+}
+
+func (e *encoder) writeNullBytesFrom(b Bytes) error {
+	if b == nil {
+		e.writeInt32(-1)
+		return nil
+	} else {
+		size := int64(b.Len())
+		e.writeInt32(int32(size))
+		n, err := io.Copy(e, b)
+		if err == nil && n != size {
+			err = fmt.Errorf("size of nullable bytes does not match the number of bytes that were written (size=%d, written=%d): %w", size, n, io.ErrUnexpectedEOF)
+		}
+		return err
+	}
+}
+
+func (e *encoder) writeVarNullBytesFrom(b Bytes) error {
+	if b == nil {
+		e.writeVarInt(-1)
+		return nil
+	} else {
+		size := int64(b.Len())
+		e.writeVarInt(size)
+		n, err := io.Copy(e, b)
+		if err == nil && n != size {
+			err = fmt.Errorf("size of nullable bytes does not match the number of bytes that were written (size=%d, written=%d): %w", size, n, io.ErrUnexpectedEOF)
+		}
+		return err
+	}
+}
+
+func (e *encoder) writeVarInt(i int64) {
+	e.writeUnsignedVarInt(uint64((i << 1) ^ (i >> 63)))
+}
+
+func (e *encoder) writeUnsignedVarInt(i uint64) {
+	b := e.buffer[:]
+	n := 0
+
+	for i >= 0x80 && n < len(b) {
+		b[n] = byte(i) | 0x80
+		i >>= 7
+		n++
+	}
+
+	if n < len(b) {
+		b[n] = byte(i)
+		n++
+	}
+
+	e.Write(b[:n])
+}
+
+type encodeFunc func(*encoder, value)
+
+var (
+	_ io.ReaderFrom   = (*encoder)(nil)
+	_ io.Writer       = (*encoder)(nil)
+	_ io.ByteWriter   = (*encoder)(nil)
+	_ io.StringWriter = (*encoder)(nil)
+
+	writerTo = reflect.TypeOf((*io.WriterTo)(nil)).Elem()
+)
+
+func encodeFuncOf(typ reflect.Type, version int16, flexible bool, tag structTag) encodeFunc {
+	if reflect.PtrTo(typ).Implements(writerTo) {
+		return writerEncodeFuncOf(typ)
+	}
+	switch typ.Kind() {
+	case reflect.Bool:
+		return (*encoder).encodeBool
+	case reflect.Int8:
+		return (*encoder).encodeInt8
+	case reflect.Int16:
+		return (*encoder).encodeInt16
+	case reflect.Int32:
+		return (*encoder).encodeInt32
+	case reflect.Int64:
+		return (*encoder).encodeInt64
+	case reflect.Float64:
+		return (*encoder).encodeFloat64
+	case reflect.String:
+		return stringEncodeFuncOf(flexible, tag)
+	case reflect.Struct:
+		return structEncodeFuncOf(typ, version, flexible)
+	case reflect.Slice:
+		if typ.Elem().Kind() == reflect.Uint8 { // []byte
+			return bytesEncodeFuncOf(flexible, tag)
+		}
+		return arrayEncodeFuncOf(typ, version, flexible, tag)
+	default:
+		panic("unsupported type: " + typ.String())
+	}
+}
+
+func stringEncodeFuncOf(flexible bool, tag structTag) encodeFunc {
+	switch {
+	case flexible && tag.Nullable:
+		// In flexible messages, all strings are compact
+		return (*encoder).encodeCompactNullString
+	case flexible:
+		// In flexible messages, all strings are compact
+		return (*encoder).encodeCompactString
+	case tag.Nullable:
+		return (*encoder).encodeNullString
+	default:
+		return (*encoder).encodeString
+	}
+}
+
+func bytesEncodeFuncOf(flexible bool, tag structTag) encodeFunc {
+	switch {
+	case flexible && tag.Nullable:
+		// In flexible messages, all arrays are compact
+		return (*encoder).encodeCompactNullBytes
+	case flexible:
+		// In flexible messages, all arrays are compact
+		return (*encoder).encodeCompactBytes
+	case tag.Nullable:
+		return (*encoder).encodeNullBytes
+	default:
+		return (*encoder).encodeBytes
+	}
+}
+
+func structEncodeFuncOf(typ reflect.Type, version int16, flexible bool) encodeFunc {
+	type field struct {
+		encode encodeFunc
+		index  index
+		tagID  int
+	}
+
+	var fields []field
+	var taggedFields []field
+
+	forEachStructField(typ, func(typ reflect.Type, index index, tag string) {
+		if typ.Size() != 0 { // skip struct{}
+			forEachStructTag(tag, func(tag structTag) bool {
+				if tag.MinVersion <= version && version <= tag.MaxVersion {
+					f := field{
+						encode: encodeFuncOf(typ, version, flexible, tag),
+						index:  index,
+						tagID:  tag.TagID,
+					}
+
+					if tag.TagID < -1 {
+						// Normal required field
+						fields = append(fields, f)
+					} else {
+						// Optional tagged field (flexible messages only)
+						taggedFields = append(taggedFields, f)
+					}
+					return false
+				}
+				return true
+			})
+		}
+	})
+
+	return func(e *encoder, v value) {
+		for i := range fields {
+			f := &fields[i]
+			f.encode(e, v.fieldByIndex(f.index))
+		}
+
+		if flexible {
+			// See https://cwiki.apache.org/confluence/display/KAFKA/KIP-482%3A+The+Kafka+Protocol+should+Support+Optional+Tagged+Fields
+			// for details of tag buffers in "flexible" messages.
+			e.writeUnsignedVarInt(uint64(len(taggedFields)))
+
+			for i := range taggedFields {
+				f := &taggedFields[i]
+				e.writeUnsignedVarInt(uint64(f.tagID))
+
+				buf := &bytes.Buffer{}
+				se := &encoder{writer: buf}
+				f.encode(se, v.fieldByIndex(f.index))
+				e.writeUnsignedVarInt(uint64(buf.Len()))
+				e.Write(buf.Bytes())
+			}
+		}
+	}
+}
+
+func arrayEncodeFuncOf(typ reflect.Type, version int16, flexible bool, tag structTag) encodeFunc {
+	elemType := typ.Elem()
+	elemFunc := encodeFuncOf(elemType, version, flexible, tag)
+	switch {
+	case flexible && tag.Nullable:
+		// In flexible messages, all arrays are compact
+		return func(e *encoder, v value) { e.encodeCompactNullArray(v, elemType, elemFunc) }
+	case flexible:
+		// In flexible messages, all arrays are compact
+		return func(e *encoder, v value) { e.encodeCompactArray(v, elemType, elemFunc) }
+	case tag.Nullable:
+		return func(e *encoder, v value) { e.encodeNullArray(v, elemType, elemFunc) }
+	default:
+		return func(e *encoder, v value) { e.encodeArray(v, elemType, elemFunc) }
+	}
+}
+
+func writerEncodeFuncOf(typ reflect.Type) encodeFunc {
+	typ = reflect.PtrTo(typ)
+	return func(e *encoder, v value) {
+		// Optimization to write directly into the buffer when the encoder
+	// does not need to compute a crc32 checksum.
+		w := io.Writer(e)
+		if e.table == nil {
+			w = e.writer
+		}
+		_, err := v.iface(typ).(io.WriterTo).WriteTo(w)
+		if err != nil {
+			e.err = err
+		}
+	}
+}
+
+func writeInt8(b []byte, i int8) {
+	b[0] = byte(i)
+}
+
+func writeInt16(b []byte, i int16) {
+	binary.BigEndian.PutUint16(b, uint16(i))
+}
+
+func writeInt32(b []byte, i int32) {
+	binary.BigEndian.PutUint32(b, uint32(i))
+}
+
+func writeInt64(b []byte, i int64) {
+	binary.BigEndian.PutUint64(b, uint64(i))
+}
+
+func writeFloat64(b []byte, f float64) {
+	binary.BigEndian.PutUint64(b, math.Float64bits(f))
+}
+
+func Marshal(version int16, value interface{}) ([]byte, error) {
+	typ := typeOf(value)
+	cache, _ := marshalers.Load().(map[versionedType]encodeFunc)
+	key := versionedType{typ: typ, version: version}
+	encode := cache[key]
+
+	if encode == nil {
+		encode = encodeFuncOf(reflect.TypeOf(value), version, false, structTag{
+			MinVersion: -1,
+			MaxVersion: -1,
+			TagID:      -2,
+			Compact:    true,
+			Nullable:   true,
+		})
+
+		newCache := make(map[versionedType]encodeFunc, len(cache)+1)
+		newCache[key] = encode
+
+		for typ, fun := range cache {
+			newCache[typ] = fun
+		}
+
+		marshalers.Store(newCache)
+	}
+
+	e, _ := encoders.Get().(*encoder)
+	if e == nil {
+		e = &encoder{writer: new(bytes.Buffer)}
+	}
+
+	b, _ := e.writer.(*bytes.Buffer)
+	defer func() {
+		b.Reset()
+		e.Reset(b)
+		encoders.Put(e)
+	}()
+
+	encode(e, nonAddressableValueOf(value))
+
+	if e.err != nil {
+		return nil, e.err
+	}
+
+	buf := b.Bytes()
+	out := make([]byte, len(buf))
+	copy(out, buf)
+	return out, nil
+}
+
+type versionedType struct {
+	typ     _type
+	version int16
+}
+
+var (
+	encoders   sync.Pool    // *encoder
+	marshalers atomic.Value // map[versionedType]encodeFunc
+)
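
Two encoding details above are easy to misread: writeVarInt zig-zags the sign bit into the low bit before emitting an unsigned varint, and the compact strings/bytes/arrays used by flexible messages store length+1 so that 0 can mean null. A small standalone sketch of both, where the helper names are ours and not part of the package:

```go
package main

import "fmt"

// zigzag mirrors writeVarInt: the sign bit moves into bit 0 so small negative
// values still encode into a single byte.
func zigzag(i int64) uint64 { return uint64((i << 1) ^ (i >> 63)) }

// uvarint mirrors writeUnsignedVarInt: 7 bits per byte, high bit set on every
// byte except the last.
func uvarint(i uint64) []byte {
	var b []byte
	for i >= 0x80 {
		b = append(b, byte(i)|0x80)
		i >>= 7
	}
	return append(b, byte(i))
}

func main() {
	fmt.Printf("%x\n", uvarint(zigzag(-1)))  // 01   (zigzag(-1) == 1)
	fmt.Printf("%x\n", uvarint(zigzag(300))) // d804 (zigzag(300) == 600)

	// Compact (flexible) strings store len+1, reserving 0 for null, which is
	// exactly what writeCompactString and writeCompactNullString do above.
	fmt.Printf("%x\n", uvarint(uint64(len("abc"))+1)) // 04
}
```
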
diff -pruN 0.2.1-1.1/protocol/endtxn/endtxn.go 0.4.49+ds1-1/protocol/endtxn/endtxn.go
--- 0.2.1-1.1/protocol/endtxn/endtxn.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/protocol/endtxn/endtxn.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,35 @@
+package endtxn
+
+import "github.com/segmentio/kafka-go/protocol"
+
+func init() {
+	protocol.Register(&Request{}, &Response{})
+}
+
+type Request struct {
+	// We need at least one tagged field to indicate that this is a "flexible" message
+	// type.
+	_ struct{} `kafka:"min=v3,max=v3,tag"`
+
+	TransactionalID string `kafka:"min=v0,max=v2|min=v3,max=v3,compact"`
+	ProducerID      int64  `kafka:"min=v0,max=v3"`
+	ProducerEpoch   int16  `kafka:"min=v0,max=v3"`
+	Committed       bool   `kafka:"min=v0,max=v3"`
+}
+
+func (r *Request) ApiKey() protocol.ApiKey { return protocol.EndTxn }
+
+func (r *Request) Transaction() string { return r.TransactionalID }
+
+var _ protocol.TransactionalMessage = (*Request)(nil)
+
+type Response struct {
+	// We need at least one tagged field to indicate that this is a "flexible" message
+	// type.
+	_ struct{} `kafka:"min=v3,max=v3,tag"`
+
+	ThrottleTimeMs int32 `kafka:"min=v0,max=v3"`
+	ErrorCode      int16 `kafka:"min=v0,max=v3"`
+}
+
+func (r *Response) ApiKey() protocol.ApiKey { return protocol.EndTxn }
diff -pruN 0.2.1-1.1/protocol/endtxn/endtxn_test.go 0.4.49+ds1-1/protocol/endtxn/endtxn_test.go
--- 0.2.1-1.1/protocol/endtxn/endtxn_test.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/protocol/endtxn/endtxn_test.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,28 @@
+package endtxn_test
+
+import (
+	"testing"
+
+	"github.com/segmentio/kafka-go/protocol/endtxn"
+	"github.com/segmentio/kafka-go/protocol/prototest"
+)
+
+func TestEndTxnRequest(t *testing.T) {
+	for _, version := range []int16{0, 1, 2, 3} {
+		prototest.TestRequest(t, version, &endtxn.Request{
+			TransactionalID: "transactional-id-1",
+			ProducerID:      1,
+			ProducerEpoch:   100,
+			Committed:       false,
+		})
+	}
+}
+
+func TestEndTxnResponse(t *testing.T) {
+	for _, version := range []int16{0, 1, 2, 3} {
+		prototest.TestResponse(t, version, &endtxn.Response{
+			ThrottleTimeMs: 1000,
+			ErrorCode:      4,
+		})
+	}
+}
diff -pruN 0.2.1-1.1/protocol/error.go 0.4.49+ds1-1/protocol/error.go
--- 0.2.1-1.1/protocol/error.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/protocol/error.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,91 @@
+package protocol
+
+import (
+	"fmt"
+)
+
+// Error represents client-side protocol errors.
+type Error string
+
+func (e Error) Error() string { return string(e) }
+
+func Errorf(msg string, args ...interface{}) Error {
+	return Error(fmt.Sprintf(msg, args...))
+}
+
+const (
+	// ErrNoTopic is returned when a request needs to be sent to a specific
+	// topic, but the client did not find it in the cluster metadata.
+	ErrNoTopic Error = "topic not found"
+
+	// ErrNoPartition is returned when a request needs to be sent to a specific
+	// partition, but the client did not find it in the cluster metadata.
+	ErrNoPartition Error = "topic partition not found"
+
+	// ErrNoLeader is returned when a request needs to be sent to a partition
+	// leader, but the client could not determine what the leader was at this
+	// time.
+	ErrNoLeader Error = "topic partition has no leader"
+
+	// ErrNoRecord is returned when attempting to write a message containing an
+	// empty record set (which kafka forbids).
+	//
+	// We handle this case client-side because kafka will close the connection
+	// that it received an empty produce request on, causing all concurrent
+	// requests to be aborted.
+	ErrNoRecord Error = "record set contains no records"
+
+	// ErrNoReset is returned by ResetRecordReader when the record reader does
+	// not support being reset.
+	ErrNoReset Error = "record sequence does not support reset"
+)
+
+type TopicError struct {
+	Topic string
+	Err   error
+}
+
+func NewTopicError(topic string, err error) *TopicError {
+	return &TopicError{Topic: topic, Err: err}
+}
+
+func NewErrNoTopic(topic string) *TopicError {
+	return NewTopicError(topic, ErrNoTopic)
+}
+
+func (e *TopicError) Error() string {
+	return fmt.Sprintf("%v (topic=%q)", e.Err, e.Topic)
+}
+
+func (e *TopicError) Unwrap() error {
+	return e.Err
+}
+
+type TopicPartitionError struct {
+	Topic     string
+	Partition int32
+	Err       error
+}
+
+func NewTopicPartitionError(topic string, partition int32, err error) *TopicPartitionError {
+	return &TopicPartitionError{
+		Topic:     topic,
+		Partition: partition,
+		Err:       err,
+	}
+}
+
+func NewErrNoPartition(topic string, partition int32) *TopicPartitionError {
+	return NewTopicPartitionError(topic, partition, ErrNoPartition)
+}
+
+func NewErrNoLeader(topic string, partition int32) *TopicPartitionError {
+	return NewTopicPartitionError(topic, partition, ErrNoLeader)
+}
+
+func (e *TopicPartitionError) Error() string {
+	return fmt.Sprintf("%v (topic=%q partition=%d)", e.Err, e.Topic, e.Partition)
+}
+
+func (e *TopicPartitionError) Unwrap() error {
+	return e.Err
+}
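
TopicError and TopicPartitionError both implement Unwrap, so callers can keep errors.Is checks against the sentinel values while the formatted message carries the topic/partition context. A minimal sketch, with an arbitrary topic name and partition:

```go
package main

import (
	"errors"
	"fmt"

	"github.com/segmentio/kafka-go/protocol"
)

func main() {
	err := protocol.NewErrNoLeader("my-topic", 3)

	// The formatted message carries the context...
	fmt.Println(err) // topic partition has no leader (topic="my-topic" partition=3)

	// ...while Unwrap keeps the sentinel reachable for errors.Is.
	fmt.Println(errors.Is(err, protocol.ErrNoLeader)) // true
}
```
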
diff -pruN 0.2.1-1.1/protocol/fetch/fetch.go 0.4.49+ds1-1/protocol/fetch/fetch.go
--- 0.2.1-1.1/protocol/fetch/fetch.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/protocol/fetch/fetch.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,126 @@
+package fetch
+
+import (
+	"fmt"
+
+	"github.com/segmentio/kafka-go/protocol"
+)
+
+func init() {
+	protocol.Register(&Request{}, &Response{})
+}
+
+type Request struct {
+	ReplicaID       int32                   `kafka:"min=v0,max=v11"`
+	MaxWaitTime     int32                   `kafka:"min=v0,max=v11"`
+	MinBytes        int32                   `kafka:"min=v0,max=v11"`
+	MaxBytes        int32                   `kafka:"min=v3,max=v11"`
+	IsolationLevel  int8                    `kafka:"min=v4,max=v11"`
+	SessionID       int32                   `kafka:"min=v7,max=v11"`
+	SessionEpoch    int32                   `kafka:"min=v7,max=v11"`
+	Topics          []RequestTopic          `kafka:"min=v0,max=v11"`
+	ForgottenTopics []RequestForgottenTopic `kafka:"min=v7,max=v11"`
+	RackID          string                  `kafka:"min=v11,max=v11"`
+}
+
+func (r *Request) ApiKey() protocol.ApiKey { return protocol.Fetch }
+
+func (r *Request) Broker(cluster protocol.Cluster) (protocol.Broker, error) {
+	broker := protocol.Broker{ID: -1}
+
+	for i := range r.Topics {
+		t := &r.Topics[i]
+
+		topic, ok := cluster.Topics[t.Topic]
+		if !ok {
+			return broker, NewError(protocol.NewErrNoTopic(t.Topic))
+		}
+
+		for j := range t.Partitions {
+			p := &t.Partitions[j]
+
+			partition, ok := topic.Partitions[p.Partition]
+			if !ok {
+				return broker, NewError(protocol.NewErrNoPartition(t.Topic, p.Partition))
+			}
+
+			if b, ok := cluster.Brokers[partition.Leader]; !ok {
+				return broker, NewError(protocol.NewErrNoLeader(t.Topic, p.Partition))
+			} else if broker.ID < 0 {
+				broker = b
+			} else if b.ID != broker.ID {
+				return broker, NewError(fmt.Errorf("mismatching leaders (%d!=%d)", b.ID, broker.ID))
+			}
+		}
+	}
+
+	return broker, nil
+}
+
+type RequestTopic struct {
+	Topic      string             `kafka:"min=v0,max=v11"`
+	Partitions []RequestPartition `kafka:"min=v0,max=v11"`
+}
+
+type RequestPartition struct {
+	Partition          int32 `kafka:"min=v0,max=v11"`
+	CurrentLeaderEpoch int32 `kafka:"min=v9,max=v11"`
+	FetchOffset        int64 `kafka:"min=v0,max=v11"`
+	LogStartOffset     int64 `kafka:"min=v5,max=v11"`
+	PartitionMaxBytes  int32 `kafka:"min=v0,max=v11"`
+}
+
+type RequestForgottenTopic struct {
+	Topic      string  `kafka:"min=v7,max=v11"`
+	Partitions []int32 `kafka:"min=v7,max=v11"`
+}
+
+type Response struct {
+	ThrottleTimeMs int32           `kafka:"min=v1,max=v11"`
+	ErrorCode      int16           `kafka:"min=v7,max=v11"`
+	SessionID      int32           `kafka:"min=v7,max=v11"`
+	Topics         []ResponseTopic `kafka:"min=v0,max=v11"`
+}
+
+func (r *Response) ApiKey() protocol.ApiKey { return protocol.Fetch }
+
+type ResponseTopic struct {
+	Topic      string              `kafka:"min=v0,max=v11"`
+	Partitions []ResponsePartition `kafka:"min=v0,max=v11"`
+}
+
+type ResponsePartition struct {
+	Partition            int32                 `kafka:"min=v0,max=v11"`
+	ErrorCode            int16                 `kafka:"min=v0,max=v11"`
+	HighWatermark        int64                 `kafka:"min=v0,max=v11"`
+	LastStableOffset     int64                 `kafka:"min=v4,max=v11"`
+	LogStartOffset       int64                 `kafka:"min=v5,max=v11"`
+	AbortedTransactions  []ResponseTransaction `kafka:"min=v4,max=v11"`
+	PreferredReadReplica int32                 `kafka:"min=v11,max=v11"`
+	RecordSet            protocol.RecordSet    `kafka:"min=v0,max=v11"`
+}
+
+type ResponseTransaction struct {
+	ProducerID  int64 `kafka:"min=v4,max=v11"`
+	FirstOffset int64 `kafka:"min=v4,max=v11"`
+}
+
+var (
+	_ protocol.BrokerMessage = (*Request)(nil)
+)
+
+type Error struct {
+	Err error
+}
+
+func NewError(err error) *Error {
+	return &Error{Err: err}
+}
+
+func (e *Error) Error() string {
+	return fmt.Sprintf("fetch request error: %v", e.Err)
+}
+
+func (e *Error) Unwrap() error {
+	return e.Err
+}
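
Broker above resolves a fetch request to a single broker and fails with the "mismatching leaders" error when the requested partitions are not all led by the same broker; the fetch.Error wrapper keeps the underlying cause reachable through Unwrap. A minimal sketch of the happy path with a hand-built cluster (all IDs and names hypothetical):

```go
package main

import (
	"fmt"
	"log"

	"github.com/segmentio/kafka-go/protocol"
	"github.com/segmentio/kafka-go/protocol/fetch"
)

func main() {
	// Hypothetical metadata: both partitions of "topic-1" are led by broker 1.
	cluster := protocol.Cluster{
		Brokers: map[int32]protocol.Broker{1: {ID: 1}},
		Topics: map[string]protocol.Topic{
			"topic-1": {
				Partitions: map[int32]protocol.Partition{
					0: {Leader: 1},
					1: {Leader: 1},
				},
			},
		},
	}

	req := &fetch.Request{
		Topics: []fetch.RequestTopic{{
			Topic:      "topic-1",
			Partitions: []fetch.RequestPartition{{Partition: 0}, {Partition: 1}},
		}},
	}

	// Every requested partition shares the same leader, so the request is
	// routed to broker 1. Mixing partitions with different leaders would
	// return the "mismatching leaders" error instead.
	b, err := req.Broker(cluster)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(b.ID) // 1
}
```
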
diff -pruN 0.2.1-1.1/protocol/fetch/fetch_test.go 0.4.49+ds1-1/protocol/fetch/fetch_test.go
--- 0.2.1-1.1/protocol/fetch/fetch_test.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/protocol/fetch/fetch_test.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,147 @@
+package fetch_test
+
+import (
+	"testing"
+	"time"
+
+	"github.com/segmentio/kafka-go/protocol"
+	"github.com/segmentio/kafka-go/protocol/fetch"
+	"github.com/segmentio/kafka-go/protocol/prototest"
+)
+
+const (
+	v0  = 0
+	v11 = 11
+)
+
+func TestFetchRequest(t *testing.T) {
+	prototest.TestRequest(t, v0, &fetch.Request{
+		ReplicaID:   -1,
+		MaxWaitTime: 500,
+		MinBytes:    1024,
+		Topics: []fetch.RequestTopic{
+			{
+				Topic: "topic-1",
+				Partitions: []fetch.RequestPartition{
+					{
+						Partition:         1,
+						FetchOffset:       2,
+						PartitionMaxBytes: 1024,
+					},
+				},
+			},
+		},
+	})
+}
+
+func TestFetchResponse(t *testing.T) {
+	t0 := time.Now().Truncate(time.Millisecond)
+	t1 := t0.Add(1 * time.Millisecond)
+	t2 := t0.Add(2 * time.Millisecond)
+
+	prototest.TestResponse(t, v0, &fetch.Response{
+		Topics: []fetch.ResponseTopic{
+			{
+				Topic: "topic-1",
+				Partitions: []fetch.ResponsePartition{
+					{
+						Partition:     1,
+						HighWatermark: 1000,
+						RecordSet: protocol.RecordSet{
+							Version: 1,
+							Records: protocol.NewRecordReader(
+								protocol.Record{Offset: 0, Time: t0, Key: nil, Value: prototest.String("msg-0")},
+								protocol.Record{Offset: 1, Time: t1, Key: nil, Value: prototest.String("msg-1")},
+								protocol.Record{Offset: 2, Time: t2, Key: prototest.Bytes([]byte{1}), Value: prototest.String("msg-2")},
+							),
+						},
+					},
+				},
+			},
+		},
+	})
+
+	headers := []protocol.Header{
+		{Key: "key-1", Value: []byte("value-1")},
+		{Key: "key-2", Value: []byte("value-2")},
+		{Key: "key-3", Value: []byte("value-3")},
+	}
+
+	prototest.TestResponse(t, v11, &fetch.Response{
+		Topics: []fetch.ResponseTopic{
+			{
+				Topic: "topic-1",
+				Partitions: []fetch.ResponsePartition{
+					{
+						Partition:     1,
+						HighWatermark: 1000,
+						RecordSet: protocol.RecordSet{
+							Version: 2,
+							Records: protocol.NewRecordReader(
+								protocol.Record{Offset: 0, Time: t0, Key: nil, Value: prototest.String("msg-0"), Headers: headers},
+								protocol.Record{Offset: 1, Time: t1, Key: nil, Value: prototest.String("msg-1")},
+								protocol.Record{Offset: 2, Time: t2, Key: prototest.Bytes([]byte{1}), Value: prototest.String("msg-2")},
+							),
+						},
+					},
+				},
+			},
+		},
+	})
+}
+
+func BenchmarkFetchResponse(b *testing.B) {
+	t0 := time.Now().Truncate(time.Millisecond)
+	t1 := t0.Add(1 * time.Millisecond)
+	t2 := t0.Add(2 * time.Millisecond)
+
+	prototest.BenchmarkResponse(b, v0, &fetch.Response{
+		Topics: []fetch.ResponseTopic{
+			{
+				Topic: "topic-1",
+				Partitions: []fetch.ResponsePartition{
+					{
+						Partition:     1,
+						HighWatermark: 1000,
+						RecordSet: protocol.RecordSet{
+							Version: 1,
+							Records: protocol.NewRecordReader(
+								protocol.Record{Offset: 0, Time: t0, Key: nil, Value: prototest.String("msg-0")},
+								protocol.Record{Offset: 1, Time: t1, Key: nil, Value: prototest.String("msg-1")},
+								protocol.Record{Offset: 2, Time: t2, Key: prototest.Bytes([]byte{1}), Value: prototest.String("msg-2")},
+							),
+						},
+					},
+				},
+			},
+		},
+	})
+
+	headers := []protocol.Header{
+		{Key: "key-1", Value: []byte("value-1")},
+		{Key: "key-2", Value: []byte("value-2")},
+		{Key: "key-3", Value: []byte("value-3")},
+	}
+
+	prototest.BenchmarkResponse(b, v11, &fetch.Response{
+		Topics: []fetch.ResponseTopic{
+			{
+				Topic: "topic-1",
+				Partitions: []fetch.ResponsePartition{
+					{
+						Partition:     1,
+						HighWatermark: 1000,
+						RecordSet: protocol.RecordSet{
+							Version: 2,
+							Records: protocol.NewRecordReader(
+								protocol.Record{Offset: 0, Time: t0, Key: nil, Value: prototest.String("msg-0"), Headers: headers},
+								protocol.Record{Offset: 1, Time: t1, Key: nil, Value: prototest.String("msg-1")},
+								protocol.Record{Offset: 2, Time: t2, Key: prototest.Bytes([]byte{1}), Value: prototest.String("msg-2")},
+							),
+						},
+					},
+				},
+			},
+		},
+	})
+}
diff -pruN 0.2.1-1.1/protocol/findcoordinator/findcoordinator.go 0.4.49+ds1-1/protocol/findcoordinator/findcoordinator.go
--- 0.2.1-1.1/protocol/findcoordinator/findcoordinator.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/protocol/findcoordinator/findcoordinator.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,25 @@
+package findcoordinator
+
+import "github.com/segmentio/kafka-go/protocol"
+
+func init() {
+	protocol.Register(&Request{}, &Response{})
+}
+
+type Request struct {
+	Key     string `kafka:"min=v0,max=v2"`
+	KeyType int8   `kafka:"min=v1,max=v2"`
+}
+
+func (r *Request) ApiKey() protocol.ApiKey { return protocol.FindCoordinator }
+
+type Response struct {
+	ThrottleTimeMs int32  `kafka:"min=v1,max=v2"`
+	ErrorCode      int16  `kafka:"min=v0,max=v2"`
+	ErrorMessage   string `kafka:"min=v1,max=v2,nullable"`
+	NodeID         int32  `kafka:"min=v0,max=v2"`
+	Host           string `kafka:"min=v0,max=v2"`
+	Port           int32  `kafka:"min=v0,max=v2"`
+}
+
+func (r *Response) ApiKey() protocol.ApiKey { return protocol.FindCoordinator }
diff -pruN 0.2.1-1.1/protocol/heartbeat/heartbeat.go 0.4.49+ds1-1/protocol/heartbeat/heartbeat.go
--- 0.2.1-1.1/protocol/heartbeat/heartbeat.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/protocol/heartbeat/heartbeat.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,36 @@
+package heartbeat
+
+import "github.com/segmentio/kafka-go/protocol"
+
+func init() {
+	protocol.Register(&Request{}, &Response{})
+}
+
+// Detailed API definition: https://kafka.apache.org/protocol#The_Messages_Heartbeat
+type Request struct {
+	// We need at least one tagged field to indicate that this is a "flexible" message
+	// type.
+	_ struct{} `kafka:"min=v4,max=v4,tag"`
+
+	GroupID         string `kafka:"min=v0,max=v4"`
+	GenerationID    int32  `kafka:"min=v0,max=v4"`
+	MemberID        string `kafka:"min=v0,max=v4"`
+	GroupInstanceID string `kafka:"min=v3,max=v4,nullable"`
+}
+
+func (r *Request) ApiKey() protocol.ApiKey {
+	return protocol.Heartbeat
+}
+
+type Response struct {
+	// We need at least one tagged field to indicate that this is a "flexible" message
+	// type.
+	_ struct{} `kafka:"min=v4,max=v4,tag"`
+
+	ErrorCode      int16 `kafka:"min=v0,max=v4"`
+	ThrottleTimeMs int32 `kafka:"min=v1,max=v4"`
+}
+
+func (r *Response) ApiKey() protocol.ApiKey {
+	return protocol.Heartbeat
+}
diff -pruN 0.2.1-1.1/protocol/heartbeat/heartbeat_test.go 0.4.49+ds1-1/protocol/heartbeat/heartbeat_test.go
--- 0.2.1-1.1/protocol/heartbeat/heartbeat_test.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/protocol/heartbeat/heartbeat_test.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,44 @@
+package heartbeat_test
+
+import (
+	"testing"
+
+	"github.com/segmentio/kafka-go/protocol/heartbeat"
+	"github.com/segmentio/kafka-go/protocol/prototest"
+)
+
+func TestHeartbeatRequest(t *testing.T) {
+	// Versions 0-3 all have the same fields.
+	for _, version := range []int16{0, 1, 2, 3} {
+		prototest.TestRequest(t, version, &heartbeat.Request{
+			GroupID:      "group-1",
+			GenerationID: 1,
+			MemberID:     "member-1",
+		})
+	}
+
+	for _, version := range []int16{4} {
+		prototest.TestRequest(t, version, &heartbeat.Request{
+			GroupID:         "group-2",
+			GenerationID:    10,
+			MemberID:        "member-2",
+			GroupInstanceID: "instance-1",
+		})
+	}
+}
+
+func TestHeartbeatResponse(t *testing.T) {
+	for _, version := range []int16{0} {
+		prototest.TestResponse(t, version, &heartbeat.Response{
+			ErrorCode: 4,
+		})
+	}
+
+	// Versions 1-4 all have the same fields.
+	for _, version := range []int16{1, 2, 3, 4} {
+		prototest.TestResponse(t, version, &heartbeat.Response{
+			ErrorCode:      4,
+			ThrottleTimeMs: 10,
+		})
+	}
+}
diff -pruN 0.2.1-1.1/protocol/incrementalalterconfigs/incrementalalterconfigs.go 0.4.49+ds1-1/protocol/incrementalalterconfigs/incrementalalterconfigs.go
--- 0.2.1-1.1/protocol/incrementalalterconfigs/incrementalalterconfigs.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/protocol/incrementalalterconfigs/incrementalalterconfigs.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,79 @@
+package incrementalalterconfigs
+
+import (
+	"errors"
+	"strconv"
+
+	"github.com/segmentio/kafka-go/protocol"
+)
+
+const (
+	resourceTypeBroker int8 = 4
+)
+
+func init() {
+	protocol.Register(&Request{}, &Response{})
+}
+
+// Detailed API definition: https://kafka.apache.org/protocol#The_Messages_IncrementalAlterConfigs
+type Request struct {
+	Resources    []RequestResource `kafka:"min=v0,max=v0"`
+	ValidateOnly bool              `kafka:"min=v0,max=v0"`
+}
+
+type RequestResource struct {
+	ResourceType int8            `kafka:"min=v0,max=v0"`
+	ResourceName string          `kafka:"min=v0,max=v0"`
+	Configs      []RequestConfig `kafka:"min=v0,max=v0"`
+}
+
+type RequestConfig struct {
+	Name            string `kafka:"min=v0,max=v0"`
+	ConfigOperation int8   `kafka:"min=v0,max=v0"`
+	Value           string `kafka:"min=v0,max=v0,nullable"`
+}
+
+func (r *Request) ApiKey() protocol.ApiKey { return protocol.IncrementalAlterConfigs }
+
+func (r *Request) Broker(cluster protocol.Cluster) (protocol.Broker, error) {
+	// Check that at most only one broker is being updated.
+	//
+	// TODO: Support updating multiple brokers in a single request.
+	brokers := map[string]struct{}{}
+	for _, resource := range r.Resources {
+		if resource.ResourceType == resourceTypeBroker {
+			brokers[resource.ResourceName] = struct{}{}
+		}
+	}
+	if len(brokers) > 1 {
+		return protocol.Broker{},
+			errors.New("Updating more than one broker in a single request is not supported yet")
+	}
+
+	for _, resource := range r.Resources {
+		if resource.ResourceType == resourceTypeBroker {
+			brokerID, err := strconv.Atoi(resource.ResourceName)
+			if err != nil {
+				return protocol.Broker{}, err
+			}
+
+			return cluster.Brokers[int32(brokerID)], nil
+		}
+	}
+
+	return cluster.Brokers[cluster.Controller], nil
+}
+
+type Response struct {
+	ThrottleTimeMs int32                   `kafka:"min=v0,max=v0"`
+	Responses      []ResponseAlterResponse `kafka:"min=v0,max=v0"`
+}
+
+type ResponseAlterResponse struct {
+	ErrorCode    int16  `kafka:"min=v0,max=v0"`
+	ErrorMessage string `kafka:"min=v0,max=v0,nullable"`
+	ResourceType int8   `kafka:"min=v0,max=v0"`
+	ResourceName string `kafka:"min=v0,max=v0"`
+}
+
+func (r *Response) ApiKey() protocol.ApiKey { return protocol.IncrementalAlterConfigs }
diff -pruN 0.2.1-1.1/protocol/incrementalalterconfigs/incrementalalterconfigs_test.go 0.4.49+ds1-1/protocol/incrementalalterconfigs/incrementalalterconfigs_test.go
--- 0.2.1-1.1/protocol/incrementalalterconfigs/incrementalalterconfigs_test.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/protocol/incrementalalterconfigs/incrementalalterconfigs_test.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,130 @@
+package incrementalalterconfigs_test
+
+import (
+	"testing"
+
+	"github.com/segmentio/kafka-go/protocol"
+	"github.com/segmentio/kafka-go/protocol/incrementalalterconfigs"
+)
+
+const (
+	resourceTypeTopic  int8 = 2
+	resourceTypeBroker int8 = 4
+)
+
+func TestMetadataRequestBroker(t *testing.T) {
+	req := &incrementalalterconfigs.Request{
+		Resources: []incrementalalterconfigs.RequestResource{
+			{
+				ResourceType: resourceTypeBroker,
+				ResourceName: "1",
+				Configs: []incrementalalterconfigs.RequestConfig{
+					{
+						Name:  "test-name1",
+						Value: "test-value1",
+					},
+				},
+			},
+			{
+				ResourceType: resourceTypeBroker,
+				ResourceName: "1",
+				Configs: []incrementalalterconfigs.RequestConfig{
+					{
+						Name:  "test-name2",
+						Value: "test-value2",
+					},
+				},
+			},
+			{
+				ResourceType: resourceTypeTopic,
+				ResourceName: "test-topic1",
+				Configs: []incrementalalterconfigs.RequestConfig{
+					{
+						Name:  "test-name3",
+						Value: "test-value3",
+					},
+				},
+			},
+			{
+				ResourceType: resourceTypeTopic,
+				ResourceName: "test-topic2",
+				Configs: []incrementalalterconfigs.RequestConfig{
+					{
+						Name:  "test-name4",
+						Value: "test-value4",
+					},
+				},
+			},
+		},
+	}
+	b, err := req.Broker(protocol.Cluster{
+		Brokers: map[int32]protocol.Broker{
+			0: {
+				ID: 0,
+			},
+			1: {
+				ID: 1,
+			},
+		},
+	})
+	if err != nil {
+		t.Fatal(
+			"Unexpected error getting request broker",
+			"expected", nil,
+			"got", err,
+		)
+	}
+	if b.ID != 1 {
+		t.Error(
+			"Unexpected id returned for request broker",
+			"expected", 1,
+			"got", b.ID,
+		)
+	}
+
+	req = &incrementalalterconfigs.Request{
+		Resources: []incrementalalterconfigs.RequestResource{
+			{
+				ResourceType: resourceTypeBroker,
+				ResourceName: "1",
+				Configs: []incrementalalterconfigs.RequestConfig{
+					{
+						Name:  "test-name1",
+						Value: "test-value1",
+					},
+				},
+			},
+			{
+				ResourceType: resourceTypeBroker,
+				ResourceName: "2",
+				Configs: []incrementalalterconfigs.RequestConfig{
+					{
+						Name:  "test-name2",
+						Value: "test-value2",
+					},
+				},
+			},
+		},
+	}
+
+	_, err = req.Broker(protocol.Cluster{
+		Brokers: map[int32]protocol.Broker{
+			0: {
+				ID: 0,
+			},
+			1: {
+				ID: 1,
+			},
+			2: {
+				ID: 1,
+			},
+		},
+	})
+	if err == nil {
+		t.Fatal(
+			"Unexpected error getting request broker",
+			"expected", "non-nil",
+			"got", err,
+		)
+	}
+}
diff -pruN 0.2.1-1.1/protocol/initproducerid/initproducerid.go 0.4.49+ds1-1/protocol/initproducerid/initproducerid.go
--- 0.2.1-1.1/protocol/initproducerid/initproducerid.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/protocol/initproducerid/initproducerid.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,37 @@
+package initproducerid
+
+import "github.com/segmentio/kafka-go/protocol"
+
+func init() {
+	protocol.Register(&Request{}, &Response{})
+}
+
+type Request struct {
+	// We need at least one tagged field to indicate that this is a "flexible" message
+	// type.
+	_ struct{} `kafka:"min=v2,max=v4,tag"`
+
+	TransactionalID      string `kafka:"min=v0,max=v4,nullable"`
+	TransactionTimeoutMs int32  `kafka:"min=v0,max=v4"`
+	ProducerID           int64  `kafka:"min=v3,max=v4"`
+	ProducerEpoch        int16  `kafka:"min=v3,max=v4"`
+}
+
+func (r *Request) ApiKey() protocol.ApiKey { return protocol.InitProducerId }
+
+func (r *Request) Transaction() string { return r.TransactionalID }
+
+var _ protocol.TransactionalMessage = (*Request)(nil)
+
+type Response struct {
+	// We need at least one tagged field to indicate that this is a "flexible" message
+	// type.
+	_ struct{} `kafka:"min=v2,max=v4,tag"`
+
+	ThrottleTimeMs int32 `kafka:"min=v0,max=v4"`
+	ErrorCode      int16 `kafka:"min=v0,max=v4"`
+	ProducerID     int64 `kafka:"min=v0,max=v4"`
+	ProducerEpoch  int16 `kafka:"min=v0,max=v4"`
+}
+
+func (r *Response) ApiKey() protocol.ApiKey { return protocol.InitProducerId }
diff -pruN 0.2.1-1.1/protocol/initproducerid/initproducerid_test.go 0.4.49+ds1-1/protocol/initproducerid/initproducerid_test.go
--- 0.2.1-1.1/protocol/initproducerid/initproducerid_test.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/protocol/initproducerid/initproducerid_test.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,40 @@
+package initproducerid_test
+
+import (
+	"testing"
+
+	"github.com/segmentio/kafka-go/protocol/initproducerid"
+	"github.com/segmentio/kafka-go/protocol/prototest"
+)
+
+func TestInitProducerIDRequest(t *testing.T) {
+	for _, version := range []int16{0, 1, 2} {
+		prototest.TestRequest(t, version, &initproducerid.Request{
+			TransactionalID:      "transactional-id-0",
+			TransactionTimeoutMs: 1000,
+		})
+	}
+
+	// Version 3 added:
+	// ProducerID
+	// ProducerEpoch
+	for _, version := range []int16{3, 4} {
+		prototest.TestRequest(t, version, &initproducerid.Request{
+			TransactionalID:      "transactional-id-0",
+			TransactionTimeoutMs: 1000,
+			ProducerID:           10,
+			ProducerEpoch:        5,
+		})
+	}
+}
+
+func TestInitProducerIDResponse(t *testing.T) {
+	for _, version := range []int16{0, 1, 2, 3, 4} {
+		prototest.TestResponse(t, version, &initproducerid.Response{
+			ThrottleTimeMs: 1000,
+			ErrorCode:      9,
+			ProducerID:     10,
+			ProducerEpoch:  1000,
+		})
+	}
+}
diff -pruN 0.2.1-1.1/protocol/joingroup/joingroup.go 0.4.49+ds1-1/protocol/joingroup/joingroup.go
--- 0.2.1-1.1/protocol/joingroup/joingroup.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/protocol/joingroup/joingroup.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,67 @@
+package joingroup
+
+import "github.com/segmentio/kafka-go/protocol"
+
+func init() {
+	protocol.Register(&Request{}, &Response{})
+}
+
+type Request struct {
+	// We need at least one tagged field to indicate that this is a "flexible" message
+	// type.
+	_ struct{} `kafka:"min=v6,max=v7,tag"`
+
+	GroupID            string            `kafka:"min=v0,max=v5|min=v6,max=v7,compact"`
+	SessionTimeoutMS   int32             `kafka:"min=v0,max=v7"`
+	RebalanceTimeoutMS int32             `kafka:"min=v1,max=v7"`
+	MemberID           string            `kafka:"min=v0,max=v5|min=v6,max=v7,compact"`
+	GroupInstanceID    string            `kafka:"min=v5,max=v5,nullable|min=v6,max=v7,compact,nullable"`
+	ProtocolType       string            `kafka:"min=v0,max=v5|min=v6,max=v7,compact"`
+	Protocols          []RequestProtocol `kafka:"min=v0,max=v7"`
+}
+
+type RequestProtocol struct {
+	// We need at least one tagged field to indicate that this is a "flexible" message
+	// type.
+	_ struct{} `kafka:"min=v6,max=v7,tag"`
+
+	Name     string `kafka:"min=v0,max=v5|min=v6,max=v7,compact"`
+	Metadata []byte `kafka:"min=v0,max=v5|min=v6,max=v7,compact"`
+}
+
+func (r *Request) ApiKey() protocol.ApiKey {
+	return protocol.JoinGroup
+}
+
+func (r *Request) Group() string { return r.GroupID }
+
+var _ protocol.GroupMessage = (*Request)(nil)
+
+type Response struct {
+	// We need at least one tagged field to indicate that this is a "flexible" message
+	// type.
+	_ struct{} `kafka:"min=v6,max=v7,tag"`
+
+	ThrottleTimeMS int32            `kafka:"min=v2,max=v7"`
+	ErrorCode      int16            `kafka:"min=v0,max=v7"`
+	GenerationID   int32            `kafka:"min=v0,max=v7"`
+	ProtocolType   string           `kafka:"min=v7,max=v7,compact,nullable"`
+	ProtocolName   string           `kafka:"min=v0,max=v5|min=v6,max=v6,compact|min=v7,max=v7,compact,nullable"`
+	LeaderID       string           `kafka:"min=v0,max=v5|min=v6,max=v7,compact"`
+	MemberID       string           `kafka:"min=v0,max=v5|min=v6,max=v7,compact"`
+	Members        []ResponseMember `kafka:"min=v0,max=v7"`
+}
+
+type ResponseMember struct {
+	// We need at least one tagged field to indicate that this is a "flexible" message
+	// type.
+	_ struct{} `kafka:"min=v6,max=v7,tag"`
+
+	MemberID        string `kafka:"min=v0,max=v5|min=v6,max=v7,compact"`
+	GroupInstanceID string `kafka:"min=v5,max=v5,nullable|min=v6,max=v7,nullable,compact"`
+	Metadata        []byte `kafka:"min=v0,max=v5|min=v6,max=v7,compact"`
+}
+
+type ResponseMemberMetadata struct{}
+
+func (r *Response) ApiKey() protocol.ApiKey { return protocol.JoinGroup }
diff -pruN 0.2.1-1.1/protocol/joingroup/joingroup_test.go 0.4.49+ds1-1/protocol/joingroup/joingroup_test.go
--- 0.2.1-1.1/protocol/joingroup/joingroup_test.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/protocol/joingroup/joingroup_test.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,140 @@
+package joingroup_test
+
+import (
+	"testing"
+
+	"github.com/segmentio/kafka-go/protocol/joingroup"
+	"github.com/segmentio/kafka-go/protocol/prototest"
+)
+
+func TestJoinGroupReq(t *testing.T) {
+	for _, version := range []int16{0} {
+		prototest.TestRequest(t, version, &joingroup.Request{
+			GroupID:          "group-id",
+			SessionTimeoutMS: 10,
+			MemberID:         "member-id",
+			ProtocolType:     "protocol-type",
+			Protocols: []joingroup.RequestProtocol{
+				{
+					Name:     "protocol-1",
+					Metadata: []byte{0, 1, 2, 3, 4},
+				},
+			},
+		})
+	}
+
+	// Version 1 added
+	// RebalanceTimeoutMS
+	for _, version := range []int16{1, 2, 3, 4} {
+		prototest.TestRequest(t, version, &joingroup.Request{
+			GroupID:            "group-id",
+			SessionTimeoutMS:   10,
+			RebalanceTimeoutMS: 10,
+			MemberID:           "member-id",
+			ProtocolType:       "protocol-type",
+			Protocols: []joingroup.RequestProtocol{
+				{
+					Name:     "protocol-1",
+					Metadata: []byte{0, 1, 2, 3, 4},
+				},
+			},
+		})
+	}
+
+	// Version 5 added
+	// GroupInstanceID
+	for _, version := range []int16{5, 6, 7} {
+		prototest.TestRequest(t, version, &joingroup.Request{
+			GroupID:            "group-id",
+			SessionTimeoutMS:   10,
+			RebalanceTimeoutMS: 10,
+			MemberID:           "member-id",
+			ProtocolType:       "protocol-type",
+			GroupInstanceID:    "group-instance-id",
+			Protocols: []joingroup.RequestProtocol{
+				{
+					Name:     "protocol-1",
+					Metadata: []byte{0, 1, 2, 3, 4},
+				},
+			},
+		})
+	}
+}
+
+func TestJoinGroupResp(t *testing.T) {
+	for _, version := range []int16{0, 1} {
+		prototest.TestResponse(t, version, &joingroup.Response{
+			ErrorCode:    10,
+			GenerationID: 10,
+			ProtocolName: "protocol-name",
+			LeaderID:     "leader",
+			MemberID:     "member-id-1",
+			Members: []joingroup.ResponseMember{
+				{
+					MemberID: "member-id-2",
+					Metadata: []byte{0, 1, 2, 3, 4},
+				},
+			},
+		})
+	}
+
+	// Version 2 added
+	// ThrottleTimeMS
+	for _, version := range []int16{2, 3, 4} {
+		prototest.TestResponse(t, version, &joingroup.Response{
+			ErrorCode:      10,
+			GenerationID:   10,
+			ThrottleTimeMS: 100,
+			ProtocolName:   "protocol-name",
+			LeaderID:       "leader",
+			MemberID:       "member-id-1",
+			Members: []joingroup.ResponseMember{
+				{
+					MemberID: "member-id-2",
+					Metadata: []byte{0, 1, 2, 3, 4},
+				},
+			},
+		})
+	}
+
+	// Version 5 added
+	// ResponseMember.GroupInstanceID
+	for _, version := range []int16{5, 6} {
+		prototest.TestResponse(t, version, &joingroup.Response{
+			ErrorCode:      10,
+			GenerationID:   10,
+			ThrottleTimeMS: 100,
+			ProtocolName:   "protocol-name",
+			LeaderID:       "leader",
+			MemberID:       "member-id-1",
+			Members: []joingroup.ResponseMember{
+				{
+					MemberID:        "member-id-2",
+					Metadata:        []byte{0, 1, 2, 3, 4},
+					GroupInstanceID: "group-instance-id",
+				},
+			},
+		})
+	}
+
+	// Version 7 added
+	// ProtocolType
+	for _, version := range []int16{7} {
+		prototest.TestResponse(t, version, &joingroup.Response{
+			ErrorCode:      10,
+			GenerationID:   10,
+			ThrottleTimeMS: 100,
+			ProtocolName:   "protocol-name",
+			ProtocolType:   "protocol-type",
+			LeaderID:       "leader",
+			MemberID:       "member-id-1",
+			Members: []joingroup.ResponseMember{
+				{
+					MemberID:        "member-id-2",
+					Metadata:        []byte{0, 1, 2, 3, 4},
+					GroupInstanceID: "group-instance-id",
+				},
+			},
+		})
+	}
+}
diff -pruN 0.2.1-1.1/protocol/leavegroup/leavegroup.go 0.4.49+ds1-1/protocol/leavegroup/leavegroup.go
--- 0.2.1-1.1/protocol/leavegroup/leavegroup.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/protocol/leavegroup/leavegroup.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,65 @@
+package leavegroup
+
+import "github.com/segmentio/kafka-go/protocol"
+
+func init() {
+	protocol.Register(&Request{}, &Response{})
+}
+
+type Request struct {
+	// We need at least one tagged field to indicate that this is a "flexible" message
+	// type.
+	_ struct{} `kafka:"min=v4,max=v4,tag"`
+
+	GroupID  string          `kafka:"min=v0,max=v2|min=v3,max=v4,compact"`
+	MemberID string          `kafka:"min=v0,max=v2"`
+	Members  []RequestMember `kafka:"min=v3,max=v4"`
+}
+
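+// Prepare back-fills the legacy MemberID field from the first entry of
+// Members when the request is encoded for API versions older than v3, which,
+// per the field version tags above, only carried a single member per request.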
+func (r *Request) Prepare(apiVersion int16) {
+	if apiVersion < 3 {
+		if len(r.Members) > 0 {
+			r.MemberID = r.Members[0].MemberID
+		}
+	}
+}
+
+type RequestMember struct {
+	// We need at least one tagged field to indicate that this is a "flexible" message
+	// type.
+	_ struct{} `kafka:"min=v4,max=v4,tag"`
+
+	MemberID        string `kafka:"min=v3,max=v3|min=v4,max=v4,compact"`
+	GroupInstanceID string `kafka:"min=v3,max=v3,nullable|min=v4,max=v4,nullable,compact"`
+}
+
+func (r *Request) ApiKey() protocol.ApiKey { return protocol.LeaveGroup }
+
+func (r *Request) Group() string { return r.GroupID }
+
+var (
+	_ protocol.GroupMessage    = (*Request)(nil)
+	_ protocol.PreparedMessage = (*Request)(nil)
+)
+
+type Response struct {
+	// We need at least one tagged field to indicate that this is a "flexible" message
+	// type.
+	_ struct{} `kafka:"min=v4,max=v4,tag"`
+
+	ErrorCode      int16            `kafka:"min=v0,max=v4"`
+	ThrottleTimeMS int32            `kafka:"min=v1,max=v4"`
+	Members        []ResponseMember `kafka:"min=v3,max=v4"`
+}
+
+type ResponseMember struct {
+	// We need at least one tagged field to indicate that this is a "flexible" message
+	// type.
+	_ struct{} `kafka:"min=v4,max=v4,tag"`
+
+	MemberID        string `kafka:"min=v3,max=v3|min=v4,max=v4,compact"`
+	GroupInstanceID string `kafka:"min=v3,max=v3,nullable|min=v4,max=v4,nullable,compact"`
+	ErrorCode       int16  `kafka:"min=v3,max=v4"`
+}
+
+func (r *Response) ApiKey() protocol.ApiKey { return protocol.LeaveGroup }
diff -pruN 0.2.1-1.1/protocol/leavegroup/leavegroup_test.go 0.4.49+ds1-1/protocol/leavegroup/leavegroup_test.go
--- 0.2.1-1.1/protocol/leavegroup/leavegroup_test.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/protocol/leavegroup/leavegroup_test.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,66 @@
+package leavegroup_test
+
+import (
+	"testing"
+
+	"github.com/segmentio/kafka-go/protocol/leavegroup"
+	"github.com/segmentio/kafka-go/protocol/prototest"
+)
+
+func TestLeaveGroupReq(t *testing.T) {
+	for _, version := range []int16{0, 1, 2} {
+		prototest.TestRequest(t, version, &leavegroup.Request{
+			GroupID:  "group-id",
+			MemberID: "member-id",
+		})
+	}
+
+	// Version 3 added
+	// Members
+	// and removed
+	// MemberID
+	for _, version := range []int16{3, 4} {
+		prototest.TestRequest(t, version, &leavegroup.Request{
+			GroupID: "group-id",
+			Members: []leavegroup.RequestMember{
+				{
+					MemberID:        "member-id-1",
+					GroupInstanceID: "group-instance-id",
+				},
+			},
+		})
+	}
+}
+
+func TestLeaveGroupResp(t *testing.T) {
+	for _, version := range []int16{0} {
+		prototest.TestResponse(t, version, &leavegroup.Response{
+			ErrorCode: 10,
+		})
+	}
+
+	// Version 1 added
+	// ThrottleTimeMS
+	for _, version := range []int16{1, 2} {
+		prototest.TestResponse(t, version, &leavegroup.Response{
+			ErrorCode:      10,
+			ThrottleTimeMS: 100,
+		})
+	}
+
+	// Version 3 added
+	// Members
+	for _, version := range []int16{3, 4} {
+		prototest.TestResponse(t, version, &leavegroup.Response{
+			ErrorCode:      10,
+			ThrottleTimeMS: 100,
+			Members: []leavegroup.ResponseMember{
+				{
+					MemberID:        "member-id-1",
+					GroupInstanceID: "group-instance-id",
+					ErrorCode:       10,
+				},
+			},
+		})
+	}
+}
diff -pruN 0.2.1-1.1/protocol/listgroups/listgroups.go 0.4.49+ds1-1/protocol/listgroups/listgroups.go
--- 0.2.1-1.1/protocol/listgroups/listgroups.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/protocol/listgroups/listgroups.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,82 @@
+package listgroups
+
+import (
+	"github.com/segmentio/kafka-go/protocol"
+)
+
+func init() {
+	protocol.Register(&Request{}, &Response{})
+}
+
+// Detailed API definition: https://kafka.apache.org/protocol#The_Messages_ListGroups
+type Request struct {
+	_        struct{} `kafka:"min=v0,max=v2"`
+	brokerID int32
+}
+
+func (r *Request) ApiKey() protocol.ApiKey { return protocol.ListGroups }
+
+func (r *Request) Broker(cluster protocol.Cluster) (protocol.Broker, error) {
+	return cluster.Brokers[r.brokerID], nil
+}
+
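+// Split fans the ListGroups request out to every broker in the cluster; the
+// per-broker responses are recombined by Response.Merge below, which tags
+// each group with the BrokerID of the broker that reported it.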
+func (r *Request) Split(cluster protocol.Cluster) (
+	[]protocol.Message,
+	protocol.Merger,
+	error,
+) {
+	messages := []protocol.Message{}
+
+	for _, broker := range cluster.Brokers {
+		messages = append(messages, &Request{brokerID: broker.ID})
+	}
+
+	return messages, new(Response), nil
+}
+
+type Response struct {
+	ThrottleTimeMs int32           `kafka:"min=v1,max=v2"`
+	ErrorCode      int16           `kafka:"min=v0,max=v2"`
+	Groups         []ResponseGroup `kafka:"min=v0,max=v2"`
+}
+
+type ResponseGroup struct {
+	GroupID      string `kafka:"min=v0,max=v2"`
+	ProtocolType string `kafka:"min=v0,max=v2"`
+
+	// Use this to store which broker returned the response
+	BrokerID int32 `kafka:"-"`
+}
+
+func (r *Response) ApiKey() protocol.ApiKey { return protocol.ListGroups }
+
+func (r *Response) Merge(requests []protocol.Message, results []interface{}) (
+	protocol.Message,
+	error,
+) {
+	response := &Response{}
+
+	for i, result := range results {
+		m, err := protocol.Result(result)
+		if err != nil {
+			return nil, err
+		}
+		brokerResp := m.(*Response)
+		respGroups := []ResponseGroup{}
+
+		for _, group := range brokerResp.Groups {
+			respGroups = append(
+				respGroups,
+				ResponseGroup{
+					GroupID:      group.GroupID,
+					ProtocolType: group.ProtocolType,
+					BrokerID:     requests[i].(*Request).brokerID,
+				},
+			)
+		}
+
+		response.Groups = append(response.Groups, respGroups...)
+	}
+
+	return response, nil
+}
diff -pruN 0.2.1-1.1/protocol/listoffsets/listoffsets.go 0.4.49+ds1-1/protocol/listoffsets/listoffsets.go
--- 0.2.1-1.1/protocol/listoffsets/listoffsets.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/protocol/listoffsets/listoffsets.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,230 @@
+package listoffsets
+
+import (
+	"sort"
+
+	"github.com/segmentio/kafka-go/protocol"
+)
+
+func init() {
+	protocol.Register(&Request{}, &Response{})
+}
+
+type Request struct {
+	ReplicaID      int32          `kafka:"min=v1,max=v5"`
+	IsolationLevel int8           `kafka:"min=v2,max=v5"`
+	Topics         []RequestTopic `kafka:"min=v1,max=v5"`
+}
+
+type RequestTopic struct {
+	Topic      string             `kafka:"min=v1,max=v5"`
+	Partitions []RequestPartition `kafka:"min=v1,max=v5"`
+}
+
+type RequestPartition struct {
+	Partition          int32 `kafka:"min=v1,max=v5"`
+	CurrentLeaderEpoch int32 `kafka:"min=v4,max=v5"`
+	Timestamp          int64 `kafka:"min=v1,max=v5"`
+	// v0 of the API predates kafka 0.10, and doesn't make much sense to
+	// use so we chose not to support it. It had this extra field to limit
+	// the number of offsets returned, which has been removed in v1.
+	//
+	// MaxNumOffsets int32 `kafka:"min=v0,max=v0"`
+}
+
+func (r *Request) ApiKey() protocol.ApiKey { return protocol.ListOffsets }
+
+func (r *Request) Broker(cluster protocol.Cluster) (protocol.Broker, error) {
+	// Expects r to be a request that was returned by Split, and will likely
+	// panic or produce the wrong result if that's not the case.
+	partition := r.Topics[0].Partitions[0].Partition
+	topic := r.Topics[0].Topic
+
+	for _, p := range cluster.Topics[topic].Partitions {
+		if p.ID == partition {
+			return cluster.Brokers[p.Leader], nil
+		}
+	}
+
+	return protocol.Broker{ID: -1}, nil
+}
+
+func (r *Request) Split(cluster protocol.Cluster) ([]protocol.Message, protocol.Merger, error) {
+	// Because kafka refuses to answer ListOffsets requests containing multiple
+	// entries of unique topic/partition pairs, we submit multiple requests on
+	// the wire and merge their results back.
+	//
+	// ListOffsets requests also need to be sent to partition leaders, so to
+	// keep the logic simple we split each offset request into its own message.
+	// This may cause more requests to be sent on the wire, but it keeps the
+	// code sane, and we can still optimize the aggregation mechanism later if
+	// it becomes a problem.
+	//
+	// Really the idea here is to shield applications from having to deal with
+	// this limitation of the kafka server, so they can request any combination
+	// of topic/partition/offsets.
+	requests := make([]Request, 0, 2*len(r.Topics))
+
+	for _, t := range r.Topics {
+		for _, p := range t.Partitions {
+			requests = append(requests, Request{
+				ReplicaID:      r.ReplicaID,
+				IsolationLevel: r.IsolationLevel,
+				Topics: []RequestTopic{{
+					Topic: t.Topic,
+					Partitions: []RequestPartition{{
+						Partition:          p.Partition,
+						CurrentLeaderEpoch: p.CurrentLeaderEpoch,
+						Timestamp:          p.Timestamp,
+					}},
+				}},
+			})
+		}
+	}
+
+	messages := make([]protocol.Message, len(requests))
+
+	for i := range requests {
+		messages[i] = &requests[i]
+	}
+
+	return messages, new(Response), nil
+}
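+
+// Illustrative sketch of the fan-out, assuming a populated protocol.Cluster
+// value named cluster (names and values here are hypothetical, not part of
+// the package API):
+//
+//	req := &Request{Topics: []RequestTopic{{
+//		Topic:      "topic-A",
+//		Partitions: []RequestPartition{{Partition: 0, Timestamp: -1}, {Partition: 1, Timestamp: -1}},
+//	}}}
+//	msgs, merger, _ := req.Split(cluster)
+//	// len(msgs) == 2: one single-partition request per topic/partition pair,
+//	// each routable to its own partition leader via Broker(). merger is a
+//	// *Response used later to reassemble the results.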
+
+type Response struct {
+	ThrottleTimeMs int32           `kafka:"min=v2,max=v5"`
+	Topics         []ResponseTopic `kafka:"min=v1,max=v5"`
+}
+
+type ResponseTopic struct {
+	Topic      string              `kafka:"min=v1,max=v5"`
+	Partitions []ResponsePartition `kafka:"min=v1,max=v5"`
+}
+
+type ResponsePartition struct {
+	Partition   int32 `kafka:"min=v1,max=v5"`
+	ErrorCode   int16 `kafka:"min=v1,max=v5"`
+	Timestamp   int64 `kafka:"min=v1,max=v5"`
+	Offset      int64 `kafka:"min=v1,max=v5"`
+	LeaderEpoch int32 `kafka:"min=v4,max=v5"`
+}
+
+func (r *Response) ApiKey() protocol.ApiKey { return protocol.ListOffsets }
+
+func (r *Response) Merge(requests []protocol.Message, results []interface{}) (protocol.Message, error) {
+	type topicPartition struct {
+		topic     string
+		partition int32
+	}
+
+	// Kafka doesn't always return the timestamp in the response, for example
+	// when the request sends -2 (for the first offset) it always returns -1,
+	// probably to indicate that the timestamp is unknown. This means that we
+	// can't correlate the requests and responses based on their timestamps,
+	// the primary key is the topic/partition pair.
+	//
+	// To make the API a bit friendlier, we reconstruct an index from topic
+	// partitions to the timestamps that were requested, and override the
+	// timestamp values in the responses.
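+	//
+	// For example (hypothetical values): a request that asked for ("topic-A",
+	// partition 0) at timestamp -2 produces
+	//
+	//	timestamps[i] == map[topicPartition]int64{{topic: "topic-A", partition: 0}: -2}
+	//
+	// and the -1 timestamp returned by the broker for that partition is
+	// rewritten to -2 before the partition is appended below.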
+	timestamps := make([]map[topicPartition]int64, len(requests))
+
+	for i, m := range requests {
+		req := m.(*Request)
+		ts := make(map[topicPartition]int64, len(req.Topics))
+
+		for _, t := range req.Topics {
+			for _, p := range t.Partitions {
+				ts[topicPartition{
+					topic:     t.Topic,
+					partition: p.Partition,
+				}] = p.Timestamp
+			}
+		}
+
+		timestamps[i] = ts
+	}
+
+	topics := make(map[string][]ResponsePartition)
+	errors := 0
+
+	for i, res := range results {
+		m, err := protocol.Result(res)
+		if err != nil {
+			for _, t := range requests[i].(*Request).Topics {
+				partitions := topics[t.Topic]
+
+				for _, p := range t.Partitions {
+					partitions = append(partitions, ResponsePartition{
+						Partition:   p.Partition,
+						ErrorCode:   -1, // UNKNOWN, can we do better?
+						Timestamp:   -1,
+						Offset:      -1,
+						LeaderEpoch: -1,
+					})
+				}
+
+				topics[t.Topic] = partitions
+			}
+			errors++
+			continue
+		}
+
+		response := m.(*Response)
+
+		if r.ThrottleTimeMs < response.ThrottleTimeMs {
+			r.ThrottleTimeMs = response.ThrottleTimeMs
+		}
+
+		for _, t := range response.Topics {
+			for _, p := range t.Partitions {
+				if timestamp, ok := timestamps[i][topicPartition{
+					topic:     t.Topic,
+					partition: p.Partition,
+				}]; ok {
+					p.Timestamp = timestamp
+				}
+				topics[t.Topic] = append(topics[t.Topic], p)
+			}
+		}
+
+	}
+
+	if errors > 0 && errors == len(results) {
+		_, err := protocol.Result(results[0])
+		return nil, err
+	}
+
+	r.Topics = make([]ResponseTopic, 0, len(topics))
+
+	for topicName, partitions := range topics {
+		r.Topics = append(r.Topics, ResponseTopic{
+			Topic:      topicName,
+			Partitions: partitions,
+		})
+	}
+
+	sort.Slice(r.Topics, func(i, j int) bool {
+		return r.Topics[i].Topic < r.Topics[j].Topic
+	})
+
+	for _, t := range r.Topics {
+		sort.Slice(t.Partitions, func(i, j int) bool {
+			p1 := &t.Partitions[i]
+			p2 := &t.Partitions[j]
+
+			if p1.Partition != p2.Partition {
+				return p1.Partition < p2.Partition
+			}
+
+			return p1.Offset < p2.Offset
+		})
+	}
+
+	return r, nil
+}
+
+var (
+	_ protocol.BrokerMessage = (*Request)(nil)
+	_ protocol.Splitter      = (*Request)(nil)
+	_ protocol.Merger        = (*Response)(nil)
+)
diff -pruN 0.2.1-1.1/protocol/listoffsets/listoffsets_test.go 0.4.49+ds1-1/protocol/listoffsets/listoffsets_test.go
--- 0.2.1-1.1/protocol/listoffsets/listoffsets_test.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/protocol/listoffsets/listoffsets_test.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,104 @@
+package listoffsets_test
+
+import (
+	"testing"
+
+	"github.com/segmentio/kafka-go/protocol/listoffsets"
+	"github.com/segmentio/kafka-go/protocol/prototest"
+)
+
+const (
+	v1 = 1
+	v4 = 4
+)
+
+func TestListOffsetsRequest(t *testing.T) {
+	prototest.TestRequest(t, v1, &listoffsets.Request{
+		ReplicaID: 1,
+		Topics: []listoffsets.RequestTopic{
+			{
+				Topic: "topic-1",
+				Partitions: []listoffsets.RequestPartition{
+					{Partition: 0, Timestamp: 1e9},
+					{Partition: 1, Timestamp: 1e9},
+					{Partition: 2, Timestamp: 1e9},
+				},
+			},
+		},
+	})
+
+	prototest.TestRequest(t, v4, &listoffsets.Request{
+		ReplicaID:      1,
+		IsolationLevel: 2,
+		Topics: []listoffsets.RequestTopic{
+			{
+				Topic: "topic-1",
+				Partitions: []listoffsets.RequestPartition{
+					{Partition: 0, Timestamp: 1e9},
+					{Partition: 1, Timestamp: 1e9},
+					{Partition: 2, Timestamp: 1e9},
+				},
+			},
+			{
+				Topic: "topic-2",
+				Partitions: []listoffsets.RequestPartition{
+					{Partition: 0, CurrentLeaderEpoch: 10, Timestamp: 1e9},
+					{Partition: 1, CurrentLeaderEpoch: 11, Timestamp: 1e9},
+					{Partition: 2, CurrentLeaderEpoch: 12, Timestamp: 1e9},
+				},
+			},
+		},
+	})
+}
+
+func TestListOffsetsResponse(t *testing.T) {
+	prototest.TestResponse(t, v1, &listoffsets.Response{
+		Topics: []listoffsets.ResponseTopic{
+			{
+				Topic: "topic-1",
+				Partitions: []listoffsets.ResponsePartition{
+					{
+						Partition: 0,
+						ErrorCode: 0,
+						Timestamp: 1e9,
+						Offset:    1234567890,
+					},
+				},
+			},
+		},
+	})
+
+	prototest.TestResponse(t, v4, &listoffsets.Response{
+		ThrottleTimeMs: 1234,
+		Topics: []listoffsets.ResponseTopic{
+			{
+				Topic: "topic-1",
+				Partitions: []listoffsets.ResponsePartition{
+					{
+						Partition:   0,
+						ErrorCode:   0,
+						Timestamp:   1e9,
+						Offset:      1234567890,
+						LeaderEpoch: 10,
+					},
+				},
+			},
+			{
+				Topic: "topic-2",
+				Partitions: []listoffsets.ResponsePartition{
+					{
+						Partition:   0,
+						ErrorCode:   0,
+						Timestamp:   1e9,
+						Offset:      1234567890,
+						LeaderEpoch: 10,
+					},
+					{
+						Partition: 1,
+						ErrorCode: 2,
+					},
+				},
+			},
+		},
+	})
+}
diff -pruN 0.2.1-1.1/protocol/listpartitionreassignments/listpartitionreassignments.go 0.4.49+ds1-1/protocol/listpartitionreassignments/listpartitionreassignments.go
--- 0.2.1-1.1/protocol/listpartitionreassignments/listpartitionreassignments.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/protocol/listpartitionreassignments/listpartitionreassignments.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,70 @@
+package listpartitionreassignments
+
+import "github.com/segmentio/kafka-go/protocol"
+
+func init() {
+	protocol.Register(&Request{}, &Response{})
+}
+
+// Detailed API definition: https://kafka.apache.org/protocol#The_Messages_ListPartitionReassignments.
+
+type Request struct {
+	// We need at least one tagged field to indicate that this is a "flexible" message
+	// type.
+	_ struct{} `kafka:"min=v0,max=v0,tag"`
+
+	TimeoutMs int32          `kafka:"min=v0,max=v0"`
+	Topics    []RequestTopic `kafka:"min=v0,max=v0,nullable"`
+}
+
+type RequestTopic struct {
+	// We need at least one tagged field to indicate that this is a "flexible" message
+	// type.
+	_ struct{} `kafka:"min=v0,max=v0,tag"`
+
+	Name             string  `kafka:"min=v0,max=v0"`
+	PartitionIndexes []int32 `kafka:"min=v0,max=v0"`
+}
+
+func (r *Request) ApiKey() protocol.ApiKey {
+	return protocol.ListPartitionReassignments
+}
+
+func (r *Request) Broker(cluster protocol.Cluster) (protocol.Broker, error) {
+	return cluster.Brokers[cluster.Controller], nil
+}
+
+type Response struct {
+	// We need at least one tagged field to indicate that this is a "flexible" message
+	// type.
+	_ struct{} `kafka:"min=v0,max=v0,tag"`
+
+	ThrottleTimeMs int32           `kafka:"min=v0,max=v0"`
+	ErrorCode      int16           `kafka:"min=v0,max=v0"`
+	ErrorMessage   string          `kafka:"min=v0,max=v0,nullable"`
+	Topics         []ResponseTopic `kafka:"min=v0,max=v0"`
+}
+
+type ResponseTopic struct {
+	// We need at least one tagged field to indicate that this is a "flexible" message
+	// type.
+	_ struct{} `kafka:"min=v0,max=v0,tag"`
+
+	Name       string              `kafka:"min=v0,max=v0"`
+	Partitions []ResponsePartition `kafka:"min=v0,max=v0"`
+}
+
+type ResponsePartition struct {
+	// We need at least one tagged field to indicate that this is a "flexible" message
+	// type.
+	_ struct{} `kafka:"min=v0,max=v0,tag"`
+
+	PartitionIndex   int32   `kafka:"min=v0,max=v0"`
+	Replicas         []int32 `kafka:"min=v0,max=v0"`
+	AddingReplicas   []int32 `kafka:"min=v0,max=v0"`
+	RemovingReplicas []int32 `kafka:"min=v0,max=v0"`
+}
+
+func (r *Response) ApiKey() protocol.ApiKey {
+	return protocol.ListPartitionReassignments
+}
diff -pruN 0.2.1-1.1/protocol/listpartitionreassignments/listpartitionreassignments_test.go 0.4.49+ds1-1/protocol/listpartitionreassignments/listpartitionreassignments_test.go
--- 0.2.1-1.1/protocol/listpartitionreassignments/listpartitionreassignments_test.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/protocol/listpartitionreassignments/listpartitionreassignments_test.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,41 @@
+package listpartitionreassignments_test
+
+import (
+	"testing"
+
+	"github.com/segmentio/kafka-go/protocol/listpartitionreassignments"
+	"github.com/segmentio/kafka-go/protocol/prototest"
+)
+
+const (
+	v0 = 0
+)
+
+func TestListPartitionReassignmentsRequest(t *testing.T) {
+	prototest.TestRequest(t, v0, &listpartitionreassignments.Request{
+		Topics: []listpartitionreassignments.RequestTopic{
+			{
+				Name:             "topic-1",
+				PartitionIndexes: []int32{1, 2, 3},
+			},
+		},
+	})
+}
+
+func TestListPartitionReassignmentsResponse(t *testing.T) {
+	prototest.TestResponse(t, v0, &listpartitionreassignments.Response{
+		Topics: []listpartitionreassignments.ResponseTopic{
+			{
+				Name: "topic-1",
+				Partitions: []listpartitionreassignments.ResponsePartition{
+					{
+						PartitionIndex:   1,
+						Replicas:         []int32{1, 2, 3},
+						AddingReplicas:   []int32{4, 5, 6},
+						RemovingReplicas: []int32{7, 8, 9},
+					},
+				},
+			},
+		},
+	})
+}
diff -pruN 0.2.1-1.1/protocol/metadata/metadata.go 0.4.49+ds1-1/protocol/metadata/metadata.go
--- 0.2.1-1.1/protocol/metadata/metadata.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/protocol/metadata/metadata.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,52 @@
+package metadata
+
+import "github.com/segmentio/kafka-go/protocol"
+
+func init() {
+	protocol.Register(&Request{}, &Response{})
+}
+
+type Request struct {
+	TopicNames                         []string `kafka:"min=v0,max=v8,nullable"`
+	AllowAutoTopicCreation             bool     `kafka:"min=v4,max=v8"`
+	IncludeClusterAuthorizedOperations bool     `kafka:"min=v8,max=v8"`
+	IncludeTopicAuthorizedOperations   bool     `kafka:"min=v8,max=v8"`
+}
+
+func (r *Request) ApiKey() protocol.ApiKey { return protocol.Metadata }
+
+type Response struct {
+	ThrottleTimeMs              int32            `kafka:"min=v3,max=v8"`
+	Brokers                     []ResponseBroker `kafka:"min=v0,max=v8"`
+	ClusterID                   string           `kafka:"min=v2,max=v8,nullable"`
+	ControllerID                int32            `kafka:"min=v1,max=v8"`
+	Topics                      []ResponseTopic  `kafka:"min=v0,max=v8"`
+	ClusterAuthorizedOperations int32            `kafka:"min=v8,max=v8"`
+}
+
+func (r *Response) ApiKey() protocol.ApiKey { return protocol.Metadata }
+
+type ResponseBroker struct {
+	NodeID int32  `kafka:"min=v0,max=v8"`
+	Host   string `kafka:"min=v0,max=v8"`
+	Port   int32  `kafka:"min=v0,max=v8"`
+	Rack   string `kafka:"min=v1,max=v8,nullable"`
+}
+
+type ResponseTopic struct {
+	ErrorCode                 int16               `kafka:"min=v0,max=v8"`
+	Name                      string              `kafka:"min=v0,max=v8"`
+	IsInternal                bool                `kafka:"min=v1,max=v8"`
+	Partitions                []ResponsePartition `kafka:"min=v0,max=v8"`
+	TopicAuthorizedOperations int32               `kafka:"min=v8,max=v8"`
+}
+
+type ResponsePartition struct {
+	ErrorCode       int16   `kafka:"min=v0,max=v8"`
+	PartitionIndex  int32   `kafka:"min=v0,max=v8"`
+	LeaderID        int32   `kafka:"min=v0,max=v8"`
+	LeaderEpoch     int32   `kafka:"min=v7,max=v8"`
+	ReplicaNodes    []int32 `kafka:"min=v0,max=v8"`
+	IsrNodes        []int32 `kafka:"min=v0,max=v8"`
+	OfflineReplicas []int32 `kafka:"min=v5,max=v8"`
+}
diff -pruN 0.2.1-1.1/protocol/metadata/metadata_test.go 0.4.49+ds1-1/protocol/metadata/metadata_test.go
--- 0.2.1-1.1/protocol/metadata/metadata_test.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/protocol/metadata/metadata_test.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,199 @@
+package metadata_test
+
+import (
+	"testing"
+
+	"github.com/segmentio/kafka-go/protocol/metadata"
+	"github.com/segmentio/kafka-go/protocol/prototest"
+)
+
+const (
+	v0 = 0
+	v1 = 1
+	v4 = 4
+	v8 = 8
+)
+
+func TestMetadataRequest(t *testing.T) {
+	prototest.TestRequest(t, v0, &metadata.Request{
+		TopicNames: nil,
+	})
+
+	prototest.TestRequest(t, v4, &metadata.Request{
+		TopicNames:             []string{"hello", "world"},
+		AllowAutoTopicCreation: true,
+	})
+
+	prototest.TestRequest(t, v8, &metadata.Request{
+		TopicNames:                         []string{"hello", "world"},
+		AllowAutoTopicCreation:             true,
+		IncludeClusterAuthorizedOperations: true,
+		IncludeTopicAuthorizedOperations:   true,
+	})
+}
+
+func TestMetadataResponse(t *testing.T) {
+	prototest.TestResponse(t, v0, &metadata.Response{
+		Brokers: []metadata.ResponseBroker{
+			{
+				NodeID: 0,
+				Host:   "127.0.0.1",
+				Port:   9092,
+			},
+			{
+				NodeID: 1,
+				Host:   "127.0.0.1",
+				Port:   9093,
+			},
+		},
+		Topics: []metadata.ResponseTopic{
+			{
+				Name: "topic-1",
+				Partitions: []metadata.ResponsePartition{
+					{
+						PartitionIndex: 0,
+						LeaderID:       1,
+						ReplicaNodes:   []int32{0},
+						IsrNodes:       []int32{1, 0},
+					},
+				},
+			},
+		},
+	})
+
+	prototest.TestResponse(t, v1, &metadata.Response{
+		ControllerID: 1,
+		Brokers: []metadata.ResponseBroker{
+			{
+				NodeID: 0,
+				Host:   "127.0.0.1",
+				Port:   9092,
+				Rack:   "rack-1",
+			},
+			{
+				NodeID: 1,
+				Host:   "127.0.0.1",
+				Port:   9093,
+				Rack:   "rack-2",
+			},
+		},
+		Topics: []metadata.ResponseTopic{
+			{
+				Name:       "topic-1",
+				IsInternal: true,
+				Partitions: []metadata.ResponsePartition{
+					{
+						PartitionIndex: 0,
+						LeaderID:       1,
+						ReplicaNodes:   []int32{0},
+						IsrNodes:       []int32{1, 0},
+					},
+					{
+						PartitionIndex: 1,
+						LeaderID:       0,
+						ReplicaNodes:   []int32{1},
+						IsrNodes:       []int32{0, 1},
+					},
+				},
+			},
+		},
+	})
+
+	prototest.TestResponse(t, v8, &metadata.Response{
+		ThrottleTimeMs:              123,
+		ClusterID:                   "test",
+		ControllerID:                1,
+		ClusterAuthorizedOperations: 0x01,
+		Brokers: []metadata.ResponseBroker{
+			{
+				NodeID: 0,
+				Host:   "127.0.0.1",
+				Port:   9092,
+				Rack:   "rack-1",
+			},
+			{
+				NodeID: 1,
+				Host:   "127.0.0.1",
+				Port:   9093,
+				Rack:   "rack-2",
+			},
+		},
+		Topics: []metadata.ResponseTopic{
+			{
+				Name: "topic-1",
+				Partitions: []metadata.ResponsePartition{
+					{
+						PartitionIndex:  0,
+						LeaderID:        1,
+						LeaderEpoch:     1234567890,
+						ReplicaNodes:    []int32{0},
+						IsrNodes:        []int32{0},
+						OfflineReplicas: []int32{1},
+					},
+					{
+						ErrorCode:       1,
+						ReplicaNodes:    []int32{},
+						IsrNodes:        []int32{},
+						OfflineReplicas: []int32{},
+					},
+				},
+				TopicAuthorizedOperations: 0x01,
+			},
+		},
+	})
+}
+
+func BenchmarkMetadataRequest(b *testing.B) {
+	prototest.BenchmarkRequest(b, v8, &metadata.Request{
+		TopicNames:                         []string{"hello", "world"},
+		AllowAutoTopicCreation:             true,
+		IncludeClusterAuthorizedOperations: true,
+		IncludeTopicAuthorizedOperations:   true,
+	})
+}
+
+func BenchmarkMetadataResponse(b *testing.B) {
+	prototest.BenchmarkResponse(b, v8, &metadata.Response{
+		ThrottleTimeMs:              123,
+		ClusterID:                   "test",
+		ControllerID:                1,
+		ClusterAuthorizedOperations: 0x01,
+		Brokers: []metadata.ResponseBroker{
+			{
+				NodeID: 0,
+				Host:   "127.0.0.1",
+				Port:   9092,
+				Rack:   "rack-1",
+			},
+			{
+				NodeID: 1,
+				Host:   "127.0.0.1",
+				Port:   9093,
+				Rack:   "rack-2",
+			},
+		},
+		Topics: []metadata.ResponseTopic{
+			{
+				Name: "topic-1",
+				Partitions: []metadata.ResponsePartition{
+					{
+						PartitionIndex:  0,
+						LeaderID:        1,
+						LeaderEpoch:     1234567890,
+						ReplicaNodes:    []int32{0},
+						IsrNodes:        []int32{0},
+						OfflineReplicas: []int32{1},
+					},
+					{
+						ErrorCode:       1,
+						ReplicaNodes:    []int32{},
+						IsrNodes:        []int32{},
+						OfflineReplicas: []int32{},
+					},
+				},
+				TopicAuthorizedOperations: 0x01,
+			},
+		},
+	})
+
+}
diff -pruN 0.2.1-1.1/protocol/offsetcommit/offsetcommit.go 0.4.49+ds1-1/protocol/offsetcommit/offsetcommit.go
--- 0.2.1-1.1/protocol/offsetcommit/offsetcommit.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/protocol/offsetcommit/offsetcommit.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,54 @@
+package offsetcommit
+
+import "github.com/segmentio/kafka-go/protocol"
+
+func init() {
+	protocol.Register(&Request{}, &Response{})
+}
+
+type Request struct {
+	GroupID         string         `kafka:"min=v0,max=v7"`
+	GenerationID    int32          `kafka:"min=v1,max=v7"`
+	MemberID        string         `kafka:"min=v1,max=v7"`
+	RetentionTimeMs int64          `kafka:"min=v2,max=v4"`
+	GroupInstanceID string         `kafka:"min=v7,max=v7,nullable"`
+	Topics          []RequestTopic `kafka:"min=v0,max=v7"`
+}
+
+func (r *Request) ApiKey() protocol.ApiKey { return protocol.OffsetCommit }
+
+func (r *Request) Group() string { return r.GroupID }
+
+type RequestTopic struct {
+	Name       string             `kafka:"min=v0,max=v7"`
+	Partitions []RequestPartition `kafka:"min=v0,max=v7"`
+}
+
+type RequestPartition struct {
+	PartitionIndex       int32  `kafka:"min=v0,max=v7"`
+	CommittedOffset      int64  `kafka:"min=v0,max=v7"`
+	CommitTimestamp      int64  `kafka:"min=v1,max=v1"`
+	CommittedLeaderEpoch int32  `kafka:"min=v6,max=v7"`
+	CommittedMetadata    string `kafka:"min=v0,max=v7,nullable"`
+}
+
+var (
+	_ protocol.GroupMessage = (*Request)(nil)
+)
+
+type Response struct {
+	ThrottleTimeMs int32           `kafka:"min=v3,max=v7"`
+	Topics         []ResponseTopic `kafka:"min=v0,max=v7"`
+}
+
+func (r *Response) ApiKey() protocol.ApiKey { return protocol.OffsetCommit }
+
+type ResponseTopic struct {
+	Name       string              `kafka:"min=v0,max=v7"`
+	Partitions []ResponsePartition `kafka:"min=v0,max=v7"`
+}
+
+type ResponsePartition struct {
+	PartitionIndex int32 `kafka:"min=v0,max=v7"`
+	ErrorCode      int16 `kafka:"min=v0,max=v7"`
+}
diff -pruN 0.2.1-1.1/protocol/offsetcommit/offsetcommit_test.go 0.4.49+ds1-1/protocol/offsetcommit/offsetcommit_test.go
--- 0.2.1-1.1/protocol/offsetcommit/offsetcommit_test.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/protocol/offsetcommit/offsetcommit_test.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,220 @@
+package offsetcommit_test
+
+import (
+	"testing"
+
+	"github.com/segmentio/kafka-go/protocol/offsetcommit"
+	"github.com/segmentio/kafka-go/protocol/prototest"
+)
+
+func TestOffsetCommitRequest(t *testing.T) {
+	for _, version := range []int16{0} {
+		prototest.TestRequest(t, version, &offsetcommit.Request{
+			GroupID: "group-0",
+			Topics: []offsetcommit.RequestTopic{
+				{
+					Name: "topic-0",
+					Partitions: []offsetcommit.RequestPartition{
+						{
+							PartitionIndex:    0,
+							CommittedOffset:   1,
+							CommittedMetadata: "meta-0-0",
+						},
+						{
+							PartitionIndex:    1,
+							CommittedOffset:   2,
+							CommittedMetadata: "meta-0-1",
+						},
+					},
+				},
+			},
+		})
+	}
+
+	// Version 1 added:
+	// GenerationID
+	// MemberID
+	// RequestTopic.RequestPartition.CommitTimestamp
+	for _, version := range []int16{1} {
+		prototest.TestRequest(t, version, &offsetcommit.Request{
+			GroupID:      "group-1",
+			GenerationID: 1,
+			MemberID:     "member-1",
+			Topics: []offsetcommit.RequestTopic{
+				{
+					Name: "topic-1",
+					Partitions: []offsetcommit.RequestPartition{
+						{
+							PartitionIndex:    0,
+							CommittedOffset:   1,
+							CommittedMetadata: "meta-1-0",
+							CommitTimestamp:   10,
+						},
+						{
+							PartitionIndex:    1,
+							CommittedOffset:   2,
+							CommittedMetadata: "meta-1-1",
+							CommitTimestamp:   11,
+						},
+					},
+				},
+			},
+		})
+	}
+
+	// Version 2 added:
+	// RetentionTimeMs
+	// Version 2 removed:
+	// RequestTopic.RequestPartition.CommitTimestamp
+	// Fields are the same through version 4.
+	for _, version := range []int16{2, 3, 4} {
+		prototest.TestRequest(t, version, &offsetcommit.Request{
+			GroupID:         "group-2",
+			GenerationID:    1,
+			MemberID:        "member-2",
+			RetentionTimeMs: 1999,
+			Topics: []offsetcommit.RequestTopic{
+				{
+					Name: "topic-2",
+					Partitions: []offsetcommit.RequestPartition{
+						{
+							PartitionIndex:    0,
+							CommittedOffset:   1,
+							CommittedMetadata: "meta-2-0",
+						},
+						{
+							PartitionIndex:    1,
+							CommittedOffset:   2,
+							CommittedMetadata: "meta-2-1",
+						},
+					},
+				},
+			},
+		})
+	}
+
+	// Version 5 removed:
+	// RetentionTimeMs
+	for _, version := range []int16{5} {
+		prototest.TestRequest(t, version, &offsetcommit.Request{
+			GroupID:      "group-3",
+			GenerationID: 1,
+			MemberID:     "member-3",
+			Topics: []offsetcommit.RequestTopic{
+				{
+					Name: "topic-3",
+					Partitions: []offsetcommit.RequestPartition{
+						{
+							PartitionIndex:    0,
+							CommittedOffset:   1,
+							CommittedMetadata: "meta-3-0",
+						},
+						{
+							PartitionIndex:    1,
+							CommittedOffset:   2,
+							CommittedMetadata: "meta-3-1",
+						},
+					},
+				},
+			},
+		})
+	}
+
+	// Version 6 added:
+	// RequestTopic.RequestPartition.CommittedLeaderEpoch
+	for _, version := range []int16{6} {
+		prototest.TestRequest(t, version, &offsetcommit.Request{
+			GroupID:      "group-4",
+			GenerationID: 1,
+			MemberID:     "member-4",
+			Topics: []offsetcommit.RequestTopic{
+				{
+					Name: "topic-4",
+					Partitions: []offsetcommit.RequestPartition{
+						{
+							PartitionIndex:       0,
+							CommittedOffset:      1,
+							CommittedMetadata:    "meta-4-0",
+							CommittedLeaderEpoch: 10,
+						},
+						{
+							PartitionIndex:       1,
+							CommittedOffset:      2,
+							CommittedMetadata:    "meta-4-1",
+							CommittedLeaderEpoch: 11,
+						},
+					},
+				},
+			},
+		})
+	}
+
+	// Version 7 added:
+	// GroupInstanceID
+	for _, version := range []int16{7} {
+		prototest.TestRequest(t, version, &offsetcommit.Request{
+			GroupID:         "group-5",
+			GenerationID:    1,
+			MemberID:        "member-5",
+			GroupInstanceID: "instance-5",
+			Topics: []offsetcommit.RequestTopic{
+				{
+					Name: "topic-4",
+					Partitions: []offsetcommit.RequestPartition{
+						{
+							PartitionIndex:       0,
+							CommittedOffset:      1,
+							CommittedMetadata:    "meta-5-0",
+							CommittedLeaderEpoch: 10,
+						},
+						{
+							PartitionIndex:       1,
+							CommittedOffset:      2,
+							CommittedMetadata:    "meta-5-1",
+							CommittedLeaderEpoch: 11,
+						},
+					},
+				},
+			},
+		})
+	}
+}
+
+func TestOffsetCommitResponse(t *testing.T) {
+	// Fields are the same through version 2.
+	for _, version := range []int16{0, 1, 2} {
+		prototest.TestResponse(t, version, &offsetcommit.Response{
+			Topics: []offsetcommit.ResponseTopic{
+				{
+					Name: "topic-1",
+					Partitions: []offsetcommit.ResponsePartition{
+						{
+							PartitionIndex: 4,
+							ErrorCode:      34,
+						},
+					},
+				},
+			},
+		})
+	}
+
+	// Version 3 added:
+	// ThrottleTimeMs
+	// Fields are the same through version 7.
+	for _, version := range []int16{3, 4, 5, 6, 7} {
+		prototest.TestResponse(t, version, &offsetcommit.Response{
+			ThrottleTimeMs: 10000,
+			Topics: []offsetcommit.ResponseTopic{
+				{
+					Name: "topic-2",
+					Partitions: []offsetcommit.ResponsePartition{
+						{
+							PartitionIndex: 2,
+							ErrorCode:      3,
+						},
+					},
+				},
+			},
+		})
+	}
+}
diff -pruN 0.2.1-1.1/protocol/offsetdelete/offsetdelete.go 0.4.49+ds1-1/protocol/offsetdelete/offsetdelete.go
--- 0.2.1-1.1/protocol/offsetdelete/offsetdelete.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/protocol/offsetdelete/offsetdelete.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,47 @@
+package offsetdelete
+
+import "github.com/segmentio/kafka-go/protocol"
+
+func init() {
+	protocol.Register(&Request{}, &Response{})
+}
+
+type Request struct {
+	GroupID string         `kafka:"min=v0,max=v0"`
+	Topics  []RequestTopic `kafka:"min=v0,max=v0"`
+}
+
+func (r *Request) ApiKey() protocol.ApiKey { return protocol.OffsetDelete }
+
+func (r *Request) Group() string { return r.GroupID }
+
+type RequestTopic struct {
+	Name       string             `kafka:"min=v0,max=v0"`
+	Partitions []RequestPartition `kafka:"min=v0,max=v0"`
+}
+
+type RequestPartition struct {
+	PartitionIndex int32 `kafka:"min=v0,max=v0"`
+}
+
+var (
+	_ protocol.GroupMessage = (*Request)(nil)
+)
+
+type Response struct {
+	ErrorCode      int16           `kafka:"min=v0,max=v0"`
+	ThrottleTimeMs int32           `kafka:"min=v0,max=v0"`
+	Topics         []ResponseTopic `kafka:"min=v0,max=v0"`
+}
+
+func (r *Response) ApiKey() protocol.ApiKey { return protocol.OffsetDelete }
+
+type ResponseTopic struct {
+	Name       string              `kafka:"min=v0,max=v0"`
+	Partitions []ResponsePartition `kafka:"min=v0,max=v0"`
+}
+
+type ResponsePartition struct {
+	PartitionIndex int32 `kafka:"min=v0,max=v0"`
+	ErrorCode      int16 `kafka:"min=v0,max=v0"`
+}
diff -pruN 0.2.1-1.1/protocol/offsetdelete/offsetdelete_test.go 0.4.49+ds1-1/protocol/offsetdelete/offsetdelete_test.go
--- 0.2.1-1.1/protocol/offsetdelete/offsetdelete_test.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/protocol/offsetdelete/offsetdelete_test.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,52 @@
+package offsetdelete_test
+
+import (
+	"testing"
+
+	"github.com/segmentio/kafka-go/protocol/offsetdelete"
+	"github.com/segmentio/kafka-go/protocol/prototest"
+)
+
+func TestOffsetDeleteRequest(t *testing.T) {
+	for _, version := range []int16{0} {
+		prototest.TestRequest(t, version, &offsetdelete.Request{
+			GroupID: "group-0",
+			Topics: []offsetdelete.RequestTopic{
+				{
+					Name: "topic-0",
+					Partitions: []offsetdelete.RequestPartition{
+						{
+							PartitionIndex: 0,
+						},
+						{
+							PartitionIndex: 1,
+						},
+					},
+				},
+			},
+		})
+	}
+}
+
+func TestOffsetDeleteResponse(t *testing.T) {
+	for _, version := range []int16{0} {
+		prototest.TestResponse(t, version, &offsetdelete.Response{
+			ErrorCode: 0,
+			Topics: []offsetdelete.ResponseTopic{
+				{
+					Name: "topic-0",
+					Partitions: []offsetdelete.ResponsePartition{
+						{
+							PartitionIndex: 0,
+							ErrorCode:      1,
+						},
+						{
+							PartitionIndex: 1,
+							ErrorCode:      1,
+						},
+					},
+				},
+			},
+		})
+	}
+}
diff -pruN 0.2.1-1.1/protocol/offsetfetch/offsetfetch.go 0.4.49+ds1-1/protocol/offsetfetch/offsetfetch.go
--- 0.2.1-1.1/protocol/offsetfetch/offsetfetch.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/protocol/offsetfetch/offsetfetch.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,46 @@
+package offsetfetch
+
+import "github.com/segmentio/kafka-go/protocol"
+
+func init() {
+	protocol.Register(&Request{}, &Response{})
+}
+
+type Request struct {
+	GroupID string         `kafka:"min=v0,max=v5"`
+	Topics  []RequestTopic `kafka:"min=v0,max=v5,nullable"`
+}
+
+func (r *Request) ApiKey() protocol.ApiKey { return protocol.OffsetFetch }
+
+func (r *Request) Group() string { return r.GroupID }
+
+type RequestTopic struct {
+	Name             string  `kafka:"min=v0,max=v5"`
+	PartitionIndexes []int32 `kafka:"min=v0,max=v5"`
+}
+
+var (
+	_ protocol.GroupMessage = (*Request)(nil)
+)
+
+type Response struct {
+	ThrottleTimeMs int32           `kafka:"min=v3,max=v5"`
+	Topics         []ResponseTopic `kafka:"min=v0,max=v5"`
+	ErrorCode      int16           `kafka:"min=v2,max=v5"`
+}
+
+func (r *Response) ApiKey() protocol.ApiKey { return protocol.OffsetFetch }
+
+type ResponseTopic struct {
+	Name       string              `kafka:"min=v0,max=v5"`
+	Partitions []ResponsePartition `kafka:"min=v0,max=v5"`
+}
+
+type ResponsePartition struct {
+	PartitionIndex      int32  `kafka:"min=v0,max=v5"`
+	CommittedOffset     int64  `kafka:"min=v0,max=v5"`
+	ComittedLeaderEpoch int32  `kafka:"min=v5,max=v5"`
+	Metadata            string `kafka:"min=v0,max=v5,nullable"`
+	ErrorCode           int16  `kafka:"min=v0,max=v5"`
+}
diff -pruN 0.2.1-1.1/protocol/produce/produce.go 0.4.49+ds1-1/protocol/produce/produce.go
--- 0.2.1-1.1/protocol/produce/produce.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/protocol/produce/produce.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,147 @@
+package produce
+
+import (
+	"fmt"
+
+	"github.com/segmentio/kafka-go/protocol"
+)
+
+func init() {
+	protocol.Register(&Request{}, &Response{})
+}
+
+type Request struct {
+	TransactionalID string         `kafka:"min=v3,max=v8,nullable"`
+	Acks            int16          `kafka:"min=v0,max=v8"`
+	Timeout         int32          `kafka:"min=v0,max=v8"`
+	Topics          []RequestTopic `kafka:"min=v0,max=v8"`
+}
+
+func (r *Request) ApiKey() protocol.ApiKey { return protocol.Produce }
+
+func (r *Request) Broker(cluster protocol.Cluster) (protocol.Broker, error) {
+	broker := protocol.Broker{ID: -1}
+
+	for i := range r.Topics {
+		t := &r.Topics[i]
+
+		topic, ok := cluster.Topics[t.Topic]
+		if !ok {
+			return broker, NewError(protocol.NewErrNoTopic(t.Topic))
+		}
+
+		for j := range t.Partitions {
+			p := &t.Partitions[j]
+
+			partition, ok := topic.Partitions[p.Partition]
+			if !ok {
+				return broker, NewError(protocol.NewErrNoPartition(t.Topic, p.Partition))
+			}
+
+			if b, ok := cluster.Brokers[partition.Leader]; !ok {
+				return broker, NewError(protocol.NewErrNoLeader(t.Topic, p.Partition))
+			} else if broker.ID < 0 {
+				broker = b
+			} else if b.ID != broker.ID {
+				return broker, NewError(fmt.Errorf("mismatching leaders (%d!=%d)", b.ID, broker.ID))
+			}
+		}
+	}
+
+	return broker, nil
+}
+
+func (r *Request) Prepare(apiVersion int16) {
+	// Determine which version of the message should be used, based on which
+	// version of the Produce API is supported by the server.
+	//
+	// In version 0.11, kafka gives this error:
+	//
+	//   org.apache.kafka.common.record.InvalidRecordException
+	//   Produce requests with version 3 are only allowed to contain record batches with magic version 2.
+	//
+	// In version 2.x, kafka refuses the message claiming that the CRC32
+	// checksum is invalid.
+	var recordVersion int8
+
+	if apiVersion < 3 {
+		recordVersion = 1
+	} else {
+		recordVersion = 2
+	}
+
+	for i := range r.Topics {
+		t := &r.Topics[i]
+
+		for j := range t.Partitions {
+			p := &t.Partitions[j]
+
+			// Allow the program to override the version if really needed.
+			if p.RecordSet.Version == 0 {
+				p.RecordSet.Version = recordVersion
+			}
+		}
+	}
+}
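+
+// Illustrative example of the version selection above (hypothetical request,
+// not part of the package API):
+//
+//	req := &Request{Topics: []RequestTopic{{
+//		Topic:      "topic-A",
+//		Partitions: []RequestPartition{{Partition: 0}},
+//	}}}
+//	req.Prepare(2) // RecordSet.Version was 0, so it becomes 1 (legacy message format)
+//	req.Topics[0].Partitions[0].RecordSet.Version = 0
+//	req.Prepare(3) // becomes 2 (record batch format)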
+
+func (r *Request) HasResponse() bool {
+	return r.Acks != 0
+}
+
+type RequestTopic struct {
+	Topic      string             `kafka:"min=v0,max=v8"`
+	Partitions []RequestPartition `kafka:"min=v0,max=v8"`
+}
+
+type RequestPartition struct {
+	Partition int32              `kafka:"min=v0,max=v8"`
+	RecordSet protocol.RecordSet `kafka:"min=v0,max=v8"`
+}
+
+type Response struct {
+	Topics         []ResponseTopic `kafka:"min=v0,max=v8"`
+	ThrottleTimeMs int32           `kafka:"min=v1,max=v8"`
+}
+
+func (r *Response) ApiKey() protocol.ApiKey { return protocol.Produce }
+
+type ResponseTopic struct {
+	Topic      string              `kafka:"min=v0,max=v8"`
+	Partitions []ResponsePartition `kafka:"min=v0,max=v8"`
+}
+
+type ResponsePartition struct {
+	Partition      int32           `kafka:"min=v0,max=v8"`
+	ErrorCode      int16           `kafka:"min=v0,max=v8"`
+	BaseOffset     int64           `kafka:"min=v0,max=v8"`
+	LogAppendTime  int64           `kafka:"min=v2,max=v8"`
+	LogStartOffset int64           `kafka:"min=v5,max=v8"`
+	RecordErrors   []ResponseError `kafka:"min=v8,max=v8"`
+	ErrorMessage   string          `kafka:"min=v8,max=v8,nullable"`
+}
+
+type ResponseError struct {
+	BatchIndex             int32  `kafka:"min=v8,max=v8"`
+	BatchIndexErrorMessage string `kafka:"min=v8,max=v8,nullable"`
+}
+
+var (
+	_ protocol.BrokerMessage   = (*Request)(nil)
+	_ protocol.PreparedMessage = (*Request)(nil)
+)
+
+type Error struct {
+	Err error
+}
+
+func NewError(err error) *Error {
+	return &Error{Err: err}
+}
+
+func (e *Error) Error() string {
+	return fmt.Sprintf("produce request error: %v", e.Err)
+}
+
+func (e *Error) Unwrap() error {
+	return e.Err
+}
diff -pruN 0.2.1-1.1/protocol/produce/produce_test.go 0.4.49+ds1-1/protocol/produce/produce_test.go
--- 0.2.1-1.1/protocol/produce/produce_test.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/protocol/produce/produce_test.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,273 @@
+package produce_test
+
+import (
+	"testing"
+	"time"
+
+	"github.com/segmentio/kafka-go/protocol"
+	"github.com/segmentio/kafka-go/protocol/produce"
+	"github.com/segmentio/kafka-go/protocol/prototest"
+)
+
+const (
+	v0 = 0
+	v3 = 3
+	v5 = 5
+	v8 = 8
+)
+
+func TestProduceRequest(t *testing.T) {
+	t0 := time.Now().Truncate(time.Millisecond)
+	t1 := t0.Add(1 * time.Millisecond)
+	t2 := t0.Add(2 * time.Millisecond)
+
+	prototest.TestRequest(t, v0, &produce.Request{
+		Acks:    1,
+		Timeout: 500,
+		Topics: []produce.RequestTopic{
+			{
+				Topic: "topic-1",
+				Partitions: []produce.RequestPartition{
+					{
+						Partition: 0,
+						RecordSet: protocol.RecordSet{
+							Version: 1,
+							Records: protocol.NewRecordReader(
+								protocol.Record{Offset: 0, Time: t0, Key: nil, Value: nil},
+							),
+						},
+					},
+					{
+						Partition: 1,
+						RecordSet: protocol.RecordSet{
+							Version: 1,
+							Records: protocol.NewRecordReader(
+								protocol.Record{Offset: 0, Time: t0, Key: nil, Value: prototest.String("msg-0")},
+								protocol.Record{Offset: 1, Time: t1, Key: nil, Value: prototest.String("msg-1")},
+								protocol.Record{Offset: 2, Time: t2, Key: prototest.Bytes([]byte{1}), Value: prototest.String("msg-2")},
+							),
+						},
+					},
+				},
+			},
+
+			{
+				Topic: "topic-2",
+				Partitions: []produce.RequestPartition{
+					{
+						Partition: 0,
+						RecordSet: protocol.RecordSet{
+							Version:    1,
+							Attributes: protocol.Gzip,
+							Records: protocol.NewRecordReader(
+								protocol.Record{Offset: 0, Time: t0, Key: nil, Value: prototest.String("msg-0")},
+								protocol.Record{Offset: 1, Time: t1, Key: nil, Value: prototest.String("msg-1")},
+								protocol.Record{Offset: 2, Time: t2, Key: prototest.Bytes([]byte{1}), Value: prototest.String("msg-2")},
+							),
+						},
+					},
+				},
+			},
+		},
+	})
+
+	prototest.TestRequest(t, v3, &produce.Request{
+		TransactionalID: "1234",
+		Acks:            1,
+		Timeout:         500,
+		Topics: []produce.RequestTopic{
+			{
+				Topic: "topic-1",
+				Partitions: []produce.RequestPartition{
+					{
+						Partition: 0,
+						RecordSet: protocol.RecordSet{
+							Version: 1,
+							Records: protocol.NewRecordReader(
+								protocol.Record{Offset: 0, Time: t0, Key: nil, Value: nil},
+							),
+						},
+					},
+					{
+						Partition: 1,
+						RecordSet: protocol.RecordSet{
+							Version: 1,
+							Records: protocol.NewRecordReader(
+								protocol.Record{Offset: 0, Time: t0, Key: nil, Value: prototest.String("msg-0")},
+								protocol.Record{Offset: 1, Time: t1, Key: nil, Value: prototest.String("msg-1")},
+								protocol.Record{Offset: 2, Time: t2, Key: prototest.Bytes([]byte{1}), Value: prototest.String("msg-2")},
+							),
+						},
+					},
+				},
+			},
+		},
+	})
+
+	headers := []protocol.Header{
+		{Key: "key-1", Value: []byte("value-1")},
+		{Key: "key-2", Value: []byte("value-2")},
+		{Key: "key-3", Value: []byte("value-3")},
+	}
+
+	prototest.TestRequest(t, v5, &produce.Request{
+		TransactionalID: "1234",
+		Acks:            1,
+		Timeout:         500,
+		Topics: []produce.RequestTopic{
+			{
+				Topic: "topic-1",
+				Partitions: []produce.RequestPartition{
+					{
+						Partition: 1,
+						RecordSet: protocol.RecordSet{
+							Version: 2,
+							Records: protocol.NewRecordReader(
+								protocol.Record{Offset: 0, Time: t0, Key: nil, Value: prototest.String("msg-0"), Headers: headers},
+								protocol.Record{Offset: 1, Time: t1, Key: nil, Value: prototest.String("msg-1")},
+								protocol.Record{Offset: 2, Time: t2, Key: prototest.Bytes([]byte{1}), Value: prototest.String("msg-2")},
+							),
+						},
+					},
+				},
+			},
+
+			{
+				Topic: "topic-2",
+				Partitions: []produce.RequestPartition{
+					{
+						Partition: 1,
+						RecordSet: protocol.RecordSet{
+							Version:    2,
+							Attributes: protocol.Snappy,
+							Records: protocol.NewRecordReader(
+								protocol.Record{Offset: 0, Time: t0, Key: nil, Value: prototest.String("msg-0"), Headers: headers},
+								protocol.Record{Offset: 1, Time: t1, Key: nil, Value: prototest.String("msg-1")},
+								protocol.Record{Offset: 2, Time: t2, Key: prototest.Bytes([]byte{1}), Value: prototest.String("msg-2")},
+							),
+						},
+					},
+				},
+			},
+		},
+	})
+}
+
+func TestProduceResponse(t *testing.T) {
+	prototest.TestResponse(t, v0, &produce.Response{
+		Topics: []produce.ResponseTopic{
+			{
+				Topic: "topic-1",
+				Partitions: []produce.ResponsePartition{
+					{
+						Partition:  0,
+						ErrorCode:  0,
+						BaseOffset: 0,
+					},
+					{
+						Partition:  1,
+						ErrorCode:  0,
+						BaseOffset: 42,
+					},
+				},
+			},
+		},
+	})
+
+	prototest.TestResponse(t, v8, &produce.Response{
+		Topics: []produce.ResponseTopic{
+			{
+				Topic: "topic-1",
+				Partitions: []produce.ResponsePartition{
+					{
+						Partition:      0,
+						ErrorCode:      0,
+						BaseOffset:     42,
+						LogAppendTime:  1e9,
+						LogStartOffset: 10,
+						RecordErrors:   []produce.ResponseError{},
+					},
+					{
+						Partition: 1,
+						ErrorCode: 1,
+						RecordErrors: []produce.ResponseError{
+							{BatchIndex: 1, BatchIndexErrorMessage: "message-1"},
+							{BatchIndex: 2, BatchIndexErrorMessage: "message-2"},
+							{BatchIndex: 3, BatchIndexErrorMessage: "message-3"},
+						},
+						ErrorMessage: "something went wrong",
+					},
+				},
+			},
+		},
+	})
+}
+
+func BenchmarkProduceRequest(b *testing.B) {
+	t0 := time.Now().Truncate(time.Millisecond)
+	t1 := t0.Add(1 * time.Millisecond)
+	t2 := t0.Add(2 * time.Millisecond)
+
+	prototest.BenchmarkRequest(b, v3, &produce.Request{
+		TransactionalID: "1234",
+		Acks:            1,
+		Timeout:         500,
+		Topics: []produce.RequestTopic{
+			{
+				Topic: "topic-1",
+				Partitions: []produce.RequestPartition{
+					{
+						Partition: 0,
+						RecordSet: protocol.RecordSet{
+							Version: 1,
+							Records: protocol.NewRecordReader(
+								protocol.Record{Offset: 0, Time: t0, Key: nil, Value: nil},
+							),
+						},
+					},
+					{
+						Partition: 1,
+						RecordSet: protocol.RecordSet{
+							Version: 1,
+							Records: protocol.NewRecordReader(
+								protocol.Record{Offset: 0, Time: t0, Key: nil, Value: prototest.String("msg-0")},
+								protocol.Record{Offset: 1, Time: t1, Key: nil, Value: prototest.String("msg-1")},
+								protocol.Record{Offset: 2, Time: t2, Key: prototest.Bytes([]byte{1}), Value: prototest.String("msg-2")},
+							),
+						},
+					},
+				},
+			},
+		},
+	})
+
+	headers := []protocol.Header{
+		{Key: "key-1", Value: []byte("value-1")},
+		{Key: "key-2", Value: []byte("value-2")},
+		{Key: "key-3", Value: []byte("value-3")},
+	}
+
+	prototest.BenchmarkRequest(b, v5, &produce.Request{
+		TransactionalID: "1234",
+		Acks:            1,
+		Timeout:         500,
+		Topics: []produce.RequestTopic{
+			{
+				Topic: "topic-1",
+				Partitions: []produce.RequestPartition{
+					{
+						Partition: 1,
+						RecordSet: protocol.RecordSet{
+							Version: 2,
+							Records: protocol.NewRecordReader(
+								protocol.Record{Offset: 0, Time: t0, Key: nil, Value: prototest.String("msg-0"), Headers: headers},
+								protocol.Record{Offset: 1, Time: t1, Key: nil, Value: prototest.String("msg-1")},
+								protocol.Record{Offset: 2, Time: t2, Key: prototest.Bytes([]byte{1}), Value: prototest.String("msg-2")},
+							),
+						},
+					},
+				},
+			},
+		},
+	})
+}
diff -pruN 0.2.1-1.1/protocol/protocol.go 0.4.49+ds1-1/protocol/protocol.go
--- 0.2.1-1.1/protocol/protocol.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/protocol/protocol.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,541 @@
+package protocol
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"net"
+	"reflect"
+	"strconv"
+	"strings"
+)
+
+// Message is an interface implemented by all request and response types of the
+// kafka protocol.
+//
+// This interface is used mostly as a safeguard to provide a compile-time check
+// for values passed to functions dealing with kafka message types.
+type Message interface {
+	ApiKey() ApiKey
+}
+
+type ApiKey int16
+
+func (k ApiKey) String() string {
+	if i := int(k); i >= 0 && i < len(apiNames) {
+		return apiNames[i]
+	}
+	return strconv.Itoa(int(k))
+}
+
+func (k ApiKey) MinVersion() int16 { return k.apiType().minVersion() }
+
+func (k ApiKey) MaxVersion() int16 { return k.apiType().maxVersion() }
+
+func (k ApiKey) SelectVersion(minVersion, maxVersion int16) int16 {
+	min := k.MinVersion()
+	max := k.MaxVersion()
+	switch {
+	case min > maxVersion:
+		return min
+	case max < maxVersion:
+		return max
+	default:
+		return maxVersion
+	}
+}
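+
+// As an illustrative sketch (not part of the upstream change): given a broker
+// that advertises Produce versions 0 through 7, SelectVersion returns the
+// broker's maximum clamped to the range implemented by this package.
+//
+//	v := Produce.SelectVersion(0, 7)
+//	// v is 7 when this package implements v7, otherwise Produce.MaxVersion().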
+
+func (k ApiKey) apiType() apiType {
+	if i := int(k); i >= 0 && i < len(apiTypes) {
+		return apiTypes[i]
+	}
+	return apiType{}
+}
+
+const (
+	Produce                      ApiKey = 0
+	Fetch                        ApiKey = 1
+	ListOffsets                  ApiKey = 2
+	Metadata                     ApiKey = 3
+	LeaderAndIsr                 ApiKey = 4
+	StopReplica                  ApiKey = 5
+	UpdateMetadata               ApiKey = 6
+	ControlledShutdown           ApiKey = 7
+	OffsetCommit                 ApiKey = 8
+	OffsetFetch                  ApiKey = 9
+	FindCoordinator              ApiKey = 10
+	JoinGroup                    ApiKey = 11
+	Heartbeat                    ApiKey = 12
+	LeaveGroup                   ApiKey = 13
+	SyncGroup                    ApiKey = 14
+	DescribeGroups               ApiKey = 15
+	ListGroups                   ApiKey = 16
+	SaslHandshake                ApiKey = 17
+	ApiVersions                  ApiKey = 18
+	CreateTopics                 ApiKey = 19
+	DeleteTopics                 ApiKey = 20
+	DeleteRecords                ApiKey = 21
+	InitProducerId               ApiKey = 22
+	OffsetForLeaderEpoch         ApiKey = 23
+	AddPartitionsToTxn           ApiKey = 24
+	AddOffsetsToTxn              ApiKey = 25
+	EndTxn                       ApiKey = 26
+	WriteTxnMarkers              ApiKey = 27
+	TxnOffsetCommit              ApiKey = 28
+	DescribeAcls                 ApiKey = 29
+	CreateAcls                   ApiKey = 30
+	DeleteAcls                   ApiKey = 31
+	DescribeConfigs              ApiKey = 32
+	AlterConfigs                 ApiKey = 33
+	AlterReplicaLogDirs          ApiKey = 34
+	DescribeLogDirs              ApiKey = 35
+	SaslAuthenticate             ApiKey = 36
+	CreatePartitions             ApiKey = 37
+	CreateDelegationToken        ApiKey = 38
+	RenewDelegationToken         ApiKey = 39
+	ExpireDelegationToken        ApiKey = 40
+	DescribeDelegationToken      ApiKey = 41
+	DeleteGroups                 ApiKey = 42
+	ElectLeaders                 ApiKey = 43
+	IncrementalAlterConfigs      ApiKey = 44
+	AlterPartitionReassignments  ApiKey = 45
+	ListPartitionReassignments   ApiKey = 46
+	OffsetDelete                 ApiKey = 47
+	DescribeClientQuotas         ApiKey = 48
+	AlterClientQuotas            ApiKey = 49
+	DescribeUserScramCredentials ApiKey = 50
+	AlterUserScramCredentials    ApiKey = 51
+
+	numApis = 52
+)
+
+var apiNames = [numApis]string{
+	Produce:                      "Produce",
+	Fetch:                        "Fetch",
+	ListOffsets:                  "ListOffsets",
+	Metadata:                     "Metadata",
+	LeaderAndIsr:                 "LeaderAndIsr",
+	StopReplica:                  "StopReplica",
+	UpdateMetadata:               "UpdateMetadata",
+	ControlledShutdown:           "ControlledShutdown",
+	OffsetCommit:                 "OffsetCommit",
+	OffsetFetch:                  "OffsetFetch",
+	FindCoordinator:              "FindCoordinator",
+	JoinGroup:                    "JoinGroup",
+	Heartbeat:                    "Heartbeat",
+	LeaveGroup:                   "LeaveGroup",
+	SyncGroup:                    "SyncGroup",
+	DescribeGroups:               "DescribeGroups",
+	ListGroups:                   "ListGroups",
+	SaslHandshake:                "SaslHandshake",
+	ApiVersions:                  "ApiVersions",
+	CreateTopics:                 "CreateTopics",
+	DeleteTopics:                 "DeleteTopics",
+	DeleteRecords:                "DeleteRecords",
+	InitProducerId:               "InitProducerId",
+	OffsetForLeaderEpoch:         "OffsetForLeaderEpoch",
+	AddPartitionsToTxn:           "AddPartitionsToTxn",
+	AddOffsetsToTxn:              "AddOffsetsToTxn",
+	EndTxn:                       "EndTxn",
+	WriteTxnMarkers:              "WriteTxnMarkers",
+	TxnOffsetCommit:              "TxnOffsetCommit",
+	DescribeAcls:                 "DescribeAcls",
+	CreateAcls:                   "CreateAcls",
+	DeleteAcls:                   "DeleteAcls",
+	DescribeConfigs:              "DescribeConfigs",
+	AlterConfigs:                 "AlterConfigs",
+	AlterReplicaLogDirs:          "AlterReplicaLogDirs",
+	DescribeLogDirs:              "DescribeLogDirs",
+	SaslAuthenticate:             "SaslAuthenticate",
+	CreatePartitions:             "CreatePartitions",
+	CreateDelegationToken:        "CreateDelegationToken",
+	RenewDelegationToken:         "RenewDelegationToken",
+	ExpireDelegationToken:        "ExpireDelegationToken",
+	DescribeDelegationToken:      "DescribeDelegationToken",
+	DeleteGroups:                 "DeleteGroups",
+	ElectLeaders:                 "ElectLeaders",
+	IncrementalAlterConfigs:      "IncrementalAlterConfigs",
+	AlterPartitionReassignments:  "AlterPartitionReassignments",
+	ListPartitionReassignments:   "ListPartitionReassignments",
+	OffsetDelete:                 "OffsetDelete",
+	DescribeClientQuotas:         "DescribeClientQuotas",
+	AlterClientQuotas:            "AlterClientQuotas",
+	DescribeUserScramCredentials: "DescribeUserScramCredentials",
+	AlterUserScramCredentials:    "AlterUserScramCredentials",
+}
+
+type messageType struct {
+	version  int16
+	flexible bool
+	gotype   reflect.Type
+	decode   decodeFunc
+	encode   encodeFunc
+}
+
+func (t *messageType) new() Message {
+	return reflect.New(t.gotype).Interface().(Message)
+}
+
+type apiType struct {
+	requests  []messageType
+	responses []messageType
+}
+
+func (t apiType) minVersion() int16 {
+	if len(t.requests) == 0 {
+		return 0
+	}
+	return t.requests[0].version
+}
+
+func (t apiType) maxVersion() int16 {
+	if len(t.requests) == 0 {
+		return 0
+	}
+	return t.requests[len(t.requests)-1].version
+}
+
+var apiTypes [numApis]apiType
+
+// Register is automatically called when sub-packages are imported, to install
+// a new pair of request/response message types.
+func Register(req, res Message) {
+	k1 := req.ApiKey()
+	k2 := res.ApiKey()
+
+	if k1 != k2 {
+		panic(fmt.Sprintf("[%T/%T]: request and response API keys mismatch: %d != %d", req, res, k1, k2))
+	}
+
+	apiTypes[k1] = apiType{
+		requests:  typesOf(req),
+		responses: typesOf(res),
+	}
+}
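+
+// A message sub-package typically installs its types from an init function,
+// along these lines (sketch for illustration; the actual call sites live in
+// the individual protocol/* sub-packages):
+//
+//	func init() {
+//		protocol.Register(&Request{}, &Response{})
+//	}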
+
+// OverrideTypeMessage is an interface implemented by messages that want to override the standard
+// request/response types for a given API.
+type OverrideTypeMessage interface {
+	TypeKey() OverrideTypeKey
+}
+
+type OverrideTypeKey int16
+
+const (
+	RawProduceOverride OverrideTypeKey = 0
+)
+
+var overrideApiTypes [numApis]map[OverrideTypeKey]apiType
+
+func RegisterOverride(req, res Message, key OverrideTypeKey) {
+	k1 := req.ApiKey()
+	k2 := res.ApiKey()
+
+	if k1 != k2 {
+		panic(fmt.Sprintf("[%T/%T]: request and response API keys mismatch: %d != %d", req, res, k1, k2))
+	}
+
+	if overrideApiTypes[k1] == nil {
+		overrideApiTypes[k1] = make(map[OverrideTypeKey]apiType)
+	}
+	overrideApiTypes[k1][key] = apiType{
+		requests:  typesOf(req),
+		responses: typesOf(res),
+	}
+}
+
+func typesOf(v interface{}) []messageType {
+	return makeTypes(reflect.TypeOf(v).Elem())
+}
+
+func makeTypes(t reflect.Type) []messageType {
+	minVersion := int16(-1)
+	maxVersion := int16(-1)
+
+	// All future versions will be flexible (according to spec), so we don't need
+	// to worry about maximum versions here.
+	minFlexibleVersion := int16(-1)
+
+	forEachStructField(t, func(_ reflect.Type, _ index, tag string) {
+		forEachStructTag(tag, func(tag structTag) bool {
+			if minVersion < 0 || tag.MinVersion < minVersion {
+				minVersion = tag.MinVersion
+			}
+			if maxVersion < 0 || tag.MaxVersion > maxVersion {
+				maxVersion = tag.MaxVersion
+			}
+			if tag.TagID > -2 && (minFlexibleVersion < 0 || tag.MinVersion < minFlexibleVersion) {
+				minFlexibleVersion = tag.MinVersion
+			}
+			return true
+		})
+	})
+
+	types := make([]messageType, 0, (maxVersion-minVersion)+1)
+
+	for v := minVersion; v <= maxVersion; v++ {
+		flexible := minFlexibleVersion >= 0 && v >= minFlexibleVersion
+
+		types = append(types, messageType{
+			version:  v,
+			gotype:   t,
+			flexible: flexible,
+			decode:   decodeFuncOf(t, v, flexible, structTag{}),
+			encode:   encodeFuncOf(t, v, flexible, structTag{}),
+		})
+	}
+
+	return types
+}
+
+type structTag struct {
+	MinVersion int16
+	MaxVersion int16
+	Compact    bool
+	Nullable   bool
+	TagID      int
+}
+
+func forEachStructTag(tag string, do func(structTag) bool) {
+	if tag == "-" {
+		return // special case to ignore the field
+	}
+
+	forEach(tag, '|', func(s string) bool {
+		tag := structTag{
+			MinVersion: -1,
+			MaxVersion: -1,
+
+			// Legitimate tag IDs can start at 0. We use -1 as a placeholder to indicate
+			// that the message type is flexible, so that leaves -2 as the default for
+			// indicating that there is no tag ID and the message is not flexible.
+			TagID: -2,
+		}
+
+		var err error
+		forEach(s, ',', func(s string) bool {
+			switch {
+			case strings.HasPrefix(s, "min="):
+				tag.MinVersion, err = parseVersion(s[4:])
+			case strings.HasPrefix(s, "max="):
+				tag.MaxVersion, err = parseVersion(s[4:])
+			case s == "tag":
+				tag.TagID = -1
+			case strings.HasPrefix(s, "tag="):
+				tag.TagID, err = strconv.Atoi(s[4:])
+			case s == "compact":
+				tag.Compact = true
+			case s == "nullable":
+				tag.Nullable = true
+			default:
+				err = fmt.Errorf("unrecognized option: %q", s)
+			}
+			return err == nil
+		})
+
+		if err != nil {
+			panic(fmt.Errorf("malformed struct tag: %w", err))
+		}
+
+		if tag.MinVersion < 0 && tag.MaxVersion >= 0 {
+			panic(fmt.Errorf("missing minimum version in struct tag: %q", s))
+		}
+
+		if tag.MaxVersion < 0 && tag.MinVersion >= 0 {
+			panic(fmt.Errorf("missing maximum version in struct tag: %q", s))
+		}
+
+		if tag.MinVersion > tag.MaxVersion {
+			panic(fmt.Errorf("invalid version range in struct tag: %q", s))
+		}
+
+		return do(tag)
+	})
+}
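+
+// For reference (illustrative, derived from the options parsed above), a field
+// may declare one or more version ranges separated by '|', each with optional
+// modifiers:
+//
+//	type example struct {
+//		Name  string `kafka:"min=v0,max=v5,nullable"`
+//		Extra int8   `kafka:"min=v3,max=v5,tag=0"`
+//		Mixed string `kafka:"min=v0,max=v2|min=v3,max=v5,compact"`
+//	}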
+
+func forEach(s string, sep byte, do func(string) bool) bool {
+	for len(s) != 0 {
+		p := ""
+		i := strings.IndexByte(s, sep)
+		if i < 0 {
+			p, s = s, ""
+		} else {
+			p, s = s[:i], s[i+1:]
+		}
+		if !do(p) {
+			return false
+		}
+	}
+	return true
+}
+
+func forEachStructField(t reflect.Type, do func(reflect.Type, index, string)) {
+	for i, n := 0, t.NumField(); i < n; i++ {
+		f := t.Field(i)
+
+		if f.PkgPath != "" && f.Name != "_" {
+			continue
+		}
+
+		kafkaTag, ok := f.Tag.Lookup("kafka")
+		if !ok {
+			kafkaTag = "|"
+		}
+
+		do(f.Type, indexOf(f), kafkaTag)
+	}
+}
+
+func parseVersion(s string) (int16, error) {
+	if !strings.HasPrefix(s, "v") {
+		return 0, fmt.Errorf("invalid version number: %q", s)
+	}
+	i, err := strconv.ParseInt(s[1:], 10, 16)
+	if err != nil {
+		return 0, fmt.Errorf("invalid version number: %q: %w", s, err)
+	}
+	if i < 0 {
+		return 0, fmt.Errorf("invalid negative version number: %q", s)
+	}
+	return int16(i), nil
+}
+
+func dontExpectEOF(err error) error {
+	if err != nil {
+		if errors.Is(err, io.EOF) {
+			return io.ErrUnexpectedEOF
+		}
+
+		return err
+	}
+
+	return nil
+}
+
+type Broker struct {
+	Rack string
+	Host string
+	Port int32
+	ID   int32
+}
+
+func (b Broker) String() string {
+	return net.JoinHostPort(b.Host, itoa(b.Port))
+}
+
+func (b Broker) Format(w fmt.State, v rune) {
+	switch v {
+	case 'd':
+		io.WriteString(w, itoa(b.ID))
+	case 's':
+		io.WriteString(w, b.String())
+	case 'v':
+		io.WriteString(w, itoa(b.ID))
+		io.WriteString(w, " ")
+		io.WriteString(w, b.String())
+		if b.Rack != "" {
+			io.WriteString(w, " ")
+			io.WriteString(w, b.Rack)
+		}
+	}
+}
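+
+// Formatting sketch (not part of the upstream change), showing the verbs
+// handled above:
+//
+//	b := Broker{ID: 1, Host: "localhost", Port: 9092, Rack: "rack-a"}
+//	fmt.Sprintf("%d", b) // "1"
+//	fmt.Sprintf("%s", b) // "localhost:9092"
+//	fmt.Sprintf("%v", b) // "1 localhost:9092 rack-a"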
+
+func itoa(i int32) string {
+	return strconv.Itoa(int(i))
+}
+
+type Topic struct {
+	Name       string
+	Error      int16
+	Partitions map[int32]Partition
+}
+
+type Partition struct {
+	ID       int32
+	Error    int16
+	Leader   int32
+	Replicas []int32
+	ISR      []int32
+	Offline  []int32
+}
+
+// RawExchanger is an extension to the Message interface that allows a message
+// to control the request/response cycle itself. This is currently
+// only used to facilitate v0 SASL Authenticate requests being written in
+// a non-standard fashion when the SASL Handshake was done at v0 but not
+// when done at v1.
+type RawExchanger interface {
+	// Required should return true when a RawExchange is needed.
+	// The passed in versions are the negotiated versions for the connection
+	// performing the request.
+	Required(versions map[ApiKey]int16) bool
+	// RawExchange is given the raw connection to the broker and the Message
+	// is responsible for writing itself to the connection as well as reading
+	// the response.
+	RawExchange(rw io.ReadWriter) (Message, error)
+}
+
+// BrokerMessage is an extension of the Message interface implemented by some
+// request types to customize the broker assignment logic.
+type BrokerMessage interface {
+	// Given a representation of the kafka cluster state as argument, returns
+	// the broker that the message should be routed to.
+	Broker(Cluster) (Broker, error)
+}
+
+// GroupMessage is an extension of the Message interface implemented by some
+// request types to inform the program that they should be routed to a group
+// coordinator.
+type GroupMessage interface {
+	// Returns the group configured on the message.
+	Group() string
+}
+
+// TransactionalMessage is an extension of the Message interface implemented by some
+// request types to inform the program that they should be routed to a transaction
+// coordinator.
+type TransactionalMessage interface {
+	// Returns the transactional id configured on the message.
+	Transaction() string
+}
+
+// PreparedMessage is an extension of the Message interface implemented by some
+// request types which may need to run some pre-processing on their state before
+// being sent.
+type PreparedMessage interface {
+	// Prepares the message before being sent to a kafka broker using the API
+	// version passed as argument.
+	Prepare(apiVersion int16)
+}
+
+// Splitter is an interface implemented by messages that can be split into
+// multiple requests and have their results merged back by a Merger.
+type Splitter interface {
+	// For a given cluster layout, returns the list of messages constructed
+	// from the receiver for each request that should be sent to the cluster.
+	// The second return value is a Merger which can be used to merge back the
+	// results of each request into a single message (or an error).
+	Split(Cluster) ([]Message, Merger, error)
+}
+
+// Merger is an interface implemented by messages which can merge multiple
+// results into one response.
+type Merger interface {
+	// Given a list of messages and associated results, merge them back into a
+	// response (or an error). The results must be either Message or error
+	// values; other types will trigger a panic.
+	Merge(messages []Message, results []interface{}) (Message, error)
+}
+
+// Result converts r to a Message or an error, or panics if r could not be
+// converted to these types.
+func Result(r interface{}) (Message, error) {
+	switch v := r.(type) {
+	case Message:
+		return v, nil
+	case error:
+		return nil, v
+	default:
+		panic(fmt.Errorf("BUG: result must be a message or an error but not %T", v))
+	}
+}
diff -pruN 0.2.1-1.1/protocol/protocol_test.go 0.4.49+ds1-1/protocol/protocol_test.go
--- 0.2.1-1.1/protocol/protocol_test.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/protocol/protocol_test.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,342 @@
+package protocol
+
+import (
+	"bytes"
+	"math"
+	"reflect"
+	"testing"
+)
+
+type testType struct {
+	Field1   string        `kafka:"min=v0,max=v4,nullable"`
+	Field2   int16         `kafka:"min=v2,max=v4"`
+	Field3   []byte        `kafka:"min=v2,max=v4,nullable"`
+	SubTypes []testSubType `kafka:"min=v1,max=v4"`
+
+	TaggedField1 int8   `kafka:"min=v3,max=v4,tag=0"`
+	TaggedField2 string `kafka:"min=v4,max=v4,tag=1"`
+}
+
+type testSubType struct {
+	SubField1 int8 `kafka:"min=v1,max=v4"`
+}
+
+func TestMakeFlexibleTypes(t *testing.T) {
+	types := makeTypes(reflect.TypeOf(&testType{}).Elem())
+	if len(types) != 5 {
+		t.Error(
+			"Wrong number of types",
+			"expected", 5,
+			"got", len(types),
+		)
+	}
+
+	fv := []int16{}
+
+	for _, to := range types {
+		if to.flexible {
+			fv = append(fv, to.version)
+		}
+	}
+
+	if !reflect.DeepEqual([]int16{3, 4}, fv) {
+		t.Error(
+			"Unexpected flexible versions",
+			"expected", []int16{3, 4},
+			"got", fv,
+		)
+	}
+}
+
+func TestEncodeDecodeFlexibleType(t *testing.T) {
+	f := &testType{
+		Field1: "value1",
+		Field2: 15,
+		Field3: []byte("hello"),
+		SubTypes: []testSubType{
+			{
+				SubField1: 2,
+			},
+			{
+				SubField1: 3,
+			},
+		},
+
+		TaggedField1: 34,
+		TaggedField2: "taggedValue2",
+	}
+
+	b := &bytes.Buffer{}
+	e := &encoder{writer: b}
+
+	types := makeTypes(reflect.TypeOf(&testType{}).Elem())
+	ft := types[4]
+	ft.encode(e, valueOf(f))
+	if e.err != nil {
+		t.Error(
+			"Error during encoding",
+			"expected", nil,
+			"got", e.err,
+		)
+	}
+
+	exp := []byte{
+		// size of "value1" + 1
+		7,
+		// "value1"
+		118, 97, 108, 117, 101, 49,
+		// 15 as 16-bit int
+		0, 15,
+		// size of []byte("hello") + 1
+		6,
+		// []byte("hello")
+		104, 101, 108, 108, 111,
+		// size of []SubTypes + 1
+		3,
+		// 2 as 8-bit int
+		2,
+		// tag buffer for first SubType struct
+		0,
+		// 3 as 8-bit int
+		3,
+		// tag buffer for second SubType struct
+		0,
+		// number of tagged fields
+		2,
+		// id of first tagged field
+		0,
+		// size of first tagged field
+		1,
+		// 34 as 8-bit int
+		34,
+		// id of second tagged field
+		1,
+		// size of second tagged field
+		13,
+		// size of "taggedValue2" + 1
+		13,
+		// "taggedValue2"
+		116, 97, 103, 103, 101, 100, 86, 97, 108, 117, 101, 50,
+	}
+
+	if !reflect.DeepEqual(exp, b.Bytes()) {
+		t.Error(
+			"Wrong encoded output",
+			"expected", exp,
+			"got", b.Bytes(),
+		)
+	}
+
+	b = &bytes.Buffer{}
+	b.Write(exp)
+	d := &decoder{reader: b, remain: len(exp)}
+
+	f2 := &testType{}
+	ft.decode(d, valueOf(f2))
+	if d.err != nil {
+		t.Error(
+			"Error during decoding",
+			"expected", nil,
+			"got", e.err,
+		)
+	}
+
+	if !reflect.DeepEqual(f, f2) {
+		t.Error(
+			"Decoded value does not equal encoded one",
+			"expected", *f,
+			"got", *f2,
+		)
+	}
+}
+
+func TestVarInts(t *testing.T) {
+	type tc struct {
+		input      int64
+		expVarInt  []byte
+		expUVarInt []byte
+	}
+
+	tcs := []tc{
+		{
+			input:      12,
+			expVarInt:  []byte{24},
+			expUVarInt: []byte{12},
+		},
+		{
+			input:      63,
+			expVarInt:  []byte{126},
+			expUVarInt: []byte{63},
+		},
+		{
+			input:      -64,
+			expVarInt:  []byte{127},
+			expUVarInt: []byte{192, 255, 255, 255, 255, 255, 255, 255, 255, 1},
+		},
+		{
+			input:      64,
+			expVarInt:  []byte{128, 1},
+			expUVarInt: []byte{64},
+		},
+		{
+			input:      127,
+			expVarInt:  []byte{254, 1},
+			expUVarInt: []byte{127},
+		},
+		{
+			input:      128,
+			expVarInt:  []byte{128, 2},
+			expUVarInt: []byte{128, 1},
+		},
+		{
+			input:      129,
+			expVarInt:  []byte{130, 2},
+			expUVarInt: []byte{129, 1},
+		},
+		{
+			input:      12345,
+			expVarInt:  []byte{242, 192, 1},
+			expUVarInt: []byte{185, 96},
+		},
+		{
+			input:      123456789101112,
+			expVarInt:  []byte{240, 232, 249, 224, 144, 146, 56},
+			expUVarInt: []byte{184, 244, 188, 176, 136, 137, 28},
+		},
+	}
+
+	for _, tc := range tcs {
+		b := &bytes.Buffer{}
+		e := &encoder{writer: b}
+		e.writeVarInt(tc.input)
+		if e.err != nil {
+			t.Errorf(
+				"Unexpected error encoding %d as varInt: %+v",
+				tc.input,
+				e.err,
+			)
+		}
+		if !reflect.DeepEqual(b.Bytes(), tc.expVarInt) {
+			t.Error(
+				"Wrong output encoding value", tc.input, "as varInt",
+				"expected", tc.expVarInt,
+				"got", b.Bytes(),
+			)
+		}
+		expLen := sizeOfVarInt(tc.input)
+		if expLen != len(b.Bytes()) {
+			t.Error(
+				"Wrong sizeOf for", tc.input, "as varInt",
+				"expected", expLen,
+				"got", len(b.Bytes()),
+			)
+		}
+
+		d := &decoder{reader: b, remain: len(b.Bytes())}
+		v := d.readVarInt()
+		if v != tc.input {
+			t.Error(
+				"Decoded varInt value does not equal encoded one",
+				"expected", tc.input,
+				"got", v,
+			)
+		}
+
+		b = &bytes.Buffer{}
+		e = &encoder{writer: b}
+		e.writeUnsignedVarInt(uint64(tc.input))
+		if e.err != nil {
+			t.Errorf(
+				"Unexpected error encoding %d as unsignedVarInt: %+v",
+				tc.input,
+				e.err,
+			)
+		}
+		if !reflect.DeepEqual(b.Bytes(), tc.expUVarInt) {
+			t.Error(
+				"Wrong output encoding value", tc.input, "as unsignedVarInt",
+				"expected", tc.expUVarInt,
+				"got", b.Bytes(),
+			)
+		}
+		expLen = sizeOfUnsignedVarInt(uint64(tc.input))
+		if expLen != len(b.Bytes()) {
+			t.Error(
+				"Wrong sizeOf for", tc.input, "as unsignedVarInt",
+				"expected", expLen,
+				"got", len(b.Bytes()),
+			)
+		}
+
+		d = &decoder{reader: b, remain: len(b.Bytes())}
+		v = int64(d.readUnsignedVarInt())
+		if v != tc.input {
+			t.Error(
+				"Decoded unsignedVarInt value does not equal encoded one",
+				"expected", tc.input,
+				"got", v,
+			)
+		}
+
+	}
+}
+
+func TestFloat64(t *testing.T) {
+	type tc struct {
+		input    float64
+		expected []byte
+	}
+
+	tcs := []tc{
+		{
+			input:    0.0,
+			expected: []byte{0, 0, 0, 0, 0, 0, 0, 0},
+		},
+		{
+			input:    math.MaxFloat64,
+			expected: []byte{127, 239, 255, 255, 255, 255, 255, 255},
+		},
+		{
+			input:    -math.MaxFloat64,
+			expected: []byte{255, 239, 255, 255, 255, 255, 255, 255},
+		},
+		{
+			input:    math.SmallestNonzeroFloat64,
+			expected: []byte{0, 0, 0, 0, 0, 0, 0, 1},
+		},
+		{
+			input:    -math.SmallestNonzeroFloat64,
+			expected: []byte{128, 0, 0, 0, 0, 0, 0, 1},
+		},
+	}
+
+	for _, tc := range tcs {
+		b := &bytes.Buffer{}
+		e := &encoder{writer: b}
+		e.writeFloat64(tc.input)
+		if e.err != nil {
+			t.Errorf(
+				"Unexpected error encoding %f as float64: %+v",
+				tc.input,
+				e.err,
+			)
+		}
+		if !reflect.DeepEqual(b.Bytes(), tc.expected) {
+			t.Error(
+				"Wrong output encoding value", tc.input, "as float64",
+				"expected", tc.expected,
+				"got", b.Bytes(),
+			)
+		}
+
+		d := &decoder{reader: b, remain: len(b.Bytes())}
+		v := d.readFloat64()
+		if v != tc.input {
+			t.Error(
+				"Decoded float64 value does not equal encoded one",
+				"expected", tc.input,
+				"got", v,
+			)
+		}
+	}
+}
diff -pruN 0.2.1-1.1/protocol/prototest/bytes.go 0.4.49+ds1-1/protocol/prototest/bytes.go
--- 0.2.1-1.1/protocol/prototest/bytes.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/protocol/prototest/bytes.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,15 @@
+package prototest
+
+import (
+	"github.com/segmentio/kafka-go/protocol"
+)
+
+// Bytes constructs a Bytes which exposes the content of b.
+func Bytes(b []byte) protocol.Bytes {
+	return protocol.NewBytes(b)
+}
+
+// String constructs a Bytes which exposes the content of s.
+func String(s string) protocol.Bytes {
+	return protocol.NewBytes([]byte(s))
+}
diff -pruN 0.2.1-1.1/protocol/prototest/prototest.go 0.4.49+ds1-1/protocol/prototest/prototest.go
--- 0.2.1-1.1/protocol/prototest/prototest.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/protocol/prototest/prototest.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,185 @@
+package prototest
+
+import (
+	"bytes"
+	"errors"
+	"io"
+	"reflect"
+	"time"
+
+	"github.com/segmentio/kafka-go/protocol"
+)
+
+func deepEqual(x1, x2 interface{}) bool {
+	if x1 == nil {
+		return x2 == nil
+	}
+	if r1, ok := x1.(protocol.RecordReader); ok {
+		if r2, ok := x2.(protocol.RecordReader); ok {
+			return deepEqualRecords(r1, r2)
+		}
+		return false
+	}
+	if b1, ok := x1.(protocol.Bytes); ok {
+		if b2, ok := x2.(protocol.Bytes); ok {
+			return deepEqualBytes(b1, b2)
+		}
+		return false
+	}
+	if t1, ok := x1.(time.Time); ok {
+		if t2, ok := x2.(time.Time); ok {
+			return t1.Equal(t2)
+		}
+		return false
+	}
+	return deepEqualValue(reflect.ValueOf(x1), reflect.ValueOf(x2))
+}
+
+func deepEqualValue(v1, v2 reflect.Value) bool {
+	t1 := v1.Type()
+	t2 := v2.Type()
+
+	if t1 != t2 {
+		return false
+	}
+
+	switch v1.Kind() {
+	case reflect.Bool:
+		return v1.Bool() == v2.Bool()
+	case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		return v1.Int() == v2.Int()
+	case reflect.Float64:
+		return v1.Float() == v2.Float()
+	case reflect.String:
+		return v1.String() == v2.String()
+	case reflect.Struct:
+		return deepEqualStruct(v1, v2)
+	case reflect.Ptr:
+		return deepEqualPtr(v1, v2)
+	case reflect.Slice:
+		return deepEqualSlice(v1, v2)
+	default:
+		panic("comparing values of unsupported type: " + v1.Type().String())
+	}
+}
+
+func deepEqualPtr(v1, v2 reflect.Value) bool {
+	if v1.IsNil() {
+		return v2.IsNil()
+	}
+	return deepEqual(v1.Elem().Interface(), v2.Elem().Interface())
+}
+
+func deepEqualStruct(v1, v2 reflect.Value) bool {
+	t := v1.Type()
+	n := t.NumField()
+
+	for i := 0; i < n; i++ {
+		f := t.Field(i)
+
+		if f.PkgPath != "" { // ignore unexported fields
+			continue
+		}
+
+		f1 := v1.Field(i)
+		f2 := v2.Field(i)
+
+		if !deepEqual(f1.Interface(), f2.Interface()) {
+			return false
+		}
+	}
+
+	return true
+}
+
+func deepEqualSlice(v1, v2 reflect.Value) bool {
+	t := v1.Type()
+	e := t.Elem()
+
+	if e.Kind() == reflect.Uint8 { // []byte
+		return bytes.Equal(v1.Bytes(), v2.Bytes())
+	}
+
+	n1 := v1.Len()
+	n2 := v2.Len()
+
+	if n1 != n2 {
+		return false
+	}
+
+	for i := 0; i < n1; i++ {
+		f1 := v1.Index(i)
+		f2 := v2.Index(i)
+
+		if !deepEqual(f1.Interface(), f2.Interface()) {
+			return false
+		}
+	}
+
+	return true
+}
+
+func deepEqualBytes(s1, s2 protocol.Bytes) bool {
+	if s1 == nil {
+		return s2 == nil
+	}
+
+	if s2 == nil {
+		return false
+	}
+
+	n1 := s1.Len()
+	n2 := s2.Len()
+
+	if n1 != n2 {
+		return false
+	}
+
+	b1 := make([]byte, n1)
+	b2 := make([]byte, n2)
+
+	if _, err := s1.(io.ReaderAt).ReadAt(b1, 0); err != nil {
+		panic(err)
+	}
+
+	if _, err := s2.(io.ReaderAt).ReadAt(b2, 0); err != nil {
+		panic(err)
+	}
+
+	return bytes.Equal(b1, b2)
+}
+
+func deepEqualRecords(r1, r2 protocol.RecordReader) bool {
+	for {
+		rec1, err1 := r1.ReadRecord()
+		rec2, err2 := r2.ReadRecord()
+
+		if err1 != nil || err2 != nil {
+			return errors.Is(err1, err2)
+		}
+
+		if !deepEqualRecord(rec1, rec2) {
+			return false
+		}
+	}
+}
+
+func deepEqualRecord(r1, r2 *protocol.Record) bool {
+	if r1.Offset != r2.Offset {
+		return false
+	}
+
+	if !r1.Time.Equal(r2.Time) {
+		return false
+	}
+
+	if !deepEqualBytes(r1.Key, r2.Key) {
+		return false
+	}
+
+	if !deepEqualBytes(r1.Value, r2.Value) {
+		return false
+	}
+
+	return deepEqual(r1.Headers, r2.Headers)
+}
diff -pruN 0.2.1-1.1/protocol/prototest/reflect.go 0.4.49+ds1-1/protocol/prototest/reflect.go
--- 0.2.1-1.1/protocol/prototest/reflect.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/protocol/prototest/reflect.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,150 @@
+package prototest
+
+import (
+	"bytes"
+	"errors"
+	"io"
+	"reflect"
+	"time"
+
+	"github.com/segmentio/kafka-go/protocol"
+)
+
+var (
+	recordReader = reflect.TypeOf((*protocol.RecordReader)(nil)).Elem()
+)
+
+func closeMessage(m protocol.Message) {
+	forEachField(reflect.ValueOf(m), func(v reflect.Value) {
+		if v.Type().Implements(recordReader) {
+			rr := v.Interface().(protocol.RecordReader)
+			for {
+				r, err := rr.ReadRecord()
+				if err != nil {
+					break
+				}
+				if r.Key != nil {
+					r.Key.Close()
+				}
+				if r.Value != nil {
+					r.Value.Close()
+				}
+			}
+		}
+	})
+}
+
+func load(v interface{}) (reset func()) {
+	return loadValue(reflect.ValueOf(v))
+}
+
+func loadValue(v reflect.Value) (reset func()) {
+	resets := []func(){}
+
+	forEachField(v, func(f reflect.Value) {
+		switch x := f.Interface().(type) {
+		case protocol.RecordReader:
+			records := loadRecords(x)
+			resetFunc := func() {
+				f.Set(reflect.ValueOf(protocol.NewRecordReader(makeRecords(records)...)))
+			}
+			resetFunc()
+			resets = append(resets, resetFunc)
+		case io.Reader:
+			buf, _ := io.ReadAll(x)
+			resetFunc := func() {
+				f.Set(reflect.ValueOf(bytes.NewBuffer(buf)))
+			}
+			resetFunc()
+			resets = append(resets, resetFunc)
+		}
+	})
+
+	return func() {
+		for _, f := range resets {
+			f()
+		}
+	}
+}
+
+func forEachField(v reflect.Value, do func(reflect.Value)) {
+	for v.Kind() == reflect.Ptr {
+		if v.IsNil() {
+			return
+		}
+		v = v.Elem()
+	}
+
+	switch v.Kind() {
+	case reflect.Slice:
+		for i, n := 0, v.Len(); i < n; i++ {
+			forEachField(v.Index(i), do)
+		}
+
+	case reflect.Struct:
+		for i, n := 0, v.NumField(); i < n; i++ {
+			forEachField(v.Field(i), do)
+		}
+
+	default:
+		do(v)
+	}
+}
+
+type memoryRecord struct {
+	offset  int64
+	time    time.Time
+	key     []byte
+	value   []byte
+	headers []protocol.Header
+}
+
+func (m *memoryRecord) Record() protocol.Record {
+	return protocol.Record{
+		Offset:  m.offset,
+		Time:    m.time,
+		Key:     protocol.NewBytes(m.key),
+		Value:   protocol.NewBytes(m.value),
+		Headers: m.headers,
+	}
+}
+
+func makeRecords(memoryRecords []memoryRecord) []protocol.Record {
+	records := make([]protocol.Record, len(memoryRecords))
+	for i, m := range memoryRecords {
+		records[i] = m.Record()
+	}
+	return records
+}
+
+func loadRecords(r protocol.RecordReader) []memoryRecord {
+	records := []memoryRecord{}
+
+	for {
+		rec, err := r.ReadRecord()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				return records
+			}
+			panic(err)
+		}
+		records = append(records, memoryRecord{
+			offset:  rec.Offset,
+			time:    rec.Time,
+			key:     readAll(rec.Key),
+			value:   readAll(rec.Value),
+			headers: rec.Headers,
+		})
+	}
+}
+
+func readAll(bytes protocol.Bytes) []byte {
+	if bytes != nil {
+		defer bytes.Close()
+	}
+	b, err := protocol.ReadAll(bytes)
+	if err != nil {
+		panic(err)
+	}
+	return b
+}
diff -pruN 0.2.1-1.1/protocol/prototest/request.go 0.4.49+ds1-1/protocol/prototest/request.go
--- 0.2.1-1.1/protocol/prototest/request.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/protocol/prototest/request.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,132 @@
+package prototest
+
+import (
+	"bufio"
+	"bytes"
+	"encoding/hex"
+	"fmt"
+	"io"
+	"testing"
+
+	"github.com/segmentio/kafka-go/protocol"
+)
+
+func TestRequest(t *testing.T, version int16, msg protocol.Message) {
+	reset := load(msg)
+
+	t.Run(fmt.Sprintf("v%d", version), func(t *testing.T) {
+		b := &bytes.Buffer{}
+
+		if err := protocol.WriteRequest(b, version, 1234, "me", msg); err != nil {
+			t.Fatal(err)
+		}
+
+		reset()
+
+		t.Logf("\n%s\n", hex.Dump(b.Bytes()))
+
+		apiVersion, correlationID, clientID, req, err := protocol.ReadRequest(b)
+		if err != nil {
+			t.Fatal(err)
+		}
+		if apiVersion != version {
+			t.Errorf("api version mismatch: %d != %d", apiVersion, version)
+		}
+		if correlationID != 1234 {
+			t.Errorf("correlation id mismatch: %d != %d", correlationID, 1234)
+		}
+		if clientID != "me" {
+			t.Errorf("client id mismatch: %q != %q", clientID, "me")
+		}
+		if !deepEqual(msg, req) {
+			t.Errorf("request message mismatch:")
+			t.Logf("expected: %+v", msg)
+			t.Logf("found:    %+v", req)
+		}
+	})
+}
+
+// TestRequestWithOverride validates requests that have an overridden type. For requests with type overrides, we
+// double-serialize the request to ensure that the resulting encodings of the overridden and original types are identical.
+func TestRequestWithOverride(t *testing.T, version int16, msg protocol.Message) {
+	reset := load(msg)
+
+	t.Run(fmt.Sprintf("v%d", version), func(t *testing.T) {
+		b1 := &bytes.Buffer{}
+
+		if err := protocol.WriteRequest(b1, version, 1234, "me", msg); err != nil {
+			t.Fatal(err)
+		}
+
+		reset()
+		t.Logf("\n%s\n", hex.Dump(b1.Bytes()))
+
+		_, _, _, req, err := protocol.ReadRequest(b1)
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		b2 := &bytes.Buffer{}
+		if err := protocol.WriteRequest(b2, version, 1234, "me", req); err != nil {
+			t.Fatal(err)
+		}
+
+		if !deepEqual(b1, b2) {
+			t.Errorf("request message mismatch:")
+			t.Logf("expected: %+v", hex.Dump(b1.Bytes()))
+			t.Logf("found:    %+v", hex.Dump(b2.Bytes()))
+		}
+	})
+}
+
+func BenchmarkRequest(b *testing.B, version int16, msg protocol.Message) {
+	reset := load(msg)
+
+	b.Run(fmt.Sprintf("v%d", version), func(b *testing.B) {
+		buffer := &bytes.Buffer{}
+		buffer.Grow(1024)
+
+		b.Run("read", func(b *testing.B) {
+			w := io.Writer(buffer)
+
+			if err := protocol.WriteRequest(w, version, 1234, "client", msg); err != nil {
+				b.Fatal(err)
+			}
+
+			reset()
+
+			p := buffer.Bytes()
+			x := bytes.NewReader(p)
+			r := bufio.NewReader(x)
+
+			for i := 0; i < b.N; i++ {
+				_, _, _, req, err := protocol.ReadRequest(r)
+				if err != nil {
+					b.Fatal(err)
+				}
+				closeMessage(req)
+				x.Reset(p)
+				r.Reset(x)
+			}
+
+			b.SetBytes(int64(len(p)))
+			buffer.Reset()
+		})
+
+		b.Run("write", func(b *testing.B) {
+			w := io.Writer(buffer)
+			n := int64(0)
+
+			for i := 0; i < b.N; i++ {
+				if err := protocol.WriteRequest(w, version, 1234, "client", msg); err != nil {
+					b.Fatal(err)
+				}
+				reset()
+				n = int64(buffer.Len())
+				buffer.Reset()
+			}
+
+			b.SetBytes(n)
+		})
+	})
+}
diff -pruN 0.2.1-1.1/protocol/prototest/response.go 0.4.49+ds1-1/protocol/prototest/response.go
--- 0.2.1-1.1/protocol/prototest/response.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/protocol/prototest/response.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,95 @@
+package prototest
+
+import (
+	"bufio"
+	"bytes"
+	"encoding/hex"
+	"fmt"
+	"io"
+	"testing"
+
+	"github.com/segmentio/kafka-go/protocol"
+)
+
+func TestResponse(t *testing.T, version int16, msg protocol.Message) {
+	reset := load(msg)
+
+	t.Run(fmt.Sprintf("v%d", version), func(t *testing.T) {
+		b := &bytes.Buffer{}
+
+		if err := protocol.WriteResponse(b, version, 1234, msg); err != nil {
+			t.Fatal(err)
+		}
+
+		reset()
+
+		t.Logf("\n%s", hex.Dump(b.Bytes()))
+
+		correlationID, res, err := protocol.ReadResponse(b, msg.ApiKey(), version)
+		if err != nil {
+			t.Fatal(err)
+		}
+		if correlationID != 1234 {
+			t.Errorf("correlation id mismatch: %d != %d", correlationID, 1234)
+		}
+		if !deepEqual(msg, res) {
+			t.Errorf("response message mismatch:")
+			t.Logf("expected: %+v", msg)
+			t.Logf("found:    %+v", res)
+		}
+		closeMessage(res)
+	})
+}
+
+func BenchmarkResponse(b *testing.B, version int16, msg protocol.Message) {
+	reset := load(msg)
+
+	b.Run(fmt.Sprintf("v%d", version), func(b *testing.B) {
+		apiKey := msg.ApiKey()
+		buffer := &bytes.Buffer{}
+		buffer.Grow(1024)
+
+		b.Run("read", func(b *testing.B) {
+			w := io.Writer(buffer)
+
+			if err := protocol.WriteResponse(w, version, 1234, msg); err != nil {
+				b.Fatal(err)
+			}
+
+			reset()
+
+			p := buffer.Bytes()
+			x := bytes.NewReader(p)
+			r := bufio.NewReader(x)
+
+			for i := 0; i < b.N; i++ {
+				_, res, err := protocol.ReadResponse(r, apiKey, version)
+				if err != nil {
+					b.Fatal(err)
+				}
+				closeMessage(res)
+				x.Reset(p)
+				r.Reset(x)
+			}
+
+			b.SetBytes(int64(len(p)))
+			buffer.Reset()
+		})
+
+		b.Run("write", func(b *testing.B) {
+			w := io.Writer(buffer)
+			n := int64(0)
+
+			for i := 0; i < b.N; i++ {
+				if err := protocol.WriteResponse(w, version, 1234, msg); err != nil {
+					b.Fatal(err)
+				}
+				reset()
+				n = int64(buffer.Len())
+				buffer.Reset()
+			}
+
+			b.SetBytes(n)
+		})
+	})
+}
diff -pruN 0.2.1-1.1/protocol/rawproduce/rawproduce.go 0.4.49+ds1-1/protocol/rawproduce/rawproduce.go
--- 0.2.1-1.1/protocol/rawproduce/rawproduce.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/protocol/rawproduce/rawproduce.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,91 @@
+package rawproduce
+
+import (
+	"fmt"
+
+	"github.com/segmentio/kafka-go/protocol"
+	"github.com/segmentio/kafka-go/protocol/produce"
+)
+
+func init() {
+	// Register a type override so that raw produce requests will be encoded with the correct type.
+	req := &Request{}
+	protocol.RegisterOverride(req, &produce.Response{}, req.TypeKey())
+}
+
+type Request struct {
+	TransactionalID string         `kafka:"min=v3,max=v8,nullable"`
+	Acks            int16          `kafka:"min=v0,max=v8"`
+	Timeout         int32          `kafka:"min=v0,max=v8"`
+	Topics          []RequestTopic `kafka:"min=v0,max=v8"`
+}
+
+func (r *Request) ApiKey() protocol.ApiKey { return protocol.Produce }
+
+func (r *Request) TypeKey() protocol.OverrideTypeKey { return protocol.RawProduceOverride }
+
+func (r *Request) Broker(cluster protocol.Cluster) (protocol.Broker, error) {
+	broker := protocol.Broker{ID: -1}
+
+	for i := range r.Topics {
+		t := &r.Topics[i]
+
+		topic, ok := cluster.Topics[t.Topic]
+		if !ok {
+			return broker, NewError(protocol.NewErrNoTopic(t.Topic))
+		}
+
+		for j := range t.Partitions {
+			p := &t.Partitions[j]
+
+			partition, ok := topic.Partitions[p.Partition]
+			if !ok {
+				return broker, NewError(protocol.NewErrNoPartition(t.Topic, p.Partition))
+			}
+
+			if b, ok := cluster.Brokers[partition.Leader]; !ok {
+				return broker, NewError(protocol.NewErrNoLeader(t.Topic, p.Partition))
+			} else if broker.ID < 0 {
+				broker = b
+			} else if b.ID != broker.ID {
+				return broker, NewError(fmt.Errorf("mismatching leaders (%d!=%d)", b.ID, broker.ID))
+			}
+		}
+	}
+
+	return broker, nil
+}
+
+func (r *Request) HasResponse() bool {
+	return r.Acks != 0
+}
+
+type RequestTopic struct {
+	Topic      string             `kafka:"min=v0,max=v8"`
+	Partitions []RequestPartition `kafka:"min=v0,max=v8"`
+}
+
+type RequestPartition struct {
+	Partition int32                 `kafka:"min=v0,max=v8"`
+	RecordSet protocol.RawRecordSet `kafka:"min=v0,max=v8"`
+}
+
+var (
+	_ protocol.BrokerMessage = (*Request)(nil)
+)
+
+type Error struct {
+	Err error
+}
+
+func NewError(err error) *Error {
+	return &Error{Err: err}
+}
+
+func (e *Error) Error() string {
+	return fmt.Sprintf("fetch request error: %v", e.Err)
+}
+
+func (e *Error) Unwrap() error {
+	return e.Err
+}
diff -pruN 0.2.1-1.1/protocol/rawproduce/rawproduce_test.go 0.4.49+ds1-1/protocol/rawproduce/rawproduce_test.go
--- 0.2.1-1.1/protocol/rawproduce/rawproduce_test.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/protocol/rawproduce/rawproduce_test.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,201 @@
+package rawproduce_test
+
+import (
+	"bytes"
+	"testing"
+	"time"
+
+	"github.com/segmentio/kafka-go/protocol"
+	"github.com/segmentio/kafka-go/protocol/prototest"
+	"github.com/segmentio/kafka-go/protocol/rawproduce"
+)
+
+const (
+	v0 = 0
+	v3 = 3
+	v5 = 5
+)
+
+func TestRawProduceRequest(t *testing.T) {
+	t0 := time.Now().Truncate(time.Millisecond)
+	t1 := t0.Add(1 * time.Millisecond)
+	t2 := t0.Add(2 * time.Millisecond)
+
+	prototest.TestRequestWithOverride(t, v0, &rawproduce.Request{
+		Acks:    1,
+		Timeout: 500,
+		Topics: []rawproduce.RequestTopic{
+			{
+				Topic: "topic-1",
+				Partitions: []rawproduce.RequestPartition{
+					{
+						Partition: 0,
+						RecordSet: NewRawRecordSet(protocol.NewRecordReader(
+							protocol.Record{Offset: 0, Time: t0, Key: nil, Value: nil},
+						), 1, 0),
+					},
+					{
+						Partition: 1,
+						RecordSet: NewRawRecordSet(protocol.NewRecordReader(
+							protocol.Record{Offset: 0, Time: t0, Key: nil, Value: prototest.String("msg-0")},
+							protocol.Record{Offset: 1, Time: t1, Key: nil, Value: prototest.String("msg-1")},
+							protocol.Record{Offset: 2, Time: t2, Key: prototest.Bytes([]byte{1}), Value: prototest.String("msg-2")},
+						), 1, 0),
+					},
+				},
+			},
+
+			{
+				Topic: "topic-2",
+				Partitions: []rawproduce.RequestPartition{
+					{
+						Partition: 0,
+						RecordSet: NewRawRecordSet(protocol.NewRecordReader(
+							protocol.Record{Offset: 0, Time: t0, Key: nil, Value: prototest.String("msg-0")},
+							protocol.Record{Offset: 1, Time: t1, Key: nil, Value: prototest.String("msg-1")},
+							protocol.Record{Offset: 2, Time: t2, Key: prototest.Bytes([]byte{1}), Value: prototest.String("msg-2")},
+						), 1, protocol.Gzip),
+					},
+				},
+			},
+		},
+	})
+
+	prototest.TestRequestWithOverride(t, v3, &rawproduce.Request{
+		TransactionalID: "1234",
+		Acks:            1,
+		Timeout:         500,
+		Topics: []rawproduce.RequestTopic{
+			{
+				Topic: "topic-1",
+				Partitions: []rawproduce.RequestPartition{
+					{
+						Partition: 0,
+						RecordSet: NewRawRecordSet(protocol.NewRecordReader(
+							protocol.Record{Offset: 0, Time: t0, Key: nil, Value: nil},
+						), 1, 0),
+					},
+					{
+						Partition: 1,
+						RecordSet: NewRawRecordSet(protocol.NewRecordReader(
+							protocol.Record{Offset: 0, Time: t0, Key: nil, Value: prototest.String("msg-0")},
+							protocol.Record{Offset: 1, Time: t1, Key: nil, Value: prototest.String("msg-1")},
+							protocol.Record{Offset: 2, Time: t2, Key: prototest.Bytes([]byte{1}), Value: prototest.String("msg-2")},
+						), 1, 0),
+					},
+				},
+			},
+		},
+	})
+
+	headers := []protocol.Header{
+		{Key: "key-1", Value: []byte("value-1")},
+		{Key: "key-2", Value: []byte("value-2")},
+		{Key: "key-3", Value: []byte("value-3")},
+	}
+
+	prototest.TestRequestWithOverride(t, v5, &rawproduce.Request{
+		TransactionalID: "1234",
+		Acks:            1,
+		Timeout:         500,
+		Topics: []rawproduce.RequestTopic{
+			{
+				Topic: "topic-1",
+				Partitions: []rawproduce.RequestPartition{
+					{
+						Partition: 1,
+						RecordSet: NewRawRecordSet(protocol.NewRecordReader(
+							protocol.Record{Offset: 0, Time: t0, Key: nil, Value: prototest.String("msg-0"), Headers: headers},
+							protocol.Record{Offset: 1, Time: t1, Key: nil, Value: prototest.String("msg-1")},
+							protocol.Record{Offset: 2, Time: t2, Key: prototest.Bytes([]byte{1}), Value: prototest.String("msg-2")},
+						), 2, 0),
+					},
+				},
+			},
+
+			{
+				Topic: "topic-2",
+				Partitions: []rawproduce.RequestPartition{
+					{
+						Partition: 1,
+						RecordSet: NewRawRecordSet(protocol.NewRecordReader(
+							protocol.Record{Offset: 0, Time: t0, Key: nil, Value: prototest.String("msg-0"), Headers: headers},
+							protocol.Record{Offset: 1, Time: t1, Key: nil, Value: prototest.String("msg-1")},
+							protocol.Record{Offset: 2, Time: t2, Key: prototest.Bytes([]byte{1}), Value: prototest.String("msg-2")},
+						), 2, protocol.Snappy),
+					},
+				},
+			},
+		},
+	})
+}
+
+func NewRawRecordSet(reader protocol.RecordReader, version int8, attr protocol.Attributes) protocol.RawRecordSet {
+	rs := protocol.RecordSet{Version: version, Attributes: attr, Records: reader}
+	buf := &bytes.Buffer{}
+	rs.WriteTo(buf)
+
+	return protocol.RawRecordSet{
+		Reader: buf,
+	}
+}
+
+func BenchmarkProduceRequest(b *testing.B) {
+	t0 := time.Now().Truncate(time.Millisecond)
+	t1 := t0.Add(1 * time.Millisecond)
+	t2 := t0.Add(2 * time.Millisecond)
+
+	prototest.BenchmarkRequest(b, v3, &rawproduce.Request{
+		TransactionalID: "1234",
+		Acks:            1,
+		Timeout:         500,
+		Topics: []rawproduce.RequestTopic{
+			{
+				Topic: "topic-1",
+				Partitions: []rawproduce.RequestPartition{
+					{
+						Partition: 0,
+						RecordSet: NewRawRecordSet(protocol.NewRecordReader(
+							protocol.Record{Offset: 0, Time: t0, Key: nil, Value: nil},
+						), 1, 0),
+					},
+					{
+						Partition: 1,
+						RecordSet: NewRawRecordSet(protocol.NewRecordReader(
+							protocol.Record{Offset: 0, Time: t0, Key: nil, Value: prototest.String("msg-0")},
+							protocol.Record{Offset: 1, Time: t1, Key: nil, Value: prototest.String("msg-1")},
+							protocol.Record{Offset: 2, Time: t2, Key: prototest.Bytes([]byte{1}), Value: prototest.String("msg-2")},
+						), 1, 0),
+					},
+				},
+			},
+		},
+	})
+
+	headers := []protocol.Header{
+		{Key: "key-1", Value: []byte("value-1")},
+		{Key: "key-2", Value: []byte("value-2")},
+		{Key: "key-3", Value: []byte("value-3")},
+	}
+
+	prototest.BenchmarkRequest(b, v5, &rawproduce.Request{
+		TransactionalID: "1234",
+		Acks:            1,
+		Timeout:         500,
+		Topics: []rawproduce.RequestTopic{
+			{
+				Topic: "topic-1",
+				Partitions: []rawproduce.RequestPartition{
+					{
+						Partition: 1,
+						RecordSet: NewRawRecordSet(protocol.NewRecordReader(
+							protocol.Record{Offset: 0, Time: t0, Key: nil, Value: prototest.String("msg-0"), Headers: headers},
+							protocol.Record{Offset: 1, Time: t1, Key: nil, Value: prototest.String("msg-1")},
+							protocol.Record{Offset: 2, Time: t2, Key: prototest.Bytes([]byte{1}), Value: prototest.String("msg-2")},
+						), 2, 0),
+					},
+				},
+			},
+		},
+	})
+}
diff -pruN 0.2.1-1.1/protocol/record.go 0.4.49+ds1-1/protocol/record.go
--- 0.2.1-1.1/protocol/record.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/protocol/record.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,354 @@
+package protocol
+
+import (
+	"bytes"
+	"encoding/binary"
+	"fmt"
+	"io"
+	"time"
+
+	"github.com/segmentio/kafka-go/compress"
+)
+
+// Attributes is a bitset representing special attributes set on records.
+type Attributes int16
+
+const (
+	Gzip          Attributes = Attributes(compress.Gzip)   // 1
+	Snappy        Attributes = Attributes(compress.Snappy) // 2
+	Lz4           Attributes = Attributes(compress.Lz4)    // 3
+	Zstd          Attributes = Attributes(compress.Zstd)   // 4
+	Transactional Attributes = 1 << 4
+	Control       Attributes = 1 << 5
+)
+
+func (a Attributes) Compression() compress.Compression {
+	return compress.Compression(a & 7)
+}
+
+func (a Attributes) Transactional() bool {
+	return (a & Transactional) != 0
+}
+
+func (a Attributes) Control() bool {
+	return (a & Control) != 0
+}
+
+func (a Attributes) String() string {
+	s := a.Compression().String()
+	if a.Transactional() {
+		s += "+transactional"
+	}
+	if a.Control() {
+		s += "+control"
+	}
+	return s
+}
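+
+// A short sketch of how the bitset composes (illustrative only): the low bits
+// carry the compression codec while the transactional and control markers are
+// independent flags.
+//
+//	a := Snappy | Transactional
+//	a.Compression()   // compress.Snappy
+//	a.Transactional() // true
+//	a.Control()       // false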
+
+// Header represents a single entry in a list of record headers.
+type Header struct {
+	Key   string
+	Value []byte
+}
+
+// Record represents a single kafka record.
+//
+// Record values are not safe to use concurrently from multiple goroutines.
+type Record struct {
+	// The offset at which the record exists in a topic partition. This value
+	// is ignored in produce requests.
+	Offset int64
+
+	// Returns the time of the record. This value may be omitted in produce
+	// requests to let kafka set the time when it saves the record.
+	Time time.Time
+
+	// Returns a byte sequence containing the key of this record. The returned
+	// sequence may be nil to indicate that the record has no key. If the record
+	// is part of a RecordSet, the content of the key must remain valid at least
+	// until the record set is closed (or until the key is closed).
+	Key Bytes
+
+	// Returns a byte sequence containing the value of this record. The returned
+	// sequence may be nil to indicate that the record has no value. If the
+	// record is part of a RecordSet, the content of the value must remain valid
+	// at least until the record set is closed (or until the value is closed).
+	Value Bytes
+
+	// Returns the list of headers associated with this record. The returned
+	// slice may be reused across calls, the program should use it as an
+	// immutable value.
+	Headers []Header
+}
+
+// RecordSet represents a sequence of records in Produce requests and Fetch
+// responses. All v0, v1, and v2 formats are supported.
+type RecordSet struct {
+	// The message version that this record set will be represented as, valid
+	// values are 1 or 2.
+	//
+	// When reading, this is the value of the highest version used in the
+	// batches that compose the record set.
+	//
+	// When writing, this value dictates the format that the records will be
+	// encoded in.
+	Version int8
+
+	// Attributes set on the record set.
+	//
+	// When reading, the attributes are the combination of all attributes in
+	// the batches that compose the record set.
+	//
+	// When writing, the attributes apply to the whole sequence of records in
+	// the set.
+	Attributes Attributes
+
+	// A reader exposing the sequence of records.
+	//
+	// When reading a RecordSet from an io.Reader, the Records field will be a
+	// *RecordStream. If the program needs to access the details of each batch
+	// that compose the stream, it may use type assertions to access the
+	// underlying types of each batch.
+	Records RecordReader
+}
+
+// bufferedReader is an interface implemented by types like bufio.Reader, which
+// we use to optimize prefix reads by accessing the internal buffer directly
+// through calls to Peek.
+type bufferedReader interface {
+	Discard(int) (int, error)
+	Peek(int) ([]byte, error)
+}
+
+// bytesBuffer is an interface implemented by types like bytes.Buffer, which we
+// use to optimize prefix reads by accessing the internal buffer directly
+// through calls to Bytes.
+type bytesBuffer interface {
+	Bytes() []byte
+}
+
+// magicByteOffset is the position of the magic byte in all versions of record
+// sets in the kafka protocol.
+const magicByteOffset = 16
+
+// ReadFrom reads the representation of a record set from r into rs, returning
+// the number of bytes consumed from r, and a non-nil error if the record set
+// could not be read.
+func (rs *RecordSet) ReadFrom(r io.Reader) (int64, error) {
+	d, _ := r.(*decoder)
+	if d == nil {
+		d = &decoder{
+			reader: r,
+			remain: 4,
+		}
+	}
+
+	*rs = RecordSet{}
+	limit := d.remain
+	size := d.readInt32()
+
+	if d.err != nil {
+		return int64(limit - d.remain), d.err
+	}
+
+	if size <= 0 {
+		return 4, nil
+	}
+
+	stream := &RecordStream{
+		Records: make([]RecordReader, 0, 4),
+	}
+
+	var err error
+	d.remain = int(size)
+
+	for d.remain > 0 && err == nil {
+		var version byte
+
+		if d.remain < (magicByteOffset + 1) {
+			if len(stream.Records) != 0 {
+				break
+			}
+			return 4, fmt.Errorf("impossible record set shorter than %d bytes", magicByteOffset+1)
+		}
+
+		switch r := d.reader.(type) {
+		case bufferedReader:
+			b, err := r.Peek(magicByteOffset + 1)
+			if err != nil {
+				n, _ := r.Discard(len(b))
+				return 4 + int64(n), dontExpectEOF(err)
+			}
+			version = b[magicByteOffset]
+		case bytesBuffer:
+			version = r.Bytes()[magicByteOffset]
+		default:
+			b := make([]byte, magicByteOffset+1)
+			if n, err := io.ReadFull(d.reader, b); err != nil {
+				return 4 + int64(n), dontExpectEOF(err)
+			}
+			version = b[magicByteOffset]
+			// Reconstruct the prefix that we had to read to determine the version
+			// of the record set from the magic byte.
+			//
+			// Technically this may recursively stack readers when consuming all
+			// items of the batch, which could hurt performance. In practice this
+			// path should not be taken, though, since the decoder would read from a
+			// *bufio.Reader which implements the bufferedReader interface.
+			d.reader = io.MultiReader(bytes.NewReader(b), d.reader)
+		}
+
+		var tmp RecordSet
+		switch version {
+		case 0, 1:
+			err = tmp.readFromVersion1(d)
+		case 2:
+			err = tmp.readFromVersion2(d)
+		default:
+			err = fmt.Errorf("unsupported message version %d for message of size %d", version, size)
+		}
+
+		if tmp.Version > rs.Version {
+			rs.Version = tmp.Version
+		}
+
+		rs.Attributes |= tmp.Attributes
+
+		if tmp.Records != nil {
+			stream.Records = append(stream.Records, tmp.Records)
+		}
+	}
+
+	if len(stream.Records) != 0 {
+		rs.Records = stream
+		// Ignore errors if we've successfully read records, so the
+		// program can keep making progress.
+		err = nil
+	}
+
+	d.discardAll()
+	rn := 4 + (int(size) - d.remain)
+	d.remain = limit - rn
+	return int64(rn), err
+}
+
+// WriteTo writes the representation of rs into w. The value of rs.Version
+// dictates which format that the record set will be represented as.
+//
+// The error will be ErrNoRecord if rs contained no records.
+//
+// Note: since this package is only compatible with kafka 0.10 and above, the
+// method never produces messages in version 0. If rs.Version is zero, the
+// method defaults to producing messages in version 1.
+func (rs *RecordSet) WriteTo(w io.Writer) (int64, error) {
+	if rs.Records == nil {
+		return 0, ErrNoRecord
+	}
+
+	// This optimization avoids rendering the record set in an intermediary
+	// buffer when the writer is already a pageBuffer, which is a common case
+	// due to the way WriteRequest and WriteResponse are implemented.
+	buffer, _ := w.(*pageBuffer)
+	bufferOffset := int64(0)
+
+	if buffer != nil {
+		bufferOffset = buffer.Size()
+	} else {
+		buffer = newPageBuffer()
+		defer buffer.unref()
+	}
+
+	size := packUint32(0)
+	buffer.Write(size[:]) // size placeholder
+
+	var err error
+	switch rs.Version {
+	case 0, 1:
+		err = rs.writeToVersion1(buffer, bufferOffset+4)
+	case 2:
+		err = rs.writeToVersion2(buffer, bufferOffset+4)
+	default:
+		err = fmt.Errorf("unsupported record set version %d", rs.Version)
+	}
+	if err != nil {
+		return 0, err
+	}
+
+	n := buffer.Size() - bufferOffset
+	if n == 0 {
+		size = packUint32(^uint32(0))
+	} else {
+		size = packUint32(uint32(n) - 4)
+	}
+	buffer.WriteAt(size[:], bufferOffset)
+
+	// This condition indicates that the output writer received by `WriteTo` was
+	// not a *pageBuffer, in which case we need to flush the buffered records
+	// data into it.
+	if buffer != w {
+		return buffer.WriteTo(w)
+	}
+
+	return n, nil
+}
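+
+// A minimal usage sketch (editorial illustration, mirroring what the prototest
+// helpers do): encoding a v2 record set into a buffer.
+//
+//	rs := RecordSet{
+//		Version: 2,
+//		Records: NewRecordReader(Record{Value: NewBytes([]byte("msg-0"))}),
+//	}
+//	buf := &bytes.Buffer{}
+//	if _, err := rs.WriteTo(buf); err != nil {
+//		// handle the encoding error
+//	}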
+
+// RawRecordSet represents a record set for a RawProduce request. The record set is
+// represented as a raw sequence of pre-encoded record set bytes.
+type RawRecordSet struct {
+	// Reader exposes the raw sequence of record set bytes.
+	Reader io.Reader
+}
+
+// ReadFrom reads the representation of a record set from r into rrs. It re-uses the
+// existing RecordSet.ReadFrom implementation to first read/decode data into a RecordSet,
+// then writes/encodes the RecordSet to a buffer referenced by the RawRecordSet.
+//
+// Note: re-using the RecordSet.ReadFrom implementation makes this suboptimal from a
+// performance standpoint as it requires an extra copy of the record bytes. Holding off
+// on optimizing, as this code path is only invoked in tests.
+func (rrs *RawRecordSet) ReadFrom(r io.Reader) (int64, error) {
+	rs := &RecordSet{}
+	n, err := rs.ReadFrom(r)
+	if err != nil {
+		return 0, err
+	}
+
+	buf := &bytes.Buffer{}
+	rs.WriteTo(buf)
+	*rrs = RawRecordSet{
+		Reader: buf,
+	}
+
+	return n, nil
+}
+
+// WriteTo writes the RawRecordSet to an io.Writer. Since this is a raw record set representation, all that is
+// done here is copying bytes from the underlying reader to the specified writer.
+func (rrs *RawRecordSet) WriteTo(w io.Writer) (int64, error) {
+	if rrs.Reader == nil {
+		return 0, ErrNoRecord
+	}
+
+	return io.Copy(w, rrs.Reader)
+}
+
+func makeTime(t int64) time.Time {
+	return time.Unix(t/1000, (t%1000)*int64(time.Millisecond))
+}
+
+func timestamp(t time.Time) int64 {
+	if t.IsZero() {
+		return 0
+	}
+	return t.UnixNano() / int64(time.Millisecond)
+}
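+
+// As a sketch of the millisecond conversion above: makeTime(1577836800123)
+// yields a time 123ms after 2020-01-01T00:00:00Z, and timestamp of that value
+// returns 1577836800123 again, while the zero time is mapped to 0 rather than
+// a large negative millisecond count.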
+
+func packUint32(u uint32) (b [4]byte) {
+	binary.BigEndian.PutUint32(b[:], u)
+	return
+}
+
+func packUint64(u uint64) (b [8]byte) {
+	binary.BigEndian.PutUint64(b[:], u)
+	return
+}
diff -pruN 0.2.1-1.1/protocol/record_batch.go 0.4.49+ds1-1/protocol/record_batch.go
--- 0.2.1-1.1/protocol/record_batch.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/protocol/record_batch.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,358 @@
+package protocol
+
+import (
+	"errors"
+	"io"
+	"time"
+)
+
+// RecordReader is an interface representing a sequence of records. Record sets
+// are used in both produce and fetch requests to represent the sequence of
+// records that are sent to or received from kafka brokers.
+//
+// RecordSet values are not safe to use concurrently from multiple goroutines.
+type RecordReader interface {
+	// Returns the next record in the set, or io.EOF if the end of the sequence
+	// has been reached.
+	//
+	// The returned Record is guaranteed to be valid until the next call to
+	// ReadRecord. If the program needs to retain the Record value it must make
+	// a copy.
+	ReadRecord() (*Record, error)
+}
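+
+// A typical consumption loop (sketch; records is any RecordReader and process
+// is a placeholder for application code) drains the reader until io.EOF and
+// copies anything it needs to keep beyond the next call:
+//
+//	for {
+//		rec, err := records.ReadRecord()
+//		if err != nil {
+//			if errors.Is(err, io.EOF) {
+//				break
+//			}
+//			return err
+//		}
+//		process(rec) // must not retain rec past the next ReadRecord call
+//	}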
+
+// NewRecordReader constructs a reader exposing the records passed as arguments.
+func NewRecordReader(records ...Record) RecordReader {
+	switch len(records) {
+	case 0:
+		return emptyRecordReader{}
+	default:
+		r := &recordReader{records: make([]Record, len(records))}
+		copy(r.records, records)
+		return r
+	}
+}
+
+// MultiRecordReader merges multiple record batches into one.
+func MultiRecordReader(batches ...RecordReader) RecordReader {
+	switch len(batches) {
+	case 0:
+		return emptyRecordReader{}
+	case 1:
+		return batches[0]
+	default:
+		m := &multiRecordReader{batches: make([]RecordReader, len(batches))}
+		copy(m.batches, batches)
+		return m
+	}
+}
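+
+// For example (sketch), two readers can be merged so their records are read
+// back to back, in the order the readers were passed:
+//
+//	merged := MultiRecordReader(
+//		NewRecordReader(Record{Offset: 1}),
+//		NewRecordReader(Record{Offset: 2}),
+//	)
+//	// merged yields the record at offset 1, then the record at offset 2,
+//	// then io.EOF.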
+
+func forEachRecord(r RecordReader, f func(int, *Record) error) error {
+	for i := 0; ; i++ {
+		rec, err := r.ReadRecord()
+
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				err = nil
+			}
+			return err
+		}
+
+		if err := handleRecord(i, rec, f); err != nil {
+			return err
+		}
+	}
+}
+
+func handleRecord(i int, r *Record, f func(int, *Record) error) error {
+	if r.Key != nil {
+		defer r.Key.Close()
+	}
+	if r.Value != nil {
+		defer r.Value.Close()
+	}
+	return f(i, r)
+}
+
+type recordReader struct {
+	records []Record
+	index   int
+}
+
+func (r *recordReader) ReadRecord() (*Record, error) {
+	if i := r.index; i >= 0 && i < len(r.records) {
+		r.index++
+		return &r.records[i], nil
+	}
+	return nil, io.EOF
+}
+
+type multiRecordReader struct {
+	batches []RecordReader
+	index   int
+}
+
+func (m *multiRecordReader) ReadRecord() (*Record, error) {
+	for {
+		if m.index == len(m.batches) {
+			return nil, io.EOF
+		}
+		r, err := m.batches[m.index].ReadRecord()
+		if err == nil {
+			return r, nil
+		}
+		if !errors.Is(err, io.EOF) {
+			return nil, err
+		}
+		m.index++
+	}
+}
+
+// optimizedRecordReader is an implementation of a RecordReader which exposes a
+// sequence of records decoded from a record batch.
+type optimizedRecordReader struct {
+	records []optimizedRecord
+	index   int
+	buffer  Record
+	headers [][]Header
+}
+
+func (r *optimizedRecordReader) ReadRecord() (*Record, error) {
+	if i := r.index; i >= 0 && i < len(r.records) {
+		rec := &r.records[i]
+		r.index++
+		r.buffer = Record{
+			Offset: rec.offset,
+			Time:   rec.time(),
+			Key:    rec.key(),
+			Value:  rec.value(),
+		}
+		if i < len(r.headers) {
+			r.buffer.Headers = r.headers[i]
+		}
+		return &r.buffer, nil
+	}
+	return nil, io.EOF
+}
+
+type optimizedRecord struct {
+	offset    int64
+	timestamp int64
+	keyRef    *pageRef
+	valueRef  *pageRef
+}
+
+func (r *optimizedRecord) time() time.Time {
+	return makeTime(r.timestamp)
+}
+
+func (r *optimizedRecord) key() Bytes {
+	return makeBytes(r.keyRef)
+}
+
+func (r *optimizedRecord) value() Bytes {
+	return makeBytes(r.valueRef)
+}
+
+func makeBytes(ref *pageRef) Bytes {
+	if ref == nil {
+		return nil
+	}
+	return ref
+}
+
+type emptyRecordReader struct{}
+
+func (emptyRecordReader) ReadRecord() (*Record, error) { return nil, io.EOF }
+
+// ControlRecord represents a record read from a control batch.
+type ControlRecord struct {
+	Offset  int64
+	Time    time.Time
+	Version int16
+	Type    int16
+	Data    []byte
+	Headers []Header
+}
+
+func ReadControlRecord(r *Record) (*ControlRecord, error) {
+	if r.Key != nil {
+		defer r.Key.Close()
+	}
+	if r.Value != nil {
+		defer r.Value.Close()
+	}
+
+	k, err := ReadAll(r.Key)
+	if err != nil {
+		return nil, err
+	}
+	if k == nil {
+		return nil, Error("invalid control record with nil key")
+	}
+	if len(k) != 4 {
+		return nil, Errorf("invalid control record with key of size %d", len(k))
+	}
+
+	v, err := ReadAll(r.Value)
+	if err != nil {
+		return nil, err
+	}
+
+	c := &ControlRecord{
+		Offset:  r.Offset,
+		Time:    r.Time,
+		Version: readInt16(k[:2]),
+		Type:    readInt16(k[2:]),
+		Data:    v,
+		Headers: r.Headers,
+	}
+
+	return c, nil
+}
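+
+// The 4-byte key decoded above packs two int16 values, encoded big-endian as
+// elsewhere in the protocol: the control record version in bytes 0-1 and the
+// control type in bytes 2-3. As a sketch, a key of []byte{0, 1, 0, 1} decodes
+// to Version=1 and Type=1.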
+
+func (cr *ControlRecord) Key() Bytes {
+	k := make([]byte, 4)
+	writeInt16(k[:2], cr.Version)
+	writeInt16(k[2:], cr.Type)
+	return NewBytes(k)
+}
+
+func (cr *ControlRecord) Value() Bytes {
+	return NewBytes(cr.Data)
+}
+
+func (cr *ControlRecord) Record() Record {
+	return Record{
+		Offset:  cr.Offset,
+		Time:    cr.Time,
+		Key:     cr.Key(),
+		Value:   cr.Value(),
+		Headers: cr.Headers,
+	}
+}
+
+// ControlBatch is an implementation of the RecordReader interface representing
+// control batches returned by kafka brokers.
+type ControlBatch struct {
+	Attributes           Attributes
+	PartitionLeaderEpoch int32
+	BaseOffset           int64
+	ProducerID           int64
+	ProducerEpoch        int16
+	BaseSequence         int32
+	Records              RecordReader
+}
+
+// NewControlBatch constructs a control batch from the list of records passed as
+// arguments.
+func NewControlBatch(records ...ControlRecord) *ControlBatch {
+	rawRecords := make([]Record, len(records))
+	for i, cr := range records {
+		rawRecords[i] = cr.Record()
+	}
+	return &ControlBatch{
+		Records: NewRecordReader(rawRecords...),
+	}
+}
+
+func (c *ControlBatch) ReadRecord() (*Record, error) {
+	return c.Records.ReadRecord()
+}
+
+func (c *ControlBatch) ReadControlRecord() (*ControlRecord, error) {
+	r, err := c.ReadRecord()
+	if err != nil {
+		return nil, err
+	}
+	if r.Key != nil {
+		defer r.Key.Close()
+	}
+	if r.Value != nil {
+		defer r.Value.Close()
+	}
+	return ReadControlRecord(r)
+}
+
+func (c *ControlBatch) Offset() int64 {
+	return c.BaseOffset
+}
+
+func (c *ControlBatch) Version() int {
+	return 2
+}
+
+// RecordBatch is an implementation of the RecordReader interface representing
+// regular record batches (v2).
+type RecordBatch struct {
+	Attributes           Attributes
+	PartitionLeaderEpoch int32
+	BaseOffset           int64
+	ProducerID           int64
+	ProducerEpoch        int16
+	BaseSequence         int32
+	Records              RecordReader
+}
+
+func (r *RecordBatch) ReadRecord() (*Record, error) {
+	return r.Records.ReadRecord()
+}
+
+func (r *RecordBatch) Offset() int64 {
+	return r.BaseOffset
+}
+
+func (r *RecordBatch) Version() int {
+	return 2
+}
+
+// MessageSet is an implementation of the RecordReader interface representing
+// regular message sets (v1).
+type MessageSet struct {
+	Attributes Attributes
+	BaseOffset int64
+	Records    RecordReader
+}
+
+func (m *MessageSet) ReadRecord() (*Record, error) {
+	return m.Records.ReadRecord()
+}
+
+func (m *MessageSet) Offset() int64 {
+	return m.BaseOffset
+}
+
+func (m *MessageSet) Version() int {
+	return 1
+}
+
+// RecordStream is an implementation of the RecordReader interface which
+// combines multiple underlying RecordReaders and only exposes records that
+// are not from control batches.
+type RecordStream struct {
+	Records []RecordReader
+	index   int
+}
+
+func (s *RecordStream) ReadRecord() (*Record, error) {
+	for {
+		if s.index < 0 || s.index >= len(s.Records) {
+			return nil, io.EOF
+		}
+
+		if _, isControl := s.Records[s.index].(*ControlBatch); isControl {
+			s.index++
+			continue
+		}
+
+		r, err := s.Records[s.index].ReadRecord()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				s.index++
+				continue
+			}
+		}
+
+		return r, err
+	}
+}
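+
+// As a sketch (controlBatch and dataBatch stand for readers produced while
+// decoding a fetch response), a RecordStream wrapping both only yields the
+// records of the non-control batch:
+//
+//	stream := &RecordStream{Records: []RecordReader{controlBatch, dataBatch}}
+//	rec, err := stream.ReadRecord() // first record of dataBatch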
diff -pruN 0.2.1-1.1/protocol/record_batch_test.go 0.4.49+ds1-1/protocol/record_batch_test.go
--- 0.2.1-1.1/protocol/record_batch_test.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/protocol/record_batch_test.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,200 @@
+package protocol
+
+import (
+	"errors"
+	"io"
+	"reflect"
+	"testing"
+	"time"
+)
+
+type memoryRecord struct {
+	offset  int64
+	time    time.Time
+	key     []byte
+	value   []byte
+	headers []Header
+}
+
+func (m *memoryRecord) Record() Record {
+	return Record{
+		Offset:  m.offset,
+		Time:    m.time,
+		Key:     NewBytes(m.key),
+		Value:   NewBytes(m.value),
+		Headers: m.headers,
+	}
+}
+
+func makeRecords(memoryRecords []memoryRecord) []Record {
+	records := make([]Record, len(memoryRecords))
+	for i, m := range memoryRecords {
+		records[i] = m.Record()
+	}
+	return records
+}
+
+func TestRecordReader(t *testing.T) {
+	now := time.Now()
+
+	records := []memoryRecord{
+		{
+			offset: 1,
+			time:   now,
+			key:    []byte("key-1"),
+		},
+		{
+			offset: 2,
+			time:   now.Add(time.Millisecond),
+			value:  []byte("value-1"),
+		},
+		{
+			offset: 3,
+			time:   now.Add(time.Second),
+			key:    []byte("key-3"),
+			value:  []byte("value-3"),
+			headers: []Header{
+				{Key: "answer", Value: []byte("42")},
+			},
+		},
+	}
+
+	r1 := NewRecordReader(makeRecords(records)...)
+	r2 := NewRecordReader(makeRecords(records)...)
+	assertRecords(t, r1, r2)
+}
+
+func TestMultiRecordReader(t *testing.T) {
+	now := time.Now()
+
+	records := []memoryRecord{
+		{
+			offset: 1,
+			time:   now,
+			key:    []byte("key-1"),
+		},
+		{
+			offset: 2,
+			time:   now.Add(time.Millisecond),
+			value:  []byte("value-1"),
+		},
+		{
+			offset: 3,
+			time:   now.Add(time.Second),
+			key:    []byte("key-3"),
+			value:  []byte("value-3"),
+			headers: []Header{
+				{Key: "answer", Value: []byte("42")},
+			},
+		},
+	}
+
+	r1 := NewRecordReader(makeRecords(records)...)
+	r2 := MultiRecordReader(
+		NewRecordReader(makeRecords(records[:1])...),
+		NewRecordReader(makeRecords(records[1:])...),
+	)
+	assertRecords(t, r1, r2)
+}
+
+func TestControlRecord(t *testing.T) {
+	now := time.Now()
+
+	records := []ControlRecord{
+		{
+			Offset:  1,
+			Time:    now,
+			Version: 2,
+			Type:    3,
+		},
+		{
+			Offset:  2,
+			Time:    now.Add(time.Second),
+			Version: 4,
+			Type:    5,
+			Data:    []byte("Hello World!"),
+			Headers: []Header{
+				{Key: "answer", Value: []byte("42")},
+			},
+		},
+	}
+
+	batch := NewControlBatch(records...)
+	found := make([]ControlRecord, 0, len(records))
+
+	for {
+		r, err := batch.ReadControlRecord()
+		if err != nil {
+			if !errors.Is(err, io.EOF) {
+				t.Fatal(err)
+			}
+			break
+		}
+		found = append(found, *r)
+	}
+
+	if !reflect.DeepEqual(records, found) {
+		t.Error("control records mismatch")
+	}
+}
+
+func assertRecords(t *testing.T, r1, r2 RecordReader) {
+	t.Helper()
+
+	for {
+		rec1, err1 := r1.ReadRecord()
+		rec2, err2 := r2.ReadRecord()
+
+		if err1 != nil || err2 != nil {
+			if !errors.Is(err1, err2) {
+				t.Error("errors mismatch:")
+				t.Log("expected:", err2)
+				t.Log("found:   ", err1)
+			}
+			return
+		}
+
+		if !equalRecords(rec1, rec2) {
+			t.Error("records mismatch:")
+			t.Logf("expected: %+v", rec2)
+			t.Logf("found:    %+v", rec1)
+		}
+	}
+}
+
+func equalRecords(r1, r2 *Record) bool {
+	if r1.Offset != r2.Offset {
+		return false
+	}
+
+	if !r1.Time.Equal(r2.Time) {
+		return false
+	}
+
+	k1 := readAll(r1.Key)
+	k2 := readAll(r2.Key)
+
+	if !reflect.DeepEqual(k1, k2) {
+		return false
+	}
+
+	v1 := readAll(r1.Value)
+	v2 := readAll(r2.Value)
+
+	if !reflect.DeepEqual(v1, v2) {
+		return false
+	}
+
+	return reflect.DeepEqual(r1.Headers, r2.Headers)
+}
+
+func readAll(bytes Bytes) []byte {
+	if bytes != nil {
+		defer bytes.Close()
+	}
+	b, err := ReadAll(bytes)
+	if err != nil {
+		panic(err)
+	}
+	return b
+}
diff -pruN 0.2.1-1.1/protocol/record_v1.go 0.4.49+ds1-1/protocol/record_v1.go
--- 0.2.1-1.1/protocol/record_v1.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/protocol/record_v1.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,243 @@
+package protocol
+
+import (
+	"errors"
+	"hash/crc32"
+	"io"
+	"math"
+	"time"
+)
+
+func readMessage(b *pageBuffer, d *decoder) (attributes int8, baseOffset, timestamp int64, key, value Bytes, err error) {
+	md := decoder{
+		reader: d,
+		remain: 12,
+	}
+
+	baseOffset = md.readInt64()
+	md.remain = int(md.readInt32())
+
+	crc := uint32(md.readInt32())
+	md.setCRC(crc32.IEEETable)
+	magicByte := md.readInt8()
+	attributes = md.readInt8()
+	timestamp = int64(0)
+
+	if magicByte != 0 {
+		timestamp = md.readInt64()
+	}
+
+	keyOffset := b.Size()
+	keyLength := int(md.readInt32())
+	hasKey := keyLength >= 0
+	if hasKey {
+		md.writeTo(b, keyLength)
+		key = b.ref(keyOffset, b.Size())
+	}
+
+	valueOffset := b.Size()
+	valueLength := int(md.readInt32())
+	hasValue := valueLength >= 0
+	if hasValue {
+		md.writeTo(b, valueLength)
+		value = b.ref(valueOffset, b.Size())
+	}
+
+	if md.crc32 != crc {
+		err = Errorf("crc32 checksum mismatch (computed=%d found=%d)", md.crc32, crc)
+	} else {
+		err = dontExpectEOF(md.err)
+	}
+
+	return
+}
+
+func (rs *RecordSet) readFromVersion1(d *decoder) error {
+	var records RecordReader
+
+	b := newPageBuffer()
+	defer b.unref()
+
+	attributes, baseOffset, timestamp, key, value, err := readMessage(b, d)
+	if err != nil {
+		return err
+	}
+
+	if compression := Attributes(attributes).Compression(); compression == 0 {
+		records = &message{
+			Record: Record{
+				Offset: baseOffset,
+				Time:   makeTime(timestamp),
+				Key:    key,
+				Value:  value,
+			},
+		}
+	} else {
+		// Can we have a non-nil key when reading a compressed message?
+		if key != nil {
+			key.Close()
+		}
+		if value == nil {
+			records = emptyRecordReader{}
+		} else {
+			defer value.Close()
+
+			codec := compression.Codec()
+			if codec == nil {
+				return Errorf("unsupported compression codec: %d", compression)
+			}
+			decompressor := codec.NewReader(value)
+			defer decompressor.Close()
+
+			b := newPageBuffer()
+			defer b.unref()
+
+			d := &decoder{
+				reader: decompressor,
+				remain: math.MaxInt32,
+			}
+
+			r := &recordReader{
+				records: make([]Record, 0, 32),
+			}
+
+			for !d.done() {
+				_, offset, timestamp, key, value, err := readMessage(b, d)
+				if err != nil {
+					if errors.Is(err, io.ErrUnexpectedEOF) {
+						break
+					}
+					for _, rec := range r.records {
+						closeBytes(rec.Key)
+						closeBytes(rec.Value)
+					}
+					return err
+				}
+				r.records = append(r.records, Record{
+					Offset: offset,
+					Time:   makeTime(timestamp),
+					Key:    key,
+					Value:  value,
+				})
+			}
+
+			if baseOffset != 0 {
+				// https://kafka.apache.org/documentation/#messageset
+				//
+				// In version 1, to avoid server side re-compression, only the
+				// wrapper message will be assigned an offset. The inner messages
+				// will have relative offsets. The absolute offset can be computed
+				// using the offset from the outer message, which corresponds to the
+				// offset assigned to the last inner message.
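+				//
+				// For example (sketch): with a wrapper offset of 100 and three
+				// inner messages carrying relative offsets 0, 1 and 2, the last
+				// relative offset is 2 and the absolute offsets become
+				// 100-(2-0)=98, 100-(2-1)=99 and 100-(2-2)=100.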
+				lastRelativeOffset := int64(len(r.records)) - 1
+
+				for i := range r.records {
+					r.records[i].Offset = baseOffset - (lastRelativeOffset - r.records[i].Offset)
+				}
+			}
+
+			records = r
+		}
+	}
+
+	*rs = RecordSet{
+		Version:    1,
+		Attributes: Attributes(attributes),
+		Records:    records,
+	}
+
+	return nil
+}
+
+func (rs *RecordSet) writeToVersion1(buffer *pageBuffer, bufferOffset int64) error {
+	attributes := rs.Attributes
+	records := rs.Records
+
+	if compression := attributes.Compression(); compression != 0 {
+		if codec := compression.Codec(); codec != nil {
+			// In the message format version 1, compression is achieved by
+			// compressing the value of a message which recursively contains
+			// the representation of the compressed message set.
+			subset := *rs
+			subset.Attributes &= ^7 // erase compression
+
+			if err := subset.writeToVersion1(buffer, bufferOffset); err != nil {
+				return err
+			}
+
+			compressed := newPageBuffer()
+			defer compressed.unref()
+
+			compressor := codec.NewWriter(compressed)
+			defer compressor.Close()
+
+			var err error
+			buffer.pages.scan(bufferOffset, buffer.Size(), func(b []byte) bool {
+				_, err = compressor.Write(b)
+				return err == nil
+			})
+			if err != nil {
+				return err
+			}
+			if err := compressor.Close(); err != nil {
+				return err
+			}
+
+			buffer.Truncate(int(bufferOffset))
+
+			records = &message{
+				Record: Record{
+					Value: compressed,
+				},
+			}
+		}
+	}
+
+	e := encoder{writer: buffer}
+	currentTimestamp := timestamp(time.Now())
+
+	return forEachRecord(records, func(i int, r *Record) error {
+		t := timestamp(r.Time)
+		if t == 0 {
+			t = currentTimestamp
+		}
+
+		messageOffset := buffer.Size()
+		e.writeInt64(int64(i))
+		e.writeInt32(0) // message size placeholder
+		e.writeInt32(0) // crc32 placeholder
+		e.setCRC(crc32.IEEETable)
+		e.writeInt8(1) // magic byte: version 1
+		e.writeInt8(int8(attributes))
+		e.writeInt64(t)
+
+		if err := e.writeNullBytesFrom(r.Key); err != nil {
+			return err
+		}
+
+		if err := e.writeNullBytesFrom(r.Value); err != nil {
+			return err
+		}
+
+		b0 := packUint32(uint32(buffer.Size() - (messageOffset + 12)))
+		b1 := packUint32(e.crc32)
+
+		buffer.WriteAt(b0[:], messageOffset+8)
+		buffer.WriteAt(b1[:], messageOffset+12)
+		e.setCRC(nil)
+		return nil
+	})
+}
+
+type message struct {
+	Record Record
+	read   bool
+}
+
+func (m *message) ReadRecord() (*Record, error) {
+	if m.read {
+		return nil, io.EOF
+	}
+	m.read = true
+	return &m.Record, nil
+}
diff -pruN 0.2.1-1.1/protocol/record_v2.go 0.4.49+ds1-1/protocol/record_v2.go
--- 0.2.1-1.1/protocol/record_v2.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/protocol/record_v2.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,315 @@
+package protocol
+
+import (
+	"fmt"
+	"hash/crc32"
+	"io"
+	"time"
+)
+
+func (rs *RecordSet) readFromVersion2(d *decoder) error {
+	baseOffset := d.readInt64()
+	batchLength := d.readInt32()
+
+	if int(batchLength) > d.remain || d.err != nil {
+		d.discardAll()
+		return nil
+	}
+
+	dec := &decoder{
+		reader: d,
+		remain: int(batchLength),
+	}
+
+	partitionLeaderEpoch := dec.readInt32()
+	magicByte := dec.readInt8()
+	crc := dec.readInt32()
+
+	dec.setCRC(crc32.MakeTable(crc32.Castagnoli))
+
+	attributes := dec.readInt16()
+	lastOffsetDelta := dec.readInt32()
+	firstTimestamp := dec.readInt64()
+	maxTimestamp := dec.readInt64()
+	producerID := dec.readInt64()
+	producerEpoch := dec.readInt16()
+	baseSequence := dec.readInt32()
+	numRecords := dec.readInt32()
+	reader := io.Reader(dec)
+
+	// unused
+	_ = lastOffsetDelta
+	_ = maxTimestamp
+
+	if compression := Attributes(attributes).Compression(); compression != 0 {
+		codec := compression.Codec()
+		if codec == nil {
+			return fmt.Errorf("unsupported compression codec (%d)", compression)
+		}
+		decompressor := codec.NewReader(reader)
+		defer decompressor.Close()
+		reader = decompressor
+	}
+
+	buffer := newPageBuffer()
+	defer buffer.unref()
+
+	_, err := buffer.ReadFrom(reader)
+	if err != nil {
+		return err
+	}
+	if dec.crc32 != uint32(crc) {
+		return fmt.Errorf("crc32 checksum mismatch (computed=%d found=%d)", dec.crc32, uint32(crc))
+	}
+
+	recordsLength := buffer.Len()
+	dec.reader = buffer
+	dec.remain = recordsLength
+
+	records := make([]optimizedRecord, numRecords)
+	// This is a lazy allocator used to optimize allocation of page references
+	// for keys and values.
+	//
+	// By default, no memory is allocated; on first use, numRecords page refs
+	// are allocated in a contiguous memory space, and the allocator returns
+	// pointers into that array for each page ref that gets requested.
+	//
+	// The reasoning is that kafka partitions typically have records of a single
+	// form, which either have no keys, no values, or both keys and values.
+	// Using lazy allocators adapts nicely to these patterns to only allocate
+	// the memory that is needed by the program, while still reducing the number
+	// of malloc calls made by the program.
+	//
+	// Using a single allocator for both keys and values keeps related values
+	// close by in memory, making access to the records more friendly to CPU
+	// caches.
+	alloc := pageRefAllocator{size: int(numRecords)}
+	// Following the same reasoning that kafka partitions will typically have
+	// records with repeating formats, we expect to either find records with
+	// no headers, or records which always contain headers.
+	//
+	// To reduce the memory footprint when records have no headers, the Header
+	// slices are lazily allocated in a separate array.
+	headers := ([][]Header)(nil)
+
+	for i := range records {
+		r := &records[i]
+		_ = dec.readVarInt() // record length (unused)
+		_ = dec.readInt8()   // record attributes (unused)
+		timestampDelta := dec.readVarInt()
+		offsetDelta := dec.readVarInt()
+
+		r.offset = baseOffset + offsetDelta
+		r.timestamp = firstTimestamp + timestampDelta
+
+		keyLength := dec.readVarInt()
+		keyOffset := int64(recordsLength - dec.remain)
+		if keyLength > 0 {
+			dec.discard(int(keyLength))
+		}
+
+		valueLength := dec.readVarInt()
+		valueOffset := int64(recordsLength - dec.remain)
+		if valueLength > 0 {
+			dec.discard(int(valueLength))
+		}
+
+		if numHeaders := dec.readVarInt(); numHeaders > 0 {
+			if headers == nil {
+				headers = make([][]Header, numRecords)
+			}
+
+			h := make([]Header, numHeaders)
+
+			for i := range h {
+				h[i] = Header{
+					Key:   dec.readVarString(),
+					Value: dec.readVarBytes(),
+				}
+			}
+
+			headers[i] = h
+		}
+
+		if dec.err != nil {
+			records = records[:i]
+			break
+		}
+
+		if keyLength >= 0 {
+			r.keyRef = alloc.newPageRef()
+			buffer.refTo(r.keyRef, keyOffset, keyOffset+keyLength)
+		}
+
+		if valueLength >= 0 {
+			r.valueRef = alloc.newPageRef()
+			buffer.refTo(r.valueRef, valueOffset, valueOffset+valueLength)
+		}
+	}
+
+	// Note: it's unclear whether kafka 0.11+ still truncates the responses;
+	// all attempts I made at constructing a test to trigger a truncation have
+	// failed. I kept this code here as a safeguard but it may never execute.
+	if dec.err != nil && len(records) == 0 {
+		return dec.err
+	}
+
+	*rs = RecordSet{
+		Version:    magicByte,
+		Attributes: Attributes(attributes),
+		Records: &optimizedRecordReader{
+			records: records,
+			headers: headers,
+		},
+	}
+
+	if rs.Attributes.Control() {
+		rs.Records = &ControlBatch{
+			Attributes:           rs.Attributes,
+			PartitionLeaderEpoch: partitionLeaderEpoch,
+			BaseOffset:           baseOffset,
+			ProducerID:           producerID,
+			ProducerEpoch:        producerEpoch,
+			BaseSequence:         baseSequence,
+			Records:              rs.Records,
+		}
+	} else {
+		rs.Records = &RecordBatch{
+			Attributes:           rs.Attributes,
+			PartitionLeaderEpoch: partitionLeaderEpoch,
+			BaseOffset:           baseOffset,
+			ProducerID:           producerID,
+			ProducerEpoch:        producerEpoch,
+			BaseSequence:         baseSequence,
+			Records:              rs.Records,
+		}
+	}
+
+	return nil
+}
+
+func (rs *RecordSet) writeToVersion2(buffer *pageBuffer, bufferOffset int64) error {
+	records := rs.Records
+	numRecords := int32(0)
+
+	e := &encoder{writer: buffer}
+	e.writeInt64(0)                    // base offset                         |  0 +8
+	e.writeInt32(0)                    // placeholder for record batch length |  8 +4
+	e.writeInt32(-1)                   // partition leader epoch              | 12 +4
+	e.writeInt8(2)                     // magic byte                          | 16 +1
+	e.writeInt32(0)                    // placeholder for crc32 checksum      | 17 +4
+	e.writeInt16(int16(rs.Attributes)) // attributes                          | 21 +2
+	e.writeInt32(0)                    // placeholder for lastOffsetDelta     | 23 +4
+	e.writeInt64(0)                    // placeholder for firstTimestamp      | 27 +8
+	e.writeInt64(0)                    // placeholder for maxTimestamp        | 35 +8
+	e.writeInt64(-1)                   // producer id                         | 43 +8
+	e.writeInt16(-1)                   // producer epoch                      | 51 +2
+	e.writeInt32(-1)                   // base sequence                       | 53 +4
+	e.writeInt32(0)                    // placeholder for numRecords          | 57 +4
+
+	var compressor io.WriteCloser
+	if compression := rs.Attributes.Compression(); compression != 0 {
+		if codec := compression.Codec(); codec != nil {
+			compressor = codec.NewWriter(buffer)
+			e.writer = compressor
+		}
+	}
+
+	currentTimestamp := timestamp(time.Now())
+	lastOffsetDelta := int32(0)
+	firstTimestamp := int64(0)
+	maxTimestamp := int64(0)
+
+	err := forEachRecord(records, func(i int, r *Record) error {
+		t := timestamp(r.Time)
+		if t == 0 {
+			t = currentTimestamp
+		}
+		if i == 0 {
+			firstTimestamp = t
+		}
+		if t > maxTimestamp {
+			maxTimestamp = t
+		}
+
+		timestampDelta := t - firstTimestamp
+		offsetDelta := int64(i)
+		lastOffsetDelta = int32(offsetDelta)
+
+		length := 1 + // attributes
+			sizeOfVarInt(timestampDelta) +
+			sizeOfVarInt(offsetDelta) +
+			sizeOfVarNullBytesIface(r.Key) +
+			sizeOfVarNullBytesIface(r.Value) +
+			sizeOfVarInt(int64(len(r.Headers)))
+
+		for _, h := range r.Headers {
+			length += sizeOfVarString(h.Key) + sizeOfVarNullBytes(h.Value)
+		}
+
+		e.writeVarInt(int64(length))
+		e.writeInt8(0) // record attributes (unused)
+		e.writeVarInt(timestampDelta)
+		e.writeVarInt(offsetDelta)
+
+		if err := e.writeVarNullBytesFrom(r.Key); err != nil {
+			return err
+		}
+
+		if err := e.writeVarNullBytesFrom(r.Value); err != nil {
+			return err
+		}
+
+		e.writeVarInt(int64(len(r.Headers)))
+
+		for _, h := range r.Headers {
+			e.writeVarString(h.Key)
+			e.writeVarNullBytes(h.Value)
+		}
+
+		numRecords++
+		return nil
+	})
+
+	if err != nil {
+		return err
+	}
+
+	if compressor != nil {
+		if err := compressor.Close(); err != nil {
+			return err
+		}
+	}
+
+	if numRecords == 0 {
+		return ErrNoRecord
+	}
+
+	b2 := packUint32(uint32(lastOffsetDelta))
+	b3 := packUint64(uint64(firstTimestamp))
+	b4 := packUint64(uint64(maxTimestamp))
+	b5 := packUint32(uint32(numRecords))
+
+	buffer.WriteAt(b2[:], bufferOffset+23)
+	buffer.WriteAt(b3[:], bufferOffset+27)
+	buffer.WriteAt(b4[:], bufferOffset+35)
+	buffer.WriteAt(b5[:], bufferOffset+57)
+
+	totalLength := buffer.Size() - bufferOffset
+	batchLength := totalLength - 12
+
+	checksum := uint32(0)
+	crcTable := crc32.MakeTable(crc32.Castagnoli)
+
+	buffer.pages.scan(bufferOffset+21, bufferOffset+totalLength, func(chunk []byte) bool {
+		checksum = crc32.Update(checksum, crcTable, chunk)
+		return true
+	})
+
+	b0 := packUint32(uint32(batchLength))
+	b1 := packUint32(checksum)
+
+	buffer.WriteAt(b0[:], bufferOffset+8)
+	buffer.WriteAt(b1[:], bufferOffset+17)
+	return nil
+}
diff -pruN 0.2.1-1.1/protocol/reflect.go 0.4.49+ds1-1/protocol/reflect.go
--- 0.2.1-1.1/protocol/reflect.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/protocol/reflect.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,102 @@
+//go:build !unsafe
+// +build !unsafe
+
+package protocol
+
+import (
+	"reflect"
+)
+
+type index []int
+
+type _type struct{ typ reflect.Type }
+
+func typeOf(x interface{}) _type {
+	return makeType(reflect.TypeOf(x))
+}
+
+func elemTypeOf(x interface{}) _type {
+	return makeType(reflect.TypeOf(x).Elem())
+}
+
+func makeType(t reflect.Type) _type {
+	return _type{typ: t}
+}
+
+type value struct {
+	val reflect.Value
+}
+
+func nonAddressableValueOf(x interface{}) value {
+	return value{val: reflect.ValueOf(x)}
+}
+
+func valueOf(x interface{}) value {
+	return value{val: reflect.ValueOf(x).Elem()}
+}
+
+func (v value) bool() bool { return v.val.Bool() }
+
+func (v value) int8() int8 { return int8(v.int64()) }
+
+func (v value) int16() int16 { return int16(v.int64()) }
+
+func (v value) int32() int32 { return int32(v.int64()) }
+
+func (v value) int64() int64 { return v.val.Int() }
+
+func (v value) float64() float64 { return v.val.Float() }
+
+func (v value) string() string { return v.val.String() }
+
+func (v value) bytes() []byte { return v.val.Bytes() }
+
+func (v value) iface(t reflect.Type) interface{} { return v.val.Addr().Interface() }
+
+func (v value) array(t reflect.Type) array { return array(v) }
+
+func (v value) setBool(b bool) { v.val.SetBool(b) }
+
+func (v value) setInt8(i int8) { v.setInt64(int64(i)) }
+
+func (v value) setInt16(i int16) { v.setInt64(int64(i)) }
+
+func (v value) setInt32(i int32) { v.setInt64(int64(i)) }
+
+func (v value) setInt64(i int64) { v.val.SetInt(i) }
+
+func (v value) setFloat64(f float64) { v.val.SetFloat(f) }
+
+func (v value) setString(s string) { v.val.SetString(s) }
+
+func (v value) setBytes(b []byte) { v.val.SetBytes(b) }
+
+func (v value) setArray(a array) {
+	if a.val.IsValid() {
+		v.val.Set(a.val)
+	} else {
+		v.val.Set(reflect.Zero(v.val.Type()))
+	}
+}
+
+func (v value) fieldByIndex(i index) value {
+	return value{val: v.val.FieldByIndex(i)}
+}
+
+type array struct {
+	val reflect.Value
+}
+
+func makeArray(t reflect.Type, n int) array {
+	return array{val: reflect.MakeSlice(reflect.SliceOf(t), n, n)}
+}
+
+func (a array) index(i int) value { return value{val: a.val.Index(i)} }
+
+func (a array) length() int { return a.val.Len() }
+
+func (a array) isNil() bool { return a.val.IsNil() }
+
+func indexOf(s reflect.StructField) index { return index(s.Index) }
+
+func bytesToString(b []byte) string { return string(b) }
diff -pruN 0.2.1-1.1/protocol/reflect_unsafe.go 0.4.49+ds1-1/protocol/reflect_unsafe.go
--- 0.2.1-1.1/protocol/reflect_unsafe.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/protocol/reflect_unsafe.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,143 @@
+//go:build unsafe
+// +build unsafe
+
+package protocol
+
+import (
+	"reflect"
+	"unsafe"
+)
+
+type iface struct {
+	typ unsafe.Pointer
+	ptr unsafe.Pointer
+}
+
+type slice struct {
+	ptr unsafe.Pointer
+	len int
+	cap int
+}
+
+type index uintptr
+
+type _type struct {
+	ptr unsafe.Pointer
+}
+
+func typeOf(x interface{}) _type {
+	return _type{ptr: ((*iface)(unsafe.Pointer(&x))).typ}
+}
+
+func elemTypeOf(x interface{}) _type {
+	return makeType(reflect.TypeOf(x).Elem())
+}
+
+func makeType(t reflect.Type) _type {
+	return _type{ptr: ((*iface)(unsafe.Pointer(&t))).ptr}
+}
+
+type value struct {
+	ptr unsafe.Pointer
+}
+
+func nonAddressableValueOf(x interface{}) value {
+	return valueOf(x)
+}
+
+func valueOf(x interface{}) value {
+	return value{ptr: ((*iface)(unsafe.Pointer(&x))).ptr}
+}
+
+func makeValue(t reflect.Type) value {
+	return value{ptr: unsafe.Pointer(reflect.New(t).Pointer())}
+}
+
+func (v value) bool() bool { return *(*bool)(v.ptr) }
+
+func (v value) int8() int8 { return *(*int8)(v.ptr) }
+
+func (v value) int16() int16 { return *(*int16)(v.ptr) }
+
+func (v value) int32() int32 { return *(*int32)(v.ptr) }
+
+func (v value) int64() int64 { return *(*int64)(v.ptr) }
+
+func (v value) float64() float64 { return *(*float64)(v.ptr) }
+
+func (v value) string() string { return *(*string)(v.ptr) }
+
+func (v value) bytes() []byte { return *(*[]byte)(v.ptr) }
+
+func (v value) iface(t reflect.Type) interface{} {
+	return *(*interface{})(unsafe.Pointer(&iface{
+		typ: ((*iface)(unsafe.Pointer(&t))).ptr,
+		ptr: v.ptr,
+	}))
+}
+
+func (v value) array(t reflect.Type) array {
+	return array{
+		size: uintptr(t.Size()),
+		elem: ((*slice)(v.ptr)).ptr,
+		len:  ((*slice)(v.ptr)).len,
+	}
+}
+
+func (v value) setBool(b bool) { *(*bool)(v.ptr) = b }
+
+func (v value) setInt8(i int8) { *(*int8)(v.ptr) = i }
+
+func (v value) setInt16(i int16) { *(*int16)(v.ptr) = i }
+
+func (v value) setInt32(i int32) { *(*int32)(v.ptr) = i }
+
+func (v value) setInt64(i int64) { *(*int64)(v.ptr) = i }
+
+func (v value) setFloat64(f float64) { *(*float64)(v.ptr) = f }
+
+func (v value) setString(s string) { *(*string)(v.ptr) = s }
+
+func (v value) setBytes(b []byte) { *(*[]byte)(v.ptr) = b }
+
+func (v value) setArray(a array) { *(*slice)(v.ptr) = slice{ptr: a.elem, len: a.len, cap: a.len} }
+
+func (v value) fieldByIndex(i index) value {
+	return value{ptr: unsafe.Pointer(uintptr(v.ptr) + uintptr(i))}
+}
+
+type array struct {
+	elem unsafe.Pointer
+	size uintptr
+	len  int
+}
+
+var (
+	emptyArray struct{}
+)
+
+func makeArray(t reflect.Type, n int) array {
+	var elem unsafe.Pointer
+	var size = uintptr(t.Size())
+	if n == 0 {
+		elem = unsafe.Pointer(&emptyArray)
+	} else {
+		elem = unsafe_NewArray(((*iface)(unsafe.Pointer(&t))).ptr, n)
+	}
+	return array{elem: elem, size: size, len: n}
+}
+
+func (a array) index(i int) value {
+	return value{ptr: unsafe.Pointer(uintptr(a.elem) + (uintptr(i) * a.size))}
+}
+
+func (a array) length() int { return a.len }
+
+func (a array) isNil() bool { return a.elem == nil }
+
+func indexOf(s reflect.StructField) index { return index(s.Offset) }
+
+func bytesToString(b []byte) string { return *(*string)(unsafe.Pointer(&b)) }
+
+//go:linkname unsafe_NewArray reflect.unsafe_NewArray
+func unsafe_NewArray(rtype unsafe.Pointer, length int) unsafe.Pointer
diff -pruN 0.2.1-1.1/protocol/request.go 0.4.49+ds1-1/protocol/request.go
--- 0.2.1-1.1/protocol/request.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/protocol/request.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,134 @@
+package protocol
+
+import (
+	"fmt"
+	"io"
+)
+
+func ReadRequest(r io.Reader) (apiVersion int16, correlationID int32, clientID string, msg Message, err error) {
+	d := &decoder{reader: r, remain: 4}
+	size := d.readInt32()
+
+	if err = d.err; err != nil {
+		err = dontExpectEOF(err)
+		return
+	}
+
+	d.remain = int(size)
+	apiKey := ApiKey(d.readInt16())
+	apiVersion = d.readInt16()
+	correlationID = d.readInt32()
+	clientID = d.readString()
+
+	if i := int(apiKey); i < 0 || i >= len(apiTypes) {
+		err = fmt.Errorf("unsupported api key: %d", i)
+		return
+	}
+
+	if err = d.err; err != nil {
+		err = dontExpectEOF(err)
+		return
+	}
+
+	t := &apiTypes[apiKey]
+	if t == nil {
+		err = fmt.Errorf("unsupported api: %s", apiNames[apiKey])
+		return
+	}
+
+	minVersion := t.minVersion()
+	maxVersion := t.maxVersion()
+
+	if apiVersion < minVersion || apiVersion > maxVersion {
+		err = fmt.Errorf("unsupported %s version: v%d not in range v%d-v%d", apiKey, apiVersion, minVersion, maxVersion)
+		return
+	}
+
+	req := &t.requests[apiVersion-minVersion]
+
+	if req.flexible {
+		// In the flexible case, there's a tag buffer at the end of the request header
+		taggedCount := int(d.readUnsignedVarInt())
+		for i := 0; i < taggedCount; i++ {
+			d.readUnsignedVarInt() // tagID
+			size := d.readUnsignedVarInt()
+
+			// Just throw away the values for now
+			d.read(int(size))
+		}
+	}
+
+	msg = req.new()
+	req.decode(d, valueOf(msg))
+	d.discardAll()
+
+	if err = d.err; err != nil {
+		err = dontExpectEOF(err)
+	}
+
+	return
+}
+
+func WriteRequest(w io.Writer, apiVersion int16, correlationID int32, clientID string, msg Message) error {
+	apiKey := msg.ApiKey()
+
+	if i := int(apiKey); i < 0 || i >= len(apiTypes) {
+		return fmt.Errorf("unsupported api key: %d", i)
+	}
+
+	t := &apiTypes[apiKey]
+	if t == nil {
+		return fmt.Errorf("unsupported api: %s", apiNames[apiKey])
+	}
+
+	if typedMessage, ok := msg.(OverrideTypeMessage); ok {
+		typeKey := typedMessage.TypeKey()
+		overrideType := overrideApiTypes[apiKey][typeKey]
+		t = &overrideType
+	}
+
+	minVersion := t.minVersion()
+	maxVersion := t.maxVersion()
+
+	if apiVersion < minVersion || apiVersion > maxVersion {
+		return fmt.Errorf("unsupported %s version: v%d not in range v%d-v%d", apiKey, apiVersion, minVersion, maxVersion)
+	}
+
+	r := &t.requests[apiVersion-minVersion]
+	v := valueOf(msg)
+	b := newPageBuffer()
+	defer b.unref()
+
+	e := &encoder{writer: b}
+	e.writeInt32(0) // placeholder for the request size
+	e.writeInt16(int16(apiKey))
+	e.writeInt16(apiVersion)
+	e.writeInt32(correlationID)
+
+	if r.flexible {
+		// Flexible messages use a nullable string for the client ID, then extra space for a
+		// tag buffer, which begins with a size value. Since we're not writing any fields into the
+		// latter, we can just write zero for now.
+		//
+		// See
+		// https://cwiki.apache.org/confluence/display/KAFKA/KIP-482%3A+The+Kafka+Protocol+should+Support+Optional+Tagged+Fields
+		// for details.
+		e.writeNullString(clientID)
+		e.writeUnsignedVarInt(0)
+	} else {
+		// Technically, recent versions of kafka interpret this field as a nullable
+		// string, however kafka 0.10 expected a non-nullable string and fails with
+		// a NullPointerException when it receives a null client id.
+		e.writeString(clientID)
+	}
+	r.encode(e, v)
+	err := e.err
+
+	if err == nil {
+		size := packUint32(uint32(b.Size()) - 4)
+		b.WriteAt(size[:], 0)
+		_, err = b.WriteTo(w)
+	}
+
+	return err
+}
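+
+// As a sketch (req stands for any value of a registered protocol.Message type
+// whose API supports version 1; error handling is abbreviated), a request can
+// be written to and read back from an in-memory buffer:
+//
+//	var buf bytes.Buffer
+//	_ = WriteRequest(&buf, 1, 42, "client-id", req)
+//	apiVersion, correlationID, clientID, msg, err := ReadRequest(&buf)
+//	// apiVersion == 1, correlationID == 42, clientID == "client-id",
+//	// and msg holds the decoded request body.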
diff -pruN 0.2.1-1.1/protocol/response.go 0.4.49+ds1-1/protocol/response.go
--- 0.2.1-1.1/protocol/response.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/protocol/response.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,157 @@
+package protocol
+
+import (
+	"crypto/tls"
+	"encoding/binary"
+	"errors"
+	"fmt"
+	"io"
+)
+
+func ReadResponse(r io.Reader, apiKey ApiKey, apiVersion int16) (correlationID int32, msg Message, err error) {
+	if i := int(apiKey); i < 0 || i >= len(apiTypes) {
+		err = fmt.Errorf("unsupported api key: %d", i)
+		return
+	}
+
+	t := &apiTypes[apiKey]
+	if t == nil {
+		err = fmt.Errorf("unsupported api: %s", apiNames[apiKey])
+		return
+	}
+
+	minVersion := t.minVersion()
+	maxVersion := t.maxVersion()
+
+	if apiVersion < minVersion || apiVersion > maxVersion {
+		err = fmt.Errorf("unsupported %s version: v%d not in range v%d-v%d", apiKey, apiVersion, minVersion, maxVersion)
+		return
+	}
+
+	d := &decoder{reader: r, remain: 4}
+	size := d.readInt32()
+
+	if err = d.err; err != nil {
+		err = dontExpectEOF(err)
+		return
+	}
+
+	d.remain = int(size)
+	correlationID = d.readInt32()
+	if err = d.err; err != nil {
+		if errors.Is(err, io.ErrUnexpectedEOF) {
+			// If a Writer/Reader is configured without TLS and connects
+			// to a broker expecting TLS, the only message we return to the
+			// caller is io.ErrUnexpectedEOF, which is opaque. This section
+			// tries to determine if that's what has happened.
+			// We first deconstruct the initial 4 bytes of the message
+			// from the size which was read earlier.
+			// Next, we examine those bytes to see if they look like a TLS
+			// error message. If they do we wrap the io.ErrUnexpectedEOF
+			// with some context.
+			if looksLikeUnexpectedTLS(size) {
+				err = fmt.Errorf("%w: broker appears to be expecting TLS", io.ErrUnexpectedEOF)
+			}
+			return
+		}
+		err = dontExpectEOF(err)
+		return
+	}
+
+	res := &t.responses[apiVersion-minVersion]
+
+	if res.flexible {
+		// In the flexible case, there's a tag buffer at the end of the response header
+		taggedCount := int(d.readUnsignedVarInt())
+		for i := 0; i < taggedCount; i++ {
+			d.readUnsignedVarInt() // tagID
+			size := d.readUnsignedVarInt()
+
+			// Just throw away the values for now
+			d.read(int(size))
+		}
+	}
+
+	msg = res.new()
+	res.decode(d, valueOf(msg))
+	d.discardAll()
+
+	if err = d.err; err != nil {
+		err = dontExpectEOF(err)
+	}
+
+	return
+}
+
+func WriteResponse(w io.Writer, apiVersion int16, correlationID int32, msg Message) error {
+	apiKey := msg.ApiKey()
+
+	if i := int(apiKey); i < 0 || i >= len(apiTypes) {
+		return fmt.Errorf("unsupported api key: %d", i)
+	}
+
+	t := &apiTypes[apiKey]
+	if t == nil {
+		return fmt.Errorf("unsupported api: %s", apiNames[apiKey])
+	}
+
+	if typedMessage, ok := msg.(OverrideTypeMessage); ok {
+		typeKey := typedMessage.TypeKey()
+		overrideType := overrideApiTypes[apiKey][typeKey]
+		t = &overrideType
+	}
+
+	minVersion := t.minVersion()
+	maxVersion := t.maxVersion()
+
+	if apiVersion < minVersion || apiVersion > maxVersion {
+		return fmt.Errorf("unsupported %s version: v%d not in range v%d-v%d", apiKey, apiVersion, minVersion, maxVersion)
+	}
+
+	r := &t.responses[apiVersion-minVersion]
+	v := valueOf(msg)
+	b := newPageBuffer()
+	defer b.unref()
+
+	e := &encoder{writer: b}
+	e.writeInt32(0) // placeholder for the response size
+	e.writeInt32(correlationID)
+	if r.flexible {
+		// Flexible messages use extra space for a tag buffer,
+		// which begins with a size value. Since we're not writing any fields into the
+		// latter, we can just write zero for now.
+		//
+		// See
+		// https://cwiki.apache.org/confluence/display/KAFKA/KIP-482%3A+The+Kafka+Protocol+should+Support+Optional+Tagged+Fields
+		// for details.
+		e.writeUnsignedVarInt(0)
+	}
+	r.encode(e, v)
+	err := e.err
+
+	if err == nil {
+		size := packUint32(uint32(b.Size()) - 4)
+		b.WriteAt(size[:], 0)
+		_, err = b.WriteTo(w)
+	}
+
+	return err
+}
+
+const (
+	tlsAlertByte byte = 0x15
+)
+
+// looksLikeUnexpectedTLS returns true if the size passed in resembles
+// the TLS alert message that is returned to a client which sends
+// an invalid ClientHello message.
+func looksLikeUnexpectedTLS(size int32) bool {
+	var sizeBytes [4]byte
+	binary.BigEndian.PutUint32(sizeBytes[:], uint32(size))
+
+	if sizeBytes[0] != tlsAlertByte {
+		return false
+	}
+	version := int(sizeBytes[1])<<8 | int(sizeBytes[2])
+	return version <= tls.VersionTLS13 && version >= tls.VersionTLS10
+}
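+
+// As a sketch of what the check above catches: a TLS alert record begins with
+// the byte 0x15 (alert content type) followed by a two-byte protocol version
+// such as 0x03 0x03. Read as a big-endian int32 message size, those bytes put
+// 0x15 in the top byte and a value between tls.VersionTLS10 and
+// tls.VersionTLS13 in the next two, which is exactly what
+// looksLikeUnexpectedTLS tests for.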
diff -pruN 0.2.1-1.1/protocol/response_test.go 0.4.49+ds1-1/protocol/response_test.go
--- 0.2.1-1.1/protocol/response_test.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/protocol/response_test.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,28 @@
+package protocol
+
+import (
+	"bytes"
+	"errors"
+	"io"
+	"strings"
+	"testing"
+)
+
+func TestReadResponseUnexpectedTLSDetection(t *testing.T) {
+	var buf bytes.Buffer
+
+	buf.Write([]byte{tlsAlertByte, 0x03, 0x03, 10, 0, 0, 0})
+
+	correlationID, _, err := ReadResponse(&buf, ApiVersions, 0)
+	if !errors.Is(err, io.ErrUnexpectedEOF) {
+		t.Fatalf("expected an io.ErrUnexpectedEOF from ReadResponse got %v", err)
+	}
+
+	if !strings.Contains(err.Error(), "broker appears to be expecting TLS") {
+		t.Fatalf("expected error message to contain %s got %s", "broker appears to be expecting TLS", err.Error())
+	}
+
+	if correlationID != 0 {
+		t.Fatalf("expected correlationID of 0 got %d", correlationID)
+	}
+}
diff -pruN 0.2.1-1.1/protocol/roundtrip.go 0.4.49+ds1-1/protocol/roundtrip.go
--- 0.2.1-1.1/protocol/roundtrip.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/protocol/roundtrip.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,28 @@
+package protocol
+
+import (
+	"io"
+)
+
+// RoundTrip sends a request to a kafka broker and returns the response.
+func RoundTrip(rw io.ReadWriter, apiVersion int16, correlationID int32, clientID string, req Message) (Message, error) {
+	if err := WriteRequest(rw, apiVersion, correlationID, clientID, req); err != nil {
+		return nil, err
+	}
+	if !hasResponse(req) {
+		return nil, nil
+	}
+	id, res, err := ReadResponse(rw, req.ApiKey(), apiVersion)
+	if err != nil {
+		return nil, err
+	}
+	if id != correlationID {
+		return nil, Errorf("correlation id mismatch (expected=%d, found=%d)", correlationID, id)
+	}
+	return res, nil
+}
+
+func hasResponse(msg Message) bool {
+	x, _ := msg.(interface{ HasResponse() bool })
+	return x == nil || x.HasResponse()
+}
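+
+// A minimal usage sketch (conn is assumed to be an established connection to
+// a broker, req any registered protocol.Message, and the apiVersion of 1 is
+// illustrative):
+//
+//	res, err := RoundTrip(conn, 1, 0, "my-client", req)
+//	if err != nil {
+//		// transport errors, unsupported versions, or a correlation id
+//		// mismatch end up here
+//	}
+//	_ = res // nil for messages that have no response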
diff -pruN 0.2.1-1.1/protocol/saslauthenticate/saslauthenticate.go 0.4.49+ds1-1/protocol/saslauthenticate/saslauthenticate.go
--- 0.2.1-1.1/protocol/saslauthenticate/saslauthenticate.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/protocol/saslauthenticate/saslauthenticate.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,66 @@
+package saslauthenticate
+
+import (
+	"encoding/binary"
+	"io"
+
+	"github.com/segmentio/kafka-go/protocol"
+)
+
+func init() {
+	protocol.Register(&Request{}, &Response{})
+}
+
+type Request struct {
+	AuthBytes []byte `kafka:"min=v0,max=v1"`
+}
+
+func (r *Request) RawExchange(rw io.ReadWriter) (protocol.Message, error) {
+	if err := r.writeTo(rw); err != nil {
+		return nil, err
+	}
+	return r.readResp(rw)
+}
+
+func (*Request) Required(versions map[protocol.ApiKey]int16) bool {
+	const v0 = 0
+	return versions[protocol.SaslHandshake] == v0
+}
+
+func (r *Request) writeTo(w io.Writer) error {
+	size := len(r.AuthBytes) + 4
+	buf := make([]byte, size)
+	binary.BigEndian.PutUint32(buf[:4], uint32(len(r.AuthBytes)))
+	copy(buf[4:], r.AuthBytes)
+	_, err := w.Write(buf)
+	return err
+}
+
+func (r *Request) readResp(read io.Reader) (protocol.Message, error) {
+	var lenBuf [4]byte
+	if _, err := io.ReadFull(read, lenBuf[:]); err != nil {
+		return nil, err
+	}
+	respLen := int32(binary.BigEndian.Uint32(lenBuf[:]))
+	data := make([]byte, respLen)
+
+	if _, err := io.ReadFull(read, data[:]); err != nil {
+		return nil, err
+	}
+	return &Response{
+		AuthBytes: data,
+	}, nil
+}
+
+func (r *Request) ApiKey() protocol.ApiKey { return protocol.SaslAuthenticate }
+
+type Response struct {
+	ErrorCode         int16  `kafka:"min=v0,max=v1"`
+	ErrorMessage      string `kafka:"min=v0,max=v1,nullable"`
+	AuthBytes         []byte `kafka:"min=v0,max=v1"`
+	SessionLifetimeMs int64  `kafka:"min=v1,max=v1"`
+}
+
+func (r *Response) ApiKey() protocol.ApiKey { return protocol.SaslAuthenticate }
+
+var _ protocol.RawExchanger = (*Request)(nil)
diff -pruN 0.2.1-1.1/protocol/saslhandshake/saslhandshake.go 0.4.49+ds1-1/protocol/saslhandshake/saslhandshake.go
--- 0.2.1-1.1/protocol/saslhandshake/saslhandshake.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/protocol/saslhandshake/saslhandshake.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,20 @@
+package saslhandshake
+
+import "github.com/segmentio/kafka-go/protocol"
+
+func init() {
+	protocol.Register(&Request{}, &Response{})
+}
+
+type Request struct {
+	Mechanism string `kafka:"min=v0,max=v1"`
+}
+
+func (r *Request) ApiKey() protocol.ApiKey { return protocol.SaslHandshake }
+
+type Response struct {
+	ErrorCode  int16    `kafka:"min=v0,max=v1"`
+	Mechanisms []string `kafka:"min=v0,max=v1"`
+}
+
+func (r *Response) ApiKey() protocol.ApiKey { return protocol.SaslHandshake }
diff -pruN 0.2.1-1.1/protocol/size.go 0.4.49+ds1-1/protocol/size.go
--- 0.2.1-1.1/protocol/size.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/protocol/size.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,33 @@
+package protocol
+
+import (
+	"math/bits"
+)
+
+func sizeOfVarString(s string) int {
+	return sizeOfVarInt(int64(len(s))) + len(s)
+}
+
+func sizeOfVarNullBytes(b []byte) int {
+	if b == nil {
+		return sizeOfVarInt(-1)
+	}
+	n := len(b)
+	return sizeOfVarInt(int64(n)) + n
+}
+
+func sizeOfVarNullBytesIface(b Bytes) int {
+	if b == nil {
+		return sizeOfVarInt(-1)
+	}
+	n := b.Len()
+	return sizeOfVarInt(int64(n)) + n
+}
+
+func sizeOfVarInt(i int64) int {
+	return sizeOfUnsignedVarInt(uint64((i << 1) ^ (i >> 63))) // zig-zag encoding
+}
+
+func sizeOfUnsignedVarInt(i uint64) int {
+	return (bits.Len64(i|1) + 6) / 7
+}
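+
+// As a sketch of the zig-zag mapping above: 0 encodes to 0, -1 to 1, 1 to 2
+// and -2 to 3, so small values of either sign stay short; sizeOfVarInt(-1)
+// and sizeOfVarInt(63) both return 1, while sizeOfVarInt(64) needs 2 bytes.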
diff -pruN 0.2.1-1.1/protocol/syncgroup/syncgroup.go 0.4.49+ds1-1/protocol/syncgroup/syncgroup.go
--- 0.2.1-1.1/protocol/syncgroup/syncgroup.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/protocol/syncgroup/syncgroup.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,50 @@
+package syncgroup
+
+import "github.com/segmentio/kafka-go/protocol"
+
+func init() {
+	protocol.Register(&Request{}, &Response{})
+}
+
+type Request struct {
+	// We need at least one tagged field to indicate that this is a "flexible" message
+	// type.
+	_ struct{} `kafka:"min=v4,max=v5,tag"`
+
+	GroupID         string              `kafka:"min=v0,max=v3|min=v4,max=v5,compact"`
+	GenerationID    int32               `kafka:"min=v0,max=v5|min=v4,max=v5,compact"`
+	MemberID        string              `kafka:"min=v0,max=v3|min=v4,max=v5,compact"`
+	GroupInstanceID string              `kafka:"min=v3,max=v3,nullable|min=v4,max=v5,nullable,compact"`
+	ProtocolType    string              `kafka:"min=v5,max=v5"`
+	ProtocolName    string              `kafka:"min=v5,max=v5"`
+	Assignments     []RequestAssignment `kafka:"min=v0,max=v5"`
+}
+
+type RequestAssignment struct {
+	// We need at least one tagged field to indicate that this is a "flexible" message
+	// type.
+	_ struct{} `kafka:"min=v4,max=v5,tag"`
+
+	MemberID   string `kafka:"min=v0,max=v3|min=v4,max=v5,compact"`
+	Assignment []byte `kafka:"min=v0,max=v3|min=v4,max=v5,compact"`
+}
+
+func (r *Request) ApiKey() protocol.ApiKey { return protocol.SyncGroup }
+
+func (r *Request) Group() string { return r.GroupID }
+
+var _ protocol.GroupMessage = (*Request)(nil)
+
+type Response struct {
+	// We need at least one tagged field to indicate that this is a "flexible" message
+	// type.
+	_ struct{} `kafka:"min=v4,max=v5,tag"`
+
+	ThrottleTimeMS int32  `kafka:"min=v1,max=v5"`
+	ErrorCode      int16  `kafka:"min=v0,max=v5"`
+	ProtocolType   string `kafka:"min=v5,max=v5"`
+	ProtocolName   string `kafka:"min=v5,max=v5"`
+	Assignments    []byte `kafka:"min=v0,max=v3|min=v4,max=v5,compact"`
+}
+
+func (r *Response) ApiKey() protocol.ApiKey { return protocol.SyncGroup }
diff -pruN 0.2.1-1.1/protocol/syncgroup/syncgroup_test.go 0.4.49+ds1-1/protocol/syncgroup/syncgroup_test.go
--- 0.2.1-1.1/protocol/syncgroup/syncgroup_test.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/protocol/syncgroup/syncgroup_test.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,93 @@
+package syncgroup_test
+
+import (
+	"testing"
+
+	"github.com/segmentio/kafka-go/protocol/prototest"
+	"github.com/segmentio/kafka-go/protocol/syncgroup"
+)
+
+func TestSyncGroupReq(t *testing.T) {
+	for _, version := range []int16{0, 1, 2} {
+		prototest.TestRequest(t, version, &syncgroup.Request{
+			GroupID:      "group-id-1",
+			GenerationID: 10,
+			MemberID:     "member-id-1",
+			Assignments: []syncgroup.RequestAssignment{
+				{
+					MemberID:   "member-id-2",
+					Assignment: []byte{0, 1, 2, 3, 4},
+				},
+			},
+		})
+	}
+
+	// Version 3 added:
+	// GroupInstanceID
+	for _, version := range []int16{3, 4} {
+		prototest.TestRequest(t, version, &syncgroup.Request{
+			GroupID:         "group-id-1",
+			GenerationID:    10,
+			MemberID:        "member-id-1",
+			GroupInstanceID: "group-instance-id",
+			Assignments: []syncgroup.RequestAssignment{
+				{
+					MemberID:   "member-id-2",
+					Assignment: []byte{0, 1, 2, 3, 4},
+				},
+			},
+		})
+	}
+
+	// Version 5 added
+	// ProtocolType
+	// ProtocolName
+	for _, version := range []int16{5} {
+		prototest.TestRequest(t, version, &syncgroup.Request{
+			GroupID:         "group-id-1",
+			GenerationID:    10,
+			MemberID:        "member-id-1",
+			GroupInstanceID: "group-instance-id",
+			ProtocolType:    "protocol-type",
+			ProtocolName:    "protocol-name",
+			Assignments: []syncgroup.RequestAssignment{
+				{
+					MemberID:   "member-id-2",
+					Assignment: []byte{0, 1, 2, 3, 4},
+				},
+			},
+		})
+	}
+}
+
+func TestSyncGroupResp(t *testing.T) {
+	for _, version := range []int16{0} {
+		prototest.TestResponse(t, version, &syncgroup.Response{
+			ErrorCode:   10,
+			Assignments: []byte{0, 1, 2, 3, 4},
+		})
+	}
+
+	// Version 1 added
+	// ThrottleTimeMS
+	for _, version := range []int16{1, 2, 3, 4} {
+		prototest.TestResponse(t, version, &syncgroup.Response{
+			ErrorCode:      10,
+			ThrottleTimeMS: 1,
+			Assignments:    []byte{0, 1, 2, 3, 4},
+		})
+	}
+
+	// Version 5 added
+	// ProtocolType
+	// ProtocolName
+	for _, version := range []int16{5} {
+		prototest.TestResponse(t, version, &syncgroup.Response{
+			ErrorCode:      10,
+			ThrottleTimeMS: 1,
+			ProtocolType:   "protocol-type",
+			ProtocolName:   "protocol-name",
+			Assignments:    []byte{0, 1, 2, 3, 4},
+		})
+	}
+}
diff -pruN 0.2.1-1.1/protocol/txnoffsetcommit/txnoffsetcommit.go 0.4.49+ds1-1/protocol/txnoffsetcommit/txnoffsetcommit.go
--- 0.2.1-1.1/protocol/txnoffsetcommit/txnoffsetcommit.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/protocol/txnoffsetcommit/txnoffsetcommit.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,77 @@
+package txnoffsetcommit
+
+import "github.com/segmentio/kafka-go/protocol"
+
+func init() {
+	protocol.Register(&Request{}, &Response{})
+}
+
+type Request struct {
+	// We need at least one tagged field to indicate that this is a "flexible" message
+	// type.
+	_ struct{} `kafka:"min=v3,max=v3,tag"`
+
+	TransactionalID string         `kafka:"min=v0,max=v2|min=v3,max=v3,compact"`
+	GroupID         string         `kafka:"min=v0,max=v2|min=v3,max=v3,compact"`
+	ProducerID      int64          `kafka:"min=v0,max=v3"`
+	ProducerEpoch   int16          `kafka:"min=v0,max=v3"`
+	GenerationID    int32          `kafka:"min=v3,max=v3"`
+	MemberID        string         `kafka:"min=v3,max=v3,compact"`
+	GroupInstanceID string         `kafka:"min=v3,max=v3,compact,nullable"`
+	Topics          []RequestTopic `kafka:"min=v0,max=v3"`
+}
+
+type RequestTopic struct {
+	// We need at least one tagged field to indicate that this is a "flexible" message
+	// type.
+	_ struct{} `kafka:"min=v3,max=v3,tag"`
+
+	Name       string             `kafka:"min=v0,max=v2|min=v3,max=v3,compact"`
+	Partitions []RequestPartition `kafka:"min=v0,max=v3"`
+}
+
+type RequestPartition struct {
+	// We need at least one tagged field to indicate that this is a "flexible" message
+	// type.
+	_ struct{} `kafka:"min=v3,max=v3,tag"`
+
+	Partition            int32  `kafka:"min=v0,max=v3"`
+	CommittedOffset      int64  `kafka:"min=v0,max=v3"`
+	CommittedLeaderEpoch int32  `kafka:"min=v2,max=v3"`
+	CommittedMetadata    string `kafka:"min=v0,max=v2|min=v3,max=v3,nullable,compact"`
+}
+
+func (r *Request) ApiKey() protocol.ApiKey { return protocol.TxnOffsetCommit }
+
+func (r *Request) Group() string { return r.GroupID }
+
+var _ protocol.GroupMessage = (*Request)(nil)
+
+type Response struct {
+	// We need at least one tagged field to indicate that this is a "flexible" message
+	// type.
+	_ struct{} `kafka:"min=v3,max=v3,tag"`
+
+	ThrottleTimeMs int32           `kafka:"min=v0,max=v3"`
+	Topics         []ResponseTopic `kafka:"min=v0,max=v3"`
+}
+
+type ResponseTopic struct {
+	// We need at least one tagged field to indicate that this is a "flexible" message
+	// type.
+	_ struct{} `kafka:"min=v3,max=v3,tag"`
+
+	Name       string              `kafka:"min=v0,max=v2|min=v3,max=v3,compact"`
+	Partitions []ResponsePartition `kafka:"min=v0,max=v3"`
+}
+
+type ResponsePartition struct {
+	// We need at least one tagged field to indicate that this is a "flexible" message
+	// type.
+	_ struct{} `kafka:"min=v3,max=v3,tag"`
+
+	Partition int32 `kafka:"min=v0,max=v3"`
+	ErrorCode int16 `kafka:"min=v0,max=v3"`
+}
+
+func (r *Response) ApiKey() protocol.ApiKey { return protocol.TxnOffsetCommit }
diff -pruN 0.2.1-1.1/protocol/txnoffsetcommit/txnoffsetcommit_test.go 0.4.49+ds1-1/protocol/txnoffsetcommit/txnoffsetcommit_test.go
--- 0.2.1-1.1/protocol/txnoffsetcommit/txnoffsetcommit_test.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/protocol/txnoffsetcommit/txnoffsetcommit_test.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,186 @@
+package txnoffsetcommit_test
+
+import (
+	"testing"
+
+	"github.com/segmentio/kafka-go/protocol/prototest"
+	"github.com/segmentio/kafka-go/protocol/txnoffsetcommit"
+)
+
+func TestTxnOffsetCommitRequest(t *testing.T) {
+	for _, version := range []int16{0, 1} {
+		prototest.TestRequest(t, version, &txnoffsetcommit.Request{
+			TransactionalID: "transactional-id-0",
+			GroupID:         "group-0",
+			ProducerID:      10,
+			ProducerEpoch:   100,
+			Topics: []txnoffsetcommit.RequestTopic{
+				{
+					Name: "topic-0",
+					Partitions: []txnoffsetcommit.RequestPartition{
+						{
+							Partition:         0,
+							CommittedOffset:   10,
+							CommittedMetadata: "meta-0-0",
+						},
+						{
+							Partition:         1,
+							CommittedOffset:   10,
+							CommittedMetadata: "meta-0-1",
+						},
+					},
+				},
+				{
+					Name: "topic-1",
+					Partitions: []txnoffsetcommit.RequestPartition{
+						{
+							Partition:         0,
+							CommittedOffset:   10,
+							CommittedMetadata: "meta-1-0",
+						},
+						{
+							Partition:         1,
+							CommittedOffset:   10,
+							CommittedMetadata: "meta-1-1",
+						},
+					},
+				},
+			},
+		})
+	}
+
+	// Version 2 added:
+	// Topics.RequestTopic.Partitions.CommittedLeaderEpoch
+	for _, version := range []int16{2} {
+		prototest.TestRequest(t, version, &txnoffsetcommit.Request{
+			TransactionalID: "transactional-id-0",
+			GroupID:         "group-0",
+			ProducerID:      10,
+			ProducerEpoch:   100,
+			Topics: []txnoffsetcommit.RequestTopic{
+				{
+					Name: "topic-0",
+					Partitions: []txnoffsetcommit.RequestPartition{
+						{
+							Partition:            0,
+							CommittedOffset:      10,
+							CommittedLeaderEpoch: 100,
+							CommittedMetadata:    "meta-0-0",
+						},
+						{
+							Partition:            1,
+							CommittedOffset:      10,
+							CommittedLeaderEpoch: 100,
+							CommittedMetadata:    "meta-0-1",
+						},
+					},
+				},
+				{
+					Name: "topic-1",
+					Partitions: []txnoffsetcommit.RequestPartition{
+						{
+							Partition:            0,
+							CommittedOffset:      10,
+							CommittedLeaderEpoch: 100,
+							CommittedMetadata:    "meta-1-0",
+						},
+						{
+							Partition:            1,
+							CommittedOffset:      10,
+							CommittedLeaderEpoch: 100,
+							CommittedMetadata:    "meta-1-1",
+						},
+					},
+				},
+			},
+		})
+	}
+
+	// Version 3 added:
+	// GenerationID
+	// MemberID
+	// GroupInstanceID
+	for _, version := range []int16{3} {
+		prototest.TestRequest(t, version, &txnoffsetcommit.Request{
+			TransactionalID: "transactional-id-0",
+			GroupID:         "group-0",
+			ProducerID:      10,
+			ProducerEpoch:   100,
+			GenerationID:    2,
+			MemberID:        "member-0",
+			GroupInstanceID: "group-instance-id-0",
+			Topics: []txnoffsetcommit.RequestTopic{
+				{
+					Name: "topic-0",
+					Partitions: []txnoffsetcommit.RequestPartition{
+						{
+							Partition:            0,
+							CommittedOffset:      10,
+							CommittedLeaderEpoch: 100,
+							CommittedMetadata:    "meta-0-0",
+						},
+						{
+							Partition:            1,
+							CommittedOffset:      10,
+							CommittedLeaderEpoch: 100,
+							CommittedMetadata:    "meta-0-1",
+						},
+					},
+				},
+				{
+					Name: "topic-1",
+					Partitions: []txnoffsetcommit.RequestPartition{
+						{
+							Partition:            0,
+							CommittedOffset:      10,
+							CommittedLeaderEpoch: 100,
+							CommittedMetadata:    "meta-1-0",
+						},
+						{
+							Partition:            1,
+							CommittedOffset:      10,
+							CommittedLeaderEpoch: 100,
+							CommittedMetadata:    "meta-1-1",
+						},
+					},
+				},
+			},
+		})
+	}
+}
+
+func TestTxnOffsetCommitResponse(t *testing.T) {
+	for _, version := range []int16{0, 1, 2, 3} {
+		prototest.TestResponse(t, version, &txnoffsetcommit.Response{
+			ThrottleTimeMs: 10,
+			Topics: []txnoffsetcommit.ResponseTopic{
+				{
+					Name: "topic-0",
+					Partitions: []txnoffsetcommit.ResponsePartition{
+						{
+							Partition: 0,
+							ErrorCode: 0,
+						},
+						{
+							Partition: 1,
+							ErrorCode: 10,
+						},
+					},
+				},
+				{
+					Name: "topic-1",
+					Partitions: []txnoffsetcommit.ResponsePartition{
+						{
+							Partition: 0,
+							ErrorCode: 0,
+						},
+						{
+							Partition: 1,
+							ErrorCode: 10,
+						},
+					},
+				},
+			},
+		})
+	}
+}
diff -pruN 0.2.1-1.1/protocol.go 0.4.49+ds1-1/protocol.go
--- 0.2.1-1.1/protocol.go	2018-11-20 17:06:33.000000000 +0000
+++ 0.4.49+ds1-1/protocol.go	2025-08-21 19:15:53.000000000 +0000
@@ -1,40 +1,170 @@
 package kafka
 
 import (
-	"bufio"
 	"encoding/binary"
 	"fmt"
+	"strconv"
 )
 
+type ApiVersion struct {
+	ApiKey     int16
+	MinVersion int16
+	MaxVersion int16
+}
+
+func (v ApiVersion) Format(w fmt.State, r rune) {
+	switch r {
+	case 's':
+		fmt.Fprint(w, apiKey(v.ApiKey))
+	case 'd':
+		switch {
+		case w.Flag('-'):
+			fmt.Fprint(w, v.MinVersion)
+		case w.Flag('+'):
+			fmt.Fprint(w, v.MaxVersion)
+		default:
+			fmt.Fprint(w, v.ApiKey)
+		}
+	case 'v':
+		switch {
+		case w.Flag('-'):
+			fmt.Fprintf(w, "v%d", v.MinVersion)
+		case w.Flag('+'):
+			fmt.Fprintf(w, "v%d", v.MaxVersion)
+		case w.Flag('#'):
+			fmt.Fprintf(w, "kafka.ApiVersion{ApiKey:%d MinVersion:%d MaxVersion:%d}", v.ApiKey, v.MinVersion, v.MaxVersion)
+		default:
+			fmt.Fprintf(w, "%s[v%d:v%d]", apiKey(v.ApiKey), v.MinVersion, v.MaxVersion)
+		}
+	}
+}
+
 type apiKey int16
 
 const (
-	produceRequest          apiKey = 0
-	fetchRequest            apiKey = 1
-	listOffsetRequest       apiKey = 2
-	metadataRequest         apiKey = 3
-	offsetCommitRequest     apiKey = 8
-	offsetFetchRequest      apiKey = 9
-	groupCoordinatorRequest apiKey = 10
-	joinGroupRequest        apiKey = 11
-	heartbeatRequest        apiKey = 12
-	leaveGroupRequest       apiKey = 13
-	syncGroupRequest        apiKey = 14
-	describeGroupsRequest   apiKey = 15
-	listGroupsRequest       apiKey = 16
-	createTopicsRequest     apiKey = 19
-	deleteTopicsRequest     apiKey = 20
+	produce                     apiKey = 0
+	fetch                       apiKey = 1
+	listOffsets                 apiKey = 2
+	metadata                    apiKey = 3
+	leaderAndIsr                apiKey = 4
+	stopReplica                 apiKey = 5
+	updateMetadata              apiKey = 6
+	controlledShutdown          apiKey = 7
+	offsetCommit                apiKey = 8
+	offsetFetch                 apiKey = 9
+	findCoordinator             apiKey = 10
+	joinGroup                   apiKey = 11
+	heartbeat                   apiKey = 12
+	leaveGroup                  apiKey = 13
+	syncGroup                   apiKey = 14
+	describeGroups              apiKey = 15
+	listGroups                  apiKey = 16
+	saslHandshake               apiKey = 17
+	apiVersions                 apiKey = 18
+	createTopics                apiKey = 19
+	deleteTopics                apiKey = 20
+	deleteRecords               apiKey = 21
+	initProducerId              apiKey = 22
+	offsetForLeaderEpoch        apiKey = 23
+	addPartitionsToTxn          apiKey = 24
+	addOffsetsToTxn             apiKey = 25
+	endTxn                      apiKey = 26
+	writeTxnMarkers             apiKey = 27
+	txnOffsetCommit             apiKey = 28
+	describeAcls                apiKey = 29
+	createAcls                  apiKey = 30
+	deleteAcls                  apiKey = 31
+	describeConfigs             apiKey = 32
+	alterConfigs                apiKey = 33
+	alterReplicaLogDirs         apiKey = 34
+	describeLogDirs             apiKey = 35
+	saslAuthenticate            apiKey = 36
+	createPartitions            apiKey = 37
+	createDelegationToken       apiKey = 38
+	renewDelegationToken        apiKey = 39
+	expireDelegationToken       apiKey = 40
+	describeDelegationToken     apiKey = 41
+	deleteGroups                apiKey = 42
+	electLeaders                apiKey = 43
+	incrementalAlterConfigs     apiKey = 44
+	alterPartitionReassignments apiKey = 45
+	listPartitionReassignments  apiKey = 46
+	offsetDelete                apiKey = 47
 )
 
+func (k apiKey) String() string {
+	if i := int(k); i >= 0 && i < len(apiKeyStrings) {
+		return apiKeyStrings[i]
+	}
+	return strconv.Itoa(int(k))
+}
+
 type apiVersion int16
 
 const (
-	v0 apiVersion = 0
-	v1 apiVersion = 1
-	v2 apiVersion = 2
-	v3 apiVersion = 3
+	v0  = 0
+	v1  = 1
+	v2  = 2
+	v3  = 3
+	v5  = 5
+	v6  = 6
+	v7  = 7
+	v10 = 10
+
+	// Unused protocol versions: v4, v8, v9.
 )
 
+var apiKeyStrings = [...]string{
+	produce:                     "Produce",
+	fetch:                       "Fetch",
+	listOffsets:                 "ListOffsets",
+	metadata:                    "Metadata",
+	leaderAndIsr:                "LeaderAndIsr",
+	stopReplica:                 "StopReplica",
+	updateMetadata:              "UpdateMetadata",
+	controlledShutdown:          "ControlledShutdown",
+	offsetCommit:                "OffsetCommit",
+	offsetFetch:                 "OffsetFetch",
+	findCoordinator:             "FindCoordinator",
+	joinGroup:                   "JoinGroup",
+	heartbeat:                   "Heartbeat",
+	leaveGroup:                  "LeaveGroup",
+	syncGroup:                   "SyncGroup",
+	describeGroups:              "DescribeGroups",
+	listGroups:                  "ListGroups",
+	saslHandshake:               "SaslHandshake",
+	apiVersions:                 "ApiVersions",
+	createTopics:                "CreateTopics",
+	deleteTopics:                "DeleteTopics",
+	deleteRecords:               "DeleteRecords",
+	initProducerId:              "InitProducerId",
+	offsetForLeaderEpoch:        "OffsetForLeaderEpoch",
+	addPartitionsToTxn:          "AddPartitionsToTxn",
+	addOffsetsToTxn:             "AddOffsetsToTxn",
+	endTxn:                      "EndTxn",
+	writeTxnMarkers:             "WriteTxnMarkers",
+	txnOffsetCommit:             "TxnOffsetCommit",
+	describeAcls:                "DescribeAcls",
+	createAcls:                  "CreateAcls",
+	deleteAcls:                  "DeleteAcls",
+	describeConfigs:             "DescribeConfigs",
+	alterConfigs:                "AlterConfigs",
+	alterReplicaLogDirs:         "AlterReplicaLogDirs",
+	describeLogDirs:             "DescribeLogDirs",
+	saslAuthenticate:            "SaslAuthenticate",
+	createPartitions:            "CreatePartitions",
+	createDelegationToken:       "CreateDelegationToken",
+	renewDelegationToken:        "RenewDelegationToken",
+	expireDelegationToken:       "ExpireDelegationToken",
+	describeDelegationToken:     "DescribeDelegationToken",
+	deleteGroups:                "DeleteGroups",
+	electLeaders:                "ElectLeaders",
+	incrementalAlterConfigs:     "IncrementalAlterConfigs",
+	alterPartitionReassignments: "AlterPartitionReassignments",
+	listPartitionReassignments:  "ListPartitionReassignments",
+	offsetDelete:                "OffsetDelete",
+}
+
 type requestHeader struct {
 	Size          int32
 	ApiKey        int16
@@ -47,17 +177,17 @@ func (h requestHeader) size() int32 {
 	return 4 + 2 + 2 + 4 + sizeofString(h.ClientID)
 }
 
-func (h requestHeader) writeTo(w *bufio.Writer) {
-	writeInt32(w, h.Size)
-	writeInt16(w, h.ApiKey)
-	writeInt16(w, h.ApiVersion)
-	writeInt32(w, h.CorrelationID)
-	writeString(w, h.ClientID)
+func (h requestHeader) writeTo(wb *writeBuffer) {
+	wb.writeInt32(h.Size)
+	wb.writeInt16(h.ApiKey)
+	wb.writeInt16(h.ApiVersion)
+	wb.writeInt32(h.CorrelationID)
+	wb.writeString(h.ClientID)
 }
 
 type request interface {
 	size() int32
-	writeTo(*bufio.Writer)
+	writable
 }
 
 func makeInt8(b []byte) int8 {
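
For reference, the flag-dependent verbs handled by `ApiVersion.Format` above render as in the comments below; the expected outputs mirror the `TestApiVersionsFormat` cases in the test file that follows, assuming the exported `kafka.ApiVersion` type:

package main

import (
	"fmt"

	kafka "github.com/segmentio/kafka-go"
)

func main() {
	v := kafka.ApiVersion{ApiKey: 1, MinVersion: 2, MaxVersion: 5}

	fmt.Printf("%s\n", v)  // Fetch
	fmt.Printf("%d\n", v)  // 1
	fmt.Printf("%-v\n", v) // v2 (minimum supported version)
	fmt.Printf("%+v\n", v) // v5 (maximum supported version)
	fmt.Printf("%v\n", v)  // Fetch[v2:v5]
}
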
diff -pruN 0.2.1-1.1/protocol_test.go 0.4.49+ds1-1/protocol_test.go
--- 0.2.1-1.1/protocol_test.go	2018-11-20 17:06:33.000000000 +0000
+++ 0.4.49+ds1-1/protocol_test.go	2025-08-21 19:15:53.000000000 +0000
@@ -8,9 +8,30 @@ import (
 	"testing"
 )
 
-func TestProtocol(t *testing.T) {
-	t.Parallel()
+func TestApiVersionsFormat(t *testing.T) {
+	for _, test := range []struct {
+		version ApiVersion
+		format  string
+		output  string
+	}{
+		{version: ApiVersion{1, 2, 5}, format: "%s", output: "Fetch"},
+		{version: ApiVersion{1, 2, 5}, format: "%d", output: "1"},
+		{version: ApiVersion{1, 2, 5}, format: "%-d", output: "2"},
+		{version: ApiVersion{1, 2, 5}, format: "%+d", output: "5"},
+		{version: ApiVersion{1, 2, 5}, format: "%v", output: "Fetch[v2:v5]"},
+		{version: ApiVersion{1, 2, 5}, format: "%-v", output: "v2"},
+		{version: ApiVersion{1, 2, 5}, format: "%+v", output: "v5"},
+		{version: ApiVersion{1, 2, 5}, format: "%#v", output: "kafka.ApiVersion{ApiKey:1 MinVersion:2 MaxVersion:5}"},
+	} {
+		t.Run(test.output, func(t *testing.T) {
+			if s := fmt.Sprintf(test.format, test.version); s != test.output {
+				t.Error("output mismatch:", s, "!=", test.output)
+			}
+		})
+	}
+}
 
+func TestProtocol(t *testing.T) {
 	tests := []interface{}{
 		int8(42),
 		int16(42),
@@ -23,7 +44,7 @@ func TestProtocol(t *testing.T) {
 
 		requestHeader{
 			Size:          26,
-			ApiKey:        int16(offsetCommitRequest),
+			ApiKey:        int16(offsetCommit),
 			ApiVersion:    int16(v2),
 			CorrelationID: 42,
 			ClientID:      "Hello World!",
@@ -41,7 +62,7 @@ func TestProtocol(t *testing.T) {
 		metadataResponseV1{
 			Brokers: []brokerMetadataV1{
 				{NodeID: 1, Host: "localhost", Port: 9001},
-				{NodeID: 2, Host: "localhost", Port: 9002, Rack:"rack2"},
+				{NodeID: 2, Host: "localhost", Port: 9002, Rack: "rack2"},
 			},
 			ControllerID: 2,
 			Topics: []topicMetadataV1{
@@ -55,6 +76,30 @@ func TestProtocol(t *testing.T) {
 			},
 		},
 
+		topicMetadataRequestV6{
+			Topics:                 []string{"A", "B", "C"},
+			AllowAutoTopicCreation: true,
+		},
+
+		metadataResponseV6{
+			Brokers: []brokerMetadataV1{
+				{NodeID: 1, Host: "localhost", Port: 9001},
+				{NodeID: 2, Host: "localhost", Port: 9002, Rack: "rack2"},
+			},
+			ClusterId:    "cluster",
+			ControllerID: 2,
+			Topics: []topicMetadataV6{
+				{TopicErrorCode: 0, Internal: true, Partitions: []partitionMetadataV6{{
+					PartitionErrorCode: 0,
+					PartitionID:        1,
+					Leader:             2,
+					Replicas:           []int32{1},
+					Isr:                []int32{1},
+					OfflineReplicas:    []int32{1},
+				}}},
+			},
+		},
+
 		listOffsetRequestV1{
 			ReplicaID: 1,
 			Topics: []listOffsetRequestTopicV1{
@@ -87,13 +132,8 @@ func TestProtocol(t *testing.T) {
 		t.Run(fmt.Sprintf("%T", test), func(t *testing.T) {
 			b := &bytes.Buffer{}
 			r := bufio.NewReader(b)
-			w := bufio.NewWriter(b)
-
-			write(w, test)
-
-			if err := w.Flush(); err != nil {
-				t.Fatal(err)
-			}
+			w := &writeBuffer{w: b}
+			w.write(test)
 
 			if size := int(sizeof(test)); size != b.Len() {
 				t.Error("invalid size:", size, "!=", b.Len())
diff -pruN 0.2.1-1.1/rawproduce.go 0.4.49+ds1-1/rawproduce.go
--- 0.2.1-1.1/rawproduce.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/rawproduce.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,103 @@
+package kafka
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"net"
+
+	"github.com/segmentio/kafka-go/protocol"
+	produceAPI "github.com/segmentio/kafka-go/protocol/produce"
+	"github.com/segmentio/kafka-go/protocol/rawproduce"
+)
+
+// RawProduceRequest represents a request sent to a kafka broker to produce records
+// to a topic partition. The request contains a pre-encoded/raw record set.
+type RawProduceRequest struct {
+	// Address of the kafka broker to send the request to.
+	Addr net.Addr
+
+	// The topic to produce the records to.
+	Topic string
+
+	// The partition to produce the records to.
+	Partition int
+
+	// The level of required acknowledgements to ask the kafka broker for.
+	RequiredAcks RequiredAcks
+
+	// The message format version used when encoding the records.
+	//
+	// By default, the client automatically determines which version should be
+	// used based on the version of the Produce API supported by the server.
+	MessageVersion int
+
+	// An optional transaction id when producing to the kafka broker is part of
+	// a transaction.
+	TransactionalID string
+
+	// The sequence of records to produce to the topic partition.
+	RawRecords protocol.RawRecordSet
+}
+
+// RawProduce sends a raw produce request to a kafka broker and returns the response.
+//
+// If the request contained no records, an error wrapping protocol.ErrNoRecord
+// is returned.
+//
+// When the request is configured with RequiredAcks=none, both the response and
+// the error will be nil on success.
+func (c *Client) RawProduce(ctx context.Context, req *RawProduceRequest) (*ProduceResponse, error) {
+	m, err := c.roundTrip(ctx, req.Addr, &rawproduce.Request{
+		TransactionalID: req.TransactionalID,
+		Acks:            int16(req.RequiredAcks),
+		Timeout:         c.timeoutMs(ctx, defaultProduceTimeout),
+		Topics: []rawproduce.RequestTopic{{
+			Topic: req.Topic,
+			Partitions: []rawproduce.RequestPartition{{
+				Partition: int32(req.Partition),
+				RecordSet: req.RawRecords,
+			}},
+		}},
+	})
+
+	switch {
+	case err == nil:
+	case errors.Is(err, protocol.ErrNoRecord):
+		return new(ProduceResponse), nil
+	default:
+		return nil, fmt.Errorf("kafka.(*Client).RawProduce: %w", err)
+	}
+
+	if req.RequiredAcks == RequireNone {
+		return nil, nil
+	}
+
+	res := m.(*produceAPI.Response)
+	if len(res.Topics) == 0 {
+		return nil, fmt.Errorf("kafka.(*Client).RawProduce: %w", protocol.ErrNoTopic)
+	}
+	topic := &res.Topics[0]
+	if len(topic.Partitions) == 0 {
+		return nil, fmt.Errorf("kafka.(*Client).RawProduce: %w", protocol.ErrNoPartition)
+	}
+	partition := &topic.Partitions[0]
+
+	ret := &ProduceResponse{
+		Throttle:       makeDuration(res.ThrottleTimeMs),
+		Error:          makeError(partition.ErrorCode, partition.ErrorMessage),
+		BaseOffset:     partition.BaseOffset,
+		LogAppendTime:  makeTime(partition.LogAppendTime),
+		LogStartOffset: partition.LogStartOffset,
+	}
+
+	if len(partition.RecordErrors) != 0 {
+		ret.RecordErrors = make(map[int]error, len(partition.RecordErrors))
+
+		for _, recErr := range partition.RecordErrors {
+			ret.RecordErrors[int(recErr.BatchIndex)] = errors.New(recErr.BatchIndexErrorMessage)
+		}
+	}
+
+	return ret, nil
+}
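
A minimal usage sketch of `Client.RawProduce`, assuming a broker at `localhost:9092` and a topic named `my-topic` (both placeholders): the record set is encoded once into a buffer, mirroring the `NewRawRecordSet` helper in the tests below, and the raw bytes are then handed to the broker without re-encoding.

package main

import (
	"bytes"
	"context"
	"log"
	"time"

	kafka "github.com/segmentio/kafka-go"
	"github.com/segmentio/kafka-go/protocol"
)

func main() {
	// Encode the record set once; the same raw bytes could be fanned out to
	// several partitions without paying the encoding cost again.
	rs := protocol.RecordSet{
		Version: 2,
		Records: kafka.NewRecordReader(
			kafka.Record{Time: time.Now(), Value: kafka.NewBytes([]byte("hello"))},
		),
	}
	buf := &bytes.Buffer{}
	if _, err := rs.WriteTo(buf); err != nil {
		log.Fatal(err)
	}

	client := &kafka.Client{Addr: kafka.TCP("localhost:9092")}
	res, err := client.RawProduce(context.Background(), &kafka.RawProduceRequest{
		Topic:        "my-topic",
		Partition:    0,
		RequiredAcks: kafka.RequireAll,
		RawRecords:   protocol.RawRecordSet{Reader: buf},
	})
	if err != nil {
		log.Fatal(err)
	}
	if res.Error != nil {
		log.Fatal(res.Error)
	}
}
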
diff -pruN 0.2.1-1.1/rawproduce_test.go 0.4.49+ds1-1/rawproduce_test.go
--- 0.2.1-1.1/rawproduce_test.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/rawproduce_test.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,123 @@
+package kafka
+
+import (
+	"bytes"
+	"context"
+	"testing"
+	"time"
+
+	"github.com/segmentio/kafka-go/protocol"
+	ktesting "github.com/segmentio/kafka-go/testing"
+)
+
+func TestClientRawProduce(t *testing.T) {
+	// The RawProduce request records are encoded in the format introduced in Kafka 0.11.0.
+	if !ktesting.KafkaIsAtLeast("0.11.0") {
+		t.Skip("Skipping because the RawProduce request is not supported by Kafka versions below 0.11.0")
+	}
+
+	client, topic, shutdown := newLocalClientAndTopic()
+	defer shutdown()
+
+	now := time.Now()
+
+	res, err := client.RawProduce(context.Background(), &RawProduceRequest{
+		Topic:        topic,
+		Partition:    0,
+		RequiredAcks: -1,
+		RawRecords: NewRawRecordSet(NewRecordReader(
+			Record{Time: now, Value: NewBytes([]byte(`hello-1`))},
+			Record{Time: now, Value: NewBytes([]byte(`hello-2`))},
+			Record{Time: now, Value: NewBytes([]byte(`hello-3`))},
+		), 0),
+	})
+
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if res.Error != nil {
+		t.Error(res.Error)
+	}
+
+	for index, err := range res.RecordErrors {
+		t.Errorf("record at index %d produced an error: %v", index, err)
+	}
+}
+
+func TestClientRawProduceCompressed(t *testing.T) {
+	// The RawProduce request records are encoded in the format introduced in Kafka 0.11.0.
+	if !ktesting.KafkaIsAtLeast("0.11.0") {
+		t.Skip("Skipping because the RawProduce request is not supported by Kafka versions below 0.11.0")
+	}
+
+	client, topic, shutdown := newLocalClientAndTopic()
+	defer shutdown()
+
+	now := time.Now()
+
+	res, err := client.RawProduce(context.Background(), &RawProduceRequest{
+		Topic:        topic,
+		Partition:    0,
+		RequiredAcks: -1,
+		RawRecords: NewRawRecordSet(NewRecordReader(
+			Record{Time: now, Value: NewBytes([]byte(`hello-1`))},
+			Record{Time: now, Value: NewBytes([]byte(`hello-2`))},
+			Record{Time: now, Value: NewBytes([]byte(`hello-3`))},
+		), protocol.Gzip),
+	})
+
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if res.Error != nil {
+		t.Error(res.Error)
+	}
+
+	for index, err := range res.RecordErrors {
+		t.Errorf("record at index %d produced an error: %v", index, err)
+	}
+}
+
+func TestClientRawProduceNilRecords(t *testing.T) {
+	client, topic, shutdown := newLocalClientAndTopic()
+	defer shutdown()
+
+	_, err := client.RawProduce(context.Background(), &RawProduceRequest{
+		Topic:        topic,
+		Partition:    0,
+		RequiredAcks: -1,
+		RawRecords:   protocol.RawRecordSet{Reader: nil},
+	})
+
+	if err != nil {
+		t.Fatal(err)
+	}
+}
+
+func TestClientRawProduceEmptyRecords(t *testing.T) {
+	client, topic, shutdown := newLocalClientAndTopic()
+	defer shutdown()
+
+	_, err := client.Produce(context.Background(), &ProduceRequest{
+		Topic:        topic,
+		Partition:    0,
+		RequiredAcks: -1,
+		Records:      NewRecordReader(),
+	})
+
+	if err != nil {
+		t.Fatal(err)
+	}
+}
+
+func NewRawRecordSet(reader protocol.RecordReader, attr protocol.Attributes) protocol.RawRecordSet {
+	rs := protocol.RecordSet{Version: 2, Attributes: attr, Records: reader}
+	buf := &bytes.Buffer{}
+	rs.WriteTo(buf)
+
+	return protocol.RawRecordSet{
+		Reader: buf,
+	}
+}
diff -pruN 0.2.1-1.1/read.go 0.4.49+ds1-1/read.go
--- 0.2.1-1.1/read.go	2018-11-20 17:06:33.000000000 +0000
+++ 0.4.49+ds1-1/read.go	2025-08-21 19:15:53.000000000 +0000
@@ -8,10 +8,6 @@ import (
 	"reflect"
 )
 
-type readable interface {
-	readFrom(*bufio.Reader, int) (int, error)
-}
-
 var errShortRead = errors.New("not enough bytes available to load the response")
 
 func peekRead(r *bufio.Reader, sz int, n int, f func([]byte)) (int, error) {
@@ -42,6 +38,55 @@ func readInt64(r *bufio.Reader, sz int,
 	return peekRead(r, sz, 8, func(b []byte) { *v = makeInt64(b) })
 }
 
+func readVarInt(r *bufio.Reader, sz int, v *int64) (remain int, err error) {
+	// Optimistically assume that most of the time, there will be data buffered
+	// in the reader. If this is not the case, the buffer will be refilled after
+	// consuming zero bytes from the input.
+	input, _ := r.Peek(r.Buffered())
+	x := uint64(0)
+	s := uint(0)
+
+	for {
+		if len(input) > sz {
+			input = input[:sz]
+		}
+
+		for i, b := range input {
+			if b < 0x80 {
+				x |= uint64(b) << s
+				*v = int64(x>>1) ^ -(int64(x) & 1)
+				n, err := r.Discard(i + 1)
+				return sz - n, err
+			}
+
+			x |= uint64(b&0x7f) << s
+			s += 7
+		}
+
+		// Make room in the input buffer to load more data from the underlying
+		// stream. The x and s variables are left untouched, ensuring that the
+		// varint decoding can continue on the next loop iteration.
+		n, _ := r.Discard(len(input))
+		sz -= n
+		if sz == 0 {
+			return 0, errShortRead
+		}
+
+		// Fill the buffer: ask for one more byte, but in practice the reader
+		// will load way more from the underlying stream.
+		if _, err := r.Peek(1); err != nil {
+			if errors.Is(err, io.EOF) {
+				err = errShortRead
+			}
+			return sz, err
+		}
+
+		// Grab as many bytes as possible from the buffer, then go on to the
+		// next loop iteration which is going to consume it.
+		input, _ = r.Peek(r.Buffered())
+	}
+}
+
 func readBool(r *bufio.Reader, sz int, v *bool) (int, error) {
 	return peekRead(r, sz, 1, func(b []byte) { *v = b[0] != 0 })
 }
@@ -83,13 +128,12 @@ func readBytes(r *bufio.Reader, sz int,
 
 func readBytesWith(r *bufio.Reader, sz int, cb func(*bufio.Reader, int, int) (int, error)) (int, error) {
 	var err error
-	var len int32
+	var n int
 
-	if sz, err = readInt32(r, sz, &len); err != nil {
+	if sz, err = readArrayLen(r, sz, &n); err != nil {
 		return sz, err
 	}
 
-	n := int(len)
 	if n > sz {
 		return sz, errShortRead
 	}
@@ -100,17 +144,37 @@ func readBytesWith(r *bufio.Reader, sz i
 func readNewBytes(r *bufio.Reader, sz int, n int) ([]byte, int, error) {
 	var err error
 	var b []byte
+	var shortRead bool
 
 	if n > 0 {
+		if sz < n {
+			n = sz
+			shortRead = true
+		}
+
 		b = make([]byte, n)
 		n, err = io.ReadFull(r, b)
 		b = b[:n]
 		sz -= n
+
+		if err == nil && shortRead {
+			err = errShortRead
+		}
 	}
 
 	return b, sz, err
 }
 
+func readArrayLen(r *bufio.Reader, sz int, n *int) (int, error) {
+	var err error
+	var len int32
+	if sz, err = readInt32(r, sz, &len); err != nil {
+		return sz, err
+	}
+	*n = int(len)
+	return sz, nil
+}
+
 func readArrayWith(r *bufio.Reader, sz int, cb func(*bufio.Reader, int) (int, error)) (int, error) {
 	var err error
 	var len int32
@@ -207,39 +271,6 @@ func read(r *bufio.Reader, sz int, a int
 	}
 }
 
-func readAll(r *bufio.Reader, sz int, ptrs ...interface{}) (int, error) {
-	var err error
-
-	for _, ptr := range ptrs {
-		if sz, err = readPtr(r, sz, ptr); err != nil {
-			break
-		}
-	}
-
-	return sz, err
-}
-
-func readPtr(r *bufio.Reader, sz int, ptr interface{}) (int, error) {
-	switch v := ptr.(type) {
-	case *int8:
-		return readInt8(r, sz, v)
-	case *int16:
-		return readInt16(r, sz, v)
-	case *int32:
-		return readInt32(r, sz, v)
-	case *int64:
-		return readInt64(r, sz, v)
-	case *string:
-		return readString(r, sz, v)
-	case *[]byte:
-		return readBytes(r, sz, v)
-	case readable:
-		return v.readFrom(r, sz)
-	default:
-		panic(fmt.Sprintf("unsupported type: %T", v))
-	}
-}
-
 func readStruct(r *bufio.Reader, sz int, v reflect.Value) (int, error) {
 	var err error
 	for i, n := 0, v.NumField(); i != n; i++ {
@@ -273,7 +304,7 @@ func readSlice(r *bufio.Reader, sz int,
 	return sz, nil
 }
 
-func readFetchResponseHeader(r *bufio.Reader, size int) (throttle int32, watermark int64, remain int, err error) {
+func readFetchResponseHeaderV2(r *bufio.Reader, size int) (throttle int32, watermark int64, remain int, err error) {
 	var n int32
 	var p struct {
 		Partition           int32
@@ -335,42 +366,197 @@ func readFetchResponseHeader(r *bufio.Re
 	return
 }
 
-func readMessageHeader(r *bufio.Reader, sz int) (offset int64, attributes int8, timestamp int64, remain int, err error) {
-	var version int8
+func readFetchResponseHeaderV5(r *bufio.Reader, size int) (throttle int32, watermark int64, remain int, err error) {
+	var n int32
+	type AbortedTransaction struct {
+		ProducerId  int64
+		FirstOffset int64
+	}
+	var p struct {
+		Partition           int32
+		ErrorCode           int16
+		HighwaterMarkOffset int64
+		LastStableOffset    int64
+		LogStartOffset      int64
+	}
+	var messageSetSize int32
+	var abortedTransactions []AbortedTransaction
 
-	if remain, err = readInt64(r, sz, &offset); err != nil {
+	if remain, err = readInt32(r, size, &throttle); err != nil {
 		return
 	}
 
-	// On discarding the message size and CRC:
-	// ---------------------------------------
-	//
-	// - Not sure why kafka gives the message size here, we already have the
-	// number of remaining bytes in the response and kafka should only truncate
-	// the trailing message.
-	//
-	// - TCP is already taking care of ensuring data integrity, no need to
-	// waste resources doing it a second time so we just skip the message CRC.
-	//
-	if remain, err = discardN(r, remain, 8); err != nil {
+	if remain, err = readInt32(r, remain, &n); err != nil {
 		return
 	}
 
-	if remain, err = readInt8(r, remain, &version); err != nil {
+	// This error should never trigger, unless there's a bug in the kafka client
+	// or server.
+	if n != 1 {
+		err = fmt.Errorf("1 kafka topic was expected in the fetch response but the client received %d", n)
 		return
 	}
 
-	if remain, err = readInt8(r, remain, &attributes); err != nil {
+	// We ignore the topic name because we've requested messages for a single
+	// topic; unless there's a bug in the kafka server, we will have received
+	// the name of the topic that we requested.
+	if remain, err = discardString(r, remain); err != nil {
 		return
 	}
 
-	switch version {
-	case 0:
-	case 1:
-		remain, err = readInt64(r, remain, &timestamp)
-	default:
-		err = fmt.Errorf("unsupported message version %d found in fetch response", version)
+	if remain, err = readInt32(r, remain, &n); err != nil {
+		return
+	}
+
+	// This error should never trigger, unless there's a bug in the kafka client
+	// or server.
+	if n != 1 {
+		err = fmt.Errorf("1 kafka partition was expected in the fetch response but the client received %d", n)
+		return
+	}
+
+	if remain, err = read(r, remain, &p); err != nil {
+		return
+	}
+
+	var abortedTransactionLen int
+	if remain, err = readArrayLen(r, remain, &abortedTransactionLen); err != nil {
+		return
+	}
+
+	if abortedTransactionLen == -1 {
+		abortedTransactions = nil
+	} else {
+		abortedTransactions = make([]AbortedTransaction, abortedTransactionLen)
+		for i := 0; i < abortedTransactionLen; i++ {
+			if remain, err = read(r, remain, &abortedTransactions[i]); err != nil {
+				return
+			}
+		}
 	}
 
+	if p.ErrorCode != 0 {
+		err = Error(p.ErrorCode)
+		return
+	}
+
+	remain, err = readInt32(r, remain, &messageSetSize)
+	if err != nil {
+		return
+	}
+
+	// This error should never trigger, unless there's a bug in the kafka client
+	// or server.
+	if remain != int(messageSetSize) {
+		err = fmt.Errorf("the size of the message set in a fetch response doesn't match the number of remaining bytes (message set size = %d, remaining bytes = %d)", messageSetSize, remain)
+		return
+	}
+
+	watermark = p.HighwaterMarkOffset
 	return
+
+}
+
+func readFetchResponseHeaderV10(r *bufio.Reader, size int) (throttle int32, watermark int64, remain int, err error) {
+	var n int32
+	var errorCode int16
+	type AbortedTransaction struct {
+		ProducerId  int64
+		FirstOffset int64
+	}
+	var p struct {
+		Partition           int32
+		ErrorCode           int16
+		HighwaterMarkOffset int64
+		LastStableOffset    int64
+		LogStartOffset      int64
+	}
+	var messageSetSize int32
+	var abortedTransactions []AbortedTransaction
+
+	if remain, err = readInt32(r, size, &throttle); err != nil {
+		return
+	}
+
+	if remain, err = readInt16(r, remain, &errorCode); err != nil {
+		return
+	}
+	if errorCode != 0 {
+		err = Error(errorCode)
+		return
+	}
+
+	if remain, err = discardInt32(r, remain); err != nil {
+		return
+	}
+
+	if remain, err = readInt32(r, remain, &n); err != nil {
+		return
+	}
+
+	// This error should never trigger, unless there's a bug in the kafka client
+	// or server.
+	if n != 1 {
+		err = fmt.Errorf("1 kafka topic was expected in the fetch response but the client received %d", n)
+		return
+	}
+
+	// We ignore the topic name because we've requested messages for a single
+	// topic; unless there's a bug in the kafka server, we will have received
+	// the name of the topic that we requested.
+	if remain, err = discardString(r, remain); err != nil {
+		return
+	}
+
+	if remain, err = readInt32(r, remain, &n); err != nil {
+		return
+	}
+
+	// This error should never trigger, unless there's a bug in the kafka client
+	// or server.
+	if n != 1 {
+		err = fmt.Errorf("1 kafka partition was expected in the fetch response but the client received %d", n)
+		return
+	}
+
+	if remain, err = read(r, remain, &p); err != nil {
+		return
+	}
+
+	var abortedTransactionLen int
+	if remain, err = readArrayLen(r, remain, &abortedTransactionLen); err != nil {
+		return
+	}
+
+	if abortedTransactionLen == -1 {
+		abortedTransactions = nil
+	} else {
+		abortedTransactions = make([]AbortedTransaction, abortedTransactionLen)
+		for i := 0; i < abortedTransactionLen; i++ {
+			if remain, err = read(r, remain, &abortedTransactions[i]); err != nil {
+				return
+			}
+		}
+	}
+
+	if p.ErrorCode != 0 {
+		err = Error(p.ErrorCode)
+		return
+	}
+
+	remain, err = readInt32(r, remain, &messageSetSize)
+	if err != nil {
+		return
+	}
+
+	// This error should never trigger, unless there's a bug in the kafka client
+	// or server.
+	if remain != int(messageSetSize) {
+		err = fmt.Errorf("the size of the message set in a fetch response doesn't match the number of remaining bytes (message set size = %d, remaining bytes = %d)", messageSetSize, remain)
+		return
+	}
+
+	watermark = p.HighwaterMarkOffset
+	return
+
 }
diff -pruN 0.2.1-1.1/read_test.go 0.4.49+ds1-1/read_test.go
--- 0.2.1-1.1/read_test.go	2018-11-20 17:06:33.000000000 +0000
+++ 0.4.49+ds1-1/read_test.go	2025-08-21 19:15:53.000000000 +0000
@@ -3,10 +3,58 @@ package kafka
 import (
 	"bufio"
 	"bytes"
+	"errors"
+	"io/ioutil"
+	"math"
 	"reflect"
 	"testing"
 )
 
+type VarIntTestCase struct {
+	v  int64
+	r  int
+	tc []byte
+}
+
+func TestReadVarInt(t *testing.T) {
+	testCases := []*VarIntTestCase{
+		{v: 0, r: 3, tc: []byte{0, 1, 10, 0}},
+		{v: -1, r: 3, tc: []byte{1, 1, 10, 0}},
+		{v: 1, r: 3, tc: []byte{2, 1, 10, 0}},
+		{v: -2, r: 3, tc: []byte{3, 1, 10, 0}},
+		{v: 2, r: 3, tc: []byte{4, 1, 10, 0}},
+		{v: 64, r: 2, tc: []byte{128, 1, 10, 0}},
+		{v: -64, r: 3, tc: []byte{127, 1, 10, 0}},
+		{v: -196, r: 2, tc: []byte{135, 3, 10, 0}},
+		{v: -24772, r: 1, tc: []byte{135, 131, 3, 0}},
+	}
+
+	for _, tc := range testCases {
+		var v int64
+		rd := bufio.NewReader(bytes.NewReader(tc.tc))
+		remain, err := readVarInt(rd, len(tc.tc), &v)
+		if err != nil {
+			t.Errorf("Failure during reading: %v", err)
+		}
+		if v != tc.v {
+			t.Errorf("Expected %v; got %v", tc.v, v)
+		}
+		if remain != tc.r {
+			t.Errorf("Expected remain %v; got %v", tc.r, remain)
+		}
+	}
+}
+
+func TestReadVarIntFailing(t *testing.T) {
+	var v int64
+	testCase := []byte{135, 135}
+	rd := bufio.NewReader(bytes.NewReader(testCase))
+	_, err := readVarInt(rd, len(testCase), &v)
+	if !errors.Is(err, errShortRead) {
+		t.Errorf("Expected error while parsing var int: %v", err)
+	}
+}
+
 func TestReadStringArray(t *testing.T) {
 	testCases := map[string]struct {
 		Value []string
@@ -21,14 +69,12 @@ func TestReadStringArray(t *testing.T) {
 
 	for label, test := range testCases {
 		t.Run(label, func(t *testing.T) {
-			buf := bytes.NewBuffer(nil)
-
-			w := bufio.NewWriter(buf)
-			writeStringArray(w, test.Value)
-			w.Flush()
+			b := bytes.NewBuffer(nil)
+			w := &writeBuffer{w: b}
+			w.writeStringArray(test.Value)
 
 			var actual []string
-			readStringArray(bufio.NewReader(buf), buf.Len(), &actual)
+			readStringArray(bufio.NewReader(b), b.Len(), &actual)
 			if !reflect.DeepEqual(test.Value, actual) {
 				t.Errorf("expected %v; got %v", test.Value, actual)
 			}
@@ -52,21 +98,118 @@ func TestReadMapStringInt32(t *testing.T
 
 	for label, test := range testCases {
 		t.Run(label, func(t *testing.T) {
-			buf := bytes.NewBuffer(nil)
+			b := bytes.NewBuffer(nil)
+			w := &writeBuffer{w: b}
+			w.writeInt32(int32(len(test.Data)))
 
-			w := bufio.NewWriter(buf)
-			writeInt32(w, int32(len(test.Data)))
 			for key, values := range test.Data {
-				writeString(w, key)
-				writeInt32Array(w, values)
+				w.writeString(key)
+				w.writeInt32Array(values)
 			}
-			w.Flush()
 
 			var actual map[string][]int32
-			readMapStringInt32(bufio.NewReader(buf), buf.Len(), &actual)
+			readMapStringInt32(bufio.NewReader(b), b.Len(), &actual)
 			if !reflect.DeepEqual(test.Data, actual) {
 				t.Errorf("expected %#v; got %#v", test.Data, actual)
 			}
 		})
 	}
 }
+
+func TestReadNewBytes(t *testing.T) {
+
+	t.Run("reads new bytes", func(t *testing.T) {
+		r := bufio.NewReader(bytes.NewReader([]byte("foobar")))
+
+		b, remain, err := readNewBytes(r, 6, 3)
+		if string(b) != "foo" {
+			t.Error("should have returned 3 bytes")
+		}
+		if remain != 3 {
+			t.Error("should have calculated remaining correctly")
+		}
+		if err != nil {
+			t.Error("should not have errored")
+		}
+
+		b, remain, err = readNewBytes(r, remain, 3)
+		if string(b) != "bar" {
+			t.Error("should have returned 3 bytes")
+		}
+		if remain != 0 {
+			t.Error("should have calculated remaining correctly")
+		}
+		if err != nil {
+			t.Error("should not have errored")
+		}
+
+		b, err = r.Peek(0)
+		if len(b) > 0 {
+			t.Error("not all bytes were consumed")
+		}
+		if err != nil {
+			t.Error("should not have errored during peek")
+		}
+	})
+
+	t.Run("discards bytes when insufficient", func(t *testing.T) {
+		r := bufio.NewReader(bytes.NewReader([]byte("foo")))
+		b, remain, err := readNewBytes(bufio.NewReader(r), 3, 4)
+		if string(b) != "foo" {
+			t.Error("should have returned available bytes")
+		}
+		if remain != 0 {
+			t.Error("all bytes should have been consumed")
+		}
+		if !errors.Is(err, errShortRead) {
+			t.Error("should have returned errShortRead")
+		}
+		b, err = r.Peek(0)
+		if len(b) > 0 {
+			t.Error("not all bytes were consumed")
+		}
+		if err != nil {
+			t.Error("should not have errored during peek")
+		}
+	})
+}
+
+func BenchmarkWriteVarInt(b *testing.B) {
+	wb := &writeBuffer{w: ioutil.Discard}
+
+	for i := 0; i < b.N; i++ {
+		wb.writeVarInt(math.MaxInt64)
+	}
+}
+
+func BenchmarkReadVarInt(b *testing.B) {
+	b1 := new(bytes.Buffer)
+	wb := &writeBuffer{w: b1}
+
+	const N = math.MaxInt64
+	wb.writeVarInt(N)
+
+	b2 := bytes.NewReader(b1.Bytes())
+	rb := bufio.NewReader(b2)
+	n := b1.Len()
+
+	for i := 0; i < b.N; i++ {
+		v := int64(0)
+		r, err := readVarInt(rb, n, &v)
+
+		if err != nil {
+			b.Fatalf("unexpected error reading a varint from the input: %v", err)
+		}
+
+		if r != 0 {
+			b.Fatalf("unexpected bytes remaining to be read in the input (%d B)", r)
+		}
+
+		if v != N {
+			b.Fatalf("value mismatch, expected %d but found %d", N, v)
+		}
+
+		b2.Reset(b1.Bytes())
+		rb.Reset(b2)
+	}
+}
diff -pruN 0.2.1-1.1/reader.go 0.4.49+ds1-1/reader.go
--- 0.2.1-1.1/reader.go	2018-11-20 17:06:33.000000000 +0000
+++ 0.4.49+ds1-1/reader.go	2025-08-21 19:15:53.000000000 +0000
@@ -1,13 +1,10 @@
 package kafka
 
 import (
-	"bufio"
-	"bytes"
 	"context"
 	"errors"
 	"fmt"
 	"io"
-	"log"
 	"math"
 	"sort"
 	"strconv"
@@ -18,52 +15,47 @@ import (
 
 const (
 	LastOffset  int64 = -1 // The most recent offset available for a partition.
-	FirstOffset       = -2 // The least recent offset available for a partition.
+	FirstOffset int64 = -2 // The least recent offset available for a partition.
 )
 
 const (
-	// defaultCommitRetries holds the number commit attempts to make
-	// before giving up
+	// defaultCommitRetries holds the number of commit attempts to make
+	// before giving up.
 	defaultCommitRetries = 3
 )
 
+const (
+	// defaultFetchMinBytes of 1 byte means that fetch requests are answered as
+	// soon as a single byte of data is available or the fetch request times out
+	// waiting for data to arrive.
+	defaultFetchMinBytes = 1
+)
+
 var (
 	errOnlyAvailableWithGroup = errors.New("unavailable when GroupID is not set")
 	errNotAvailableWithGroup  = errors.New("unavailable when GroupID is set")
 )
 
 const (
-	// defaultProtocolType holds the default protocol type documented in the
-	// kafka protocol
-	//
-	// See https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol#AGuideToTheKafkaProtocol-GroupMembershipAPI
-	defaultProtocolType = "consumer"
-
-	// defaultHeartbeatInterval contains the default time between heartbeats.  If
-	// the coordinator does not receive a heartbeat within the session timeout interval,
-	// the consumer will be considered dead and the coordinator will rebalance the
-	// group.
-	//
-	// As a rule, the heartbeat interval should be no greater than 1/3 the session timeout
-	defaultHeartbeatInterval = 3 * time.Second
-
-	// defaultSessionTimeout contains the default interval the coordinator will wait
-	// for a heartbeat before marking a consumer as dead
-	defaultSessionTimeout = 30 * time.Second
-
-	// defaultRebalanceTimeout contains the amount of time the coordinator will wait
-	// for consumers to issue a join group once a rebalance has been requested
-	defaultRebalanceTimeout = 30 * time.Second
-
-	// defaultRetentionTime holds the length of time a the consumer group will be
-	// saved by kafka
-	defaultRetentionTime = time.Hour * 24
+	// defaultReadBackoffMin/Max set the boundaries for how long the reader waits
+	// before polling for new messages.
+	defaultReadBackoffMin = 100 * time.Millisecond
+	defaultReadBackoffMax = 1 * time.Second
 )
 
 // Reader provides a high-level API for consuming messages from kafka.
 //
 // A Reader automatically manages reconnections to a kafka server, and
 // blocking methods have context support for asynchronous cancellations.
+//
+// Note that it is important to call `Close()` on a `Reader` when a process exits.
+// The kafka server needs a graceful disconnect to stop it from continuing to
+// attempt to send messages to the connected clients. A program that is terminated
+// with SIGINT (ctrl-c at the shell) or SIGTERM (as docker stop or a kubernetes
+// restart does) will not call `Close()` unless it handles the signal. This can
+// result in a delay when a new reader on the same topic connects (e.g. new
+// process started or new container running). Use a `signal.Notify` handler to
+// close the reader on process shutdown.
 type Reader struct {
 	// immutable fields of the reader
 	config ReaderConfig
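
A minimal sketch of the `signal.Notify` pattern recommended in the doc comment above, assuming a broker at `localhost:9092` and placeholder topic and group IDs:

package main

import (
	"context"
	"log"
	"os"
	"os/signal"
	"syscall"

	kafka "github.com/segmentio/kafka-go"
)

func main() {
	r := kafka.NewReader(kafka.ReaderConfig{
		Brokers: []string{"localhost:9092"},
		GroupID: "example-group",
		Topic:   "example-topic",
	})

	// Cancel the read loop when the process receives SIGINT or SIGTERM so
	// that Close() runs and the group member leaves gracefully.
	ctx, cancel := context.WithCancel(context.Background())
	sigs := make(chan os.Signal, 1)
	signal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM)
	go func() {
		<-sigs
		cancel()
	}()

	for {
		m, err := r.ReadMessage(ctx)
		if err != nil {
			break // context canceled or unrecoverable error
		}
		log.Printf("offset %d: %s", m.Offset, string(m.Value))
	}

	if err := r.Close(); err != nil {
		log.Fatal("failed to close reader: ", err)
	}
}
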
@@ -72,23 +64,26 @@ type Reader struct {
 	msgs chan readerMessage
 
 	// mutable fields of the reader (synchronized on the mutex)
-	mutex        sync.Mutex
-	join         sync.WaitGroup
-	cancel       context.CancelFunc
-	stop         context.CancelFunc
-	done         chan struct{}
-	commits      chan commitRequest
-	version      int64 // version holds the generation of the spawned readers
-	offset       int64
-	lag          int64
-	closed       bool
-	address      string // address of group coordinator
-	generationID int32  // generationID of group
-	memberID     string // memberID of group
-
-	// offsetStash should only be managed by the commitLoopInterval.  We store
-	// it here so that it survives rebalances
-	offsetStash offsetStash
+	mutex   sync.Mutex
+	join    sync.WaitGroup
+	cancel  context.CancelFunc
+	stop    context.CancelFunc
+	done    chan struct{}
+	commits chan commitRequest
+	version int64 // version holds the generation of the spawned readers
+	offset  int64
+	lag     int64
+	closed  bool
+
+	// Without a group subscription (when Reader.config.GroupID == ""),
+	// when errors occur, the Reader gets a synthetic readerMessage with
+	// a non-nil err set. With group subscriptions however, when an error
+	// occurs in Reader.run, there's no reader running (sic, cf. reader vs.
+	// Reader) and there's no way to let the high-level methods like
+	// FetchMessage know that an error indeed occurred. If an error in run
+	// occurs, it will be non-block-sent to this unbuffered channel, where
+	// the high-level methods can select{} on it and notify the caller.
+	runError chan error
 
 	// reader stats are all made of atomic values, no need for synchronization.
 	once  uint32
@@ -101,579 +96,55 @@ type Reader struct {
 // useConsumerGroup indicates whether the Reader is part of a consumer group.
 func (r *Reader) useConsumerGroup() bool { return r.config.GroupID != "" }
 
-// useSyncCommits indicates whether the Reader is configured to perform sync or
-// async commits.
-func (r *Reader) useSyncCommits() bool { return r.config.CommitInterval == 0 }
-
-// membership returns the group generationID and memberID of the reader.
-//
-// Only used when config.GroupID != ""
-func (r *Reader) membership() (generationID int32, memberID string) {
-	r.mutex.Lock()
-	generationID = r.generationID
-	memberID = r.memberID
-	r.mutex.Unlock()
-	return
-}
-
-// lookupCoordinator scans the brokers and looks up the address of the
-// coordinator for the group.
-//
-// Only used when config.GroupID != ""
-func (r *Reader) lookupCoordinator() (string, error) {
-	conn, err := r.connect()
-	if err != nil {
-		return "", fmt.Errorf("unable to coordinator to any connect for group, %v: %v\n", r.config.GroupID, err)
-	}
-	defer conn.Close()
-
-	out, err := conn.findCoordinator(findCoordinatorRequestV0{
-		CoordinatorKey: r.config.GroupID,
-	})
-	if err != nil {
-		return "", fmt.Errorf("unable to find coordinator for group, %v: %v", r.config.GroupID, err)
-	}
-
-	address := fmt.Sprintf("%v:%v", out.Coordinator.Host, out.Coordinator.Port)
-	return address, nil
-}
-
-// refreshCoordinator updates the value of r.address
-func (r *Reader) refreshCoordinator() (err error) {
-	const (
-		backoffDelayMin = 100 * time.Millisecond
-		backoffDelayMax = 1 * time.Second
-	)
-
-	for attempt := 0; true; attempt++ {
-		if attempt != 0 {
-			if !sleep(r.stctx, backoff(attempt, backoffDelayMin, backoffDelayMax)) {
-				return r.stctx.Err()
-			}
-		}
-
-		address, err := r.lookupCoordinator()
-		if err != nil {
-			continue
-		}
-
-		r.mutex.Lock()
-		oldAddress := r.address
-		r.address = address
-		r.mutex.Unlock()
-
-		if address != oldAddress {
-			r.withLogger(func(l *log.Logger) {
-				l.Printf("coordinator for group, %v, set to %v\n", r.config.GroupID, address)
-			})
-		}
-
-		break
-	}
-
-	return nil
-}
-
-// makejoinGroupRequestV1 handles the logic of constructing a joinGroup
-// request
-func (r *Reader) makejoinGroupRequestV1() (joinGroupRequestV1, error) {
-	_, memberID := r.membership()
-
-	request := joinGroupRequestV1{
-		GroupID:          r.config.GroupID,
-		MemberID:         memberID,
-		SessionTimeout:   int32(r.config.SessionTimeout / time.Millisecond),
-		RebalanceTimeout: int32(r.config.RebalanceTimeout / time.Millisecond),
-		ProtocolType:     defaultProtocolType,
-	}
-
-	for _, balancer := range r.config.GroupBalancers {
-		userData, err := balancer.UserData()
-		if err != nil {
-			return joinGroupRequestV1{}, fmt.Errorf("unable to construct protocol metadata for member, %v: %v\n", balancer.ProtocolName(), err)
-		}
-		request.GroupProtocols = append(request.GroupProtocols, joinGroupRequestGroupProtocolV1{
-			ProtocolName: balancer.ProtocolName(),
-			ProtocolMetadata: groupMetadata{
-				Version:  1,
-				Topics:   []string{r.config.Topic},
-				UserData: userData,
-			}.bytes(),
-		})
-	}
-
-	return request, nil
-}
-
-// makeMemberProtocolMetadata maps encoded member metadata ([]byte) into []GroupMember
-func (r *Reader) makeMemberProtocolMetadata(in []joinGroupResponseMemberV1) ([]GroupMember, error) {
-	members := make([]GroupMember, 0, len(in))
-	for _, item := range in {
-		metadata := groupMetadata{}
-		reader := bufio.NewReader(bytes.NewReader(item.MemberMetadata))
-		if remain, err := (&metadata).readFrom(reader, len(item.MemberMetadata)); err != nil || remain != 0 {
-			return nil, fmt.Errorf("unable to read metadata for member, %v: %v\n", item.MemberID, err)
-		}
-
-		members = append(members, GroupMember{
-			ID:       item.MemberID,
-			Topics:   metadata.Topics,
-			UserData: metadata.UserData,
-		})
-	}
-	return members, nil
-}
-
-// partitionReader is an internal interface used to simplify unit testing
-type partitionReader interface {
-	// ReadPartitions mirrors Conn.ReadPartitions
-	ReadPartitions(topics ...string) (partitions []Partition, err error)
-}
-
-// assignTopicPartitions uses the selected GroupBalancer to assign members to
-// their various partitions
-func (r *Reader) assignTopicPartitions(conn partitionReader, group joinGroupResponseV1) (GroupMemberAssignments, error) {
-	r.withLogger(func(l *log.Logger) {
-		l.Println("selected as leader for group,", r.config.GroupID)
-	})
-
-	balancer, ok := findGroupBalancer(group.GroupProtocol, r.config.GroupBalancers)
-	if !ok {
-		return nil, fmt.Errorf("unable to find selected balancer, %v, for group, %v", group.GroupProtocol, r.config.GroupID)
-	}
-
-	members, err := r.makeMemberProtocolMetadata(group.Members)
-	if err != nil {
-		return nil, fmt.Errorf("unable to construct MemberProtocolMetadata: %v", err)
-	}
-
-	topics := extractTopics(members)
-	partitions, err := conn.ReadPartitions(topics...)
-	if err != nil {
-		return nil, fmt.Errorf("unable to read partitions: %v", err)
-	}
-
-	r.withLogger(func(l *log.Logger) {
-		l.Printf("using '%v' balancer to assign group, %v\n", group.GroupProtocol, r.config.GroupID)
-		for _, member := range members {
-			l.Printf("found member: %v/%#v", member.ID, member.UserData)
-		}
-		for _, partition := range partitions {
-			l.Printf("found topic/partition: %v/%v", partition.Topic, partition.ID)
-		}
-	})
-
-	return balancer.AssignGroups(members, partitions), nil
-}
-
-func (r *Reader) leaveGroup(conn *Conn) error {
-	_, memberID := r.membership()
-	_, err := conn.leaveGroup(leaveGroupRequestV0{
-		GroupID:  r.config.GroupID,
-		MemberID: memberID,
-	})
-	if err != nil {
-		return fmt.Errorf("leave group failed for group, %v, and member, %v: %v", r.config.GroupID, memberID, err)
-	}
-
-	return nil
-}
-
-// joinGroup attempts to join the reader to the consumer group.
-// Returns GroupMemberAssignments is this Reader was selected as
-// the leader.  Otherwise, GroupMemberAssignments will be nil.
-//
-// Possible kafka error codes returned:
-//  * GroupLoadInProgress:
-//  * GroupCoordinatorNotAvailable:
-//  * NotCoordinatorForGroup:
-//  * InconsistentGroupProtocol:
-//  * InvalidSessionTimeout:
-//  * GroupAuthorizationFailed:
-func (r *Reader) joinGroup() (GroupMemberAssignments, error) {
-	conn, err := r.coordinator()
-	if err != nil {
-		return nil, err
-	}
-	defer conn.Close()
-
-	request, err := r.makejoinGroupRequestV1()
-	if err != nil {
-		return nil, err
-	}
-
-	response, err := conn.joinGroup(request)
-	if err != nil {
-		switch err {
-		case UnknownMemberId:
-			r.mutex.Lock()
-			r.memberID = ""
-			r.mutex.Unlock()
-			return nil, fmt.Errorf("joinGroup failed: %v", err)
-
-		default:
-			return nil, fmt.Errorf("joinGroup failed: %v", err)
-		}
-	}
-
-	// Extract our membership and generationID from the response
-	r.mutex.Lock()
-	oldGenerationID := r.generationID
-	oldMemberID := r.memberID
-	r.generationID = response.GenerationID
-	r.memberID = response.MemberID
-	r.mutex.Unlock()
-
-	if oldGenerationID != response.GenerationID || oldMemberID != response.MemberID {
-		r.withLogger(func(l *log.Logger) {
-			l.Printf("response membership changed.  generationID: %v => %v, memberID: '%v' => '%v'\n",
-				oldGenerationID,
-				response.GenerationID,
-				oldMemberID,
-				response.MemberID,
-			)
-		})
-	}
-
-	var assignments GroupMemberAssignments
-	if iAmLeader := response.MemberID == response.LeaderID; iAmLeader {
-		v, err := r.assignTopicPartitions(conn, response)
-		if err != nil {
-			_ = r.leaveGroup(conn)
-			return nil, err
-		}
-		assignments = v
-
-		r.withLogger(func(l *log.Logger) {
-			for memberID, assignment := range assignments {
-				for topic, partitions := range assignment {
-					l.Printf("assigned member/topic/partitions %v/%v/%v\n", memberID, topic, partitions)
-				}
-			}
-		})
-	}
-
-	r.withLogger(func(l *log.Logger) {
-		l.Printf("joinGroup succeeded for response, %v.  generationID=%v, memberID=%v\n", r.config.GroupID, response.GenerationID, response.MemberID)
-	})
-
-	return assignments, nil
-}
-
-func (r *Reader) makeSyncGroupRequestV0(memberAssignments GroupMemberAssignments) syncGroupRequestV0 {
-	generationID, memberID := r.membership()
-	request := syncGroupRequestV0{
-		GroupID:      r.config.GroupID,
-		GenerationID: generationID,
-		MemberID:     memberID,
-	}
-
-	if memberAssignments != nil {
-		request.GroupAssignments = make([]syncGroupRequestGroupAssignmentV0, 0, 1)
-
-		for memberID, topics := range memberAssignments {
-			topics32 := make(map[string][]int32)
-			for topic, partitions := range topics {
-				partitions32 := make([]int32, len(partitions))
-				for i := range partitions {
-					partitions32[i] = int32(partitions[i])
-				}
-				topics32[topic] = partitions32
-			}
-			request.GroupAssignments = append(request.GroupAssignments, syncGroupRequestGroupAssignmentV0{
-				MemberID: memberID,
-				MemberAssignments: groupAssignment{
-					Version: 1,
-					Topics:  topics32,
-				}.bytes(),
-			})
-		}
-
-		r.withErrorLogger(func(logger *log.Logger) {
-			logger.Printf("Syncing %d assignments for generation %d as member %s", len(request.GroupAssignments), generationID, memberID)
-		})
-	}
-
-	return request
-}
-
-// syncGroup completes the consumer group handshake by accepting the
-// memberAssignments (if this Reader is the leader) and returning this
-// Readers subscriptions topic => partitions
-//
-// Possible kafka error codes returned:
-//  * GroupCoordinatorNotAvailable:
-//  * NotCoordinatorForGroup:
-//  * IllegalGeneration:
-//  * RebalanceInProgress:
-//  * GroupAuthorizationFailed:
-func (r *Reader) syncGroup(memberAssignments GroupMemberAssignments) (map[string][]int32, error) {
-	conn, err := r.coordinator()
-	if err != nil {
-		return nil, err
-	}
-	defer conn.Close()
-
-	request := r.makeSyncGroupRequestV0(memberAssignments)
-	response, err := conn.syncGroups(request)
-	if err != nil {
-		switch err {
-		case RebalanceInProgress:
-			// don't leave the group
-			return nil, fmt.Errorf("syncGroup failed: %v", err)
-
-		case UnknownMemberId:
-			r.mutex.Lock()
-			r.memberID = ""
-			r.mutex.Unlock()
-			_ = r.leaveGroup(conn)
-			return nil, fmt.Errorf("syncGroup failed: %v", err)
-
-		default:
-			_ = r.leaveGroup(conn)
-			return nil, fmt.Errorf("syncGroup failed: %v", err)
-		}
-	}
-
-	assignments := groupAssignment{}
-	reader := bufio.NewReader(bytes.NewReader(response.MemberAssignments))
-	if _, err := (&assignments).readFrom(reader, len(response.MemberAssignments)); err != nil {
-		_ = r.leaveGroup(conn)
-		return nil, fmt.Errorf("unable to read SyncGroup response for group, %v: %v\n", r.config.GroupID, err)
-	}
-
-	if len(assignments.Topics) == 0 {
-		generation, memberID := r.membership()
-		return nil, fmt.Errorf("received empty assignments for group, %v as member %s for generation %d", r.config.GroupID, memberID, generation)
+func (r *Reader) getTopics() []string {
+	if len(r.config.GroupTopics) > 0 {
+		return r.config.GroupTopics[:]
 	}
 
-	r.withLogger(func(l *log.Logger) {
-		l.Printf("sync group finished for group, %v\n", r.config.GroupID)
-	})
-
-	return assignments.Topics, nil
+	return []string{r.config.Topic}
 }
 
-func (r *Reader) rebalance() (map[string][]int32, error) {
-	r.withLogger(func(l *log.Logger) {
-		l.Printf("rebalancing consumer group, %v", r.config.GroupID)
-	})
-
-	if err := r.refreshCoordinator(); err != nil {
-		return nil, err
-	}
-
-	members, err := r.joinGroup()
-	if err != nil {
-		return nil, err
-	}
-
-	assignments, err := r.syncGroup(members)
-	if err != nil {
-		return nil, err
-	}
-
-	return assignments, nil
-}
+// useSyncCommits indicates whether the Reader is configured to perform sync or
+// async commits.
+func (r *Reader) useSyncCommits() bool { return r.config.CommitInterval == 0 }
 
-func (r *Reader) unsubscribe() error {
+func (r *Reader) unsubscribe() {
 	r.cancel()
 	r.join.Wait()
-	return nil
-}
-
-func (r *Reader) fetchOffsets(subs map[string][]int32) (map[int]int64, error) {
-	conn, err := r.coordinator()
-	if err != nil {
-		return nil, err
-	}
-	defer conn.Close()
-
-	partitions := subs[r.config.Topic]
-	offsets, err := conn.offsetFetch(offsetFetchRequestV1{
-		GroupID: r.config.GroupID,
-		Topics: []offsetFetchRequestV1Topic{
-			{
-				Topic:      r.config.Topic,
-				Partitions: partitions,
-			},
-		},
-	})
-	if err != nil {
-		return nil, err
-	}
-
-	offsetsByPartition := map[int]int64{}
-	for _, pr := range offsets.Responses[0].PartitionResponses {
-		for _, partition := range partitions {
-			if partition == pr.Partition {
-				offset := pr.Offset
-				if offset < 0 {
-					// No offset stored
-					offset = FirstOffset
-				}
-				offsetsByPartition[int(partition)] = offset
+	// it would be interesting to drain the r.msgs channel at this point since
+	// it will contain buffered messages for partitions that may not be
+	// re-assigned to this reader in the next consumer group generation.
+	// however, draining the channel could race with the client calling
+	// ReadMessage, which could result in messages delivered and/or committed
+	// with gaps in the offset.  for now, we will err on the side of caution and
+	// potentially have those messages be reprocessed in the next generation by
+	// another consumer to avoid such a race.
+}
+
+func (r *Reader) subscribe(allAssignments map[string][]PartitionAssignment) {
+	offsets := make(map[topicPartition]int64)
+	for topic, assignments := range allAssignments {
+		for _, assignment := range assignments {
+			key := topicPartition{
+				topic:     topic,
+				partition: int32(assignment.ID),
 			}
+			offsets[key] = assignment.Offset
 		}
 	}
 
-	return offsetsByPartition, nil
-}
-
-func (r *Reader) subscribe(subs map[string][]int32) error {
-	if len(subs[r.config.Topic]) == 0 {
-		return nil
-	}
-
-	offsetsByPartition, err := r.fetchOffsets(subs)
-	if err != nil {
-		if conn, err := r.coordinator(); err == nil {
-			// make an attempt at leaving the group
-			_ = r.leaveGroup(conn)
-			conn.Close()
-		}
-
-		return err
-	}
-
-	r.mutex.Lock()
-	r.start(offsetsByPartition)
-	r.mutex.Unlock()
-
-	r.withLogger(func(l *log.Logger) {
-		l.Printf("subscribed to partitions: %+v", offsetsByPartition)
-	})
-
-	return nil
-}
-
-// connect returns a connection to ANY broker
-func (r *Reader) connect() (conn *Conn, err error) {
-	for _, broker := range r.config.Brokers {
-		if conn, err = r.config.Dialer.Dial("tcp", broker); err == nil {
-			return
-		}
-	}
-	return // err will be non-nil
-}
-
-// coordinator returns a connection to the coordinator for this group
-func (r *Reader) coordinator() (*Conn, error) {
 	r.mutex.Lock()
-	address := r.address
+	r.start(offsets)
 	r.mutex.Unlock()
 
-	conn, err := r.config.Dialer.DialContext(r.stctx, "tcp", address)
-	if err != nil {
-		return nil, fmt.Errorf("unable to connect to coordinator, %v", address)
-	}
-
-	return conn, nil
-}
-
-func (r *Reader) waitThrottleTime(throttleTimeMS int32) {
-	if throttleTimeMS == 0 {
-		return
-	}
-
-	t := time.NewTimer(time.Duration(throttleTimeMS) * time.Millisecond)
-	defer t.Stop()
-
-	select {
-	case <-r.stctx.Done():
-		return
-	case <-t.C:
-	}
-}
-
-// heartbeat sends heartbeat to coordinator at the interval defined by
-// ReaderConfig.HeartbeatInterval
-func (r *Reader) heartbeat(conn *Conn) error {
-	generationID, memberID := r.membership()
-	if generationID == 0 && memberID == "" {
-		return nil
-	}
-
-	_, err := conn.heartbeat(heartbeatRequestV0{
-		GroupID:      r.config.GroupID,
-		GenerationID: generationID,
-		MemberID:     memberID,
+	r.withLogger(func(l Logger) {
+		l.Printf("subscribed to topics and partitions: %+v", offsets)
 	})
-	if err != nil {
-		return fmt.Errorf("heartbeat failed: %v", err)
-	}
-
-	return nil
-}
-
-func (r *Reader) heartbeatLoop(conn *Conn) func(stop <-chan struct{}) {
-	return func(stop <-chan struct{}) {
-		r.withLogger(func(l *log.Logger) {
-			l.Printf("started heartbeat for group, %v [%v]", r.config.GroupID, r.config.HeartbeatInterval)
-		})
-		defer r.withLogger(func(l *log.Logger) {
-			l.Println("stopped heartbeat for group,", r.config.GroupID)
-		})
-
-		ticker := time.NewTicker(r.config.HeartbeatInterval)
-		defer ticker.Stop()
-
-		for {
-			select {
-			case <-ticker.C:
-				if err := r.heartbeat(conn); err != nil {
-					return
-				}
-
-			case <-stop:
-				return
-			}
-		}
-	}
-}
-
-type offsetCommitter interface {
-	offsetCommit(request offsetCommitRequestV2) (offsetCommitResponseV2, error)
-}
-
-func (r *Reader) commitOffsets(conn offsetCommitter, offsetStash offsetStash) error {
-	if len(offsetStash) == 0 {
-		return nil
-	}
-
-	generationID, memberID := r.membership()
-	request := offsetCommitRequestV2{
-		GroupID:       r.config.GroupID,
-		GenerationID:  generationID,
-		MemberID:      memberID,
-		RetentionTime: int64(r.config.RetentionTime / time.Millisecond),
-	}
-
-	for topic, partitions := range offsetStash {
-		t := offsetCommitRequestV2Topic{Topic: topic}
-		for partition, offset := range partitions {
-			t.Partitions = append(t.Partitions, offsetCommitRequestV2Partition{
-				Partition: int32(partition),
-				Offset:    offset,
-			})
-		}
-		request.Topics = append(request.Topics, t)
-	}
-
-	if _, err := conn.offsetCommit(request); err != nil {
-		return fmt.Errorf("unable to commit offsets for group, %v: %v", r.config.GroupID, err)
-	}
-
-	r.withLogger(func(l *log.Logger) {
-		l.Printf("committed offsets: %v", offsetStash)
-	})
-
-	return nil
 }
 
 // commitOffsetsWithRetry attempts to commit the specified offsets and retries
-// up to the specified number of times
-func (r *Reader) commitOffsetsWithRetry(conn offsetCommitter, offsetStash offsetStash, retries int) (err error) {
+// up to the specified number of times.
+func (r *Reader) commitOffsetsWithRetry(gen *Generation, offsetStash offsetStash, retries int) (err error) {
 	const (
 		backoffDelayMin = 100 * time.Millisecond
 		backoffDelayMax = 5 * time.Second
@@ -686,7 +157,7 @@ func (r *Reader) commitOffsetsWithRetry(
 			}
 		}
 
-		if err = r.commitOffsets(conn, offsetStash); err == nil {
+		if err = gen.CommitOffsets(offsetStash); err == nil {
 			return
 		}
 	}
@@ -694,10 +165,10 @@ func (r *Reader) commitOffsetsWithRetry(
 	return // err will not be nil
 }
 
-// offsetStash holds offsets by topic => partition => offset
+// offsetStash holds offsets by topic => partition => offset.
 type offsetStash map[string]map[int]int64
 
-// merge updates the offsetStash with the offsets from the provided messages
+// merge updates the offsetStash with the offsets from the provided messages.
 func (o offsetStash) merge(commits []commit) {
 	for _, c := range commits {
 		offsetsByPartition, ok := o[c.topic]
@@ -712,47 +183,79 @@ func (o offsetStash) merge(commits []com
 	}
 }
 
-// reset clears the contents of the offsetStash
+// reset clears the contents of the offsetStash.
 func (o offsetStash) reset() {
 	for key := range o {
 		delete(o, key)
 	}
 }
 
-// commitLoopImmediate handles each commit synchronously
-func (r *Reader) commitLoopImmediate(conn offsetCommitter, stop <-chan struct{}) {
-	offsetsByTopicAndPartition := offsetStash{}
+// commitLoopImmediate handles each commit synchronously.
+func (r *Reader) commitLoopImmediate(ctx context.Context, gen *Generation) {
+	offsets := offsetStash{}
 
 	for {
 		select {
-		case <-stop:
+		case <-ctx.Done():
+			// drain the commit channel and prepare a single, final commit.
+			// the commit will combine any outstanding requests and the result
+			// will be sent back to all the callers of CommitMessages so that
+			// they can return.
+			var errchs []chan<- error
+			for hasCommits := true; hasCommits; {
+				select {
+				case req := <-r.commits:
+					offsets.merge(req.commits)
+					errchs = append(errchs, req.errch)
+				default:
+					hasCommits = false
+				}
+			}
+			err := r.commitOffsetsWithRetry(gen, offsets, defaultCommitRetries)
+			for _, errch := range errchs {
+				// NOTE : this will be a buffered channel and will not block.
+				errch <- err
+			}
 			return
 
 		case req := <-r.commits:
-			offsetsByTopicAndPartition.merge(req.commits)
-			req.errch <- r.commitOffsetsWithRetry(conn, offsetsByTopicAndPartition, defaultCommitRetries)
-			offsetsByTopicAndPartition.reset()
+			offsets.merge(req.commits)
+			req.errch <- r.commitOffsetsWithRetry(gen, offsets, defaultCommitRetries)
+			offsets.reset()
 		}
 	}
 }
 
 // commitLoopInterval handles each commit asynchronously with a period defined
-// by ReaderConfig.CommitInterval
-func (r *Reader) commitLoopInterval(conn offsetCommitter, stop <-chan struct{}) {
-	ticker := time.NewTicker(r.config.HeartbeatInterval)
+// by ReaderConfig.CommitInterval.
+func (r *Reader) commitLoopInterval(ctx context.Context, gen *Generation) {
+	ticker := time.NewTicker(r.config.CommitInterval)
 	defer ticker.Stop()
 
+	// the offset stash should not survive rebalances b/c the consumer may
+	// receive new assignments.
+	offsets := offsetStash{}
+
 	commit := func() {
-		if err := r.commitOffsetsWithRetry(conn, r.offsetStash, defaultCommitRetries); err != nil {
-			r.withErrorLogger(func(l *log.Logger) { l.Print(err) })
+		if err := r.commitOffsetsWithRetry(gen, offsets, defaultCommitRetries); err != nil {
+			r.withErrorLogger(func(l Logger) { l.Printf("%v", err) })
 		} else {
-			r.offsetStash.reset()
+			offsets.reset()
 		}
 	}
 
 	for {
 		select {
-		case <-stop:
+		case <-ctx.Done():
+			// drain the commit channel in order to prepare the final commit.
+			for hasCommits := true; hasCommits; {
+				select {
+				case req := <-r.commits:
+					offsets.merge(req.commits)
+				default:
+					hasCommits = false
+				}
+			}
 			commit()
 			return
 
@@ -760,89 +263,87 @@ func (r *Reader) commitLoopInterval(conn
 			commit()
 
 		case req := <-r.commits:
-			r.offsetStash.merge(req.commits)
+			offsets.merge(req.commits)
 		}
 	}
 }
 
-// commitLoop processes commits off the commit chan
-func (r *Reader) commitLoop(conn *Conn) func(stop <-chan struct{}) {
-	return func(stop <-chan struct{}) {
-		r.withLogger(func(l *log.Logger) {
-			l.Println("started commit for group,", r.config.GroupID)
-		})
-		defer r.withLogger(func(l *log.Logger) {
-			l.Println("stopped commit for group,", r.config.GroupID)
-		})
-
-		if r.config.CommitInterval == 0 {
-			r.commitLoopImmediate(conn, stop)
-		} else {
-			r.commitLoopInterval(conn, stop)
-		}
-	}
-}
-
-// handshake performs the necessary incantations to join this Reader to the desired
-// consumer group.  handshake will be called whenever the group is disrupted
-// (member join, member leave, coordinator changed, etc)
-func (r *Reader) handshake() error {
-	// always clear prior to subscribe
-	r.unsubscribe()
-
-	// rebalance and fetch assignments
-	assignments, err := r.rebalance()
-	if err != nil {
-		return fmt.Errorf("rebalance failed for consumer group, %v: %v", r.config.GroupID, err)
-	}
-
-	conn, err := r.coordinator()
-	if err != nil {
-		return fmt.Errorf("heartbeat: unable to connect to coordinator: %v", err)
-	}
-	defer conn.Close()
-
-	rg := &runGroup{}
-	rg = rg.WithContext(r.stctx)
-	rg.Go(r.heartbeatLoop(conn))
-	rg.Go(r.commitLoop(conn))
+// commitLoop processes commits off the commit chan.
+func (r *Reader) commitLoop(ctx context.Context, gen *Generation) {
+	r.withLogger(func(l Logger) {
+		l.Printf("started commit for group %s\n", r.config.GroupID)
+	})
+	defer r.withLogger(func(l Logger) {
+		l.Printf("stopped commit for group %s\n", r.config.GroupID)
+	})
 
-	// subscribe to assignments
-	if err := r.subscribe(assignments); err != nil {
-		rg.Stop()
-		return fmt.Errorf("subscribe failed for consumer group, %v: %v\n", r.config.GroupID, err)
+	if r.useSyncCommits() {
+		r.commitLoopImmediate(ctx, gen)
+	} else {
+		r.commitLoopInterval(ctx, gen)
 	}
-
-	rg.Wait()
-
-	return nil
 }
 
 // run provides the main consumer group management loop.  Each iteration performs the
 // handshake to join the Reader to the consumer group.
-func (r *Reader) run() {
+//
+// This function is responsible for closing the consumer group upon exit.
+func (r *Reader) run(cg *ConsumerGroup) {
 	defer close(r.done)
+	defer cg.Close()
 
-	if !r.useConsumerGroup() {
-		return
-	}
-
-	r.withLogger(func(l *log.Logger) {
+	r.withLogger(func(l Logger) {
 		l.Printf("entering loop for consumer group, %v\n", r.config.GroupID)
 	})
 
 	for {
-		if err := r.handshake(); err != nil {
-			r.withErrorLogger(func(l *log.Logger) {
-				l.Println(err)
+		// Limit the number of attempts at waiting for the next
+		// consumer generation.
+		var err error
+		var gen *Generation
+		for attempt := 1; attempt <= r.config.MaxAttempts; attempt++ {
+			gen, err = cg.Next(r.stctx)
+			if err == nil {
+				break
+			}
+			if errors.Is(err, r.stctx.Err()) {
+				return
+			}
+			r.stats.errors.observe(1)
+			r.withErrorLogger(func(l Logger) {
+				l.Printf("%v", err)
 			})
+			// Continue with next attempt...
 		}
-
-		select {
-		case <-r.stctx.Done():
-			return
-		default:
+		if err != nil {
+			// All attempts have failed.
+			select {
+			case r.runError <- err:
+				// If somebody's receiving on the runError, let
+				// them know the error occurred.
+			default:
+				// Otherwise, don't block to allow healing.
+			}
+			continue
 		}
+
+		r.stats.rebalances.observe(1)
+
+		r.subscribe(gen.Assignments)
+
+		gen.Start(func(ctx context.Context) {
+			r.commitLoop(ctx, gen)
+		})
+		gen.Start(func(ctx context.Context) {
+			// wait for the generation to end and then unsubscribe.
+			select {
+			case <-ctx.Done():
+				// continue to next generation
+			case <-r.stctx.Done():
+				// this will be the last loop because the reader is closed.
+			}
+			r.unsubscribe()
+		})
 	}
 }
 
@@ -856,6 +357,11 @@ type ReaderConfig struct {
 	// Partition should NOT be specified e.g. 0
 	GroupID string
 
+	// GroupTopics allows specifying multiple topics, but can only be used in
+	// combination with GroupID, as it is a consumer-group feature. As such, if
+	// GroupID is set, then either Topic or GroupTopics must be defined.
+	GroupTopics []string
+
 	// The topic to read messages from.
 	Topic string
 
@@ -871,14 +377,32 @@ type ReaderConfig struct {
 	// set.
 	QueueCapacity int
 
-	// Min and max number of bytes to fetch from kafka in each request.
+	// MinBytes indicates to the broker the minimum batch size that the consumer
+	// will accept. Setting a high minimum when consuming from a low-volume topic
+	// may result in delayed delivery when the broker does not have enough data to
+	// satisfy the defined minimum.
+	//
+	// Default: 1
 	MinBytes int
+
+	// MaxBytes indicates to the broker the maximum batch size that the consumer
+	// will accept. The broker will truncate a message to satisfy this maximum, so
+	// choose a value that is high enough for your largest message size.
+	//
+	// Default: 1MB
 	MaxBytes int
 
 	// Maximum amount of time to wait for new data to come when fetching batches
 	// of messages from kafka.
+	//
+	// Default: 10s
 	MaxWait time.Duration
 
+	// ReadBatchTimeout is the amount of time to wait to fetch a message from a batch of kafka messages.
+	//
+	// Default: 10s
+	ReadBatchTimeout time.Duration
+
 	// ReadLagInterval sets the frequency at which the reader lag is updated.
 	// Setting this field to a negative value disables lag reporting.
 	ReadLagInterval time.Duration
@@ -903,11 +427,24 @@ type ReaderConfig struct {
 	// CommitInterval indicates the interval at which offsets are committed to
 	// the broker.  If 0, commits will be handled synchronously.
 	//
-	// Defaults to 1s
+	// Default: 0
 	//
 	// Only used when GroupID is set
 	CommitInterval time.Duration
 
+	// PartitionWatchInterval indicates how often a reader checks for partition changes.
+	// If a reader sees a partition change (such as a partition add) it will rebalance the group
+	// picking up new partitions.
+	//
+	// Default: 5s
+	//
+	// Only used when GroupID is set and WatchPartitionChanges is set.
+	PartitionWatchInterval time.Duration
+
+	// WatchPartitionChanges is used to inform kafka-go that a consumer group should be
+	// polling the brokers and rebalancing if any partition changes happen to the topic.
+	WatchPartitionChanges bool
+
 	// SessionTimeout optionally sets the length of time that may pass without a heartbeat
 	// before the coordinator considers the consumer dead and initiates a rebalance.
 	//
@@ -925,28 +462,118 @@ type ReaderConfig struct {
 	// Only used when GroupID is set
 	RebalanceTimeout time.Duration
 
+	// JoinGroupBackoff optionally sets the length of time to wait between re-joining
+	// the consumer group after an error.
+	//
+	// Default: 5s
+	JoinGroupBackoff time.Duration
+
 	// RetentionTime optionally sets the length of time the consumer group will be saved
-	// by the broker
+	// by the broker. -1 will disable the setting and leave the
+	// retention up to the broker's offsets.retention.minutes property. By
+	// default, that setting is 1 day for kafka < 2.0 and 7 days for kafka >= 2.0.
 	//
-	// Default: 24h
+	// Default: -1
 	//
 	// Only used when GroupID is set
 	RetentionTime time.Duration
 
+	// StartOffset determines from which offset the consumer group should begin
+	// consuming when it finds a partition without a committed offset.  If
+	// non-zero, it must be set to one of FirstOffset or LastOffset.
+	//
+	// Default: FirstOffset
+	//
+	// Only used when GroupID is set
+	StartOffset int64
+
+	// ReadBackoffMin optionally sets the smallest amount of time the reader will wait before
+	// polling for new messages.
+	//
+	// Default: 100ms
+	ReadBackoffMin time.Duration
+
+	// ReadBackoffMax optionally sets the maximum amount of time the reader will wait before
+	// polling for new messages.
+	//
+	// Default: 1s
+	ReadBackoffMax time.Duration
+
 	// If not nil, specifies a logger used to report internal changes within the
 	// reader.
-	Logger *log.Logger
+	Logger Logger
 
 	// ErrorLogger is the logger used to report errors. If nil, the reader falls
 	// back to using Logger instead.
-	ErrorLogger *log.Logger
+	ErrorLogger Logger
+
+	// IsolationLevel controls the visibility of transactional records.
+	// ReadUncommitted makes all records visible. With ReadCommitted only
+	// non-transactional and committed records are visible.
+	IsolationLevel IsolationLevel
+
+	// Limit of how many connection attempts will be made before returning an error.
+	//
+	// The default is to try 3 times.
+	MaxAttempts int
+
+	// OffsetOutOfRangeError indicates that the reader should return an error in
+	// the event of an OffsetOutOfRange error, rather than retrying indefinitely.
+	// This flag is being added to retain backwards-compatibility, so it will be
+	// removed in a future version of kafka-go.
+	OffsetOutOfRangeError bool
+}
+
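// Illustrative sketch, not part of the diff above: a consumer-group reader
// built from the fields documented in this struct. The broker address, group
// ID and topic names are placeholders, and the snippet assumes the package is
// imported as kafka from "github.com/segmentio/kafka-go" together with "time".
func newExampleGroupReader() *kafka.Reader {
	return kafka.NewReader(kafka.ReaderConfig{
		Brokers:        []string{"localhost:9092"},
		GroupID:        "example-group",                // required whenever GroupTopics is used
		GroupTopics:    []string{"orders", "payments"}, // consumer-group alternative to Topic
		MinBytes:       1,                              // deliver as soon as any data is available
		MaxBytes:       10e6,                           // must be large enough for the biggest message
		MaxWait:        500 * time.Millisecond,         // upper bound on how long the broker holds a fetch
		CommitInterval: time.Second,                    // 0 would make commits synchronous
		StartOffset:    kafka.FirstOffset,              // used for partitions with no committed offset
	})
}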
+// Validate method validates ReaderConfig properties.
+func (config *ReaderConfig) Validate() error {
+	if len(config.Brokers) == 0 {
+		return errors.New("cannot create a new kafka reader with an empty list of broker addresses")
+	}
+
+	if config.Partition < 0 || config.Partition >= math.MaxInt32 {
+		return fmt.Errorf("partition number out of bounds: %d", config.Partition)
+	}
+
+	if config.MinBytes < 0 {
+		return fmt.Errorf("invalid negative minimum batch size (min = %d)", config.MinBytes)
+	}
+
+	if config.MaxBytes < 0 {
+		return fmt.Errorf("invalid negative maximum batch size (max = %d)", config.MaxBytes)
+	}
+
+	if config.GroupID != "" {
+		if config.Partition != 0 {
+			return errors.New("either Partition or GroupID may be specified, but not both")
+		}
+
+		if len(config.Topic) == 0 && len(config.GroupTopics) == 0 {
+			return errors.New("either Topic or GroupTopics must be specified with GroupID")
+		}
+	} else if len(config.Topic) == 0 {
+		return errors.New("cannot create a new kafka reader with an empty topic")
+	}
+
+	if config.MinBytes > config.MaxBytes {
+		return fmt.Errorf("minimum batch size greater than the maximum (min = %d, max = %d)", config.MinBytes, config.MaxBytes)
+	}
+
+	if config.ReadBackoffMax < 0 {
+		return fmt.Errorf("ReadBackoffMax out of bounds: %d", config.ReadBackoffMax)
+	}
+
+	if config.ReadBackoffMin < 0 {
+		return fmt.Errorf("ReadBackoffMin out of bounds: %d", config.ReadBackoffMin)
+	}
+
+	return nil
 }
 
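// Illustrative sketch, not part of the diff above: with the checks factored
// into Validate, a caller can reject a bad configuration as an ordinary error
// instead of letting NewReader panic. Broker address and topic are
// placeholders; the kafka import alias is assumed as in the previous example.
func checkExampleConfig() error {
	cfg := kafka.ReaderConfig{
		Brokers:  []string{"localhost:9092"},
		Topic:    "orders",
		MinBytes: 10e3,
		MaxBytes: 10e6,
	}
	// NewReader(cfg) panics on an invalid config; Validate reports the same
	// problems without panicking.
	return cfg.Validate()
}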
 // ReaderStats is a data structure returned by a call to Reader.Stats that exposes
 // details about the behavior of the reader.
 type ReaderStats struct {
 	Dials      int64 `metric:"kafka.reader.dial.count"      type:"counter"`
-	Fetches    int64 `metric:"kafak.reader.fetch.count"     type:"counter"` // typo here, but I'm reluctant to fix it
+	Fetches    int64 `metric:"kafka.reader.fetch.count"     type:"counter"`
 	Messages   int64 `metric:"kafka.reader.message.count"   type:"counter"`
 	Bytes      int64 `metric:"kafka.reader.message.bytes"   type:"counter"`
 	Rebalances int64 `metric:"kafka.reader.rebalance.count" type:"counter"`
@@ -970,6 +597,12 @@ type ReaderStats struct {
 	ClientID  string `tag:"client_id"`
 	Topic     string `tag:"topic"`
 	Partition string `tag:"partition"`
+
+	// The original `Fetches` field had a typo where the metric name was called
+	// "kafak..." instead of "kafka...". To give users time to fix monitors
+	// that may be relying on this mistake, we are temporarily introducing
+	// this field.
+	DeprecatedFetchesWithTypo int64 `metric:"kafak.reader.fetch.count" type:"counter"`
 }
 
 // readerStats is a struct that contains statistics on a reader.
@@ -994,32 +627,8 @@ type readerStats struct {
 // NewReader creates and returns a new Reader configured with config.
 // The offset is initialized to FirstOffset.
 func NewReader(config ReaderConfig) *Reader {
-	if len(config.Brokers) == 0 {
-		panic("cannot create a new kafka reader with an empty list of broker addresses")
-	}
-
-	if len(config.Topic) == 0 {
-		panic("cannot create a new kafka reader with an empty topic")
-	}
-
-	if config.Partition < 0 || config.Partition >= math.MaxInt32 {
-		panic(fmt.Sprintf("partition number out of bounds: %d", config.Partition))
-	}
-
-	if config.MinBytes > config.MaxBytes {
-		panic(fmt.Sprintf("minimum batch size greater than the maximum (min = %d, max = %d)", config.MinBytes, config.MaxBytes))
-	}
-
-	if config.MinBytes < 0 {
-		panic(fmt.Sprintf("invalid negative minimum batch size (min = %d)", config.MinBytes))
-	}
-
-	if config.MaxBytes < 0 {
-		panic(fmt.Sprintf("invalid negative maximum batch size (max = %d)", config.MaxBytes))
-	}
-
-	if config.GroupID != "" && config.Partition != 0 {
-		panic("either Partition or GroupID may be specified, but not both")
+	if err := config.Validate(); err != nil {
+		panic(err)
 	}
 
 	if config.GroupID != "" {
@@ -1029,26 +638,6 @@ func NewReader(config ReaderConfig) *Rea
 				RoundRobinGroupBalancer{},
 			}
 		}
-
-		if config.HeartbeatInterval < 0 || (config.HeartbeatInterval/time.Millisecond) >= math.MaxInt32 {
-			panic(fmt.Sprintf("HeartbeatInterval out of bounds: %d", config.HeartbeatInterval))
-		}
-
-		if config.SessionTimeout < 0 || (config.SessionTimeout/time.Millisecond) >= math.MaxInt32 {
-			panic(fmt.Sprintf("SessionTimeout out of bounds: %d", config.SessionTimeout))
-		}
-
-		if config.RebalanceTimeout < 0 || (config.RebalanceTimeout/time.Millisecond) >= math.MaxInt32 {
-			panic(fmt.Sprintf("RebalanceTimeout out of bounds: %d", config.RebalanceTimeout))
-		}
-
-		if config.RetentionTime < 0 || (config.RetentionTime/time.Millisecond) >= math.MaxInt32 {
-			panic(fmt.Sprintf("RetentionTime out of bounds: %d", config.RetentionTime))
-		}
-
-		if config.CommitInterval < 0 || (config.CommitInterval/time.Millisecond) >= math.MaxInt32 {
-			panic(fmt.Sprintf("CommitInterval out of bounds: %d", config.CommitInterval))
-		}
 	}
 
 	if config.Dialer == nil {
@@ -1060,37 +649,41 @@ func NewReader(config ReaderConfig) *Rea
 	}
 
 	if config.MinBytes == 0 {
-		config.MinBytes = config.MaxBytes
+		config.MinBytes = defaultFetchMinBytes
 	}
 
 	if config.MaxWait == 0 {
 		config.MaxWait = 10 * time.Second
 	}
 
-	if config.ReadLagInterval == 0 {
-		config.ReadLagInterval = 1 * time.Minute
+	if config.ReadBatchTimeout == 0 {
+		config.ReadBatchTimeout = 10 * time.Second
 	}
 
-	if config.HeartbeatInterval == 0 {
-		config.HeartbeatInterval = defaultHeartbeatInterval
+	if config.ReadLagInterval == 0 {
+		config.ReadLagInterval = 1 * time.Minute
 	}
 
-	if config.SessionTimeout == 0 {
-		config.SessionTimeout = defaultSessionTimeout
+	if config.ReadBackoffMin == 0 {
+		config.ReadBackoffMin = defaultReadBackoffMin
 	}
 
-	if config.RebalanceTimeout == 0 {
-		config.RebalanceTimeout = defaultRebalanceTimeout
+	if config.ReadBackoffMax == 0 {
+		config.ReadBackoffMax = defaultReadBackoffMax
 	}
 
-	if config.RetentionTime == 0 {
-		config.RetentionTime = defaultRetentionTime
+	if config.ReadBackoffMax < config.ReadBackoffMin {
+		panic(fmt.Errorf("ReadBackoffMax %d smaller than ReadBackoffMin %d", config.ReadBackoffMax, config.ReadBackoffMin))
 	}
 
 	if config.QueueCapacity == 0 {
 		config.QueueCapacity = 100
 	}
 
+	if config.MaxAttempts == 0 {
+		config.MaxAttempts = 3
+	}
+
 	// when configured as a consumer group; stats should report a partition of -1
 	readerStatsPartition := config.Partition
 	if config.GroupID != "" {
@@ -1109,7 +702,6 @@ func NewReader(config ReaderConfig) *Rea
 		config:  config,
 		msgs:    make(chan readerMessage, config.QueueCapacity),
 		cancel:  func() {},
-		done:    make(chan struct{}),
 		commits: make(chan commitRequest, config.QueueCapacity),
 		stop:    stop,
 		offset:  FirstOffset,
@@ -1124,11 +716,33 @@ func NewReader(config ReaderConfig) *Rea
 			// once when the reader is created.
 			partition: strconv.Itoa(readerStatsPartition),
 		},
-		version:     version,
-		offsetStash: offsetStash{},
+		version: version,
+	}
+	if r.useConsumerGroup() {
+		r.done = make(chan struct{})
+		r.runError = make(chan error)
+		cg, err := NewConsumerGroup(ConsumerGroupConfig{
+			ID:                     r.config.GroupID,
+			Brokers:                r.config.Brokers,
+			Dialer:                 r.config.Dialer,
+			Topics:                 r.getTopics(),
+			GroupBalancers:         r.config.GroupBalancers,
+			HeartbeatInterval:      r.config.HeartbeatInterval,
+			PartitionWatchInterval: r.config.PartitionWatchInterval,
+			WatchPartitionChanges:  r.config.WatchPartitionChanges,
+			SessionTimeout:         r.config.SessionTimeout,
+			RebalanceTimeout:       r.config.RebalanceTimeout,
+			JoinGroupBackoff:       r.config.JoinGroupBackoff,
+			RetentionTime:          r.config.RetentionTime,
+			StartOffset:            r.config.StartOffset,
+			Logger:                 r.config.Logger,
+			ErrorLogger:            r.config.ErrorLogger,
+		})
+		if err != nil {
+			panic(err)
+		}
+		go r.run(cg)
 	}
-
-	go r.run()
 
 	return r
 }
@@ -1152,17 +766,10 @@ func (r *Reader) Close() error {
 	r.stop()
 	r.join.Wait()
 
-	if r.useConsumerGroup() {
-		// gracefully attempt to leave the consumer group on close
-		if generationID, membershipID := r.membership(); generationID > 0 && membershipID != "" {
-			if conn, err := r.coordinator(); err == nil {
-				_ = r.leaveGroup(conn)
-			}
-		}
+	if r.done != nil {
+		<-r.done
 	}
 
-	<-r.done
-
 	if !closed {
 		close(r.msgs)
 	}
@@ -1177,16 +784,20 @@ func (r *Reader) Close() error {
 // The method returns io.EOF to indicate that the reader has been closed.
 //
 // If consumer groups are used, ReadMessage will automatically commit the
-// offset when called.
+// offset when called. Note that this could result in an offset being committed
+// before the message is fully processed.
+//
+// If more fine-grained control of when offsets are committed is required, it
+// is recommended to use FetchMessage with CommitMessages instead.
 func (r *Reader) ReadMessage(ctx context.Context) (Message, error) {
 	m, err := r.FetchMessage(ctx)
 	if err != nil {
-		return Message{}, err
+		return Message{}, fmt.Errorf("fetching message: %w", err)
 	}
 
 	if r.useConsumerGroup() {
 		if err := r.CommitMessages(ctx, m); err != nil {
-			return Message{}, err
+			return Message{}, fmt.Errorf("committing message: %w", err)
 		}
 	}
 
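// Illustrative sketch, not part of the diff above: the FetchMessage plus
// CommitMessages pattern recommended by the ReadMessage documentation, which
// commits an offset only after the message has been processed. The process
// callback and the kafka import alias are assumptions of this example.
func consumeWithManualCommits(ctx context.Context, r *kafka.Reader, process func(kafka.Message) error) error {
	for {
		m, err := r.FetchMessage(ctx)
		if err != nil {
			return err // io.EOF once the reader has been closed
		}
		if err := process(m); err != nil {
			return err // nothing committed, so the message will be delivered again
		}
		if err := r.CommitMessages(ctx, m); err != nil {
			return err
		}
	}
}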
@@ -1208,7 +819,7 @@ func (r *Reader) FetchMessage(ctx contex
 		r.mutex.Lock()
 
 		if !r.closed && r.version == 0 {
-			r.start(map[int]int64{r.config.Partition: r.offset})
+			r.start(r.getTopicPartitionOffset())
 		}
 
 		version := r.version
@@ -1218,6 +829,9 @@ func (r *Reader) FetchMessage(ctx contex
 		case <-ctx.Done():
 			return Message{}, ctx.Err()
 
+		case err := <-r.runError:
+			return Message{}, err
+
 		case m, ok := <-r.msgs:
 			if !ok {
 				return Message{}, io.EOF
@@ -1235,9 +849,7 @@ func (r *Reader) FetchMessage(ctx contex
 
 				r.mutex.Unlock()
 
-				switch m.error {
-				case nil:
-				case io.EOF:
+				if errors.Is(m.error, io.EOF) {
 					// io.EOF is used as a marker to indicate that the stream
 					// has been closed, in case it was received from the inner
 					// reader we don't want to confuse the program and replace
@@ -1254,18 +866,26 @@ func (r *Reader) FetchMessage(ctx contex
 // CommitMessages commits the list of messages passed as argument. The program
 // may pass a context to asynchronously cancel the commit operation when it was
 // configured to be blocking.
+//
+// Because kafka consumer groups track a single offset per partition, the
+// highest message offset passed to CommitMessages will cause all previous
+// messages to be committed. Applications need to account for these Kafka
+// limitations when committing messages, and maintain message ordering if they
+// need strong delivery guarantees. This property makes it valid to pass only
+// the last message seen to CommitMessages in order to move the offset of the
+// topic/partition it belonged to forward, effectively committing all previous
+// messages in the partition.
 func (r *Reader) CommitMessages(ctx context.Context, msgs ...Message) error {
 	if !r.useConsumerGroup() {
 		return errOnlyAvailableWithGroup
 	}
 
 	var errch <-chan error
-	var sync = r.useSyncCommits()
-	var creq = commitRequest{
+	creq := commitRequest{
 		commits: makeCommits(msgs...),
 	}
 
-	if sync {
+	if r.useSyncCommits() {
 		ch := make(chan error, 1)
 		errch, creq.errch = ch, ch
 	}
@@ -1280,7 +900,7 @@ func (r *Reader) CommitMessages(ctx cont
 		return io.ErrClosedPipe
 	}
 
-	if !sync {
+	if !r.useSyncCommits() {
 		return nil
 	}
 
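// Illustrative sketch, not part of the diff above: because the group tracks a
// single offset per partition, committing only the last message seen for each
// topic/partition is equivalent to committing every message in the batch. The
// batchSize parameter and the kafka import alias are assumptions of this example.
func commitLastPerPartition(ctx context.Context, r *kafka.Reader, batchSize int) error {
	type tp struct {
		topic     string
		partition int
	}
	last := make(map[tp]kafka.Message, batchSize)
	for i := 0; i < batchSize; i++ {
		m, err := r.FetchMessage(ctx)
		if err != nil {
			return err
		}
		// Overwriting keeps only the highest offset seen for each partition.
		last[tp{m.Topic, m.Partition}] = m
	}
	msgs := make([]kafka.Message, 0, len(last))
	for _, m := range last {
		msgs = append(msgs, m)
	}
	return r.CommitMessages(ctx, msgs...)
}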
@@ -1375,8 +995,8 @@ func (r *Reader) Offset() int64 {
 	r.mutex.Lock()
 	offset := r.offset
 	r.mutex.Unlock()
-	r.withLogger(func(log *log.Logger) {
-		log.Printf("looking up offset of kafka reader for partition %d of %s: %d", r.config.Partition, r.config.Topic, offset)
+	r.withLogger(func(log Logger) {
+		log.Printf("looking up offset of kafka reader for partition %d of %s: %s", r.config.Partition, r.config.Topic, toHumanOffset(offset))
 	})
 	return offset
 }
@@ -1413,14 +1033,14 @@ func (r *Reader) SetOffset(offset int64)
 	if r.closed {
 		err = io.ErrClosedPipe
 	} else if offset != r.offset {
-		r.withLogger(func(log *log.Logger) {
-			log.Printf("setting the offset of the kafka reader for partition %d of %s from %d to %d",
-				r.config.Partition, r.config.Topic, r.offset, offset)
+		r.withLogger(func(log Logger) {
+			log.Printf("setting the offset of the kafka reader for partition %d of %s from %s to %s",
+				r.config.Partition, r.config.Topic, toHumanOffset(r.offset), toHumanOffset(offset))
 		})
 		r.offset = offset
 
 		if r.version != 0 {
-			r.start(map[int]int64{r.config.Partition: r.offset})
+			r.start(r.getTopicPartitionOffset())
 		}
 
 		r.activateReadLag()
@@ -1430,6 +1050,42 @@ func (r *Reader) SetOffset(offset int64)
 	return err
 }
 
+// SetOffsetAt changes the offset from which the next batch of messages will be
+// read given the timestamp t.
+//
+// The method fails if it is unable to connect to the partition leader, unable to read the
+// offset for the given timestamp, or if the reader has been closed.
+func (r *Reader) SetOffsetAt(ctx context.Context, t time.Time) error {
+	r.mutex.Lock()
+	if r.closed {
+		r.mutex.Unlock()
+		return io.ErrClosedPipe
+	}
+	r.mutex.Unlock()
+
+	if len(r.config.Brokers) < 1 {
+		return errors.New("no brokers in config")
+	}
+	var conn *Conn
+	var err error
+	for _, broker := range r.config.Brokers {
+		conn, err = r.config.Dialer.DialLeader(ctx, "tcp", broker, r.config.Topic, r.config.Partition)
+		if err != nil {
+			continue
+		}
+		deadline, _ := ctx.Deadline()
+		conn.SetDeadline(deadline)
+		offset, err := conn.ReadOffset(t)
+		conn.Close()
+		if err != nil {
+			return err
+		}
+
+		return r.SetOffset(offset)
+	}
+	return fmt.Errorf("error dialing all brokers, one of the errors: %w", err)
+}
+
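// Illustrative sketch, not part of the diff above: rewinding a reader to the
// first message produced at or after a point in time. The one-hour window is
// arbitrary, and the call only applies to readers created without a GroupID,
// since group readers manage their offsets through the consumer group.
func rewindOneHour(ctx context.Context, r *kafka.Reader) error {
	return r.SetOffsetAt(ctx, time.Now().Add(-time.Hour))
}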
 // Stats returns a snapshot of the reader stats since the last time the method
 // was called, or since the reader was created if it is called for the first
 // time.
@@ -1438,7 +1094,7 @@ func (r *Reader) SetOffset(offset int64)
 // call Stats on a kafka reader and report the metrics to a stats collection
 // system.
 func (r *Reader) Stats() ReaderStats {
-	return ReaderStats{
+	stats := ReaderStats{
 		Dials:         r.stats.dials.snapshot(),
 		Fetches:       r.stats.fetches.snapshot(),
 		Messages:      r.stats.messages.snapshot(),
@@ -1462,15 +1118,23 @@ func (r *Reader) Stats() ReaderStats {
 		Topic:         r.config.Topic,
 		Partition:     r.stats.partition,
 	}
+	// TODO: remove when we get rid of the deprecated field.
+	stats.DeprecatedFetchesWithTypo = stats.Fetches
+	return stats
 }
 
-func (r *Reader) withLogger(do func(*log.Logger)) {
+func (r *Reader) getTopicPartitionOffset() map[topicPartition]int64 {
+	key := topicPartition{topic: r.config.Topic, partition: int32(r.config.Partition)}
+	return map[topicPartition]int64{key: r.offset}
+}
+
+func (r *Reader) withLogger(do func(Logger)) {
 	if r.config.Logger != nil {
 		do(r.config.Logger)
 	}
 }
 
-func (r *Reader) withErrorLogger(do func(*log.Logger)) {
+func (r *Reader) withErrorLogger(do func(Logger)) {
 	if r.config.ErrorLogger != nil {
 		do(r.config.ErrorLogger)
 	} else {
@@ -1499,8 +1163,8 @@ func (r *Reader) readLag(ctx context.Con
 
 		if err != nil {
 			r.stats.errors.observe(1)
-			r.withErrorLogger(func(log *log.Logger) {
-				log.Printf("kafka reader failed to read lag of partition %d of %s", r.config.Partition, r.config.Topic)
+			r.withErrorLogger(func(log Logger) {
+				log.Printf("kafka reader failed to read lag of partition %d of %s: %s", r.config.Partition, r.config.Topic, err)
 			})
 		} else {
 			r.stats.lag.observe(lag)
@@ -1514,7 +1178,7 @@ func (r *Reader) readLag(ctx context.Con
 	}
 }
 
-func (r *Reader) start(offsetsByPartition map[int]int64) {
+func (r *Reader) start(offsetsByPartition map[topicPartition]int64) {
 	if r.closed {
 		// don't start child reader if parent Reader is closed
 		return
@@ -1527,44 +1191,59 @@ func (r *Reader) start(offsetsByPartitio
 	r.version++
 
 	r.join.Add(len(offsetsByPartition))
-	for partition, offset := range offsetsByPartition {
-		go func(ctx context.Context, partition int, offset int64, join *sync.WaitGroup) {
+	for key, offset := range offsetsByPartition {
+		go func(ctx context.Context, key topicPartition, offset int64, join *sync.WaitGroup) {
 			defer join.Done()
 
 			(&reader{
-				dialer:      r.config.Dialer,
-				logger:      r.config.Logger,
-				errorLogger: r.config.ErrorLogger,
-				brokers:     r.config.Brokers,
-				topic:       r.config.Topic,
-				partition:   partition,
-				minBytes:    r.config.MinBytes,
-				maxBytes:    r.config.MaxBytes,
-				maxWait:     r.config.MaxWait,
-				version:     r.version,
-				msgs:        r.msgs,
-				stats:       r.stats,
+				dialer:           r.config.Dialer,
+				logger:           r.config.Logger,
+				errorLogger:      r.config.ErrorLogger,
+				brokers:          r.config.Brokers,
+				topic:            key.topic,
+				partition:        int(key.partition),
+				minBytes:         r.config.MinBytes,
+				maxBytes:         r.config.MaxBytes,
+				maxWait:          r.config.MaxWait,
+				readBatchTimeout: r.config.ReadBatchTimeout,
+				backoffDelayMin:  r.config.ReadBackoffMin,
+				backoffDelayMax:  r.config.ReadBackoffMax,
+				version:          r.version,
+				msgs:             r.msgs,
+				stats:            r.stats,
+				isolationLevel:   r.config.IsolationLevel,
+				maxAttempts:      r.config.MaxAttempts,
+
+				// backwards-compatibility flags
+				offsetOutOfRangeError: r.config.OffsetOutOfRangeError,
 			}).run(ctx, offset)
-		}(ctx, partition, offset, &r.join)
+		}(ctx, key, offset, &r.join)
 	}
 }
 
 // A reader reads messages from kafka and produces them on its channels, it's
-// used as an way to asynchronously fetch messages while the main program reads
+// used as a way to asynchronously fetch messages while the main program reads
 // them using the high level reader API.
 type reader struct {
-	dialer      *Dialer
-	logger      *log.Logger
-	errorLogger *log.Logger
-	brokers     []string
-	topic       string
-	partition   int
-	minBytes    int
-	maxBytes    int
-	maxWait     time.Duration
-	version     int64
-	msgs        chan<- readerMessage
-	stats       *readerStats
+	dialer           *Dialer
+	logger           Logger
+	errorLogger      Logger
+	brokers          []string
+	topic            string
+	partition        int
+	minBytes         int
+	maxBytes         int
+	maxWait          time.Duration
+	readBatchTimeout time.Duration
+	backoffDelayMin  time.Duration
+	backoffDelayMax  time.Duration
+	version          int64
+	msgs             chan<- readerMessage
+	stats            *readerStats
+	isolationLevel   IsolationLevel
+	maxAttempts      int
+
+	offsetOutOfRangeError bool
 }
 
 type readerMessage struct {
@@ -1575,9 +1254,6 @@ type readerMessage struct {
 }
 
 func (r *reader) run(ctx context.Context, offset int64) {
-	const backoffDelayMin = 100 * time.Millisecond
-	const backoffDelayMax = 1 * time.Second
-
 	// This is the reader's main loop; it only ends if the context is canceled
 	// and will keep attempting to read messages otherwise.
 	//
@@ -1589,35 +1265,42 @@ func (r *reader) run(ctx context.Context
 	// on a Read call after reading the first error.
 	for attempt := 0; true; attempt++ {
 		if attempt != 0 {
-			if !sleep(ctx, backoff(attempt, backoffDelayMin, backoffDelayMax)) {
+			if !sleep(ctx, backoff(attempt, r.backoffDelayMin, r.backoffDelayMax)) {
 				return
 			}
 		}
 
-		r.withLogger(func(log *log.Logger) {
-			log.Printf("initializing kafka reader for partition %d of %s starting at offset %d", r.partition, r.topic, offset)
+		r.withLogger(func(log Logger) {
+			log.Printf("initializing kafka reader for partition %d of %s starting at offset %d", r.partition, r.topic, toHumanOffset(offset))
 		})
 
 		conn, start, err := r.initialize(ctx, offset)
-		switch err {
-		case nil:
-		case OffsetOutOfRange:
-			// This would happen if the requested offset is passed the last
-			// offset on the partition leader. In that case we're just going
-			// to retry later hoping that enough data has been produced.
-			r.withErrorLogger(func(log *log.Logger) {
-				log.Printf("error initializing the kafka reader for partition %d of %s: %s", r.partition, r.topic, OffsetOutOfRange)
-			})
-			continue
-		default:
-			// Wait 4 attempts before reporting the first errors, this helps
-			// mitigate situations where the kafka server is temporarily
+		if err != nil {
+			if errors.Is(err, OffsetOutOfRange) {
+				if r.offsetOutOfRangeError {
+					r.sendError(ctx, err)
+					return
+				}
+
+				// This would happen if the requested offset is past the last
+				// offset on the partition leader. In that case we're just going
+				// to retry later hoping that enough data has been produced.
+				r.withErrorLogger(func(log Logger) {
+					log.Printf("error initializing the kafka reader for partition %d of %s: %s", r.partition, r.topic, err)
+				})
+
+				continue
+			}
+
+			// Perform a configured number of attempts before
+			// reporting first errors, this helps mitigate
+			// situations where the kafka server is temporarily
 			// unavailable.
-			if attempt >= 3 {
+			if attempt >= r.maxAttempts {
 				r.sendError(ctx, err)
 			} else {
 				r.stats.errors.observe(1)
-				r.withErrorLogger(func(log *log.Logger) {
+				r.withErrorLogger(func(log Logger) {
 					log.Printf("error initializing the kafka reader for partition %d of %s: %s", r.partition, r.topic, err)
 				})
 			}
@@ -1636,18 +1319,48 @@ func (r *reader) run(ctx context.Context
 		errcount := 0
 	readLoop:
 		for {
-			if !sleep(ctx, backoff(errcount, backoffDelayMin, backoffDelayMax)) {
+			if !sleep(ctx, backoff(errcount, r.backoffDelayMin, r.backoffDelayMax)) {
 				conn.Close()
 				return
 			}
 
-			switch offset, err = r.read(ctx, offset, conn); err {
-			case nil:
+			offset, err = r.read(ctx, offset, conn)
+			switch {
+			case err == nil:
+				errcount = 0
+				continue
+
+			case errors.Is(err, io.EOF):
+				// done with this batch of messages...carry on.  note that this
+				// block relies on the batch repackaging real io.EOF errors as
+				// io.ErrUnexpectedEOF.  otherwise, we would end up swallowing real
+				// errors here.
 				errcount = 0
+				continue
+
+			case errors.Is(err, io.ErrNoProgress):
+				// This error is returned by the Conn when it believes the connection
+				// has been corrupted, so we need to explicitly close it. Since we are
+				// explicitly handling it and a retry will pick up, we can suppress the
+				// error metrics and logs for this case.
+				conn.Close()
+				break readLoop
 
-			case NotLeaderForPartition:
-				r.withErrorLogger(func(log *log.Logger) {
-					log.Printf("failed to read from current broker for partition %d of %s at offset %d, not the leader", r.partition, r.topic, offset)
+			case errors.Is(err, UnknownTopicOrPartition):
+				r.withErrorLogger(func(log Logger) {
+					log.Printf("failed to read from current broker %v for partition %d of %s at offset %d: %v", r.brokers, r.partition, r.topic, toHumanOffset(offset), err)
+				})
+
+				conn.Close()
+
+				// The next call to .initialize will re-establish a connection to the proper
+				// topic/partition broker combo.
+				r.stats.rebalances.observe(1)
+				break readLoop
+
+			case errors.Is(err, NotLeaderForPartition):
+				r.withErrorLogger(func(log Logger) {
+					log.Printf("failed to read from current broker for partition %d of %s at offset %d: %v", r.partition, r.topic, toHumanOffset(offset), err)
 				})
 
 				conn.Close()
@@ -1657,20 +1370,19 @@ func (r *reader) run(ctx context.Context
 				r.stats.rebalances.observe(1)
 				break readLoop
 
-			case RequestTimedOut:
+			case errors.Is(err, RequestTimedOut):
 				// Timeout on the kafka side, this can be safely retried.
 				errcount = 0
-				r.withErrorLogger(func(log *log.Logger) {
-					log.Printf("no messages received from kafka within the allocated time for partition %d of %s at offset %d", r.partition, r.topic, offset)
+				r.withLogger(func(log Logger) {
+					log.Printf("no messages received from kafka within the allocated time for partition %d of %s at offset %d: %v", r.partition, r.topic, toHumanOffset(offset), err)
 				})
 				r.stats.timeouts.observe(1)
 				continue
 
-			case OffsetOutOfRange:
+			case errors.Is(err, OffsetOutOfRange):
 				first, last, err := r.readOffsets(conn)
-
 				if err != nil {
-					r.withErrorLogger(func(log *log.Logger) {
+					r.withErrorLogger(func(log Logger) {
 						log.Printf("the kafka reader got an error while attempting to determine whether it was reading before the first offset or after the last offset of partition %d of %s: %s", r.partition, r.topic, err)
 					})
 					conn.Close()
@@ -1679,8 +1391,8 @@ func (r *reader) run(ctx context.Context
 
 				switch {
 				case offset < first:
-					r.withErrorLogger(func(log *log.Logger) {
-						log.Printf("the kafka reader is reading before the first offset for partition %d of %s, skipping from offset %d to %d (%d messages)", r.partition, r.topic, offset, first, first-offset)
+					r.withErrorLogger(func(log Logger) {
+						log.Printf("the kafka reader is reading before the first offset for partition %d of %s, skipping from offset %d to %d (%d messages)", r.partition, r.topic, toHumanOffset(offset), first, first-offset)
 					})
 					offset, errcount = first, 0
 					continue // retry immediately so we don't keep falling behind due to the backoff
@@ -1691,22 +1403,30 @@ func (r *reader) run(ctx context.Context
 
 				default:
 					// We may be reading past the last offset, will retry later.
-					r.withErrorLogger(func(log *log.Logger) {
-						log.Printf("the kafka reader is reading passed the last offset for partition %d of %s at offset %d", r.partition, r.topic, offset)
+					r.withErrorLogger(func(log Logger) {
+						log.Printf("the kafka reader is reading past the last offset for partition %d of %s at offset %d", r.partition, r.topic, toHumanOffset(offset))
 					})
 				}
 
-			case context.Canceled:
+			case errors.Is(err, context.Canceled):
 				// Another reader has taken over, we can safely quit.
 				conn.Close()
 				return
 
+			case errors.Is(err, errUnknownCodec):
+				// The compression codec is either unsupported or has not been
+				// imported.  This is a fatal error b/c the reader cannot
+				// proceed.
+				r.sendError(ctx, err)
+				break readLoop
+
 			default:
-				if _, ok := err.(Error); ok {
+				var kafkaError Error
+				if errors.As(err, &kafkaError) {
 					r.sendError(ctx, err)
 				} else {
-					r.withErrorLogger(func(log *log.Logger) {
-						log.Printf("the kafka reader got an unknown error reading partition %d of %s at offset %d: %s", r.partition, r.topic, offset, err)
+					r.withErrorLogger(func(log Logger) {
+						log.Printf("the kafka reader got an unknown error reading partition %d of %s at offset %d: %s", r.partition, r.topic, toHumanOffset(offset), err)
 					})
 					r.stats.errors.observe(1)
 					conn.Close()
@@ -1721,7 +1441,7 @@ func (r *reader) run(ctx context.Context
 
 func (r *reader) initialize(ctx context.Context, offset int64) (conn *Conn, start int64, err error) {
 	for i := 0; i != len(r.brokers) && conn == nil; i++ {
-		var broker = r.brokers[i]
+		broker := r.brokers[i]
 		var first, last int64
 
 		t0 := time.Now()
@@ -1751,8 +1471,8 @@ func (r *reader) initialize(ctx context.
 			offset = first
 		}
 
-		r.withLogger(func(log *log.Logger) {
-			log.Printf("the kafka reader for partition %d of %s is seeking to offset %d", r.partition, r.topic, offset)
+		r.withLogger(func(log Logger) {
+			log.Printf("the kafka reader for partition %d of %s is seeking to offset %d", r.partition, r.topic, toHumanOffset(offset))
 		})
 
 		if start, err = conn.Seek(offset, SeekAbsolute); err != nil {
@@ -1774,7 +1494,11 @@ func (r *reader) read(ctx context.Contex
 	t0 := time.Now()
 	conn.SetReadDeadline(t0.Add(r.maxWait))
 
-	batch := conn.ReadBatch(r.minBytes, r.maxBytes)
+	batch := conn.ReadBatchWith(ReadBatchConfig{
+		MinBytes:       r.minBytes,
+		MaxBytes:       r.maxBytes,
+		IsolationLevel: r.isolationLevel,
+	})
 	highWaterMark := batch.HighWaterMark()
 
 	t1 := time.Now()
@@ -1785,18 +1509,11 @@ func (r *reader) read(ctx context.Contex
 	var size int64
 	var bytes int64
 
-	const safetyTimeout = 10 * time.Second
-	deadline := time.Now().Add(safetyTimeout)
-	conn.SetReadDeadline(deadline)
-
 	for {
-		if now := time.Now(); deadline.Sub(now) < (safetyTimeout / 2) {
-			deadline = now.Add(safetyTimeout)
-			conn.SetReadDeadline(deadline)
-		}
+		conn.SetReadDeadline(time.Now().Add(r.readBatchTimeout))
 
 		if msg, err = batch.ReadMessage(); err != nil {
-			err = batch.Close()
+			batch.Close()
 			break
 		}
 
@@ -1805,7 +1522,7 @@ func (r *reader) read(ctx context.Contex
 		r.stats.bytes.observe(n)
 
 		if err = r.sendMessage(ctx, msg, highWaterMark); err != nil {
-			err = batch.Close()
+			batch.Close()
 			break
 		}
 
@@ -1849,13 +1566,13 @@ func (r *reader) sendError(ctx context.C
 	}
 }
 
-func (r *reader) withLogger(do func(*log.Logger)) {
+func (r *reader) withLogger(do func(Logger)) {
 	if r.logger != nil {
 		do(r.logger)
 	}
 }
 
-func (r *reader) withErrorLogger(do func(*log.Logger)) {
+func (r *reader) withErrorLogger(do func(Logger)) {
 	if r.errorLogger != nil {
 		do(r.errorLogger)
 	} else {
@@ -1864,9 +1581,9 @@ func (r *reader) withErrorLogger(do func
 }
 
 // extractTopics returns the unique list of topics represented by the set of
-// provided members
+// provided members.
 func extractTopics(members []GroupMember) []string {
-	var visited = map[string]struct{}{}
+	visited := map[string]struct{}{}
 	var topics []string
 
 	for _, member := range members {
@@ -1884,3 +1601,21 @@ func extractTopics(members []GroupMember
 
 	return topics
 }
+
+type humanOffset int64
+
+func toHumanOffset(v int64) humanOffset {
+	return humanOffset(v)
+}
+
+func (offset humanOffset) Format(w fmt.State, _ rune) {
+	v := int64(offset)
+	switch v {
+	case FirstOffset:
+		fmt.Fprint(w, "first offset")
+	case LastOffset:
+		fmt.Fprint(w, "last offset")
+	default:
+		fmt.Fprint(w, strconv.FormatInt(v, 10))
+	}
+}
diff -pruN 0.2.1-1.1/reader_test.go 0.4.49+ds1-1/reader_test.go
--- 0.2.1-1.1/reader_test.go	2018-11-20 17:06:33.000000000 +0000
+++ 0.4.49+ds1-1/reader_test.go	2025-08-21 19:15:53.000000000 +0000
@@ -1,19 +1,24 @@
 package kafka
 
 import (
+	"bytes"
 	"context"
+	"errors"
+	"fmt"
 	"io"
 	"math/rand"
+	"net"
+	"os"
 	"reflect"
 	"strconv"
 	"sync"
 	"testing"
 	"time"
+
+	"github.com/stretchr/testify/require"
 )
 
 func TestReader(t *testing.T) {
-	t.Parallel()
-
 	tests := []struct {
 		scenario string
 		function func(*testing.T, context.Context, *Reader)
@@ -39,6 +44,11 @@ func TestReader(t *testing.T) {
 		},
 
 		{
+			scenario: "setting the offset by TimeStamp",
+			function: testReaderSetOffsetAt,
+		},
+
+		{
 			scenario: "calling Lag returns the lag of the last message read from kafka",
 			function: testReaderLag,
 		},
@@ -48,15 +58,15 @@ func TestReader(t *testing.T) {
 			function: testReaderReadLag,
 		},
 
-		{
-			scenario: "calling Stats returns accurate stats about the reader",
-			function: testReaderStats,
-		},
-
 		{ // https://github.com/segmentio/kafka-go/issues/30
 			scenario: "reading from an out-of-range offset waits until the context is cancelled",
 			function: testReaderOutOfRangeGetsCanceled,
 		},
+
+		{
+			scenario: "topic being recreated will return an error",
+			function: testReaderTopicRecreated,
+		},
 	}
 
 	for _, test := range tests {
@@ -73,6 +83,7 @@ func TestReader(t *testing.T) {
 				MinBytes: 1,
 				MaxBytes: 10e6,
 				MaxWait:  100 * time.Millisecond,
+				Logger:   newTestKafkaLogger(t, ""),
 			})
 			defer r.Close()
 			testFunc(t, ctx, r)
@@ -84,7 +95,7 @@ func testReaderReadCanceled(t *testing.T
 	ctx, cancel := context.WithCancel(ctx)
 	cancel()
 
-	if _, err := r.ReadMessage(ctx); err != context.Canceled {
+	if _, err := r.ReadMessage(ctx); !errors.Is(err, context.Canceled) {
 		t.Error(err)
 	}
 }
@@ -163,6 +174,43 @@ func testReaderSetRandomOffset(t *testin
 	}
 }
 
+func testReaderSetOffsetAt(t *testing.T, ctx context.Context, r *Reader) {
+	// We make 2 batches of messages here with a brief 2 second pause
+	// to ensure messages 0...9 will be written a few seconds before messages 10...19
+	// We'll then fetch the timestamp for message offset 10 and use that timestamp to set
+	// our reader
+	const N = 10
+	prepareReader(t, ctx, r, makeTestSequence(N)...)
+	time.Sleep(time.Second * 2)
+	prepareReader(t, ctx, r, makeTestSequence(N)...)
+
+	var ts time.Time
+	for i := 0; i < N*2; i++ {
+		m, err := r.ReadMessage(ctx)
+		if err != nil {
+			t.Error("error reading message", err)
+		}
+		// grab the time for the 10th message
+		if i == 10 {
+			ts = m.Time
+		}
+	}
+
+	err := r.SetOffsetAt(ctx, ts)
+	if err != nil {
+		t.Fatal("error setting offset by timestamp", err)
+	}
+
+	m, err := r.ReadMessage(context.Background())
+	if err != nil {
+		t.Fatal("error reading message", err)
+	}
+
+	if m.Offset != 10 {
+		t.Errorf("expected offset of 10, received offset %d", m.Offset)
+	}
+}
+
 func testReaderLag(t *testing.T, ctx context.Context, r *Reader) {
 	const N = 5
 	prepareReader(t, ctx, r, makeTestSequence(N)...)
@@ -203,72 +251,6 @@ func testReaderReadLag(t *testing.T, ctx
 	}
 }
 
-func testReaderStats(t *testing.T, ctx context.Context, r *Reader) {
-	const N = 10
-	prepareReader(t, ctx, r, makeTestSequence(N)...)
-
-	var offset int64
-	var bytes int64
-
-	for i := 0; i != N; i++ {
-		m, err := r.ReadMessage(ctx)
-		if err != nil {
-			t.Error("reading message at offset", offset, "failed:", err)
-			return
-		}
-		offset = m.Offset + 1
-		bytes += int64(len(m.Key) + len(m.Value))
-	}
-
-	stats := r.Stats()
-
-	// First verify that metrics with unpredictable values are not zero.
-	if stats.DialTime == (DurationStats{}) {
-		t.Error("no dial time reported by reader stats")
-	}
-	if stats.ReadTime == (DurationStats{}) {
-		t.Error("no read time reported by reader stats")
-	}
-	if stats.WaitTime == (DurationStats{}) {
-		t.Error("no wait time reported by reader stats")
-	}
-	if len(stats.Topic) == 0 {
-		t.Error("empty topic in reader stats")
-	}
-
-	// Then compare all remaining metrics.
-	expect := ReaderStats{
-		Dials:         1,
-		Fetches:       1,
-		Messages:      10,
-		Bytes:         10,
-		Rebalances:    0,
-		Timeouts:      0,
-		Errors:        0,
-		DialTime:      stats.DialTime,
-		ReadTime:      stats.ReadTime,
-		WaitTime:      stats.WaitTime,
-		FetchSize:     SummaryStats{Avg: 10, Min: 10, Max: 10},
-		FetchBytes:    SummaryStats{Avg: 10, Min: 10, Max: 10},
-		Offset:        10,
-		Lag:           0,
-		MinBytes:      1,
-		MaxBytes:      10000000,
-		MaxWait:       100 * time.Millisecond,
-		QueueLength:   0,
-		QueueCapacity: 100,
-		ClientID:      "",
-		Topic:         stats.Topic,
-		Partition:     "0",
-	}
-
-	if stats != expect {
-		t.Error("bad stats:")
-		t.Log("expected:", expect)
-		t.Log("found:   ", stats)
-	}
-}
-
 func testReaderOutOfRangeGetsCanceled(t *testing.T, ctx context.Context, r *Reader) {
 	prepareReader(t, ctx, r, makeTestSequence(10)...)
 
@@ -283,7 +265,7 @@ func testReaderOutOfRangeGetsCanceled(t
 	}
 
 	_, err := r.ReadMessage(ctx)
-	if err != context.DeadlineExceeded {
+	if !errors.Is(err, context.DeadlineExceeded) {
 		t.Error("bad error:", err)
 	}
 
@@ -295,14 +277,31 @@ func testReaderOutOfRangeGetsCanceled(t
 }
 
 func createTopic(t *testing.T, topic string, partitions int) {
+	t.Helper()
+
+	t.Logf("createTopic(%s, %d)", topic, partitions)
+
 	conn, err := Dial("tcp", "localhost:9092")
 	if err != nil {
-		t.Error("bad conn")
-		return
+		err = fmt.Errorf("createTopic, Dial: %w", err)
+		t.Fatal(err)
 	}
 	defer conn.Close()
 
-	_, err = conn.createTopics(createTopicsRequestV0{
+	controller, err := conn.Controller()
+	if err != nil {
+		err = fmt.Errorf("createTopic, conn.Controller: %w", err)
+		t.Fatal(err)
+	}
+
+	conn, err = Dial("tcp", net.JoinHostPort(controller.Host, strconv.Itoa(controller.Port)))
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	conn.SetDeadline(time.Now().Add(10 * time.Second))
+
+	_, err = conn.createTopics(createTopicsRequest{
 		Topics: []createTopicsRequestV0Topic{
 			{
 				Topic:             topic,
@@ -310,22 +309,93 @@ func createTopic(t *testing.T, topic str
 				ReplicationFactor: 1,
 			},
 		},
-		Timeout: int32(30 * time.Second / time.Millisecond),
+		Timeout: milliseconds(5 * time.Second),
 	})
-	switch err {
-	case nil:
-		// ok
-	case TopicAlreadyExists:
-		// ok
-	default:
-		t.Error("bad createTopics", err)
-		t.FailNow()
+	if err != nil {
+		if !errors.Is(err, TopicAlreadyExists) {
+			err = fmt.Errorf("createTopic, conn.createTopics: %w", err)
+			t.Error(err)
+			t.FailNow()
+		}
 	}
+
+	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
+	defer cancel()
+
+	waitForTopic(ctx, t, topic)
 }
 
-func TestReaderOnNonZeroPartition(t *testing.T) {
-	t.Parallel()
+// Block until topic exists.
+func waitForTopic(ctx context.Context, t *testing.T, topic string) {
+	t.Helper()
+
+	for {
+		select {
+		case <-ctx.Done():
+			t.Fatalf("reached deadline before verifying topic existence")
+		default:
+		}
+
+		cli := &Client{
+			Addr:    TCP("localhost:9092"),
+			Timeout: 5 * time.Second,
+		}
+
+		response, err := cli.Metadata(ctx, &MetadataRequest{
+			Addr:   cli.Addr,
+			Topics: []string{topic},
+		})
+		if err != nil {
+			t.Fatalf("waitForTopic: error listing topics: %s", err.Error())
+		}
 
+		// Find a topic which has at least 1 partition in the metadata response
+		for _, top := range response.Topics {
+			if top.Name != topic {
+				continue
+			}
+
+			numPartitions := len(top.Partitions)
+			t.Logf("waitForTopic: found topic %q with %d partitions",
+				topic, numPartitions)
+
+			if numPartitions > 0 {
+				return
+			}
+		}
+
+		t.Logf("retrying after 100ms")
+		time.Sleep(100 * time.Millisecond)
+		continue
+	}
+}
+
+func deleteTopic(t *testing.T, topic ...string) {
+	t.Helper()
+	conn, err := Dial("tcp", "localhost:9092")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer conn.Close()
+
+	controller, err := conn.Controller()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	conn, err = Dial("tcp", net.JoinHostPort(controller.Host, strconv.Itoa(controller.Port)))
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	conn.SetDeadline(time.Now().Add(10 * time.Second))
+
+	if err := conn.DeleteTopics(topic...); err != nil {
+		t.Fatal(err)
+	}
+}
+
+func TestReaderOnNonZeroPartition(t *testing.T) {
 	tests := []struct {
 		scenario string
 		function func(*testing.T, context.Context, *Reader)
@@ -343,6 +413,7 @@ func TestReaderOnNonZeroPartition(t *tes
 
 			topic := makeTopic()
 			createTopic(t, topic, 2)
+			defer deleteTopic(t, topic)
 
 			ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
 			defer cancel()
@@ -399,7 +470,6 @@ func TestReadTruncatedMessages(t *testin
 	//        include it in CI unit tests.
 	t.Skip()
 
-	t.Parallel()
 	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
 	defer cancel()
 	r := NewReader(ReaderConfig{
@@ -420,9 +490,11 @@ func TestReadTruncatedMessages(t *testin
 }
 
 func makeTestSequence(n int) []Message {
+	base := time.Now()
 	msgs := make([]Message, n)
 	for i := 0; i != n; i++ {
 		msgs[i] = Message{
+			Time:  base.Add(time.Duration(i) * time.Millisecond).Truncate(time.Millisecond),
 			Value: []byte(strconv.Itoa(i)),
 		}
 	}
@@ -430,7 +502,7 @@ func makeTestSequence(n int) []Message {
 }
 
 func prepareReader(t *testing.T, ctx context.Context, r *Reader, msgs ...Message) {
-	var config = r.Config()
+	config := r.Config()
 	var conn *Conn
 	var err error
 
@@ -506,59 +578,79 @@ func BenchmarkReader(b *testing.B) {
 	b.SetBytes(int64(len(benchmarkReaderPayload)))
 }
 
-func TestConsumerGroup(t *testing.T) {
-	t.Parallel()
-
-	tests := []struct {
-		scenario string
-		function func(*testing.T, context.Context, *Reader)
-	}{
-		{
-			scenario: "Close immediately after NewReader",
-			function: testConsumerGroupImmediateClose,
-		},
-
-		{
-			scenario: "Close immediately after NewReader",
-			function: testConsumerGroupSimple,
-		},
+func TestCloseLeavesGroup(t *testing.T) {
+	if os.Getenv("KAFKA_VERSION") == "2.3.1" {
+		// There's a bug in 2.3.1 that causes the MemberMetadata to be in the wrong format and thus
+		// leads to an error when decoding the DescribeGroupsResponse.
+		//
+		// See https://issues.apache.org/jira/browse/KAFKA-9150 for details.
+		t.Skip("Skipping because kafka version is 2.3.1")
 	}
 
-	for _, test := range tests {
-		testFunc := test.function
-		t.Run(test.scenario, func(t *testing.T) {
-			t.Parallel()
+	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+	defer cancel()
 
-			ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
-			defer cancel()
+	topic := makeTopic()
+	createTopic(t, topic, 1)
+	defer deleteTopic(t, topic)
 
-			topic := makeTopic()
-			createTopic(t, topic, 1)
+	groupID := makeGroupID()
+	r := NewReader(ReaderConfig{
+		Brokers:          []string{"localhost:9092"},
+		Topic:            topic,
+		GroupID:          groupID,
+		MinBytes:         1,
+		MaxBytes:         10e6,
+		MaxWait:          100 * time.Millisecond,
+		RebalanceTimeout: time.Second,
+	})
+	prepareReader(t, ctx, r, Message{Value: []byte("test")})
 
-			r := NewReader(ReaderConfig{
-				Brokers:  []string{"localhost:9092"},
-				Topic:    topic,
-				GroupID:  makeGroupID(),
-				MinBytes: 1,
-				MaxBytes: 10e6,
-				MaxWait:  100 * time.Millisecond,
-			})
-			defer r.Close()
-			testFunc(t, ctx, r)
-		})
+	conn, err := Dial("tcp", r.config.Brokers[0])
+	if err != nil {
+		t.Fatalf("error dialing: %v", err)
 	}
+	defer conn.Close()
 
-	const broker = "localhost:9092"
+	client, shutdown := newLocalClient()
+	defer shutdown()
 
-	topic := makeTopic()
-	createTopic(t, topic, 1)
+	descGroups := func() DescribeGroupsResponse {
+		resp, err := client.DescribeGroups(
+			ctx,
+			&DescribeGroupsRequest{
+				GroupIDs: []string{groupID},
+			},
+		)
+		if err != nil {
+			t.Fatalf("error from describeGroups %v", err)
+		}
+		return *resp
+	}
 
-	r := NewReader(ReaderConfig{
-		Brokers: []string{broker},
-		Topic:   topic,
-		GroupID: makeGroupID(),
-	})
-	r.Close()
+	_, err = r.ReadMessage(ctx)
+	if err != nil {
+		t.Fatalf("our reader never joind its group or couldn't read a message: %v", err)
+	}
+	resp := descGroups()
+	if len(resp.Groups) != 1 {
+		t.Fatalf("expected 1 group. got: %d", len(resp.Groups))
+	}
+	if len(resp.Groups[0].Members) != 1 {
+		t.Fatalf("expected group membership size of %d, but got %d", 1, len(resp.Groups[0].Members))
+	}
+
+	err = r.Close()
+	if err != nil {
+		t.Fatalf("unexpected error closing reader: %s", err.Error())
+	}
+	resp = descGroups()
+	if len(resp.Groups) != 1 {
+		t.Fatalf("expected 1 group. got: %d", len(resp.Groups))
+	}
+	if len(resp.Groups[0].Members) != 0 {
+		t.Fatalf("expected group membership size of %d, but got %d", 0, len(resp.Groups[0].Members))
+	}
 }
 
 func testConsumerGroupImmediateClose(t *testing.T, ctx context.Context, r *Reader) {
@@ -575,7 +667,7 @@ func testConsumerGroupSimple(t *testing.
 
 func TestReaderSetOffsetWhenConsumerGroupsEnabled(t *testing.T) {
 	r := &Reader{config: ReaderConfig{GroupID: "not-zero"}}
-	if err := r.SetOffset(LastOffset); err != errNotAvailableWithGroup {
+	if err := r.SetOffset(LastOffset); !errors.Is(err, errNotAvailableWithGroup) {
 		t.Fatalf("expected %v; got %v", errNotAvailableWithGroup, err)
 	}
 }
@@ -594,6 +686,19 @@ func TestReaderLagWhenConsumerGroupsEnab
 	}
 }
 
+func TestReaderReadLagReturnsZeroLagWhenConsumerGroupsEnabled(t *testing.T) {
+	r := &Reader{config: ReaderConfig{GroupID: "not-zero"}}
+	lag, err := r.ReadLag(context.Background())
+
+	if !errors.Is(err, errNotAvailableWithGroup) {
+		t.Fatalf("expected %v; got %v", errNotAvailableWithGroup, err)
+	}
+
+	if lag != 0 {
+		t.Fatalf("expected 0; got %d", lag)
+	}
+}
+
 func TestReaderPartitionWhenConsumerGroupsEnabled(t *testing.T) {
 	invoke := func() (boom bool) {
 		defer func() {
@@ -612,7 +717,6 @@ func TestReaderPartitionWhenConsumerGrou
 	if !invoke() {
 		t.Fatalf("expected panic; but NewReader worked?!")
 	}
-
 }
 
 func TestExtractTopics(t *testing.T) {
@@ -685,125 +789,7 @@ func TestExtractTopics(t *testing.T) {
 	}
 }
 
-func TestReaderAssignTopicPartitions(t *testing.T) {
-	conn := &MockConn{
-		partitions: []Partition{
-			{
-				Topic: "topic-1",
-				ID:    0,
-			},
-			{
-				Topic: "topic-1",
-				ID:    1,
-			},
-			{
-				Topic: "topic-1",
-				ID:    2,
-			},
-			{
-				Topic: "topic-2",
-				ID:    0,
-			},
-		},
-	}
-
-	newJoinGroupResponseV1 := func(topicsByMemberID map[string][]string) joinGroupResponseV1 {
-		resp := joinGroupResponseV1{
-			GroupProtocol: RoundRobinGroupBalancer{}.ProtocolName(),
-		}
-
-		for memberID, topics := range topicsByMemberID {
-			resp.Members = append(resp.Members, joinGroupResponseMemberV1{
-				MemberID: memberID,
-				MemberMetadata: groupMetadata{
-					Topics: topics,
-				}.bytes(),
-			})
-		}
-
-		return resp
-	}
-
-	testCases := map[string]struct {
-		Members     joinGroupResponseV1
-		Assignments GroupMemberAssignments
-	}{
-		"nil": {
-			Members:     newJoinGroupResponseV1(nil),
-			Assignments: GroupMemberAssignments{},
-		},
-		"one member, one topic": {
-			Members: newJoinGroupResponseV1(map[string][]string{
-				"member-1": {"topic-1"},
-			}),
-			Assignments: GroupMemberAssignments{
-				"member-1": map[string][]int{
-					"topic-1": {0, 1, 2},
-				},
-			},
-		},
-		"one member, two topics": {
-			Members: newJoinGroupResponseV1(map[string][]string{
-				"member-1": {"topic-1", "topic-2"},
-			}),
-			Assignments: GroupMemberAssignments{
-				"member-1": map[string][]int{
-					"topic-1": {0, 1, 2},
-					"topic-2": {0},
-				},
-			},
-		},
-		"two members, one topic": {
-			Members: newJoinGroupResponseV1(map[string][]string{
-				"member-1": {"topic-1"},
-				"member-2": {"topic-1"},
-			}),
-			Assignments: GroupMemberAssignments{
-				"member-1": map[string][]int{
-					"topic-1": {0, 2},
-				},
-				"member-2": map[string][]int{
-					"topic-1": {1},
-				},
-			},
-		},
-		"two members, two unshared topics": {
-			Members: newJoinGroupResponseV1(map[string][]string{
-				"member-1": {"topic-1"},
-				"member-2": {"topic-2"},
-			}),
-			Assignments: GroupMemberAssignments{
-				"member-1": map[string][]int{
-					"topic-1": {0, 1, 2},
-				},
-				"member-2": map[string][]int{
-					"topic-2": {0},
-				},
-			},
-		},
-	}
-
-	for label, tc := range testCases {
-		t.Run(label, func(t *testing.T) {
-			r := &Reader{}
-			r.config.GroupBalancers = []GroupBalancer{
-				RangeGroupBalancer{},
-				RoundRobinGroupBalancer{},
-			}
-			assignments, err := r.assignTopicPartitions(conn, tc.Members)
-			if err != nil {
-				t.Fatalf("bad err: %v", err)
-			}
-			if !reflect.DeepEqual(tc.Assignments, assignments) {
-				t.Errorf("expected %v; got %v", tc.Assignments, assignments)
-			}
-		})
-	}
-}
-
 func TestReaderConsumerGroup(t *testing.T) {
-	t.Parallel()
-
 	tests := []struct {
 		scenario       string
 		partitions     int
@@ -815,7 +801,6 @@ func TestReaderConsumerGroup(t *testing.
 			partitions: 1,
 			function:   testReaderConsumerGroupHandshake,
 		},
-
 		{
 			scenario:   "verify offset committed",
 			partitions: 1,
@@ -858,23 +843,39 @@ func TestReaderConsumerGroup(t *testing.
 			partitions: 3,
 			function:   testReaderConsumerGroupReadContentAcrossPartitions,
 		},
+
+		{
+			scenario:   "Close immediately after NewReader",
+			partitions: 1,
+			function:   testConsumerGroupImmediateClose,
+		},
+
+		{
+			scenario:   "Close immediately after NewReader",
+			partitions: 1,
+			function:   testConsumerGroupSimple,
+		},
 	}
 
 	for _, test := range tests {
 		t.Run(test.scenario, func(t *testing.T) {
+			// It appears that some of the tests depend on all these tests being
+			// run concurrently to pass... this is brittle and should be fixed
+			// at some point.
 			t.Parallel()
 
 			topic := makeTopic()
 			createTopic(t, topic, test.partitions)
+			defer deleteTopic(t, topic)
 
 			groupID := makeGroupID()
 			r := NewReader(ReaderConfig{
 				Brokers:           []string{"localhost:9092"},
 				Topic:             topic,
 				GroupID:           groupID,
-				HeartbeatInterval: time.Second,
+				HeartbeatInterval: 2 * time.Second,
 				CommitInterval:    test.commitInterval,
-				RebalanceTimeout:  8 * time.Second,
+				RebalanceTimeout:  2 * time.Second,
 				RetentionTime:     time.Hour,
 				MinBytes:          1,
 				MaxBytes:          1e6,
@@ -931,13 +932,7 @@ func testReaderConsumerGroupVerifyOffset
 		t.Errorf("bad commit message: %v", err)
 	}
 
-	offsets, err := r.fetchOffsets(map[string][]int32{
-		r.config.Topic: {0},
-	})
-	if err != nil {
-		t.Errorf("bad fetchOffsets: %v", err)
-	}
-
+	offsets := getOffsets(t, r.config)
 	if expected := map[int]int64{0: m.Offset + 1}; !reflect.DeepEqual(expected, offsets) {
 		t.Errorf("expected %v; got %v", expected, offsets)
 	}
@@ -959,20 +954,14 @@ func testReaderConsumerGroupVerifyPeriod
 	if err := r.CommitMessages(ctx, m); err != nil {
 		t.Errorf("bad commit message: %v", err)
 	}
-	if elapsed := time.Now().Sub(started); elapsed > 10*time.Millisecond {
+	if elapsed := time.Since(started); elapsed > 10*time.Millisecond {
 		t.Errorf("background commits should happen nearly instantly")
 	}
 
 	// wait for committer to pick up the commits
 	time.Sleep(r.config.CommitInterval * 3)
 
-	offsets, err := r.fetchOffsets(map[string][]int32{
-		r.config.Topic: {0},
-	})
-	if err != nil {
-		t.Errorf("bad fetchOffsets: %v", err)
-	}
-
+	offsets := getOffsets(t, r.config)
 	if expected := map[int]int64{0: m.Offset + 1}; !reflect.DeepEqual(expected, offsets) {
 		t.Errorf("expected %v; got %v", expected, offsets)
 	}
@@ -1001,13 +990,7 @@ func testReaderConsumerGroupVerifyCommit
 	r2 := NewReader(r.config)
 	defer r2.Close()
 
-	offsets, err := r2.fetchOffsets(map[string][]int32{
-		r.config.Topic: {0},
-	})
-	if err != nil {
-		t.Errorf("bad fetchOffsets: %v", err)
-	}
-
+	offsets := getOffsets(t, r2.config)
 	if expected := map[int]int64{0: m.Offset + 1}; !reflect.DeepEqual(expected, offsets) {
 		t.Errorf("expected %v; got %v", expected, offsets)
 	}
@@ -1016,13 +999,16 @@ func testReaderConsumerGroupVerifyCommit
 func testReaderConsumerGroupReadContentAcrossPartitions(t *testing.T, ctx context.Context, r *Reader) {
 	const N = 12
 
-	writer := NewWriter(WriterConfig{
-		Brokers:   r.config.Brokers,
+	client, shutdown := newLocalClient()
+	defer shutdown()
+
+	writer := &Writer{
+		Addr:      TCP(r.config.Brokers...),
 		Topic:     r.config.Topic,
-		Dialer:    r.config.Dialer,
 		Balancer:  &RoundRobin{},
 		BatchSize: 1,
-	})
+		Transport: client.Transport,
+	}
 	if err := writer.WriteMessages(ctx, makeTestSequence(N)...); err != nil {
 		t.Fatalf("bad write messages: %v", err)
 	}
@@ -1053,14 +1039,17 @@ func testReaderConsumerGroupRebalance(t
 		partitions = 2
 	)
 
+	client, shutdown := newLocalClient()
+	defer shutdown()
+
+	// rebalance should result in 12 messages in each of the partitions
-	writer := NewWriter(WriterConfig{
-		Brokers:   r.config.Brokers,
+	writer := &Writer{
+		Addr:      TCP(r.config.Brokers...),
 		Topic:     r.config.Topic,
-		Dialer:    r.config.Dialer,
 		Balancer:  &RoundRobin{},
 		BatchSize: 1,
-	})
+		Transport: client.Transport,
+	}
 	if err := writer.WriteMessages(ctx, makeTestSequence(N*partitions)...); err != nil {
 		t.Fatalf("bad write messages: %v", err)
 	}
@@ -1081,8 +1070,8 @@ func testReaderConsumerGroupRebalance(t
 
 func testReaderConsumerGroupRebalanceAcrossTopics(t *testing.T, ctx context.Context, r *Reader) {
 	// create a second reader that shares the groupID, but reads from a different topic
-	topic2 := makeTopic()
-	createTopic(t, topic2, 1)
+	client, topic2, shutdown := newLocalClientAndTopic()
+	defer shutdown()
 
 	r2 := NewReader(ReaderConfig{
 		Brokers:           r.config.Brokers,
@@ -1103,13 +1092,13 @@ func testReaderConsumerGroupRebalanceAcr
 	)
 
 	// write messages across both partitions
-	writer := NewWriter(WriterConfig{
-		Brokers:   r.config.Brokers,
+	writer := &Writer{
+		Addr:      TCP(r.config.Brokers...),
 		Topic:     r.config.Topic,
-		Dialer:    r.config.Dialer,
 		Balancer:  &RoundRobin{},
 		BatchSize: 1,
-	})
+		Transport: client.Transport,
+	}
 	if err := writer.WriteMessages(ctx, makeTestSequence(N)...); err != nil {
 		t.Fatalf("bad write messages: %v", err)
 	}
@@ -1135,6 +1124,10 @@ func testReaderConsumerGroupRebalanceAcr
 	// of a minute and that seems too long for unit tests.  Also, setting this
 	// to a larger number seems to make the kafka broker unresponsive.
 	// TODO research if there's a way to reduce rebalance time across many partitions
+	// svls: the described behavior is due to the thundering herd of readers
+	//       hitting the rebalance timeout.  introducing the 100ms sleep in the
+	//       loop below in order to give time for the sync group to finish has
+	//       greatly helped, though we still hit the timeout from time to time.
 	const N = 8
 
 	var readers []*Reader
@@ -1142,21 +1135,26 @@ func testReaderConsumerGroupRebalanceAcr
 	for i := 0; i < N-1; i++ {
 		reader := NewReader(r.config)
 		readers = append(readers, reader)
+		time.Sleep(100 * time.Millisecond)
 	}
 	defer func() {
 		for _, r := range readers {
 			r.Close()
+			time.Sleep(100 * time.Millisecond)
 		}
 	}()
 
+	client, shutdown := newLocalClient()
+	defer shutdown()
+
 	// write messages across both partitions
-	writer := NewWriter(WriterConfig{
-		Brokers:   r.config.Brokers,
+	writer := &Writer{
+		Addr:      TCP(r.config.Brokers...),
 		Topic:     r.config.Topic,
-		Dialer:    r.config.Dialer,
 		Balancer:  &RoundRobin{},
 		BatchSize: 1,
-	})
+		Transport: client.Transport,
+	}
 	if err := writer.WriteMessages(ctx, makeTestSequence(N*3)...); err != nil {
 		t.Fatalf("bad write messages: %v", err)
 	}
@@ -1248,21 +1246,76 @@ func TestOffsetStash(t *testing.T) {
 	}
 }
 
-type mockOffsetCommitter struct {
-	invocations int
-	failCount   int
-	err         error
+func TestValidateReader(t *testing.T) {
+	tests := []struct {
+		config        ReaderConfig
+		errorOccurred bool
+	}{
+		{config: ReaderConfig{}, errorOccurred: true},
+		{config: ReaderConfig{Brokers: []string{"broker1"}}, errorOccurred: true},
+		{config: ReaderConfig{Brokers: []string{"broker1"}, Topic: "topic1"}, errorOccurred: false},
+		{config: ReaderConfig{Brokers: []string{"broker1"}, Topic: "topic1", Partition: -1}, errorOccurred: true},
+		{config: ReaderConfig{Brokers: []string{"broker1"}, Topic: "topic1", Partition: 1, MinBytes: -1}, errorOccurred: true},
+		{config: ReaderConfig{Brokers: []string{"broker1"}, Topic: "topic1", Partition: 1, MinBytes: 5, MaxBytes: -1}, errorOccurred: true},
+		{config: ReaderConfig{Brokers: []string{"broker1"}, Topic: "topic1", Partition: 1, MinBytes: 5, MaxBytes: 6}, errorOccurred: false},
+	}
+	for _, test := range tests {
+		err := test.config.Validate()
+		if test.errorOccurred && err == nil {
+			t.Fail()
+		}
+		if !test.errorOccurred && err != nil {
+			t.Fail()
+		}
+	}
 }
 
-func (m *mockOffsetCommitter) offsetCommit(request offsetCommitRequestV2) (offsetCommitResponseV2, error) {
-	m.invocations++
+func TestCommitLoopImmediateFlushOnGenerationEnd(t *testing.T) {
+	t.Parallel()
+	var committedOffset int64
+	var commitCount int
+	gen := &Generation{
+		conn: mockCoordinator{
+			offsetCommitFunc: func(r offsetCommitRequestV2) (offsetCommitResponseV2, error) {
+				commitCount++
+				committedOffset = r.Topics[0].Partitions[0].Offset
+				return offsetCommitResponseV2{}, nil
+			},
+		},
+		done:     make(chan struct{}),
+		log:      func(func(Logger)) {},
+		logError: func(func(Logger)) {},
+		joined:   make(chan struct{}),
+	}
+
+	// initialize commits so that the commitLoopImmediate select statement blocks
+	r := &Reader{stctx: context.Background(), commits: make(chan commitRequest, 100)}
+
+	for i := 0; i < 100; i++ {
+		cr := commitRequest{
+			commits: []commit{{
+				topic:     "topic",
+				partition: 0,
+				offset:    int64(i) + 1,
+			}},
+			errch: make(chan<- error, 1),
+		}
+		r.commits <- cr
+	}
+
+	gen.Start(func(ctx context.Context) {
+		r.commitLoopImmediate(ctx, gen)
+	})
 
-	if m.failCount > 0 {
-		m.failCount--
-		return offsetCommitResponseV2{}, io.EOF
+	gen.close()
+
+	if committedOffset != 100 {
+		t.Fatalf("expected commited offset to be 100 but got %d", committedOffset)
 	}
 
-	return offsetCommitResponseV2{}, nil
+	if commitCount >= 100 {
+		t.Fatalf("expected a single final commit on generation end got %d", commitCount)
+	}
 }
 
 func TestCommitOffsetsWithRetry(t *testing.T) {
@@ -1289,19 +1342,657 @@ func TestCommitOffsetsWithRetry(t *testi
 
 	for label, test := range tests {
 		t.Run(label, func(t *testing.T) {
-			conn := &mockOffsetCommitter{failCount: test.Fails}
+			count := 0
+			gen := &Generation{
+				conn: mockCoordinator{
+					offsetCommitFunc: func(offsetCommitRequestV2) (offsetCommitResponseV2, error) {
+						count++
+						if count <= test.Fails {
+							return offsetCommitResponseV2{}, io.EOF
+						}
+						return offsetCommitResponseV2{}, nil
+					},
+				},
+				done:     make(chan struct{}),
+				log:      func(func(Logger)) {},
+				logError: func(func(Logger)) {},
+			}
 
 			r := &Reader{stctx: context.Background()}
-			err := r.commitOffsetsWithRetry(conn, offsets, defaultCommitRetries)
+			err := r.commitOffsetsWithRetry(gen, offsets, defaultCommitRetries)
 			switch {
 			case test.HasError && err == nil:
 				t.Error("bad err: expected not nil; got nil")
 			case !test.HasError && err != nil:
 				t.Errorf("bad err: expected nil; got %v", err)
 			}
-			if test.Invocations != conn.invocations {
-				t.Errorf("expected %v retries; got %v", test.Invocations, conn.invocations)
+		})
+	}
+}
+
+// Test that a reader won't continually rebalance when there are more consumers
+// than partitions in a group.
+// https://github.com/segmentio/kafka-go/issues/200
+func TestRebalanceTooManyConsumers(t *testing.T) {
+	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+	defer cancel()
+
+	conf := ReaderConfig{
+		Brokers: []string{"localhost:9092"},
+		GroupID: makeGroupID(),
+		Topic:   makeTopic(),
+		MaxWait: time.Second,
+	}
+
+	// Create the first reader and wait for it to become the leader.
+	r1 := NewReader(conf)
+
+	// Give the reader some time to set up before reading a message
+	time.Sleep(1 * time.Second)
+	prepareReader(t, ctx, r1, makeTestSequence(1)...)
+
+	_, err := r1.ReadMessage(ctx)
+	if err != nil {
+		t.Fatalf("failed to read message: %v", err)
+	}
+	// Clear the stats from the first rebalance.
+	r1.Stats()
+
+	// The second reader should cause one rebalance each for r1 and r2.
+	r2 := NewReader(conf)
+
+	// Wait for rebalances.
+	time.Sleep(5 * time.Second)
+
+	// Before the fix, r2 would cause continuous rebalances,
+	// as it tried to handshake() repeatedly.
+	rebalances := r1.Stats().Rebalances + r2.Stats().Rebalances
+	if rebalances > 2 {
+		t.Errorf("unexpected rebalances to first reader, got %d", rebalances)
+	}
+}
+
+func TestConsumerGroupWithMissingTopic(t *testing.T) {
+	t.Skip("this test doesn't work when the cluster is configured to auto-create topics")
+
+	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+	defer cancel()
+
+	conf := ReaderConfig{
+		Brokers:                []string{"localhost:9092"},
+		GroupID:                makeGroupID(),
+		Topic:                  makeTopic(),
+		MaxWait:                time.Second,
+		PartitionWatchInterval: 100 * time.Millisecond,
+		WatchPartitionChanges:  true,
+	}
+
+	r := NewReader(conf)
+	defer r.Close()
+
+	recvErr := make(chan error, 1)
+	go func() {
+		_, err := r.ReadMessage(ctx)
+		recvErr <- err
+	}()
+
+	time.Sleep(time.Second)
+	client, shutdown := newLocalClientWithTopic(conf.Topic, 1)
+	defer shutdown()
+
+	w := &Writer{
+		Addr:         TCP(r.config.Brokers...),
+		Topic:        r.config.Topic,
+		BatchTimeout: 10 * time.Millisecond,
+		BatchSize:    1,
+		Transport:    client.Transport,
+	}
+	defer w.Close()
+	if err := w.WriteMessages(ctx, Message{}); err != nil {
+		t.Fatalf("write error: %+v", err)
+	}
+
+	if err := <-recvErr; err != nil {
+		t.Fatalf("read error: %+v", err)
+	}
+
+	nMsgs := r.Stats().Messages
+	if nMsgs != 1 {
+		t.Fatalf("expected to receive one message, but got %d", nMsgs)
+	}
+}
+
+func TestConsumerGroupWithTopic(t *testing.T) {
+	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+	defer cancel()
+
+	conf := ReaderConfig{
+		Brokers:                []string{"localhost:9092"},
+		GroupID:                makeGroupID(),
+		Topic:                  makeTopic(),
+		MaxWait:                time.Second,
+		PartitionWatchInterval: 100 * time.Millisecond,
+		WatchPartitionChanges:  true,
+		Logger:                 newTestKafkaLogger(t, "Reader:"),
+	}
+
+	r := NewReader(conf)
+	defer r.Close()
+
+	recvErr := make(chan error, len(conf.GroupTopics))
+	go func() {
+		msg, err := r.ReadMessage(ctx)
+		t.Log(msg)
+		recvErr <- err
+	}()
+
+	time.Sleep(conf.MaxWait)
+
+	client, shutdown := newLocalClientWithTopic(conf.Topic, 1)
+	defer shutdown()
+
+	w := &Writer{
+		Addr:         TCP(r.config.Brokers...),
+		Topic:        conf.Topic,
+		BatchTimeout: 10 * time.Millisecond,
+		BatchSize:    1,
+		Transport:    client.Transport,
+		Logger:       newTestKafkaLogger(t, "Writer:"),
+	}
+	defer w.Close()
+	if err := w.WriteMessages(ctx, Message{Value: []byte(conf.Topic)}); err != nil {
+		t.Fatalf("write error: %+v", err)
+	}
+
+	if err := <-recvErr; err != nil {
+		t.Fatalf("read error: %+v", err)
+	}
+
+	nMsgs := r.Stats().Messages
+	if nMsgs != 1 {
+		t.Fatalf("expected to receive 1 message, but got %d", nMsgs)
+	}
+}
+
+func TestConsumerGroupWithGroupTopicsSingle(t *testing.T) {
+	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+	defer cancel()
+
+	conf := ReaderConfig{
+		Brokers:                []string{"localhost:9092"},
+		GroupID:                makeGroupID(),
+		GroupTopics:            []string{makeTopic()},
+		MaxWait:                time.Second,
+		PartitionWatchInterval: 100 * time.Millisecond,
+		WatchPartitionChanges:  true,
+		Logger:                 newTestKafkaLogger(t, "Reader:"),
+	}
+
+	r := NewReader(conf)
+	defer r.Close()
+
+	recvErr := make(chan error, len(conf.GroupTopics))
+	go func() {
+		msg, err := r.ReadMessage(ctx)
+		t.Log(msg)
+		recvErr <- err
+	}()
+
+	time.Sleep(conf.MaxWait)
+
+	for i, topic := range conf.GroupTopics {
+		client, shutdown := newLocalClientWithTopic(topic, 1)
+		defer shutdown()
+
+		w := &Writer{
+			Addr:         TCP(r.config.Brokers...),
+			Topic:        topic,
+			BatchTimeout: 10 * time.Millisecond,
+			BatchSize:    1,
+			Transport:    client.Transport,
+			Logger:       newTestKafkaLogger(t, fmt.Sprintf("Writer(%d):", i)),
+		}
+		defer w.Close()
+		if err := w.WriteMessages(ctx, Message{Value: []byte(topic)}); err != nil {
+			t.Fatalf("write error: %+v", err)
+		}
+	}
+
+	if err := <-recvErr; err != nil {
+		t.Fatalf("read error: %+v", err)
+	}
+
+	nMsgs := r.Stats().Messages
+	if nMsgs != int64(len(conf.GroupTopics)) {
+		t.Fatalf("expected to receive %d messages, but got %d", len(conf.GroupTopics), nMsgs)
+	}
+}
+
+func TestConsumerGroupWithGroupTopicsMultiple(t *testing.T) {
+	ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second)
+	defer cancel()
+
+	client, shutdown := newLocalClient()
+	defer shutdown()
+	t1 := makeTopic()
+	createTopic(t, t1, 1)
+	defer deleteTopic(t, t1)
+	t2 := makeTopic()
+	createTopic(t, t2, 1)
+	defer deleteTopic(t, t2)
+	conf := ReaderConfig{
+		Brokers:                []string{"localhost:9092"},
+		GroupID:                makeGroupID(),
+		GroupTopics:            []string{t1, t2},
+		MaxWait:                time.Second,
+		PartitionWatchInterval: 100 * time.Millisecond,
+		WatchPartitionChanges:  true,
+		Logger:                 newTestKafkaLogger(t, "Reader:"),
+	}
+
+	r := NewReader(conf)
+
+	w := &Writer{
+		Addr:         TCP(r.config.Brokers...),
+		BatchTimeout: 10 * time.Millisecond,
+		BatchSize:    1,
+		Transport:    client.Transport,
+		Logger:       newTestKafkaLogger(t, "Writer:"),
+	}
+	defer w.Close()
+
+	time.Sleep(time.Second)
+
+	msgs := make([]Message, 0, len(conf.GroupTopics))
+	for _, topic := range conf.GroupTopics {
+		msgs = append(msgs, Message{Topic: topic})
+	}
+	if err := w.WriteMessages(ctx, msgs...); err != nil {
+		t.Logf("write error: %+v", err)
+	}
+
+	wg := new(sync.WaitGroup)
+	wg.Add(len(msgs))
+
+	go func() {
+		wg.Wait()
+		t.Log("closing reader")
+		r.Close()
+	}()
+
+	for {
+		msg, err := r.ReadMessage(ctx)
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				t.Log("reader closed")
+				break
 			}
+
+			t.Fatalf("read error: %+v", err)
+		} else {
+			t.Logf("message read: %+v", msg)
+			wg.Done()
+		}
+	}
+
+	nMsgs := r.Stats().Messages
+	if nMsgs != int64(len(conf.GroupTopics)) {
+		t.Fatalf("expected to receive %d messages, but got %d", len(conf.GroupTopics), nMsgs)
+	}
+}
+
+func getOffsets(t *testing.T, config ReaderConfig) map[int]int64 {
+	// minimal config required to look up the coordinator
+	cg := ConsumerGroup{
+		config: ConsumerGroupConfig{
+			ID:      config.GroupID,
+			Brokers: config.Brokers,
+			Dialer:  config.Dialer,
+		},
+	}
+
+	conn, err := cg.coordinator()
+	if err != nil {
+		t.Errorf("unable to connect to coordinator: %v", err)
+	}
+	defer conn.Close()
+
+	offsets, err := conn.offsetFetch(offsetFetchRequestV1{
+		GroupID: config.GroupID,
+		Topics: []offsetFetchRequestV1Topic{{
+			Topic:      config.Topic,
+			Partitions: []int32{0},
+		}},
+	})
+	if err != nil {
+		t.Errorf("bad fetchOffsets: %v", err)
+	}
+
+	m := map[int]int64{}
+
+	for _, r := range offsets.Responses {
+		if r.Topic == config.Topic {
+			for _, p := range r.PartitionResponses {
+				m[int(p.Partition)] = p.Offset
+			}
+		}
+	}
+
+	return m
+}
+
+const (
+	connTO     = 1 * time.Second
+	connTestTO = 2 * connTO
+)
+
+func TestErrorCannotConnect(t *testing.T) {
+	r := NewReader(ReaderConfig{
+		Brokers:     []string{"localhost:9093"},
+		Dialer:      &Dialer{Timeout: connTO},
+		MaxAttempts: 1,
+		Topic:       makeTopic(),
+	})
+	ctx, cancel := context.WithTimeout(context.Background(), connTestTO)
+	defer cancel()
+
+	_, err := r.FetchMessage(ctx)
+	if err == nil || ctx.Err() != nil {
+		t.Errorf("Reader.FetchMessage must fail when it cannot " +
+			"connect")
+	}
+}
+
+func TestErrorCannotConnectGroupSubscription(t *testing.T) {
+	r := NewReader(ReaderConfig{
+		Brokers:     []string{"localhost:9093"},
+		Dialer:      &Dialer{Timeout: 1 * time.Second},
+		GroupID:     "foobar",
+		MaxAttempts: 1,
+		Topic:       makeTopic(),
+	})
+	ctx, cancel := context.WithTimeout(context.Background(), connTestTO)
+	defer cancel()
+
+	_, err := r.FetchMessage(ctx)
+	if err == nil || ctx.Err() != nil {
+		t.Errorf("Reader.FetchMessage with a group subscription " +
+			"must fail when it cannot connect")
+	}
+}
+
+// Tests that the reader can handle messages where the response is truncated
+// due to reaching MaxBytes.
+//
+// If MaxBytes is too small to fit 1 record then it will never truncate, so
+// we start from a small message size and increase it until we are sure
+// truncation has happened at some point.
+func TestReaderTruncatedResponse(t *testing.T) {
+	topic := makeTopic()
+	createTopic(t, topic, 1)
+	defer deleteTopic(t, topic)
+
+	readerMaxBytes := 100
+	batchSize := 4
+	maxMsgPadding := 5
+	readContextTimeout := 10 * time.Second
+
+	var msgs []Message
+	// The key of each message
+	n := 0
+	// `i` is the amount of padding per message
+	for i := 0; i < maxMsgPadding; i++ {
+		bb := bytes.Buffer{}
+		for x := 0; x < i; x++ {
+			_, err := bb.WriteRune('0')
+			require.NoError(t, err)
+		}
+		padding := bb.Bytes()
+		// `j` is the number of times the message repeats
+		for j := 0; j < batchSize*4; j++ {
+			msgs = append(msgs, Message{
+				Key:   []byte(fmt.Sprintf("%05d", n)),
+				Value: padding,
+			})
+			n++
+		}
+	}
+
+	wr := NewWriter(WriterConfig{
+		Brokers:   []string{"localhost:9092"},
+		BatchSize: batchSize,
+		Async:     false,
+		Topic:     topic,
+		Balancer:  &LeastBytes{},
+	})
+	err := wr.WriteMessages(context.Background(), msgs...)
+	require.NoError(t, err)
+
+	ctx, cancel := context.WithTimeout(context.Background(), readContextTimeout)
+	defer cancel()
+	r := NewReader(ReaderConfig{
+		Brokers:  []string{"localhost:9092"},
+		Topic:    topic,
+		MinBytes: 1,
+		MaxBytes: readerMaxBytes,
+		// Speed up testing
+		MaxWait: 100 * time.Millisecond,
+	})
+	defer r.Close()
+
+	expectedKeys := map[string]struct{}{}
+	for _, k := range msgs {
+		expectedKeys[string(k.Key)] = struct{}{}
+	}
+	keys := map[string]struct{}{}
+	for {
+		m, err := r.FetchMessage(ctx)
+		require.NoError(t, err)
+		keys[string(m.Key)] = struct{}{}
+
+		t.Logf("got key %s have %d keys expect %d\n", string(m.Key), len(keys), len(expectedKeys))
+		if len(keys) == len(expectedKeys) {
+			require.Equal(t, expectedKeys, keys)
+			return
+		}
+	}
+}
+
+// Tests that the reader can read record batches from log compacted topics
+// where the batch ends with compacted records.
+//
+// This test forces varying sized chunks of duplicated messages along with
+// configuring the topic with a minimal `segment.bytes` in order to
+// guarantee that at least 1 batch can be compacted down to 0 "unread" messages
+// with at least 1 "old" message otherwise the batch is skipped entirely.
+func TestReaderReadCompactedMessage(t *testing.T) {
+	topic := makeTopic()
+	createTopicWithCompaction(t, topic, 1)
+	defer deleteTopic(t, topic)
+
+	msgs := makeTestDuplicateSequence()
+
+	writeMessagesForCompactionCheck(t, topic, msgs)
+
+	expectedKeys := map[string]int{}
+	for _, msg := range msgs {
+		expectedKeys[string(msg.Key)] = 1
+	}
+
+	// kafka 2.0.1 is extra slow
+	ctx, cancel := context.WithTimeout(context.Background(), time.Second*120)
+	defer cancel()
+	for {
+		success := func() bool {
+			r := NewReader(ReaderConfig{
+				Brokers:  []string{"localhost:9092"},
+				Topic:    topic,
+				MinBytes: 200,
+				MaxBytes: 200,
+				// Speed up testing
+				MaxWait: 100 * time.Millisecond,
+			})
+			defer r.Close()
+
+			keys := map[string]int{}
+			for {
+				m, err := r.FetchMessage(ctx)
+				if err != nil {
+					t.Logf("can't get message from compacted log: %v", err)
+					return false
+				}
+				keys[string(m.Key)]++
+
+				if len(keys) == countKeys(msgs) {
+					t.Logf("got keys: %+v", keys)
+					return reflect.DeepEqual(keys, expectedKeys)
+				}
+			}
+		}()
+		if success {
+			return
+		}
+		select {
+		case <-ctx.Done():
+			t.Fatal(ctx.Err())
+		default:
+		}
+	}
+}
+
+// writeMessagesForCompactionCheck writes messages with a specific writer configuration.
+func writeMessagesForCompactionCheck(t *testing.T, topic string, msgs []Message) {
+	t.Helper()
+
+	wr := NewWriter(WriterConfig{
+		Brokers: []string{"localhost:9092"},
+		// Batch size must be large enough to have multiple compacted records
+		// for testing more edge cases.
+		BatchSize: 3,
+		Async:     false,
+		Topic:     topic,
+		Balancer:  &LeastBytes{},
+	})
+	err := wr.WriteMessages(context.Background(), msgs...)
+	require.NoError(t, err)
+}
+
+// makeTestDuplicateSequence creates messages for compacted log testing
+//
+// All keys and values are 4 characters long to tightly control how many
+// messages fit per log segment.
+func makeTestDuplicateSequence() []Message {
+	var msgs []Message
+	// `n` is an increasing counter so it is never compacted.
+	n := 0
+	// `i` determines how many compacted records to create
+	for i := 0; i < 5; i++ {
+		// `j` is how many times the current pattern repeats. We repeat because,
+		// as long as the pattern is slightly larger or smaller than the log
+		// segment size, repeating it enough times will eventually exercise
+		// all configurations.
+		for j := 0; j < 30; j++ {
+			msgs = append(msgs, Message{
+				Key:   []byte(fmt.Sprintf("%04d", n)),
+				Value: []byte(fmt.Sprintf("%04d", n)),
+			})
+			n++
+
+			// This produces the duplicated messages to compact.
+			for k := 0; k < i; k++ {
+				msgs = append(msgs, Message{
+					Key:   []byte("dup_"),
+					Value: []byte("dup_"),
+				})
+			}
+		}
+	}
+
+	// "end markers" to force duplicate message outside of the last segment of
+	// the log so that they can all be compacted.
+	for i := 0; i < 10; i++ {
+		msgs = append(msgs, Message{
+			Key:   []byte(fmt.Sprintf("e-%02d", i)),
+			Value: []byte(fmt.Sprintf("e-%02d", i)),
 		})
 	}
+	return msgs
+}
+
+// countKeys counts unique keys from given Message slice.
+func countKeys(msgs []Message) int {
+	m := make(map[string]struct{})
+	for _, msg := range msgs {
+		m[string(msg.Key)] = struct{}{}
+	}
+	return len(m)
+}
+
+func createTopicWithCompaction(t *testing.T, topic string, partitions int) {
+	t.Helper()
+
+	t.Logf("createTopic(%s, %d)", topic, partitions)
+
+	conn, err := Dial("tcp", "localhost:9092")
+	require.NoError(t, err)
+	defer conn.Close()
+
+	controller, err := conn.Controller()
+	require.NoError(t, err)
+
+	conn, err = Dial("tcp", net.JoinHostPort(controller.Host, strconv.Itoa(controller.Port)))
+	require.NoError(t, err)
+
+	conn.SetDeadline(time.Now().Add(10 * time.Second))
+
+	err = conn.CreateTopics(TopicConfig{
+		Topic:             topic,
+		NumPartitions:     partitions,
+		ReplicationFactor: 1,
+		ConfigEntries: []ConfigEntry{
+			{
+				ConfigName:  "cleanup.policy",
+				ConfigValue: "compact",
+			},
+			{
+				ConfigName:  "segment.bytes",
+				ConfigValue: "200",
+			},
+		},
+	})
+	if err != nil {
+		if !errors.Is(err, TopicAlreadyExists) {
+			require.NoError(t, err)
+		}
+	}
+
+	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
+	defer cancel()
+	waitForTopic(ctx, t, topic)
+}
+
+// The current behavior of the Reader is to retry OffsetOutOfRange errors
+// indefinitely, which results in programs hanging in the event of a topic being
+// re-created while a consumer is running. To retain backwards-compatibility,
+// ReaderConfig.OffsetOutOfRangeError is being used to instruct the Reader to
+// return an error in this case instead, allowing callers to react.
+func testReaderTopicRecreated(t *testing.T, ctx context.Context, r *Reader) {
+	r.config.OffsetOutOfRangeError = true
+
+	topic := r.config.Topic
+
+	// add 1 message to the topic
+	prepareReader(t, ctx, r, makeTestSequence(1)...)
+
+	// consume the message (moving the offset from 0 -> 1)
+	_, err := r.ReadMessage(ctx)
+	require.NoError(t, err)
+
+	// destroy the topic, then recreate it so the offset now becomes 0
+	deleteTopic(t, topic)
+	createTopic(t, topic, 1)
+
+	// expect an error, since the offset should now be out of range
+	_, err = r.ReadMessage(ctx)
+	require.ErrorIs(t, err, OffsetOutOfRange)
 }
diff -pruN 0.2.1-1.1/record.go 0.4.49+ds1-1/record.go
--- 0.2.1-1.1/record.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/record.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,42 @@
+package kafka
+
+import (
+	"github.com/segmentio/kafka-go/protocol"
+)
+
+// Header is a key/value pair type representing headers set on records.
+type Header = protocol.Header
+
+// Bytes is an interface representing a sequence of bytes. This abstraction
+// makes it possible for programs to inject data into produce requests without
+// having to load in into an intermediary buffer, or read record keys and values
+// from a fetch response directly from internal buffers.
+//
+// Bytes are not safe to use concurrently from multiple goroutines.
+type Bytes = protocol.Bytes
+
+// NewBytes constructs a Bytes value from a byte slice.
+//
+// If b is nil, nil is returned.
+func NewBytes(b []byte) Bytes { return protocol.NewBytes(b) }
+
+// ReadAll reads b into a byte slice.
+func ReadAll(b Bytes) ([]byte, error) { return protocol.ReadAll(b) }
+
+// Record is an interface representing a single kafka record.
+//
+// Record values are not safe to use concurrently from multiple goroutines.
+type Record = protocol.Record
+
+// RecordReader is an interface representing a sequence of records. Record sets
+// are used in both produce and fetch requests to represent the sequence of
+// records that are sent to or received from kafka brokers.
+//
+// RecordReader values are not safe to use concurrently from multiple goroutines.
+type RecordReader = protocol.RecordReader
+
+// NewRecordReader constructs a RecordReader which exposes the sequence of
+// records passed as arguments.
+func NewRecordReader(records ...Record) RecordReader {
+	return protocol.NewRecordReader(records...)
+}
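A minimal usage sketch of the record.go API above (illustrative only, not part of the upstream change). It relies on the NewBytes, ReadAll, Record and NewRecordReader symbols defined in this file, and assumes Record's Key and Value fields are Bytes values, as they are in the protocol package.

package main

import (
	"fmt"
	"log"

	"github.com/segmentio/kafka-go"
)

func main() {
	// Two records whose keys and values are backed by in-memory byte slices.
	records := []kafka.Record{
		{Key: kafka.NewBytes([]byte("k1")), Value: kafka.NewBytes([]byte("v1"))},
		{Key: kafka.NewBytes([]byte("k2")), Value: kafka.NewBytes([]byte("v2"))},
	}

	// NewRecordReader exposes the records in the form consumed by produce requests.
	reader := kafka.NewRecordReader(records...)
	_ = reader

	// ReadAll drains a Bytes value back into a byte slice.
	key, err := kafka.ReadAll(records[0].Key)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("first key: %s\n", key) // first key: k1
}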
diff -pruN 0.2.1-1.1/recordbatch.go 0.4.49+ds1-1/recordbatch.go
--- 0.2.1-1.1/recordbatch.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/recordbatch.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,108 @@
+package kafka
+
+import (
+	"bytes"
+	"time"
+)
+
+const recordBatchHeaderSize int32 = 0 +
+	8 + // base offset
+	4 + // batch length
+	4 + // partition leader epoch
+	1 + // magic
+	4 + // crc
+	2 + // attributes
+	4 + // last offset delta
+	8 + // first timestamp
+	8 + // max timestamp
+	8 + // producer id
+	2 + // producer epoch
+	4 + // base sequence
+	4 // msg count
+
+func recordBatchSize(msgs ...Message) (size int32) {
+	size = recordBatchHeaderSize
+	baseTime := msgs[0].Time
+
+	for i := range msgs {
+		msg := &msgs[i]
+		msz := recordSize(msg, msg.Time.Sub(baseTime), int64(i))
+		size += int32(msz + varIntLen(int64(msz)))
+	}
+
+	return
+}
+
+func compressRecordBatch(codec CompressionCodec, msgs ...Message) (compressed *bytes.Buffer, attributes int16, size int32, err error) {
+	compressed = acquireBuffer()
+	compressor := codec.NewWriter(compressed)
+	wb := &writeBuffer{w: compressor}
+
+	for i, msg := range msgs {
+		wb.writeRecord(0, msgs[0].Time, int64(i), msg)
+	}
+
+	if err = compressor.Close(); err != nil {
+		releaseBuffer(compressed)
+		return
+	}
+
+	attributes = int16(codec.Code())
+	size = recordBatchHeaderSize + int32(compressed.Len())
+	return
+}
+
+type recordBatch struct {
+	// required input parameters
+	codec      CompressionCodec
+	attributes int16
+	msgs       []Message
+
+	// parameters calculated during init
+	compressed *bytes.Buffer
+	size       int32
+}
+
+func newRecordBatch(codec CompressionCodec, msgs ...Message) (r *recordBatch, err error) {
+	r = &recordBatch{
+		codec: codec,
+		msgs:  msgs,
+	}
+	if r.codec == nil {
+		r.size = recordBatchSize(r.msgs...)
+	} else {
+		r.compressed, r.attributes, r.size, err = compressRecordBatch(r.codec, r.msgs...)
+	}
+	return
+}
+
+func (r *recordBatch) writeTo(wb *writeBuffer) {
+	wb.writeInt32(r.size)
+
+	baseTime := r.msgs[0].Time
+	lastTime := r.msgs[len(r.msgs)-1].Time
+	if r.compressed != nil {
+		wb.writeRecordBatch(r.attributes, r.size, len(r.msgs), baseTime, lastTime, func(wb *writeBuffer) {
+			wb.Write(r.compressed.Bytes())
+		})
+		releaseBuffer(r.compressed)
+	} else {
+		wb.writeRecordBatch(r.attributes, r.size, len(r.msgs), baseTime, lastTime, func(wb *writeBuffer) {
+			for i, msg := range r.msgs {
+				wb.writeRecord(0, r.msgs[0].Time, int64(i), msg)
+			}
+		})
+	}
+}
+
+func recordSize(msg *Message, timestampDelta time.Duration, offsetDelta int64) int {
+	return 1 + // attributes
+		varIntLen(int64(milliseconds(timestampDelta))) +
+		varIntLen(offsetDelta) +
+		varBytesLen(msg.Key) +
+		varBytesLen(msg.Value) +
+		varArrayLen(len(msg.Headers), func(i int) int {
+			h := &msg.Headers[i]
+			return varStringLen(h.Key) + varBytesLen(h.Value)
+		})
+}
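For reference, recordBatchHeaderSize sums to 61 bytes (8+4+4+1+4+2+4+8+8+8+2+4+4 = 61), which matches the fixed overhead of a v2 record batch in the Kafka protocol; recordBatchSize then adds each record's varint-prefixed size on top of that constant.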
diff -pruN 0.2.1-1.1/resolver.go 0.4.49+ds1-1/resolver.go
--- 0.2.1-1.1/resolver.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/resolver.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,57 @@
+package kafka
+
+import (
+	"context"
+	"net"
+)
+
+// The Resolver interface is used as an abstraction to provide service discovery
+// of the hosts of a kafka cluster.
+type Resolver interface {
+	// LookupHost looks up the given host using the local resolver.
+	// It returns a slice of that host's addresses.
+	LookupHost(ctx context.Context, host string) (addrs []string, err error)
+}
+
+// BrokerResolver is an interface implemented by types that translate host
+// names into a network address.
+//
+// This resolver is not intended to be a general purpose interface. Instead,
+// it is tailored to the particular needs of the kafka protocol, with the goal
+// being to provide a flexible mechanism for extending broker name resolution
+// while retaining context that is specific to interacting with a kafka cluster.
+//
+// Resolvers must be safe to use from multiple goroutines.
+type BrokerResolver interface {
+	// Returns the IP addresses of the broker passed as argument.
+	LookupBrokerIPAddr(ctx context.Context, broker Broker) ([]net.IPAddr, error)
+}
+
+// NewBrokerResolver constructs a BrokerResolver from r.
+//
+// If r is nil, net.DefaultResolver is used instead.
+func NewBrokerResolver(r *net.Resolver) BrokerResolver {
+	return brokerResolver{r}
+}
+
+type brokerResolver struct {
+	*net.Resolver
+}
+
+func (r brokerResolver) LookupBrokerIPAddr(ctx context.Context, broker Broker) ([]net.IPAddr, error) {
+	ipAddrs, err := r.LookupIPAddr(ctx, broker.Host)
+	if err != nil {
+		return nil, err
+	}
+
+	if len(ipAddrs) == 0 {
+		return nil, &net.DNSError{
+			Err:         "no addresses were returned by the resolver",
+			Name:        broker.Host,
+			IsTemporary: true,
+			IsNotFound:  true,
+		}
+	}
+
+	return ipAddrs, nil
+}
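A minimal usage sketch of the resolver API above (illustrative only, not part of the upstream change). It wraps net.DefaultResolver, as suggested by the NewBrokerResolver doc comment, and relies on the package's exported Broker type whose Host field is the one consulted by LookupBrokerIPAddr.

package main

import (
	"context"
	"fmt"
	"log"
	"net"

	"github.com/segmentio/kafka-go"
)

func main() {
	// Wrap the standard library resolver so it can resolve broker host names.
	resolver := kafka.NewBrokerResolver(net.DefaultResolver)

	addrs, err := resolver.LookupBrokerIPAddr(context.Background(), kafka.Broker{Host: "localhost"})
	if err != nil {
		log.Fatal(err)
	}
	for _, addr := range addrs {
		fmt.Println(addr.IP)
	}
}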
diff -pruN 0.2.1-1.1/resource.go 0.4.49+ds1-1/resource.go
--- 0.2.1-1.1/resource.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/resource.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,123 @@
+package kafka
+
+import (
+	"fmt"
+	"strings"
+)
+
+// https://github.com/apache/kafka/blob/trunk/clients/src/main/java/org/apache/kafka/common/resource/ResourceType.java
+type ResourceType int8
+
+const (
+	ResourceTypeUnknown ResourceType = 0
+	ResourceTypeAny     ResourceType = 1
+	ResourceTypeTopic   ResourceType = 2
+	ResourceTypeGroup   ResourceType = 3
+	// See https://github.com/apache/kafka/blob/trunk/clients/src/main/java/org/apache/kafka/common/config/ConfigResource.java#L36
+	ResourceTypeBroker          ResourceType = 4
+	ResourceTypeCluster         ResourceType = 4
+	ResourceTypeTransactionalID ResourceType = 5
+	ResourceTypeDelegationToken ResourceType = 6
+)
+
+func (rt ResourceType) String() string {
+	mapping := map[ResourceType]string{
+		ResourceTypeUnknown: "Unknown",
+		ResourceTypeAny:     "Any",
+		ResourceTypeTopic:   "Topic",
+		ResourceTypeGroup:   "Group",
+		// Note that ResourceTypeBroker and ResourceTypeCluster have the same value.
+		// A map cannot have duplicate keys, so a single entry covers both constants.
+		ResourceTypeCluster:         "Cluster",
+		ResourceTypeTransactionalID: "Transactionalid",
+		ResourceTypeDelegationToken: "Delegationtoken",
+	}
+	s, ok := mapping[rt]
+	if !ok {
+		s = mapping[ResourceTypeUnknown]
+	}
+	return s
+}
+
+func (rt ResourceType) MarshalText() ([]byte, error) {
+	return []byte(rt.String()), nil
+}
+
+func (rt *ResourceType) UnmarshalText(text []byte) error {
+	normalized := strings.ToLower(string(text))
+	mapping := map[string]ResourceType{
+		"unknown":         ResourceTypeUnknown,
+		"any":             ResourceTypeAny,
+		"topic":           ResourceTypeTopic,
+		"group":           ResourceTypeGroup,
+		"broker":          ResourceTypeBroker,
+		"cluster":         ResourceTypeCluster,
+		"transactionalid": ResourceTypeTransactionalID,
+		"delegationtoken": ResourceTypeDelegationToken,
+	}
+	parsed, ok := mapping[normalized]
+	if !ok {
+		*rt = ResourceTypeUnknown
+		return fmt.Errorf("cannot parse %s as a ResourceType", normalized)
+	}
+	*rt = parsed
+	return nil
+}
+
+// https://github.com/apache/kafka/blob/trunk/clients/src/main/java/org/apache/kafka/common/resource/PatternType.java
+type PatternType int8
+
+const (
+	// PatternTypeUnknown represents any PatternType which this client cannot
+	// understand.
+	PatternTypeUnknown PatternType = 0
+	// PatternTypeAny matches any resource pattern type.
+	PatternTypeAny PatternType = 1
+	// PatternTypeMatch performs pattern matching.
+	PatternTypeMatch PatternType = 2
+	// PatternTypeLiteral represents a literal name.
+	// A literal name defines the full name of a resource, e.g. topic with name
+	// 'foo', or group with name 'bob'.
+	PatternTypeLiteral PatternType = 3
+	// PatternTypePrefixed represents a prefixed name.
+	// A prefixed name defines a prefix for a resource, e.g. topics with names
+	// that start with 'foo'.
+	PatternTypePrefixed PatternType = 4
+)
+
+func (pt PatternType) String() string {
+	mapping := map[PatternType]string{
+		PatternTypeUnknown:  "Unknown",
+		PatternTypeAny:      "Any",
+		PatternTypeMatch:    "Match",
+		PatternTypeLiteral:  "Literal",
+		PatternTypePrefixed: "Prefixed",
+	}
+	s, ok := mapping[pt]
+	if !ok {
+		s = mapping[PatternTypeUnknown]
+	}
+	return s
+}
+
+func (pt PatternType) MarshalText() ([]byte, error) {
+	return []byte(pt.String()), nil
+}
+
+func (pt *PatternType) UnmarshalText(text []byte) error {
+	normalized := strings.ToLower(string(text))
+	mapping := map[string]PatternType{
+		"unknown":  PatternTypeUnknown,
+		"any":      PatternTypeAny,
+		"match":    PatternTypeMatch,
+		"literal":  PatternTypeLiteral,
+		"prefixed": PatternTypePrefixed,
+	}
+	parsed, ok := mapping[normalized]
+	if !ok {
+		*pt = PatternTypeUnknown
+		return fmt.Errorf("cannot parse %s as a PatternType", normalized)
+	}
+	*pt = parsed
+	return nil
+}
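Because ResourceType and PatternType implement encoding.TextMarshaler and encoding.TextUnmarshaler, they round-trip through encoding/json as strings. A small sketch of that behavior follows (the aclFilter struct is hypothetical, used only for illustration):

package main

import (
	"encoding/json"
	"fmt"
	"log"

	"github.com/segmentio/kafka-go"
)

// aclFilter is a hypothetical struct used only to demonstrate text marshaling.
type aclFilter struct {
	Resource kafka.ResourceType `json:"resource"`
	Pattern  kafka.PatternType  `json:"pattern"`
}

func main() {
	out, err := json.Marshal(aclFilter{
		Resource: kafka.ResourceTypeTopic,
		Pattern:  kafka.PatternTypeLiteral,
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(out)) // {"resource":"Topic","pattern":"Literal"}

	// Parsing is case-insensitive because UnmarshalText lowercases its input.
	var in aclFilter
	if err := json.Unmarshal([]byte(`{"resource":"group","pattern":"prefixed"}`), &in); err != nil {
		log.Fatal(err)
	}
	fmt.Println(in.Resource == kafka.ResourceTypeGroup, in.Pattern == kafka.PatternTypePrefixed) // true true
}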
diff -pruN 0.2.1-1.1/resource_test.go 0.4.49+ds1-1/resource_test.go
--- 0.2.1-1.1/resource_test.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/resource_test.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,58 @@
+package kafka
+
+import "testing"
+
+func TestResourceTypeMarshal(t *testing.T) {
+	for i := ResourceTypeUnknown; i <= ResourceTypeDelegationToken; i++ {
+		text, err := i.MarshalText()
+		if err != nil {
+			t.Errorf("couldn't marshal %d to text: %s", i, err)
+		}
+		var got ResourceType
+		err = got.UnmarshalText(text)
+		if err != nil {
+			t.Errorf("couldn't unmarshal %s to ResourceType: %s", text, err)
+		}
+		if got != i {
+			t.Errorf("got %d, want %d", got, i)
+		}
+	}
+}
+
+// Verify that the text version of ResourceTypeBroker is "Cluster".
+// This is added since ResourceTypeBroker and ResourceTypeCluster
+// have the same value.
+func TestResourceTypeBroker(t *testing.T) {
+	text, err := ResourceTypeBroker.MarshalText()
+	if err != nil {
+		t.Errorf("couldn't marshal %d to text: %s", ResourceTypeBroker, err)
+	}
+	if string(text) != "Cluster" {
+		t.Errorf("got %s, want %s", string(text), "Cluster")
+	}
+	var got ResourceType
+	err = got.UnmarshalText(text)
+	if err != nil {
+		t.Errorf("couldn't unmarshal %s to ResourceType: %s", text, err)
+	}
+	if got != ResourceTypeBroker {
+		t.Errorf("got %d, want %d", got, ResourceTypeBroker)
+	}
+}
+
+func TestPatternTypeMarshal(t *testing.T) {
+	for i := PatternTypeUnknown; i <= PatternTypePrefixed; i++ {
+		text, err := i.MarshalText()
+		if err != nil {
+			t.Errorf("couldn't marshal %d to text: %s", i, err)
+		}
+		var got PatternType
+		err = got.UnmarshalText(text)
+		if err != nil {
+			t.Errorf("couldn't unmarshal %s to PatternType: %s", text, err)
+		}
+		if got != i {
+			t.Errorf("got %d, want %d", got, i)
+		}
+	}
+}
diff -pruN 0.2.1-1.1/rungroup.go 0.4.49+ds1-1/rungroup.go
--- 0.2.1-1.1/rungroup.go	2018-11-20 17:06:33.000000000 +0000
+++ 0.4.49+ds1-1/rungroup.go	1970-01-01 00:00:00.000000000 +0000
@@ -1,61 +0,0 @@
-package kafka
-
-import (
-	"context"
-	"sync"
-)
-
-// runGroup is a collection of goroutines working together. If any one goroutine
-// stops, then all goroutines will be stopped.
-//
-// A zero runGroup is valid
-type runGroup struct {
-	initOnce sync.Once
-
-	ctx    context.Context
-	cancel context.CancelFunc
-
-	wg sync.WaitGroup
-}
-
-func (r *runGroup) init() {
-	if r.cancel == nil {
-		r.ctx, r.cancel = context.WithCancel(context.Background())
-	}
-}
-
-func (r *runGroup) WithContext(ctx context.Context) *runGroup {
-	ctx, cancel := context.WithCancel(ctx)
-	return &runGroup{
-		ctx:    ctx,
-		cancel: cancel,
-	}
-}
-
-// Wait blocks until all function calls have returned.
-func (r *runGroup) Wait() {
-	r.wg.Wait()
-}
-
-// Stop stops the goroutines and waits for them to complete
-func (r *runGroup) Stop() {
-	r.initOnce.Do(r.init)
-	r.cancel()
-	r.Wait()
-}
-
-// Go calls the given function in a new goroutine.
-//
-// The first call to return a non-nil error cancels the group; its error will be
-// returned by Wait.
-func (r *runGroup) Go(f func(stop <-chan struct{})) {
-	r.initOnce.Do(r.init)
-
-	r.wg.Add(1)
-	go func() {
-		defer r.wg.Done()
-		defer r.cancel()
-
-		f(r.ctx.Done())
-	}()
-}
diff -pruN 0.2.1-1.1/rungroup_test.go 0.4.49+ds1-1/rungroup_test.go
--- 0.2.1-1.1/rungroup_test.go	2018-11-20 17:06:33.000000000 +0000
+++ 0.4.49+ds1-1/rungroup_test.go	1970-01-01 00:00:00.000000000 +0000
@@ -1,61 +0,0 @@
-package kafka
-
-import (
-	"context"
-	"testing"
-	"time"
-)
-
-func TestRunGroup(t *testing.T) {
-	t.Run("Wait returns on empty group", func(t *testing.T) {
-		rg := &runGroup{}
-		rg.Wait()
-	})
-
-	t.Run("Stop returns on empty group", func(t *testing.T) {
-		rg := &runGroup{}
-		rg.Stop()
-	})
-
-	t.Run("Stop cancels running tasks", func(t *testing.T) {
-		rg := &runGroup{}
-		rg.Go(func(stop <-chan struct{}) {
-			<-stop
-		})
-		rg.Stop()
-	})
-
-	t.Run("Honors parent context", func(t *testing.T) {
-		now := time.Now()
-		timeout := time.Millisecond * 100
-
-		ctx, cancel := context.WithTimeout(context.Background(), timeout)
-		defer cancel()
-
-		rg := &runGroup{}
-		rg = rg.WithContext(ctx)
-		rg.Go(func(stop <-chan struct{}) {
-			<-stop
-		})
-		rg.Wait()
-
-		elapsed := time.Now().Sub(now)
-		if elapsed < timeout {
-			t.Errorf("expected elapsed > %v; got %v", timeout, elapsed)
-		}
-	})
-
-	t.Run("Any death kills all; one for all and all for one", func(t *testing.T) {
-		rg := &runGroup{}
-		rg.Go(func(stop <-chan struct{}) {
-			<-stop
-		})
-		rg.Go(func(stop <-chan struct{}) {
-			<-stop
-		})
-		rg.Go(func(stop <-chan struct{}) {
-			// return immediately
-		})
-		rg.Wait()
-	})
-}
diff -pruN 0.2.1-1.1/sasl/aws_msk_iam/go.mod 0.4.49+ds1-1/sasl/aws_msk_iam/go.mod
--- 0.2.1-1.1/sasl/aws_msk_iam/go.mod	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/sasl/aws_msk_iam/go.mod	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,8 @@
+module github.com/segmentio/kafka-go/sasl/aws_msk_iam
+
+go 1.15
+
+require (
+	github.com/aws/aws-sdk-go v1.41.3
+	github.com/segmentio/kafka-go v0.4.28
+)
diff -pruN 0.2.1-1.1/sasl/aws_msk_iam/go.sum 0.4.49+ds1-1/sasl/aws_msk_iam/go.sum
--- 0.2.1-1.1/sasl/aws_msk_iam/go.sum	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/sasl/aws_msk_iam/go.sum	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,60 @@
+github.com/aws/aws-sdk-go v1.41.3 h1:deglLZ1jjHdhkd6Rbad1MZM4gL+1pfnTfjuFk6CGJFM=
+github.com/aws/aws-sdk-go v1.41.3/go.mod h1:585smgzpB/KqRA+K3y/NL/oYRqQvpNJYvLm+LY1U59Q=
+github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21 h1:YEetp8/yCZMuEPMUDHG0CW/brkkEp8mzqk2+ODEitlw=
+github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU=
+github.com/frankban/quicktest v1.11.3 h1:8sXhOn0uLys67V8EsXLc6eszDs8VXWxL3iRvebPhedY=
+github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k=
+github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4=
+github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/google/go-cmp v0.5.4 h1:L8R9j+yAqZuZjsqh/z+F1NCffTKKLShY6zXTItVIZ8M=
+github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
+github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
+github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8=
+github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
+github.com/klauspost/compress v1.9.8 h1:VMAMUUOh+gaxKTMk+zqbjsSjsIcUcL/LF4o63i82QyA=
+github.com/klauspost/compress v1.9.8/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
+github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI=
+github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
+github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
+github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
+github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
+github.com/pierrec/lz4 v2.6.0+incompatible h1:Ix9yFKn1nSPBLFl/yZknTp8TU5G4Ps0JDmguYK6iH1A=
+github.com/pierrec/lz4 v2.6.0+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
+github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/segmentio/kafka-go v0.4.28 h1:ATYbyenAlsoFxnV+VpIJMF87bvRuRsX7fezHNfpwkdM=
+github.com/segmentio/kafka-go v0.4.28/go.mod h1:XzMcoMjSzDGHcIwpWUI7GB43iKZ2fTVmryPSGLf/MPg=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0=
+github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c h1:u40Z8hqBAAQyv+vATcGgV0YCnDjqSL7/q/JyPhhJSPk=
+github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I=
+github.com/xdg/stringprep v1.0.0 h1:d9X0esnoa3dFsV0FG35rAT0RIhYFlPq7MiP+DW89La0=
+github.com/xdg/stringprep v1.0.0/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20190506204251-e1dfcc566284 h1:rlLehGeYg6jfoyz/eDqDU1iRXLKfR42nnNh57ytKEWo=
+golang.org/x/crypto v0.0.0-20190506204251-e1dfcc566284/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20210614182718-04defd469f4e h1:XpT3nA5TvE525Ne3hInMh6+GETgn27Zfm9dxsThnX2Q=
+golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M=
+golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10=
+gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
+gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
diff -pruN 0.2.1-1.1/sasl/aws_msk_iam/msk_iam.go 0.4.49+ds1-1/sasl/aws_msk_iam/msk_iam.go
--- 0.2.1-1.1/sasl/aws_msk_iam/msk_iam.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/sasl/aws_msk_iam/msk_iam.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,124 @@
+package aws_msk_iam
+
+import (
+	"context"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"net/http"
+	"net/url"
+	"runtime"
+	"strings"
+	"time"
+
+	sigv4 "github.com/aws/aws-sdk-go/aws/signer/v4"
+	"github.com/segmentio/kafka-go/sasl"
+)
+
+const (
+	// These constants come from https://github.com/aws/aws-msk-iam-auth#details and
+	// https://github.com/aws/aws-msk-iam-auth/blob/main/src/main/java/software/amazon/msk/auth/iam/internals/AWS4SignedPayloadGenerator.java.
+	signVersion      = "2020_10_22"
+	signService      = "kafka-cluster"
+	signAction       = "kafka-cluster:Connect"
+	signVersionKey   = "version"
+	signHostKey      = "host"
+	signUserAgentKey = "user-agent"
+	signActionKey    = "action"
+	queryActionKey   = "Action"
+)
+
+var signUserAgent = fmt.Sprintf("kafka-go/sasl/aws_msk_iam/%s", runtime.Version())
+
+// Mechanism implements sasl.Mechanism for the AWS_MSK_IAM mechanism, based on the official java implementation:
+// https://github.com/aws/aws-msk-iam-auth
+type Mechanism struct {
+	// The sigv4.Signer to use when signing the request. Required.
+	Signer *sigv4.Signer
+	// The region where the msk cluster is hosted, e.g. "us-east-1". Required.
+	Region string
+	// The time the request is planned for. Optional, defaults to time.Now() at time of authentication.
+	SignTime time.Time
+	// The duration for which the presigned request is active. Optional, defaults to 5 minutes.
+	Expiry time.Duration
+}
+
+func (m *Mechanism) Name() string {
+	return "AWS_MSK_IAM"
+}
+
+// Start produces the authentication values required for AWS_MSK_IAM. It produces the following json as a byte array,
+// making use of the aws-sdk to produce the signed output.
+// 	{
+// 	  "version" : "2020_10_22",
+// 	  "host" : "<broker host>",
+// 	  "user-agent": "<user agent string from the client>",
+// 	  "action": "kafka-cluster:Connect",
+// 	  "x-amz-algorithm" : "<algorithm>",
+// 	  "x-amz-credential" : "<clientAWSAccessKeyID>/<date in yyyyMMdd format>/<region>/kafka-cluster/aws4_request",
+// 	  "x-amz-date" : "<timestamp in yyyyMMdd'T'HHmmss'Z' format>",
+// 	  "x-amz-security-token" : "<clientAWSSessionToken if any>",
+// 	  "x-amz-signedheaders" : "host",
+// 	  "x-amz-expires" : "<expiration in seconds>",
+// 	  "x-amz-signature" : "<AWS SigV4 signature computed by the client>"
+// 	}
+func (m *Mechanism) Start(ctx context.Context) (sess sasl.StateMachine, ir []byte, err error) {
+	saslMeta := sasl.MetadataFromContext(ctx)
+	if saslMeta == nil {
+		return nil, nil, errors.New("missing sasl metadata")
+	}
+
+	query := url.Values{
+		queryActionKey: {signAction},
+	}
+
+	signUrl := url.URL{
+		Scheme:   "kafka",
+		Host:     saslMeta.Host,
+		Path:     "/",
+		RawQuery: query.Encode(),
+	}
+
+	req, err := http.NewRequest("GET", signUrl.String(), nil)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	signTime := m.SignTime
+	if signTime.IsZero() {
+		signTime = time.Now()
+	}
+
+	expiry := m.Expiry
+	if expiry == 0 {
+		expiry = 5 * time.Minute
+	}
+
+	header, err := m.Signer.Presign(req, nil, signService, m.Region, expiry, signTime)
+	if err != nil {
+		return nil, nil, err
+	}
+	signedMap := map[string]string{
+		signVersionKey:   signVersion,
+		signHostKey:      signUrl.Host,
+		signUserAgentKey: signUserAgent,
+		signActionKey:    signAction,
+	}
+	// The protocol requires lowercase keys.
+	for key, vals := range header {
+		signedMap[strings.ToLower(key)] = vals[0]
+	}
+	for key, vals := range req.URL.Query() {
+		signedMap[strings.ToLower(key)] = vals[0]
+	}
+
+	signedJson, err := json.Marshal(signedMap)
+	return m, signedJson, err
+}
+
+func (m *Mechanism) Next(ctx context.Context, challenge []byte) (bool, []byte, error) {
+	// After the initial step, the authentication is complete;
+	// kafka will return an error if it rejected the credentials, so we'll only
+	// arrive here on success.
+	return true, nil, nil
+}
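
For orientation, a minimal sketch of wiring this mechanism into a `kafka.Dialer` might look like the following; the AWS session setup, region, broker TLS settings, and the `newDialer` helper are illustrative assumptions, not part of this package:

```go
package example

import (
	"crypto/tls"
	"time"

	"github.com/aws/aws-sdk-go/aws/session"
	sigv4 "github.com/aws/aws-sdk-go/aws/signer/v4"
	"github.com/segmentio/kafka-go"
	"github.com/segmentio/kafka-go/sasl/aws_msk_iam"
)

// newDialer builds a kafka.Dialer that authenticates with AWS_MSK_IAM.
// The AWS session resolves credentials from the usual places (environment,
// shared config, instance profile); the region shown is a placeholder.
func newDialer() *kafka.Dialer {
	sess := session.Must(session.NewSession())
	return &kafka.Dialer{
		Timeout:   10 * time.Second,
		DualStack: true,
		TLS:       &tls.Config{}, // MSK IAM listeners require TLS
		SASLMechanism: &aws_msk_iam.Mechanism{
			Signer: sigv4.NewSigner(sess.Config.Credentials),
			Region: "us-east-1",
		},
	}
}
```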
diff -pruN 0.2.1-1.1/sasl/aws_msk_iam/msk_iam_test.go 0.4.49+ds1-1/sasl/aws_msk_iam/msk_iam_test.go
--- 0.2.1-1.1/sasl/aws_msk_iam/msk_iam_test.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/sasl/aws_msk_iam/msk_iam_test.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,105 @@
+package aws_msk_iam
+
+import (
+	"bytes"
+	"context"
+	"encoding/json"
+	"testing"
+	"time"
+
+	"github.com/segmentio/kafka-go/sasl"
+
+	"github.com/aws/aws-sdk-go/aws/credentials"
+	sigv4 "github.com/aws/aws-sdk-go/aws/signer/v4"
+)
+
+const (
+	accessKeyId     = "ACCESS_KEY"
+	secretAccessKey = "SECRET_KEY"
+)
+
+// using a fixed time allows the signature to be verifiable in a test
+var signTime = time.Date(2021, 10, 14, 13, 5, 0, 0, time.UTC)
+
+func TestAwsMskIamMechanism(t *testing.T) {
+	tests := []struct {
+		description string
+		ctx         func() context.Context
+		shouldFail  bool
+	}{
+		{
+			description: "with metadata",
+			ctx: func() context.Context {
+				return sasl.WithMetadata(context.Background(), &sasl.Metadata{
+					Host: "localhost",
+					Port: 9092,
+				})
+			},
+		},
+		{
+			description: "without metadata",
+			ctx: func() context.Context {
+				return context.Background()
+			},
+			shouldFail: true,
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.description, func(t *testing.T) {
+			ctx := tt.ctx()
+
+			creds := credentials.NewStaticCredentials(accessKeyId, secretAccessKey, "")
+			mskMechanism := &Mechanism{
+				Signer:   sigv4.NewSigner(creds),
+				Region:   "us-east-1",
+				SignTime: signTime,
+			}
+
+			sess, auth, err := mskMechanism.Start(ctx)
+			if tt.shouldFail { // if error is expected
+				if err == nil { // but we don't find one
+					t.Fatal("error expected")
+				} else { // but we do find one
+					return // return early since the remaining assertions are irrelevant
+				}
+			} else { // if error is not expected (typical)
+				if err != nil { // but we do find one
+					t.Fatal(err)
+				}
+			}
+
+			if sess != mskMechanism {
+				t.Error(
+					"Unexpected session",
+					"expected", mskMechanism,
+					"got", sess,
+				)
+			}
+
+			expectedMap := map[string]string{
+				"version":             "2020_10_22",
+				"action":              "kafka-cluster:Connect",
+				"host":                "localhost",
+				"user-agent":          signUserAgent,
+				"x-amz-algorithm":     "AWS4-HMAC-SHA256",
+				"x-amz-credential":    "ACCESS_KEY/20211014/us-east-1/kafka-cluster/aws4_request",
+				"x-amz-date":          "20211014T130500Z",
+				"x-amz-expires":       "300",
+				"x-amz-signedheaders": "host",
+				"x-amz-signature":     "6b8d25f9b45b9c7db9da855a49112d80379224153a27fd279c305a5b7940d1a7",
+			}
+			expectedAuth, err := json.Marshal(expectedMap)
+			if err != nil {
+				t.Fatal(err)
+			}
+
+			if !bytes.Equal(expectedAuth, auth) {
+				t.Error("Unexpected authentication",
+					"expected", expectedAuth,
+					"got", auth,
+				)
+			}
+		})
+	}
+}
diff -pruN 0.2.1-1.1/sasl/aws_msk_iam_v2/README.md 0.4.49+ds1-1/sasl/aws_msk_iam_v2/README.md
--- 0.2.1-1.1/sasl/aws_msk_iam_v2/README.md	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/sasl/aws_msk_iam_v2/README.md	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,15 @@
+# AWS MSK IAM V2
+
+This extension adds support for authenticating to [AWS Managed Apache Kafka](https://aws.amazon.com/msk/)
+through AWS IAM.
+
+## How to use
+
+This module is an extension for MSK users and is therefore kept separate from the main `kafka-go` module.
+You can add it to your dependencies by running the command below.
+
+```shell
+go get github.com/segmentio/kafka-go/sasl/aws_msk_iam_v2
+```
+
+See the sample code in [example_test.go](./example_test.go); the `Mechanism` can be used for SASL authentication with both `Reader` and `Writer`.
diff -pruN 0.2.1-1.1/sasl/aws_msk_iam_v2/example_test.go 0.4.49+ds1-1/sasl/aws_msk_iam_v2/example_test.go
--- 0.2.1-1.1/sasl/aws_msk_iam_v2/example_test.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/sasl/aws_msk_iam_v2/example_test.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,30 @@
+package aws_msk_iam_v2_test
+
+import (
+	"context"
+	"crypto/tls"
+	"time"
+
+	"github.com/aws/aws-sdk-go-v2/config"
+	"github.com/segmentio/kafka-go"
+	"github.com/segmentio/kafka-go/sasl/aws_msk_iam_v2"
+)
+
+func main() {
+	cfg, err := config.LoadDefaultConfig(context.TODO())
+	if err != nil {
+		panic(err)
+	}
+	mechanism := aws_msk_iam_v2.NewMechanism(cfg)
+	_ = kafka.ReaderConfig{
+		Brokers:     []string{"https://localhost"},
+		GroupID:     "some-consumer-group",
+		GroupTopics: []string{"some-topic"},
+		Dialer: &kafka.Dialer{
+			Timeout:       10 * time.Second,
+			DualStack:     true,
+			SASLMechanism: mechanism,
+			TLS:           &tls.Config{},
+		},
+	}
+}
diff -pruN 0.2.1-1.1/sasl/aws_msk_iam_v2/go.mod 0.4.49+ds1-1/sasl/aws_msk_iam_v2/go.mod
--- 0.2.1-1.1/sasl/aws_msk_iam_v2/go.mod	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/sasl/aws_msk_iam_v2/go.mod	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,11 @@
+module github.com/segmentio/kafka-go/sasl/aws_msk_iam_v2
+
+go 1.15
+
+require (
+	github.com/aws/aws-sdk-go-v2 v1.16.12
+	github.com/aws/aws-sdk-go-v2/config v1.17.2
+	github.com/aws/aws-sdk-go-v2/credentials v1.12.15
+	github.com/segmentio/kafka-go v0.4.34
+	github.com/stretchr/testify v1.8.0
+)
diff -pruN 0.2.1-1.1/sasl/aws_msk_iam_v2/go.sum 0.4.49+ds1-1/sasl/aws_msk_iam_v2/go.sum
--- 0.2.1-1.1/sasl/aws_msk_iam_v2/go.sum	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/sasl/aws_msk_iam_v2/go.sum	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,69 @@
+github.com/aws/aws-sdk-go-v2 v1.16.12 h1:wbMYa2PlFysFx2GLIQojr6FJV5+OWCM/BwyHXARxETA=
+github.com/aws/aws-sdk-go-v2 v1.16.12/go.mod h1:C+Ym0ag2LIghJbXhfXZ0YEEp49rBWowxKzJLUoob0ts=
+github.com/aws/aws-sdk-go-v2/config v1.17.2 h1:V96WPd2a1H/MXGZjk4zto+KpYnwZI2kdIdy/cI8kYnQ=
+github.com/aws/aws-sdk-go-v2/config v1.17.2/go.mod h1:jumS/AMwul4WaG8vyXsF6kUndG9zndR+yfYBwl4i9ds=
+github.com/aws/aws-sdk-go-v2/credentials v1.12.15 h1:6DONxG9cR3pAuISj1Irh5u2SRqCfIJwyHNyDDes7SZw=
+github.com/aws/aws-sdk-go-v2/credentials v1.12.15/go.mod h1:41zTC6U/78fUD7ZCa5NymTJANDjfqySg5YEAYVFl2Ic=
+github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.13 h1:+uferi8SUDZtMloCDt24Zenyy/i71C/ua5mjUCpbpN0=
+github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.13/go.mod h1:y0eXmsNBFIVjUE8ZBjES8myOHlMsXDz7qGT93+MVdjk=
+github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.19 h1:gC5mudiFrWGhzcdoWj1iCGUfrzCpQG0MQIQf0CXFFQQ=
+github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.19/go.mod h1:llxE6bwUZhuCas0K7qGiu5OgMis3N7kdWtFSxoHmJ7E=
+github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.13 h1:qezY57na06d6kSE7uuB0N7XEflu914AXx/hg2L8Ykcw=
+github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.13/go.mod h1:lB12mkZqCSo5PsdBFLNqc2M/OOYgNAy8UtaktyuWvE8=
+github.com/aws/aws-sdk-go-v2/internal/ini v1.3.20 h1:GvszACAU8GSV3+Tant5GutW6smY8WavrP8ZuRS9Ku4Q=
+github.com/aws/aws-sdk-go-v2/internal/ini v1.3.20/go.mod h1:bfTcsThj5a9P5pIGRy0QudJ8k4+issxXX+O6Djnd5Cs=
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.13 h1:ObfthqDyhe7rMAOa7pqft6974VHIk8BAJB7kYdoIfTA=
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.13/go.mod h1:V390DK4MQxLpDdXxFqizyz8KUxuWImkW/xzgXMz0yyk=
+github.com/aws/aws-sdk-go-v2/service/sso v1.11.18 h1:gTn1a/FbcOXK5LQS88dD5k+PKwyjVvhAEEwyN4c6eW8=
+github.com/aws/aws-sdk-go-v2/service/sso v1.11.18/go.mod h1:ytmEi5+qwcSNcV2pVA8PIb1DnKT/0Bu/K4nfJHwoM6c=
+github.com/aws/aws-sdk-go-v2/service/ssooidc v1.13.1 h1:p48IfndYbRk3iDsoQAmVXdCKEM5+7Y50JAPikjwk8gI=
+github.com/aws/aws-sdk-go-v2/service/ssooidc v1.13.1/go.mod h1:NY+G+8PW0ISyJ7/6t5mgOe6qpJiwZa9Jix05WPscJjg=
+github.com/aws/aws-sdk-go-v2/service/sts v1.16.14 h1:7kxso8VZLQ86Jg27QRBw6fjrQhQ8CMNMZ7SB0w7RQiA=
+github.com/aws/aws-sdk-go-v2/service/sts v1.16.14/go.mod h1:Y+BUV19q3OmQVqNUlbZ40zVi3NM6Biuxwkx/qdSD/CY=
+github.com/aws/smithy-go v1.13.0 h1:YfyEmSJLo7fAv8FbuDK4R8F9aAmi9DZ88Zb/KJJmUl0=
+github.com/aws/smithy-go v1.13.0/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg=
+github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
+github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
+github.com/klauspost/compress v1.15.7 h1:7cgTQxJCU/vy+oP/E3B9RGbQTgbiVzIJWIKOLoAsPok=
+github.com/klauspost/compress v1.15.7/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU=
+github.com/pierrec/lz4/v4 v4.1.15 h1:MO0/ucJhngq7299dKLwIMtgTfbkoSPF6AoMYDd8Q4q0=
+github.com/pierrec/lz4/v4 v4.1.15/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/segmentio/kafka-go v0.4.34 h1:Dm6YlLMiVSiwwav20KY0AoY63s661FXevwJ3CVHUERo=
+github.com/segmentio/kafka-go v0.4.34/go.mod h1:GAjxBQJdQMB5zfNA21AhpaqOB2Mu+w3De4ni3Gbm8y0=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
+github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk=
+github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
+github.com/xdg/scram v1.0.5 h1:TuS0RFmt5Is5qm9Tm2SoD89OPqe4IRiFtyFY4iwWXsw=
+github.com/xdg/scram v1.0.5/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I=
+github.com/xdg/stringprep v1.0.3 h1:cmL5Enob4W83ti/ZHuZLuKD/xqJfus4fVPwE+/BDm+4=
+github.com/xdg/stringprep v1.0.3/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y=
+golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d h1:sK3txAijHtOK88l68nt020reeT1ZdKLIYetKl95FzVY=
+golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
+golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20220706163947-c90051bbdb60 h1:8NSylCMxLW4JvserAndSgFL7aPli6A68yf0bYFTcWCM=
+golang.org/x/net v0.0.0-20220706163947-c90051bbdb60/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
+golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
+golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
+golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk=
+golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
+golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
+gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
diff -pruN 0.2.1-1.1/sasl/aws_msk_iam_v2/msk_iam.go 0.4.49+ds1-1/sasl/aws_msk_iam_v2/msk_iam.go
--- 0.2.1-1.1/sasl/aws_msk_iam_v2/msk_iam.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/sasl/aws_msk_iam_v2/msk_iam.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,180 @@
+package aws_msk_iam_v2
+
+import (
+	"context"
+	"encoding/json"
+	"errors"
+	"net/http"
+	"net/url"
+	"runtime"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+	signer "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+	"github.com/segmentio/kafka-go/sasl"
+)
+
+const (
+	// These constants come from https://github.com/aws/aws-msk-iam-auth#details and
+	// https://github.com/aws/aws-msk-iam-auth/blob/main/src/main/java/software/amazon/msk/auth/iam/internals/AWS4SignedPayloadGenerator.java.
+	signAction       = "kafka-cluster:Connect"
+	signPayload      = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" // the hex encoded SHA-256 of an empty string
+	signService      = "kafka-cluster"
+	signVersion      = "2020_10_22"
+	signActionKey    = "action"
+	signHostKey      = "host"
+	signUserAgentKey = "user-agent"
+	signVersionKey   = "version"
+	queryActionKey   = "Action"
+	queryExpiryKey   = "X-Amz-Expires"
+)
+
+var signUserAgent = "kafka-go/sasl/aws_msk_iam_v2/" + runtime.Version()
+
+// Mechanism implements sasl.Mechanism for the AWS_MSK_IAM mechanism, based on the official java implementation:
+// https://github.com/aws/aws-msk-iam-auth
+type Mechanism struct {
+	// The sigv4.Signer of aws-sdk-go-v2 to use when signing the request. Required.
+	Signer *signer.Signer
+	// The aws.Config.Credentials or config.CredentialsProvider of aws-sdk-go-v2. Required.
+	Credentials aws.CredentialsProvider
+	// The region where the msk cluster is hosted, e.g. "us-east-1". Required.
+	Region string
+	// The time the request is planned for. Optional, defaults to time.Now() at time of authentication.
+	SignTime time.Time
+	// The duration for which the presigned request is active. Optional, defaults to 5 minutes.
+	Expiry time.Duration
+}
+
+func (m *Mechanism) Name() string {
+	return "AWS_MSK_IAM"
+}
+
+func (m *Mechanism) Next(ctx context.Context, challenge []byte) (bool, []byte, error) {
+	// After the initial step, the authentication is complete;
+	// kafka will return an error if it rejected the credentials, so we'll only
+	// arrive here on success.
+	return true, nil, nil
+}
+
+// Start produces the authentication values required for AWS_MSK_IAM. It produces the following json as a byte array,
+// making use of the aws-sdk to produce the signed output.
+//
+//	{
+//	  "version" : "2020_10_22",
+//	  "host" : "<broker host>",
+//	  "user-agent": "<user agent string from the client>",
+//	  "action": "kafka-cluster:Connect",
+//	  "x-amz-algorithm" : "<algorithm>",
+//	  "x-amz-credential" : "<clientAWSAccessKeyID>/<date in yyyyMMdd format>/<region>/kafka-cluster/aws4_request",
+//	  "x-amz-date" : "<timestamp in yyyyMMdd'T'HHmmss'Z' format>",
+//	  "x-amz-security-token" : "<clientAWSSessionToken if any>",
+//	  "x-amz-signedheaders" : "host",
+//	  "x-amz-expires" : "<expiration in seconds>",
+//	  "x-amz-signature" : "<AWS SigV4 signature computed by the client>"
+//	}
+func (m *Mechanism) Start(ctx context.Context) (sess sasl.StateMachine, ir []byte, err error) {
+	signedMap, err := m.preSign(ctx)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	signedJson, err := json.Marshal(signedMap)
+	return m, signedJson, err
+}
+
+// preSign produces the authentication values required for AWS_MSK_IAM.
+func (m *Mechanism) preSign(ctx context.Context) (map[string]string, error) {
+	req, err := buildReq(ctx, defaultExpiry(m.Expiry))
+	if err != nil {
+		return nil, err
+	}
+
+	creds, err := m.Credentials.Retrieve(ctx)
+	if err != nil {
+		return nil, err
+	}
+
+	signedUrl, header, err := m.Signer.PresignHTTP(ctx, creds, req, signPayload, signService, m.Region, defaultSignTime(m.SignTime))
+	if err != nil {
+		return nil, err
+	}
+
+	u, err := url.Parse(signedUrl)
+	if err != nil {
+		return nil, err
+	}
+	return buildSignedMap(u, header), nil
+}
+
+// buildReq builds the http.Request used for AWS presigning.
+func buildReq(ctx context.Context, expiry time.Duration) (*http.Request, error) {
+	query := url.Values{
+		queryActionKey: {signAction},
+		queryExpiryKey: {strconv.FormatInt(int64(expiry/time.Second), 10)},
+	}
+	saslMeta := sasl.MetadataFromContext(ctx)
+	if saslMeta == nil {
+		return nil, errors.New("missing sasl metadata")
+	}
+
+	signUrl := url.URL{
+		Scheme:   "kafka",
+		Host:     saslMeta.Host,
+		Path:     "/",
+		RawQuery: query.Encode(),
+	}
+
+	req, err := http.NewRequest(http.MethodGet, signUrl.String(), nil)
+	if err != nil {
+		return nil, err
+	}
+
+	return req, nil
+}
+
+// buildSignedMap builds the signed string map used to authenticate with MSK.
+func buildSignedMap(u *url.URL, header http.Header) map[string]string {
+	signedMap := map[string]string{
+		signVersionKey:   signVersion,
+		signHostKey:      u.Host,
+		signUserAgentKey: signUserAgent,
+		signActionKey:    signAction,
+	}
+	// The protocol requires lowercase keys.
+	for key, vals := range header {
+		signedMap[strings.ToLower(key)] = vals[0]
+	}
+	for key, vals := range u.Query() {
+		signedMap[strings.ToLower(key)] = vals[0]
+	}
+
+	return signedMap
+}
+
+// defaultExpiry returns the default expiration time when Mechanism.Expiry is not set.
+func defaultExpiry(v time.Duration) time.Duration {
+	if v == 0 {
+		return 5 * time.Minute
+	}
+	return v
+}
+
+// defaultSignTime returns the default sign time when Mechanism.SignTime is not set.
+func defaultSignTime(v time.Time) time.Time {
+	if v.IsZero() {
+		return time.Now()
+	}
+	return v
+}
+
+// NewMechanism returns a new Mechanism using the credentials and region from the provided aws.Config.
+func NewMechanism(awsCfg aws.Config) *Mechanism {
+	return &Mechanism{
+		Signer:      signer.NewSigner(),
+		Credentials: awsCfg.Credentials,
+		Region:      awsCfg.Region,
+	}
+}
diff -pruN 0.2.1-1.1/sasl/aws_msk_iam_v2/msk_iam_test.go 0.4.49+ds1-1/sasl/aws_msk_iam_v2/msk_iam_test.go
--- 0.2.1-1.1/sasl/aws_msk_iam_v2/msk_iam_test.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/sasl/aws_msk_iam_v2/msk_iam_test.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,163 @@
+package aws_msk_iam_v2
+
+import (
+	"bytes"
+	"context"
+	"encoding/json"
+	"testing"
+	"time"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/segmentio/kafka-go/sasl"
+	"github.com/stretchr/testify/assert"
+
+	signer "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+	"github.com/aws/aws-sdk-go-v2/credentials"
+)
+
+const (
+	accessKeyId     = "ACCESS_KEY"
+	secretAccessKey = "SECRET_KEY"
+)
+
+// using a fixed time allows the signature to be verifiable in a test
+var signTime = time.Date(2021, 10, 14, 13, 5, 0, 0, time.UTC)
+
+func TestAwsMskIamMechanism(t *testing.T) {
+	creds := credentials.NewStaticCredentialsProvider(accessKeyId, secretAccessKey, "")
+	ctxWithMetadata := func() context.Context {
+		return sasl.WithMetadata(context.Background(), &sasl.Metadata{
+			Host: "localhost",
+			Port: 9092,
+		})
+	}
+
+	tests := []struct {
+		description string
+		ctx         func() context.Context
+		shouldFail  bool
+	}{
+		{
+			description: "with metadata",
+			ctx:         ctxWithMetadata,
+		},
+		{
+			description: "without metadata",
+			ctx: func() context.Context {
+				return context.Background()
+			},
+			shouldFail: true,
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.description, func(t *testing.T) {
+			ctx := tt.ctx()
+
+			mskMechanism := &Mechanism{
+				Signer:      signer.NewSigner(),
+				Credentials: creds,
+				Region:      "us-east-1",
+				SignTime:    signTime,
+			}
+			sess, auth, err := mskMechanism.Start(ctx)
+			if tt.shouldFail { // if error is expected
+				if err == nil { // but we don't find one
+					t.Fatal("error expected")
+				} else { // but we do find one
+					return // return early since the remaining assertions are irrelevant
+				}
+			} else { // if error is not expected (typical)
+				if err != nil { // but we do find one
+					t.Fatal(err)
+				}
+			}
+
+			if sess != mskMechanism {
+				t.Error(
+					"Unexpected session",
+					"expected", mskMechanism,
+					"got", sess,
+				)
+			}
+
+			expectedMap := map[string]string{
+				"version":             "2020_10_22",
+				"action":              "kafka-cluster:Connect",
+				"host":                "localhost",
+				"user-agent":          signUserAgent,
+				"x-amz-algorithm":     "AWS4-HMAC-SHA256",
+				"x-amz-credential":    "ACCESS_KEY/20211014/us-east-1/kafka-cluster/aws4_request",
+				"x-amz-date":          "20211014T130500Z",
+				"x-amz-expires":       "300",
+				"x-amz-signedheaders": "host",
+				"x-amz-signature":     "6b8d25f9b45b9c7db9da855a49112d80379224153a27fd279c305a5b7940d1a7",
+			}
+			expectedAuth, err := json.Marshal(expectedMap)
+			if err != nil {
+				t.Fatal(err)
+			}
+
+			if !bytes.Equal(expectedAuth, auth) {
+				t.Error("Unexpected authentication",
+					"expected", expectedAuth,
+					"got", auth,
+				)
+			}
+		})
+	}
+}
+
+func TestDefaultExpiry(t *testing.T) {
+	expiry := time.Second * 5
+	testCases := map[string]struct {
+		Expiry time.Duration
+	}{
+		"with default":    {Expiry: expiry},
+		"without default": {},
+	}
+
+	for name, testCase := range testCases {
+		t.Run(name, func(t *testing.T) {
+			actual := defaultExpiry(testCase.Expiry)
+			if testCase.Expiry == 0 {
+				assert.Equal(t, time.Minute*5, actual)
+			} else {
+				assert.Equal(t, expiry, actual)
+			}
+
+		})
+	}
+}
+
+func TestDefaultSignTime(t *testing.T) {
+	testCases := map[string]struct {
+		SignTime time.Time
+	}{
+		"with default":    {SignTime: signTime},
+		"without default": {},
+	}
+
+	for name, testCase := range testCases {
+		t.Run(name, func(t *testing.T) {
+			actual := defaultSignTime(testCase.SignTime)
+			if testCase.SignTime.IsZero() {
+				assert.True(t, actual.After(signTime))
+			} else {
+				assert.Equal(t, signTime, actual)
+			}
+		})
+	}
+}
+
+func TestNewMechanism(t *testing.T) {
+	region := "us-east-1"
+	creds := credentials.StaticCredentialsProvider{}
+	awsCfg := aws.Config{
+		Region:      region,
+		Credentials: creds,
+	}
+	m := NewMechanism(awsCfg)
+	assert.Equal(t, m.Region, region)
+	assert.Equal(t, m.Credentials, creds)
+}
diff -pruN 0.2.1-1.1/sasl/plain/plain.go 0.4.49+ds1-1/sasl/plain/plain.go
--- 0.2.1-1.1/sasl/plain/plain.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/sasl/plain/plain.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,30 @@
+package plain
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/segmentio/kafka-go/sasl"
+)
+
+// Mechanism implements the PLAIN mechanism and passes the credentials in clear
+// text.
+type Mechanism struct {
+	Username string
+	Password string
+}
+
+func (Mechanism) Name() string {
+	return "PLAIN"
+}
+
+func (m Mechanism) Start(ctx context.Context) (sasl.StateMachine, []byte, error) {
+	// Mechanism is stateless, so it can also implement sasl.StateMachine
+	return m, []byte(fmt.Sprintf("\x00%s\x00%s", m.Username, m.Password)), nil
+}
+
+func (m Mechanism) Next(ctx context.Context, challenge []byte) (bool, []byte, error) {
+	// kafka will return an error if it rejected the credentials, so we'd only
+	// arrive here on success.
+	return true, nil, nil
+}
diff -pruN 0.2.1-1.1/sasl/sasl.go 0.4.49+ds1-1/sasl/sasl.go
--- 0.2.1-1.1/sasl/sasl.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/sasl/sasl.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,65 @@
+package sasl
+
+import "context"
+
+type ctxKey struct{}
+
+// Mechanism implements the SASL state machine for a particular mode of
+// authentication.  It is used by the kafka.Dialer to perform the SASL
+// handshake.
+//
+// A Mechanism must be re-usable and safe for concurrent access by multiple
+// goroutines.
+type Mechanism interface {
+	// Name returns the identifier for this SASL mechanism.  This string will be
+	// passed to the SASL handshake request and must match one of the mechanisms
+	// supported by Kafka.
+	Name() string
+
+	// Start begins SASL authentication. It returns an authentication state
+	// machine and "initial response" data (if required by the selected
+	// mechanism). A non-nil error causes the client to abort the authentication
+	// attempt.
+	//
+	// A nil ir value is different from a zero-length value. The nil value
+	// indicates that the selected mechanism does not use an initial response,
+	// while a zero-length value indicates an empty initial response, which must
+	// be sent to the server.
+	Start(ctx context.Context) (sess StateMachine, ir []byte, err error)
+}
+
+// StateMachine implements the SASL challenge/response flow for a single SASL
+// handshake.  A StateMachine will be created by the Mechanism per connection,
+// so it does not need to be safe for concurrent access by multiple goroutines.
+//
+// Once the StateMachine is created by the Mechanism, the caller loops by
+// passing the server's response into Next and then sending Next's returned
+// bytes to the server.  Eventually either Next will indicate that the
+// authentication has been successfully completed via the done return value, or
+// it will indicate that the authentication failed by returning a non-nil error.
+type StateMachine interface {
+	// Next continues challenge-response authentication. A non-nil error
+	// indicates that the client should abort the authentication attempt.  If
+	// the client has been successfully authenticated, then the done return
+	// value will be true.
+	Next(ctx context.Context, challenge []byte) (done bool, response []byte, err error)
+}
+
+// Metadata contains additional data for performing SASL authentication.
+type Metadata struct {
+	// Host is the address of the broker the authentication will be
+	// performed on.
+	Host string
+	Port int
+}
+
+// WithMetadata returns a copy of the context with associated Metadata.
+func WithMetadata(ctx context.Context, m *Metadata) context.Context {
+	return context.WithValue(ctx, ctxKey{}, m)
+}
+
+// MetadataFromContext retrieves the Metadata from the context.
+func MetadataFromContext(ctx context.Context) *Metadata {
+	m, _ := ctx.Value(ctxKey{}).(*Metadata)
+	return m
+}
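
To make the interplay between `Mechanism` and `StateMachine` concrete, here is a minimal sketch of the loop a transport could drive; the `roundTrip` callback stands in for the broker round trips (e.g. SaslAuthenticate requests) and is an assumption of this sketch, not an API of the package:

```go
package example

import (
	"context"

	"github.com/segmentio/kafka-go/sasl"
)

// authenticate drives a sasl.Mechanism to completion against a single broker.
// roundTrip sends the client bytes to the broker and returns the server's
// challenge; it is a placeholder for the real SaslAuthenticate exchange.
func authenticate(ctx context.Context, m sasl.Mechanism, host string, port int,
	roundTrip func([]byte) ([]byte, error)) error {

	// Some mechanisms (e.g. AWS_MSK_IAM) need to know which broker they are
	// talking to, so attach the metadata to the context.
	ctx = sasl.WithMetadata(ctx, &sasl.Metadata{Host: host, Port: port})

	sess, ir, err := m.Start(ctx)
	if err != nil {
		return err
	}

	// Send the initial response (possibly empty) and keep exchanging
	// challenges until the state machine reports completion.
	challenge, err := roundTrip(ir)
	for err == nil {
		var done bool
		var response []byte
		if done, response, err = sess.Next(ctx, challenge); err != nil || done {
			break
		}
		challenge, err = roundTrip(response)
	}
	return err
}
```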
diff -pruN 0.2.1-1.1/sasl/sasl_test.go 0.4.49+ds1-1/sasl/sasl_test.go
--- 0.2.1-1.1/sasl/sasl_test.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/sasl/sasl_test.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,104 @@
+package sasl_test
+
+import (
+	"context"
+	"testing"
+	"time"
+
+	"github.com/segmentio/kafka-go"
+	"github.com/segmentio/kafka-go/sasl"
+	"github.com/segmentio/kafka-go/sasl/plain"
+	"github.com/segmentio/kafka-go/sasl/scram"
+	ktesting "github.com/segmentio/kafka-go/testing"
+)
+
+const (
+	saslTestConnect = "localhost:9093" // connect to sasl listener
+	saslTestTopic   = "test-writer-0"  // this topic is guaranteed to exist.
+)
+
+func TestSASL(t *testing.T) {
+	scramUsers := map[scram.Algorithm]string{scram.SHA256: "adminscram", scram.SHA512: "adminscram"}
+	// The kafka 4.0.0 test environment only supports separate users for the different scram algorithms.
+	if ktesting.KafkaIsAtLeast("4.0.0") {
+		scramUsers = map[scram.Algorithm]string{scram.SHA256: "adminscram256", scram.SHA512: "adminscram512"}
+	}
+	tests := []struct {
+		valid    func() sasl.Mechanism
+		invalid  func() sasl.Mechanism
+		minKafka string
+	}{
+		{
+			valid: func() sasl.Mechanism {
+				return plain.Mechanism{
+					Username: "adminplain",
+					Password: "admin-secret",
+				}
+			},
+			invalid: func() sasl.Mechanism {
+				return plain.Mechanism{
+					Username: "adminplain",
+					Password: "badpassword",
+				}
+			},
+		},
+		{
+			valid: func() sasl.Mechanism {
+				mech, _ := scram.Mechanism(scram.SHA256, scramUsers[scram.SHA256], "admin-secret-256")
+				return mech
+			},
+			invalid: func() sasl.Mechanism {
+				mech, _ := scram.Mechanism(scram.SHA256, scramUsers[scram.SHA256], "badpassword")
+				return mech
+			},
+			minKafka: "0.10.2.0",
+		},
+		{
+			valid: func() sasl.Mechanism {
+				mech, _ := scram.Mechanism(scram.SHA512, scramUsers[scram.SHA512], "admin-secret-512")
+				return mech
+			},
+			invalid: func() sasl.Mechanism {
+				mech, _ := scram.Mechanism(scram.SHA512, scramUsers[scram.SHA512], "badpassword")
+				return mech
+			},
+			minKafka: "0.10.2.0",
+		},
+	}
+
+	for _, tt := range tests {
+		mech := tt.valid()
+		if !ktesting.KafkaIsAtLeast(tt.minKafka) {
+			t.Skip("requires min kafka version " + tt.minKafka)
+		}
+
+		t.Run(mech.Name()+" success", func(t *testing.T) {
+			testConnect(t, tt.valid(), true)
+		})
+		t.Run(mech.Name()+" failure", func(t *testing.T) {
+			testConnect(t, tt.invalid(), false)
+		})
+		t.Run(mech.Name()+" is reusable", func(t *testing.T) {
+			mech := tt.valid()
+			testConnect(t, mech, true)
+			testConnect(t, mech, true)
+			testConnect(t, mech, true)
+		})
+
+	}
+}
+
+func testConnect(t *testing.T, mechanism sasl.Mechanism, success bool) {
+	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+	defer cancel()
+
+	d := kafka.Dialer{
+		SASLMechanism: mechanism,
+	}
+	_, err := d.DialLeader(ctx, "tcp", saslTestConnect, saslTestTopic, 0)
+	if success && err != nil {
+		t.Errorf("should have logged in correctly, got err: %v", err)
+	} else if !success && err == nil {
+		t.Errorf("should not have logged in correctly")
+	}
+}
diff -pruN 0.2.1-1.1/sasl/scram/scram.go 0.4.49+ds1-1/sasl/scram/scram.go
--- 0.2.1-1.1/sasl/scram/scram.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/sasl/scram/scram.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,91 @@
+package scram
+
+import (
+	"context"
+	"crypto/sha256"
+	"crypto/sha512"
+	"hash"
+
+	"github.com/segmentio/kafka-go/sasl"
+	"github.com/xdg-go/scram"
+)
+
+// Algorithm determines the hash function used by SCRAM to protect the user's
+// credentials.
+type Algorithm interface {
+	// Name returns the algorithm's name, e.g. "SCRAM-SHA-256"
+	Name() string
+
+	// Hash returns a new hash.Hash.
+	Hash() hash.Hash
+}
+
+type sha256Algo struct{}
+
+func (sha256Algo) Name() string {
+	return "SCRAM-SHA-256"
+}
+
+func (sha256Algo) Hash() hash.Hash {
+	return sha256.New()
+}
+
+type sha512Algo struct{}
+
+func (sha512Algo) Name() string {
+	return "SCRAM-SHA-512"
+}
+
+func (sha512Algo) Hash() hash.Hash {
+	return sha512.New()
+}
+
+var (
+	SHA256 Algorithm = sha256Algo{}
+	SHA512 Algorithm = sha512Algo{}
+)
+
+type mechanism struct {
+	algo   Algorithm
+	client *scram.Client
+}
+
+type session struct {
+	convo *scram.ClientConversation
+}
+
+// Mechanism returns a new sasl.Mechanism that will use SCRAM with the provided
+// Algorithm to securely transmit the provided credentials to Kafka.
+//
+// SCRAM-SHA-256 and SCRAM-SHA-512 were added to Kafka in 0.10.2.0.  These
+// mechanisms will not work with older versions.
+func Mechanism(algo Algorithm, username, password string) (sasl.Mechanism, error) {
+	hashGen := scram.HashGeneratorFcn(algo.Hash)
+	client, err := hashGen.NewClient(username, password, "")
+	if err != nil {
+		return nil, err
+	}
+
+	return &mechanism{
+		algo:   algo,
+		client: client,
+	}, nil
+}
+
+func (m *mechanism) Name() string {
+	return m.algo.Name()
+}
+
+func (m *mechanism) Start(ctx context.Context) (sasl.StateMachine, []byte, error) {
+	convo := m.client.NewConversation()
+	str, err := convo.Step("")
+	if err != nil {
+		return nil, nil, err
+	}
+	return &session{convo: convo}, []byte(str), nil
+}
+
+func (s *session) Next(ctx context.Context, challenge []byte) (bool, []byte, error) {
+	str, err := s.convo.Step(string(challenge))
+	return s.convo.Done(), []byte(str), err
+}
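
As a usage note, a SCRAM mechanism is constructed once and handed to a dialer; a minimal sketch follows, reusing the SASL listener address, topic, and credentials from the test environment above (they are illustrative, not required values):

```go
package example

import (
	"time"

	"github.com/segmentio/kafka-go"
	"github.com/segmentio/kafka-go/sasl/scram"
)

// newReader returns a kafka.Reader that authenticates with SCRAM-SHA-512.
// The broker address, topic, and credentials are placeholders.
func newReader() (*kafka.Reader, error) {
	mechanism, err := scram.Mechanism(scram.SHA512, "adminscram", "admin-secret-512")
	if err != nil {
		return nil, err
	}
	return kafka.NewReader(kafka.ReaderConfig{
		Brokers: []string{"localhost:9093"},
		Topic:   "test-writer-0",
		Dialer: &kafka.Dialer{
			Timeout:       10 * time.Second,
			SASLMechanism: mechanism,
		},
	}), nil
}
```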
diff -pruN 0.2.1-1.1/saslauthenticate.go 0.4.49+ds1-1/saslauthenticate.go
--- 0.2.1-1.1/saslauthenticate.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/saslauthenticate.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,54 @@
+package kafka
+
+import (
+	"bufio"
+)
+
+type saslAuthenticateRequestV0 struct {
+	// Data holds the SASL payload
+	Data []byte
+}
+
+func (t saslAuthenticateRequestV0) size() int32 {
+	return sizeofBytes(t.Data)
+}
+
+func (t *saslAuthenticateRequestV0) readFrom(r *bufio.Reader, sz int) (remain int, err error) {
+	return readBytes(r, sz, &t.Data)
+}
+
+func (t saslAuthenticateRequestV0) writeTo(wb *writeBuffer) {
+	wb.writeBytes(t.Data)
+}
+
+type saslAuthenticateResponseV0 struct {
+	// ErrorCode holds response error code
+	ErrorCode int16
+
+	ErrorMessage string
+
+	Data []byte
+}
+
+func (t saslAuthenticateResponseV0) size() int32 {
+	return sizeofInt16(t.ErrorCode) + sizeofString(t.ErrorMessage) + sizeofBytes(t.Data)
+}
+
+func (t saslAuthenticateResponseV0) writeTo(wb *writeBuffer) {
+	wb.writeInt16(t.ErrorCode)
+	wb.writeString(t.ErrorMessage)
+	wb.writeBytes(t.Data)
+}
+
+func (t *saslAuthenticateResponseV0) readFrom(r *bufio.Reader, sz int) (remain int, err error) {
+	if remain, err = readInt16(r, sz, &t.ErrorCode); err != nil {
+		return
+	}
+	if remain, err = readString(r, remain, &t.ErrorMessage); err != nil {
+		return
+	}
+	if remain, err = readBytes(r, remain, &t.Data); err != nil {
+		return
+	}
+	return
+}
diff -pruN 0.2.1-1.1/saslauthenticate_test.go 0.4.49+ds1-1/saslauthenticate_test.go
--- 0.2.1-1.1/saslauthenticate_test.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/saslauthenticate_test.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,60 @@
+package kafka
+
+import (
+	"bufio"
+	"bytes"
+	"reflect"
+	"testing"
+)
+
+func TestSASLAuthenticateRequestV0(t *testing.T) {
+	item := saslAuthenticateRequestV0{
+		Data: []byte("\x00user\x00pass"),
+	}
+
+	b := bytes.NewBuffer(nil)
+	w := &writeBuffer{w: b}
+	item.writeTo(w)
+
+	var found saslAuthenticateRequestV0
+	remain, err := (&found).readFrom(bufio.NewReader(b), b.Len())
+	if err != nil {
+		t.Error(err)
+		t.FailNow()
+	}
+	if remain != 0 {
+		t.Errorf("expected 0 remain, got %v", remain)
+		t.FailNow()
+	}
+	if !reflect.DeepEqual(item, found) {
+		t.Error("expected item and found to be the same")
+		t.FailNow()
+	}
+}
+
+func TestSASLAuthenticateResponseV0(t *testing.T) {
+	item := saslAuthenticateResponseV0{
+		ErrorCode:    2,
+		ErrorMessage: "Message",
+		Data:         []byte("bytes"),
+	}
+
+	b := bytes.NewBuffer(nil)
+	w := &writeBuffer{w: b}
+	item.writeTo(w)
+
+	var found saslAuthenticateResponseV0
+	remain, err := (&found).readFrom(bufio.NewReader(b), b.Len())
+	if err != nil {
+		t.Error(err)
+		t.FailNow()
+	}
+	if remain != 0 {
+		t.Errorf("expected 0 remain, got %v", remain)
+		t.FailNow()
+	}
+	if !reflect.DeepEqual(item, found) {
+		t.Error("expected item and found to be the same")
+		t.FailNow()
+	}
+}
diff -pruN 0.2.1-1.1/saslhandshake.go 0.4.49+ds1-1/saslhandshake.go
--- 0.2.1-1.1/saslhandshake.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/saslhandshake.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,53 @@
+package kafka
+
+import (
+	"bufio"
+)
+
+// saslHandshakeRequestV0 implements the format for V0 and V1 SASL
+// requests (they are identical).
+type saslHandshakeRequestV0 struct {
+	// Mechanism holds the SASL Mechanism chosen by the client.
+	Mechanism string
+}
+
+func (t saslHandshakeRequestV0) size() int32 {
+	return sizeofString(t.Mechanism)
+}
+
+func (t *saslHandshakeRequestV0) readFrom(r *bufio.Reader, sz int) (remain int, err error) {
+	return readString(r, sz, &t.Mechanism)
+}
+
+func (t saslHandshakeRequestV0) writeTo(wb *writeBuffer) {
+	wb.writeString(t.Mechanism)
+}
+
+// saslHandshakeResponseV0 implements the format for V0 and V1 SASL
+// responses (they are identical).
+type saslHandshakeResponseV0 struct {
+	// ErrorCode holds response error code
+	ErrorCode int16
+
+	// Array of mechanisms enabled in the server
+	EnabledMechanisms []string
+}
+
+func (t saslHandshakeResponseV0) size() int32 {
+	return sizeofInt16(t.ErrorCode) + sizeofStringArray(t.EnabledMechanisms)
+}
+
+func (t saslHandshakeResponseV0) writeTo(wb *writeBuffer) {
+	wb.writeInt16(t.ErrorCode)
+	wb.writeStringArray(t.EnabledMechanisms)
+}
+
+func (t *saslHandshakeResponseV0) readFrom(r *bufio.Reader, sz int) (remain int, err error) {
+	if remain, err = readInt16(r, sz, &t.ErrorCode); err != nil {
+		return
+	}
+	if remain, err = readStringArray(r, remain, &t.EnabledMechanisms); err != nil {
+		return
+	}
+	return
+}
diff -pruN 0.2.1-1.1/saslhandshake_test.go 0.4.49+ds1-1/saslhandshake_test.go
--- 0.2.1-1.1/saslhandshake_test.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/saslhandshake_test.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,59 @@
+package kafka
+
+import (
+	"bufio"
+	"bytes"
+	"reflect"
+	"testing"
+)
+
+func TestSASLHandshakeRequestV0(t *testing.T) {
+	item := saslHandshakeRequestV0{
+		Mechanism: "SCRAM-SHA-512",
+	}
+
+	b := bytes.NewBuffer(nil)
+	w := &writeBuffer{w: b}
+	item.writeTo(w)
+
+	var found saslHandshakeRequestV0
+	remain, err := (&found).readFrom(bufio.NewReader(b), b.Len())
+	if err != nil {
+		t.Error(err)
+		t.FailNow()
+	}
+	if remain != 0 {
+		t.Errorf("expected 0 remain, got %v", remain)
+		t.FailNow()
+	}
+	if !reflect.DeepEqual(item, found) {
+		t.Error("expected item and found to be the same")
+		t.FailNow()
+	}
+}
+
+func TestSASLHandshakeResponseV0(t *testing.T) {
+	item := saslHandshakeResponseV0{
+		ErrorCode:         2,
+		EnabledMechanisms: []string{"PLAIN", "SCRAM-SHA-512"},
+	}
+
+	b := bytes.NewBuffer(nil)
+	w := &writeBuffer{w: b}
+	item.writeTo(w)
+
+	var found saslHandshakeResponseV0
+	remain, err := (&found).readFrom(bufio.NewReader(b), b.Len())
+	if err != nil {
+		t.Error(err)
+		t.FailNow()
+	}
+	if remain != 0 {
+		t.Errorf("expected 0 remain, got %v", remain)
+		t.FailNow()
+	}
+	if !reflect.DeepEqual(item, found) {
+		t.Error("expected item and found to be the same")
+		t.FailNow()
+	}
+}
diff -pruN 0.2.1-1.1/scripts/wait-for-kafka.sh 0.4.49+ds1-1/scripts/wait-for-kafka.sh
--- 0.2.1-1.1/scripts/wait-for-kafka.sh	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/scripts/wait-for-kafka.sh	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,20 @@
+#!/bin/bash
+
+COUNTER=0; 
+echo foo | nc localhost 9092
+STATUS=$?
+ATTEMPTS=60
+until [ ${STATUS} -eq 0 ] || [ "$COUNTER" -ge "${ATTEMPTS}" ];
+do
+    let COUNTER=$COUNTER+1;
+    sleep 1;
+    echo "[$COUNTER] waiting for 9092 port to be open";
+    echo foo | nc localhost 9092
+    STATUS=$?
+done
+
+if [ "${COUNTER}" -gt "${ATTEMPTS}" ];
+then
+    echo "Kafka is not running, failing"
+    exit 1
+fi
\ No newline at end of file
diff -pruN 0.2.1-1.1/sizeof.go 0.4.49+ds1-1/sizeof.go
--- 0.2.1-1.1/sizeof.go	2018-11-20 17:06:33.000000000 +0000
+++ 0.4.49+ds1-1/sizeof.go	2025-08-21 19:15:53.000000000 +0000
@@ -28,10 +28,6 @@ func sizeof(a interface{}) int32 {
 	panic(fmt.Sprintf("unsupported type: %T", a))
 }
 
-func sizeofInt8(_ int8) int32 {
-	return 1
-}
-
 func sizeofInt16(_ int16) int32 {
 	return 2
 }
@@ -48,8 +44,11 @@ func sizeofString(s string) int32 {
 	return 2 + int32(len(s))
 }
 
-func sizeofBool(_ bool) int32 {
-	return 1
+func sizeofNullableString(s *string) int32 {
+	if s == nil {
+		return 2
+	}
+	return sizeofString(*s)
 }
 
 func sizeofBytes(b []byte) int32 {
diff -pruN 0.2.1-1.1/snappy/snappy.go 0.4.49+ds1-1/snappy/snappy.go
--- 0.2.1-1.1/snappy/snappy.go	2018-11-20 17:06:33.000000000 +0000
+++ 0.4.49+ds1-1/snappy/snappy.go	2025-08-21 19:15:53.000000000 +0000
@@ -1,68 +1,24 @@
+// Package snappy does nothing; it's kept for backward compatibility to avoid
+// breaking the majority of programs that imported it to install the compression
+// codec, which is now always included.
 package snappy
 
-import (
-	"bytes"
-	"encoding/binary"
+import "github.com/segmentio/kafka-go/compress/snappy"
 
-	"github.com/golang/snappy"
-	"github.com/segmentio/kafka-go"
-)
-
-func init() {
-	kafka.RegisterCompressionCodec(func() kafka.CompressionCodec {
-		return NewCompressionCodec()
-	})
-}
+type CompressionCodec = snappy.Codec
 
-type CompressionCodec struct{}
-
-const Code = 2
-
-func NewCompressionCodec() CompressionCodec {
-	return CompressionCodec{}
-}
+type Framing = snappy.Framing
 
-// Code implements the kafka.CompressionCodec interface.
-func (c CompressionCodec) Code() int8 {
-	return Code
-}
-
-// Encode implements the kafka.CompressionCodec interface.
-func (c CompressionCodec) Encode(src []byte) ([]byte, error) {
-	// NOTE : passing a nil dst means snappy will allocate it.
-	return snappy.Encode(nil, src), nil
-}
+const (
+	Code     = 2
+	Framed   = snappy.Framed
+	Unframed = snappy.Unframed
+)
 
-// Decode implements the kafka.CompressionCodec interface.
-func (c CompressionCodec) Decode(src []byte) ([]byte, error) {
-	return decode(src)
+func NewCompressionCodec() *CompressionCodec {
+	return NewCompressionCodecFraming(Framed)
 }
 
-var xerialHeader = []byte{130, 83, 78, 65, 80, 80, 89, 0}
-
-// From github.com/eapache/go-xerial-snappy
-func decode(src []byte) ([]byte, error) {
-	if !bytes.Equal(src[:8], xerialHeader) {
-		return snappy.Decode(nil, src)
-	}
-
-	var (
-		pos   = uint32(16)
-		max   = uint32(len(src))
-		dst   = make([]byte, 0, len(src))
-		chunk []byte
-		err   error
-	)
-	for pos < max {
-		size := binary.BigEndian.Uint32(src[pos : pos+4])
-		pos += 4
-
-		chunk, err = snappy.Decode(chunk, src[pos:pos+size])
-		if err != nil {
-			return nil, err
-		}
-		pos += size
-		dst = append(dst, chunk...)
-	}
-	return dst, nil
+func NewCompressionCodecFraming(framing Framing) *CompressionCodec {
+	return &CompressionCodec{Framing: framing}
 }
diff -pruN 0.2.1-1.1/stats.go 0.4.49+ds1-1/stats.go
--- 0.2.1-1.1/stats.go	2018-11-20 17:06:33.000000000 +0000
+++ 0.4.49+ds1-1/stats.go	2025-08-21 19:15:53.000000000 +0000
@@ -6,19 +6,21 @@ import (
 )
 
 // SummaryStats is a data structure that carries a summary of observed values.
-// The average, minimum, and maximum are reported.
 type SummaryStats struct {
-	Avg int64 `metric:"avg" type:"gauge"`
-	Min int64 `metric:"min" type:"gauge"`
-	Max int64 `metric:"max" type:"gauge"`
+	Avg   int64 `metric:"avg" type:"gauge"`
+	Min   int64 `metric:"min" type:"gauge"`
+	Max   int64 `metric:"max" type:"gauge"`
+	Count int64 `metric:"count" type:"counter"`
+	Sum   int64 `metric:"sum" type:"counter"`
 }
 
-// DurationStats is a data structure that carries a summary of observed duration
-// values. The average, minimum, and maximum are reported.
+// DurationStats is a data structure that carries a summary of observed duration values.
 type DurationStats struct {
-	Avg time.Duration `metric:"avg" type:"gauge"`
-	Min time.Duration `metric:"min" type:"gauge"`
-	Max time.Duration `metric:"max" type:"gauge"`
+	Avg   time.Duration `metric:"avg" type:"gauge"`
+	Min   time.Duration `metric:"min" type:"gauge"`
+	Max   time.Duration `metric:"max" type:"gauge"`
+	Count int64         `metric:"count" type:"counter"`
+	Sum   time.Duration `metric:"sum" type:"counter"`
 }
 
 // counter is an atomic incrementing counter which gets reset on snapshot.
@@ -36,10 +38,7 @@ func (c *counter) observe(v int64) {
 }
 
 func (c *counter) snapshot() int64 {
-	p := c.ptr()
-	v := atomic.LoadInt64(p)
-	atomic.AddInt64(p, -v)
-	return v
+	return atomic.SwapInt64(c.ptr(), 0)
 }
 
 // gauge is an atomic integer that may be set to any arbitrary value, the value
@@ -170,17 +169,21 @@ func (s *summary) snapshot() SummaryStat
 	}
 
 	return SummaryStats{
-		Avg: avg,
-		Min: min,
-		Max: max,
+		Avg:   avg,
+		Min:   min,
+		Max:   max,
+		Count: count,
+		Sum:   sum,
 	}
 }
 
 func (s *summary) snapshotDuration() DurationStats {
 	summary := s.snapshot()
 	return DurationStats{
-		Avg: time.Duration(summary.Avg),
-		Min: time.Duration(summary.Min),
-		Max: time.Duration(summary.Max),
+		Avg:   time.Duration(summary.Avg),
+		Min:   time.Duration(summary.Min),
+		Max:   time.Duration(summary.Max),
+		Count: summary.Count,
+		Sum:   time.Duration(summary.Sum),
 	}
 }
diff -pruN 0.2.1-1.1/syncgroup.go 0.4.49+ds1-1/syncgroup.go
--- 0.2.1-1.1/syncgroup.go	2018-11-20 17:06:33.000000000 +0000
+++ 0.4.49+ds1-1/syncgroup.go	2025-08-21 19:15:53.000000000 +0000
@@ -3,8 +3,157 @@ package kafka
 import (
 	"bufio"
 	"bytes"
+	"context"
+	"fmt"
+	"net"
+	"time"
+
+	"github.com/segmentio/kafka-go/protocol"
+	"github.com/segmentio/kafka-go/protocol/consumer"
+	"github.com/segmentio/kafka-go/protocol/syncgroup"
 )
 
+// SyncGroupRequest is the request structure for the SyncGroup function.
+type SyncGroupRequest struct {
+	// Address of the kafka broker to send the request to.
+	Addr net.Addr
+
+	// GroupID of the group to sync.
+	GroupID string
+
+	// The generation of the group.
+	GenerationID int
+
+	// The member ID assigned by the group.
+	MemberID string
+
+	// The unique identifier for the consumer instance.
+	GroupInstanceID string
+
+	// The name for the class of protocols implemented by the group being joined.
+	ProtocolType string
+
+	// The group protocol name.
+	ProtocolName string
+
+	// The group member assignments.
+	Assignments []SyncGroupRequestAssignment
+}
+
+// SyncGroupRequestAssignment represents an assignment for a group member.
+type SyncGroupRequestAssignment struct {
+	// The ID of the member to assign.
+	MemberID string
+
+	// The member assignment.
+	Assignment GroupProtocolAssignment
+}
+
+// SyncGroupResponse is the response structure for the SyncGroup function.
+type SyncGroupResponse struct {
+	// An error that may have occurred when attempting to sync the group.
+	//
+	// The errors contain the kafka error code. Programs may use the standard
+	// errors.Is function to test the error against kafka error codes.
+	Error error
+
+	// The amount of time that the broker throttled the request.
+	Throttle time.Duration
+
+	// The group protocol type.
+	ProtocolType string
+
+	// The group protocol name.
+	ProtocolName string
+
+	// The member assignment.
+	Assignment GroupProtocolAssignment
+}
+
+// GroupProtocolAssignment represents an assignment of topics and partitions for a group member.
+type GroupProtocolAssignment struct {
+	// The topics and partitions assigned to the group member.
+	AssignedPartitions map[string][]int
+
+	// UserData for the assignment.
+	UserData []byte
+}
+
+// SyncGroup sends a sync group request to the coordinator and returns the response.
+func (c *Client) SyncGroup(ctx context.Context, req *SyncGroupRequest) (*SyncGroupResponse, error) {
+	syncGroup := syncgroup.Request{
+		GroupID:         req.GroupID,
+		GenerationID:    int32(req.GenerationID),
+		MemberID:        req.MemberID,
+		GroupInstanceID: req.GroupInstanceID,
+		ProtocolType:    req.ProtocolType,
+		ProtocolName:    req.ProtocolName,
+		Assignments:     make([]syncgroup.RequestAssignment, 0, len(req.Assignments)),
+	}
+
+	for _, assignment := range req.Assignments {
+		assign := consumer.Assignment{
+			Version:            consumer.MaxVersionSupported,
+			AssignedPartitions: make([]consumer.TopicPartition, 0, len(assignment.Assignment.AssignedPartitions)),
+			UserData:           assignment.Assignment.UserData,
+		}
+
+		for topic, partitions := range assignment.Assignment.AssignedPartitions {
+			tp := consumer.TopicPartition{
+				Topic:      topic,
+				Partitions: make([]int32, 0, len(partitions)),
+			}
+			for _, partition := range partitions {
+				tp.Partitions = append(tp.Partitions, int32(partition))
+			}
+			assign.AssignedPartitions = append(assign.AssignedPartitions, tp)
+		}
+
+		assignBytes, err := protocol.Marshal(consumer.MaxVersionSupported, assign)
+		if err != nil {
+			return nil, fmt.Errorf("kafka.(*Client).SyncGroup: %w", err)
+		}
+
+		syncGroup.Assignments = append(syncGroup.Assignments, syncgroup.RequestAssignment{
+			MemberID:   assignment.MemberID,
+			Assignment: assignBytes,
+		})
+	}
+
+	m, err := c.roundTrip(ctx, req.Addr, &syncGroup)
+	if err != nil {
+		return nil, fmt.Errorf("kafka.(*Client).SyncGroup: %w", err)
+	}
+
+	r := m.(*syncgroup.Response)
+
+	var assignment consumer.Assignment
+	err = protocol.Unmarshal(r.Assignments, consumer.MaxVersionSupported, &assignment)
+	if err != nil {
+		return nil, fmt.Errorf("kafka.(*Client).SyncGroup: %w", err)
+	}
+
+	res := &SyncGroupResponse{
+		Throttle:     makeDuration(r.ThrottleTimeMS),
+		Error:        makeError(r.ErrorCode, ""),
+		ProtocolType: r.ProtocolType,
+		ProtocolName: r.ProtocolName,
+		Assignment: GroupProtocolAssignment{
+			AssignedPartitions: make(map[string][]int, len(assignment.AssignedPartitions)),
+			UserData:           assignment.UserData,
+		},
+	}
+	partitions := map[string][]int{}
+	for _, topicPartition := range assignment.AssignedPartitions {
+		for _, partition := range topicPartition.Partitions {
+			partitions[topicPartition.Topic] = append(partitions[topicPartition.Topic], int(partition))
+		}
+	}
+	res.Assignment.AssignedPartitions = partitions
+
+	return res, nil
+}
+
 type groupAssignment struct {
 	Version  int16
 	Topics   map[string][]int32
@@ -21,16 +170,16 @@ func (t groupAssignment) size() int32 {
 	return sz + sizeofBytes(t.UserData)
 }
 
-func (t groupAssignment) writeTo(w *bufio.Writer) {
-	writeInt16(w, t.Version)
-	writeInt32(w, int32(len(t.Topics)))
+func (t groupAssignment) writeTo(wb *writeBuffer) {
+	wb.writeInt16(t.Version)
+	wb.writeInt32(int32(len(t.Topics)))
 
 	for topic, partitions := range t.Topics {
-		writeString(w, topic)
-		writeInt32Array(w, partitions)
+		wb.writeString(topic)
+		wb.writeInt32Array(partitions)
 	}
 
-	writeBytes(w, t.UserData)
+	wb.writeBytes(t.UserData)
 }
 
 func (t *groupAssignment) readFrom(r *bufio.Reader, size int) (remain int, err error) {
@@ -57,9 +206,7 @@ func (t *groupAssignment) readFrom(r *bu
 
 func (t groupAssignment) bytes() []byte {
 	buf := bytes.NewBuffer(nil)
-	w := bufio.NewWriter(buf)
-	t.writeTo(w)
-	w.Flush()
+	t.writeTo(&writeBuffer{w: buf})
 	return buf.Bytes()
 }
 
@@ -78,9 +225,9 @@ func (t syncGroupRequestGroupAssignmentV
 		sizeofBytes(t.MemberAssignments)
 }
 
-func (t syncGroupRequestGroupAssignmentV0) writeTo(w *bufio.Writer) {
-	writeString(w, t.MemberID)
-	writeBytes(w, t.MemberAssignments)
+func (t syncGroupRequestGroupAssignmentV0) writeTo(wb *writeBuffer) {
+	wb.writeString(t.MemberID)
+	wb.writeBytes(t.MemberAssignments)
 }
 
 type syncGroupRequestV0 struct {
@@ -103,11 +250,11 @@ func (t syncGroupRequestV0) size() int32
 		sizeofArray(len(t.GroupAssignments), func(i int) int32 { return t.GroupAssignments[i].size() })
 }
 
-func (t syncGroupRequestV0) writeTo(w *bufio.Writer) {
-	writeString(w, t.GroupID)
-	writeInt32(w, t.GenerationID)
-	writeString(w, t.MemberID)
-	writeArray(w, len(t.GroupAssignments), func(i int) { t.GroupAssignments[i].writeTo(w) })
+func (t syncGroupRequestV0) writeTo(wb *writeBuffer) {
+	wb.writeString(t.GroupID)
+	wb.writeInt32(t.GenerationID)
+	wb.writeString(t.MemberID)
+	wb.writeArray(len(t.GroupAssignments), func(i int) { t.GroupAssignments[i].writeTo(wb) })
 }
 
 type syncGroupResponseV0 struct {
@@ -125,9 +272,9 @@ func (t syncGroupResponseV0) size() int3
 		sizeofBytes(t.MemberAssignments)
 }
 
-func (t syncGroupResponseV0) writeTo(w *bufio.Writer) {
-	writeInt16(w, t.ErrorCode)
-	writeBytes(w, t.MemberAssignments)
+func (t syncGroupResponseV0) writeTo(wb *writeBuffer) {
+	wb.writeInt16(t.ErrorCode)
+	wb.writeBytes(t.MemberAssignments)
 }
 
 func (t *syncGroupResponseV0) readFrom(r *bufio.Reader, sz int) (remain int, err error) {
diff -pruN 0.2.1-1.1/syncgroup_test.go 0.4.49+ds1-1/syncgroup_test.go
--- 0.2.1-1.1/syncgroup_test.go	2018-11-20 17:06:33.000000000 +0000
+++ 0.4.49+ds1-1/syncgroup_test.go	2025-08-21 19:15:53.000000000 +0000
@@ -3,11 +3,159 @@ package kafka
 import (
 	"bufio"
 	"bytes"
+	"context"
+	"errors"
 	"io"
 	"reflect"
 	"testing"
+	"time"
 )
 
+func TestClientSyncGroup(t *testing.T) {
+	// In order to get to a sync group call we need to first
+	// join a group.
+	topic := makeTopic()
+	client, shutdown := newLocalClient()
+	client.Timeout = time.Minute
+	// Although ClientID is nullable at higher API versions,
+	// for some reason the SyncGroup API call errors
+	// when ClientID is null.
+	// The Java Kafka Consumer generates a ClientID if one is not
+	// present or if the provided ClientID is empty.
+	client.Transport.(*Transport).ClientID = "test-client"
+	defer shutdown()
+
+	err := clientCreateTopic(client, topic, 3)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	groupID := makeGroupID()
+
+	ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
+	defer cancel()
+	respc, err := waitForCoordinatorIndefinitely(ctx, client, &FindCoordinatorRequest{
+		Addr:    client.Addr,
+		Key:     groupID,
+		KeyType: CoordinatorKeyTypeConsumer,
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if respc.Error != nil {
+		t.Fatal(err)
+	}
+
+	groupInstanceID := "group-instance-id"
+	userData := "user-data"
+
+	var rrGroupBalancer RoundRobinGroupBalancer
+
+	req := &JoinGroupRequest{
+		GroupID:          groupID,
+		GroupInstanceID:  groupInstanceID,
+		ProtocolType:     "consumer",
+		SessionTimeout:   time.Minute,
+		RebalanceTimeout: time.Minute,
+		Protocols: []GroupProtocol{
+			{
+				Name: rrGroupBalancer.ProtocolName(),
+				Metadata: GroupProtocolSubscription{
+					Topics:   []string{topic},
+					UserData: []byte(userData),
+					OwnedPartitions: map[string][]int{
+						topic: {0, 1, 2},
+					},
+				},
+			},
+		},
+	}
+
+	var resp *JoinGroupResponse
+
+	for {
+		resp, err = client.JoinGroup(ctx, req)
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		if errors.Is(resp.Error, MemberIDRequired) {
+			req.MemberID = resp.MemberID
+			time.Sleep(time.Second)
+			continue
+		}
+
+		if resp.Error != nil {
+			t.Fatal(resp.Error)
+		}
+		break
+	}
+
+	if resp.MemberID != resp.LeaderID {
+		t.Fatalf("expected to be group leader %s got %s", resp.MemberID, resp.LeaderID)
+	}
+
+	groupMembers := make([]GroupMember, 0, len(resp.Members))
+	groupUserDataLookup := make(map[string]GroupMember)
+	for _, member := range resp.Members {
+		gm := GroupMember{
+			ID:       member.ID,
+			Topics:   member.Metadata.Topics,
+			UserData: member.Metadata.UserData,
+		}
+		groupMembers = append(groupMembers, gm)
+		groupUserDataLookup[member.ID] = gm
+	}
+
+	metaResp, err := client.Metadata(ctx, &MetadataRequest{
+		Topics: []string{topic},
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	assignments := rrGroupBalancer.AssignGroups(groupMembers, metaResp.Topics[0].Partitions)
+
+	sgRequest := &SyncGroupRequest{
+		GroupID:         groupID,
+		GenerationID:    resp.GenerationID,
+		MemberID:        resp.MemberID,
+		GroupInstanceID: groupInstanceID,
+		ProtocolType:    "consumer",
+		ProtocolName:    rrGroupBalancer.ProtocolName(),
+	}
+
+	for member, assignment := range assignments {
+		sgRequest.Assignments = append(sgRequest.Assignments, SyncGroupRequestAssignment{
+			MemberID: member,
+			Assignment: GroupProtocolAssignment{
+				AssignedPartitions: assignment,
+				UserData:           groupUserDataLookup[member].UserData,
+			},
+		})
+	}
+	sgResp, err := client.SyncGroup(ctx, sgRequest)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if sgResp.Error != nil {
+		t.Fatal(sgResp.Error)
+	}
+
+	expectedAssignment := GroupProtocolAssignment{
+		AssignedPartitions: map[string][]int{
+			topic: {0, 1, 2},
+		},
+		UserData: []byte(userData),
+	}
+
+	if !reflect.DeepEqual(sgResp.Assignment, expectedAssignment) {
+		t.Fatalf("\nexpected assignment to be \n%#v \ngot\n%#v", expectedAssignment, sgResp.Assignment)
+	}
+}
+
 func TestGroupAssignment(t *testing.T) {
 	item := groupAssignment{
 		Version: 1,
@@ -18,13 +166,12 @@ func TestGroupAssignment(t *testing.T) {
 		UserData: []byte(`blah`),
 	}
 
-	buf := bytes.NewBuffer(nil)
-	w := bufio.NewWriter(buf)
+	b := bytes.NewBuffer(nil)
+	w := &writeBuffer{w: b}
 	item.writeTo(w)
-	w.Flush()
 
 	var found groupAssignment
-	remain, err := (&found).readFrom(bufio.NewReader(buf), buf.Len())
+	remain, err := (&found).readFrom(bufio.NewReader(b), b.Len())
 	if err != nil {
 		t.Error(err)
 		t.FailNow()
@@ -61,13 +208,12 @@ func TestSyncGroupResponseV0(t *testing.
 		MemberAssignments: []byte(`blah`),
 	}
 
-	buf := bytes.NewBuffer(nil)
-	w := bufio.NewWriter(buf)
+	b := bytes.NewBuffer(nil)
+	w := &writeBuffer{w: b}
 	item.writeTo(w)
-	w.Flush()
 
 	var found syncGroupResponseV0
-	remain, err := (&found).readFrom(bufio.NewReader(buf), buf.Len())
+	remain, err := (&found).readFrom(bufio.NewReader(b), b.Len())
 	if err != nil {
 		t.Error(err)
 		t.FailNow()
@@ -88,14 +234,13 @@ func BenchmarkSyncGroupResponseV0(t *tes
 		MemberAssignments: []byte(`blah`),
 	}
 
-	buf := bytes.NewBuffer(nil)
-	w := bufio.NewWriter(buf)
+	b := bytes.NewBuffer(nil)
+	w := &writeBuffer{w: b}
 	item.writeTo(w)
-	w.Flush()
 
-	r := bytes.NewReader(buf.Bytes())
+	r := bytes.NewReader(b.Bytes())
 	reader := bufio.NewReader(r)
-	size := buf.Len()
+	size := b.Len()
 
 	for i := 0; i < t.N; i++ {
 		r.Seek(0, io.SeekStart)
diff -pruN 0.2.1-1.1/testing/conn.go 0.4.49+ds1-1/testing/conn.go
--- 0.2.1-1.1/testing/conn.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/testing/conn.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,32 @@
+package testing
+
+import (
+	"context"
+	"net"
+	"sync"
+)
+
+type ConnWaitGroup struct {
+	DialFunc func(context.Context, string, string) (net.Conn, error)
+	sync.WaitGroup
+}
+
+func (g *ConnWaitGroup) Dial(ctx context.Context, network, address string) (net.Conn, error) {
+	c, err := g.DialFunc(ctx, network, address)
+	if err != nil {
+		return nil, err
+	}
+	g.Add(1)
+	return &groupConn{Conn: c, group: g}, nil
+}
+
+type groupConn struct {
+	net.Conn
+	group *ConnWaitGroup
+	once  sync.Once
+}
+
+func (c *groupConn) Close() error {
+	defer c.once.Do(c.group.Done)
+	return c.Conn.Close()
+}
diff -pruN 0.2.1-1.1/testing/version.go 0.4.49+ds1-1/testing/version.go
--- 0.2.1-1.1/testing/version.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/testing/version.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,58 @@
+package testing
+
+import (
+	"os"
+	"strconv"
+	"strings"
+)
+
+type semver []int
+
+func (v semver) atLeast(other semver) bool {
+	for i := range v {
+		if i >= len(other) {
+			break
+		}
+		if v[i] < other[i] {
+			return false
+		}
+		if v[i] > other[i] {
+			return true
+		}
+	}
+	for i := len(v); i < len(other); i++ {
+		if other[i] > 0 {
+			return false
+		}
+	}
+	return true
+}
+
+// kafkaVersion is set in the circle config.  It can also be provided on the
+// command line in order to target a particular kafka version.
+var kafkaVersion = parseVersion(os.Getenv("KAFKA_VERSION"))
+
+// KafkaIsAtLeast returns true when the test broker is running a protocol
+// version that is semver or newer.  It determines the broker's version using
+// the `KAFKA_VERSION` environment variable.  If the var is unset, then this
+// function will return true.
+func KafkaIsAtLeast(semver string) bool {
+	return kafkaVersion.atLeast(parseVersion(semver))
+}
+
+func parseVersion(semver string) semver {
+	if semver == "" {
+		return nil
+	}
+	parts := strings.Split(semver, ".")
+	version := make([]int, len(parts))
+	for i := range version {
+		v, err := strconv.Atoi(parts[i])
+		if err != nil {
+			// panicking because tests should be using hard-coded version values
+			panic("invalid version string: " + semver)
+		}
+		version[i] = v
+	}
+	return version
+}
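As a rough sketch of how the helper above is meant to be consumed (the test name and version gate below are hypothetical, not taken from the suite), a test can skip features the targeted broker does not support:

	package kafka_test

	import (
		"testing"

		ktesting "github.com/segmentio/kafka-go/testing"
	)

	func TestFeatureNeedingHeaders(t *testing.T) {
		// Record headers only exist on brokers running 0.11.0 or newer, so the
		// test is skipped when KAFKA_VERSION points at an older broker.
		if !ktesting.KafkaIsAtLeast("0.11.0") {
			t.Skip("kafka version is too old, skipping")
		}
		// ... exercise the feature against the local broker here ...
	}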
diff -pruN 0.2.1-1.1/testing/version_test.go 0.4.49+ds1-1/testing/version_test.go
--- 0.2.1-1.1/testing/version_test.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/testing/version_test.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,19 @@
+package testing
+
+import (
+	"testing"
+)
+
+func TestSemVersionAtLeastEmpty(t *testing.T) {
+	result := semver([]int{}).atLeast(semver([]int{1, 2}))
+	if result {
+		t.Errorf("Empty version can't be at least 1.2")
+	}
+}
+
+func TestSemVersionAtLeastShorter(t *testing.T) {
+	result := semver([]int{1, 1}).atLeast(semver([]int{1, 1, 2}))
+	if result {
+		t.Errorf("Version 1.1 version can't be at least 1.1.2")
+	}
+}
diff -pruN 0.2.1-1.1/time.go 0.4.49+ds1-1/time.go
--- 0.2.1-1.1/time.go	2018-11-20 17:06:33.000000000 +0000
+++ 0.4.49+ds1-1/time.go	2025-08-21 19:15:53.000000000 +0000
@@ -11,6 +11,13 @@ const (
 	defaultRTT = 1 * time.Second
 )
 
+func makeTime(t int64) time.Time {
+	if t <= 0 {
+		return time.Time{}
+	}
+	return time.Unix(t/1000, (t%1000)*int64(time.Millisecond)).UTC()
+}
+
 func timestamp(t time.Time) int64 {
 	if t.IsZero() {
 		return 0
@@ -18,11 +25,7 @@ func timestamp(t time.Time) int64 {
 	return t.UnixNano() / int64(time.Millisecond)
 }
 
-func timestampToTime(t int64) time.Time {
-	return time.Unix(t/1000, (t%1000)*int64(time.Millisecond))
-}
-
-func duration(ms int32) time.Duration {
+func makeDuration(ms int32) time.Duration {
 	return time.Duration(ms) * time.Millisecond
 }
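Kafka transmits timestamps as milliseconds since the Unix epoch and durations as millisecond counts; the helpers above convert between those wire values and time.Time / time.Duration. A minimal sketch of the round trip (the function name and values are only for illustration, and it would live inside the kafka package since the helpers are unexported):

	package kafka

	import (
		"fmt"
		"time"
	)

	func sketchTimeConversions() {
		ts := timestamp(time.Date(2020, time.January, 1, 0, 0, 0, 0, time.UTC))
		fmt.Println(ts)                 // 1577836800000 (milliseconds since the Unix epoch)
		fmt.Println(makeTime(ts))       // 2020-01-01 00:00:00 +0000 UTC
		fmt.Println(makeTime(0))        // 0001-01-01 00:00:00 +0000 UTC (the zero time)
		fmt.Println(makeDuration(1500)) // 1.5s
	}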
 
diff -pruN 0.2.1-1.1/topics/list_topics.go 0.4.49+ds1-1/topics/list_topics.go
--- 0.2.1-1.1/topics/list_topics.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/topics/list_topics.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,42 @@
+// Package topics is an experimental package that provides additional tooling
+// around Kafka Topics. This package does not make any promises around
+// backwards compatibility.
+package topics
+
+import (
+	"context"
+	"errors"
+	"regexp"
+
+	"github.com/segmentio/kafka-go"
+)
+
+// List returns a slice of all the Topics.
+func List(ctx context.Context, client *kafka.Client) (topics []kafka.Topic, err error) {
+	if client == nil {
+		return nil, errors.New("client is required")
+	}
+	response, err := client.Metadata(ctx, &kafka.MetadataRequest{
+		Addr: client.Addr,
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	return response.Topics, nil
+}
+
+// ListRe returns a slice of Topics that match a regex.
+func ListRe(ctx context.Context, cli *kafka.Client, re *regexp.Regexp) (topics []kafka.Topic, err error) {
+	alltopics, err := List(ctx, cli)
+	if err != nil {
+		return nil, err
+	}
+
+	for _, val := range alltopics {
+		if re.MatchString(val.Name) {
+			topics = append(topics, val)
+		}
+	}
+	return topics, nil
+}
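Putting the two helpers together, a short usage sketch (the broker address and topic pattern are placeholders, not part of the package):

	package main

	import (
		"context"
		"fmt"
		"log"
		"regexp"

		"github.com/segmentio/kafka-go"
		"github.com/segmentio/kafka-go/topics"
	)

	func main() {
		client := &kafka.Client{Addr: kafka.TCP("localhost:9092")}

		// List every topic known to the cluster.
		all, err := topics.List(context.Background(), client)
		if err != nil {
			log.Fatal(err)
		}
		for _, t := range all {
			fmt.Println(t.Name)
		}

		// Or only the topics whose names match a regular expression.
		matched, err := topics.ListRe(context.Background(), client, regexp.MustCompile(`^events-`))
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(len(matched), "topics match")
	}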
diff -pruN 0.2.1-1.1/topics/list_topics_test.go 0.4.49+ds1-1/topics/list_topics_test.go
--- 0.2.1-1.1/topics/list_topics_test.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/topics/list_topics_test.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,117 @@
+package topics
+
+import (
+	"context"
+	"net"
+	"regexp"
+	"testing"
+	"time"
+
+	"github.com/segmentio/kafka-go"
+	ktesting "github.com/segmentio/kafka-go/testing"
+)
+
+func TestListReNil(t *testing.T) {
+	_, err := ListRe(context.Background(), nil, nil)
+	if err == nil {
+		t.Fatal(err)
+	}
+}
+
+func TestListRe(t *testing.T) {
+	client, shutdown := newLocalClientWithTopic("TestTopics-A", 1)
+	defer shutdown()
+	clientCreateTopic(client, "TestTopics-B", 1)
+
+	allRegex := regexp.MustCompile("TestTopics-.*")
+	fooRegex := regexp.MustCompile("TestTopics-B")
+
+	// Get all the topics
+	topics, err := ListRe(context.Background(), client, allRegex)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if len(topics) != 2 {
+		t.Error("the wrong number of topics were returned. ", len(topics))
+	}
+
+	// Get one topic
+	topics, err = ListRe(context.Background(), client, fooRegex)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if len(topics) != 1 {
+		t.Error("the wrong number of topics were returned. ", len(topics))
+	}
+}
+
+func newLocalClientWithTopic(topic string, partitions int) (*kafka.Client, func()) {
+	client, shutdown := newLocalClient()
+	if err := clientCreateTopic(client, topic, partitions); err != nil {
+		shutdown()
+		panic(err)
+	}
+	return client, func() {
+		client.DeleteTopics(context.Background(), &kafka.DeleteTopicsRequest{
+			Topics: []string{topic},
+		})
+		shutdown()
+	}
+}
+
+func clientCreateTopic(client *kafka.Client, topic string, partitions int) error {
+	_, err := client.CreateTopics(context.Background(), &kafka.CreateTopicsRequest{
+		Topics: []kafka.TopicConfig{{
+			Topic:             topic,
+			NumPartitions:     partitions,
+			ReplicationFactor: 1,
+		}},
+	})
+	if err != nil {
+		return err
+	}
+
+	// Topic creation seems to be asynchronous. Metadata for the topic partition
+	// layout in the cluster is available in the controller before being synced
+	// with the other brokers, which causes "Error:[3] Unknown Topic Or Partition"
+	// when sending requests to the partition leaders.
+	//
+	// This loop will wait up to 2 seconds polling the cluster until no errors
+	// are returned.
+	for i := 0; i < 20; i++ {
+		r, err := client.Fetch(context.Background(), &kafka.FetchRequest{
+			Topic:     topic,
+			Partition: 0,
+			Offset:    0,
+		})
+		if err == nil && r.Error == nil {
+			break
+		}
+		time.Sleep(100 * time.Millisecond)
+	}
+
+	return nil
+}
+
+func newLocalClient() (*kafka.Client, func()) {
+	return newClient(kafka.TCP("localhost"))
+}
+
+func newClient(addr net.Addr) (*kafka.Client, func()) {
+	conns := &ktesting.ConnWaitGroup{
+		DialFunc: (&net.Dialer{}).DialContext,
+	}
+
+	transport := &kafka.Transport{
+		Dial:     conns.Dial,
+		Resolver: kafka.NewBrokerResolver(nil),
+	}
+
+	client := &kafka.Client{
+		Addr:      addr,
+		Timeout:   5 * time.Second,
+		Transport: transport,
+	}
+
+	return client, func() { transport.CloseIdleConnections(); conns.Wait() }
+}
diff -pruN 0.2.1-1.1/transport.go 0.4.49+ds1-1/transport.go
--- 0.2.1-1.1/transport.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/transport.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,1363 @@
+package kafka
+
+import (
+	"context"
+	"crypto/tls"
+	"errors"
+	"fmt"
+	"io"
+	"math/rand"
+	"net"
+	"runtime/pprof"
+	"sort"
+	"strconv"
+	"strings"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"github.com/segmentio/kafka-go/protocol"
+	"github.com/segmentio/kafka-go/protocol/apiversions"
+	"github.com/segmentio/kafka-go/protocol/createtopics"
+	"github.com/segmentio/kafka-go/protocol/findcoordinator"
+	meta "github.com/segmentio/kafka-go/protocol/metadata"
+	"github.com/segmentio/kafka-go/protocol/saslauthenticate"
+	"github.com/segmentio/kafka-go/protocol/saslhandshake"
+	"github.com/segmentio/kafka-go/sasl"
+)
+
+// Request is an interface implemented by types that represent messages sent
+// from kafka clients to brokers.
+type Request = protocol.Message
+
+// Response is an interface implemented by types that represent messages sent
+// from kafka brokers in response to client requests.
+type Response = protocol.Message
+
+// RoundTripper is an interface implemented by types which support interacting
+// with kafka brokers.
+type RoundTripper interface {
+	// RoundTrip sends a request to a kafka broker and returns the response that
+	// was received, or a non-nil error.
+	//
+	// The context passed as first argument can be used to asynchronously abort
+	// the call if needed.
+	RoundTrip(context.Context, net.Addr, Request) (Response, error)
+}
+
+// Transport is an implementation of the RoundTripper interface.
+//
+// Transport values manage a pool of connections and automatically discovers the
+// clusters layout to route requests to the appropriate brokers.
+//
+// Transport values are safe to use concurrently from multiple goroutines.
+//
+// Note: The intent is for the Transport to become the underlying layer of the
+// kafka.Reader and kafka.Writer types.
+type Transport struct {
+	// A function used to establish connections to the kafka cluster.
+	Dial func(context.Context, string, string) (net.Conn, error)
+
+	// Time limit set for establishing connections to the kafka cluster. This
+	// limit includes all round trips done to establish the connections (TLS
+	// handshake, SASL negotiation, etc...).
+	//
+	// Defaults to 5s.
+	DialTimeout time.Duration
+
+	// Maximum amount of time that connections will remain open and unused.
+	// The transport automatically closes connections that have been idle for
+	// too long, and re-opens them on demand when the transport is used again.
+	//
+	// Defaults to 30s.
+	IdleTimeout time.Duration
+
+	// TTL for the metadata cached by this transport. Note that the value
+	// configured here is an upper bound; the transport randomizes the TTLs to
+	// avoid getting into states where multiple clients end up synchronized and
+	// cause bursts of requests to the kafka broker.
+	//
+	// Defaults to 6s.
+	MetadataTTL time.Duration
+
+	// Topic names for the metadata cached by this transport. If this field is left blank,
+	// metadata information of all topics in the cluster will be retrieved.
+	MetadataTopics []string
+
+	// Unique identifier that the transport communicates to the brokers when it
+	// sends requests.
+	ClientID string
+
+	// An optional configuration for TLS connections established by this
+	// transport.
+	//
+	// If the ServerName field is left blank, it is populated with the broker
+	// host name when connections are established.
+	TLS *tls.Config
+
+	// SASL configures the Transport to use SASL authentication.
+	SASL sasl.Mechanism
+
+	// An optional resolver used to translate broker host names into network
+	// addresses.
+	//
+	// The resolver will be called for every request (not every connection),
+	// making it possible to implement ACL policies by validating that the
+	// program is allowed to connect to the kafka broker. This also means that
+	// the resolver should probably provide a caching layer to avoid storming
+	// the service discovery backend with requests.
+	//
+	// When set, the Dial function is not responsible for performing name
+	// resolution, and is always called with a pre-resolved address.
+	Resolver BrokerResolver
+
+	// The background context used to control goroutines started internally by
+	// the transport.
+	//
+	// If nil, context.Background() is used instead.
+	Context context.Context
+
+	mutex sync.RWMutex
+	pools map[networkAddress]*connPool
+}
+
+// DefaultTransport is the default transport used by kafka clients in this
+// package.
+var DefaultTransport RoundTripper = &Transport{
+	Dial: (&net.Dialer{
+		Timeout:   3 * time.Second,
+		DualStack: true,
+	}).DialContext,
+}
+
+// CloseIdleConnections closes all idle connections immediately, and marks all
+// connections that are in use to be closed when they become idle again.
+func (t *Transport) CloseIdleConnections() {
+	t.mutex.Lock()
+	defer t.mutex.Unlock()
+
+	for _, pool := range t.pools {
+		pool.unref()
+	}
+
+	for k := range t.pools {
+		delete(t.pools, k)
+	}
+}
+
+// RoundTrip sends a request to a kafka cluster and returns the response, or an
+// error if no responses were received.
+//
+// Message types are available in sub-packages of the protocol package. Each
+// kafka API is implemented in a different sub-package. For example, the request
+// and response types for the Fetch API are available in the protocol/fetch
+// package.
+//
+// The type of the response message will match the type of the request. For
+// example, if RoundTrip was called with a *fetch.Request as argument, the value
+// returned will be of type *fetch.Response. It is safe for the program to do a
+// type assertion after checking that no error was returned.
+//
+// This example illustrates the way this method is expected to be used:
+//
+//	r, err := transport.RoundTrip(ctx, addr, &fetch.Request{ ... })
+//	if err != nil {
+//		...
+//	} else {
+//		res := r.(*fetch.Response)
+//		...
+//	}
+//
+// The transport automatically selects the highest version of the API that is
+// supported by both the kafka-go package and the kafka broker. The negotiation
+// happens transparently once when connections are established.
+//
+// This API was introduced in version 0.4 as a way to leverage the lower-level
+// features of the kafka protocol, but also provide a more efficient way of
+// managing connections to kafka brokers.
+func (t *Transport) RoundTrip(ctx context.Context, addr net.Addr, req Request) (Response, error) {
+	p := t.grabPool(addr)
+	defer p.unref()
+	return p.roundTrip(ctx, req)
+}
+
+func (t *Transport) dial() func(context.Context, string, string) (net.Conn, error) {
+	if t.Dial != nil {
+		return t.Dial
+	}
+	return defaultDialer.DialContext
+}
+
+func (t *Transport) dialTimeout() time.Duration {
+	if t.DialTimeout > 0 {
+		return t.DialTimeout
+	}
+	return 5 * time.Second
+}
+
+func (t *Transport) idleTimeout() time.Duration {
+	if t.IdleTimeout > 0 {
+		return t.IdleTimeout
+	}
+	return 30 * time.Second
+}
+
+func (t *Transport) metadataTTL() time.Duration {
+	if t.MetadataTTL > 0 {
+		return t.MetadataTTL
+	}
+	return 6 * time.Second
+}
+
+func (t *Transport) grabPool(addr net.Addr) *connPool {
+	k := networkAddress{
+		network: addr.Network(),
+		address: addr.String(),
+	}
+
+	t.mutex.RLock()
+	p := t.pools[k]
+	if p != nil {
+		p.ref()
+	}
+	t.mutex.RUnlock()
+
+	if p != nil {
+		return p
+	}
+
+	t.mutex.Lock()
+	defer t.mutex.Unlock()
+
+	if p := t.pools[k]; p != nil {
+		p.ref()
+		return p
+	}
+
+	ctx, cancel := context.WithCancel(t.context())
+
+	p = &connPool{
+		refc: 2,
+
+		dial:           t.dial(),
+		dialTimeout:    t.dialTimeout(),
+		idleTimeout:    t.idleTimeout(),
+		metadataTTL:    t.metadataTTL(),
+		metadataTopics: t.MetadataTopics,
+		clientID:       t.ClientID,
+		tls:            t.TLS,
+		sasl:           t.SASL,
+		resolver:       t.Resolver,
+
+		ready:  make(event),
+		wake:   make(chan event),
+		conns:  make(map[int32]*connGroup),
+		cancel: cancel,
+	}
+
+	p.ctrl = p.newConnGroup(addr)
+	go p.discover(ctx, p.wake)
+
+	if t.pools == nil {
+		t.pools = make(map[networkAddress]*connPool)
+	}
+	t.pools[k] = p
+	return p
+}
+
+func (t *Transport) context() context.Context {
+	if t.Context != nil {
+		return t.Context
+	}
+	return context.Background()
+}
+
+type event chan struct{}
+
+func (e event) trigger() { close(e) }
+
+type connPool struct {
+	refc uintptr
+	// Immutable fields of the connection pool. Connections access these fields
+	// on their parent pool in a read-only fashion, so no synchronization is
+	// required.
+	dial           func(context.Context, string, string) (net.Conn, error)
+	dialTimeout    time.Duration
+	idleTimeout    time.Duration
+	metadataTTL    time.Duration
+	metadataTopics []string
+	clientID       string
+	tls            *tls.Config
+	sasl           sasl.Mechanism
+	resolver       BrokerResolver
+	// Signaling mechanisms to orchestrate communications between the pool and
+	// the rest of the program.
+	once   sync.Once  // ensure that `ready` is triggered only once
+	ready  event      // triggered after the first metadata update
+	wake   chan event // used to force metadata updates
+	cancel context.CancelFunc
+	// Mutable fields of the connection pool, access must be synchronized.
+	mutex sync.RWMutex
+	conns map[int32]*connGroup // data connections used for produce/fetch/etc...
+	ctrl  *connGroup           // control connections used for metadata requests
+	state atomic.Value         // cached cluster state
+}
+
+type connPoolState struct {
+	metadata *meta.Response   // last metadata response seen by the pool
+	err      error            // last error from metadata requests
+	layout   protocol.Cluster // cluster layout built from metadata response
+}
+
+func (p *connPool) grabState() connPoolState {
+	state, _ := p.state.Load().(connPoolState)
+	return state
+}
+
+func (p *connPool) setState(state connPoolState) {
+	p.state.Store(state)
+}
+
+func (p *connPool) ref() {
+	atomic.AddUintptr(&p.refc, +1)
+}
+
+func (p *connPool) unref() {
+	if atomic.AddUintptr(&p.refc, ^uintptr(0)) == 0 {
+		p.mutex.Lock()
+		defer p.mutex.Unlock()
+
+		for _, conns := range p.conns {
+			conns.closeIdleConns()
+		}
+
+		p.ctrl.closeIdleConns()
+		p.cancel()
+	}
+}
+
+func (p *connPool) roundTrip(ctx context.Context, req Request) (Response, error) {
+	// This first select should never block after the first metadata response
+	// that would mark the pool as `ready`.
+	select {
+	case <-p.ready:
+	case <-ctx.Done():
+		return nil, ctx.Err()
+	}
+
+	state := p.grabState()
+	var response promise
+
+	switch m := req.(type) {
+	case *meta.Request:
+		// We serve metadata requests directly from the transport cache unless
+		// we would like to auto create a topic that isn't in our cache.
+		//
+		// This reduces the number of round trips to kafka brokers while keeping
+		// the logic simple when applying partitioning strategies.
+		if state.err != nil {
+			return nil, state.err
+		}
+
+		cachedMeta := filterMetadataResponse(m, state.metadata)
+		// requestNeeded indicates if we need to send this metadata request to the server.
+		// It's true when we want to auto-create topics and we don't have the topic in our
+		// cache.
+		var requestNeeded bool
+		if m.AllowAutoTopicCreation {
+			for _, topic := range cachedMeta.Topics {
+				if topic.ErrorCode == int16(UnknownTopicOrPartition) {
+					requestNeeded = true
+					break
+				}
+			}
+		}
+
+		if !requestNeeded {
+			return cachedMeta, nil
+		}
+
+	case protocol.Splitter:
+		// Messages that implement the Splitter interface trigger the creation of
+		// multiple requests that are all merged back into a single result by
+		// a merger.
+		messages, merger, err := m.Split(state.layout)
+		if err != nil {
+			return nil, err
+		}
+		promises := make([]promise, len(messages))
+		for i, m := range messages {
+			promises[i] = p.sendRequest(ctx, m, state)
+		}
+		response = join(promises, messages, merger)
+	}
+
+	if response == nil {
+		response = p.sendRequest(ctx, req, state)
+	}
+
+	r, err := response.await(ctx)
+	if err != nil {
+		return r, err
+	}
+
+	switch resp := r.(type) {
+	case *createtopics.Response:
+		// Force an update of the metadata when adding topics,
+		// otherwise the cached state would get out of sync.
+		topicsToRefresh := make([]string, 0, len(resp.Topics))
+		for _, topic := range resp.Topics {
+			// Fixes issue 672: don't refresh topics that failed to create, since
+			// doing so causes the library to hang indefinitely.
+			if topic.ErrorCode != 0 {
+				continue
+			}
+
+			topicsToRefresh = append(topicsToRefresh, topic.Name)
+		}
+
+		p.refreshMetadata(ctx, topicsToRefresh)
+	case *meta.Response:
+		m := req.(*meta.Request)
+		// If we get here with auto topic creation allowed, then we didn't have
+		// that topic in our cache, so we should update the cache.
+		if m.AllowAutoTopicCreation {
+			topicsToRefresh := make([]string, 0, len(resp.Topics))
+			for _, topic := range resp.Topics {
+				// Don't refresh topics that failed to create, since that may mean
+				// automatic topic creation is not enabled on the broker.
+				// Refreshing them would cause the library to hang indefinitely,
+				// just like refreshing topics that failed to create in the
+				// createtopics path. Fixes issue 806.
+				if topic.ErrorCode != 0 {
+					continue
+				}
+
+				topicsToRefresh = append(topicsToRefresh, topic.Name)
+			}
+			p.refreshMetadata(ctx, topicsToRefresh)
+		}
+	}
+
+	return r, nil
+}
+
+// refreshMetadata forces an update of the cached cluster metadata, and waits
+// for the given list of topics to appear. This waiting mechanism is necessary
+// to account for the fact that topic creation is asynchronous in kafka, and
+// causes subsequent requests to fail while the cluster state is propagated to
+// all the brokers.
+func (p *connPool) refreshMetadata(ctx context.Context, expectTopics []string) {
+	minBackoff := 100 * time.Millisecond
+	maxBackoff := 2 * time.Second
+	cancel := ctx.Done()
+
+	for ctx.Err() == nil {
+		notify := make(event)
+		select {
+		case <-cancel:
+			return
+		case p.wake <- notify:
+			select {
+			case <-notify:
+			case <-cancel:
+				return
+			}
+		}
+
+		state := p.grabState()
+		found := 0
+
+		for _, topic := range expectTopics {
+			if _, ok := state.layout.Topics[topic]; ok {
+				found++
+			}
+		}
+
+		if found == len(expectTopics) {
+			return
+		}
+
+		if delay := time.Duration(rand.Int63n(int64(minBackoff))); delay > 0 {
+			timer := time.NewTimer(minBackoff)
+			select {
+			case <-cancel:
+			case <-timer.C:
+			}
+			timer.Stop()
+
+			if minBackoff *= 2; minBackoff > maxBackoff {
+				minBackoff = maxBackoff
+			}
+		}
+	}
+}
+
+func (p *connPool) setReady() {
+	p.once.Do(p.ready.trigger)
+}
+
+// update is called periodically by the goroutine running the discover method
+// to refresh the cluster layout information used by the transport to route
+// requests to brokers.
+func (p *connPool) update(ctx context.Context, metadata *meta.Response, err error) {
+	var layout protocol.Cluster
+
+	if metadata != nil {
+		metadata.ThrottleTimeMs = 0
+
+		// Normalize the lists so we can apply binary search on them.
+		sortMetadataBrokers(metadata.Brokers)
+		sortMetadataTopics(metadata.Topics)
+
+		for i := range metadata.Topics {
+			t := &metadata.Topics[i]
+			sortMetadataPartitions(t.Partitions)
+		}
+
+		layout = makeLayout(metadata)
+	}
+
+	state := p.grabState()
+	addBrokers := make(map[int32]struct{})
+	delBrokers := make(map[int32]struct{})
+
+	if err != nil {
+		// Only update the error on the transport if the cluster layout was
+		// unknown. This ensures that we prioritize a previously known state
+		// of the cluster to reduce the impact of transient failures.
+		if state.metadata != nil {
+			return
+		}
+		state.err = err
+	} else {
+		for id, b2 := range layout.Brokers {
+			if b1, ok := state.layout.Brokers[id]; !ok {
+				addBrokers[id] = struct{}{}
+			} else if b1 != b2 {
+				addBrokers[id] = struct{}{}
+				delBrokers[id] = struct{}{}
+			}
+		}
+
+		for id := range state.layout.Brokers {
+			if _, ok := layout.Brokers[id]; !ok {
+				delBrokers[id] = struct{}{}
+			}
+		}
+
+		state.metadata, state.layout = metadata, layout
+		state.err = nil
+	}
+
+	defer p.setReady()
+	defer p.setState(state)
+
+	if len(addBrokers) != 0 || len(delBrokers) != 0 {
+		// Only acquire the lock when there is a change of layout. This is an
+		// infrequent event so we don't risk introducing regular contention on
+		// the mutex if we were to lock it on every update.
+		p.mutex.Lock()
+		defer p.mutex.Unlock()
+
+		if ctx.Err() != nil {
+			return // the pool has been closed, no need to update
+		}
+
+		for id := range delBrokers {
+			if broker := p.conns[id]; broker != nil {
+				broker.closeIdleConns()
+				delete(p.conns, id)
+			}
+		}
+
+		for id := range addBrokers {
+			broker := layout.Brokers[id]
+			p.conns[id] = p.newBrokerConnGroup(Broker{
+				Rack: broker.Rack,
+				Host: broker.Host,
+				Port: int(broker.Port),
+				ID:   int(broker.ID),
+			})
+		}
+	}
+}
+
+// discover is the entry point of an internal goroutine for the transport which
+// periodically requests updates of the cluster metadata and refreshes the
+// transport cached cluster layout.
+func (p *connPool) discover(ctx context.Context, wake <-chan event) {
+	prng := rand.New(rand.NewSource(time.Now().UnixNano()))
+	metadataTTL := func() time.Duration {
+		return time.Duration(prng.Int63n(int64(p.metadataTTL)))
+	}
+
+	timer := time.NewTimer(metadataTTL())
+	defer timer.Stop()
+
+	var notify event
+	done := ctx.Done()
+
+	req := &meta.Request{
+		TopicNames: p.metadataTopics,
+	}
+
+	for {
+		c, err := p.grabClusterConn(ctx)
+		if err != nil {
+			p.update(ctx, nil, err)
+		} else {
+			res := make(async, 1)
+			deadline, cancel := context.WithTimeout(ctx, p.metadataTTL)
+			c.reqs <- connRequest{
+				ctx: deadline,
+				req: req,
+				res: res,
+			}
+			r, err := res.await(deadline)
+			cancel()
+			if err != nil && errors.Is(err, ctx.Err()) {
+				return
+			}
+			ret, _ := r.(*meta.Response)
+			p.update(ctx, ret, err)
+		}
+
+		if notify != nil {
+			notify.trigger()
+			notify = nil
+		}
+
+		select {
+		case <-timer.C:
+			timer.Reset(metadataTTL())
+		case <-done:
+			return
+		case notify = <-wake:
+		}
+	}
+}
+
+// grabBrokerConn returns a connection to a specific broker represented by the
+// broker id passed as argument. If the broker id was not known, an error is
+// returned.
+func (p *connPool) grabBrokerConn(ctx context.Context, brokerID int32) (*conn, error) {
+	p.mutex.RLock()
+	g := p.conns[brokerID]
+	p.mutex.RUnlock()
+	if g == nil {
+		return nil, BrokerNotAvailable
+	}
+	return g.grabConnOrConnect(ctx)
+}
+
+// grabClusterConn returns the connection to the kafka cluster that the pool is
+// configured to connect to.
+//
+// The transport uses a shared `control` connection to the cluster for any
+// requests that aren't supposed to be sent to specific brokers. Requests
+// intended to be routed to specific brokers (e.g. Fetch or Produce requests)
+// are dispatched on a separate pool of connections that the transport
+// maintains. This split helps avoid head-of-line blocking situations where
+// control requests like Metadata would be queued behind large responses to
+// Fetch requests, for example.
+//
+// In either case, the requests are multiplexed so we can keep a minimal number
+// of connections open (N+1, where N is the number of brokers in the cluster).
+func (p *connPool) grabClusterConn(ctx context.Context) (*conn, error) {
+	return p.ctrl.grabConnOrConnect(ctx)
+}
+
+func (p *connPool) sendRequest(ctx context.Context, req Request, state connPoolState) promise {
+	brokerID := int32(-1)
+
+	switch m := req.(type) {
+	case protocol.BrokerMessage:
+		// Some requests are supposed to be sent to specific brokers (e.g. the
+		// partition leaders). They implement the BrokerMessage interface to
+		// delegate the routing decision to each message type.
+		broker, err := m.Broker(state.layout)
+		if err != nil {
+			return reject(err)
+		}
+		brokerID = broker.ID
+
+	case protocol.GroupMessage:
+		// Some requests are supposed to be sent to a group coordinator,
+		// look up which broker is currently the coordinator for the group
+		// so we can get a connection to that broker.
+		//
+		// TODO: should we cache the coordinator info?
+		p := p.sendRequest(ctx, &findcoordinator.Request{Key: m.Group()}, state)
+		r, err := p.await(ctx)
+		if err != nil {
+			return reject(err)
+		}
+		brokerID = r.(*findcoordinator.Response).NodeID
+	case protocol.TransactionalMessage:
+		p := p.sendRequest(ctx, &findcoordinator.Request{
+			Key:     m.Transaction(),
+			KeyType: int8(CoordinatorKeyTypeTransaction),
+		}, state)
+		r, err := p.await(ctx)
+		if err != nil {
+			return reject(err)
+		}
+		brokerID = r.(*findcoordinator.Response).NodeID
+	}
+
+	var c *conn
+	var err error
+	if brokerID >= 0 {
+		c, err = p.grabBrokerConn(ctx, brokerID)
+	} else {
+		c, err = p.grabClusterConn(ctx)
+	}
+	if err != nil {
+		return reject(err)
+	}
+
+	res := make(async, 1)
+
+	c.reqs <- connRequest{
+		ctx: ctx,
+		req: req,
+		res: res,
+	}
+
+	return res
+}
+
+func filterMetadataResponse(req *meta.Request, res *meta.Response) *meta.Response {
+	ret := *res
+
+	if req.TopicNames != nil {
+		ret.Topics = make([]meta.ResponseTopic, len(req.TopicNames))
+
+		for i, topicName := range req.TopicNames {
+			j, ok := findMetadataTopic(res.Topics, topicName)
+			if ok {
+				ret.Topics[i] = res.Topics[j]
+			} else {
+				ret.Topics[i] = meta.ResponseTopic{
+					ErrorCode: int16(UnknownTopicOrPartition),
+					Name:      topicName,
+				}
+			}
+		}
+	}
+
+	return &ret
+}
+
+func findMetadataTopic(topics []meta.ResponseTopic, topicName string) (int, bool) {
+	i := sort.Search(len(topics), func(i int) bool {
+		return topics[i].Name >= topicName
+	})
+	return i, i >= 0 && i < len(topics) && topics[i].Name == topicName
+}
+
+func sortMetadataBrokers(brokers []meta.ResponseBroker) {
+	sort.Slice(brokers, func(i, j int) bool {
+		return brokers[i].NodeID < brokers[j].NodeID
+	})
+}
+
+func sortMetadataTopics(topics []meta.ResponseTopic) {
+	sort.Slice(topics, func(i, j int) bool {
+		return topics[i].Name < topics[j].Name
+	})
+}
+
+func sortMetadataPartitions(partitions []meta.ResponsePartition) {
+	sort.Slice(partitions, func(i, j int) bool {
+		return partitions[i].PartitionIndex < partitions[j].PartitionIndex
+	})
+}
+
+func makeLayout(metadataResponse *meta.Response) protocol.Cluster {
+	layout := protocol.Cluster{
+		Controller: metadataResponse.ControllerID,
+		Brokers:    make(map[int32]protocol.Broker),
+		Topics:     make(map[string]protocol.Topic),
+	}
+
+	for _, broker := range metadataResponse.Brokers {
+		layout.Brokers[broker.NodeID] = protocol.Broker{
+			Rack: broker.Rack,
+			Host: broker.Host,
+			Port: broker.Port,
+			ID:   broker.NodeID,
+		}
+	}
+
+	for _, topic := range metadataResponse.Topics {
+		if topic.IsInternal {
+			continue // TODO: do we need to expose those?
+		}
+		layout.Topics[topic.Name] = protocol.Topic{
+			Name:       topic.Name,
+			Error:      topic.ErrorCode,
+			Partitions: makePartitions(topic.Partitions),
+		}
+	}
+
+	return layout
+}
+
+func makePartitions(metadataPartitions []meta.ResponsePartition) map[int32]protocol.Partition {
+	protocolPartitions := make(map[int32]protocol.Partition, len(metadataPartitions))
+	numBrokerIDs := 0
+
+	for _, p := range metadataPartitions {
+		numBrokerIDs += len(p.ReplicaNodes) + len(p.IsrNodes) + len(p.OfflineReplicas)
+	}
+
+	// Reduce the memory footprint a bit by allocating a single buffer to write
+	// all broker ids.
+	brokerIDs := make([]int32, 0, numBrokerIDs)
+
+	for _, p := range metadataPartitions {
+		var rep, isr, off []int32
+		brokerIDs, rep = appendBrokerIDs(brokerIDs, p.ReplicaNodes)
+		brokerIDs, isr = appendBrokerIDs(brokerIDs, p.IsrNodes)
+		brokerIDs, off = appendBrokerIDs(brokerIDs, p.OfflineReplicas)
+
+		protocolPartitions[p.PartitionIndex] = protocol.Partition{
+			ID:       p.PartitionIndex,
+			Error:    p.ErrorCode,
+			Leader:   p.LeaderID,
+			Replicas: rep,
+			ISR:      isr,
+			Offline:  off,
+		}
+	}
+
+	return protocolPartitions
+}
+
+func appendBrokerIDs(ids, brokers []int32) ([]int32, []int32) {
+	i := len(ids)
+	ids = append(ids, brokers...)
+	return ids, ids[i:len(ids):len(ids)]
+}
+
+func (p *connPool) newConnGroup(a net.Addr) *connGroup {
+	return &connGroup{
+		addr: a,
+		pool: p,
+		broker: Broker{
+			ID: -1,
+		},
+	}
+}
+
+func (p *connPool) newBrokerConnGroup(broker Broker) *connGroup {
+	return &connGroup{
+		addr: &networkAddress{
+			network: "tcp",
+			address: net.JoinHostPort(broker.Host, strconv.Itoa(broker.Port)),
+		},
+		pool:   p,
+		broker: broker,
+	}
+}
+
+type connRequest struct {
+	ctx context.Context
+	req Request
+	res async
+}
+
+// The promise interface is used as a message passing abstraction to coordinate
+// between goroutines that handle requests and responses.
+type promise interface {
+	// Waits until the promise is resolved, rejected, or the context is canceled.
+	await(context.Context) (Response, error)
+}
+
+// async is an implementation of the promise interface which supports resolving
+// or rejecting the await call asynchronously.
+type async chan interface{}
+
+func (p async) await(ctx context.Context) (Response, error) {
+	select {
+	case x := <-p:
+		switch v := x.(type) {
+		case nil:
+			return nil, nil // A nil response is ok (e.g. when RequiredAcks is None)
+		case Response:
+			return v, nil
+		case error:
+			return nil, v
+		default:
+			panic(fmt.Errorf("BUG: promise resolved with impossible value of type %T", v))
+		}
+	case <-ctx.Done():
+		return nil, ctx.Err()
+	}
+}
+
+func (p async) resolve(res Response) { p <- res }
+
+func (p async) reject(err error) { p <- err }
+
+// rejected is an implementation of the promise interface which always
+// returns an error. Values of this type are constructed using the reject
+// function.
+type rejected struct{ err error }
+
+func reject(err error) promise { return &rejected{err: err} }
+
+func (p *rejected) await(ctx context.Context) (Response, error) {
+	return nil, p.err
+}
+
+// joined is an implementation of the promise interface which merges results
+// from multiple promises into one await call using a merger.
+type joined struct {
+	promises []promise
+	requests []Request
+	merger   protocol.Merger
+}
+
+func join(promises []promise, requests []Request, merger protocol.Merger) promise {
+	return &joined{
+		promises: promises,
+		requests: requests,
+		merger:   merger,
+	}
+}
+
+func (p *joined) await(ctx context.Context) (Response, error) {
+	results := make([]interface{}, len(p.promises))
+
+	for i, sub := range p.promises {
+		m, err := sub.await(ctx)
+		if err != nil {
+			results[i] = err
+		} else {
+			results[i] = m
+		}
+	}
+
+	return p.merger.Merge(p.requests, results)
+}
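The promise, async, rejected, and joined types above form the whole coordination surface between the goroutines that submit requests and the ones that own connections. A minimal sketch of the resolve/await handshake (the statements below assume they run inside the kafka package, since these types are unexported, and the response value is made up):

	// The sender side creates a buffered promise and hands it to the goroutine
	// that owns the connection; the caller then blocks on await.
	res := make(async, 1)

	go func() {
		// The connection goroutine resolves with a response on success,
		// or rejects with an error.
		res.resolve(&meta.Response{})
	}()

	r, err := res.await(context.Background())
	_, _ = r, err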
+
+// Default dialer used by the transport connections when no Dial function
+// was configured by the program.
+var defaultDialer = net.Dialer{
+	Timeout:   3 * time.Second,
+	DualStack: true,
+}
+
+// connGroup represents a logical connection group to a kafka broker. The
+// actual network connections are lazily opened before sending requests, and
+// closed if they are unused for longer than the idle timeout.
+type connGroup struct {
+	addr   net.Addr
+	broker Broker
+	// Immutable state of the connection.
+	pool *connPool
+	// Shared state of the connection group; access to these fields is
+	// synchronized on the mutex below.
+	mutex     sync.Mutex
+	closed    bool
+	idleConns []*conn // stack of idle connections
+}
+
+func (g *connGroup) closeIdleConns() {
+	g.mutex.Lock()
+	conns := g.idleConns
+	g.idleConns = nil
+	g.closed = true
+	g.mutex.Unlock()
+
+	for _, c := range conns {
+		c.close()
+	}
+}
+
+func (g *connGroup) grabConnOrConnect(ctx context.Context) (*conn, error) {
+	rslv := g.pool.resolver
+	addr := g.addr
+	var c *conn
+
+	if rslv == nil {
+		c = g.grabConn()
+	} else {
+		var err error
+		broker := g.broker
+
+		if broker.ID < 0 {
+			host, port, err := splitHostPortNumber(addr.String())
+			if err != nil {
+				return nil, err
+			}
+			broker.Host = host
+			broker.Port = port
+		}
+
+		ipAddrs, err := rslv.LookupBrokerIPAddr(ctx, broker)
+		if err != nil {
+			return nil, err
+		}
+
+		for _, ipAddr := range ipAddrs {
+			network := addr.Network()
+			address := net.JoinHostPort(ipAddr.String(), strconv.Itoa(broker.Port))
+
+			if c = g.grabConnTo(network, address); c != nil {
+				break
+			}
+		}
+	}
+
+	if c == nil {
+		connChan := make(chan *conn)
+		errChan := make(chan error)
+
+		go func() {
+			c, err := g.connect(ctx, addr)
+			if err != nil {
+				select {
+				case errChan <- err:
+				case <-ctx.Done():
+				}
+			} else {
+				select {
+				case connChan <- c:
+				case <-ctx.Done():
+					if !g.releaseConn(c) {
+						c.close()
+					}
+				}
+			}
+		}()
+
+		select {
+		case c = <-connChan:
+		case err := <-errChan:
+			return nil, err
+		case <-ctx.Done():
+			return nil, ctx.Err()
+		}
+	}
+
+	return c, nil
+}
+
+func (g *connGroup) grabConnTo(network, address string) *conn {
+	g.mutex.Lock()
+	defer g.mutex.Unlock()
+
+	for i := len(g.idleConns) - 1; i >= 0; i-- {
+		c := g.idleConns[i]
+
+		if c.network == network && c.address == address {
+			copy(g.idleConns[i:], g.idleConns[i+1:])
+			n := len(g.idleConns) - 1
+			g.idleConns[n] = nil
+			g.idleConns = g.idleConns[:n]
+
+			if c.timer != nil {
+				c.timer.Stop()
+			}
+
+			return c
+		}
+	}
+
+	return nil
+}
+
+func (g *connGroup) grabConn() *conn {
+	g.mutex.Lock()
+	defer g.mutex.Unlock()
+
+	if len(g.idleConns) == 0 {
+		return nil
+	}
+
+	n := len(g.idleConns) - 1
+	c := g.idleConns[n]
+	g.idleConns[n] = nil
+	g.idleConns = g.idleConns[:n]
+
+	if c.timer != nil {
+		c.timer.Stop()
+	}
+
+	return c
+}
+
+func (g *connGroup) removeConn(c *conn) bool {
+	g.mutex.Lock()
+	defer g.mutex.Unlock()
+
+	if c.timer != nil {
+		c.timer.Stop()
+	}
+
+	for i, x := range g.idleConns {
+		if x == c {
+			copy(g.idleConns[i:], g.idleConns[i+1:])
+			n := len(g.idleConns) - 1
+			g.idleConns[n] = nil
+			g.idleConns = g.idleConns[:n]
+			return true
+		}
+	}
+
+	return false
+}
+
+func (g *connGroup) releaseConn(c *conn) bool {
+	idleTimeout := g.pool.idleTimeout
+
+	g.mutex.Lock()
+	defer g.mutex.Unlock()
+
+	if g.closed {
+		return false
+	}
+
+	if c.timer != nil {
+		c.timer.Reset(idleTimeout)
+	} else {
+		c.timer = time.AfterFunc(idleTimeout, func() {
+			if g.removeConn(c) {
+				c.close()
+			}
+		})
+	}
+
+	g.idleConns = append(g.idleConns, c)
+	return true
+}
+
+func (g *connGroup) connect(ctx context.Context, addr net.Addr) (*conn, error) {
+	deadline := time.Now().Add(g.pool.dialTimeout)
+
+	ctx, cancel := context.WithDeadline(ctx, deadline)
+	defer cancel()
+
+	network := strings.Split(addr.Network(), ",")
+	address := strings.Split(addr.String(), ",")
+	var netConn net.Conn
+	var netAddr net.Addr
+	var err error
+
+	if len(address) > 1 {
+		// Shuffle the list of addresses to randomize the order in which
+		// connections are attempted. This prevents routing all connections
+		// to the first broker (which will usually succeed).
+		rand.Shuffle(len(address), func(i, j int) {
+			network[i], network[j] = network[j], network[i]
+			address[i], address[j] = address[j], address[i]
+		})
+	}
+
+	for i := range address {
+		netConn, err = g.pool.dial(ctx, network[i], address[i])
+		if err == nil {
+			netAddr = &networkAddress{
+				network: network[i],
+				address: address[i],
+			}
+			break
+		}
+	}
+
+	if err != nil {
+		return nil, err
+	}
+
+	defer func() {
+		if netConn != nil {
+			netConn.Close()
+		}
+	}()
+
+	if tlsConfig := g.pool.tls; tlsConfig != nil {
+		if tlsConfig.ServerName == "" {
+			host, _ := splitHostPort(netAddr.String())
+			tlsConfig = tlsConfig.Clone()
+			tlsConfig.ServerName = host
+		}
+		netConn = tls.Client(netConn, tlsConfig)
+	}
+
+	pc := protocol.NewConn(netConn, g.pool.clientID)
+	pc.SetDeadline(deadline)
+
+	r, err := pc.RoundTrip(new(apiversions.Request))
+	if err != nil {
+		return nil, err
+	}
+	res := r.(*apiversions.Response)
+	ver := make(map[protocol.ApiKey]int16, len(res.ApiKeys))
+
+	if res.ErrorCode != 0 {
+		return nil, fmt.Errorf("negotating API versions with kafka broker at %s: %w", g.addr, Error(res.ErrorCode))
+	}
+
+	for _, r := range res.ApiKeys {
+		apiKey := protocol.ApiKey(r.ApiKey)
+		ver[apiKey] = apiKey.SelectVersion(r.MinVersion, r.MaxVersion)
+	}
+
+	pc.SetVersions(ver)
+	pc.SetDeadline(time.Time{})
+
+	if g.pool.sasl != nil {
+		host, port, err := splitHostPortNumber(netAddr.String())
+		if err != nil {
+			return nil, err
+		}
+		metadata := &sasl.Metadata{
+			Host: host,
+			Port: port,
+		}
+		if err := authenticateSASL(sasl.WithMetadata(ctx, metadata), pc, g.pool.sasl); err != nil {
+			return nil, err
+		}
+	}
+
+	reqs := make(chan connRequest)
+	c := &conn{
+		network: netAddr.Network(),
+		address: netAddr.String(),
+		reqs:    reqs,
+		group:   g,
+	}
+	go c.run(pc, reqs)
+
+	netConn = nil
+	return c, nil
+}
+
+type conn struct {
+	reqs    chan<- connRequest
+	network string
+	address string
+	once    sync.Once
+	group   *connGroup
+	timer   *time.Timer
+}
+
+func (c *conn) close() {
+	c.once.Do(func() { close(c.reqs) })
+}
+
+func (c *conn) run(pc *protocol.Conn, reqs <-chan connRequest) {
+	defer pc.Close()
+
+	for cr := range reqs {
+		r, err := c.roundTrip(cr.ctx, pc, cr.req)
+		if err != nil {
+			cr.res.reject(err)
+			if !errors.Is(err, protocol.ErrNoRecord) {
+				break
+			}
+		} else {
+			cr.res.resolve(r)
+		}
+		if !c.group.releaseConn(c) {
+			break
+		}
+	}
+}
+
+func (c *conn) roundTrip(ctx context.Context, pc *protocol.Conn, req Request) (Response, error) {
+	pprof.SetGoroutineLabels(ctx)
+	defer pprof.SetGoroutineLabels(context.Background())
+
+	if deadline, hasDeadline := ctx.Deadline(); hasDeadline {
+		pc.SetDeadline(deadline)
+		defer pc.SetDeadline(time.Time{})
+	}
+
+	return pc.RoundTrip(req)
+}
+
+// authenticateSASL performs all of the required requests to authenticate this
+// connection.  If any step fails, this function returns with an error.  A nil
+// error indicates successful authentication.
+func authenticateSASL(ctx context.Context, pc *protocol.Conn, mechanism sasl.Mechanism) error {
+	if err := saslHandshakeRoundTrip(pc, mechanism.Name()); err != nil {
+		return err
+	}
+
+	sess, state, err := mechanism.Start(ctx)
+	if err != nil {
+		return err
+	}
+
+	for completed := false; !completed; {
+		challenge, err := saslAuthenticateRoundTrip(pc, state)
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				// the broker may communicate a failed exchange by closing the
+				// connection (esp. in the case where we're passing opaque sasl
+				// data over the wire since there's no protocol info).
+				return SASLAuthenticationFailed
+			}
+
+			return err
+		}
+
+		completed, state, err = sess.Next(ctx, challenge)
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// saslHandshake sends the SASL handshake message.  This will determine whether
+// the Mechanism is supported by the cluster.  If it's not, this function will
+// error out with UnsupportedSASLMechanism.
+//
+// If the mechanism is unsupported, the handshake request will reply with the
+// list of the cluster's configured mechanisms, which could potentially be used
+// to facilitate negotiation.  At the moment, we are not negotiating the
+// mechanism as we believe that brokers are usually known to the client, and
+// therefore the client should already know which mechanisms are supported.
+//
+// See http://kafka.apache.org/protocol.html#The_Messages_SaslHandshake
+func saslHandshakeRoundTrip(pc *protocol.Conn, mechanism string) error {
+	msg, err := pc.RoundTrip(&saslhandshake.Request{
+		Mechanism: mechanism,
+	})
+	if err != nil {
+		return err
+	}
+	res := msg.(*saslhandshake.Response)
+	if res.ErrorCode != 0 {
+		err = Error(res.ErrorCode)
+	}
+	return err
+}
+
+// saslAuthenticate sends the SASL authenticate message.  This function must
+// be immediately preceded by a successful saslHandshake.
+//
+// See http://kafka.apache.org/protocol.html#The_Messages_SaslAuthenticate
+func saslAuthenticateRoundTrip(pc *protocol.Conn, data []byte) ([]byte, error) {
+	msg, err := pc.RoundTrip(&saslauthenticate.Request{
+		AuthBytes: data,
+	})
+	if err != nil {
+		return nil, err
+	}
+	res := msg.(*saslauthenticate.Response)
+	if res.ErrorCode != 0 {
+		err = makeError(res.ErrorCode, res.ErrorMessage)
+	}
+	return res.AuthBytes, err
+}
+
+var _ RoundTripper = (*Transport)(nil)
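To tie the pieces together, a minimal sketch of configuring a Transport and sharing it with a Client; the address, timeouts, and credentials are placeholders, and the TLS and SASL fields can be omitted for plaintext clusters:

	package main

	import (
		"crypto/tls"
		"time"

		"github.com/segmentio/kafka-go"
		"github.com/segmentio/kafka-go/sasl/plain"
	)

	func main() {
		transport := &kafka.Transport{
			DialTimeout: 5 * time.Second,
			IdleTimeout: 30 * time.Second,
			MetadataTTL: 6 * time.Second,
			ClientID:    "my-service",
			TLS:         &tls.Config{},
			SASL:        plain.Mechanism{Username: "username", Password: "password"},
		}
		defer transport.CloseIdleConnections()

		client := &kafka.Client{
			Addr:      kafka.TCP("localhost:9093"),
			Timeout:   10 * time.Second,
			Transport: transport,
		}
		_ = client // requests go through transport.RoundTrip via client.Metadata, client.Fetch, etc.
	}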
diff -pruN 0.2.1-1.1/transport_test.go 0.4.49+ds1-1/transport_test.go
--- 0.2.1-1.1/transport_test.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/transport_test.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,306 @@
+package kafka
+
+import (
+	"context"
+	"crypto/tls"
+	"errors"
+	"net"
+	"testing"
+	"time"
+
+	"github.com/segmentio/kafka-go/protocol"
+	"github.com/segmentio/kafka-go/protocol/createtopics"
+	meta "github.com/segmentio/kafka-go/protocol/metadata"
+)
+
+func TestIssue477(t *testing.T) {
+	// This test verifies that a connection attempt with a minimal TLS
+	// configuration does not panic.
+	l, err := net.Listen("tcp", "127.0.0.1:0")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer l.Close()
+
+	cg := connGroup{
+		addr: l.Addr(),
+		pool: &connPool{
+			dial: defaultDialer.DialContext,
+			tls:  &tls.Config{},
+		},
+	}
+
+	if _, err := cg.connect(context.Background(), cg.addr); err != nil {
+		// An error is expected here because we are not actually establishing
+		// a TLS connection to a kafka broker.
+		t.Log(err)
+	} else {
+		t.Error("no error was reported when attempting to establish a TLS connection to a non-TLS endpoint")
+	}
+}
+
+func TestIssue672(t *testing.T) {
+	// ensure the test times out if the bug is re-introduced
+	ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second)
+	defer cancel()
+
+	// we'll simulate a situation with one good topic and one bad topic (bad configuration)
+	const brokenTopicName = "bad-topic"
+	const okTopicName = "good-topic"
+
+	// make the connection pool think it's immediately ready to send
+	ready := make(chan struct{})
+	close(ready)
+
+	// allow the system to wake as much as it wants
+	wake := make(chan event)
+	defer close(wake)
+	go func() {
+		for {
+			select {
+			case <-ctx.Done():
+				return
+			case e := <-wake:
+				if e == nil {
+					return
+				}
+				e.trigger()
+			}
+		}
+	}()
+
+	// handle requests by immediately resolving them with a create topics response,
+	// the "bad topic" will have an error value
+	requests := make(chan connRequest, 1)
+	defer close(requests)
+	go func() {
+		request := <-requests
+		request.res.resolve(&createtopics.Response{
+			ThrottleTimeMs: 0,
+			Topics: []createtopics.ResponseTopic{
+				{
+					Name:         brokenTopicName,
+					ErrorCode:    int16(InvalidPartitionNumber),
+					ErrorMessage: InvalidPartitionNumber.Description(),
+				},
+				{
+					Name:              okTopicName,
+					NumPartitions:     1,
+					ReplicationFactor: 1,
+				},
+			},
+		})
+	}()
+
+	pool := &connPool{
+		ready: ready,
+		wake:  wake,
+		conns: map[int32]*connGroup{},
+	}
+
+	// configure the state so it can find the good topic, but not the one that fails to create
+	pool.setState(connPoolState{
+		layout: protocol.Cluster{
+			Topics: map[string]protocol.Topic{
+				okTopicName: {
+					Name: okTopicName,
+					Partitions: map[int32]protocol.Partition{
+						0: {},
+					},
+				},
+			},
+		},
+	})
+
+	// trick the connection pool into thinking it has a valid connection to a broker
+	pool.conns[0] = &connGroup{
+		pool:   pool,
+		broker: Broker{},
+		idleConns: []*conn{
+			{
+				reqs: requests,
+			},
+		},
+	}
+
+	// perform the round trip:
+	// - if the issue is presenting this will hang waiting for metadata to arrive that will
+	//   never arrive, causing a deadline timeout.
+	// - if the issue is fixed this will resolve almost instantaneously
+	r, err := pool.roundTrip(ctx, &createtopics.Request{
+		Topics: []createtopics.RequestTopic{
+			{
+				Name:              brokenTopicName,
+				NumPartitions:     0,
+				ReplicationFactor: 1,
+			},
+			{
+				Name:              okTopicName,
+				NumPartitions:     1,
+				ReplicationFactor: 1,
+			},
+		},
+	})
+	// detect if the issue is presenting using the context timeout (note that checking the err return value
+	// isn't good enough as the original implementation didn't return the context cancellation error due to
+	// being run in a defer)
+	if errors.Is(ctx.Err(), context.DeadlineExceeded) {
+		t.Fatalf("issue 672 is presenting! roundTrip should not have timed out")
+	}
+
+	// ancillary assertions as general house-keeping, not directly related to the issue:
+
+	// we're not expecting any errors in this test
+	if err != nil {
+		t.Fatalf("unexpected error provoking connection pool roundTrip: %v", err)
+	}
+
+	// we expect a response containing the errors from the broker
+	if r == nil {
+		t.Fatal("expected a non-nil response")
+	}
+
+	// we expect to get back the create topics response we resolved earlier
+	_, ok := r.(*createtopics.Response)
+	if !ok {
+		t.Fatalf("expected a createtopics.Response but got %T", r)
+	}
+}
+
+func TestIssue806(t *testing.T) {
+	// ensure the test times out if the bug is re-introduced
+	ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second)
+	defer cancel()
+
+	// simulate an unknown topic that we want to auto-create, named unknownTopicName
+	const unknownTopicName = "unknown-topic"
+	const okTopicName = "good-topic"
+
+	// make the connection pool think it's immediately ready to send
+	ready := make(chan struct{})
+	close(ready)
+
+	// allow the system to wake as much as it wants
+	wake := make(chan event)
+	defer close(wake)
+	go func() {
+		for {
+			select {
+			case <-ctx.Done():
+				return
+			case e := <-wake:
+				if e == nil {
+					return
+				}
+				e.trigger()
+			}
+		}
+	}()
+
+	// handle requests by immediately resolving them with a metadata response;
+	// the "unknown topic" will have error UNKNOWN_TOPIC_OR_PARTITION
+	requests := make(chan connRequest, 1)
+	defer close(requests)
+	go func() {
+		request := <-requests
+		request.res.resolve(&meta.Response{
+			Topics: []meta.ResponseTopic{
+				{
+					Name:      unknownTopicName,
+					ErrorCode: int16(UnknownTopicOrPartition),
+				},
+				{
+					Name: okTopicName,
+					Partitions: []meta.ResponsePartition{
+						{
+							PartitionIndex: 0,
+						},
+					},
+				},
+			},
+		})
+	}()
+
+	pool := &connPool{
+		ready: ready,
+		wake:  wake,
+		conns: map[int32]*connGroup{},
+	}
+
+	// configure the state:
+	//
+	// the cached metadata only contains the good topic, so the pool has to
+	// send a metadata request because the unknown topic cannot be found in
+	// the cache.
+	//
+	// the layout also only contains the good topic, so the pool can find the
+	// good topic but not the one that has yet to be created.
+	pool.setState(connPoolState{
+		metadata: &meta.Response{
+			Topics: []meta.ResponseTopic{
+				{
+					Name: okTopicName,
+					Partitions: []meta.ResponsePartition{
+						{
+							PartitionIndex: 0,
+						},
+					},
+				},
+			},
+		},
+		layout: protocol.Cluster{
+			Topics: map[string]protocol.Topic{
+				okTopicName: {
+					Name: okTopicName,
+					Partitions: map[int32]protocol.Partition{
+						0: {},
+					},
+				},
+			},
+		},
+	})
+
+	// trick the connection pool into thinking it has a valid connection to request metadata
+	pool.ctrl = &connGroup{
+		pool:   pool,
+		broker: Broker{},
+		idleConns: []*conn{
+			{
+				reqs: requests,
+			},
+		},
+	}
+
+	// perform the round trip:
+	// - if the issue is presenting this will hang waiting for metadata to arrive that will
+	//   never arrive, causing a deadline timeout.
+	// - if the issue is fixed this will resolve almost instantaneously
+	r, err := pool.roundTrip(ctx, &meta.Request{
+		TopicNames:             []string{unknownTopicName},
+		AllowAutoTopicCreation: true,
+	})
+	// detect if the issue is presenting using the context timeout (note that checking the err return value
+	// isn't good enough as the original implementation didn't return the context cancellation error due to
+	// being run in a defer)
+	if errors.Is(ctx.Err(), context.DeadlineExceeded) {
+		t.Fatalf("issue 806 is present: roundTrip should not have timed out")
+	}
+
+	// ancillary assertions as general house-keeping, not directly related to the issue:
+
+	// we're not expecting any errors in this test
+	if err != nil {
+		t.Fatalf("unexpected error provoking connection pool roundTrip: %v", err)
+	}
+
+	// we expect a response containing the errors from the broker
+	if r == nil {
+		t.Fatal("expected a non-nil response")
+	}
+
+	// we expect the response to be the meta.Response we resolved earlier
+	_, ok := r.(*meta.Response)
+	if !ok {
+		t.Fatalf("expected a meta.Response but got %T", r)
+	}
+}
diff -pruN 0.2.1-1.1/txnoffsetcommit.go 0.4.49+ds1-1/txnoffsetcommit.go
--- 0.2.1-1.1/txnoffsetcommit.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/txnoffsetcommit.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,142 @@
+package kafka
+
+import (
+	"context"
+	"fmt"
+	"net"
+	"time"
+
+	"github.com/segmentio/kafka-go/protocol/txnoffsetcommit"
+)
+
+// TxnOffsetCommitRequest represents a request sent to a kafka broker to commit
+// offsets for a partition within a transaction.
+type TxnOffsetCommitRequest struct {
+	// Address of the kafka broker to send the request to.
+	Addr net.Addr
+
+	// The transactional id key.
+	TransactionalID string
+
+	// ID of the consumer group to publish the offsets for.
+	GroupID string
+
+	// The Producer ID (PID) for the current producer session;
+	// received from an InitProducerID request.
+	ProducerID int
+
+	// The epoch associated with the current producer session for the given PID
+	ProducerEpoch int
+
+	// GenerationID is the current generation for the group.
+	GenerationID int
+
+	// ID of the group member submitting the offsets.
+	MemberID string
+
+	// GroupInstanceID is a unique identifier for the consumer.
+	GroupInstanceID string
+
+	// Set of topic partitions to publish the offsets for.
+	//
+	// Note that offset commits need to be submitted to the broker acting as the
+	// group coordinator. This will be automatically resolved by the transport.
+	Topics map[string][]TxnOffsetCommit
+}
+
+// TxnOffsetCommit represent the commit of an offset to a partition within a transaction.
+//
+// The extra metadata is opaque to the kafka protocol, it is intended to hold
+// information like an identifier for the process that committed the offset,
+// or the time at which the commit was made.
+type TxnOffsetCommit struct {
+	Partition int
+	Offset    int64
+	Metadata  string
+}
+
+// TxnOffsetCommitResponse represents a response from a kafka broker to an offset
+// commit request within a transaction.
+type TxnOffsetCommitResponse struct {
+	// The amount of time that the broker throttled the request.
+	Throttle time.Duration
+
+	// Set of topic partitions that the kafka broker has accepted offset commits
+	// for.
+	Topics map[string][]TxnOffsetCommitPartition
+}
+
+// TxnOffsetCommitPartition represents the state of a single partition in responses
+// to committing offsets within a transaction.
+type TxnOffsetCommitPartition struct {
+	// ID of the partition.
+	Partition int
+
+	// An error that may have occurred while attempting to publish consumer
+	// group offsets for this partition.
+	//
+	// The error contains both the kafka error code, and an error message
+	// returned by the kafka broker. Programs may use the standard errors.Is
+	// function to test the error against kafka error codes.
+	Error error
+}
+
+// TxnOffsetCommit sends a txn offset commit request to a kafka broker and returns the
+// response.
+func (c *Client) TxnOffsetCommit(
+	ctx context.Context,
+	req *TxnOffsetCommitRequest,
+) (*TxnOffsetCommitResponse, error) {
+	protoReq := &txnoffsetcommit.Request{
+		TransactionalID: req.TransactionalID,
+		GroupID:         req.GroupID,
+		ProducerID:      int64(req.ProducerID),
+		ProducerEpoch:   int16(req.ProducerEpoch),
+		GenerationID:    int32(req.GenerationID),
+		MemberID:        req.MemberID,
+		GroupInstanceID: req.GroupInstanceID,
+		Topics:          make([]txnoffsetcommit.RequestTopic, 0, len(req.Topics)),
+	}
+
+	for topic, partitions := range req.Topics {
+		parts := make([]txnoffsetcommit.RequestPartition, len(partitions))
+		for i, partition := range partitions {
+			parts[i] = txnoffsetcommit.RequestPartition{
+				Partition:         int32(partition.Partition),
+				CommittedOffset:   int64(partition.Offset),
+				CommittedMetadata: partition.Metadata,
+			}
+		}
+		t := txnoffsetcommit.RequestTopic{
+			Name:       topic,
+			Partitions: parts,
+		}
+
+		protoReq.Topics = append(protoReq.Topics, t)
+	}
+
+	m, err := c.roundTrip(ctx, req.Addr, protoReq)
+	if err != nil {
+		return nil, fmt.Errorf("kafka.(*Client).TxnOffsetCommit: %w", err)
+	}
+
+	r := m.(*txnoffsetcommit.Response)
+
+	res := &TxnOffsetCommitResponse{
+		Throttle: makeDuration(r.ThrottleTimeMs),
+		Topics:   make(map[string][]TxnOffsetCommitPartition, len(r.Topics)),
+	}
+
+	for _, topic := range r.Topics {
+		partitions := make([]TxnOffsetCommitPartition, 0, len(topic.Partitions))
+		for _, partition := range topic.Partitions {
+			partitions = append(partitions, TxnOffsetCommitPartition{
+				Partition: int(partition.Partition),
+				Error:     makeError(partition.ErrorCode, ""),
+			})
+		}
+		res.Topics[topic.Name] = partitions
+	}
+
+	return res, nil
+}
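As a rough usage sketch (not part of this patch): the helper below shows how the request and response types added above fit together. The topic name, offset, and string IDs are placeholders, and the producer and group identifiers are assumed to come from prior InitProducerID and consumer group calls, as the test file below demonstrates.

// commitOffsetsInTxn is a hypothetical helper illustrating Client.TxnOffsetCommit;
// all literal values are placeholders.
func commitOffsetsInTxn(ctx context.Context, client *Client, producerID, producerEpoch, generationID int, memberID string) error {
	resp, err := client.TxnOffsetCommit(ctx, &TxnOffsetCommitRequest{
		TransactionalID: "example-transactional-id", // placeholder
		GroupID:         "example-group",            // placeholder
		MemberID:        memberID,                   // from the consumer group generation
		ProducerID:      producerID,                 // from InitProducerID
		ProducerEpoch:   producerEpoch,              // from InitProducerID
		GenerationID:    generationID,
		Topics: map[string][]TxnOffsetCommit{
			"example-topic": {{Partition: 0, Offset: 42}},
		},
	})
	if err != nil {
		return err // transport-level failure
	}
	for topic, partitions := range resp.Topics {
		for _, p := range partitions {
			if p.Error != nil {
				return fmt.Errorf("offset commit rejected for %s[%d]: %w", topic, p.Partition, p.Error)
			}
		}
	}
	return nil
}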
diff -pruN 0.2.1-1.1/txnoffsetcommit_test.go 0.4.49+ds1-1/txnoffsetcommit_test.go
--- 0.2.1-1.1/txnoffsetcommit_test.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/txnoffsetcommit_test.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,239 @@
+package kafka
+
+import (
+	"context"
+	"log"
+	"os"
+	"strconv"
+	"testing"
+	"time"
+
+	ktesting "github.com/segmentio/kafka-go/testing"
+)
+
+func TestClientTxnOffsetCommit(t *testing.T) {
+	if !ktesting.KafkaIsAtLeast("0.11.0") {
+		t.Skip("Skipping test because kafka version is not high enough.")
+	}
+
+	// TODO: look into why this test fails on Kafka 3.0.0 and higher when transactional support
+	// work is revisited.
+	if ktesting.KafkaIsAtLeast("3.0.0") {
+		t.Skip("Skipping test because it fails on Kafka version 3.0.0 or higher.")
+	}
+
+	transactionalID := makeTransactionalID()
+	topic := makeTopic()
+
+	client, shutdown := newLocalClientWithTopic(topic, 1)
+	defer shutdown()
+	waitForTopic(context.TODO(), t, topic)
+	defer deleteTopic(t, topic)
+
+	now := time.Now()
+
+	const N = 10
+	records := make([]Record, 0, N)
+	for i := 0; i < N; i++ {
+		records = append(records, Record{
+			Time:  now,
+			Value: NewBytes([]byte("test-message-" + strconv.Itoa(i))),
+		})
+	}
+	ctx, cancel := context.WithTimeout(context.Background(), time.Second*30)
+	defer cancel()
+	res, err := client.Produce(ctx, &ProduceRequest{
+		Topic:        topic,
+		RequiredAcks: RequireAll,
+		Records:      NewRecordReader(records...),
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if res.Error != nil {
+		t.Error(res.Error)
+	}
+
+	for index, err := range res.RecordErrors {
+		t.Fatalf("record at index %d produced an error: %v", index, err)
+	}
+
+	ctx, cancel = context.WithTimeout(context.Background(), time.Second*30)
+	defer cancel()
+	respc, err := waitForCoordinatorIndefinitely(ctx, client, &FindCoordinatorRequest{
+		Addr:    client.Addr,
+		Key:     transactionalID,
+		KeyType: CoordinatorKeyTypeTransaction,
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if respc.Error != nil {
+		t.Fatal(respc.Error)
+	}
+
+	ctx, cancel = context.WithTimeout(context.Background(), time.Second*30)
+	defer cancel()
+	respc, err = waitForCoordinatorIndefinitely(ctx, client, &FindCoordinatorRequest{
+		Addr:    client.Addr,
+		Key:     transactionalID,
+		KeyType: CoordinatorKeyTypeConsumer,
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if respc.Error != nil {
+		t.Fatal(respc.Error)
+	}
+
+	ctx, cancel = context.WithTimeout(context.Background(), time.Second*30)
+	defer cancel()
+	ipResp, err := client.InitProducerID(ctx, &InitProducerIDRequest{
+		TransactionalID:      transactionalID,
+		TransactionTimeoutMs: 10000,
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if ipResp.Error != nil {
+		t.Fatal(ipResp.Error)
+	}
+
+	groupID := makeGroupID()
+
+	group, err := NewConsumerGroup(ConsumerGroupConfig{
+		ID:                groupID,
+		Topics:            []string{topic},
+		Brokers:           []string{"localhost:9092"},
+		HeartbeatInterval: 2 * time.Second,
+		RebalanceTimeout:  2 * time.Second,
+		RetentionTime:     time.Hour,
+		Logger:            log.New(os.Stdout, "cg-test: ", 0),
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer group.Close()
+
+	ctx, cancel = context.WithTimeout(context.Background(), time.Second*30)
+	defer cancel()
+	gen, err := group.Next(ctx)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	apresp, err := client.AddPartitionsToTxn(ctx, &AddPartitionsToTxnRequest{
+		TransactionalID: transactionalID,
+		ProducerID:      ipResp.Producer.ProducerID,
+		ProducerEpoch:   ipResp.Producer.ProducerEpoch,
+		Topics: map[string][]AddPartitionToTxn{
+			topic: {
+				{
+					Partition: 0,
+				},
+			},
+		},
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	appartition := apresp.Topics[topic]
+	if len(appartition) != 1 {
+		t.Fatalf("unexpected partition count; expected: 1, got: %d", len(appartition))
+	}
+
+	for _, partition := range appartition {
+		if partition.Error != nil {
+			t.Fatal(partition.Error)
+		}
+	}
+
+	client.AddOffsetsToTxn(ctx, &AddOffsetsToTxnRequest{
+		TransactionalID: transactionalID,
+		ProducerID:      ipResp.Producer.ProducerID,
+		ProducerEpoch:   ipResp.Producer.ProducerEpoch,
+		GroupID:         groupID,
+	})
+
+	ctx, cancel = context.WithTimeout(context.Background(), time.Second*30)
+	defer cancel()
+	resp, err := client.TxnOffsetCommit(ctx, &TxnOffsetCommitRequest{
+		TransactionalID: transactionalID,
+		GroupID:         groupID,
+		MemberID:        gen.MemberID,
+		ProducerID:      ipResp.Producer.ProducerID,
+		ProducerEpoch:   ipResp.Producer.ProducerEpoch,
+		GenerationID:    int(gen.ID),
+		GroupInstanceID: groupID,
+		Topics: map[string][]TxnOffsetCommit{
+			topic: {
+				{
+					Partition: 0,
+					Offset:    10,
+				},
+			},
+		},
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	partitions := resp.Topics[topic]
+
+	if len(partitions) != 1 {
+		t.Fatalf("unexpected partition count; expected: 1, got: %d", len(partitions))
+	}
+
+	for _, partition := range partitions {
+		if partition.Error != nil {
+			t.Fatal(partition.Error)
+		}
+	}
+
+	err = clientEndTxn(client, &EndTxnRequest{
+		TransactionalID: transactionalID,
+		ProducerID:      ipResp.Producer.ProducerID,
+		ProducerEpoch:   ipResp.Producer.ProducerEpoch,
+		Committed:       true,
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// It seems like external visibility of the commit isn't synchronous with the
+	// EndTxn request. Waiting a second appears to give enough time for the commit
+	// to become consistently visible.
+	<-time.After(time.Second)
+
+	ofr, err := client.OffsetFetch(ctx, &OffsetFetchRequest{
+		GroupID: groupID,
+		Topics:  map[string][]int{topic: {0}},
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if ofr.Error != nil {
+		t.Error(ofr.Error)
+	}
+
+	fetresps := ofr.Topics[topic]
+	if len(fetresps) != 1 {
+		t.Fatalf("expected 1 offsetfetchpartition response; got %d", len(fetresps))
+	}
+
+	for _, r := range fetresps {
+		if r.Error != nil {
+			t.Fatal(r.Error)
+		}
+
+		if r.CommittedOffset != 10 {
+			t.Fatalf("expected committed offset to be 10; got: %v for partition: %v", r.CommittedOffset, r.Partition)
+		}
+	}
+}
diff -pruN 0.2.1-1.1/write.go 0.4.49+ds1-1/write.go
--- 0.2.1-1.1/write.go	2018-11-20 17:06:33.000000000 +0000
+++ 0.4.49+ds1-1/write.go	2025-08-21 19:15:53.000000000 +0000
@@ -1,121 +1,173 @@
 package kafka
 
 import (
-	"bufio"
 	"bytes"
 	"encoding/binary"
 	"fmt"
+	"hash/crc32"
+	"io"
 	"time"
 )
 
-type writable interface {
-	writeTo(*bufio.Writer)
+type writeBuffer struct {
+	w io.Writer
+	b [16]byte
+}
+
+func (wb *writeBuffer) writeInt8(i int8) {
+	wb.b[0] = byte(i)
+	wb.Write(wb.b[:1])
+}
+
+func (wb *writeBuffer) writeInt16(i int16) {
+	binary.BigEndian.PutUint16(wb.b[:2], uint16(i))
+	wb.Write(wb.b[:2])
 }
 
-func writeInt8(w *bufio.Writer, i int8) {
-	w.WriteByte(byte(i))
+func (wb *writeBuffer) writeInt32(i int32) {
+	binary.BigEndian.PutUint32(wb.b[:4], uint32(i))
+	wb.Write(wb.b[:4])
 }
 
-func writeInt16(w *bufio.Writer, i int16) {
-	var b [2]byte
-	binary.BigEndian.PutUint16(b[:], uint16(i))
-	w.WriteByte(b[0])
-	w.WriteByte(b[1])
+func (wb *writeBuffer) writeInt64(i int64) {
+	binary.BigEndian.PutUint64(wb.b[:8], uint64(i))
+	wb.Write(wb.b[:8])
+}
+
+func (wb *writeBuffer) writeVarInt(i int64) {
+	u := uint64((i << 1) ^ (i >> 63))
+	n := 0
+
+	for u >= 0x80 && n < len(wb.b) {
+		wb.b[n] = byte(u) | 0x80
+		u >>= 7
+		n++
+	}
+
+	if n < len(wb.b) {
+		wb.b[n] = byte(u)
+		n++
+	}
+
+	wb.Write(wb.b[:n])
 }
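For reference, writeVarInt above uses zig-zag encoding (0, -1, 1, -2, ... map to 0, 1, 2, 3, ...) followed by base-128 bytes whose high bit marks continuation. The decoder below is an illustrative counterpart, not part of the package, useful for checking outputs by hand.

// decodeVarInt reads a zig-zag, base-128 varint from b and returns the decoded
// value along with the number of bytes consumed (0 if the input is truncated).
func decodeVarInt(b []byte) (int64, int) {
	var u uint64
	var shift uint
	for n, c := range b {
		u |= uint64(c&0x7f) << shift // accumulate the low 7 bits of each byte
		if c&0x80 == 0 {             // a clear continuation bit ends the varint
			return int64(u>>1) ^ -int64(u&1), n + 1 // undo the zig-zag mapping
		}
		shift += 7
	}
	return 0, 0
}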
 
-func writeInt32(w *bufio.Writer, i int32) {
-	var b [4]byte
-	binary.BigEndian.PutUint32(b[:], uint32(i))
-	w.WriteByte(b[0])
-	w.WriteByte(b[1])
-	w.WriteByte(b[2])
-	w.WriteByte(b[3])
+func (wb *writeBuffer) writeString(s string) {
+	wb.writeInt16(int16(len(s)))
+	wb.WriteString(s)
 }
 
-func writeInt64(w *bufio.Writer, i int64) {
-	var b [8]byte
-	binary.BigEndian.PutUint64(b[:], uint64(i))
-	w.WriteByte(b[0])
-	w.WriteByte(b[1])
-	w.WriteByte(b[2])
-	w.WriteByte(b[3])
-	w.WriteByte(b[4])
-	w.WriteByte(b[5])
-	w.WriteByte(b[6])
-	w.WriteByte(b[7])
+func (wb *writeBuffer) writeVarString(s string) {
+	wb.writeVarInt(int64(len(s)))
+	wb.WriteString(s)
 }
 
-func writeString(w *bufio.Writer, s string) {
-	writeInt16(w, int16(len(s)))
-	w.WriteString(s)
+func (wb *writeBuffer) writeNullableString(s *string) {
+	if s == nil {
+		wb.writeInt16(-1)
+	} else {
+		wb.writeString(*s)
+	}
 }
 
-func writeBytes(w *bufio.Writer, b []byte) {
+func (wb *writeBuffer) writeBytes(b []byte) {
 	n := len(b)
 	if b == nil {
 		n = -1
 	}
-	writeInt32(w, int32(n))
-	w.Write(b)
+	wb.writeInt32(int32(n))
+	wb.Write(b)
+}
+
+func (wb *writeBuffer) writeVarBytes(b []byte) {
+	if b != nil {
+		wb.writeVarInt(int64(len(b)))
+		wb.Write(b)
+	} else {
+		// -1 is used to indicate a nil key
+		wb.writeVarInt(-1)
+	}
 }
 
-func writeBool(w *bufio.Writer, b bool) {
+func (wb *writeBuffer) writeBool(b bool) {
 	v := int8(0)
 	if b {
 		v = 1
 	}
-	writeInt8(w, v)
+	wb.writeInt8(v)
+}
+
+func (wb *writeBuffer) writeArrayLen(n int) {
+	wb.writeInt32(int32(n))
 }
 
-func writeArrayLen(w *bufio.Writer, n int) {
-	writeInt32(w, int32(n))
+func (wb *writeBuffer) writeArray(n int, f func(int)) {
+	wb.writeArrayLen(n)
+	for i := 0; i < n; i++ {
+		f(i)
+	}
 }
 
-func writeArray(w *bufio.Writer, n int, f func(int)) {
-	writeArrayLen(w, n)
-	for i := 0; i != n; i++ {
+func (wb *writeBuffer) writeVarArray(n int, f func(int)) {
+	wb.writeVarInt(int64(n))
+	for i := 0; i < n; i++ {
 		f(i)
 	}
 }
 
-func writeStringArray(w *bufio.Writer, a []string) {
-	writeArray(w, len(a), func(i int) { writeString(w, a[i]) })
+func (wb *writeBuffer) writeStringArray(a []string) {
+	wb.writeArray(len(a), func(i int) { wb.writeString(a[i]) })
 }
 
-func writeInt32Array(w *bufio.Writer, a []int32) {
-	writeArray(w, len(a), func(i int) { writeInt32(w, a[i]) })
+func (wb *writeBuffer) writeInt32Array(a []int32) {
+	wb.writeArray(len(a), func(i int) { wb.writeInt32(a[i]) })
 }
 
-func write(w *bufio.Writer, a interface{}) {
+func (wb *writeBuffer) write(a interface{}) {
 	switch v := a.(type) {
 	case int8:
-		writeInt8(w, v)
+		wb.writeInt8(v)
 	case int16:
-		writeInt16(w, v)
+		wb.writeInt16(v)
 	case int32:
-		writeInt32(w, v)
+		wb.writeInt32(v)
 	case int64:
-		writeInt64(w, v)
+		wb.writeInt64(v)
 	case string:
-		writeString(w, v)
+		wb.writeString(v)
 	case []byte:
-		writeBytes(w, v)
+		wb.writeBytes(v)
 	case bool:
-		writeBool(w, v)
+		wb.writeBool(v)
 	case writable:
-		v.writeTo(w)
+		v.writeTo(wb)
 	default:
 		panic(fmt.Sprintf("unsupported type: %T", a))
 	}
 }
 
-// The functions bellow are used as optimizations to avoid dynamic memory
-// allocations that occur when building the data structures representing the
-// kafka protocol requests.
+func (wb *writeBuffer) Write(b []byte) (int, error) {
+	return wb.w.Write(b)
+}
+
+func (wb *writeBuffer) WriteString(s string) (int, error) {
+	return io.WriteString(wb.w, s)
+}
+
+func (wb *writeBuffer) Flush() error {
+	if x, ok := wb.w.(interface{ Flush() error }); ok {
+		return x.Flush()
+	}
+	return nil
+}
 
-func writeFetchRequestV2(w *bufio.Writer, correlationID int32, clientID, topic string, partition int32, offset int64, minBytes, maxBytes int, maxWait time.Duration) error {
+type writable interface {
+	writeTo(*writeBuffer)
+}
+
+func (wb *writeBuffer) writeFetchRequestV2(correlationID int32, clientID, topic string, partition int32, offset int64, minBytes, maxBytes int, maxWait time.Duration) error {
 	h := requestHeader{
-		ApiKey:        int16(fetchRequest),
+		ApiKey:        int16(fetch),
 		ApiVersion:    int16(v2),
 		CorrelationID: correlationID,
 		ClientID:      clientID,
@@ -131,27 +183,121 @@ func writeFetchRequestV2(w *bufio.Writer
 		8 + // offset
 		4 // max bytes
 
-	h.writeTo(w)
-	writeInt32(w, -1) // replica ID
-	writeInt32(w, milliseconds(maxWait))
-	writeInt32(w, int32(minBytes))
+	h.writeTo(wb)
+	wb.writeInt32(-1) // replica ID
+	wb.writeInt32(milliseconds(maxWait))
+	wb.writeInt32(int32(minBytes))
 
 	// topic array
-	writeArrayLen(w, 1)
-	writeString(w, topic)
+	wb.writeArrayLen(1)
+	wb.writeString(topic)
 
 	// partition array
-	writeArrayLen(w, 1)
-	writeInt32(w, partition)
-	writeInt64(w, offset)
-	writeInt32(w, int32(maxBytes))
+	wb.writeArrayLen(1)
+	wb.writeInt32(partition)
+	wb.writeInt64(offset)
+	wb.writeInt32(int32(maxBytes))
 
-	return w.Flush()
+	return wb.Flush()
 }
 
-func writeListOffsetRequestV1(w *bufio.Writer, correlationID int32, clientID, topic string, partition int32, time int64) error {
+func (wb *writeBuffer) writeFetchRequestV5(correlationID int32, clientID, topic string, partition int32, offset int64, minBytes, maxBytes int, maxWait time.Duration, isolationLevel int8) error {
 	h := requestHeader{
-		ApiKey:        int16(listOffsetRequest),
+		ApiKey:        int16(fetch),
+		ApiVersion:    int16(v5),
+		CorrelationID: correlationID,
+		ClientID:      clientID,
+	}
+	h.Size = (h.size() - 4) +
+		4 + // replica ID
+		4 + // max wait time
+		4 + // min bytes
+		4 + // max bytes
+		1 + // isolation level
+		4 + // topic array length
+		sizeofString(topic) +
+		4 + // partition array length
+		4 + // partition
+		8 + // offset
+		8 + // log start offset
+		4 // max bytes
+
+	h.writeTo(wb)
+	wb.writeInt32(-1) // replica ID
+	wb.writeInt32(milliseconds(maxWait))
+	wb.writeInt32(int32(minBytes))
+	wb.writeInt32(int32(maxBytes))
+	wb.writeInt8(isolationLevel) // isolation level (0 = read uncommitted, 1 = read committed)
+
+	// topic array
+	wb.writeArrayLen(1)
+	wb.writeString(topic)
+
+	// partition array
+	wb.writeArrayLen(1)
+	wb.writeInt32(partition)
+	wb.writeInt64(offset)
+	wb.writeInt64(int64(0)) // log start offset, only used when the request is sent by a follower
+	wb.writeInt32(int32(maxBytes))
+
+	return wb.Flush()
+}
+
+func (wb *writeBuffer) writeFetchRequestV10(correlationID int32, clientID, topic string, partition int32, offset int64, minBytes, maxBytes int, maxWait time.Duration, isolationLevel int8) error {
+	h := requestHeader{
+		ApiKey:        int16(fetch),
+		ApiVersion:    int16(v10),
+		CorrelationID: correlationID,
+		ClientID:      clientID,
+	}
+	h.Size = (h.size() - 4) +
+		4 + // replica ID
+		4 + // max wait time
+		4 + // min bytes
+		4 + // max bytes
+		1 + // isolation level
+		4 + // session ID
+		4 + // session epoch
+		4 + // topic array length
+		sizeofString(topic) +
+		4 + // partition array length
+		4 + // partition
+		4 + // current leader epoch
+		8 + // fetch offset
+		8 + // log start offset
+		4 + // partition max bytes
+		4 // forgotten topics data
+
+	h.writeTo(wb)
+	wb.writeInt32(-1) // replica ID
+	wb.writeInt32(milliseconds(maxWait))
+	wb.writeInt32(int32(minBytes))
+	wb.writeInt32(int32(maxBytes))
+	wb.writeInt8(isolationLevel) // isolation level (0 = read uncommitted, 1 = read committed)
+	wb.writeInt32(0)             //FIXME
+	wb.writeInt32(-1)            //FIXME
+
+	// topic array
+	wb.writeArrayLen(1)
+	wb.writeString(topic)
+
+	// partition array
+	wb.writeArrayLen(1)
+	wb.writeInt32(partition)
+	wb.writeInt32(-1) //FIXME
+	wb.writeInt64(offset)
+	wb.writeInt64(int64(0)) // log start offset, only used when the request is sent by a follower
+	wb.writeInt32(int32(maxBytes))
+
+	// forgotten topics array
+	wb.writeArrayLen(0) // forgotten topics not supported yet
+
+	return wb.Flush()
+}
+
+func (wb *writeBuffer) writeListOffsetRequestV1(correlationID int32, clientID, topic string, partition int32, time int64) error {
+	h := requestHeader{
+		ApiKey:        int16(listOffsets),
 		ApiVersion:    int16(v1),
 		CorrelationID: correlationID,
 		ClientID:      clientID,
@@ -164,48 +310,38 @@ func writeListOffsetRequestV1(w *bufio.W
 		4 + // partition
 		8 // time
 
-	h.writeTo(w)
-	writeInt32(w, -1) // replica ID
+	h.writeTo(wb)
+	wb.writeInt32(-1) // replica ID
 
 	// topic array
-	writeArrayLen(w, 1)
-	writeString(w, topic)
+	wb.writeArrayLen(1)
+	wb.writeString(topic)
 
 	// partition array
-	writeArrayLen(w, 1)
-	writeInt32(w, partition)
-	writeInt64(w, time)
+	wb.writeArrayLen(1)
+	wb.writeInt32(partition)
+	wb.writeInt64(time)
 
-	return w.Flush()
+	return wb.Flush()
 }
 
-func writeProduceRequestV2(w *bufio.Writer, codec CompressionCodec, correlationID int32, clientID, topic string, partition int32, timeout time.Duration, requiredAcks int16, msgs ...Message) error {
+func (wb *writeBuffer) writeProduceRequestV2(codec CompressionCodec, correlationID int32, clientID, topic string, partition int32, timeout time.Duration, requiredAcks int16, msgs ...Message) (err error) {
 	var size int32
-	attributes := int8(CompressionNoneCode)
+	var attributes int8
+	var compressed *bytes.Buffer
 
-	// if compressing, replace the slice of messages with a single compressed
-	// message set.
-	if codec != nil {
-		var err error
-		if msgs, err = compress(codec, msgs...); err != nil {
-			return err
+	if codec == nil {
+		size = messageSetSize(msgs...)
+	} else {
+		compressed, attributes, size, err = compressMessageSet(codec, msgs...)
+		if err != nil {
+			return
 		}
-		attributes = codec.Code()
-	}
-
-	for _, msg := range msgs {
-		size += 8 + // offset
-			4 + // message size
-			4 + // crc
-			1 + // magic byte
-			1 + // attributes
-			8 + // timestamp
-			sizeofBytes(msg.Key) +
-			sizeofBytes(msg.Value)
+		msgs = []Message{{Value: compressed.Bytes()}}
 	}
 
 	h := requestHeader{
-		ApiKey:        int16(produceRequest),
+		ApiKey:        int16(produce),
 		ApiVersion:    int16(v2),
 		CorrelationID: correlationID,
 		ClientID:      clientID,
@@ -220,65 +356,241 @@ func writeProduceRequestV2(w *bufio.Writ
 		4 + // message set size
 		size
 
-	h.writeTo(w)
-	writeInt16(w, requiredAcks) // required acks
-	writeInt32(w, milliseconds(timeout))
+	h.writeTo(wb)
+	wb.writeInt16(requiredAcks) // required acks
+	wb.writeInt32(milliseconds(timeout))
 
 	// topic array
-	writeArrayLen(w, 1)
-	writeString(w, topic)
+	wb.writeArrayLen(1)
+	wb.writeString(topic)
 
 	// partition array
-	writeArrayLen(w, 1)
-	writeInt32(w, partition)
-	writeInt32(w, size)
+	wb.writeArrayLen(1)
+	wb.writeInt32(partition)
+
+	wb.writeInt32(size)
+	cw := &crc32Writer{table: crc32.IEEETable}
 
 	for _, msg := range msgs {
-		writeMessage(w, msg.Offset, attributes, msg.Time, msg.Key, msg.Value)
+		wb.writeMessage(msg.Offset, attributes, msg.Time, msg.Key, msg.Value, cw)
 	}
 
-	return w.Flush()
+	releaseBuffer(compressed)
+	return wb.Flush()
 }
 
-func compress(codec CompressionCodec, msgs ...Message) ([]Message, error) {
-	estimatedLen := 0
-	for _, msg := range msgs {
-		estimatedLen += int(msgSize(msg.Key, msg.Value))
+func (wb *writeBuffer) writeProduceRequestV3(correlationID int32, clientID, topic string, partition int32, timeout time.Duration, requiredAcks int16, transactionalID *string, recordBatch *recordBatch) (err error) {
+
+	h := requestHeader{
+		ApiKey:        int16(produce),
+		ApiVersion:    int16(v3),
+		CorrelationID: correlationID,
+		ClientID:      clientID,
+	}
+
+	h.Size = (h.size() - 4) +
+		sizeofNullableString(transactionalID) +
+		2 + // required acks
+		4 + // timeout
+		4 + // topic array length
+		sizeofString(topic) + // topic
+		4 + // partition array length
+		4 + // partition
+		4 + // message set size
+		recordBatch.size
+
+	h.writeTo(wb)
+	wb.writeNullableString(transactionalID)
+	wb.writeInt16(requiredAcks) // required acks
+	wb.writeInt32(milliseconds(timeout))
+
+	// topic array
+	wb.writeArrayLen(1)
+	wb.writeString(topic)
+
+	// partition array
+	wb.writeArrayLen(1)
+	wb.writeInt32(partition)
+
+	recordBatch.writeTo(wb)
+
+	return wb.Flush()
+}
+
+func (wb *writeBuffer) writeProduceRequestV7(correlationID int32, clientID, topic string, partition int32, timeout time.Duration, requiredAcks int16, transactionalID *string, recordBatch *recordBatch) (err error) {
+
+	h := requestHeader{
+		ApiKey:        int16(produce),
+		ApiVersion:    int16(v7),
+		CorrelationID: correlationID,
+		ClientID:      clientID,
 	}
-	buf := &bytes.Buffer{}
-	buf.Grow(estimatedLen)
-	bufWriter := bufio.NewWriter(buf)
+	h.Size = (h.size() - 4) +
+		sizeofNullableString(transactionalID) +
+		2 + // required acks
+		4 + // timeout
+		4 + // topic array length
+		sizeofString(topic) + // topic
+		4 + // partition array length
+		4 + // partition
+		4 + // message set size
+		recordBatch.size
+
+	h.writeTo(wb)
+	wb.writeNullableString(transactionalID)
+	wb.writeInt16(requiredAcks) // required acks
+	wb.writeInt32(milliseconds(timeout))
+
+	// topic array
+	wb.writeArrayLen(1)
+	wb.writeString(topic)
+
+	// partition array
+	wb.writeArrayLen(1)
+	wb.writeInt32(partition)
+
+	recordBatch.writeTo(wb)
+
+	return wb.Flush()
+}
+
+func (wb *writeBuffer) writeRecordBatch(attributes int16, size int32, count int, baseTime, lastTime time.Time, write func(*writeBuffer)) {
+	var (
+		baseTimestamp   = timestamp(baseTime)
+		lastTimestamp   = timestamp(lastTime)
+		lastOffsetDelta = int32(count - 1)
+		producerID      = int64(-1)    // default producer id for now
+		producerEpoch   = int16(-1)    // default producer epoch for now
+		baseSequence    = int32(-1)    // default base sequence
+		recordCount     = int32(count) // record count
+		writerBackup    = wb.w
+	)
+
+	// dry run to compute the checksum
+	cw := &crc32Writer{table: crc32.MakeTable(crc32.Castagnoli)}
+	wb.w = cw
+	cw.writeInt16(attributes) // attributes, timestamp type 0 - create time, not part of a transaction, no control messages
+	cw.writeInt32(lastOffsetDelta)
+	cw.writeInt64(baseTimestamp)
+	cw.writeInt64(lastTimestamp)
+	cw.writeInt64(producerID)
+	cw.writeInt16(producerEpoch)
+	cw.writeInt32(baseSequence)
+	cw.writeInt32(recordCount)
+	write(wb)
+	wb.w = writerBackup
+
+	// actual write to the output buffer
+	wb.writeInt64(int64(0))
+	wb.writeInt32(int32(size - 12)) // 12 = batch length + base offset sizes
+	wb.writeInt32(-1)               // partition leader epoch
+	wb.writeInt8(2)                 // magic byte
+	wb.writeInt32(int32(cw.crc32))
+
+	wb.writeInt16(attributes)
+	wb.writeInt32(lastOffsetDelta)
+	wb.writeInt64(baseTimestamp)
+	wb.writeInt64(lastTimestamp)
+	wb.writeInt64(producerID)
+	wb.writeInt16(producerEpoch)
+	wb.writeInt32(baseSequence)
+	wb.writeInt32(recordCount)
+	write(wb)
+}
+
+func compressMessageSet(codec CompressionCodec, msgs ...Message) (compressed *bytes.Buffer, attributes int8, size int32, err error) {
+	compressed = acquireBuffer()
+	compressor := codec.NewWriter(compressed)
+	wb := &writeBuffer{w: compressor}
+	cw := &crc32Writer{table: crc32.IEEETable}
+
 	for offset, msg := range msgs {
-		writeMessage(bufWriter, int64(offset), CompressionNoneCode, msg.Time, msg.Key, msg.Value)
+		wb.writeMessage(int64(offset), 0, msg.Time, msg.Key, msg.Value, cw)
 	}
-	bufWriter.Flush()
 
-	compressed, err := codec.Encode(buf.Bytes())
-	if err != nil {
-		return nil, err
+	if err = compressor.Close(); err != nil {
+		releaseBuffer(compressed)
+		return
 	}
 
-	return []Message{{Value: compressed}}, nil
+	attributes = codec.Code()
+	size = messageSetSize(Message{Value: compressed.Bytes()})
+	return
 }
 
-const magicByte = 1 // compatible with kafka 0.10.0.0+
+func (wb *writeBuffer) writeMessage(offset int64, attributes int8, time time.Time, key, value []byte, cw *crc32Writer) {
+	const magicByte = 1 // compatible with kafka 0.10.0.0+
 
-func writeMessage(w *bufio.Writer, offset int64, attributes int8, time time.Time, key, value []byte) {
 	timestamp := timestamp(time)
-	crc32 := crc32OfMessage(magicByte, attributes, timestamp, key, value)
-	size := msgSize(key, value)
+	size := messageSize(key, value)
+
+	// dry run to compute the checksum
+	cw.crc32 = 0
+	cw.writeInt8(magicByte)
+	cw.writeInt8(attributes)
+	cw.writeInt64(timestamp)
+	cw.writeBytes(key)
+	cw.writeBytes(value)
+
+	// actual write to the output buffer
+	wb.writeInt64(offset)
+	wb.writeInt32(size)
+	wb.writeInt32(int32(cw.crc32))
+	wb.writeInt8(magicByte)
+	wb.writeInt8(attributes)
+	wb.writeInt64(timestamp)
+	wb.writeBytes(key)
+	wb.writeBytes(value)
+}
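The dry run above feeds the magic byte, attributes, timestamp, and length-prefixed key and value through the crc32Writer so the checksum can be written ahead of the data it covers. Below is a minimal restatement, assuming (as the usage here suggests) that crc32Writer keeps a running IEEE CRC-32 of the bytes written through it; the function is an illustrative sketch, not part of the package.

// messageCRC32 computes the same IEEE CRC-32 that the dry run above
// accumulates, by serializing the checksummed fields into a buffer and
// hashing the result.
func messageCRC32(magic, attributes int8, timestamp int64, key, value []byte) uint32 {
	buf := &bytes.Buffer{}
	wb := &writeBuffer{w: buf}
	wb.writeInt8(magic)
	wb.writeInt8(attributes)
	wb.writeInt64(timestamp)
	wb.writeBytes(key)
	wb.writeBytes(value)
	return crc32.ChecksumIEEE(buf.Bytes())
}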
+
+// Messages with magic >=2 are called records. This method writes messages using message format 2.
+func (wb *writeBuffer) writeRecord(attributes int8, baseTime time.Time, offset int64, msg Message) {
+	timestampDelta := msg.Time.Sub(baseTime)
+	offsetDelta := int64(offset)
+
+	wb.writeVarInt(int64(recordSize(&msg, timestampDelta, offsetDelta)))
+	wb.writeInt8(attributes)
+	wb.writeVarInt(int64(milliseconds(timestampDelta)))
+	wb.writeVarInt(offsetDelta)
+
+	wb.writeVarBytes(msg.Key)
+	wb.writeVarBytes(msg.Value)
+	wb.writeVarArray(len(msg.Headers), func(i int) {
+		h := &msg.Headers[i]
+		wb.writeVarString(h.Key)
+		wb.writeVarBytes(h.Value)
+	})
+}
+
+func varIntLen(i int64) int {
+	u := uint64((i << 1) ^ (i >> 63)) // zig-zag encoding
+	n := 0
+
+	for u >= 0x80 {
+		u >>= 7
+		n++
+	}
 
-	writeInt64(w, offset)
-	writeInt32(w, size)
-	writeInt32(w, int32(crc32))
-	writeInt8(w, magicByte)
-	writeInt8(w, attributes)
-	writeInt64(w, timestamp)
-	writeBytes(w, key)
-	writeBytes(w, value)
+	return n + 1
 }
 
-func msgSize(key, value []byte) int32 {
+func varBytesLen(b []byte) int {
+	return varIntLen(int64(len(b))) + len(b)
+}
+
+func varStringLen(s string) int {
+	return varIntLen(int64(len(s))) + len(s)
+}
+
+func varArrayLen(n int, f func(int) int) int {
+	size := varIntLen(int64(n))
+	for i := 0; i < n; i++ {
+		size += f(i)
+	}
+	return size
+}
+
+func messageSize(key, value []byte) int32 {
 	return 4 + // crc
 		1 + // magic byte
 		1 + // attributes
@@ -286,3 +598,17 @@ func msgSize(key, value []byte) int32 {
 		sizeofBytes(key) +
 		sizeofBytes(value)
 }
+
+func messageSetSize(msgs ...Message) (size int32) {
+	for _, msg := range msgs {
+		size += 8 + // offset
+			4 + // message size
+			4 + // crc
+			1 + // magic byte
+			1 + // attributes
+			8 + // timestamp
+			sizeofBytes(msg.Key) +
+			sizeofBytes(msg.Value)
+	}
+	return
+}
diff -pruN 0.2.1-1.1/write_test.go 0.4.49+ds1-1/write_test.go
--- 0.2.1-1.1/write_test.go	2018-11-20 17:06:33.000000000 +0000
+++ 0.4.49+ds1-1/write_test.go	2025-08-21 19:15:53.000000000 +0000
@@ -1,10 +1,14 @@
 package kafka
 
 import (
-	"bufio"
 	"bytes"
+	"context"
+	"fmt"
+	"hash/crc32"
 	"testing"
 	"time"
+
+	ktesting "github.com/segmentio/kafka-go/testing"
 )
 
 const (
@@ -14,8 +18,35 @@ const (
 	testPartition     = 42
 )
 
+type WriteVarIntTestCase struct {
+	v  []byte
+	tc int64
+}
+
+func TestWriteVarInt(t *testing.T) {
+	testCases := []*WriteVarIntTestCase{
+		{v: []byte{0}, tc: 0},
+		{v: []byte{2}, tc: 1},
+		{v: []byte{1}, tc: -1},
+		{v: []byte{3}, tc: -2},
+		{v: []byte{128, 2}, tc: 128},
+		{v: []byte{254, 1}, tc: 127},
+		{v: []byte{142, 6}, tc: 391},
+		{v: []byte{142, 134, 6}, tc: 49543},
+	}
+
+	for _, tc := range testCases {
+		b := &bytes.Buffer{}
+		w := &writeBuffer{w: b}
+		w.writeVarInt(tc.tc)
+
+		if !bytes.Equal(b.Bytes(), tc.v) {
+			t.Errorf("Expected %v; got %v", tc.v, b.Bytes())
+		}
+	}
+}
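As a worked check of the table above: tc = 391 zig-zags to (391<<1) ^ (391>>63) = 782, and 782 = 6*128 + 14, so the encoder emits the low 7 bits with the continuation bit set (14 | 0x80 = 142) followed by 6, matching the expected bytes {142, 6}. Similarly, -1 zig-zags to 1 and fits in the single byte {1}.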
+
 func TestWriteOptimizations(t *testing.T) {
-	t.Parallel()
 	t.Run("writeFetchRequestV2", testWriteFetchRequestV2)
 	t.Run("writeListOffsetRequestV1", testWriteListOffsetRequestV1)
 	t.Run("writeProduceRequestV2", testWriteProduceRequestV2)
@@ -28,7 +59,7 @@ func testWriteFetchRequestV2(t *testing.
 	const maxWait = 100 * time.Millisecond
 	testWriteOptimization(t,
 		requestHeader{
-			ApiKey:        int16(fetchRequest),
+			ApiKey:        int16(fetch),
 			ApiVersion:    int16(v2),
 			CorrelationID: testCorrelationID,
 			ClientID:      testClientID,
@@ -46,8 +77,8 @@ func testWriteFetchRequestV2(t *testing.
 				}},
 			}},
 		},
-		func(w *bufio.Writer) {
-			writeFetchRequestV2(w, testCorrelationID, testClientID, testTopic, testPartition, offset, minBytes, maxBytes, maxWait)
+		func(w *writeBuffer) {
+			w.writeFetchRequestV2(testCorrelationID, testClientID, testTopic, testPartition, offset, minBytes, maxBytes, maxWait)
 		},
 	)
 }
@@ -56,7 +87,7 @@ func testWriteListOffsetRequestV1(t *tes
 	const time = -1
 	testWriteOptimization(t,
 		requestHeader{
-			ApiKey:        int16(listOffsetRequest),
+			ApiKey:        int16(listOffsets),
 			ApiVersion:    int16(v1),
 			CorrelationID: testCorrelationID,
 			ClientID:      testClientID,
@@ -71,8 +102,8 @@ func testWriteListOffsetRequestV1(t *tes
 				}},
 			}},
 		},
-		func(w *bufio.Writer) {
-			writeListOffsetRequestV1(w, testCorrelationID, testClientID, testTopic, testPartition, time)
+		func(w *writeBuffer) {
+			w.writeListOffsetRequestV1(testCorrelationID, testClientID, testTopic, testPartition, time)
 		},
 	)
 }
@@ -91,12 +122,14 @@ func testWriteProduceRequestV2(t *testin
 		},
 	}
 	msg.MessageSize = msg.Message.size()
-	msg.Message.CRC = msg.Message.crc32()
+	msg.Message.CRC = msg.Message.crc32(&crc32Writer{
+		table: crc32.IEEETable,
+	})
 
 	const timeout = 100
 	testWriteOptimization(t,
 		requestHeader{
-			ApiKey:        int16(produceRequest),
+			ApiKey:        int16(produce),
 			ApiVersion:    int16(v2),
 			CorrelationID: testCorrelationID,
 			ClientID:      testClientID,
@@ -108,13 +141,12 @@ func testWriteProduceRequestV2(t *testin
 				TopicName: testTopic,
 				Partitions: []produceRequestPartitionV2{{
 					Partition:      testPartition,
-					MessageSetSize: msg.size(),
-					MessageSet:     messageSet{msg},
+					MessageSetSize: msg.size(), MessageSet: messageSet{msg},
 				}},
 			}},
 		},
-		func(w *bufio.Writer) {
-			writeProduceRequestV2(w, nil, testCorrelationID, testClientID, testTopic, testPartition, timeout*time.Millisecond, -1, Message{
+		func(w *writeBuffer) {
+			w.writeProduceRequestV2(nil, testCorrelationID, testClientID, testTopic, testPartition, timeout*time.Millisecond, -1, Message{
 				Offset: 10,
 				Key:    key,
 				Value:  val,
@@ -123,20 +155,18 @@ func testWriteProduceRequestV2(t *testin
 	)
 }
 
-func testWriteOptimization(t *testing.T, h requestHeader, r request, f func(*bufio.Writer)) {
+func testWriteOptimization(t *testing.T, h requestHeader, r request, f func(*writeBuffer)) {
 	b1 := &bytes.Buffer{}
-	w1 := bufio.NewWriter(b1)
+	w1 := &writeBuffer{w: b1}
 
 	b2 := &bytes.Buffer{}
-	w2 := bufio.NewWriter(b2)
+	w2 := &writeBuffer{w: b2}
 
 	h.Size = (h.size() + r.size()) - 4
 	h.writeTo(w1)
 	r.writeTo(w1)
-	w1.Flush()
 
 	f(w2)
-	w2.Flush()
 
 	c1 := b1.Bytes()
 	c2 := b2.Bytes()
@@ -160,3 +190,58 @@ func testWriteOptimization(t *testing.T,
 		}
 	}
 }
+
+func TestWriteV2RecordBatch(t *testing.T) {
+	if !ktesting.KafkaIsAtLeast("0.11.0") {
+		t.Skip("RecordBatch was added in kafka 0.11.0")
+		return
+	}
+
+	client, topic, shutdown := newLocalClientAndTopic()
+	defer shutdown()
+
+	msgs := make([]Message, 15)
+	for i := range msgs {
+		value := fmt.Sprintf("Sample message content: %d!", i)
+		msgs[i] = Message{Key: []byte("Key"), Value: []byte(value), Headers: []Header{{Key: "hk", Value: []byte("hv")}}}
+	}
+
+	w := &Writer{
+		Addr:         TCP("localhost:9092"),
+		Topic:        topic,
+		BatchTimeout: 100 * time.Millisecond,
+		BatchSize:    5,
+		Transport:    client.Transport,
+	}
+
+	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+	defer cancel()
+
+	if err := w.WriteMessages(ctx, msgs...); err != nil {
+		t.Errorf("Failed to write v2 messages to kafka: %v", err)
+		return
+	}
+	w.Close()
+
+	r := NewReader(ReaderConfig{
+		Brokers: []string{"localhost:9092"},
+		Topic:   topic,
+		MaxWait: 100 * time.Millisecond,
+	})
+	defer r.Close()
+
+	msg, err := r.ReadMessage(context.Background())
+	if err != nil {
+		t.Errorf("Failed to read message: %v", err)
+		return
+	}
+
+	if string(msg.Key) != "Key" {
+		t.Error("Received message's key doesn't match")
+		return
+	}
+	if msg.Headers[0].Key != "hk" {
+		t.Error("Received message header's key doesn't match")
+		return
+	}
+}
diff -pruN 0.2.1-1.1/writer.go 0.4.49+ds1-1/writer.go
--- 0.2.1-1.1/writer.go	2018-11-20 17:06:33.000000000 +0000
+++ 0.4.49+ds1-1/writer.go	2025-08-21 19:15:53.000000000 +0000
@@ -1,37 +1,232 @@
 package kafka
 
 import (
+	"bytes"
 	"context"
-	"fmt"
+	"errors"
 	"io"
-	"log"
-	"math/rand"
-	"sort"
+	"net"
 	"sync"
+	"sync/atomic"
 	"time"
+
+	metadataAPI "github.com/segmentio/kafka-go/protocol/metadata"
 )
 
 // The Writer type provides the implementation of a producer of kafka messages
 // that automatically distributes messages across partitions of a single topic
 // using a configurable balancing policy.
 //
-// Instances of Writer are safe to use concurrently from multiple goroutines.
+// Writers manage the dispatch of messages across partitions of the topic they
+// are configured to write to using a Balancer, and aggregate batches to
+// optimize the writes to kafka.
+//
+// Writers may be configured to be used synchronously or asynchronously. When
+// used synchronously, calls to WriteMessages block until the messages have been
+// written to kafka. In this mode, the program should inspect the error returned
+// by the function and test if it is an instance of kafka.WriteErrors in order to
+// identify which messages have succeeded or failed, for example:
+//
+//		// Construct a synchronous writer (the default mode).
+//		w := &kafka.Writer{
+//			Addr:         kafka.TCP("localhost:9092", "localhost:9093", "localhost:9094"),
+//			Topic:        "topic-A",
+//			RequiredAcks: kafka.RequireAll,
+//		}
+//
+//		...
+//
+//	 // Passing a context can prevent the operation from blocking indefinitely.
+//		switch err := w.WriteMessages(ctx, msgs...).(type) {
+//		case nil:
+//		case kafka.WriteErrors:
+//			for i := range msgs {
+//				if err[i] != nil {
+//					// handle the error writing msgs[i]
+//					...
+//				}
+//			}
+//		default:
+//			// handle other errors
+//			...
+//		}
+//
+// In asynchronous mode, the program may configure a completion handler on the
+// writer to receive notifications of messages being written to kafka:
+//
+//	w := &kafka.Writer{
+//		Addr:         kafka.TCP("localhost:9092", "localhost:9093", "localhost:9094"),
+//		Topic:        "topic-A",
+//		RequiredAcks: kafka.RequireAll,
+//		Async:        true, // make the writer asynchronous
+//		Completion: func(messages []kafka.Message, err error) {
+//			...
+//		},
+//	}
+//
+//	...
+//
+//	// Because the writer is asynchronous, there is no need for the context to
+//	// be cancelled; the call will never block.
+//	if err := w.WriteMessages(context.Background(), msgs...); err != nil {
+//		// Only validation errors would be reported in this case.
+//		...
+//	}
+//
+// Methods of Writer are safe to use concurrently from multiple goroutines;
+// however, the writer configuration should not be modified after first use.
 type Writer struct {
-	config WriterConfig
+	// Address of the kafka cluster that this writer is configured to send
+	// messages to.
+	//
+	// This field is required, attempting to write messages to a writer with a
+	// nil address will error.
+	Addr net.Addr
 
-	mutex  sync.RWMutex
-	closed bool
+	// Topic is the name of the topic that the writer will produce messages to.
+	//
+	// Setting Topic here and setting Topic on individual messages are mutually
+	// exclusive options. If you set Topic here, you must not set Topic for any
+	// produced Message. Otherwise, if you do not set Topic here, every Message
+	// must have Topic specified.
+	Topic string
+
+	// The balancer used to distribute messages across partitions.
+	//
+	// The default is to use a round-robin distribution.
+	Balancer Balancer
+
+	// Limit on how many attempts will be made to deliver a message.
+	//
+	// The default is to try at most 10 times.
+	MaxAttempts int
+
+	// WriteBackoffMin optionally sets the smallest amount of time the writer waits before
+	// it attempts to write a batch of messages
+	//
+	// Default: 100ms
+	WriteBackoffMin time.Duration
+
+	// WriteBackoffMax optionally sets the maximum amount of time the writer waits before
+	// it attempts to write a batch of messages
+	//
+	// Default: 1s
+	WriteBackoffMax time.Duration
+
+	// Limit on how many messages will be buffered before being sent to a
+	// partition.
+	//
+	// The default is to use a target batch size of 100 messages.
+	BatchSize int
+
+	// Limit the maximum size of a request in bytes before being sent to
+	// a partition.
+	//
+	// The default is to use the kafka default value of 1048576 (1 MiB).
+	BatchBytes int64
+
+	// Time limit on how often incomplete message batches will be flushed to
+	// kafka.
+	//
+	// The default is to flush at least every second.
+	BatchTimeout time.Duration
+
+	// Timeout for read operations performed by the Writer.
+	//
+	// Defaults to 10 seconds.
+	ReadTimeout time.Duration
+
+	// Timeout for write operation performed by the Writer.
+	//
+	// Defaults to 10 seconds.
+	WriteTimeout time.Duration
+
+	// Number of acknowledgements from partition replicas required before receiving
+	// a response to a produce request; the following values are supported:
+	//
+	//  RequireNone (0)  fire-and-forget, do not wait for acknowledgements from the broker
+	//  RequireOne  (1)  wait for the leader to acknowledge the writes
+	//  RequireAll  (-1) wait for the full ISR to acknowledge the writes
+	//
+	// Defaults to RequireNone.
+	RequiredAcks RequiredAcks
+
+	// Setting this flag to true causes the WriteMessages method to never block.
+	// It also means that errors are ignored since the caller will not receive
+	// the returned value. Use this only if you don't care about guarantees of
+	// whether the messages were written to kafka.
+	//
+	// Defaults to false.
+	Async bool
+
+	// An optional function called when the writer succeeds or fails the
+	// delivery of messages to a kafka partition. When writing the messages
+	// fails, the `err` parameter will be non-nil.
+	//
+	// The messages that the Completion function is called with have their
+	// topic, partition, offset, and time set based on the Produce responses
+	// received from kafka. All messages passed to a call to the function have
+	// been written to the same partition. The keys and values of messages are
+	// referencing the original byte slices carried by messages in the calls to
+	// WriteMessages.
+	//
+	// The function is called from goroutines started by the writer. Calls to
+	// Close will block on the Completion function calls. When the Writer is
+	// not writing asynchronously, the WriteMessages call will also block on
+	// Completion function, which is a useful guarantee if the byte slices
+	// for the message keys and values are intended to be reused after the
+	// WriteMessages call returned.
+	//
+	// If a completion function panics, the program terminates because the
+	// panic is not recovered by the writer and bubbles up to the top of the
+	// goroutine's call stack.
+	Completion func(messages []Message, err error)
+
+	// Compression sets the compression codec to be used to compress messages.
+	Compression Compression
+
+	// If not nil, specifies a logger used to report internal changes within the
+	// writer.
+	Logger Logger
+
+	// ErrorLogger is the logger used to report errors. If nil, the writer falls
+	// back to using Logger instead.
+	ErrorLogger Logger
+
+	// A transport used to send messages to kafka clusters.
+	//
+	// If nil, DefaultTransport is used.
+	Transport RoundTripper
 
-	join sync.WaitGroup
-	msgs chan writerMessage
-	done chan struct{}
+	// AllowAutoTopicCreation notifies the writer to create the topic if it is missing.
+	AllowAutoTopicCreation bool
+
+	// Manages the current set of partition-topic writers.
+	group   sync.WaitGroup
+	mutex   sync.Mutex
+	closed  bool
+	writers map[topicPartition]*partitionWriter
 
 	// writer stats are all made of atomic values, no need for synchronization.
-	// Use a pointer to ensure 64-bit alignment of the values.
-	stats *writerStats
+	// Use a pointer to ensure 64-bit alignment of the values. The once value is
+	// used to lazily create the value when first used, allowing programs to use
+	// the zero-value value of Writer.
+	// the zero value of Writer.
+	*writerStats
+
+	// If no balancer is configured, the writer uses this one. RoundRobin values
+	// are safe to use concurrently from multiple goroutines, there is no need
+	// for extra synchronization to access this field.
+	roundRobin RoundRobin
+
+	// non-nil when a transport was created by NewWriter, remove in 1.0.
+	transport *Transport
 }
 
 // WriterConfig is a configuration type used to create new instances of Writer.
+//
+// DEPRECATED: writer values should be configured directly by assigning their
+// exported fields. This type is kept for backward compatibility, and will be
+// removed in version 1.0.
 type WriterConfig struct {
 	// The list of brokers used to discover the partitions available on the
 	// kafka cluster.
@@ -42,8 +237,9 @@ type WriterConfig struct {
 
 	// The topic that the writer will produce messages to.
 	//
-	// This field is required, attempting to create a writer with an empty topic
-	// will panic.
+	// If provided, this will be used to set the topic for all produced messages.
+	// If not provided, each Message must specify a topic for itself. These options
+	// are mutually exclusive; setting both causes the Writer to return an error.
 	Topic string
 
 	// The dialer used by the writer to establish connections to the kafka
@@ -62,9 +258,10 @@ type WriterConfig struct {
 	// The default is to try at most 10 times.
 	MaxAttempts int
 
-	// A hint on the capacity of the writer's internal message queue.
-	//
-	// The default is to use a queue capacity of 100 messages.
+	// DEPRECATED: in versions prior to 0.4, the writer used channels internally
+	// to dispatch messages to partitions. This has been replaced by an in-memory
+	// aggregation of batches which uses shared state instead of message passing,
+	// making this option unnecessary.
 	QueueCapacity int
 
 	// Limit on how many messages will be buffered before being sent to a
@@ -73,6 +270,12 @@ type WriterConfig struct {
 	// The default is to use a target batch size of 100 messages.
 	BatchSize int
 
+	// Limit the maximum size of a request in bytes before being sent to
+	// a partition.
+	//
+	// The default is to use the kafka default value of 1048576 (1 MiB).
+	BatchBytes int
+
 	// Time limit on how often incomplete message batches will be flushed to
 	// kafka.
 	//
@@ -89,16 +292,21 @@ type WriterConfig struct {
 	// Defaults to 10 seconds.
 	WriteTimeout time.Duration
 
-	// This interval defines how often the list of partitions is refreshed from
-	// kafka. It allows the writer to automatically handle when new partitions
-	// are added to a topic.
-	//
-	// The default is to refresh partitions every 15 seconds.
+	// DEPRECATED: in versions prior to 0.4, the writer used to maintain a cache of
+	// the topic layout. With the change to use a transport to manage connections,
+	// the responsibility of syncing the cluster layout has been delegated to the
+	// transport.
 	RebalanceInterval time.Duration
 
+	// DEPRECATED: in versions prior to 0.4, the writer used to manage connections
+	// to the kafka cluster directly. With the change to use a transport to manage
+	// connections, the writer has no connections to manage directly anymore.
+	IdleConnTimeout time.Duration
+
 	// Number of acknowledges from partition replicas required before receiving
-	// a response to a produce request (default to -1, which means to wait for
-	// all replicas).
+	// a response to a produce request. The default is -1, which means to wait for
+	// all replicas; a value above 0 indicates how many replicas must acknowledge
+	// a message for it to be considered successfully written.
 	RequiredAcks int
 
 	// Setting this flag to true causes the WriteMessages method to never block.
@@ -108,49 +316,74 @@ type WriterConfig struct {
 	Async bool
 
 	// CompressionCodec set the codec to be used to compress Kafka messages.
-	// Note that messages are allowed to overwrite the compression codec individually.
 	CompressionCodec
 
 	// If not nil, specifies a logger used to report internal changes within the
 	// writer.
-	Logger *log.Logger
+	Logger Logger
 
 	// ErrorLogger is the logger used to report errors. If nil, the writer falls
 	// back to using Logger instead.
-	ErrorLogger *log.Logger
+	ErrorLogger Logger
+}
 
-	newPartitionWriter func(partition int, config WriterConfig, stats *writerStats) partitionWriter
+type topicPartition struct {
+	topic     string
+	partition int32
+}
+
+// Validate validates the WriterConfig properties.
+func (config *WriterConfig) Validate() error {
+	if len(config.Brokers) == 0 {
+		return errors.New("cannot create a kafka writer with an empty list of brokers")
+	}
+	return nil
 }
 
 // WriterStats is a data structure returned by a call to Writer.Stats that
 // exposes details about the behavior of the writer.
 type WriterStats struct {
-	Dials      int64 `metric:"kafka.writer.dial.count"      type:"counter"`
-	Writes     int64 `metric:"kafka.writer.write.count"     type:"counter"`
-	Messages   int64 `metric:"kafka.writer.message.count"   type:"counter"`
-	Bytes      int64 `metric:"kafka.writer.message.bytes"   type:"counter"`
-	Rebalances int64 `metric:"kafka.writer.rebalance.count" type:"counter"`
-	Errors     int64 `metric:"kafka.writer.error.count"     type:"counter"`
-
-	DialTime  DurationStats `metric:"kafka.writer.dial.seconds"`
-	WriteTime DurationStats `metric:"kafka.writer.write.seconds"`
-	WaitTime  DurationStats `metric:"kafka.writer.wait.seconds"`
-	Retries   SummaryStats  `metric:"kafka.writer.retries.count"`
-	BatchSize SummaryStats  `metric:"kafka.writer.batch.size"`
-
-	MaxAttempts       int64         `metric:"kafka.writer.attempts.max"       type:"gauge"`
-	MaxBatchSize      int64         `metric:"kafka.writer.batch.max"          type:"gauge"`
-	BatchTimeout      time.Duration `metric:"kafka.writer.batch.timeout"      type:"gauge"`
-	ReadTimeout       time.Duration `metric:"kafka.writer.read.timeout"       type:"gauge"`
-	WriteTimeout      time.Duration `metric:"kafka.writer.write.timeout"      type:"gauge"`
-	RebalanceInterval time.Duration `metric:"kafka.writer.rebalance.interval" type:"gauge"`
-	RequiredAcks      int64         `metric:"kafka.writer.acks.required"      type:"gauge"`
-	Async             bool          `metric:"kafka.writer.async"              type:"gauge"`
-	QueueLength       int64         `metric:"kafka.writer.queue.length"       type:"gauge"`
-	QueueCapacity     int64         `metric:"kafka.writer.queue.capacity"     type:"gauge"`
-
-	ClientID string `tag:"client_id"`
-	Topic    string `tag:"topic"`
+	Writes   int64 `metric:"kafka.writer.write.count"     type:"counter"`
+	Messages int64 `metric:"kafka.writer.message.count"   type:"counter"`
+	Bytes    int64 `metric:"kafka.writer.message.bytes"   type:"counter"`
+	Errors   int64 `metric:"kafka.writer.error.count"     type:"counter"`
+
+	BatchTime      DurationStats `metric:"kafka.writer.batch.seconds"`
+	BatchQueueTime DurationStats `metric:"kafka.writer.batch.queue.seconds"`
+	WriteTime      DurationStats `metric:"kafka.writer.write.seconds"`
+	WaitTime       DurationStats `metric:"kafka.writer.wait.seconds"`
+	Retries        int64         `metric:"kafka.writer.retries.count" type:"counter"`
+	BatchSize      SummaryStats  `metric:"kafka.writer.batch.size"`
+	BatchBytes     SummaryStats  `metric:"kafka.writer.batch.bytes"`
+
+	MaxAttempts     int64         `metric:"kafka.writer.attempts.max"  type:"gauge"`
+	WriteBackoffMin time.Duration `metric:"kafka.writer.backoff.min"   type:"gauge"`
+	WriteBackoffMax time.Duration `metric:"kafka.writer.backoff.max"   type:"gauge"`
+	MaxBatchSize    int64         `metric:"kafka.writer.batch.max"     type:"gauge"`
+	BatchTimeout    time.Duration `metric:"kafka.writer.batch.timeout" type:"gauge"`
+	ReadTimeout     time.Duration `metric:"kafka.writer.read.timeout"  type:"gauge"`
+	WriteTimeout    time.Duration `metric:"kafka.writer.write.timeout" type:"gauge"`
+	RequiredAcks    int64         `metric:"kafka.writer.acks.required" type:"gauge"`
+	Async           bool          `metric:"kafka.writer.async"         type:"gauge"`
+
+	Topic string `tag:"topic"`
+
+	// DEPRECATED: these fields will only be reported for backward compatibility
+	// if the Writer was constructed with NewWriter.
+	Dials    int64         `metric:"kafka.writer.dial.count" type:"counter"`
+	DialTime DurationStats `metric:"kafka.writer.dial.seconds"`
+
+	// DEPRECATED: these fields were meaningful prior to kafka-go 0.4, changes
+	// to the internal implementation and the introduction of the transport type
+	// made them unnecessary.
+	//
+	// The values will be zero but are left for backward compatibility to avoid
+	// breaking programs that used these fields.
+	Rebalances        int64
+	RebalanceInterval time.Duration
+	QueueLength       int64
+	QueueCapacity     int64
+	ClientID          string
 }
 
 // writerStats is a struct that contains statistics on a writer.
@@ -159,27 +392,29 @@ type WriterStats struct {
 // This is easily accomplished by always allocating this struct directly, (i.e. using a pointer to the struct).
 // See https://golang.org/pkg/sync/atomic/#pkg-note-BUG
 type writerStats struct {
-	dials      counter
-	writes     counter
-	messages   counter
-	bytes      counter
-	rebalances counter
-	errors     counter
-	dialTime   summary
-	writeTime  summary
-	waitTime   summary
-	retries    summary
-	batchSize  summary
+	dials          counter
+	writes         counter
+	messages       counter
+	bytes          counter
+	errors         counter
+	dialTime       summary
+	batchTime      summary
+	batchQueueTime summary
+	writeTime      summary
+	waitTime       summary
+	retries        counter
+	batchSize      summary
+	batchSizeBytes summary
 }
 
 // NewWriter creates and returns a new Writer configured with config.
+//
+// DEPRECATED: Writer value can be instantiated and configured directly,
+// this function is retained for backward compatibility and will be removed
+// in version 1.0.
 func NewWriter(config WriterConfig) *Writer {
-	if len(config.Brokers) == 0 {
-		panic("cannot create a kafka writer with an empty list of brokers")
-	}
-
-	if len(config.Topic) == 0 {
-		panic("cannot create a kafka writer with an empty topic")
+	if err := config.Validate(); err != nil {
+		panic(err)
 	}
 
 	if config.Dialer == nil {
@@ -190,55 +425,158 @@ func NewWriter(config WriterConfig) *Wri
 		config.Balancer = &RoundRobin{}
 	}
 
-	if config.newPartitionWriter == nil {
-		config.newPartitionWriter = func(partition int, config WriterConfig, stats *writerStats) partitionWriter {
-			return newWriter(partition, config, stats)
+	// Converts the pre-0.4 Dialer API into a Transport.
+	kafkaDialer := DefaultDialer
+	if config.Dialer != nil {
+		kafkaDialer = config.Dialer
+	}
+
+	dialer := (&net.Dialer{
+		Timeout:       kafkaDialer.Timeout,
+		Deadline:      kafkaDialer.Deadline,
+		LocalAddr:     kafkaDialer.LocalAddr,
+		DualStack:     kafkaDialer.DualStack,
+		FallbackDelay: kafkaDialer.FallbackDelay,
+		KeepAlive:     kafkaDialer.KeepAlive,
+	})
+
+	var resolver Resolver
+	if r, ok := kafkaDialer.Resolver.(*net.Resolver); ok {
+		dialer.Resolver = r
+	} else {
+		resolver = kafkaDialer.Resolver
+	}
+
+	stats := new(writerStats)
+	// For backward compatibility with the pre-0.4 APIs, support custom
+	// resolvers by wrapping the dial function.
+	dial := func(ctx context.Context, network, addr string) (net.Conn, error) {
+		start := time.Now()
+		defer func() {
+			stats.dials.observe(1)
+			stats.dialTime.observe(int64(time.Since(start)))
+		}()
+		address, err := lookupHost(ctx, addr, resolver)
+		if err != nil {
+			return nil, err
 		}
+		return dialer.DialContext(ctx, network, address)
 	}
 
-	if config.MaxAttempts == 0 {
-		config.MaxAttempts = 10
+	idleTimeout := config.IdleConnTimeout
+	if idleTimeout == 0 {
+		// Historical default value of WriterConfig.IdleConnTimeout; 9 minutes seems
+		// way too long when there is no ping mechanism in the kafka protocol.
+		idleTimeout = 9 * time.Minute
 	}
 
-	if config.QueueCapacity == 0 {
-		config.QueueCapacity = 100
+	metadataTTL := config.RebalanceInterval
+	if metadataTTL == 0 {
+		// Historical default value of WriterConfig.RebalanceInterval.
+		metadataTTL = 15 * time.Second
 	}
 
-	if config.BatchSize == 0 {
-		config.BatchSize = 100
+	transport := &Transport{
+		Dial:        dial,
+		SASL:        kafkaDialer.SASLMechanism,
+		TLS:         kafkaDialer.TLS,
+		ClientID:    kafkaDialer.ClientID,
+		IdleTimeout: idleTimeout,
+		MetadataTTL: metadataTTL,
 	}
 
-	if config.BatchTimeout == 0 {
-		config.BatchTimeout = 1 * time.Second
+	w := &Writer{
+		Addr:         TCP(config.Brokers...),
+		Topic:        config.Topic,
+		MaxAttempts:  config.MaxAttempts,
+		BatchSize:    config.BatchSize,
+		Balancer:     config.Balancer,
+		BatchBytes:   int64(config.BatchBytes),
+		BatchTimeout: config.BatchTimeout,
+		ReadTimeout:  config.ReadTimeout,
+		WriteTimeout: config.WriteTimeout,
+		RequiredAcks: RequiredAcks(config.RequiredAcks),
+		Async:        config.Async,
+		Logger:       config.Logger,
+		ErrorLogger:  config.ErrorLogger,
+		Transport:    transport,
+		transport:    transport,
+		writerStats:  stats,
+	}
+
+	if config.RequiredAcks == 0 {
+		// Historically the writers created by NewWriter have used "all" as the
+		// default value when 0 was specified.
+		w.RequiredAcks = RequireAll
 	}
 
-	if config.ReadTimeout == 0 {
-		config.ReadTimeout = 10 * time.Second
+	if config.CompressionCodec != nil {
+		w.Compression = Compression(config.CompressionCodec.Code())
 	}
 
-	if config.WriteTimeout == 0 {
-		config.WriteTimeout = 10 * time.Second
+	return w
+}
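
As the deprecation note above indicates, a Writer is normally constructed directly now. A minimal sketch of the non-deprecated path (broker address and topic are placeholders):

package main

import (
	"context"
	"log"

	kafka "github.com/segmentio/kafka-go"
)

func main() {
	// Direct construction replaces NewWriter: only Addr is mandatory, plus a
	// topic set either here or on each message.
	w := &kafka.Writer{
		Addr:         kafka.TCP("localhost:9092"), // placeholder broker
		Topic:        "example-topic",             // placeholder topic
		Balancer:     &kafka.LeastBytes{},
		RequiredAcks: kafka.RequireAll,
	}
	defer w.Close()

	if err := w.WriteMessages(context.Background(),
		kafka.Message{Key: []byte("k"), Value: []byte("v")},
	); err != nil {
		log.Fatal(err)
	}
}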
+
+// enter is called by WriteMessages to indicate that a new inflight operation
+// has started, which helps synchronize with Close and ensure that the method
+// does not return until all inflight operations have completed.
+func (w *Writer) enter() bool {
+	w.mutex.Lock()
+	defer w.mutex.Unlock()
+	if w.closed {
+		return false
+	}
+	w.group.Add(1)
+	return true
+}
+
+// leave is called by WriteMessages to indicate that the inflight operation has
+// completed.
+func (w *Writer) leave() { w.group.Done() }
+
+// spawn starts a new asynchronous operation on the writer. This method is used
+// instead of starting goroutines inline to help manage the state of the
+// writer's wait group. The wait group is used to block Close calls until all
+// inflight operations have completed, therefore automatically including those
+// started with calls to spawn.
+func (w *Writer) spawn(f func()) {
+	w.group.Add(1)
+	go func() {
+		defer w.group.Done()
+		f()
+	}()
+}
+
+// Close flushes pending writes, and waits for all writes to complete before
+// returning. Calling Close also prevents new writes from being submitted to
+// the writer; further calls to WriteMessages and the like will fail with
+// io.ErrClosedPipe.
+func (w *Writer) Close() error {
+	w.mutex.Lock()
+	// Marking the writer as closed here causes future calls to WriteMessages to
+	// fail with io.ErrClosedPipe. Mutation of this field is synchronized on the
+	// writer's mutex to ensure that no more increments of the wait group are
+	// performed afterwards (which could otherwise race with the Wait below).
+	w.closed = true
+
+	// close all writers to trigger any pending batches
+	for _, writer := range w.writers {
+		writer.close()
 	}
 
-	if config.RebalanceInterval == 0 {
-		config.RebalanceInterval = 15 * time.Second
+	for partition := range w.writers {
+		delete(w.writers, partition)
 	}
 
-	w := &Writer{
-		config: config,
-		msgs:   make(chan writerMessage, config.QueueCapacity),
-		done:   make(chan struct{}),
-		stats: &writerStats{
-			dialTime:  makeSummary(),
-			writeTime: makeSummary(),
-			waitTime:  makeSummary(),
-			retries:   makeSummary(),
-		},
+	w.mutex.Unlock()
+	w.group.Wait()
+
+	if w.transport != nil {
+		w.transport.CloseIdleConnections()
 	}
 
-	w.join.Add(1)
-	go w.run()
-	return w
+	return nil
 }
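
A small sketch of the close semantics documented above, assuming a writer built as in the previous example: once Close returns, new writes are rejected with io.ErrClosedPipe rather than queued.

func demonstrateClose(w *kafka.Writer) {
	if err := w.Close(); err != nil {
		log.Fatal(err)
	}
	// Pending batches were flushed by Close; this late write is refused.
	err := w.WriteMessages(context.Background(), kafka.Message{Value: []byte("late")})
	if errors.Is(err, io.ErrClosedPipe) {
		log.Print("writer is closed, message was not submitted")
	}
}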
 
 // WriteMessages writes a batch of messages to the kafka topic configured on this
@@ -248,487 +586,724 @@ func NewWriter(config WriterConfig) *Wri
 // blocks until all messages have been written, or until the maximum number of
 // attempts was reached.
 //
-// When the method returns an error, there's no way to know yet which messages
-// have succeeded of failed.
+// When sending synchronously and the writer's batch size is configured to be
+// greater than 1, this method blocks until either a full batch can be assembled
+// or the batch timeout is reached.  The batch size and timeouts are evaluated
+// per partition, so the choice of Balancer can also influence the flushing
+// behavior.  For example, the Hash balancer will require on average N * batch
+// size messages to trigger a flush where N is the number of partitions.  The
+// best way to achieve good batching behavior is to share one Writer among
+// multiple goroutines.
+//
+// When the method returns an error, it may be of type kafka.WriteErrors to allow
+// the caller to determine the status of each message.
 //
 // The context passed as first argument may also be used to asynchronously
 // cancel the operation. Note that in this case there are no guarantees made on
-// whether messages were written to kafka. The program should assume that the
-// whole batch failed and re-write the messages later (which could then cause
-// duplicates).
+// whether messages were written to kafka: they might still be written after
+// this method has returned, so it is important not to modify the byte slices
+// of passed messages if WriteMessages returned early due to a canceled
+// context. The program should assume that the whole batch failed and re-write
+// the messages later (which could then cause duplicates).
 func (w *Writer) WriteMessages(ctx context.Context, msgs ...Message) error {
+	if w.Addr == nil {
+		return errors.New("kafka.(*Writer).WriteMessages: cannot create a kafka writer with a nil address")
+	}
+
+	if !w.enter() {
+		return io.ErrClosedPipe
+	}
+	defer w.leave()
+
 	if len(msgs) == 0 {
 		return nil
 	}
 
-	var res = make(chan error, len(msgs))
-	var err error
+	balancer := w.balancer()
+	batchBytes := w.batchBytes()
 
-	t0 := time.Now()
+	for i := range msgs {
+		n := int64(msgs[i].totalSize())
+		if n > batchBytes {
+			// This error is left for backward compatibility with historical
+			// behavior, but it can yield O(N^2) behavior. The expectation is
+			// that the program will check if WriteMessages returned a
+			// MessageTooLargeError, discard the message that exceeded the
+			// maximum size, and try again.
+			return messageTooLarge(msgs, i)
+		}
+	}
 
-	for attempt := 0; attempt < w.config.MaxAttempts; attempt++ {
-		w.mutex.RLock()
+	// We use int32 here to halve the memory footprint (compared to using int
+	// on 64 bits architectures). We map lists of the message indexes instead
+	// of the message values for the same reason, int32 is 4 bytes, vs a full
+	// Message value which is 100+ bytes and contains pointers and contributes
+	// to increasing GC work.
+	assignments := make(map[topicPartition][]int32)
 
-		if w.closed {
-			w.mutex.RUnlock()
-			return io.ErrClosedPipe
-		}
-
-		for _, msg := range msgs {
-			select {
-			case w.msgs <- writerMessage{
-				msg: msg,
-				res: res,
-			}:
-			case <-ctx.Done():
-				w.mutex.RUnlock()
-				return ctx.Err()
-			}
+	for i, msg := range msgs {
+		topic, err := w.chooseTopic(msg)
+		if err != nil {
+			return err
 		}
 
-		w.mutex.RUnlock()
-
-		if w.config.Async {
-			break
+		numPartitions, err := w.partitions(ctx, topic)
+		if err != nil {
+			return err
 		}
 
-		var retry []Message
+		partition := balancer.Balance(msg, loadCachedPartitions(numPartitions)...)
 
-		for i := 0; i != len(msgs); i++ {
-			select {
-			case e := <-res:
-				if e != nil {
-					if we, ok := e.(*writerError); ok {
-						w.stats.retries.observe(1)
-						retry, err = append(retry, we.msg), we.err
-					} else {
-						err = e
-					}
-				}
-			case <-ctx.Done():
-				return ctx.Err()
-			}
+		key := topicPartition{
+			topic:     topic,
+			partition: int32(partition),
 		}
 
-		if msgs = retry; len(msgs) == 0 {
-			break
-		}
+		assignments[key] = append(assignments[key], int32(i))
+	}
 
-		timer := time.NewTimer(backoff(attempt+1, 100*time.Millisecond, 1*time.Second))
+	batches := w.batchMessages(msgs, assignments)
+	if w.Async {
+		return nil
+	}
+
+	done := ctx.Done()
+	hasErrors := false
+	for batch := range batches {
 		select {
-		case <-timer.C:
-			// Only clear the error (so we retry the loop) if we have more retries, otherwise
-			// we risk silencing the error.
-			if attempt < w.config.MaxAttempts-1 {
-				err = nil
+		case <-done:
+			return ctx.Err()
+		case <-batch.done:
+			if batch.err != nil {
+				hasErrors = true
 			}
-		case <-ctx.Done():
-			err = ctx.Err()
-		case <-w.done:
-			err = io.ErrClosedPipe
 		}
-		timer.Stop()
+	}
 
-		if err != nil {
-			break
-		}
+	if !hasErrors {
+		return nil
 	}
 
-	t1 := time.Now()
-	w.stats.writeTime.observeDuration(t1.Sub(t0))
+	werr := make(WriteErrors, len(msgs))
 
-	return err
+	for batch, indexes := range batches {
+		for _, i := range indexes {
+			werr[i] = batch.err
+		}
+	}
+	return werr
 }
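
A hedged sketch of how a caller might unpack the error types mentioned in the comment above: MessageTooLargeError carries the oversized message plus the remaining ones, and WriteErrors holds one entry per message of a synchronous batch.

func writeWithErrorHandling(ctx context.Context, w *kafka.Writer, msgs []kafka.Message) error {
	err := w.WriteMessages(ctx, msgs...)
	if err == nil {
		return nil
	}

	var tooLarge kafka.MessageTooLargeError
	if errors.As(err, &tooLarge) {
		// Drop the oversized message and retry the rest, as suggested above.
		return writeWithErrorHandling(ctx, w, tooLarge.Remaining)
	}

	var writeErrs kafka.WriteErrors
	if errors.As(err, &writeErrs) {
		for i, werr := range writeErrs {
			if werr != nil {
				log.Printf("message %d failed: %v", i, werr)
			}
		}
	}
	return err
}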
 
-// Stats returns a snapshot of the writer stats since the last time the method
-// was called, or since the writer was created if it is called for the first
-// time.
-//
-// A typical use of this method is to spawn a goroutine that will periodically
-// call Stats on a kafka writer and report the metrics to a stats collection
-// system.
-func (w *Writer) Stats() WriterStats {
-	return WriterStats{
-		Dials:             w.stats.dials.snapshot(),
-		Writes:            w.stats.writes.snapshot(),
-		Messages:          w.stats.messages.snapshot(),
-		Bytes:             w.stats.bytes.snapshot(),
-		Rebalances:        w.stats.rebalances.snapshot(),
-		Errors:            w.stats.errors.snapshot(),
-		DialTime:          w.stats.dialTime.snapshotDuration(),
-		WriteTime:         w.stats.writeTime.snapshotDuration(),
-		WaitTime:          w.stats.waitTime.snapshotDuration(),
-		Retries:           w.stats.retries.snapshot(),
-		BatchSize:         w.stats.batchSize.snapshot(),
-		MaxAttempts:       int64(w.config.MaxAttempts),
-		MaxBatchSize:      int64(w.config.BatchSize),
-		BatchTimeout:      w.config.BatchTimeout,
-		ReadTimeout:       w.config.ReadTimeout,
-		WriteTimeout:      w.config.WriteTimeout,
-		RebalanceInterval: w.config.RebalanceInterval,
-		RequiredAcks:      int64(w.config.RequiredAcks),
-		Async:             w.config.Async,
-		QueueLength:       int64(len(w.msgs)),
-		QueueCapacity:     int64(cap(w.msgs)),
-		ClientID:          w.config.Dialer.ClientID,
-		Topic:             w.config.Topic,
+func (w *Writer) batchMessages(messages []Message, assignments map[topicPartition][]int32) map[*writeBatch][]int32 {
+	var batches map[*writeBatch][]int32
+	if !w.Async {
+		batches = make(map[*writeBatch][]int32, len(assignments))
 	}
-}
 
-// Close flushes all buffered messages and closes the writer. The call to Close
-// aborts any concurrent calls to WriteMessages, which then return with the
-// io.ErrClosedPipe error.
-func (w *Writer) Close() (err error) {
 	w.mutex.Lock()
+	defer w.mutex.Unlock()
 
-	if !w.closed {
-		w.closed = true
-		close(w.msgs)
-		close(w.done)
+	if w.writers == nil {
+		w.writers = map[topicPartition]*partitionWriter{}
 	}
 
-	w.mutex.Unlock()
-	w.join.Wait()
-	return
-}
+	for key, indexes := range assignments {
+		writer := w.writers[key]
+		if writer == nil {
+			writer = newPartitionWriter(w, key)
+			w.writers[key] = writer
+		}
+		wbatches := writer.writeMessages(messages, indexes)
 
-func (w *Writer) run() {
-	defer w.join.Done()
+		for batch, idxs := range wbatches {
+			batches[batch] = idxs
+		}
+	}
 
-	ticker := time.NewTicker(w.config.RebalanceInterval)
-	defer ticker.Stop()
+	return batches
+}
 
-	var rebalance = true
-	var writers = make(map[int]partitionWriter)
-	var partitions []int
-	var err error
+func (w *Writer) produce(key topicPartition, batch *writeBatch) (*ProduceResponse, error) {
+	timeout := w.writeTimeout()
 
-	for {
-		if rebalance {
-			w.stats.rebalances.observe(1)
-			rebalance = false
-
-			var newPartitions []int
-			var oldPartitions = partitions
-
-			if newPartitions, err = w.partitions(); err == nil {
-				for _, partition := range diffp(oldPartitions, newPartitions) {
-					w.close(writers[partition])
-					delete(writers, partition)
-				}
-
-				for _, partition := range diffp(newPartitions, oldPartitions) {
-					writers[partition] = w.open(partition)
-				}
+	ctx, cancel := context.WithTimeout(context.Background(), timeout)
+	defer cancel()
 
-				partitions = newPartitions
+	return w.client(timeout).Produce(ctx, &ProduceRequest{
+		Partition:    int(key.partition),
+		Topic:        key.topic,
+		RequiredAcks: w.RequiredAcks,
+		Compression:  w.Compression,
+		Records: &writerRecords{
+			msgs: batch.msgs,
+		},
+	})
+}
+
+func (w *Writer) partitions(ctx context.Context, topic string) (int, error) {
+	client := w.client(w.readTimeout())
+	// Here we use the transport directly as an optimization to avoid the
+	// construction of temporary request and response objects made by the
+	// (*Client).Metadata API.
+	//
+	// It is expected that the transport will optimize this request by
+	// caching recent results (the kafka.Transport type does).
+	r, err := client.transport().RoundTrip(ctx, client.Addr, &metadataAPI.Request{
+		TopicNames:             []string{topic},
+		AllowAutoTopicCreation: w.AllowAutoTopicCreation,
+	})
+	if err != nil {
+		return 0, err
+	}
+	for _, t := range r.(*metadataAPI.Response).Topics {
+		if t.Name == topic {
+			// This should always hit, unless kafka has a bug.
+			if t.ErrorCode != 0 {
+				return 0, Error(t.ErrorCode)
 			}
+			return len(t.Partitions), nil
 		}
+	}
+	return 0, UnknownTopicOrPartition
+}
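
The same partition count can be obtained through the public client API instead of the raw transport round trip used here; a sketch, assuming the Client.Metadata request/response types and the Topic.Error field behave as described:

func partitionCount(ctx context.Context, addr net.Addr, topic string) (int, error) {
	client := &kafka.Client{Addr: addr}
	resp, err := client.Metadata(ctx, &kafka.MetadataRequest{Topics: []string{topic}})
	if err != nil {
		return 0, err
	}
	for _, t := range resp.Topics {
		if t.Name == topic {
			if t.Error != nil {
				return 0, t.Error
			}
			return len(t.Partitions), nil
		}
	}
	return 0, kafka.UnknownTopicOrPartition
}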
 
-		select {
-		case wm, ok := <-w.msgs:
-			if !ok {
-				for _, writer := range writers {
-					w.close(writer)
-				}
-				return
-			}
+func (w *Writer) client(timeout time.Duration) *Client {
+	return &Client{
+		Addr:      w.Addr,
+		Transport: w.Transport,
+		Timeout:   timeout,
+	}
+}
 
-			if len(partitions) != 0 {
-				selectedPartition := w.config.Balancer.Balance(wm.msg, partitions...)
-				writers[selectedPartition].messages() <- wm
-			} else {
-				// No partitions were found because the topic doesn't exist.
-				if err == nil {
-					err = fmt.Errorf("failed to find any partitions for topic %s", w.config.Topic)
-				}
+func (w *Writer) balancer() Balancer {
+	if w.Balancer != nil {
+		return w.Balancer
+	}
+	return &w.roundRobin
+}
 
-				wm.res <- &writerError{msg: wm.msg, err: err}
-			}
+func (w *Writer) maxAttempts() int {
+	if w.MaxAttempts > 0 {
+		return w.MaxAttempts
+	}
+	// TODO: this is a very high default; if something has failed 9 times it
+	// seems unlikely it will succeed on the 10th attempt. However, it carries
+	// the risk of greatly increasing the volume of requests sent to the
+	// kafka cluster. We should consider reducing this default (3?).
+	return 10
+}
 
-		case <-ticker.C:
-			rebalance = true
-		}
+func (w *Writer) writeBackoffMin() time.Duration {
+	if w.WriteBackoffMin > 0 {
+		return w.WriteBackoffMin
 	}
+	return 100 * time.Millisecond
 }
 
-func (w *Writer) partitions() (partitions []int, err error) {
-	for _, broker := range shuffledStrings(w.config.Brokers) {
-		var conn *Conn
-		var plist []Partition
+func (w *Writer) writeBackoffMax() time.Duration {
+	if w.WriteBackoffMax > 0 {
+		return w.WriteBackoffMax
+	}
+	return 1 * time.Second
+}
 
-		if conn, err = w.config.Dialer.Dial("tcp", broker); err != nil {
-			continue
-		}
+func (w *Writer) batchSize() int {
+	if w.BatchSize > 0 {
+		return w.BatchSize
+	}
+	return 100
+}
 
-		conn.SetReadDeadline(time.Now().Add(w.config.ReadTimeout))
-		plist, err = conn.ReadPartitions(w.config.Topic)
-		conn.Close()
+func (w *Writer) batchBytes() int64 {
+	if w.BatchBytes > 0 {
+		return w.BatchBytes
+	}
+	return 1048576
+}
 
-		if err == nil {
-			partitions = make([]int, len(plist))
-			for i, p := range plist {
-				partitions[i] = p.ID
-			}
-			break
-		}
+func (w *Writer) batchTimeout() time.Duration {
+	if w.BatchTimeout > 0 {
+		return w.BatchTimeout
 	}
+	return 1 * time.Second
+}
 
-	sort.Ints(partitions)
-	return
+func (w *Writer) readTimeout() time.Duration {
+	if w.ReadTimeout > 0 {
+		return w.ReadTimeout
+	}
+	return 10 * time.Second
 }
 
-func (w *Writer) open(partition int) partitionWriter {
-	return w.config.newPartitionWriter(partition, w.config, w.stats)
+func (w *Writer) writeTimeout() time.Duration {
+	if w.WriteTimeout > 0 {
+		return w.WriteTimeout
+	}
+	return 10 * time.Second
 }
 
-func (w *Writer) close(writer partitionWriter) {
-	w.join.Add(1)
-	go func() {
-		writer.close()
-		w.join.Done()
-	}()
+func (w *Writer) withLogger(do func(Logger)) {
+	if w.Logger != nil {
+		do(w.Logger)
+	}
 }
 
-func diffp(new []int, old []int) (diff []int) {
-	for _, p := range new {
-		if i := sort.SearchInts(old, p); i == len(old) || old[i] != p {
-			diff = append(diff, p)
-		}
-	}
-	return
-}
-
-type partitionWriter interface {
-	messages() chan<- writerMessage
-	close()
-}
-
-type writer struct {
-	brokers      []string
-	topic        string
-	partition    int
-	requiredAcks int
-	batchSize    int
-	batchTimeout time.Duration
-	writeTimeout time.Duration
-	dialer       *Dialer
-	msgs         chan writerMessage
-	join         sync.WaitGroup
-	stats        *writerStats
-	codec        CompressionCodec
-	logger       *log.Logger
-	errorLogger  *log.Logger
-}
-
-func newWriter(partition int, config WriterConfig, stats *writerStats) *writer {
-	w := &writer{
-		brokers:      config.Brokers,
-		topic:        config.Topic,
-		partition:    partition,
-		requiredAcks: config.RequiredAcks,
-		batchSize:    config.BatchSize,
-		batchTimeout: config.BatchTimeout,
-		writeTimeout: config.WriteTimeout,
-		dialer:       config.Dialer,
-		msgs:         make(chan writerMessage, config.QueueCapacity),
-		stats:        stats,
-		codec:        config.CompressionCodec,
-		logger:       config.Logger,
-		errorLogger:  config.ErrorLogger,
+func (w *Writer) withErrorLogger(do func(Logger)) {
+	if w.ErrorLogger != nil {
+		do(w.ErrorLogger)
+	} else {
+		w.withLogger(do)
 	}
-	w.join.Add(1)
-	go w.run()
-	return w
 }
 
-func (w *writer) close() {
-	close(w.msgs)
-	w.join.Wait()
+func (w *Writer) stats() *writerStats {
+	w.once.Do(func() {
+		// This field is not nil when the writer was constructed with NewWriter
+		// to share the value with the dial function and count dials.
+		if w.writerStats == nil {
+			w.writerStats = new(writerStats)
+		}
+	})
+	return w.writerStats
+}
+
+// Stats returns a snapshot of the writer stats since the last time the method
+// was called, or since the writer was created if it is called for the first
+// time.
+//
+// A typical use of this method is to spawn a goroutine that will periodically
+// call Stats on a kafka writer and report the metrics to a stats collection
+// system.
+func (w *Writer) Stats() WriterStats {
+	stats := w.stats()
+	return WriterStats{
+		Dials:           stats.dials.snapshot(),
+		Writes:          stats.writes.snapshot(),
+		Messages:        stats.messages.snapshot(),
+		Bytes:           stats.bytes.snapshot(),
+		Errors:          stats.errors.snapshot(),
+		DialTime:        stats.dialTime.snapshotDuration(),
+		BatchTime:       stats.batchTime.snapshotDuration(),
+		BatchQueueTime:  stats.batchQueueTime.snapshotDuration(),
+		WriteTime:       stats.writeTime.snapshotDuration(),
+		WaitTime:        stats.waitTime.snapshotDuration(),
+		Retries:         stats.retries.snapshot(),
+		BatchSize:       stats.batchSize.snapshot(),
+		BatchBytes:      stats.batchSizeBytes.snapshot(),
+		MaxAttempts:     int64(w.maxAttempts()),
+		WriteBackoffMin: w.writeBackoffMin(),
+		WriteBackoffMax: w.writeBackoffMax(),
+		MaxBatchSize:    int64(w.batchSize()),
+		BatchTimeout:    w.batchTimeout(),
+		ReadTimeout:     w.readTimeout(),
+		WriteTimeout:    w.writeTimeout(),
+		RequiredAcks:    int64(w.RequiredAcks),
+		Async:           w.Async,
+		Topic:           w.Topic,
+	}
 }
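
Following the comment above, a typical pattern is a goroutine that periodically snapshots the stats and forwards them to a metrics sink; a minimal sketch that just logs a few counters:

func reportWriterStats(ctx context.Context, w *kafka.Writer, interval time.Duration) {
	ticker := time.NewTicker(interval)
	defer ticker.Stop()

	for {
		select {
		case <-ctx.Done():
			return
		case <-ticker.C:
			stats := w.Stats() // counters are reset on every call
			log.Printf("writes=%d messages=%d bytes=%d errors=%d avg_batch_time=%s",
				stats.Writes, stats.Messages, stats.Bytes, stats.Errors, stats.BatchTime.Avg)
		}
	}
}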
 
-func (w *writer) messages() chan<- writerMessage {
-	return w.msgs
+func (w *Writer) chooseTopic(msg Message) (string, error) {
+	// w.Topic and msg.Topic are mutually exclusive: exactly one of them must
+	// be set, otherwise we return an error.
+	if w.Topic != "" && msg.Topic != "" {
+		return "", errors.New("kafka.(*Writer): Topic must not be specified for both Writer and Message")
+	} else if w.Topic == "" && msg.Topic == "" {
+		return "", errors.New("kafka.(*Writer): Topic must be specified for Writer or Message")
+	}
+
+	// now we choose the topic, depending on which one is not empty
+	if msg.Topic != "" {
+		return msg.Topic, nil
+	}
+
+	return w.Topic, nil
+}
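
To make the exclusivity rule concrete, a short sketch of the two valid configurations (topic names are placeholders):

func topicConfigurations(ctx context.Context) {
	// One topic for every message: set it on the writer, leave Message.Topic empty.
	single := &kafka.Writer{Addr: kafka.TCP("localhost:9092"), Topic: "events"}
	_ = single.WriteMessages(ctx, kafka.Message{Value: []byte("a")})

	// Multiple topics: leave Writer.Topic empty, set Message.Topic per message.
	multi := &kafka.Writer{Addr: kafka.TCP("localhost:9092")}
	_ = multi.WriteMessages(ctx,
		kafka.Message{Topic: "events", Value: []byte("a")},
		kafka.Message{Topic: "audit", Value: []byte("b")},
	)
}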
+
+type batchQueue struct {
+	queue []*writeBatch
+
+	// Pointers are used here to make `go vet` happy, and avoid copying mutexes.
+	// It may be better to revert these to non-pointers and avoid the copies in
+	// a different way.
+	mutex *sync.Mutex
+	cond  *sync.Cond
+
+	closed bool
 }
 
-func (w *writer) withLogger(do func(*log.Logger)) {
-	if w.logger != nil {
-		do(w.logger)
+func (b *batchQueue) Put(batch *writeBatch) bool {
+	b.cond.L.Lock()
+	defer b.cond.L.Unlock()
+	defer b.cond.Broadcast()
+
+	if b.closed {
+		return false
 	}
+	b.queue = append(b.queue, batch)
+	return true
 }
 
-func (w *writer) withErrorLogger(do func(*log.Logger)) {
-	if w.errorLogger != nil {
-		do(w.errorLogger)
-	} else {
-		w.withLogger(do)
+func (b *batchQueue) Get() *writeBatch {
+	b.cond.L.Lock()
+	defer b.cond.L.Unlock()
+
+	for len(b.queue) == 0 && !b.closed {
+		b.cond.Wait()
 	}
+
+	if len(b.queue) == 0 {
+		return nil
+	}
+
+	batch := b.queue[0]
+	b.queue[0] = nil
+	b.queue = b.queue[1:]
+
+	return batch
 }
 
-func (w *writer) run() {
-	defer w.join.Done()
+func (b *batchQueue) Close() {
+	b.cond.L.Lock()
+	defer b.cond.L.Unlock()
+	defer b.cond.Broadcast()
 
-	ticker := time.NewTicker(w.batchTimeout / 10)
-	defer ticker.Stop()
+	b.closed = true
+}
 
-	var conn *Conn
-	var done bool
-	var batch = make([]Message, 0, w.batchSize)
-	var resch = make([](chan<- error), 0, w.batchSize)
-	var lastFlushAt = time.Now()
-
-	defer func() {
-		if conn != nil {
-			conn.Close()
-		}
-	}()
+func newBatchQueue(initialSize int) batchQueue {
+	bq := batchQueue{
+		queue: make([]*writeBatch, 0, initialSize),
+		mutex: &sync.Mutex{},
+		cond:  &sync.Cond{},
+	}
 
-	for !done {
-		var mustFlush bool
+	bq.cond.L = bq.mutex
 
-		select {
-		case wm, ok := <-w.msgs:
-			if !ok {
-				done, mustFlush = true, true
-			} else {
-				batch = append(batch, wm.msg)
-				resch = append(resch, wm.res)
-				mustFlush = len(batch) >= w.batchSize
-			}
+	return bq
+}
+
+// partitionWriter is a writer for a topic-partition pair. It maintains message
+// ordering across batches of messages.
+type partitionWriter struct {
+	meta  topicPartition
+	queue batchQueue
+
+	mutex     sync.Mutex
+	currBatch *writeBatch
+
+	// reference to the Writer that owns this partitionWriter. Used for the
+	// produce logic as well as stat tracking.
+	w *Writer
+}
 
-		case now := <-ticker.C:
-			mustFlush = now.Sub(lastFlushAt) > w.batchTimeout
+func newPartitionWriter(w *Writer, key topicPartition) *partitionWriter {
+	writer := &partitionWriter{
+		meta:  key,
+		queue: newBatchQueue(10),
+		w:     w,
+	}
+	w.spawn(writer.writeBatches)
+	return writer
+}
+
+func (ptw *partitionWriter) writeBatches() {
+	for {
+		batch := ptw.queue.Get()
+
+		// The only time Get returns nil is when the queue is closed and
+		// empty. A closed queue means the Writer is closed, so once we're
+		// here it's time to exit.
+		if batch == nil {
+			return
 		}
 
-		if mustFlush {
-			lastFlushAt = time.Now()
+		ptw.writeBatch(batch)
+	}
+}
 
-			if len(batch) == 0 {
-				continue
-			}
+func (ptw *partitionWriter) writeMessages(msgs []Message, indexes []int32) map[*writeBatch][]int32 {
+	ptw.mutex.Lock()
+	defer ptw.mutex.Unlock()
 
-			var err error
-			if conn, err = w.write(conn, batch, resch); err != nil {
-				if conn != nil {
-					conn.Close()
-					conn = nil
-				}
-			}
+	batchSize := ptw.w.batchSize()
+	batchBytes := ptw.w.batchBytes()
 
-			for i := range batch {
-				batch[i] = Message{}
-			}
+	var batches map[*writeBatch][]int32
+	if !ptw.w.Async {
+		batches = make(map[*writeBatch][]int32, 1)
+	}
 
-			for i := range resch {
-				resch[i] = nil
-			}
+	for _, i := range indexes {
+	assignMessage:
+		batch := ptw.currBatch
+		if batch == nil {
+			batch = ptw.newWriteBatch()
+			ptw.currBatch = batch
+		}
+		if !batch.add(msgs[i], batchSize, batchBytes) {
+			batch.trigger()
+			ptw.queue.Put(batch)
+			ptw.currBatch = nil
+			goto assignMessage
+		}
 
-			batch = batch[:0]
-			resch = resch[:0]
+		if batch.full(batchSize, batchBytes) {
+			batch.trigger()
+			ptw.queue.Put(batch)
+			ptw.currBatch = nil
+		}
+
+		if !ptw.w.Async {
+			batches[batch] = append(batches[batch], i)
 		}
 	}
+	return batches
 }
 
-func (w *writer) dial() (conn *Conn, err error) {
-	for _, broker := range shuffledStrings(w.brokers) {
-		t0 := time.Now()
-		if conn, err = w.dialer.DialLeader(context.Background(), "tcp", broker, w.topic, w.partition); err == nil {
-			t1 := time.Now()
-			w.stats.dials.observe(1)
-			w.stats.dialTime.observeDuration(t1.Sub(t0))
-			conn.SetRequiredAcks(w.requiredAcks)
-			break
+// ptw.w can be accessed here because this is called with the lock ptw.mutex already held.
+func (ptw *partitionWriter) newWriteBatch() *writeBatch {
+	batch := newWriteBatch(time.Now(), ptw.w.batchTimeout())
+	ptw.w.spawn(func() { ptw.awaitBatch(batch) })
+	return batch
+}
+
+// awaitBatch waits for a batch to either fill up or time out.
+// If the batch fills up it only stops the timer; if the timer
+// expires it queues the batch for writing if needed.
+func (ptw *partitionWriter) awaitBatch(batch *writeBatch) {
+	select {
+	case <-batch.timer.C:
+		ptw.mutex.Lock()
+		// Detach the batch from the writer if we're still attached
+		// and queue it for writing.
+		// Only the current batch can expire; all previous batches were already written to the queue.
+		// If writeMessages locks ptw.mutex after the timer fires but before this goroutine
+		// can lock ptw.mutex, it may have filled the batch and enqueued it, in which case
+		// ptw.currBatch != batch and we just move on.
+		// Otherwise, we detach the batch from the partitionWriter and enqueue it for writing.
+		if ptw.currBatch == batch {
+			ptw.queue.Put(batch)
+			ptw.currBatch = nil
 		}
+		ptw.mutex.Unlock()
+	case <-batch.ready:
+		// The batch became full, was removed from the partitionWriter, and its
+		// ready channel was closed. We need to stop the timer to avoid
+		// having it leak until it expires.
+		batch.timer.Stop()
 	}
-	return
+	stats := ptw.w.stats()
+	stats.batchQueueTime.observe(int64(time.Since(batch.time)))
 }
 
-func (w *writer) write(conn *Conn, batch []Message, resch [](chan<- error)) (ret *Conn, err error) {
-	w.stats.writes.observe(1)
-	if conn == nil {
-		if conn, err = w.dial(); err != nil {
-			w.stats.errors.observe(1)
-			w.withErrorLogger(func(logger *log.Logger) {
-				logger.Printf("error dialing kafka brokers for topic %s (partition %d): %s", w.topic, w.partition, err)
+func (ptw *partitionWriter) writeBatch(batch *writeBatch) {
+	stats := ptw.w.stats()
+	stats.batchTime.observe(int64(time.Since(batch.time)))
+	stats.batchSize.observe(int64(len(batch.msgs)))
+	stats.batchSizeBytes.observe(batch.bytes)
+
+	var res *ProduceResponse
+	var err error
+	key := ptw.meta
+	for attempt, maxAttempts := 0, ptw.w.maxAttempts(); attempt < maxAttempts; attempt++ {
+		if attempt != 0 {
+			stats.retries.observe(1)
+			// TODO: should there be a way to asynchronously cancel this
+			// operation?
+			//
+			// * If all goroutines that added message to this batch have stopped
+			//   waiting for it, should we abort?
+			//
+			// * If the writer has been closed? It reduces the durability
+			//   guarantees to abort, but may be better to avoid long wait times
+			//   on close.
+			//
+			delay := backoff(attempt, ptw.w.writeBackoffMin(), ptw.w.writeBackoffMax())
+			ptw.w.withLogger(func(log Logger) {
+				log.Printf("backing off %s writing %d messages to %s (partition: %d)", delay, len(batch.msgs), key.topic, key.partition)
 			})
-			for i, res := range resch {
-				res <- &writerError{msg: batch[i], err: err}
-			}
-			return
+			time.Sleep(delay)
 		}
-	}
-
-	t0 := time.Now()
-	conn.SetWriteDeadline(time.Now().Add(w.writeTimeout))
 
-	if _, err = conn.WriteCompressedMessages(w.codec, batch...); err != nil {
-		w.stats.errors.observe(1)
-		w.withErrorLogger(func(logger *log.Logger) {
-			logger.Printf("error writing messages to %s (partition %d): %s", w.topic, w.partition, err)
+		ptw.w.withLogger(func(log Logger) {
+			log.Printf("writing %d messages to %s (partition: %d)", len(batch.msgs), key.topic, key.partition)
 		})
-		for i, res := range resch {
-			res <- &writerError{msg: batch[i], err: err}
+
+		start := time.Now()
+		res, err = ptw.w.produce(key, batch)
+
+		stats.writes.observe(1)
+		stats.messages.observe(int64(len(batch.msgs)))
+		stats.bytes.observe(batch.bytes)
+		// stats.writeTime used to report the duration of WriteMessages, but the
+		// implementation was broken and reporting values in the nanoseconds
+		// range. In kafka-go 0.4, we recycled this value to instead report the
+		// duration of produce requests, and changed the stats.waitTime value to
+		// report the time that kafka has throttled the requests for.
+		stats.writeTime.observe(int64(time.Since(start)))
+
+		if res != nil {
+			err = res.Error
+			stats.waitTime.observe(int64(res.Throttle))
 		}
-	} else {
-		for _, m := range batch {
-			w.stats.messages.observe(1)
-			w.stats.bytes.observe(int64(len(m.Key) + len(m.Value)))
+
+		if err == nil {
+			break
 		}
-		for _, res := range resch {
-			res <- nil
+
+		stats.errors.observe(1)
+
+		ptw.w.withErrorLogger(func(log Logger) {
+			log.Printf("error writing messages to %s (partition %d, attempt %d): %s", key.topic, key.partition, attempt, err)
+		})
+
+		if !isTemporary(err) && !isTransientNetworkError(err) {
+			break
 		}
 	}
 
-	t1 := time.Now()
-	w.stats.waitTime.observeDuration(t1.Sub(t0))
-	w.stats.batchSize.observe(int64(len(batch)))
+	if res != nil {
+		for i := range batch.msgs {
+			m := &batch.msgs[i]
+			m.Topic = key.topic
+			m.Partition = int(key.partition)
+			m.Offset = res.BaseOffset + int64(i)
 
-	ret = conn
-	return
+			if m.Time.IsZero() {
+				m.Time = res.LogAppendTime
+			}
+		}
+	}
+
+	if ptw.w.Completion != nil {
+		ptw.w.Completion(batch.msgs, err)
+	}
+
+	batch.complete(err)
 }
 
-type writerMessage struct {
-	msg Message
-	res chan<- error
+func (ptw *partitionWriter) close() {
+	ptw.mutex.Lock()
+	defer ptw.mutex.Unlock()
+
+	if ptw.currBatch != nil {
+		batch := ptw.currBatch
+		ptw.queue.Put(batch)
+		ptw.currBatch = nil
+		batch.trigger()
+	}
+
+	ptw.queue.Close()
 }
 
-type writerError struct {
-	msg Message
-	err error
+type writeBatch struct {
+	time  time.Time
+	msgs  []Message
+	size  int
+	bytes int64
+	ready chan struct{}
+	done  chan struct{}
+	timer *time.Timer
+	err   error // result of the batch completion
 }
 
-func (e *writerError) Cause() error {
-	return e.err
+func newWriteBatch(now time.Time, timeout time.Duration) *writeBatch {
+	return &writeBatch{
+		time:  now,
+		ready: make(chan struct{}),
+		done:  make(chan struct{}),
+		timer: time.NewTimer(timeout),
+	}
 }
 
-func (e *writerError) Error() string {
-	return e.err.Error()
+func (b *writeBatch) add(msg Message, maxSize int, maxBytes int64) bool {
+	bytes := int64(msg.totalSize())
+
+	if b.size > 0 && (b.bytes+bytes) > maxBytes {
+		return false
+	}
+
+	if cap(b.msgs) == 0 {
+		b.msgs = make([]Message, 0, maxSize)
+	}
+
+	b.msgs = append(b.msgs, msg)
+	b.size++
+	b.bytes += bytes
+	return true
 }
 
-func (e *writerError) Temporary() bool {
-	return isTemporary(e.err)
+func (b *writeBatch) full(maxSize int, maxBytes int64) bool {
+	return b.size >= maxSize || b.bytes >= maxBytes
 }
 
-func (e *writerError) Timeout() bool {
-	return isTimeout(e.err)
+func (b *writeBatch) trigger() {
+	close(b.ready)
 }
 
-func shuffledStrings(list []string) []string {
-	shuffledList := make([]string, len(list))
-	copy(shuffledList, list)
+func (b *writeBatch) complete(err error) {
+	b.err = err
+	close(b.done)
+}
 
-	shufflerMutex.Lock()
+type writerRecords struct {
+	msgs   []Message
+	index  int
+	record Record
+	key    bytesReadCloser
+	value  bytesReadCloser
+}
 
-	for i := range shuffledList {
-		j := shuffler.Intn(i + 1)
-		shuffledList[i], shuffledList[j] = shuffledList[j], shuffledList[i]
+func (r *writerRecords) ReadRecord() (*Record, error) {
+	if r.index >= 0 && r.index < len(r.msgs) {
+		m := &r.msgs[r.index]
+		r.index++
+		r.record = Record{
+			Time:    m.Time,
+			Headers: m.Headers,
+		}
+		if m.Key != nil {
+			r.key.Reset(m.Key)
+			r.record.Key = &r.key
+		}
+		if m.Value != nil {
+			r.value.Reset(m.Value)
+			r.record.Value = &r.value
+		}
+		return &r.record, nil
 	}
-
-	shufflerMutex.Unlock()
-	return shuffledList
+	return nil, io.EOF
 }
 
-var (
-	shufflerMutex = sync.Mutex{}
-	shuffler      = rand.New(rand.NewSource(time.Now().Unix()))
-)
+type bytesReadCloser struct{ bytes.Reader }
+
+func (*bytesReadCloser) Close() error { return nil }
+
+// A cache of []int values passed to balancers of writers, used to amortize the
+// heap allocation of the partition index lists.
+//
+// With hindsight, the use of `...int` to pass the partition list to Balancers
+// was not the best design choice: kafka partition numbers are monotonically
+// increasing, we could have simply passed the number of partitions instead.
+// If we ever revisit this API, we can hopefully remove this cache.
+var partitionsCache atomic.Value
+
+func loadCachedPartitions(numPartitions int) []int {
+	partitions, ok := partitionsCache.Load().([]int)
+	if ok && len(partitions) >= numPartitions {
+		return partitions[:numPartitions]
+	}
+
+	const alignment = 128
+	n := ((numPartitions / alignment) + 1) * alignment
+
+	partitions = make([]int, n)
+	for i := range partitions {
+		partitions[i] = i
+	}
+
+	partitionsCache.Store(partitions)
+	return partitions[:numPartitions]
+}
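
For example, with alignment = 128 a request for anything from 1 to 127 partitions allocates a 128-entry slice, a request for 200 allocates 256, and an exact multiple such as 128 also rounds up to 256 because of the unconditional +1 in the formula above.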
diff -pruN 0.2.1-1.1/writer_test.go 0.4.49+ds1-1/writer_test.go
--- 0.2.1-1.1/writer_test.go	2018-11-20 17:06:33.000000000 +0000
+++ 0.4.49+ds1-1/writer_test.go	2025-08-21 19:15:53.000000000 +0000
@@ -3,15 +3,100 @@ package kafka
 import (
 	"context"
 	"errors"
+	"fmt"
 	"io"
+	"math"
+	"strconv"
 	"strings"
+	"sync"
 	"testing"
 	"time"
+
+	"github.com/segmentio/kafka-go/sasl/plain"
 )
 
-func TestWriter(t *testing.T) {
-	t.Parallel()
+func TestBatchQueue(t *testing.T) {
+	tests := []struct {
+		scenario string
+		function func(*testing.T)
+	}{
+		{
+			scenario: "the remaining items in a queue can be gotten after closing",
+			function: testBatchQueueGetWorksAfterClose,
+		},
+		{
+			scenario: "putting into a closed queue fails",
+			function: testBatchQueuePutAfterCloseFails,
+		},
+		{
+			scenario: "putting into a queue awakes a goroutine in a get call",
+			function: testBatchQueuePutWakesSleepingGetter,
+		},
+	}
+
+	for _, test := range tests {
+		testFunc := test.function
+		t.Run(test.scenario, func(t *testing.T) {
+			t.Parallel()
+			testFunc(t)
+		})
+	}
+}
+
+func testBatchQueuePutWakesSleepingGetter(t *testing.T) {
+	bq := newBatchQueue(10)
+	var wg sync.WaitGroup
+	ready := make(chan struct{})
+	var batch *writeBatch
+	wg.Add(1)
+	go func() {
+		defer wg.Done()
+		close(ready)
+		batch = bq.Get()
+	}()
+	<-ready
+	bq.Put(newWriteBatch(time.Now(), time.Hour*100))
+	wg.Wait()
+	if batch == nil {
+		t.Fatal("got nil batch")
+	}
+}
+
+func testBatchQueuePutAfterCloseFails(t *testing.T) {
+	bq := newBatchQueue(10)
+	bq.Close()
+	if put := bq.Put(newWriteBatch(time.Now(), time.Hour*100)); put {
+		t.Fatal("put batch into closed queue")
+	}
+}
 
+func testBatchQueueGetWorksAfterClose(t *testing.T) {
+	bq := newBatchQueue(10)
+	enqueueBatches := []*writeBatch{
+		newWriteBatch(time.Now(), time.Hour*100),
+		newWriteBatch(time.Now(), time.Hour*100),
+	}
+
+	for _, batch := range enqueueBatches {
+		put := bq.Put(batch)
+		if !put {
+			t.Fatal("failed to put batch into queue")
+		}
+	}
+
+	bq.Close()
+
+	batchesGotten := 0
+	for batchesGotten != 2 {
+		dequeueBatch := bq.Get()
+		if dequeueBatch == nil {
+			t.Fatalf("no batch returned from get")
+		}
+		batchesGotten++
+	}
+}
+
+func TestWriter(t *testing.T) {
 	tests := []struct {
 		scenario string
 		function func(*testing.T)
@@ -30,6 +115,82 @@ func TestWriter(t *testing.T) {
 			scenario: "running out of max attempts should return an error",
 			function: testWriterMaxAttemptsErr,
 		},
+
+		{
+			scenario: "writing a message larger than the max bytes should return an error",
+			function: testWriterMaxBytes,
+		},
+
+		{
+			scenario: "writing a batch of messages based on batch byte size",
+			function: testWriterBatchBytes,
+		},
+
+		{
+			scenario: "writing a batch of messages",
+			function: testWriterBatchSize,
+		},
+
+		{
+			scenario: "writing messages with a small batch byte size",
+			function: testWriterSmallBatchBytes,
+		},
+		{
+			scenario: "writing messages with headers",
+			function: testWriterBatchBytesHeaders,
+		},
+		{
+			scenario: "setting a non default balancer on the writer",
+			function: testWriterSetsRightBalancer,
+		},
+		{
+			scenario: "setting RequiredAcks to None in Writer does not cause a panic",
+			function: testWriterRequiredAcksNone,
+		},
+		{
+			scenario: "writing messages to multiple topics",
+			function: testWriterMultipleTopics,
+		},
+		{
+			scenario: "writing messages without specifying a topic",
+			function: testWriterMissingTopic,
+		},
+		{
+			scenario: "specifying topic for message when already set for writer",
+			function: testWriterUnexpectedMessageTopic,
+		},
+		{
+			scenario: "writing a message to an invalid partition",
+			function: testWriterInvalidPartition,
+		},
+		{
+			scenario: "writing a message to a non-existent topic creates the topic",
+			function: testWriterAutoCreateTopic,
+		},
+		{
+			scenario: "terminates on an attempt to write a message to a nonexistent topic",
+			function: testWriterTerminateMissingTopic,
+		},
+		{
+			scenario: "writing a message with SASL Plain authentication",
+			function: testWriterSasl,
+		},
+		{
+			scenario: "test default configuration values",
+			function: testWriterDefaults,
+		},
+		{
+			scenario: "test default stats values",
+			function: testWriterDefaultStats,
+		},
+		{
+			scenario: "test stats values with override config",
+			function: testWriterOverrideConfigStats,
+		},
+		{
+			scenario: "test write message with writer data",
+			function: testWriteMessageWithWriterData,
+		},
 	}
 
 	for _, test := range tests {
@@ -50,8 +211,9 @@ func newTestWriter(config WriterConfig)
 
 func testWriterClose(t *testing.T) {
 	const topic = "test-writer-0"
-
 	createTopic(t, topic, 1)
+	defer deleteTopic(t, topic)
+
 	w := newTestWriter(WriterConfig{
 		Topic: topic,
 	})
@@ -61,10 +223,53 @@ func testWriterClose(t *testing.T) {
 	}
 }
 
-func testWriterRoundRobin1(t *testing.T) {
+func testWriterRequiredAcksNone(t *testing.T) {
+	topic := makeTopic()
+	createTopic(t, topic, 1)
+	defer deleteTopic(t, topic)
+
+	transport := &Transport{}
+	defer transport.CloseIdleConnections()
+
+	writer := &Writer{
+		Addr:         TCP("localhost:9092"),
+		Topic:        topic,
+		Balancer:     &RoundRobin{},
+		RequiredAcks: RequireNone,
+		Transport:    transport,
+	}
+	defer writer.Close()
+
+	msg := Message{
+		Key:   []byte("ThisIsAKey"),
+		Value: []byte("Test message for required acks test"),
+	}
+
+	err := writer.WriteMessages(context.Background(), msg)
+	if err != nil {
+		t.Fatal(err)
+	}
+}
+
+func testWriterSetsRightBalancer(t *testing.T) {
 	const topic = "test-writer-1"
+	balancer := &CRC32Balancer{}
+	w := newTestWriter(WriterConfig{
+		Topic:    topic,
+		Balancer: balancer,
+	})
+	defer w.Close()
 
+	if w.Balancer != balancer {
+		t.Errorf("Balancer not set correctly")
+	}
+}
+
+func testWriterRoundRobin1(t *testing.T) {
+	const topic = "test-writer-1"
 	createTopic(t, topic, 1)
+	defer deleteTopic(t, topic)
+
 	offset, err := readOffset(topic, 0)
 	if err != nil {
 		t.Fatal(err)
@@ -84,7 +289,6 @@ func testWriterRoundRobin1(t *testing.T)
 	}
 
 	msgs, err := readPartition(topic, 0, offset)
-
 	if err != nil {
 		t.Error("error reading partition", err)
 		return
@@ -103,38 +307,36 @@ func testWriterRoundRobin1(t *testing.T)
 	}
 }
 
-type fakeWriter struct{}
-
-func (f *fakeWriter) messages() chan<- writerMessage {
-	ch := make(chan writerMessage, 1)
-
-	go func() {
-		for {
-			msg := <-ch
-			msg.res <- &writerError{
-				err: errors.New("bad attempt"),
-			}
+func TestValidateWriter(t *testing.T) {
+	tests := []struct {
+		config       WriterConfig
+		errorOccured bool
+	}{
+		{config: WriterConfig{}, errorOccured: true},
+		{config: WriterConfig{Brokers: []string{"broker1", "broker2"}}, errorOccured: false},
+		{config: WriterConfig{Brokers: []string{"broker1"}, Topic: "topic1"}, errorOccured: false},
+	}
+	for _, test := range tests {
+		err := test.config.Validate()
+		if test.errorOccured && err == nil {
+			t.Fail()
 		}
-	}()
-
-	return ch
-}
-
-func (f *fakeWriter) close() {
-
+		if !test.errorOccured && err != nil {
+			t.Fail()
+		}
+	}
 }
 
 func testWriterMaxAttemptsErr(t *testing.T) {
-	const topic = "test-writer-2"
-
+	topic := makeTopic()
 	createTopic(t, topic, 1)
+	defer deleteTopic(t, topic)
+
 	w := newTestWriter(WriterConfig{
+		Brokers:     []string{"localhost:9999"}, // nothing is listening here
 		Topic:       topic,
-		MaxAttempts: 1,
+		MaxAttempts: 3,
 		Balancer:    &RoundRobin{},
-		newPartitionWriter: func(p int, config WriterConfig, stats *writerStats) partitionWriter {
-			return &fakeWriter{}
-		},
 	})
 	defer w.Close()
 
@@ -143,23 +345,81 @@ func testWriterMaxAttemptsErr(t *testing
 	}); err == nil {
 		t.Error("expected error")
 		return
+	}
+}
+
+func testWriterMaxBytes(t *testing.T) {
+	topic := makeTopic()
+	createTopic(t, topic, 1)
+	defer deleteTopic(t, topic)
+
+	w := newTestWriter(WriterConfig{
+		Topic:      topic,
+		BatchBytes: 25,
+	})
+	defer w.Close()
+
+	if err := w.WriteMessages(context.Background(), Message{
+		Value: []byte("Hi"),
+	}); err != nil {
+		t.Error(err)
+		return
+	}
+
+	firstMsg := []byte("Hello World!")
+	secondMsg := []byte("LeftOver!")
+	msgs := []Message{
+		{
+			Value: firstMsg,
+		},
+		{
+			Value: secondMsg,
+		},
+	}
+	if err := w.WriteMessages(context.Background(), msgs...); err == nil {
+		t.Error("expected error")
+		return
 	} else if err != nil {
-		if !strings.Contains(err.Error(), "bad attempt") {
+		var e MessageTooLargeError
+		switch {
+		case errors.As(err, &e):
+			if string(e.Message.Value) != string(firstMsg) {
+				t.Errorf("unexpected returned message. Expected: %s, Got %s", firstMsg, e.Message.Value)
+				return
+			}
+			if len(e.Remaining) != 1 {
+				t.Error("expected remaining errors; found none")
+				return
+			}
+			if string(e.Remaining[0].Value) != string(secondMsg) {
+				t.Errorf("unexpected returned message. Expected: %s, Got %s", secondMsg, e.Remaining[0].Value)
+				return
+			}
+
+		default:
 			t.Errorf("unexpected error: %s", err)
 			return
 		}
 	}
 }
 
+// readOffset gets the latest offset for the given topic/partition.
 func readOffset(topic string, partition int) (offset int64, err error) {
 	var conn *Conn
 
-	if conn, err = DialLeader(context.Background(), "tcp", "localhost:9092", topic, partition); err != nil {
+	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+	defer cancel()
+
+	if conn, err = DialLeader(ctx, "tcp", "localhost:9092", topic, partition); err != nil {
+		err = fmt.Errorf("readOffset, DialLeader: %w", err)
 		return
 	}
 	defer conn.Close()
 
 	offset, err = conn.ReadLastOffset()
+	if err != nil {
+		err = fmt.Errorf("readOffset, conn.ReadLastOffset: %w", err)
+	}
 	return
 }
 
@@ -172,7 +432,7 @@ func readPartition(topic string, partiti
 	defer conn.Close()
 
 	conn.Seek(offset, SeekAbsolute)
-	conn.SetReadDeadline(time.Now().Add(100 * time.Millisecond))
+	conn.SetReadDeadline(time.Now().Add(10 * time.Second))
 	batch := conn.ReadBatch(0, 1000000000)
 	defer batch.Close()
 
@@ -180,7 +440,7 @@ func readPartition(topic string, partiti
 		var msg Message
 
 		if msg, err = batch.ReadMessage(); err != nil {
-			if err == io.EOF {
+			if errors.Is(err, io.EOF) {
 				err = nil
 			}
 			return
@@ -189,3 +449,591 @@ func readPartition(topic string, partiti
 		msgs = append(msgs, msg)
 	}
 }
+
+func testWriterBatchBytes(t *testing.T) {
+	topic := makeTopic()
+	createTopic(t, topic, 1)
+	defer deleteTopic(t, topic)
+
+	offset, err := readOffset(topic, 0)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	w := newTestWriter(WriterConfig{
+		Topic:        topic,
+		BatchBytes:   50,
+		BatchTimeout: math.MaxInt32 * time.Second,
+		Balancer:     &RoundRobin{},
+	})
+	defer w.Close()
+
+	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+	defer cancel()
+	if err := w.WriteMessages(ctx, []Message{
+		{Value: []byte("M0")}, // 25 Bytes
+		{Value: []byte("M1")}, // 25 Bytes
+		{Value: []byte("M2")}, // 25 Bytes
+		{Value: []byte("M3")}, // 25 Bytes
+	}...); err != nil {
+		t.Error(err)
+		return
+	}
+
+	if w.Stats().Writes != 2 {
+		t.Error("didn't create expected batches")
+		return
+	}
+	msgs, err := readPartition(topic, 0, offset)
+	if err != nil {
+		t.Error("error reading partition", err)
+		return
+	}
+
+	if len(msgs) != 4 {
+		t.Error("bad messages in partition", msgs)
+		return
+	}
+
+	for i, m := range msgs {
+		if string(m.Value) == "M"+strconv.Itoa(i) {
+			continue
+		}
+		t.Error("bad messages in partition", string(m.Value))
+	}
+}
+
+func testWriterBatchSize(t *testing.T) {
+	topic := makeTopic()
+	createTopic(t, topic, 1)
+	defer deleteTopic(t, topic)
+
+	offset, err := readOffset(topic, 0)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	w := newTestWriter(WriterConfig{
+		Topic:        topic,
+		BatchSize:    2,
+		BatchTimeout: math.MaxInt32 * time.Second,
+		Balancer:     &RoundRobin{},
+	})
+	defer w.Close()
+
+	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+	defer cancel()
+	if err := w.WriteMessages(ctx, []Message{
+		{Value: []byte("Hi")}, // 24 Bytes
+		{Value: []byte("By")}, // 24 Bytes
+	}...); err != nil {
+		t.Error(err)
+		return
+	}
+
+	if w.Stats().Writes > 1 {
+		t.Error("didn't batch messages")
+		return
+	}
+	msgs, err := readPartition(topic, 0, offset)
+	if err != nil {
+		t.Error("error reading partition", err)
+		return
+	}
+
+	if len(msgs) != 2 {
+		t.Error("bad messages in partition", msgs)
+		return
+	}
+
+	for _, m := range msgs {
+		if string(m.Value) == "Hi" || string(m.Value) == "By" {
+			continue
+		}
+		t.Error("bad messages in partition", msgs)
+	}
+}
+
+func testWriterSmallBatchBytes(t *testing.T) {
+	topic := makeTopic()
+	createTopic(t, topic, 1)
+	defer deleteTopic(t, topic)
+
+	offset, err := readOffset(topic, 0)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	w := newTestWriter(WriterConfig{
+		Topic:        topic,
+		BatchBytes:   25,
+		BatchTimeout: 50 * time.Millisecond,
+		Balancer:     &RoundRobin{},
+	})
+	defer w.Close()
+
+	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+	defer cancel()
+	if err := w.WriteMessages(ctx, []Message{
+		{Value: []byte("Hi")}, // 24 Bytes
+		{Value: []byte("By")}, // 24 Bytes
+	}...); err != nil {
+		t.Error(err)
+		return
+	}
+	ws := w.Stats()
+	if ws.Writes != 2 {
+		t.Error("didn't batch messages; Writes: ", ws.Writes)
+		return
+	}
+	msgs, err := readPartition(topic, 0, offset)
+	if err != nil {
+		t.Error("error reading partition", err)
+		return
+	}
+
+	if len(msgs) != 2 {
+		t.Error("bad messages in partition", msgs)
+		return
+	}
+
+	for _, m := range msgs {
+		if string(m.Value) == "Hi" || string(m.Value) == "By" {
+			continue
+		}
+		t.Error("bad messages in partition", msgs)
+	}
+}
+
+func testWriterBatchBytesHeaders(t *testing.T) {
+	topic := makeTopic()
+	createTopic(t, topic, 1)
+	defer deleteTopic(t, topic)
+
+	offset, err := readOffset(topic, 0)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	w := newTestWriter(WriterConfig{
+		Topic:        topic,
+		BatchBytes:   100,
+		BatchTimeout: 50 * time.Millisecond,
+		Balancer:     &RoundRobin{},
+	})
+	defer w.Close()
+
+	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+	defer cancel()
+	if err := w.WriteMessages(ctx, []Message{
+		{
+			Value: []byte("Hello World 1"),
+			Headers: []Header{
+				{Key: "User-Agent", Value: []byte("abc/xyz")},
+			},
+		},
+		{
+			Value: []byte("Hello World 2"),
+			Headers: []Header{
+				{Key: "User-Agent", Value: []byte("abc/xyz")},
+			},
+		},
+	}...); err != nil {
+		t.Error(err)
+		return
+	}
+	ws := w.Stats()
+	if ws.Writes != 2 {
+		t.Error("didn't batch messages; Writes: ", ws.Writes)
+		return
+	}
+	msgs, err := readPartition(topic, 0, offset)
+	if err != nil {
+		t.Error("error reading partition", err)
+		return
+	}
+
+	if len(msgs) != 2 {
+		t.Error("bad messages in partition", msgs)
+		return
+	}
+
+	for _, m := range msgs {
+		if strings.HasPrefix(string(m.Value), "Hello World") {
+			continue
+		}
+		t.Error("bad messages in partition", msgs)
+	}
+}
+
+func testWriterMultipleTopics(t *testing.T) {
+	topic1 := makeTopic()
+	createTopic(t, topic1, 1)
+	defer deleteTopic(t, topic1)
+
+	offset1, err := readOffset(topic1, 0)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	topic2 := makeTopic()
+	createTopic(t, topic2, 1)
+	defer deleteTopic(t, topic2)
+
+	offset2, err := readOffset(topic2, 0)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	w := newTestWriter(WriterConfig{
+		Balancer: &RoundRobin{},
+	})
+	defer w.Close()
+
+	msg1 := Message{Topic: topic1, Value: []byte("Hello")}
+	msg2 := Message{Topic: topic2, Value: []byte("World")}
+
+	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+	defer cancel()
+	if err := w.WriteMessages(ctx, msg1, msg2); err != nil {
+		t.Error(err)
+		return
+	}
+	ws := w.Stats()
+	if ws.Writes != 2 {
+		t.Error("didn't batch messages; Writes: ", ws.Writes)
+		return
+	}
+
+	msgs1, err := readPartition(topic1, 0, offset1)
+	if err != nil {
+		t.Error("error reading partition", err)
+		return
+	}
+	if len(msgs1) != 1 {
+		t.Error("bad messages in partition", msgs1)
+		return
+	}
+	if string(msgs1[0].Value) != "Hello" {
+		t.Error("bad message in partition", msgs1)
+	}
+
+	msgs2, err := readPartition(topic2, 0, offset2)
+	if err != nil {
+		t.Error("error reading partition", err)
+		return
+	}
+	if len(msgs2) != 1 {
+		t.Error("bad messages in partition", msgs2)
+		return
+	}
+	if string(msgs2[0].Value) != "World" {
+		t.Error("bad message in partition", msgs2)
+	}
+}
+
+func testWriterMissingTopic(t *testing.T) {
+	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+	defer cancel()
+
+	w := newTestWriter(WriterConfig{
+		// no topic
+		Balancer: &RoundRobin{},
+	})
+	defer w.Close()
+
+	msg := Message{Value: []byte("Hello World")} // no topic
+
+	if err := w.WriteMessages(ctx, msg); err == nil {
+		t.Error("expected error")
+		return
+	}
+}
+
+func testWriterInvalidPartition(t *testing.T) {
+	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+	defer cancel()
+
+	topic := makeTopic()
+	createTopic(t, topic, 1)
+	defer deleteTopic(t, topic)
+
+	w := newTestWriter(WriterConfig{
+		Topic:       topic,
+		MaxAttempts: 1,                              // only try once to get the error back immediately
+		Balancer:    &staticBalancer{partition: -1}, // intentionally invalid partition
+	})
+	defer w.Close()
+
+	msg := Message{
+		Value: []byte("Hello World!"),
+	}
+
+	// this call should return an error and not panic (see issue #517)
+	if err := w.WriteMessages(ctx, msg); err == nil {
+		t.Fatal("expected error attempting to write message")
+	}
+}
+
+func testWriterUnexpectedMessageTopic(t *testing.T) {
+	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+	defer cancel()
+
+	topic := makeTopic()
+	createTopic(t, topic, 1)
+	defer deleteTopic(t, topic)
+
+	w := newTestWriter(WriterConfig{
+		Topic:    topic,
+		Balancer: &RoundRobin{},
+	})
+	defer w.Close()
+
+	msg := Message{Topic: "should-fail", Value: []byte("Hello World")}
+
+	if err := w.WriteMessages(ctx, msg); err == nil {
+		t.Error("expected error")
+		return
+	}
+}
+
+func testWriteMessageWithWriterData(t *testing.T) {
+	topic := makeTopic()
+	createTopic(t, topic, 1)
+	defer deleteTopic(t, topic)
+	w := newTestWriter(WriterConfig{
+		Topic:    topic,
+		Balancer: &RoundRobin{},
+	})
+	defer w.Close()
+
+	index := 0
+	w.Completion = func(messages []Message, err error) {
+		if err != nil {
+			t.Errorf("unexpected error %v", err)
+		}
+
+		for _, msg := range messages {
+			meta := msg.WriterData.(int)
+			if index != meta {
+				t.Errorf("metadata is not correct, index = %d, writerData = %d", index, meta)
+			}
+			index += 1
+		}
+	}
+
+	msg := Message{Key: []byte("key"), Value: []byte("Hello World")}
+	for i := 0; i < 5; i++ {
+		ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+		defer cancel()
+
+		msg.WriterData = i
+		err := w.WriteMessages(ctx, msg)
+		if err != nil {
+			t.Errorf("unexpected error %v", err)
+		}
+	}
+
+}
+
+func testWriterAutoCreateTopic(t *testing.T) {
+	topic := makeTopic()
+	// Assume it's going to get created.
+	defer deleteTopic(t, topic)
+
+	w := newTestWriter(WriterConfig{
+		Topic:    topic,
+		Balancer: &RoundRobin{},
+	})
+	w.AllowAutoTopicCreation = true
+	defer w.Close()
+
+	msg := Message{Key: []byte("key"), Value: []byte("Hello World")}
+
+	var err error
+	const retries = 5
+	for i := 0; i < retries; i++ {
+		ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+		defer cancel()
+		err = w.WriteMessages(ctx, msg)
+		if errors.Is(err, LeaderNotAvailable) || errors.Is(err, context.DeadlineExceeded) || errors.Is(err, UnknownTopicOrPartition) {
+			time.Sleep(time.Millisecond * 250)
+			continue
+		}
+
+		if err != nil {
+			t.Errorf("unexpected error %v", err)
+			return
+		}
+	}
+	if err != nil {
+		t.Errorf("unable to create topic %v", err)
+	}
+}
+
+func testWriterTerminateMissingTopic(t *testing.T) {
+	topic := makeTopic()
+
+	transport := &Transport{}
+	defer transport.CloseIdleConnections()
+
+	writer := &Writer{
+		Addr:                   TCP("localhost:9092"),
+		Topic:                  topic,
+		Balancer:               &RoundRobin{},
+		RequiredAcks:           RequireNone,
+		AllowAutoTopicCreation: false,
+		Transport:              transport,
+	}
+	defer writer.Close()
+
+	msg := Message{Value: []byte("FooBar")}
+
+	if err := writer.WriteMessages(context.Background(), msg); err == nil {
+		t.Fatal("Kafka error [3] UNKNOWN_TOPIC_OR_PARTITION is expected")
+		return
+	}
+}
+
+func testWriterSasl(t *testing.T) {
+	topic := makeTopic()
+	defer deleteTopic(t, topic)
+	dialer := &Dialer{
+		Timeout: 10 * time.Second,
+		SASLMechanism: plain.Mechanism{
+			Username: "adminplain",
+			Password: "admin-secret",
+		},
+	}
+
+	w := newTestWriter(WriterConfig{
+		Dialer:  dialer,
+		Topic:   topic,
+		Brokers: []string{"localhost:9093"},
+	})
+
+	w.AllowAutoTopicCreation = true
+
+	defer w.Close()
+
+	msg := Message{Key: []byte("key"), Value: []byte("Hello World")}
+
+	var err error
+	const retries = 5
+	for i := 0; i < retries; i++ {
+		ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+		defer cancel()
+		err = w.WriteMessages(ctx, msg)
+		if errors.Is(err, LeaderNotAvailable) || errors.Is(err, context.DeadlineExceeded) || errors.Is(err, UnknownTopicOrPartition) {
+			time.Sleep(time.Millisecond * 250)
+			continue
+		}
+
+		if err != nil {
+			t.Errorf("unexpected error %v", err)
+			return
+		}
+	}
+	if err != nil {
+		t.Errorf("unable to create topic %v", err)
+	}
+}
+
+func testWriterDefaults(t *testing.T) {
+	w := &Writer{}
+	defer w.Close()
+
+	if w.writeBackoffMin() != 100*time.Millisecond {
+		t.Error("Incorrect default min write backoff delay")
+	}
+
+	if w.writeBackoffMax() != 1*time.Second {
+		t.Error("Incorrect default max write backoff delay")
+	}
+}
+
+func testWriterDefaultStats(t *testing.T) {
+	w := &Writer{}
+	defer w.Close()
+
+	stats := w.Stats()
+
+	if stats.MaxAttempts == 0 {
+		t.Error("Incorrect default MaxAttempts value")
+	}
+
+	if stats.WriteBackoffMin == 0 {
+		t.Error("Incorrect default WriteBackoffMin value")
+	}
+
+	if stats.WriteBackoffMax == 0 {
+		t.Error("Incorrect default WriteBackoffMax value")
+	}
+
+	if stats.MaxBatchSize == 0 {
+		t.Error("Incorrect default MaxBatchSize value")
+	}
+
+	if stats.BatchTimeout == 0 {
+		t.Error("Incorrect default BatchTimeout value")
+	}
+
+	if stats.ReadTimeout == 0 {
+		t.Error("Incorrect default ReadTimeout value")
+	}
+
+	if stats.WriteTimeout == 0 {
+		t.Error("Incorrect default WriteTimeout value")
+	}
+}
+
+func testWriterOverrideConfigStats(t *testing.T) {
+	w := &Writer{
+		MaxAttempts:     6,
+		WriteBackoffMin: 2,
+		WriteBackoffMax: 4,
+		BatchSize:       1024,
+		BatchTimeout:    16,
+		ReadTimeout:     24,
+		WriteTimeout:    32,
+	}
+	defer w.Close()
+
+	stats := w.Stats()
+
+	if stats.MaxAttempts != 6 {
+		t.Error("Incorrect MaxAttempts value")
+	}
+
+	if stats.WriteBackoffMin != 2 {
+		t.Error("Incorrect WriteBackoffMin value")
+	}
+
+	if stats.WriteBackoffMax != 4 {
+		t.Error("Incorrect WriteBackoffMax value")
+	}
+
+	if stats.MaxBatchSize != 1024 {
+		t.Error("Incorrect MaxBatchSize value")
+	}
+
+	if stats.BatchTimeout != 16 {
+		t.Error("Incorrect BatchTimeout value")
+	}
+
+	if stats.ReadTimeout != 24 {
+		t.Error("Incorrect ReadTimeout value")
+	}
+
+	if stats.WriteTimeout != 32 {
+		t.Error("Incorrect WriteTimeout value")
+	}
+}
+
+type staticBalancer struct {
+	partition int
+}
+
+func (b *staticBalancer) Balance(_ Message, partitions ...int) int {
+	return b.partition
+}
diff -pruN 0.2.1-1.1/zstd/zstd.go 0.4.49+ds1-1/zstd/zstd.go
--- 0.2.1-1.1/zstd/zstd.go	1970-01-01 00:00:00.000000000 +0000
+++ 0.4.49+ds1-1/zstd/zstd.go	2025-08-21 19:15:53.000000000 +0000
@@ -0,0 +1,21 @@
+// Package zstd does nothing; it is kept for backward compatibility to avoid
+// breaking the majority of programs that imported it to install the compression
+// codec, which is now always included.
+package zstd
+
+import "github.com/segmentio/kafka-go/compress/zstd"
+
+const (
+	Code                    = 4
+	DefaultCompressionLevel = 3
+)
+
+type CompressionCodec = zstd.Codec
+
+func NewCompressionCodec() *CompressionCodec {
+	return NewCompressionCodecWith(DefaultCompressionLevel)
+}
+
+func NewCompressionCodecWith(level int) *CompressionCodec {
+	return &CompressionCodec{Level: level}
+}
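
Since the codec is now always included, new code can skip this package entirely and select zstd on the writer; a sketch, assuming the Compression field and Zstd constant of the main package:

w := &kafka.Writer{
	Addr:        kafka.TCP("localhost:9092"), // placeholder broker
	Topic:       "example-topic",             // placeholder topic
	Compression: kafka.Zstd,
}
defer w.Close()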
