diff -pruN 1.7.0-1/.github/workflows/codeql-analysis.yml 1.8.0-1/.github/workflows/codeql-analysis.yml
--- 1.7.0-1/.github/workflows/codeql-analysis.yml	2025-08-19 13:15:23.000000000 +0000
+++ 1.8.0-1/.github/workflows/codeql-analysis.yml	2025-09-09 11:06:15.000000000 +0000
@@ -13,10 +13,10 @@ name: "CodeQL"
 
 on:
   push:
-    branches: [ master ]
+    branches: [ main ]
   pull_request:
     # The branches below must be a subset of the branches above
-    branches: [ master ]
+    branches: [ main ]
   schedule:
     - cron: '16 17 * * 5'
 
diff -pruN 1.7.0-1/.github/workflows/go.yml 1.8.0-1/.github/workflows/go.yml
--- 1.7.0-1/.github/workflows/go.yml	2025-08-19 13:15:23.000000000 +0000
+++ 1.8.0-1/.github/workflows/go.yml	2025-09-09 11:06:15.000000000 +0000
@@ -4,9 +4,9 @@ name: Go
 
 on:
   push:
-    branches: [ "master" ]
+    branches: [ "main" ]
   pull_request:
-    branches: [ "master" ]
+    branches: [ "main" ]
 
 permissions:
   contents: read
@@ -21,7 +21,7 @@ jobs:
     - name: Set up Go
       uses: actions/setup-go@v5
       with:
-        go-version: 1.24
+        go-version: 1.25
 
     - name: Build
       run: go build -v ./...
@@ -32,4 +32,4 @@ jobs:
     - name: golangci-lint
       uses: golangci/golangci-lint-action@v8
       with:
-          version: v2.1.6
+          version: v2.4.0
diff -pruN 1.7.0-1/CHANGELOG.md 1.8.0-1/CHANGELOG.md
--- 1.7.0-1/CHANGELOG.md	2025-08-19 13:15:23.000000000 +0000
+++ 1.8.0-1/CHANGELOG.md	2025-09-09 11:06:15.000000000 +0000
@@ -1,5 +1,22 @@
 # CHANGELOG
 
+[v1.8.0](https://github.com/graph-gophers/graphql-go/releases/tag/v1.8.0) Release v1.8.0
+
+* [FEATURE] Added `DecodeSelectedFieldArgs` helper function to decode argument values for any (nested) selected field path directly from a resolver context, enabling efficient multi-level prefetching without per-resolver argument reflection. This allows selective, multi-level batching (Category → Products → Reviews) that loads only the requested fields and mitigates N+1 issues even in the presence of complex filters or pagination.
+* [CHORE] Bump Go version in go.mod file to v1.24 to be one minor version less than the latest stable Go release.
+
+[v1.7.2](https://github.com/graph-gophers/graphql-go/releases/tag/v1.7.2) Release v1.7.2
+
+* [BUGFIX] Fix checksum mismatch between direct git access and golang proxy for v1.7.1. This version contains identical functionality to v1.7.1 but with proper tag creation to ensure consistent checksums across all proxy configurations.
+
+[v1.7.1](https://github.com/graph-gophers/graphql-go/releases/tag/v1.7.1) Release v1.7.1
+
+* [IMPROVEMENT] `SelectedFieldNames` now returns dot-delimited nested field paths (e.g. `products`, `products.id`, `products.category`, `products.category.id`). Intermediate container object/list paths are included so resolvers can check for both a branch (`products.category`) and its leaves (`products.category.id`). `HasSelectedField` and `SortedSelectedFieldNames` operate on these paths. This aligns behavior with typical resolver projection needs and fixes missing nested selections.
+* [BUGFIX] Reject object, interface, and input object type definitions that declare zero fields/input values (spec compliance).
+* [IMPROVEMENT] Optimize overlapping field validation to avoid quadratic memory blowups on large sibling field lists.
+* [FEATURE] Add a configurable safety valve for the overlapping-field comparison count via the `OverlapValidationLimit(n)` schema option (0 disables the cap). When the cap is exceeded, validation aborts early with rule `OverlapValidationLimitExceeded`. Disabled by default.
+* [TEST] Add benchmarks & randomized overlap stress test for mixed field/fragment patterns.
+
 [v1.7.0](https://github.com/graph-gophers/graphql-go/releases/tag/v1.7.0) Release v1.7.0
 
 * [FEATURE] Add resolver field selection inspection helpers (`SelectedFieldNames`, `HasSelectedField`, `SortedSelectedFieldNames`). Helpers are available by default and compute results lazily only when called. An explicit opt-out (`DisableFieldSelections()` schema option) is provided for applications that want to remove even the minimal context insertion overhead when the helpers are never used.
@@ -32,7 +49,7 @@
 
 * [FEATURE] Add types package #437
 * [FEATURE] Expose `packer.Unmarshaler` as `decode.Unmarshaler` to the public #450
-* [FEATURE] Add location fields to type definitions #454 
+* [FEATURE] Add location fields to type definitions #454
 * [FEATURE] `errors.Errorf` preserves original error similar to `fmt.Errorf` #456
 * [BUGFIX] Fix duplicated __typename in response (fixes #369) #443
 
diff -pruN 1.7.0-1/CONTRIBUTING.md 1.8.0-1/CONTRIBUTING.md
--- 1.7.0-1/CONTRIBUTING.md	2025-08-19 13:15:23.000000000 +0000
+++ 1.8.0-1/CONTRIBUTING.md	2025-09-09 11:06:15.000000000 +0000
@@ -6,7 +6,7 @@
   - Review existing issues and provide feedback or react to them.
 
 - With pull requests:
-  - Open your pull request against `master`
+  - Open your pull request against `main`
   - Your pull request should have no more than two commits, if not you should squash them.
   - It should pass all tests in the available continuous integrations systems such as TravisCI.
   - You should add/modify tests to cover your proposed code changes.
diff -pruN 1.7.0-1/README.md 1.8.0-1/README.md
--- 1.7.0-1/README.md	2025-08-19 13:15:23.000000000 +0000
+++ 1.8.0-1/README.md	2025-09-09 11:06:15.000000000 +0000
@@ -15,6 +15,7 @@ While still under development (`internal
 - resolvers are matched to the schema based on method sets (can resolve a GraphQL schema with a Go interface or Go struct).
 - handles panics in resolvers
 - parallel execution of resolvers
+- inspect the selected fields and their args to prefetch data and avoid the N+1 query problem
 - subscriptions
   - [sample WS transport](https://github.com/graph-gophers/graphql-transport-ws)
 
@@ -98,11 +99,7 @@ func (r *helloWorldResolver) Hello(ctx c
 ```
 
 ### Separate resolvers for different operations
-> **NOTE**: This feature is not in the stable release yet. In order to use it you need to run `go get github.com/graph-gophers/graphql-go@master` and in your `go.mod` file you will have something like:
->  ```
->  v1.5.1-0.20230216224648-5aa631d05992
->  ```
-> It is expected to be released in `v1.6.0` soon.
+This feature was released in `v1.6.0`.
 
 The GraphQL specification allows for fields with the same name defined in different query types. For example, the schema below is a valid schema definition:
 ```graphql
@@ -157,6 +154,7 @@ schema := graphql.MustParseSchema(sdl, &
 - `PanicHandler(panicHandler errors.PanicHandler)` is used to transform panics into errors during query execution. It defaults to `errors.DefaultPanicHandler`.
 - `DisableIntrospection()` disables introspection queries.
 - `DisableFieldSelections()` disables capturing child field selections used by helper APIs (see below).
+- `OverlapValidationLimit(n int)` sets a hard cap on the number of overlapping field pairs examined during validation; exceeding it aborts validation with an `OverlapValidationLimitExceeded` error.
 
 ### Field Selection Inspection Helpers
 
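
For orientation, a minimal sketch of composing the schema options listed above at construction time; `sdl`, `rootResolver`, and the limit value of 10,000 are placeholders.

```go
package schemasetup

import "github.com/graph-gophers/graphql-go"

// buildSchema wires the options documented above; sdl and rootResolver come from the application.
func buildSchema(sdl string, rootResolver interface{}) *graphql.Schema {
	return graphql.MustParseSchema(sdl, rootResolver,
		graphql.UseFieldResolvers(),            // resolve via exported struct fields
		graphql.OverlapValidationLimit(10_000), // new option; 0 (the default) leaves the cap disabled
	)
}
```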
diff -pruN 1.7.0-1/debian/changelog 1.8.0-1/debian/changelog
--- 1.7.0-1/debian/changelog	2025-08-25 13:50:55.000000000 +0000
+++ 1.8.0-1/debian/changelog	2025-09-30 11:40:12.000000000 +0000
@@ -1,3 +1,9 @@
+golang-github-graph-gophers-graphql-go (1.8.0-1) unstable; urgency=medium
+
+  * New upstream release.
+
+ -- Sascha Steinbiss <satta@debian.org>  Tue, 30 Sep 2025 13:40:12 +0200
+
 golang-github-graph-gophers-graphql-go (1.7.0-1) unstable; urgency=medium
 
   * Change to new packaging repo layout based on upstream Git.
diff -pruN 1.7.0-1/decode/decode.go 1.8.0-1/decode/decode.go
--- 1.7.0-1/decode/decode.go	2025-08-19 13:15:23.000000000 +0000
+++ 1.8.0-1/decode/decode.go	2025-09-09 11:06:15.000000000 +0000
@@ -1,13 +1,13 @@
 package decode
 
-// Unmarshaler defines the api of Go types mapped to custom GraphQL scalar types
+// Unmarshaler defines the api of Go types mapped to custom GraphQL scalar types.
 type Unmarshaler interface {
 	// ImplementsGraphQLType maps the implementing custom Go type
 	// to the GraphQL scalar type in the schema.
 	ImplementsGraphQLType(name string) bool
-	// UnmarshalGraphQL is the custom unmarshaler for the implementing type
+	// UnmarshalGraphQL is the custom unmarshaler for the implementing type.
 	//
 	// This function will be called whenever you use the
-	// custom GraphQL scalar type as an input
-	UnmarshalGraphQL(input interface{}) error
+	// custom GraphQL scalar type as an input.
+	UnmarshalGraphQL(input any) error
 }
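
A compact sketch of a custom scalar implementing this interface; the `Weekday` type and its scalar name are made up for illustration.

```go
package scalars

import "fmt"

// Weekday is a hypothetical custom scalar backed by a string.
type Weekday string

// ImplementsGraphQLType maps the Go type to the scalar name declared in the schema.
func (Weekday) ImplementsGraphQLType(name string) bool { return name == "Weekday" }

// UnmarshalGraphQL accepts the scalar as a string input value.
func (w *Weekday) UnmarshalGraphQL(input any) error {
	s, ok := input.(string)
	if !ok {
		return fmt.Errorf("weekday: expected string, got %T", input)
	}
	*w = Weekday(s)
	return nil
}
```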
diff -pruN 1.7.0-1/example/federation/integration/gateway/package-lock.json 1.8.0-1/example/federation/integration/gateway/package-lock.json
--- 1.7.0-1/example/federation/integration/gateway/package-lock.json	2025-08-19 13:15:23.000000000 +0000
+++ 1.8.0-1/example/federation/integration/gateway/package-lock.json	2025-09-09 11:06:15.000000000 +0000
@@ -1915,6 +1915,21 @@
         "url": "https://github.com/sponsors/ljharb"
       }
     },
+    "node_modules/is-typed-array": {
+      "version": "1.1.15",
+      "resolved": "https://registry.npmjs.org/is-typed-array/-/is-typed-array-1.1.15.tgz",
+      "integrity": "sha512-p3EcsicXjit7SaskXHs1hA91QxgTw46Fv6EFKKGS5DRFLD8yKnohjF3hxoju94b/OcMZoQukzpPpBE9uLVKzgQ==",
+      "license": "MIT",
+      "dependencies": {
+        "which-typed-array": "^1.1.16"
+      },
+      "engines": {
+        "node": ">= 0.4"
+      },
+      "funding": {
+        "url": "https://github.com/sponsors/ljharb"
+      }
+    },
     "node_modules/is-weakmap": {
       "version": "2.0.2",
       "resolved": "https://registry.npmjs.org/is-weakmap/-/is-weakmap-2.0.2.tgz",
@@ -2716,15 +2731,23 @@
       "integrity": "sha512-E5LDX7Wrp85Kil5bhZv46j8jOeboKq5JMmYM3gVGdGH8xFpPWXUMsNrlODCrkoxMEeNi/XZIwuRvY4XNwYMJpw=="
     },
     "node_modules/sha.js": {
-      "version": "2.4.11",
-      "resolved": "https://registry.npmjs.org/sha.js/-/sha.js-2.4.11.tgz",
-      "integrity": "sha512-QMEp5B7cftE7APOjk5Y6xgrbWu+WkLVQwk8JNjZ8nKRciZaByEW6MubieAiToS7+dwvrjGhH8jRXz3MVd0AYqQ==",
-      "dependencies": {
-        "inherits": "^2.0.1",
-        "safe-buffer": "^5.0.1"
+      "version": "2.4.12",
+      "resolved": "https://registry.npmjs.org/sha.js/-/sha.js-2.4.12.tgz",
+      "integrity": "sha512-8LzC5+bvI45BjpfXU8V5fdU2mfeKiQe1D1gIMn7XUlF3OTUrpdJpPPH4EMAnF0DsHHdSZqCdSss5qCmJKuiO3w==",
+      "license": "(MIT AND BSD-3-Clause)",
+      "dependencies": {
+        "inherits": "^2.0.4",
+        "safe-buffer": "^5.2.1",
+        "to-buffer": "^1.2.0"
       },
       "bin": {
         "sha.js": "bin.js"
+      },
+      "engines": {
+        "node": ">= 0.10"
+      },
+      "funding": {
+        "url": "https://github.com/sponsors/ljharb"
       }
     },
     "node_modules/shebang-command": {
@@ -3064,6 +3087,20 @@
         "node": ">=8"
       }
     },
+    "node_modules/to-buffer": {
+      "version": "1.2.1",
+      "resolved": "https://registry.npmjs.org/to-buffer/-/to-buffer-1.2.1.tgz",
+      "integrity": "sha512-tB82LpAIWjhLYbqjx3X4zEeHN6M8CiuOEy2JY8SEQVdYRe3CCHOFaqrBW1doLDrfpWhplcW7BL+bO3/6S3pcDQ==",
+      "license": "MIT",
+      "dependencies": {
+        "isarray": "^2.0.5",
+        "safe-buffer": "^5.2.1",
+        "typed-array-buffer": "^1.0.3"
+      },
+      "engines": {
+        "node": ">= 0.4"
+      }
+    },
     "node_modules/toidentifier": {
       "version": "1.0.1",
       "resolved": "https://registry.npmjs.org/toidentifier/-/toidentifier-1.0.1.tgz",
@@ -3107,6 +3144,20 @@
         "node": ">= 0.6"
       }
     },
+    "node_modules/typed-array-buffer": {
+      "version": "1.0.3",
+      "resolved": "https://registry.npmjs.org/typed-array-buffer/-/typed-array-buffer-1.0.3.tgz",
+      "integrity": "sha512-nAYYwfY3qnzX30IkA6AQZjVbtK6duGontcQm1WSG1MD94YLqK0515GNApXkoxKOWMusVssAHWLh9SeaoefYFGw==",
+      "license": "MIT",
+      "dependencies": {
+        "call-bound": "^1.0.3",
+        "es-errors": "^1.3.0",
+        "is-typed-array": "^1.1.14"
+      },
+      "engines": {
+        "node": ">= 0.4"
+      }
+    },
     "node_modules/unique-filename": {
       "version": "3.0.0",
       "resolved": "https://registry.npmjs.org/unique-filename/-/unique-filename-3.0.0.tgz",
diff -pruN 1.7.0-1/example/prefetch/main.go 1.8.0-1/example/prefetch/main.go
--- 1.7.0-1/example/prefetch/main.go	1970-01-01 00:00:00.000000000 +0000
+++ 1.8.0-1/example/prefetch/main.go	2025-09-09 11:06:15.000000000 +0000
@@ -0,0 +1,167 @@
+/*
+This example demonstrates a 3-level hierarchy (Author -> Books -> Reviews)
+with data prefetching at each level to avoid N+1 query problems.
+To run the example, execute:
+
+	go run example/prefetch/main.go
+
+Then send a query like this (using curl or any GraphQL client):
+
+	curl -X POST http://localhost:8080/query \
+	   -H 'Content-Type: application/json' \
+	   -d '{"query":"query GetAuthors($top:Int!,$last:Int!){ authors { id name books(top:$top){ id title reviews(last:$last){ id content rating } } }}","variables":{"top":2,"last":2}}'
+*/
+package main
+
+import (
+	"context"
+	_ "embed"
+	"log"
+	"net/http"
+
+	"github.com/graph-gophers/graphql-go"
+	"github.com/graph-gophers/graphql-go/relay"
+)
+
+//go:embed schema.graphql
+var sdl string
+
+type Author struct {
+	ID   string
+	Name string
+}
+type Book struct {
+	ID       string
+	AuthorID string
+	Title    string
+}
+type Review struct {
+	ID      string
+	BookID  string
+	Content string
+	Rating  int32
+}
+
+var (
+	allAuthors = []Author{{"A1", "Ann"}, {"A2", "Bob"}}
+	allBooks   = []Book{{"B1", "A1", "Go Tips"}, {"B2", "A1", "Concurrency"}, {"B3", "A2", "GraphQL"}}
+	allReviews = []Review{{"R1", "B1", "Great", 5}, {"R2", "B1", "Okay", 3}, {"R3", "B3", "Wow", 4}}
+)
+
+type root struct {
+	booksByAuthor map[string][]Book
+	reviewsByBook map[string][]Review
+}
+
+func (r *root) Authors(ctx context.Context) ([]*authorResolver, error) {
+	authors := allAuthors
+	// level 1 prefetch: authors already available
+	if graphql.HasSelectedField(ctx, "books") {
+		// level 2 prefetch: books for selected authors only
+		authorSet := make(map[string]struct{}, len(authors))
+		for _, a := range authors {
+			authorSet[a.ID] = struct{}{}
+		}
+		booksByAuthor := make(map[string][]Book)
+		// capture potential Top argument once (shared across authors)
+		var topLimit int
+		var booksArgs struct{ Top int32 }
+		if ok, _ := graphql.DecodeSelectedFieldArgs(ctx, "books", &booksArgs); ok && booksArgs.Top > 0 {
+			topLimit = int(booksArgs.Top)
+		}
+		for _, b := range allBooks {
+			if _, ok := authorSet[b.AuthorID]; ok {
+				list := booksByAuthor[b.AuthorID]
+				if topLimit == 0 || len(list) < topLimit {
+					list = append(list, b)
+					booksByAuthor[b.AuthorID] = list
+				}
+			}
+		}
+		if graphql.HasSelectedField(ctx, "books.reviews") {
+			var lastLimit int
+			var rvArgs struct{ Last int32 }
+			if ok, _ := graphql.DecodeSelectedFieldArgs(ctx, "books.reviews", &rvArgs); ok && rvArgs.Last > 0 {
+				lastLimit = int(rvArgs.Last)
+			}
+			bookSet := map[string]struct{}{}
+			for _, slice := range booksByAuthor {
+				for _, b := range slice {
+					bookSet[b.ID] = struct{}{}
+				}
+			}
+			reviewsByBook := make(map[string][]Review)
+			for _, rv := range allReviews {
+				if _, ok := bookSet[rv.BookID]; ok {
+					grp := reviewsByBook[rv.BookID]
+					grp = append(grp, rv)
+					if lastLimit > 0 && len(grp) > lastLimit {
+						grp = grp[len(grp)-lastLimit:]
+					}
+					reviewsByBook[rv.BookID] = grp
+				}
+			}
+			r.reviewsByBook = reviewsByBook
+		}
+		r.booksByAuthor = booksByAuthor
+	}
+	res := make([]*authorResolver, len(authors))
+	for i, a := range authors {
+		res[i] = &authorResolver{root: r, a: &a}
+	}
+	return res, nil
+}
+
+type authorResolver struct {
+	root *root
+	a    *Author
+}
+
+func (ar *authorResolver) ID() graphql.ID { return graphql.ID(ar.a.ID) }
+func (ar *authorResolver) Name() string   { return ar.a.Name }
+
+func (ar *authorResolver) Books(ctx context.Context, args struct{ Top int32 }) ([]*bookResolver, error) {
+	// books already limited during prefetch phase (Authors resolver)
+	books := ar.root.booksByAuthor[ar.a.ID]
+	out := make([]*bookResolver, len(books))
+	for i, b := range books {
+		out[i] = &bookResolver{root: ar.root, b: &b}
+	}
+	return out, nil
+}
+
+type bookResolver struct {
+	root *root
+	b    *Book
+}
+
+func (br *bookResolver) ID() graphql.ID { return graphql.ID(br.b.ID) }
+func (br *bookResolver) Title() string  { return br.b.Title }
+func (br *bookResolver) Reviews(ctx context.Context, args struct{ Last int32 }) ([]*reviewResolver, error) {
+	revs := br.root.reviewsByBook[br.b.ID]
+	if take := int(args.Last); take > 0 && take < len(revs) {
+		start := len(revs) - take
+		if start < 0 {
+			start = 0
+		}
+		revs = revs[start:]
+	}
+	out := make([]*reviewResolver, len(revs))
+	for i, r := range revs {
+		out[i] = &reviewResolver{r: &r}
+	}
+	return out, nil
+}
+
+type reviewResolver struct{ r *Review }
+
+func (rr *reviewResolver) ID() graphql.ID  { return graphql.ID(rr.r.ID) }
+func (rr *reviewResolver) Content() string { return rr.r.Content }
+func (rr *reviewResolver) Rating() int32   { return rr.r.Rating }
+
+func main() {
+	schema := graphql.MustParseSchema(sdl, &root{})
+	http.Handle("/query", &relay.Handler{Schema: schema})
+	log.Println("Prefetch example listening on :8080 -> POST /query")
+	log.Fatal(http.ListenAndServe(":8080", nil))
+}
diff -pruN 1.7.0-1/example/prefetch/schema.graphql 1.8.0-1/example/prefetch/schema.graphql
--- 1.7.0-1/example/prefetch/schema.graphql	1970-01-01 00:00:00.000000000 +0000
+++ 1.8.0-1/example/prefetch/schema.graphql	2025-09-09 11:06:15.000000000 +0000
@@ -0,0 +1,23 @@
+schema { query: Query }
+
+type Query { 
+    authors: [Author!]! 
+}
+
+type Author {
+    id: ID!
+    name: String!
+    books(top: Int!): [Book!]!
+}
+
+type Book {
+    id: ID!
+    title: String!
+    reviews(last: Int!): [Review!]!
+}
+
+type Review {
+    id: ID!
+    content: String!
+    rating: Int!
+}
diff -pruN 1.7.0-1/example_nullbool_test.go 1.8.0-1/example_nullbool_test.go
--- 1.7.0-1/example_nullbool_test.go	2025-08-19 13:15:23.000000000 +0000
+++ 1.8.0-1/example_nullbool_test.go	2025-09-09 11:06:15.000000000 +0000
@@ -20,6 +20,10 @@ func (*mutnb) Toggle(args struct{ Enable
 	return fmt.Sprintf("enabled '%v'", *args.Enabled.Value)
 }
 
+func (r *mutnb) Name() string {
+	return "test"
+}
+
 // ExampleNullBool demonstrates how to use nullable Bool type when it is necessary to differentiate between nil and not set.
 func ExampleNullBool() {
 	const s = `
@@ -27,7 +31,9 @@ func ExampleNullBool() {
 			query: Query
 			mutation: Mutation
 		}
-		type Query{}
+		type Query{
+			name: String!
+		}
 		type Mutation{
 			toggle(enabled: Boolean): String!
 		}
diff -pruN 1.7.0-1/example_prefetch_test.go 1.8.0-1/example_prefetch_test.go
--- 1.7.0-1/example_prefetch_test.go	1970-01-01 00:00:00.000000000 +0000
+++ 1.8.0-1/example_prefetch_test.go	2025-09-09 11:06:15.000000000 +0000
@@ -0,0 +1,405 @@
+package graphql_test
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"os"
+	"sort"
+
+	"github.com/graph-gophers/graphql-go"
+)
+
+// In this example we demonstrate a 3-level hierarchy (Category -> Products -> Reviews)
+// and show how to prefetch nested data (products & reviews) in a single pass using
+// the selected field/argument inspection helpers.
+// The type names are prefixed with "pf" to avoid clashing with other examples in this package.
+type pfCategory struct {
+	ID   string
+	Name string
+}
+type pfProduct struct {
+	ID         string
+	CategoryID string
+	Name       string
+	Price      int
+}
+type pfReview struct {
+	ID        string
+	ProductID string
+	Body      string
+	Stars     int32
+}
+
+var (
+	pfCategories = []pfCategory{{"C1", "Electronics"}}
+	pfProducts   = []pfProduct{
+		{"P01", "C1", "Adapter", 15},
+		{"P02", "C1", "Battery", 25},
+		{"P03", "C1", "Cable", 5},
+		{"P04", "C1", "Dock", 45},
+		{"P05", "C1", "Earbuds", 55},
+		{"P06", "C1", "Fan", 35},
+		{"P07", "C1", "Gamepad", 65},
+		{"P08", "C1", "Hub", 40},
+		{"P09", "C1", "Indicator", 12},
+		{"P10", "C1", "Joystick", 70},
+		{"P11", "C1", "Keyboard", 80},
+		{"P12", "C1", "Light", 8},
+		{"P13", "C1", "Microphone", 120},
+	}
+	pfReviews = []pfReview{
+		{"R01", "P05", "Great sound", 5},
+		{"R02", "P05", "Decent", 4},
+		{"R03", "P05", "Could be louder", 3},
+		{"R04", "P05", "Nice fit", 5},
+		{"R05", "P05", "Battery ok", 4},
+		{"R06", "P05", "Color faded", 2},
+		{"R07", "P05", "Value for money", 5},
+		{"R08", "P11", "Fast typing", 5},
+		{"R09", "P11", "Loud keys", 3},
+		{"R10", "P02", "Holds charge", 4},
+		{"R11", "P02", "Gets warm", 2},
+	}
+)
+
+// SDL describing the hierarchy with pagination & ordering arguments.
+const prefetchSDL = `
+schema { query: Query }
+
+enum ProductOrder {
+	NAME
+	PRICE
+}
+
+type Query {
+	category(id: ID!): Category
+}
+
+type Category {
+	id: ID!
+	name: String!
+	products(after: ID, first: Int, orderBy: ProductOrder): [Product!]!
+}
+
+type Product {
+	id: ID!
+	name: String!
+	price: Int!
+	reviews(last: Int = 5): [Review!]!
+}
+
+type Review {
+	id: ID!
+	body: String!
+	stars: Int!
+}
+`
+
+type pfRoot struct{}
+
+// ProductOrder represented as plain string for simplicity in this example.
+type ProductOrder string
+
+const (
+	ProductOrderName  ProductOrder = "NAME"
+	ProductOrderPrice ProductOrder = "PRICE"
+)
+
+func (r *pfRoot) Category(ctx context.Context, args struct{ ID graphql.ID }) *pfCategoryResolver {
+	var cat *pfCategory
+	for i := range pfCategories {
+		if pfCategories[i].ID == string(args.ID) {
+			cat = &pfCategories[i]
+			break
+		}
+	}
+	if cat == nil {
+		return nil
+	}
+
+	cr := &pfCategoryResolver{c: cat}
+
+	// Exit early if "products" field wasn't requested
+	if !graphql.HasSelectedField(ctx, "products") {
+		return cr
+	}
+
+	// Prefetch products for this category
+	// Decode any arguments provided to the "products" field
+	// and apply them during prefetching.
+	var prodArgs struct {
+		After   graphql.ID
+		First   *int32
+		OrderBy *string
+	}
+	_, _ = graphql.DecodeSelectedFieldArgs(ctx, "products", &prodArgs)
+	firstVal := int32(10)
+	if prodArgs.First != nil && *prodArgs.First > 0 {
+		firstVal = *prodArgs.First
+	}
+	orderVal := ProductOrderName
+	if prodArgs.OrderBy != nil && *prodArgs.OrderBy != "" {
+		orderVal = ProductOrder(*prodArgs.OrderBy)
+	}
+	filtered := make([]pfProduct, 0, 16)
+	for _, p := range pfProducts {
+		if p.CategoryID == cat.ID {
+			filtered = append(filtered, p)
+		}
+	}
+	switch orderVal {
+	case ProductOrderPrice:
+		sort.Slice(filtered, func(i, j int) bool { return filtered[i].Price < filtered[j].Price })
+	default:
+		sort.Slice(filtered, func(i, j int) bool { return filtered[i].Name < filtered[j].Name })
+	}
+	var start int
+	if prodArgs.After != "" {
+		for i, p := range filtered {
+			if p.ID == string(prodArgs.After) {
+				start = i + 1
+				break
+			}
+		}
+		if start > len(filtered) {
+			start = len(filtered)
+		}
+	}
+	end := start + int(firstVal)
+	if end > len(filtered) {
+		end = len(filtered)
+	}
+	slice := filtered[start:end]
+	cr.prefetchedProducts = make([]*pfProduct, len(slice))
+	for i := range slice {
+		prod := slice[i]
+		cr.prefetchedProducts[i] = &prod
+	}
+
+	// Exit early if "reviews" sub-field wasn't requested for the products
+	if !graphql.HasSelectedField(ctx, "products.reviews") {
+		return cr
+	}
+
+	// Prefetch reviews for all products in this category
+	// Decode any arguments provided to the "reviews" field
+	// and apply them during prefetching.
+	var reviewArgs struct{ Last int32 }
+	_, _ = graphql.DecodeSelectedFieldArgs(ctx, "products.reviews", &reviewArgs)
+	var lastVal int32
+	if reviewArgs.Last > 0 {
+		lastVal = reviewArgs.Last
+	}
+	take := int(lastVal)
+	cr.reviewsByProduct = make(map[string][]*pfReview)
+	productSet := map[string]struct{}{}
+	for _, p := range cr.prefetchedProducts {
+		productSet[p.ID] = struct{}{}
+	}
+	for i := range pfReviews {
+		rv := pfReviews[i]
+		if _, ok := productSet[rv.ProductID]; !ok {
+			continue
+		}
+		arr := cr.reviewsByProduct[rv.ProductID]
+		arr = append(arr, &rv)
+		if take > 0 && len(arr) > take {
+			arr = arr[len(arr)-take:]
+		}
+		cr.reviewsByProduct[rv.ProductID] = arr
+	}
+	return cr
+}
+
+type pfCategoryResolver struct {
+	c                  *pfCategory
+	prefetchedProducts []*pfProduct
+	reviewsByProduct   map[string][]*pfReview
+}
+
+func (c *pfCategoryResolver) ID() graphql.ID { return graphql.ID(c.c.ID) }
+func (c *pfCategoryResolver) Name() string   { return c.c.Name }
+
+type pfProductArgs struct {
+	After   *graphql.ID
+	First   *int32
+	OrderBy *string
+}
+
+func (c *pfCategoryResolver) Products(ctx context.Context, args pfProductArgs) ([]*pfProductResolver, error) {
+	out := make([]*pfProductResolver, len(c.prefetchedProducts))
+	for i, p := range c.prefetchedProducts {
+		out[i] = &pfProductResolver{parent: c, p: p}
+	}
+	return out, nil
+}
+
+type pfProductResolver struct {
+	parent *pfCategoryResolver
+	p      *pfProduct
+}
+
+func (p *pfProductResolver) ID() graphql.ID { return graphql.ID(p.p.ID) }
+func (p *pfProductResolver) Name() string   { return p.p.Name }
+func (p *pfProductResolver) Price() int32   { return int32(p.p.Price) }
+func (p *pfProductResolver) Reviews(ctx context.Context, args struct{ Last int32 }) ([]*pfReviewResolver, error) {
+	rs := p.parent.reviewsByProduct[p.p.ID]
+	out := make([]*pfReviewResolver, len(rs))
+	for i, r := range rs {
+		out[i] = &pfReviewResolver{r: r}
+	}
+	return out, nil
+}
+
+type pfReviewResolver struct{ r *pfReview }
+
+func (r *pfReviewResolver) ID() graphql.ID { return graphql.ID(r.r.ID) }
+func (r *pfReviewResolver) Body() string   { return r.r.Body }
+func (r *pfReviewResolver) Stars() int32   { return r.r.Stars }
+
+// ExamplePrefetchData demonstrates data prefetching for a 3-level hierarchy depending on the requested fields.
+func Example_prefetchData() {
+	schema := graphql.MustParseSchema(prefetchSDL, &pfRoot{})
+
+	// Query 1: order products by NAME, starting after P02, first 5, with default last 5 reviews.
+	q1 := `{
+  category(id:"C1") {
+    id
+    name
+    products(after:"P02", first:5, orderBy: NAME) {
+      id
+      name
+      price
+      reviews {
+        id
+        stars
+      }
+    }
+  }
+}`
+
+	// Query 2: order by PRICE, no cursor (after), first 4 products only.
+	q2 := `{
+  category(id:"C1") {
+    products(first:4, orderBy: PRICE) {
+      id
+      name
+      price
+    }
+  }
+}`
+
+	fmt.Println("Order by NAME result:")
+	res1 := schema.Exec(context.Background(), q1, "", nil)
+	enc := json.NewEncoder(os.Stdout)
+	enc.SetIndent("", "  ")
+	_ = enc.Encode(res1)
+
+	fmt.Println("Order by PRICE result:")
+	res2 := schema.Exec(context.Background(), q2, "", nil)
+	enc = json.NewEncoder(os.Stdout)
+	enc.SetIndent("", "  ")
+	_ = enc.Encode(res2)
+
+	// output:
+	// Order by NAME result:
+	// {
+	//   "data": {
+	//     "category": {
+	//       "id": "C1",
+	//       "name": "Electronics",
+	//       "products": [
+	//         {
+	//           "id": "P03",
+	//           "name": "Cable",
+	//           "price": 5,
+	//           "reviews": []
+	//         },
+	//         {
+	//           "id": "P04",
+	//           "name": "Dock",
+	//           "price": 45,
+	//           "reviews": []
+	//         },
+	//         {
+	//           "id": "P05",
+	//           "name": "Earbuds",
+	//           "price": 55,
+	//           "reviews": [
+	//             {
+	//               "id": "R01",
+	//               "stars": 5
+	//             },
+	//             {
+	//               "id": "R02",
+	//               "stars": 4
+	//             },
+	//             {
+	//               "id": "R03",
+	//               "stars": 3
+	//             },
+	//             {
+	//               "id": "R04",
+	//               "stars": 5
+	//             },
+	//             {
+	//               "id": "R05",
+	//               "stars": 4
+	//             },
+	//             {
+	//               "id": "R06",
+	//               "stars": 2
+	//             },
+	//             {
+	//               "id": "R07",
+	//               "stars": 5
+	//             }
+	//           ]
+	//         },
+	//         {
+	//           "id": "P06",
+	//           "name": "Fan",
+	//           "price": 35,
+	//           "reviews": []
+	//         },
+	//         {
+	//           "id": "P07",
+	//           "name": "Gamepad",
+	//           "price": 65,
+	//           "reviews": []
+	//         }
+	//       ]
+	//     }
+	//   }
+	// }
+	// Order by PRICE result:
+	// {
+	//   "data": {
+	//     "category": {
+	//       "products": [
+	//         {
+	//           "id": "P03",
+	//           "name": "Cable",
+	//           "price": 5
+	//         },
+	//         {
+	//           "id": "P12",
+	//           "name": "Light",
+	//           "price": 8
+	//         },
+	//         {
+	//           "id": "P09",
+	//           "name": "Indicator",
+	//           "price": 12
+	//         },
+	//         {
+	//           "id": "P01",
+	//           "name": "Adapter",
+	//           "price": 15
+	//         }
+	//       ]
+	//     }
+	//   }
+	// }
+}
diff -pruN 1.7.0-1/example_scalar_map_test.go 1.8.0-1/example_scalar_map_test.go
--- 1.7.0-1/example_scalar_map_test.go	2025-08-19 13:15:23.000000000 +0000
+++ 1.8.0-1/example_scalar_map_test.go	2025-09-09 11:06:15.000000000 +0000
@@ -31,6 +31,10 @@ type Args struct {
 
 type mutation struct{}
 
+func (m *mutation) Name() string {
+	return "test"
+}
+
 func (*mutation) Hello(args Args) string {
 	fmt.Println(args)
 	return "Args accepted!"
@@ -40,7 +44,9 @@ func Example_customScalarMap() {
 	s := `
 		scalar Map
 	
-		type Query {}
+		type Query {
+			name: String!
+		}
 		
 		type Mutation {
 			hello(
diff -pruN 1.7.0-1/example_selection_test.go 1.8.0-1/example_selection_test.go
--- 1.7.0-1/example_selection_test.go	2025-08-19 13:15:23.000000000 +0000
+++ 1.8.0-1/example_selection_test.go	2025-09-09 11:06:15.000000000 +0000
@@ -12,10 +12,16 @@ type (
 	userResolver struct{ u user }
 )
 
-func (r *userResolver) ID() graphql.ID                              { return graphql.ID(r.u.id) }
-func (r *userResolver) Name() *string                               { return &r.u.name }
-func (r *userResolver) Email() *string                              { return &r.u.email }
-func (r *userResolver) Friends(ctx context.Context) []*userResolver { return nil }
+func (r *userResolver) ID() graphql.ID { return graphql.ID(r.u.id) }
+func (r *userResolver) Name() *string  { return &r.u.name }
+func (r *userResolver) Email() *string { return &r.u.email }
+func (r *userResolver) Friends(ctx context.Context) []*userResolver {
+	// Return a couple of dummy friends (data itself not important for field selection example)
+	return []*userResolver{
+		{u: user{id: "F1", name: "Bob"}},
+		{u: user{id: "F2", name: "Carol"}},
+	}
+}
 
 type root struct{}
 
@@ -34,8 +40,8 @@ func Example_selectedFieldNames() {
         type User { id: ID! name: String email: String friends: [User!]! }
     `
 	schema := graphql.MustParseSchema(s, &root{})
-	query := `query { user(id: "U1") { id name } }`
+	query := `query { user(id: "U1") { id name friends { id name } } }`
 	_ = schema.Exec(context.Background(), query, "", nil)
 	// Output:
-	// [id name]
+	// [id name friends friends.id friends.name]
 }
diff -pruN 1.7.0-1/go.mod 1.8.0-1/go.mod
--- 1.7.0-1/go.mod	2025-08-19 13:15:23.000000000 +0000
+++ 1.8.0-1/go.mod	2025-09-09 11:06:15.000000000 +0000
@@ -1,9 +1,16 @@
 module github.com/graph-gophers/graphql-go
 
-go 1.16
+go 1.24.0
 
 require (
 	github.com/opentracing/opentracing-go v1.2.0
-	go.opentelemetry.io/otel v1.6.3
-	go.opentelemetry.io/otel/trace v1.6.3
+	go.opentelemetry.io/otel v1.38.0
+	go.opentelemetry.io/otel/trace v1.38.0
+)
+
+require (
+	github.com/go-logr/logr v1.4.3 // indirect
+	github.com/go-logr/stdr v1.2.2 // indirect
+	go.opentelemetry.io/auto/sdk v1.1.0 // indirect
+	go.opentelemetry.io/otel/metric v1.38.0 // indirect
 )
diff -pruN 1.7.0-1/go.sum 1.8.0-1/go.sum
--- 1.7.0-1/go.sum	2025-08-19 13:15:23.000000000 +0000
+++ 1.8.0-1/go.sum	2025-09-09 11:06:15.000000000 +0000
@@ -1,25 +1,28 @@
-github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
-github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0=
-github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
+github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI=
+github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
 github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
 github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
-github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o=
-github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE=
+github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
+github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
 github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs=
 github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc=
 github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
 github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
 github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
-github.com/stretchr/testify v1.7.1 h1:5TQK59W5E3v0r2duFAb7P95B6hEeOyEnHRa8MjYSMTY=
-github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
-go.opentelemetry.io/otel v1.6.3 h1:FLOfo8f9JzFVFVyU+MSRJc2HdEAXQgm7pIv2uFKRSZE=
-go.opentelemetry.io/otel v1.6.3/go.mod h1:7BgNga5fNlF/iZjG06hM3yofffp0ofKCDwSXx1GC4dI=
-go.opentelemetry.io/otel/trace v1.6.3 h1:IqN4L+5b0mPNjdXIiZ90Ni4Bl5BRkDQywePLWemd9bc=
-go.opentelemetry.io/otel/trace v1.6.3/go.mod h1:GNJQusJlUgZl9/TQBPKU/Y/ty+0iVB5fjhKeJGZPGFs=
-golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
-gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
+github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
+go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA=
+go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
+go.opentelemetry.io/otel v1.38.0 h1:RkfdswUDRimDg0m2Az18RKOsnI8UDzppJAtj01/Ymk8=
+go.opentelemetry.io/otel v1.38.0/go.mod h1:zcmtmQ1+YmQM9wrNsTGV/q/uyusom3P8RxwExxkZhjM=
+go.opentelemetry.io/otel/metric v1.38.0 h1:Kl6lzIYGAh5M159u9NgiRkmoMKjvbsKtYRwgfrA6WpA=
+go.opentelemetry.io/otel/metric v1.38.0/go.mod h1:kB5n/QoRM8YwmUahxvI3bO34eVtQf2i4utNVLr9gEmI=
+go.opentelemetry.io/otel/trace v1.38.0 h1:Fxk5bKrDZJUH+AMyyIXGcFAPah0oRcT+LuNtJrmcNLE=
+go.opentelemetry.io/otel/trace v1.38.0/go.mod h1:j1P9ivuFsTceSWe1oY+EeW3sc+Pp42sO++GHkg4wwhs=
+gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
+gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
diff -pruN 1.7.0-1/graphql.go 1.8.0-1/graphql.go
--- 1.7.0-1/graphql.go	2025-08-19 13:15:23.000000000 +0000
+++ 1.8.0-1/graphql.go	2025-09-09 11:06:15.000000000 +0000
@@ -86,6 +86,7 @@ type Schema struct {
 	subscribeResolverTimeout time.Duration
 	useFieldResolvers        bool
 	disableFieldSelections   bool
+	overlapPairLimit         int
 }
 
 // AST returns the abstract syntax tree of the GraphQL schema definition.
@@ -152,6 +153,14 @@ func MaxQueryLength(n int) SchemaOpt {
 	}
 }
 
+// OverlapValidationLimit caps the number of overlapping selection pairs that will be examined
+// during validation of a single operation (including fragments). A value of 0 disables the cap.
+// When the cap is exceeded, validation aborts early with an error (rule: OverlapValidationLimitExceeded)
+// to protect against maliciously constructed queries designed to exhaust memory/CPU.
+func OverlapValidationLimit(n int) SchemaOpt {
+	return func(s *Schema) { s.overlapPairLimit = n }
+}
+
 // Tracer is used to trace queries and fields. It defaults to [noop.Tracer].
 func Tracer(t tracer.Tracer) SchemaOpt {
 	return func(s *Schema) {
@@ -247,7 +256,7 @@ func (s *Schema) ValidateWithVariables(q
 		return []*errors.QueryError{errors.Errorf("executable document must contain at least one operation")}
 	}
 
-	return validation.Validate(s.schema, doc, variables, s.maxDepth)
+	return validation.Validate(s.schema, doc, variables, s.maxDepth, s.overlapPairLimit)
 }
 
 // Exec executes the given query with the schema's resolver. It panics if the schema was created
@@ -270,7 +279,7 @@ func (s *Schema) exec(ctx context.Contex
 	}
 
 	validationFinish := s.validationTracer.TraceValidation(ctx)
-	errs := validation.Validate(s.schema, doc, variables, s.maxDepth)
+	errs := validation.Validate(s.schema, doc, variables, s.maxDepth, s.overlapPairLimit)
 	validationFinish(errs)
 	if len(errs) != 0 {
 		return &Response{Errors: errs}
diff -pruN 1.7.0-1/graphql_test.go 1.8.0-1/graphql_test.go
--- 1.7.0-1/graphql_test.go	2025-08-19 13:15:23.000000000 +0000
+++ 1.8.0-1/graphql_test.go	2025-09-09 11:06:15.000000000 +0000
@@ -1459,6 +1459,10 @@ func (r *testDeprecatedDirectiveResolver
 	return 0
 }
 
+func (r *testDeprecatedDirectiveResolver) Name() string {
+	return "test"
+}
+
 func TestDeprecatedDirective(t *testing.T) {
 	t.Parallel()
 
@@ -1511,6 +1515,7 @@ func TestDeprecatedDirective(t *testing.
 				}
 
 				type Query {
+					name: String!
 				}
 
 				enum Test {
@@ -1552,6 +1557,9 @@ func TestDeprecatedDirective(t *testing.
 }
 
 func TestSpecifiedByDirective(t *testing.T) {
+	type nameResolver struct {
+		Name string
+	}
 	gqltesting.RunTests(t, []*gqltesting.Test{
 		{
 			Schema: graphql.MustParseSchema(`
@@ -1559,11 +1567,12 @@ func TestSpecifiedByDirective(t *testing
 				query: Query
 			}
 			type Query {
+			    name: String!
 			}
 			scalar UUID @specifiedBy(
 				url: "https://tools.ietf.org/html/rfc4122"
 			)
-			`, &struct{}{}),
+			`, &nameResolver{Name: "Pavel"}, graphql.UseFieldResolvers()),
 			Query: `
 				query {
 					__type(name: "UUID") {
diff -pruN 1.7.0-1/internal/exec/exec.go 1.8.0-1/internal/exec/exec.go
--- 1.7.0-1/internal/exec/exec.go	2025-08-19 13:15:23.000000000 +0000
+++ 1.8.0-1/internal/exec/exec.go	2025-09-09 11:06:15.000000000 +0000
@@ -13,8 +13,8 @@ import (
 	"github.com/graph-gophers/graphql-go/errors"
 	"github.com/graph-gophers/graphql-go/internal/exec/resolvable"
 	"github.com/graph-gophers/graphql-go/internal/exec/selected"
+	"github.com/graph-gophers/graphql-go/internal/exec/selections"
 	"github.com/graph-gophers/graphql-go/internal/query"
-	"github.com/graph-gophers/graphql-go/internal/selections"
 	"github.com/graph-gophers/graphql-go/log"
 	"github.com/graph-gophers/graphql-go/trace/tracer"
 )
diff -pruN 1.7.0-1/internal/exec/packer/packer.go 1.8.0-1/internal/exec/packer/packer.go
--- 1.7.0-1/internal/exec/packer/packer.go	2025-08-19 13:15:23.000000000 +0000
+++ 1.8.0-1/internal/exec/packer/packer.go	2025-09-09 11:06:15.000000000 +0000
@@ -314,7 +314,7 @@ func (p *ValuePacker) Pack(value interfa
 		return reflect.Value{}, errors.Errorf("got null for non-null")
 	}
 
-	coerced, err := unmarshalInput(p.ValueType, value)
+	coerced, err := UnmarshalInput(p.ValueType, value)
 	if err != nil {
 		return reflect.Value{}, fmt.Errorf("could not unmarshal %#v (%T) into %s: %s", value, value, p.ValueType, err)
 	}
@@ -337,7 +337,7 @@ func (p *unmarshalerPacker) Pack(value i
 	return v.Elem(), nil
 }
 
-func unmarshalInput(typ reflect.Type, input interface{}) (interface{}, error) {
+func UnmarshalInput(typ reflect.Type, input interface{}) (interface{}, error) {
 	if reflect.TypeOf(input) == typ {
 		return input, nil
 	}
diff -pruN 1.7.0-1/internal/exec/selections/context.go 1.8.0-1/internal/exec/selections/context.go
--- 1.7.0-1/internal/exec/selections/context.go	1970-01-01 00:00:00.000000000 +0000
+++ 1.8.0-1/internal/exec/selections/context.go	2025-09-09 11:06:15.000000000 +0000
@@ -0,0 +1,276 @@
+// Package selections is for internal use to share selection context between
+// the execution engine and the public graphql package without creating an
+// import cycle.
+//
+// The execution layer stores the flattened child selection set for the field
+// currently being resolved. The public API converts this into user-friendly
+// helpers (SelectedFieldNames, etc.).
+package selections
+
+import (
+	"context"
+	"fmt"
+	"reflect"
+	"strings"
+	"sync"
+
+	"github.com/graph-gophers/graphql-go/decode"
+	"github.com/graph-gophers/graphql-go/internal/exec/packer"
+	"github.com/graph-gophers/graphql-go/internal/exec/selected"
+)
+
+type ctxKey struct{}
+
+// Lazy holds raw selections and computes the flattened, deduped name list once on demand.
+type Lazy struct {
+	raw     []selected.Selection
+	once    sync.Once
+	names   []string
+	set     map[string]struct{}
+	decoded map[string]map[reflect.Type]reflect.Value // path -> type -> value copy
+}
+
+// Args returns the argument map for the first occurrence of the provided
+// dot-delimited field path under the current resolver. The boolean reports
+// if a matching field was found. The returned map MUST NOT be mutated by
+// callers (it is the internal map). Paths follow the same format produced by
+// SelectedFieldNames (e.g. "books", "books.reviews").
+func (l *Lazy) Args(path string) (map[string]interface{}, bool) {
+	if l == nil || len(path) == 0 {
+		return nil, false
+	}
+	// Walk the raw selections and return the args of the first field matching the path.
+	for _, sel := range l.raw {
+		if m, ok := matchArgsRecursive(sel, path, ""); ok {
+			return m, true
+		}
+	}
+	return nil, false
+}
+
+func matchArgsRecursive(sel selected.Selection, want, prefix string) (map[string]interface{}, bool) {
+	switch s := sel.(type) {
+	case *selected.SchemaField:
+		name := s.Name
+		if len(name) >= 2 && name[:2] == "__" { // skip meta
+			return nil, false
+		}
+		cur := name
+		if prefix != "" {
+			cur = prefix + "." + name
+		}
+		if cur == want {
+			return s.Args, true
+		}
+		for _, child := range s.Sels {
+			if m, ok := matchArgsRecursive(child, want, cur); ok {
+				return m, true
+			}
+		}
+	case *selected.TypeAssertion:
+		for _, child := range s.Sels {
+			if m, ok := matchArgsRecursive(child, want, prefix); ok {
+				return m, true
+			}
+		}
+	case *selected.TypenameField:
+		return nil, false
+	}
+	return nil, false
+}
+
+// Names returns the deduplicated child field names computing them once.
+func (l *Lazy) Names() []string {
+	if l == nil {
+		return nil
+	}
+	l.once.Do(func() {
+		seen := make(map[string]struct{}, len(l.raw))
+		ordered := make([]string, 0, len(l.raw))
+		collectNestedPaths(&ordered, seen, "", l.raw)
+		l.names = ordered
+		l.set = seen
+	})
+	out := make([]string, len(l.names))
+	copy(out, l.names)
+	return out
+}
+
+// Has reports if a field name is in the selection list.
+func (l *Lazy) Has(name string) bool {
+	if l == nil {
+		return false
+	}
+	if l.set == nil { // ensure computed
+		_ = l.Names()
+	}
+	_, ok := l.set[name]
+	return ok
+}
+
+func collectNestedPaths(dst *[]string, seen map[string]struct{}, prefix string, sels []selected.Selection) {
+	for _, sel := range sels {
+		switch s := sel.(type) {
+		case *selected.SchemaField:
+			name := s.Name
+			if len(name) >= 2 && name[:2] == "__" {
+				continue
+			}
+			path := name
+			if prefix != "" {
+				path = prefix + "." + name
+			}
+			if _, ok := seen[path]; !ok {
+				seen[path] = struct{}{}
+				*dst = append(*dst, path)
+			}
+			if len(s.Sels) > 0 {
+				collectNestedPaths(dst, seen, path, s.Sels)
+			}
+		case *selected.TypeAssertion:
+			collectNestedPaths(dst, seen, prefix, s.Sels)
+		case *selected.TypenameField:
+			continue
+		}
+	}
+}
+
+// With stores a lazy wrapper for selections in the context.
+func With(ctx context.Context, sels []selected.Selection) context.Context {
+	if len(sels) == 0 {
+		return ctx
+	}
+	return context.WithValue(ctx, ctxKey{}, &Lazy{raw: sels})
+}
+
+// FromContext retrieves the lazy wrapper (may be nil).
+func FromContext(ctx context.Context) *Lazy {
+	v, _ := ctx.Value(ctxKey{}).(*Lazy)
+	return v
+}
+
+// DecodeArgsInto decodes the argument map for the dot path into dst (pointer to struct).
+// Returns (true,nil) if decoded, (false,nil) if path missing. Caches per path+type.
+func (l *Lazy) DecodeArgsInto(path string, dst interface{}) (bool, error) {
+	if l == nil || dst == nil {
+		return false, nil
+	}
+	args, ok := l.Args(path)
+	if !ok || len(args) == 0 {
+		return false, nil
+	}
+	rv := reflect.ValueOf(dst)
+	if rv.Kind() != reflect.Ptr || rv.IsNil() {
+		return false, fmt.Errorf("destination must be non-nil pointer")
+	}
+	rv = rv.Elem()
+	if rv.Kind() != reflect.Struct {
+		return false, fmt.Errorf("destination must point to struct")
+	}
+	rt := rv.Type()
+	if l.decoded == nil {
+		l.decoded = make(map[string]map[reflect.Type]reflect.Value)
+	}
+	if m := l.decoded[path]; m != nil {
+		if cached, ok := m[rt]; ok {
+			rv.Set(cached)
+			return true, nil
+		}
+	}
+	// decode
+	for i := 0; i < rt.NumField(); i++ {
+		sf := rt.Field(i)
+		if sf.PkgPath != "" { // unexported
+			continue
+		}
+		name := sf.Tag.Get("graphql")
+		if name == "" {
+			name = lowerFirst(sf.Name)
+		}
+		raw, present := args[name]
+		if !present || raw == nil {
+			continue
+		}
+		if err := assignArg(rv.Field(i), raw); err != nil {
+			return false, fmt.Errorf("arg %s: %w", name, err)
+		}
+	}
+	if l.decoded[path] == nil {
+		l.decoded[path] = make(map[reflect.Type]reflect.Value)
+	}
+	// create a copy to cache so future mutations to dst by caller don't taint cache
+	copyVal := reflect.New(rt).Elem()
+	copyVal.Set(rv)
+	l.decoded[path][rt] = copyVal
+	return true, nil
+}
+
+func assignArg(dst reflect.Value, src interface{}) error {
+	if !dst.IsValid() {
+		return nil
+	}
+	// Support custom scalars implementing decode.Unmarshaler (pointer receiver).
+	if dst.CanAddr() {
+		if um, ok := dst.Addr().Interface().(decode.Unmarshaler); ok {
+			if err := um.UnmarshalGraphQL(src); err != nil {
+				return err
+			}
+			return nil
+		}
+	}
+	switch dst.Kind() {
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.String, reflect.Bool, reflect.Float32, reflect.Float64:
+		coerced, err := packer.UnmarshalInput(dst.Type(), src)
+		if err != nil {
+			return err
+		}
+		dst.Set(reflect.ValueOf(coerced))
+	case reflect.Struct:
+		m, ok := src.(map[string]interface{})
+		if !ok {
+			return fmt.Errorf("expected map for struct, got %T", src)
+		}
+		for i := 0; i < dst.NumField(); i++ {
+			sf := dst.Type().Field(i)
+			if sf.PkgPath != "" { // unexported
+				continue
+			}
+			name := sf.Tag.Get("graphql")
+			if name == "" {
+				name = lowerFirst(sf.Name)
+			}
+			if v, ok2 := m[name]; ok2 {
+				if err := assignArg(dst.Field(i), v); err != nil {
+					return err
+				}
+			}
+		}
+	case reflect.Slice:
+		sv := reflect.ValueOf(src)
+		if sv.Kind() != reflect.Slice {
+			return fmt.Errorf("cannot convert %T to slice", src)
+		}
+		out := reflect.MakeSlice(dst.Type(), sv.Len(), sv.Len())
+		for i := 0; i < sv.Len(); i++ {
+			if err := assignArg(out.Index(i), sv.Index(i).Interface()); err != nil {
+				return err
+			}
+		}
+		dst.Set(out)
+	case reflect.Ptr:
+		if dst.IsNil() {
+			dst.Set(reflect.New(dst.Type().Elem()))
+		}
+		return assignArg(dst.Elem(), src)
+	default:
+		// silently ignore unsupported kinds
+	}
+	return nil
+}
+
+func lowerFirst(s string) string {
+	if s == "" {
+		return s
+	}
+	return strings.ToLower(s[:1]) + s[1:]
+}
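
For readers of the decoder above, a short illustration of the argument-struct shape `DecodeArgsInto` expects: exported fields are matched by their `graphql` tag when present, otherwise by the lower-cased field name, and pointer fields are left nil when an argument is absent. The `products(after: ID, first: Int, orderBy: ProductOrder)` field is hypothetical (it mirrors the prefetch test in this release).

```go
package argsketch

import "github.com/graph-gophers/graphql-go"

// productsArgs mirrors a hypothetical field: products(after: ID, first: Int, orderBy: ProductOrder).
// Names are matched via the `graphql` struct tag when present, otherwise by lower-casing
// the first letter of the field name.
type productsArgs struct {
	After   graphql.ID // zero value "" when the client omitted "after"
	First   *int32     // stays nil when "first" is absent
	OrderBy *string    `graphql:"orderBy"` // tag shown for illustration; "OrderBy" lower-cases to "orderBy" anyway
}
```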
diff -pruN 1.7.0-1/internal/schema/schema.go 1.8.0-1/internal/schema/schema.go
--- 1.7.0-1/internal/schema/schema.go	2025-08-19 13:15:23.000000000 +0000
+++ 1.8.0-1/internal/schema/schema.go	2025-09-09 11:06:15.000000000 +0000
@@ -259,12 +259,18 @@ func mergeExtensions(s *ast.Schema) erro
 func resolveNamedType(s *ast.Schema, t ast.NamedType) error {
 	switch t := t.(type) {
 	case *ast.ObjectTypeDefinition:
+		if len(t.Fields) == 0 {
+			return errors.Errorf("object type %q must define one or more fields", t.Name)
+		}
 		for _, f := range t.Fields {
 			if err := resolveField(s, f); err != nil {
 				return err
 			}
 		}
 	case *ast.InterfaceTypeDefinition:
+		if len(t.Fields) == 0 {
+			return errors.Errorf("interface type %q must define one or more fields", t.Name)
+		}
 		for _, f := range t.Fields {
 			if err := resolveField(s, f); err != nil {
 				return err
@@ -274,6 +280,9 @@ func resolveNamedType(s *ast.Schema, t a
 			return err
 		}
 	case *ast.InputObject:
+		if len(t.Values) == 0 {
+			return errors.Errorf("input object type %q must define one or more fields", t.Name)
+		}
 		if err := resolveInputObject(s, t.Values); err != nil {
 			return err
 		}
diff -pruN 1.7.0-1/internal/schema/schema_test.go 1.8.0-1/internal/schema/schema_test.go
--- 1.7.0-1/internal/schema/schema_test.go	2025-08-19 13:15:23.000000000 +0000
+++ 1.8.0-1/internal/schema/schema_test.go	2025-09-09 11:06:15.000000000 +0000
@@ -41,8 +41,9 @@ func TestParse(t *testing.T) {
 			sdl: `
 			interface Greeting { 
 				message: String!
-			} 
+			}
 			type Welcome implements Greeting {
+				id: ID!
 			}`,
 			validateError: func(err error) error {
 				if err == nil {
@@ -967,7 +968,7 @@ Second line of the description.
 		{
 			name: "Decorating input object with an undeclared directive should return an error",
 			sdl: `
-			input InputObject @undeclareddirective{}
+			input InputObject @undeclareddirective{field: String!}
 			`,
 			validateError: func(err error) error {
 				prefix := `graphql: directive "undeclareddirective" not found`
@@ -980,7 +981,7 @@ Second line of the description.
 		{
 			name: "Decorating interface with an undeclared directive should return an error",
 			sdl: `
-			interface I @undeclareddirective {}
+			interface I @undeclareddirective {field: String!}
 			`,
 			validateError: func(err error) error {
 				prefix := `graphql: directive "undeclareddirective" not found`
diff -pruN 1.7.0-1/internal/selections/context.go 1.8.0-1/internal/selections/context.go
--- 1.7.0-1/internal/selections/context.go	2025-08-19 13:15:23.000000000 +0000
+++ 1.8.0-1/internal/selections/context.go	1970-01-01 00:00:00.000000000 +0000
@@ -1,107 +0,0 @@
-// Package selections is for internal use to share selection context between
-// the execution engine and the public graphql package without creating an
-// import cycle.
-//
-// The execution layer stores the flattened child selection set for the field
-// currently being resolved. The public API converts this into user-friendly
-// helpers (SelectedFieldNames, etc.).
-package selections
-
-import (
-	"context"
-	"sync"
-
-	"github.com/graph-gophers/graphql-go/internal/exec/selected"
-)
-
-// ctxKey is an unexported unique type used as context key.
-type ctxKey struct{}
-
-// Lazy holds raw selections and computes the flattened, deduped name list once on demand.
-type Lazy struct {
-	raw   []selected.Selection
-	once  sync.Once
-	names []string
-	set   map[string]struct{}
-}
-
-// Names returns the deduplicated child field names computing them once.
-func (l *Lazy) Names() []string {
-	if l == nil {
-		return nil
-	}
-	l.once.Do(func() {
-		seen := make(map[string]struct{}, len(l.raw))
-		ordered := make([]string, 0, len(l.raw))
-		for _, s := range l.raw {
-			switch s := s.(type) {
-			case *selected.SchemaField:
-				name := s.Name
-				if len(name) >= 2 && name[:2] == "__" {
-					continue
-				}
-				if _, ok := seen[name]; !ok {
-					seen[name] = struct{}{}
-					ordered = append(ordered, name)
-				}
-			case *selected.TypeAssertion:
-				collectFromTypeAssertion(&ordered, seen, s.Sels)
-			case *selected.TypenameField:
-				continue
-			}
-		}
-		l.names = ordered
-		l.set = seen
-	})
-	// Return a copy to keep internal slice immutable to callers.
-	out := make([]string, len(l.names))
-	copy(out, l.names)
-	return out
-}
-
-// Has reports if a field name is in the selection list.
-func (l *Lazy) Has(name string) bool {
-	if l == nil {
-		return false
-	}
-	if l.set == nil { // ensure computed
-		_ = l.Names()
-	}
-	_, ok := l.set[name]
-	return ok
-}
-
-// collectFromTypeAssertion flattens selections under a type assertion fragment.
-func collectFromTypeAssertion(dst *[]string, seen map[string]struct{}, sels []selected.Selection) {
-	for _, s := range sels {
-		switch s := s.(type) {
-		case *selected.SchemaField:
-			name := s.Name
-			if len(name) >= 2 && name[:2] == "__" {
-				continue
-			}
-			if _, ok := seen[name]; !ok {
-				seen[name] = struct{}{}
-				*dst = append(*dst, name)
-			}
-		case *selected.TypeAssertion:
-			collectFromTypeAssertion(dst, seen, s.Sels)
-		case *selected.TypenameField:
-			continue
-		}
-	}
-}
-
-// With stores a lazy wrapper for selections in the context.
-func With(ctx context.Context, sels []selected.Selection) context.Context {
-	if len(sels) == 0 {
-		return ctx
-	}
-	return context.WithValue(ctx, ctxKey{}, &Lazy{raw: sels})
-}
-
-// FromContext retrieves the lazy wrapper (may be nil).
-func FromContext(ctx context.Context) *Lazy {
-	v, _ := ctx.Value(ctxKey{}).(*Lazy)
-	return v
-}
diff -pruN 1.7.0-1/internal/validation/overlap_fuzz_test.go 1.8.0-1/internal/validation/overlap_fuzz_test.go
--- 1.7.0-1/internal/validation/overlap_fuzz_test.go	1970-01-01 00:00:00.000000000 +0000
+++ 1.8.0-1/internal/validation/overlap_fuzz_test.go	2025-09-09 11:06:15.000000000 +0000
@@ -0,0 +1,85 @@
+package validation_test
+
+import (
+	"math/rand"
+	"testing"
+	"time"
+
+	"github.com/graph-gophers/graphql-go/internal/query"
+	"github.com/graph-gophers/graphql-go/internal/schema"
+	v "github.com/graph-gophers/graphql-go/internal/validation"
+)
+
+// FuzzValidateOverlapMixed exercises the overlap validation logic with randomly generated queries
+// containing many sibling fields and fragment spreads to ensure it does not panic or explode in memory.
+// It uses a modest overlap pair cap to keep each iteration bounded.
+func FuzzValidateOverlapMixed(f *testing.F) {
+	baseQueries := []string{
+		"query{root{id}}",
+		"query Q{root{id name}}",
+	}
+	for _, q := range baseQueries {
+		f.Add(q)
+	}
+
+	s := schema.New()
+	_ = schema.Parse(s, `schema{query:Query} type Query{root: Thing} type Thing { id: ID name: String value: String }`, false)
+
+	randSource := rand.New(rand.NewSource(time.Now().UnixNano()))
+
+	f.Fuzz(func(t *testing.T, seed string) {
+		// Derive a per-iteration RNG from the seed length plus a time-seeded source; the ranges below keep complexity bounded.
+		r := rand.New(rand.NewSource(int64(len(seed)) + randSource.Int63()))
+		fieldCount := 50 + r.Intn(150) // 50-199
+		fragCount := 1 + r.Intn(5)
+
+		// Build fragments.
+		fragBodies := make([]string, fragCount)
+		for i := 0; i < fragCount; i++ {
+			// each fragment gets subset of fields
+			var body string
+			innerFields := 5 + r.Intn(20)
+			for j := 0; j < innerFields; j++ {
+				body += " f" + nameIdx(r.Intn(500)) + ":id"
+			}
+			fragBodies[i] = "fragment F" + nameIdx(i) + " on Thing{" + body + " }"
+		}
+
+		// Root selection
+		sel := "query{root{"
+		for i := 0; i < fieldCount; i++ {
+			sel += " a" + nameIdx(r.Intn(1000)) + ":id"
+		}
+		// Sprinkle fragment spreads
+		for i := 0; i < fragCount; i++ {
+			sel += " ...F" + nameIdx(i)
+		}
+		sel += "}}"
+		queryText := sel
+		for _, fb := range fragBodies {
+			queryText += fb
+		}
+
+		doc, err := query.Parse(queryText)
+		if err != nil {
+			return
+		} // parser fuzzing not our goal
+		if len(doc.Operations) == 0 {
+			return
+		}
+		// Use overlap limit to bound cost.
+		errs := v.Validate(s, doc, nil, 0, 10_000)
+		// Ensure no panic (implicit). Optionally sanity check: errors slice must not be ridiculously huge.
+		if len(errs) > 1000 {
+			t.Fatalf("too many errors: %d", len(errs))
+		}
+	})
+}
+
+func nameIdx(i int) string {
+	const letters = "abcdefghijklmnopqrstuvwxyz"
+	if i < len(letters) {
+		return string(letters[i])
+	}
+	return string(letters[i%len(letters)]) + nameIdx(i/len(letters))
+}
diff -pruN 1.7.0-1/internal/validation/validate_max_depth_test.go 1.8.0-1/internal/validation/validate_max_depth_test.go
--- 1.7.0-1/internal/validation/validate_max_depth_test.go	2025-08-19 13:15:23.000000000 +0000
+++ 1.8.0-1/internal/validation/validate_max_depth_test.go	2025-09-09 11:06:15.000000000 +0000
@@ -83,7 +83,7 @@ func (tc maxDepthTestCase) Run(t *testin
 			t.Fatal(qErr)
 		}
 
-		errs := Validate(s, doc, nil, tc.depth)
+		errs := Validate(s, doc, nil, tc.depth, 0)
 		if len(tc.expectedErrors) > 0 {
 			if len(errs) > 0 {
 				for _, expected := range tc.expectedErrors {
@@ -489,7 +489,7 @@ func TestMaxDepthValidation(t *testing.T
 				t.Fatal(err)
 			}
 
-			context := newContext(s, doc, tc.maxDepth)
+			context := newContext(s, doc, tc.maxDepth, 0)
 			op := doc.Operations[0]
 
 			opc := &opContext{context: context, ops: doc.Operations}
diff -pruN 1.7.0-1/internal/validation/validation.go 1.8.0-1/internal/validation/validation.go
--- 1.7.0-1/internal/validation/validation.go	2025-08-19 13:15:23.000000000 +0000
+++ 1.8.0-1/internal/validation/validation.go	2025-09-09 11:06:15.000000000 +0000
@@ -26,14 +26,17 @@ type fieldInfo struct {
 }
 
 type context struct {
-	schema           *ast.Schema
-	doc              *ast.ExecutableDefinition
-	errs             []*errors.QueryError
-	opErrs           map[*ast.OperationDefinition][]*errors.QueryError
-	usedVars         map[*ast.OperationDefinition]varSet
-	fieldMap         map[*ast.Field]fieldInfo
-	overlapValidated map[selectionPair]struct{}
-	maxDepth         int
+	schema               *ast.Schema
+	doc                  *ast.ExecutableDefinition
+	errs                 []*errors.QueryError
+	opErrs               map[*ast.OperationDefinition][]*errors.QueryError
+	usedVars             map[*ast.OperationDefinition]varSet
+	fieldMap             map[*ast.Field]fieldInfo
+	overlapValidated     map[selectionPair]struct{}
+	maxDepth             int
+	overlapPairLimit     int
+	overlapPairsObserved int
+	overlapLimitHit      bool
 }
 
 func (c *context) addErr(loc errors.Location, rule string, format string, a ...interface{}) {
@@ -53,7 +56,7 @@ type opContext struct {
 	ops []*ast.OperationDefinition
 }
 
-func newContext(s *ast.Schema, doc *ast.ExecutableDefinition, maxDepth int) *context {
+func newContext(s *ast.Schema, doc *ast.ExecutableDefinition, maxDepth int, overlapPairLimit int) *context {
 	return &context{
 		schema:           s,
 		doc:              doc,
@@ -62,11 +65,12 @@ func newContext(s *ast.Schema, doc *ast.
 		fieldMap:         make(map[*ast.Field]fieldInfo),
 		overlapValidated: make(map[selectionPair]struct{}),
 		maxDepth:         maxDepth,
+		overlapPairLimit: overlapPairLimit,
 	}
 }
 
-func Validate(s *ast.Schema, doc *ast.ExecutableDefinition, variables map[string]interface{}, maxDepth int) []*errors.QueryError {
-	c := newContext(s, doc, maxDepth)
+func Validate(s *ast.Schema, doc *ast.ExecutableDefinition, variables map[string]interface{}, maxDepth int, overlapPairLimit int) []*errors.QueryError {
+	c := newContext(s, doc, maxDepth, overlapPairLimit)
 
 	opNames := make(nameSet, len(doc.Operations))
 	fragUsedBy := make(map[*ast.FragmentDefinition][]*ast.OperationDefinition)
@@ -303,13 +307,76 @@ func validateMaxDepth(c *opContext, sels
 }
 
 func validateSelectionSet(c *opContext, sels []ast.Selection, t ast.NamedType) {
+	if len(sels) == 0 {
+		return
+	}
+
+	// First pass: validate each selection and bucket fields by response name (alias or name).
+	fieldGroups := make(map[string][]ast.Selection)
+	var fragments []ast.Selection // fragment spreads & inline fragments
 	for _, sel := range sels {
+		if c.overlapLimitHit {
+			return
+		}
 		validateSelection(c, sel, t)
+		switch s := sel.(type) {
+		case *ast.Field:
+			name := s.Alias.Name
+			if name == "" {
+				name = s.Name.Name
+			}
+			fieldGroups[name] = append(fieldGroups[name], sel)
+		default:
+			fragments = append(fragments, sel)
+		}
 	}
 
-	for i, a := range sels {
-		for _, b := range sels[i+1:] {
-			c.validateOverlap(a, b, nil, nil)
+	// Compare fields only within the same response-name group (previously O(n^2) across all sibling fields).
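+	// For example, with siblings { a: id, a: name, b: id }, only the two "a" selections are
+	// compared (one pair) instead of all C(3,2)=3 sibling pairs.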
+	for _, group := range fieldGroups {
+		if c.overlapLimitHit {
+			break
+		}
+		if len(group) < 2 {
+			continue
+		}
+		for i, a := range group {
+			if c.overlapLimitHit {
+				break
+			}
+			for _, b := range group[i+1:] {
+				if c.overlapLimitHit {
+					break
+				}
+				c.validateOverlap(a, b, nil, nil)
+			}
+		}
+	}
+
+	// Fragments can introduce any field names, so we must compare them with all fields and each other.
+	if len(fragments) > 0 && !c.overlapLimitHit {
+		// Flatten fields for fragment comparison.
+		var allFields []ast.Selection
+		for _, group := range fieldGroups {
+			allFields = append(allFields, group...)
+		}
+		for i, fa := range fragments {
+			if c.overlapLimitHit {
+				break
+			}
+			// Compare fragment with all fields
+			for _, fld := range allFields {
+				if c.overlapLimitHit {
+					break
+				}
+				c.validateOverlap(fa, fld, nil, nil)
+			}
+			// Compare fragment with following fragments
+			for _, fb := range fragments[i+1:] {
+				if c.overlapLimitHit {
+					break
+				}
+				c.validateOverlap(fa, fb, nil, nil)
+			}
 		}
 	}
 }
@@ -523,11 +590,38 @@ func (c *context) validateOverlap(a, b a
 		return
 	}
 
-	if _, ok := c.overlapValidated[selectionPair{a, b}]; ok {
+	// Optimisation 1: store only one direction of the pair to halve memory and lookups.
+	pa := reflect.ValueOf(a).Pointer()
+	pb := reflect.ValueOf(b).Pointer()
+	if pb < pa { // canonical ordering
+		a, b = b, a
+	}
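+	// After the swap, (a, b) and (b, a) map to the same key, so a single cache entry suffices.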
+	key := selectionPair{a: a, b: b}
+	if _, ok := c.overlapValidated[key]; ok {
 		return
 	}
-	c.overlapValidated[selectionPair{a, b}] = struct{}{}
-	c.overlapValidated[selectionPair{b, a}] = struct{}{}
+	c.overlapValidated[key] = struct{}{}
+
+	if c.overlapPairLimit > 0 && !c.overlapLimitHit {
+		c.overlapPairsObserved++
+		if c.overlapPairsObserved > c.overlapPairLimit {
+			c.overlapLimitHit = true
+			// determine a representative location for error reporting
+			var loc errors.Location
+			switch sel := a.(type) {
+			case *ast.Field:
+				loc = sel.Alias.Loc
+			case *ast.InlineFragment:
+				loc = sel.Loc
+			case *ast.FragmentSpread:
+				loc = sel.Loc
+			default:
+				// leave zero value
+			}
+			c.addErr(loc, "OverlapValidationLimitExceeded", "Overlapping field validation aborted after examining %d pairs (limit %d). Consider restructuring the query or increasing the limit.", c.overlapPairsObserved-1, c.overlapPairLimit)
+			return
+		}
+	}
 
 	switch a := a.(type) {
 	case *ast.Field:
@@ -608,11 +702,54 @@ func (c *context) validateFieldOverlap(a
 
 	var reasons []string
 	var locs []errors.Location
+
+	// Fast-path: if either side has no subselections we are done.
+	if len(a.SelectionSet) == 0 || len(b.SelectionSet) == 0 {
+		return nil, nil
+	}
+
+	// Optimisation 2: avoid the O(m*n) cartesian product for large sibling lists with mostly
+	// distinct response names (a common shape that is exploitable for DoS). Instead, index B's
+	// field selections by response name (alias or name). Each field in A is then compared only
+	// against fields in B with the same response name plus all fragment spreads / inline
+	// fragments (which can expand to any field names and must be compared exhaustively).
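+	// For example, if A selects { x: id, ...F } and B selects { x: id, y: name, ...G }, A's "x"
+	// is compared against B's "x" and against ...G, while ...F is compared against every
+	// selection in B.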
+	bFieldIndex := make(map[string][]ast.Selection, len(b.SelectionSet))
+	var bNonField []ast.Selection
+	for _, bs := range b.SelectionSet {
+		if f, ok := bs.(*ast.Field); ok {
+			name := f.Alias.Name
+			if name == "" { // alias may be empty, fall back to field name
+				name = f.Name.Name
+			}
+			bFieldIndex[name] = append(bFieldIndex[name], bs)
+			continue
+		}
+		bNonField = append(bNonField, bs)
+	}
+
 	for _, a2 := range a.SelectionSet {
+		if af, ok := a2.(*ast.Field); ok {
+			name := af.Alias.Name
+			if name == "" {
+				name = af.Name.Name
+			}
+			// Compare only against same-name fields + all non-field selections.
+			if matches := bFieldIndex[name]; len(matches) != 0 {
+				for _, bMatch := range matches {
+					c.validateOverlap(a2, bMatch, &reasons, &locs)
+				}
+			}
+			for _, bnf := range bNonField {
+				c.validateOverlap(a2, bnf, &reasons, &locs)
+			}
+			continue
+		}
+		// For fragments / inline fragments we still need to compare against every selection in B.
 		for _, b2 := range b.SelectionSet {
 			c.validateOverlap(a2, b2, &reasons, &locs)
 		}
 	}
+
 	return reasons, locs
 }
 
@@ -743,7 +880,7 @@ func validateName(c *context, locs []err
 
 func validateNameCustomMsg(c *context, locs []errors.Location, rule string, msg func() string) {
 	if len(locs) > 1 {
-		c.addErrMultiLoc(locs, rule, msg())
+		c.addErrMultiLoc(locs, rule, "%s", msg())
 		return
 	}
 }
diff -pruN 1.7.0-1/internal/validation/validation_test.go 1.8.0-1/internal/validation/validation_test.go
--- 1.7.0-1/internal/validation/validation_test.go	2025-08-19 13:15:23.000000000 +0000
+++ 1.8.0-1/internal/validation/validation_test.go	2025-09-09 11:06:15.000000000 +0000
@@ -88,7 +88,7 @@ func TestValidate(t *testing.T) {
 			if err != nil {
 				t.Fatalf("failed to parse query: %s", err)
 			}
-			errs := validation.Validate(schemas[test.Schema], d, test.Vars, 0)
+			errs := validation.Validate(schemas[test.Schema], d, test.Vars, 0, 0)
 			got := []*errors.QueryError{}
 			for _, err := range errs {
 				if err.Rule == test.Rule {
diff -pruN 1.7.0-1/overlap_limit_test.go 1.8.0-1/overlap_limit_test.go
--- 1.7.0-1/overlap_limit_test.go	1970-01-01 00:00:00.000000000 +0000
+++ 1.8.0-1/overlap_limit_test.go	2025-09-09 11:06:15.000000000 +0000
@@ -0,0 +1,77 @@
+package graphql_test
+
+import (
+	"testing"
+
+	graphql "github.com/graph-gophers/graphql-go"
+	gqlerrors "github.com/graph-gophers/graphql-go/errors"
+)
+
+const overlapLimitSchemaSDL = `schema { query: Query } type Query { root: Thing } type Thing { id: ID! name: String }`
+
+type overlapLimitRoot struct{}
+
+func (r *overlapLimitRoot) Root() *overlapThing { return &overlapThing{} }
+
+type overlapThing struct{}
+
+func (t *overlapThing) ID() graphql.ID { return graphql.ID("1") }
+func (t *overlapThing) Name() *string  { s := "n"; return &s }
+
+// TestOverlapValidationLimit exercises overlap pair limit behaviors (exceeded, unlimited, not reached)
+// in a single table-driven test for clarity and concision.
+func TestOverlapValidationLimit(t *testing.T) {
+	t.Parallel()
+
+	hasLimitErr := func(errs []*gqlerrors.QueryError) bool {
+		for _, e := range errs {
+			if e.Rule == "OverlapValidationLimitExceeded" {
+				return true
+			}
+		}
+		return false
+	}
+
+	tests := []struct {
+		name           string
+		opts           []graphql.SchemaOpt
+		query          string
+		expectLimitErr bool
+		comment        string
+	}{
+		{
+			name:           "exceeded",
+			opts:           []graphql.SchemaOpt{graphql.OverlapValidationLimit(3)}, // 5 repeated id fields -> combinations C(5,2)=10 > 3 => early abort
+			query:          `query { root { id id id id id } }`,
+			expectLimitErr: true,
+			comment:        "should trigger OverlapValidationLimitExceeded",
+		},
+		{
+			name:           "unlimited_no_option",
+			opts:           []graphql.SchemaOpt{}, // no option => unlimited
+			query:          `query { root { id id id id id } }`,
+			expectLimitErr: false,
+			comment:        "no limit option supplied, cap disabled",
+		},
+		{
+			name:           "not_reached",
+			opts:           []graphql.SchemaOpt{graphql.OverlapValidationLimit(100)}, // 3 id fields -> combinations C(3,2)=3 < 100 => no error
+			query:          `query { root { id id id } }`,
+			expectLimitErr: false,
+			comment:        "below configured limit",
+		},
+	}
+
+	for _, tc := range tests {
+		tc := tc
+		t.Run(tc.name, func(t *testing.T) {
+			t.Parallel()
+			schema := graphql.MustParseSchema(overlapLimitSchemaSDL, &overlapLimitRoot{}, tc.opts...)
+			errs := schema.Validate(tc.query)
+			gotLimitErr := hasLimitErr(errs)
+			if gotLimitErr != tc.expectLimitErr {
+				t.Fatalf("%s: expected limitErr=%v, got %v (errs=%#v)", tc.comment, tc.expectLimitErr, gotLimitErr, errs)
+			}
+		})
+	}
+}
diff -pruN 1.7.0-1/schema_empty_types_test.go 1.8.0-1/schema_empty_types_test.go
--- 1.7.0-1/schema_empty_types_test.go	1970-01-01 00:00:00.000000000 +0000
+++ 1.8.0-1/schema_empty_types_test.go	2025-09-09 11:06:15.000000000 +0000
@@ -0,0 +1,48 @@
+package graphql_test
+
+import (
+	"testing"
+
+	"github.com/graph-gophers/graphql-go"
+)
+
+func TestSchemaEmptyTypeDefinitions(t *testing.T) {
+	cases := []struct {
+		name    string
+		sdl     string
+		wantErr bool
+	}{
+		{
+			name:    "empty object type",
+			sdl:     `type Query { dummy: Int } type Empty { }`,
+			wantErr: true,
+		},
+		{
+			name:    "empty interface type",
+			sdl:     `type Query { dummy: Int } interface EmptyInterface { }`,
+			wantErr: true,
+		},
+		{
+			name:    "empty input object type",
+			sdl:     `type Query { dummy(arg: EmptyInput): Int } input EmptyInput { }`,
+			wantErr: true,
+		},
+		{
+			name:    "valid types (controls)",
+			sdl:     `type Query { dummy: Int } interface Node { id: ID! } input Something { v: Int }`,
+			wantErr: false,
+		},
+	}
+
+	for _, tc := range cases {
+		t.Run(tc.name, func(t *testing.T) {
+			_, err := graphql.ParseSchema(tc.sdl, nil)
+			if tc.wantErr && err == nil {
+				t.Fatalf("expected error for %s, got none", tc.name)
+			}
+			if !tc.wantErr && err != nil {
+				t.Fatalf("unexpected error for %s: %v", tc.name, err)
+			}
+		})
+	}
+}
diff -pruN 1.7.0-1/selection.go 1.8.0-1/selection.go
--- 1.7.0-1/selection.go	2025-08-19 13:15:23.000000000 +0000
+++ 1.8.0-1/selection.go	2025-09-09 11:06:15.000000000 +0000
@@ -4,25 +4,24 @@ import (
 	"context"
 	"sort"
 
-	"github.com/graph-gophers/graphql-go/internal/selections"
+	"github.com/graph-gophers/graphql-go/internal/exec/selections"
 )
 
-// SelectedFieldNames returns the set of immediate child field names selected
-// on the value returned by the current resolver. It returns an empty slice
-// when the current field's return type is a leaf (scalar / enum) or when the
-// feature was disabled at schema construction via DisableFieldSelections.
-// The returned slice is a copy and is safe for the caller to modify.
-//
-// It is intentionally simple and does not expose the internal AST. If more
-// detailed information is needed in the future (e.g. arguments per child,
-// nested trees) a separate API can be added without breaking this one.
+// SelectedFieldNames returns the set of selected field paths underneath the
+// current resolver. Paths are dot-delimited for nested structures (e.g. "products",
+// "products.id", "products.category.id"). Immediate child field names are always
+// present (even when they have further children). Order follows a depth-first
+// traversal, preserving each path's first appearance in the query after
+// fragment flattening.
+// It returns an empty slice when the current field's return type is a leaf
+// (scalar / enum) or when DisableFieldSelections was used at schema creation.
+// The returned slice is a copy and is safe for the caller to modify.
 //
 // Notes:
-//   - Fragment spreads & inline fragments are flattened; the union of all
-//     possible child fields is returned (deduplicated, preserving first
-//     appearance order in the query document).
-//   - Field aliases are ignored; the original schema field names are returned.
+//   - Fragment spreads & inline fragments are flattened.
+//   - Field aliases are ignored; original schema field names are used.
 //   - Meta fields beginning with "__" (including __typename) are excluded.
+//   - Duplicate paths are removed, preserving the earliest occurrence.
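+//
+// Example (query selects items { id category { id } } beneath the current field):
+//
+//	paths := graphql.SelectedFieldNames(ctx)
+//	// paths: ["items", "items.id", "items.category", "items.category.id"]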
 func SelectedFieldNames(ctx context.Context) []string {
 	// If no selection info is present (leaf field or no child selections), return empty slice.
 	lazy := selections.FromContext(ctx)
@@ -32,9 +31,9 @@ func SelectedFieldNames(ctx context.Cont
 	return lazy.Names()
 }
 
-// HasSelectedField returns true if the immediate child selection list contains
-// the provided field name (case sensitive). It returns false for leaf return
-// types and when DisableFieldSelections was used.
+// HasSelectedField reports whether the selection list contains the provided
+// (possibly nested) path; matching is case sensitive. It returns false for
+// leaf resolvers and when DisableFieldSelections was used.
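+//
+// Example:
+//
+//	if graphql.HasSelectedField(ctx, "items.category.id") {
+//		// prefetch categories only when the client asked for them
+//	}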
 func HasSelectedField(ctx context.Context, name string) bool {
 	lazy := selections.FromContext(ctx)
 	if lazy == nil {
@@ -57,3 +56,25 @@ func SortedSelectedFieldNames(ctx contex
 	sort.Strings(out)
 	return out
 }
+
+// DecodeSelectedFieldArgs decodes the argument map for the given path into dst.
+// It returns ok=false if the path or its arguments are absent. Results are cached per
+// (path, concrete struct type) to avoid repeated reflection cost; repeated successful decodes
+// copy a previously cached value into dst.
+//
+// Example:
+//
+//	type BooksArgs struct { Top int32 }
+//	var args BooksArgs
+//	ok, err := graphql.DecodeSelectedFieldArgs(ctx, "books", &args)
+//	if ok { /* use args.Top */ }
+func DecodeSelectedFieldArgs(ctx context.Context, path string, dst interface{}) (bool, error) {
+	if dst == nil {
+		return false, nil
+	}
+	lazy := selections.FromContext(ctx)
+	if lazy == nil {
+		return false, nil
+	}
+	return lazy.DecodeArgsInto(path, dst)
+}
diff -pruN 1.7.0-1/selection_args_test.go 1.8.0-1/selection_args_test.go
--- 1.7.0-1/selection_args_test.go	1970-01-01 00:00:00.000000000 +0000
+++ 1.8.0-1/selection_args_test.go	2025-09-09 11:06:15.000000000 +0000
@@ -0,0 +1,181 @@
+package graphql_test
+
+import (
+	"context"
+	"fmt"
+	"testing"
+
+	"github.com/graph-gophers/graphql-go"
+)
+
+// Date is a custom scalar implementing decode.Unmarshaler.
+type Date struct{ Value string }
+
+func (d *Date) ImplementsGraphQLType(name string) bool { return name == "Date" }
+func (d *Date) UnmarshalGraphQL(input any) error {
+	s, ok := input.(string)
+	if !ok {
+		return fmt.Errorf("Date expects a string, got %T", input)
+	}
+	d.Value = s
+	return nil
+}
+
+// harness captures decoded argument structs from inside resolvers.
+type harness struct {
+	got any
+}
+
+type parentResolver struct{}
+
+func (p *parentResolver) ScalarField(ctx context.Context, args struct{ X int32 }) int32 {
+	return args.X
+}
+
+func (p *parentResolver) StringField(ctx context.Context, args struct{ S string }) string {
+	return args.S
+}
+
+func (p *parentResolver) EnumField(ctx context.Context, args struct{ Color string }) string {
+	return args.Color
+}
+
+func (p *parentResolver) CustomField(ctx context.Context, args struct{ D Date }) string {
+	return args.D.Value
+}
+
+func (p *parentResolver) ComplexField(ctx context.Context, args complexArgs) string {
+	return "ok"
+}
+
+// decoded argument holder structs
+type scalarArgs struct{ X int32 }
+
+type stringArgs struct{ S string }
+
+type enumArgs struct{ Color string }
+
+type customArgs struct{ D Date }
+
+type complexArgs struct {
+	R struct {
+		Start int32
+		End   int32
+	}
+	Colors []string
+}
+
+type queryResolver struct {
+	h    *harness
+	path string
+}
+
+func (q *queryResolver) Parent(ctx context.Context) *parentResolver {
+	// decode any child arguments and assign to the harness
+	dec := func(path string, dst any) {
+		if ok, _ := graphql.DecodeSelectedFieldArgs(ctx, path, dst); ok && q.h.got == nil {
+			q.h.got = dst
+		}
+	}
+	switch q.path {
+	case "scalarField":
+		dec(q.path, &scalarArgs{})
+	case "stringField":
+		dec(q.path, &stringArgs{})
+	case "enumField":
+		dec(q.path, &enumArgs{})
+	case "customField":
+		dec(q.path, &customArgs{})
+	case "complexField":
+		dec(q.path, &complexArgs{})
+	}
+	return &parentResolver{}
+}
+
+func TestDecodeSelectedFieldArgs(t *testing.T) {
+	schemaSDL := `
+		scalar Date
+		enum Color { RED GREEN BLUE }
+		input Range { start: Int! end: Int! }
+		type Query { parent: Parent! }
+		type Parent {
+			scalarField(x: Int!): Int!
+			stringField(s: String!): String!
+			enumField(color: Color!): String!
+			customField(d: Date!): String!
+			complexField(r: Range!, colors: [Color!]!): String!
+		}
+	`
+
+	tests := []struct {
+		name   string
+		query  string
+		path   string
+		expect func(t *testing.T, v any)
+	}{
+		{
+			name:  "scalar int",
+			query: `query { parent { scalarField(x: 42) } }`,
+			path:  "scalarField",
+			expect: func(t *testing.T, v any) {
+				got := v.(*scalarArgs)
+				if got.X != 42 {
+					t.Errorf("want 42 got %d", got.X)
+				}
+			},
+		},
+		{
+			name:  "string",
+			query: `query { parent { stringField(s: "abc") } }`,
+			path:  "stringField",
+			expect: func(t *testing.T, v any) {
+				got := v.(*stringArgs)
+				if got.S != "abc" {
+					t.Errorf("want abc got %s", got.S)
+				}
+			},
+		},
+		{
+			name:  "custom scalar",
+			query: `query { parent { customField(d: "2025-01-02") } }`,
+			path:  "customField",
+			expect: func(t *testing.T, v any) {
+				got := v.(*customArgs)
+				if got.D.Value != "2025-01-02" {
+					t.Errorf("want date got %s", got.D.Value)
+				}
+			},
+		},
+		{
+			name:  "complex",
+			query: `query { parent { complexField(r: { start: 1, end: 5 }, colors: [GREEN, BLUE]) } }`,
+			path:  "complexField",
+			expect: func(t *testing.T, v any) {
+				got := v.(*complexArgs)
+				if got.R.Start != 1 || got.R.End != 5 {
+					t.Errorf("range mismatch: %+v", got.R)
+				}
+				if len(got.Colors) != 2 || got.Colors[0] != "GREEN" || got.Colors[1] != "BLUE" {
+					t.Errorf("colors mismatch: %#v", got.Colors)
+				}
+			},
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			h := &harness{}
+			q := &queryResolver{h: h, path: tt.path}
+			schema := graphql.MustParseSchema(schemaSDL, q)
+			res := schema.Exec(context.Background(), tt.query, "", nil)
+			if len(res.Errors) > 0 {
+				t.Fatalf("unexpected errors: %+v", res.Errors)
+			}
+			if h.got == nil {
+				t.Errorf("resolver did not capture decoded args (path %s)", tt.path)
+				return
+			}
+			tt.expect(t, h.got)
+		})
+	}
+}
diff -pruN 1.7.0-1/selection_test.go 1.8.0-1/selection_test.go
--- 1.7.0-1/selection_test.go	2025-08-19 13:15:23.000000000 +0000
+++ 1.8.0-1/selection_test.go	2025-09-09 11:06:15.000000000 +0000
@@ -8,9 +8,11 @@ import (
 )
 
 const selectionTestSchema = `
-    schema { query: Query }
-    type Query { hero: Human }
-    type Human { id: ID! name: String }
+	schema { query: Query }
+	type Query { customer: Customer }
+	type Customer { id: ID! name: String items: [Item!]! }
+	type Item { id: ID! name: String category: Category }
+	type Category { id: ID! }
 `
 
 type selectionRoot struct {
@@ -19,80 +21,100 @@ type selectionRoot struct {
 	expectSorted []string
 	hasChecks    map[string]bool
 }
+type selectionCustomer struct {
+	t        *testing.T
+	id, name string
+}
 
-type selectionHuman struct {
-	t    *testing.T
-	id   string
-	name string
-}
-
-func (r *selectionRoot) Hero(ctx context.Context) *selectionHuman {
-	names := graphql.SelectedFieldNames(ctx)
-	sorted := graphql.SortedSelectedFieldNames(ctx)
-	if !equalStringSlices(names, r.expectNames) {
-		r.t.Errorf("SelectedFieldNames = %v, want %v", names, r.expectNames)
-	}
-	if !equalStringSlices(sorted, r.expectSorted) {
-		r.t.Errorf("SortedSelectedFieldNames = %v, want %v", sorted, r.expectSorted)
-	}
-	for name, want := range r.hasChecks {
-		if got := graphql.HasSelectedField(ctx, name); got != want {
-			r.t.Errorf("HasSelectedField(%q) = %v, want %v", name, got, want)
+func (r *selectionRoot) Customer(ctx context.Context) *selectionCustomer {
+	if r.expectNames != nil {
+		names := graphql.SelectedFieldNames(ctx)
+		if !equalStringSlices(names, r.expectNames) {
+			r.t.Errorf("SelectedFieldNames = %v, want %v", names, r.expectNames)
+		}
+	}
+	if r.expectSorted != nil {
+		sorted := graphql.SortedSelectedFieldNames(ctx)
+		if !equalStringSlices(sorted, r.expectSorted) {
+			r.t.Errorf("SortedSelectedFieldNames = %v, want %v", sorted, r.expectSorted)
 		}
 	}
-	return &selectionHuman{t: r.t, id: "h1", name: "Luke"}
+	for n, want := range r.hasChecks {
+		if got := graphql.HasSelectedField(ctx, n); got != want {
+			r.t.Errorf("HasSelectedField(%q) = %v, want %v", n, got, want)
+		}
+	}
+	return &selectionCustomer{t: r.t, id: "c1", name: "Alice"}
 }
 
-// Object-level assertions happen in Hero via a wrapper test function; leaf behavior tested here.
-func (h *selectionHuman) ID() graphql.ID { return graphql.ID(h.id) }
-
-func (h *selectionHuman) Name(ctx context.Context) *string {
-	// Leaf field: should always produce empty selections regardless of enable/disable.
-	if got := graphql.SelectedFieldNames(ctx); len(got) != 0 {
-		h.t.Errorf("leaf field SelectedFieldNames = %v, want empty", got)
+func (h *selectionCustomer) ID() graphql.ID { return graphql.ID(h.id) }
+func (h *selectionCustomer) Name(ctx context.Context) *string {
+	if len(graphql.SelectedFieldNames(ctx)) != 0 {
+		h.t.Errorf("leaf selections should be empty")
 	}
 	if graphql.HasSelectedField(ctx, "anything") {
-		h.t.Errorf("leaf field HasSelectedField unexpectedly true")
+		h.t.Errorf("unexpected leaf HasSelectedField true")
 	}
-	if sorted := graphql.SortedSelectedFieldNames(ctx); len(sorted) != 0 {
-		h.t.Errorf("leaf field SortedSelectedFieldNames = %v, want empty", sorted)
+	if len(graphql.SortedSelectedFieldNames(ctx)) != 0 {
+		h.t.Errorf("leaf sorted selections should be empty")
 	}
 	return &h.name
 }
 
+// nested types for extended schema
+type selectionItem struct {
+	id, name string
+	category *selectionCategory
+}
+type selectionCategory struct{ id string }
+
+func (h *selectionCustomer) Items() []*selectionItem {
+	return []*selectionItem{{id: "i1", name: "Item", category: &selectionCategory{id: "cat1"}}}
+}
+func (p *selectionItem) ID() graphql.ID               { return graphql.ID(p.id) }
+func (p *selectionItem) Name() *string                { return &p.name }
+func (p *selectionItem) Category() *selectionCategory { return p.category }
+func (c *selectionCategory) ID() graphql.ID           { return graphql.ID(c.id) }
+
 func TestFieldSelectionHelpers(t *testing.T) {
 	tests := []struct {
 		name         string
 		schemaOpts   []graphql.SchemaOpt
 		query        string
-		expectNames  []string // expected order from SelectedFieldNames at object boundary
-		expectSorted []string // expected from SortedSelectedFieldNames at object boundary
+		expectNames  []string
+		expectSorted []string
 		hasChecks    map[string]bool
 	}{
 		{
-			name:         "enabled object order preserved and sorted copy",
-			query:        `query { hero { name id } }`, // order intentionally name,id
+			name:         "enabled order",
+			query:        `query { customer { name id } }`,
 			expectNames:  []string{"name", "id"},
 			expectSorted: []string{"id", "name"},
-			hasChecks:    map[string]bool{"id": true, "name": true, "missing": false},
+			hasChecks:    map[string]bool{"id": true, "name": true},
 		},
 		{
-			name:         "enabled only one field selected",
-			query:        `query { hero { id } }`, // order intentionally name,id
+			name:         "one field",
+			query:        `query { customer { id } }`,
 			expectNames:  []string{"id"},
 			expectSorted: []string{"id"},
-			hasChecks:    map[string]bool{"id": true, "name": false, "missing": false},
+			hasChecks:    map[string]bool{"id": true, "name": false},
+		},
+		{
+			name:         "nested paths",
+			query:        `query { customer { items { id name category { id } } id } }`,
+			expectNames:  []string{"items", "items.id", "items.name", "items.category", "items.category.id", "id"},
+			expectSorted: []string{"id", "items", "items.category", "items.category.id", "items.id", "items.name"},
+			hasChecks:    map[string]bool{"items": true, "items.id": true, "items.name": true, "items.category": true, "items.category.id": true, "id": true},
 		},
 		{
-			name:         "disabled object returns empty",
+			name:         "disabled",
 			schemaOpts:   []graphql.SchemaOpt{graphql.DisableFieldSelections()},
-			query:        `query { hero { name id } }`,
+			query:        `query { customer { name id } }`,
 			expectNames:  []string{},
 			expectSorted: []string{},
 			hasChecks:    map[string]bool{"id": false, "name": false},
 		},
 	}
-
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
 			root := &selectionRoot{t: t, expectNames: tt.expectNames, expectSorted: tt.expectSorted, hasChecks: tt.hasChecks}
@@ -107,37 +129,35 @@ func TestFieldSelectionHelpers(t *testin
 
 func TestSelectedFieldNames_FragmentsAliasesMeta(t *testing.T) {
 	tests := []struct {
-		name        string
-		query       string
+		name, query string
 		expectNames []string
 		hasChecks   map[string]bool
 	}{
 		{
-			name:        "alias ignored order preserved",
-			query:       `query { hero { idAlias: id name } }`,
+			name:        "alias ignored",
+			query:       `query { customer { idAlias: id name } }`,
 			expectNames: []string{"id", "name"},
 			hasChecks:   map[string]bool{"id": true, "idAlias": false, "name": true},
 		},
 		{
-			name:        "fragment spread flattened",
-			query:       `fragment HFields on Human { id name } query { hero { ...HFields } }`,
+			name:        "fragment spread",
+			query:       `fragment CFields on Customer { id name } query { customer { ...CFields } }`,
 			expectNames: []string{"id", "name"},
 			hasChecks:   map[string]bool{"id": true, "name": true},
 		},
 		{
-			name:        "inline fragment dedup",
-			query:       `query { hero { id ... on Human { id name } } }`,
+			name:        "inline fragment",
+			query:       `query { customer { id ... on Customer { id name } } }`,
 			expectNames: []string{"id", "name"},
 			hasChecks:   map[string]bool{"id": true, "name": true},
 		},
 		{
-			name:        "meta field excluded",
-			query:       `query { hero { id __typename name } }`,
+			name:        "meta excluded",
+			query:       `query { customer { id __typename name } }`,
 			expectNames: []string{"id", "name"},
 			hasChecks:   map[string]bool{"id": true, "name": true, "__typename": false},
 		},
 	}
-
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
 			root := &selectionRoot{t: t, expectNames: tt.expectNames, expectSorted: tt.expectNames, hasChecks: tt.hasChecks}
@@ -150,7 +170,6 @@ func TestSelectedFieldNames_FragmentsAli
 	}
 }
 
-// equalStringSlices compares content and order.
 func equalStringSlices(a, b []string) bool {
 	if len(a) != len(b) {
 		return false
diff -pruN 1.7.0-1/subscription_test.go 1.8.0-1/subscription_test.go
--- 1.7.0-1/subscription_test.go	2025-08-19 13:15:23.000000000 +0000
+++ 1.8.0-1/subscription_test.go	2025-09-09 11:06:15.000000000 +0000
@@ -499,7 +499,9 @@ const schema = `
 	}
 `
 
-type subscriptionsCustomTimeout struct{}
+type subscriptionsCustomTimeout struct {
+	Name string // at least one Query field is required
+}
 
 type messageResolver struct{}
 
@@ -521,7 +523,10 @@ func (r *subscriptionsCustomTimeout) OnT
 func TestSchemaSubscribe_CustomResolverTimeout(t *testing.T) {
 	gqltesting.RunSubscribe(t, &gqltesting.TestSubscription{
 		Schema: graphql.MustParseSchema(`
-			type Query {}
+			type Query {
+				# at least one Query field is required
+				name: String!
+			}
 			type Subscription {
 				onTimeout : Message!
 			}
@@ -529,9 +534,9 @@ func TestSchemaSubscribe_CustomResolverT
 			type Message {
 				msg: String!
 			}
-		`,
-			&subscriptionsCustomTimeout{},
-			graphql.SubscribeResolverTimeout(1*time.Nanosecond)),
+		`, &subscriptionsCustomTimeout{Name: "test"},
+			graphql.SubscribeResolverTimeout(1*time.Nanosecond),
+			graphql.UseFieldResolvers()),
 		Query: `
 			subscription {
 				onTimeout { msg }
@@ -552,16 +557,19 @@ func (r *subscriptionsPanicInResolver) O
 func TestSchemaSubscribe_PanicInResolver(t *testing.T) {
 	r := &struct {
 		*subscriptionsPanicInResolver
+		Name string
 	}{
 		subscriptionsPanicInResolver: &subscriptionsPanicInResolver{},
 	}
 	gqltesting.RunSubscribe(t, &gqltesting.TestSubscription{
 		Schema: graphql.MustParseSchema(`
-			type Query {}
+			type Query {
+				name: String!
+			}
 			type Subscription {
 				onPanic : String!
 			}
-		`, r),
+		`, r, graphql.UseFieldResolvers()),
 		Query: `
 			subscription {
 				onPanic
diff -pruN 1.7.0-1/subscriptions.go 1.8.0-1/subscriptions.go
--- 1.7.0-1/subscriptions.go	2025-08-19 13:15:23.000000000 +0000
+++ 1.8.0-1/subscriptions.go	2025-09-09 11:06:15.000000000 +0000
@@ -36,7 +36,7 @@ func (s *Schema) subscribe(ctx context.C
 	}
 
 	validationFinish := s.validationTracer.TraceValidation(ctx)
-	errs := validation.Validate(s.schema, doc, variables, s.maxDepth)
+	errs := validation.Validate(s.schema, doc, variables, s.maxDepth, s.overlapPairLimit)
 	validationFinish(errs)
 	if len(errs) != 0 {
 		return sendAndReturnClosed(&Response{Errors: errs})
diff -pruN 1.7.0-1/validation_overlap_bench_test.go 1.8.0-1/validation_overlap_bench_test.go
--- 1.7.0-1/validation_overlap_bench_test.go	1970-01-01 00:00:00.000000000 +0000
+++ 1.8.0-1/validation_overlap_bench_test.go	2025-09-09 11:06:15.000000000 +0000
@@ -0,0 +1,95 @@
+package graphql_test
+
+import (
+	"context"
+	"strconv"
+	"strings"
+	"testing"
+
+	graphql "github.com/graph-gophers/graphql-go"
+)
+
+const overlapBenchSchema = `schema { query: Query } type Query { root: Thing } type Thing { id: ID! name: String value: String }`
+
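+// buildLargeQuery builds a query with count aliased selections of the same field under root,
+// producing a large flat sibling list for overlap validation.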
+func buildLargeQuery(count int) string {
+	var b strings.Builder
+	b.Grow(20 + count*8)
+	b.WriteString("query{root{")
+	for i := 0; i < count; i++ {
+		b.WriteString("f")
+		b.WriteString(strconv.Itoa(i))
+		b.WriteString(":id ")
+	}
+	b.WriteString("}}")
+	return b.String()
+}
+
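+// buildFragmentedQuery places roughly half of the selections directly under root and spreads
+// the remainder across two fragments, exercising fragment-vs-field overlap comparisons.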
+func buildFragmentedQuery(total int) string {
+	if total < 4 {
+		return buildLargeQuery(total)
+	}
+	top := total / 2
+	rest := total - top
+	fragA := rest / 2
+	fragB := rest - fragA
+	var topSel strings.Builder
+	topSel.Grow(32 + top*8)
+	topSel.WriteString("query{root{")
+	for i := 0; i < top; i++ {
+		topSel.WriteString("t")
+		topSel.WriteString(strconv.Itoa(i))
+		topSel.WriteString(":id ")
+	}
+	topSel.WriteString(" ...FragA ...FragB }}")
+	var frags strings.Builder
+	frags.Grow(32 + (fragA+fragB)*8)
+	frags.WriteString(" fragment FragA on Thing {")
+	for i := 0; i < fragA; i++ {
+		frags.WriteString(" a")
+		frags.WriteString(strconv.Itoa(i))
+		frags.WriteString(":id")
+	}
+	frags.WriteString(" }")
+	frags.WriteString(" fragment FragB on Thing {")
+	for i := 0; i < fragB; i++ {
+		frags.WriteString(" b")
+		frags.WriteString(strconv.Itoa(i))
+		frags.WriteString(":id")
+	}
+	frags.WriteString(" }")
+	return topSel.String() + frags.String()
+}
+
+type overlapRoot struct{}
+
+func (r *overlapRoot) Root() *thingResolver { return &thingResolver{} }
+
+type thingResolver struct{}
+
+func (t *thingResolver) ID() graphql.ID { return graphql.ID("1") }
+func (t *thingResolver) Name() *string  { s := "n"; return &s }
+func (t *thingResolver) Value() *string { s := "v"; return &s }
+
+func BenchmarkValidateOverlap(b *testing.B) {
+	sizes := []int{500, 1000, 2000, 5000}
+	for _, n := range sizes {
+		b.Run("fields_"+strconv.Itoa(n), func(b *testing.B) {
+			schema := graphql.MustParseSchema(overlapBenchSchema, &overlapRoot{})
+			query := buildLargeQuery(n)
+			ctx := context.Background()
+			b.ReportAllocs()
+			for b.Loop() {
+				_ = schema.Exec(ctx, query, "", nil)
+			}
+		})
+	}
+	b.Run("fragments_1000", func(b *testing.B) {
+		schema := graphql.MustParseSchema(overlapBenchSchema, &overlapRoot{})
+		query := buildFragmentedQuery(1000)
+		ctx := context.Background()
+		b.ReportAllocs()
+		for b.Loop() {
+			_ = schema.Exec(ctx, query, "", nil)
+		}
+	})
+}
