From 651a8d81baf03a87c244ad317d69cf2ee20fcccd Mon Sep 17 00:00:00 2001 From: Benny Siegert Date: Sat, 5 Feb 2022 17:23:26 +0100 Subject: [release-branch.go1.17] cmd/dist: skip internal linking tests on arm64 The previous workaround for issue #39466 only disabled this test for Linux. However, the issue manifests for all arm64 systems with gcc 9.4 and above. The new netbsd-arm64 builder uses NetBSD-current with gcc 10.3, so it fails in the same way. Updates #39466. For #53050. Change-Id: I276a99a5e60914e5c22f74a680e461bea17cfe92 Reviewed-on: https://go-review.googlesource.com/c/go/+/383554 Trust: Benny Siegert Run-TryBot: Ian Lance Taylor TryBot-Result: Gopher Robot Reviewed-by: Ian Lance Taylor (cherry picked from commit 1d6051380c1faa3e515db73c4cfe14f807e2c686) Reviewed-on: https://go-review.googlesource.com/c/go/+/415074 Run-TryBot: Dmitri Shuralyov Reviewed-by: Dmitri Shuralyov Reviewed-by: Cherry Mui --- src/cmd/dist/test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/cmd/dist/test.go b/src/cmd/dist/test.go index beb7c4650c..a6ea64b495 100644 --- a/src/cmd/dist/test.go +++ b/src/cmd/dist/test.go @@ -1140,9 +1140,9 @@ func (t *tester) cgoTest(dt *distTest) error { cmd := t.addCmd(dt, "misc/cgo/test", t.goTest()) setEnv(cmd, "GOFLAGS", "-ldflags=-linkmode=auto") - // Skip internal linking cases on linux/arm64 to support GCC-9.4 and above. + // Skip internal linking cases on arm64 to support GCC-9.4 and above. // See issue #39466. - skipInternalLink := goarch == "arm64" && goos == "linux" + skipInternalLink := goarch == "arm64" && goos != "windows" if t.internalLink() && !skipInternalLink { cmd := t.addCmd(dt, "misc/cgo/test", t.goTest(), "-tags=internal") -- cgit v1.2.3-54-g00ecf From 8d2935ab7ccf621443691fa8c103a6decf4fbfb2 Mon Sep 17 00:00:00 2001 From: Cherry Mui Date: Tue, 8 Feb 2022 14:46:58 -0500 Subject: [release-branch.go1.17] cmd/dist: test cgo internal linking on darwin-arm64 CL 415074 disables testing cgo internal linking on all ARM64 but Windows, because it doesn't work with newer GCC. But - darwin-arm64 works, and it does not use GCC - we don't support cgo internal linking on windows-arm64 anyway. This CL fixes the condition. Fixes #53050. Change-Id: I9eb7b81ef75e482f5e95d2edae4863ba21396432 Reviewed-on: https://go-review.googlesource.com/c/go/+/384269 Trust: Cherry Mui Run-TryBot: Cherry Mui TryBot-Result: Gopher Robot Reviewed-by: Ian Lance Taylor (cherry picked from commit e4ca3fa345a204b72a011b3634ddcfc09dcc68bc) Reviewed-on: https://go-review.googlesource.com/c/go/+/415075 Reviewed-by: Cherry Mui Run-TryBot: Dmitri Shuralyov Reviewed-by: Dmitri Shuralyov --- src/cmd/dist/test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/cmd/dist/test.go b/src/cmd/dist/test.go index a6ea64b495..4f1d556dd4 100644 --- a/src/cmd/dist/test.go +++ b/src/cmd/dist/test.go @@ -1142,7 +1142,7 @@ func (t *tester) cgoTest(dt *distTest) error { // Skip internal linking cases on arm64 to support GCC-9.4 and above. // See issue #39466. 
- skipInternalLink := goarch == "arm64" && goos != "windows" + skipInternalLink := goarch == "arm64" && goos != "darwin" if t.internalLink() && !skipInternalLink { cmd := t.addCmd(dt, "misc/cgo/test", t.goTest(), "-tags=internal") -- cgit v1.2.3-54-g00ecf From 77cc1c0defaf4cd6c38221504b29ad125aae9ac7 Mon Sep 17 00:00:00 2001 From: hidu Date: Thu, 3 Mar 2022 13:33:38 +0800 Subject: [release-branch.go1.17] cmd/go: pass --no-decorate when listing git tags for a commit This avoids a parse error when the user's global .gitconfig sets log.decorate to true. Updates #51312. Fixes #51351. Change-Id: Ic47b0f604c0c3a404ec50d6e09f4e138045ac2f2 Reviewed-on: https://go-review.googlesource.com/c/go/+/387835 Run-TryBot: Bryan Mills Auto-Submit: Bryan Mills Reviewed-by: Bryan Mills TryBot-Result: Gopher Robot Reviewed-by: Dmitri Shuralyov Reviewed-by: David Chase Reviewed-by: Dmitri Shuralyov (cherry picked from commit a30f4346674ec43bf576e6f56a9cd1c7ca482e1f) Reviewed-on: https://go-review.googlesource.com/c/go/+/414875 --- src/cmd/go/internal/modfetch/codehost/git.go | 2 +- .../script/mod_download_git_decorate_full.txt | 28 ++++++++++++++++++++++ 2 files changed, 29 insertions(+), 1 deletion(-) create mode 100644 src/cmd/go/testdata/script/mod_download_git_decorate_full.txt diff --git a/src/cmd/go/internal/modfetch/codehost/git.go b/src/cmd/go/internal/modfetch/codehost/git.go index 4d4964edf4..55aecd04c9 100644 --- a/src/cmd/go/internal/modfetch/codehost/git.go +++ b/src/cmd/go/internal/modfetch/codehost/git.go @@ -440,7 +440,7 @@ func (r *gitRepo) fetchRefsLocked() error { // statLocal returns a RevInfo describing rev in the local git repository. // It uses version as info.Version. func (r *gitRepo) statLocal(version, rev string) (*RevInfo, error) { - out, err := Run(r.dir, "git", "-c", "log.showsignature=false", "log", "-n1", "--format=format:%H %ct %D", rev, "--") + out, err := Run(r.dir, "git", "-c", "log.showsignature=false", "log", "--no-decorate", "-n1", "--format=format:%H %ct %D", rev, "--") if err != nil { return nil, &UnknownRevisionError{Rev: rev} } diff --git a/src/cmd/go/testdata/script/mod_download_git_decorate_full.txt b/src/cmd/go/testdata/script/mod_download_git_decorate_full.txt new file mode 100644 index 0000000000..3b19acc1b1 --- /dev/null +++ b/src/cmd/go/testdata/script/mod_download_git_decorate_full.txt @@ -0,0 +1,28 @@ +env GO111MODULE=on + +[!net] skip +[!exec:git] skip + +env GOPROXY=direct +env HOME=$WORK/home/gopher + + +go env GOPROXY +stdout 'direct' + +exec git config --get log.decorate +stdout 'full' + +# Test that Git log with user's global config '~/gitconfig' has log.decorate=full +# go mod download has an error 'v1.x.y is not a tag' +# with go1.16.14: +# `go1.16.14 list -m vcs-test.golang.org/git/gitrepo1.git@v1.2.3` +# will output with error: +# go list -m: vcs-test.golang.org/git/gitrepo1.git@v1.2.3: invalid version: unknown revision v1.2.3 +# See golang/go#51312. +go list -m vcs-test.golang.org/git/gitrepo1.git@v1.2.3 +stdout 'vcs-test.golang.org/git/gitrepo1.git v1.2.3' + +-- $WORK/home/gopher/.gitconfig -- +[log] + decorate = full \ No newline at end of file -- cgit v1.2.3-54-g00ecf From b1be664d64750bccd5081d51b585036c931b5cf0 Mon Sep 17 00:00:00 2001 From: Michael Anthony Knyszek Date: Thu, 9 Jun 2022 18:25:01 +0000 Subject: [release-branch.go1.17] runtime: store consistent total allocation stats as uint64 Currently the consistent total allocation stats are managed as uintptrs, which means they can easily overflow on 32-bit systems. 
Fix this by storing these stats as uint64s. This will cause some minor performance degradation on 32-bit systems, but there really isn't a way around this, and it affects the correctness of the metrics we export. For #52680. Fixes #52688. Change-Id: I8b1926116e899ae9f03d58e0320bcb9264945b3e Reviewed-on: https://go-review.googlesource.com/c/go/+/411496 Run-TryBot: Michael Knyszek TryBot-Result: Gopher Robot Reviewed-by: Michael Pratt --- src/runtime/mcache.go | 16 ++++++++-------- src/runtime/metrics.go | 12 ++++++------ src/runtime/mgcsweep.go | 6 +++--- src/runtime/mstats.go | 42 ++++++++++++++++++++++-------------------- 4 files changed, 39 insertions(+), 37 deletions(-) diff --git a/src/runtime/mcache.go b/src/runtime/mcache.go index a9e959109a..99303358be 100644 --- a/src/runtime/mcache.go +++ b/src/runtime/mcache.go @@ -175,11 +175,11 @@ func (c *mcache) refill(spc spanClass) { // Assume all objects from this span will be allocated in the // mcache. If it gets uncached, we'll adjust this. stats := memstats.heapStats.acquire() - atomic.Xadduintptr(&stats.smallAllocCount[spc.sizeclass()], uintptr(s.nelems)-uintptr(s.allocCount)) + atomic.Xadd64(&stats.smallAllocCount[spc.sizeclass()], int64(s.nelems)-int64(s.allocCount)) // Flush tinyAllocs. if spc == tinySpanClass { - atomic.Xadduintptr(&stats.tinyAllocCount, c.tinyAllocs) + atomic.Xadd64(&stats.tinyAllocCount, int64(c.tinyAllocs)) c.tinyAllocs = 0 } memstats.heapStats.release() @@ -229,8 +229,8 @@ func (c *mcache) allocLarge(size uintptr, needzero bool, noscan bool) (*mspan, b throw("out of memory") } stats := memstats.heapStats.acquire() - atomic.Xadduintptr(&stats.largeAlloc, npages*pageSize) - atomic.Xadduintptr(&stats.largeAllocCount, 1) + atomic.Xadd64(&stats.largeAlloc, int64(npages*pageSize)) + atomic.Xadd64(&stats.largeAllocCount, 1) memstats.heapStats.release() // Update gcController.heapLive and revise pacing if needed. @@ -261,9 +261,9 @@ func (c *mcache) releaseAll() { s := c.alloc[i] if s != &emptymspan { // Adjust nsmallalloc in case the span wasn't fully allocated. - n := uintptr(s.nelems) - uintptr(s.allocCount) + n := int64(s.nelems) - int64(s.allocCount) stats := memstats.heapStats.acquire() - atomic.Xadduintptr(&stats.smallAllocCount[spanClass(i).sizeclass()], -n) + atomic.Xadd64(&stats.smallAllocCount[spanClass(i).sizeclass()], -n) memstats.heapStats.release() if s.sweepgen != sg+1 { // refill conservatively counted unallocated slots in gcController.heapLive. @@ -273,7 +273,7 @@ func (c *mcache) releaseAll() { // gcController.heapLive was totally recomputed since // caching this span, so we don't do this for // stale spans. - atomic.Xadd64(&gcController.heapLive, -int64(n)*int64(s.elemsize)) + atomic.Xadd64(&gcController.heapLive, -n*int64(s.elemsize)) } // Release the span to the mcentral. mheap_.central[i].mcentral.uncacheSpan(s) @@ -286,7 +286,7 @@ func (c *mcache) releaseAll() { // Flush tinyAllocs. stats := memstats.heapStats.acquire() - atomic.Xadduintptr(&stats.tinyAllocCount, c.tinyAllocs) + atomic.Xadd64(&stats.tinyAllocCount, int64(c.tinyAllocs)) c.tinyAllocs = 0 memstats.heapStats.release() diff --git a/src/runtime/metrics.go b/src/runtime/metrics.go index ba0a920a5d..4af2b0aef9 100644 --- a/src/runtime/metrics.go +++ b/src/runtime/metrics.go @@ -388,13 +388,13 @@ func (a *heapStatsAggregate) compute() { memstats.heapStats.read(&a.heapStatsDelta) // Calculate derived stats. 
- a.totalAllocs = uint64(a.largeAllocCount) - a.totalFrees = uint64(a.largeFreeCount) - a.totalAllocated = uint64(a.largeAlloc) - a.totalFreed = uint64(a.largeFree) + a.totalAllocs = a.largeAllocCount + a.totalFrees = a.largeFreeCount + a.totalAllocated = a.largeAlloc + a.totalFreed = a.largeFree for i := range a.smallAllocCount { - na := uint64(a.smallAllocCount[i]) - nf := uint64(a.smallFreeCount[i]) + na := a.smallAllocCount[i] + nf := a.smallFreeCount[i] a.totalAllocs += na a.totalFrees += nf a.totalAllocated += na * uint64(class_to_size[i]) diff --git a/src/runtime/mgcsweep.go b/src/runtime/mgcsweep.go index 8fe3a65340..468df4d097 100644 --- a/src/runtime/mgcsweep.go +++ b/src/runtime/mgcsweep.go @@ -581,7 +581,7 @@ func (sl *sweepLocked) sweep(preserve bool) bool { // free slots zeroed. s.needzero = 1 stats := memstats.heapStats.acquire() - atomic.Xadduintptr(&stats.smallFreeCount[spc.sizeclass()], uintptr(nfreed)) + atomic.Xadd64(&stats.smallFreeCount[spc.sizeclass()], int64(nfreed)) memstats.heapStats.release() } if !preserve { @@ -628,8 +628,8 @@ func (sl *sweepLocked) sweep(preserve bool) bool { mheap_.freeSpan(s) } stats := memstats.heapStats.acquire() - atomic.Xadduintptr(&stats.largeFreeCount, 1) - atomic.Xadduintptr(&stats.largeFree, size) + atomic.Xadd64(&stats.largeFreeCount, 1) + atomic.Xadd64(&stats.largeFree, int64(size)) memstats.heapStats.release() return true } diff --git a/src/runtime/mstats.go b/src/runtime/mstats.go index eeb2a7b4bc..94c507b45f 100644 --- a/src/runtime/mstats.go +++ b/src/runtime/mstats.go @@ -8,7 +8,6 @@ package runtime import ( "runtime/internal/atomic" - "runtime/internal/sys" "unsafe" ) @@ -565,29 +564,29 @@ func updatememstats() { memstats.heapStats.unsafeRead(&consStats) // Collect large allocation stats. - totalAlloc := uint64(consStats.largeAlloc) - memstats.nmalloc += uint64(consStats.largeAllocCount) - totalFree := uint64(consStats.largeFree) - memstats.nfree += uint64(consStats.largeFreeCount) + totalAlloc := consStats.largeAlloc + memstats.nmalloc += consStats.largeAllocCount + totalFree := consStats.largeFree + memstats.nfree += consStats.largeFreeCount // Collect per-sizeclass stats. for i := 0; i < _NumSizeClasses; i++ { // Malloc stats. - a := uint64(consStats.smallAllocCount[i]) + a := consStats.smallAllocCount[i] totalAlloc += a * uint64(class_to_size[i]) memstats.nmalloc += a memstats.by_size[i].nmalloc = a // Free stats. - f := uint64(consStats.smallFreeCount[i]) + f := consStats.smallFreeCount[i] totalFree += f * uint64(class_to_size[i]) memstats.nfree += f memstats.by_size[i].nfree = f } // Account for tiny allocations. - memstats.nfree += uint64(consStats.tinyAllocCount) - memstats.nmalloc += uint64(consStats.tinyAllocCount) + memstats.nfree += consStats.tinyAllocCount + memstats.nmalloc += consStats.tinyAllocCount // Calculate derived stats. memstats.total_alloc = totalAlloc @@ -703,17 +702,20 @@ type heapStatsDelta struct { inPtrScalarBits int64 // byte delta of memory reserved for unrolled GC prog bits // Allocator stats. 
- tinyAllocCount uintptr // number of tiny allocations - largeAlloc uintptr // bytes allocated for large objects - largeAllocCount uintptr // number of large object allocations - smallAllocCount [_NumSizeClasses]uintptr // number of allocs for small objects - largeFree uintptr // bytes freed for large objects (>maxSmallSize) - largeFreeCount uintptr // number of frees for large objects (>maxSmallSize) - smallFreeCount [_NumSizeClasses]uintptr // number of frees for small objects (<=maxSmallSize) - - // Add a uint32 to ensure this struct is a multiple of 8 bytes in size. - // Only necessary on 32-bit platforms. - _ [(sys.PtrSize / 4) % 2]uint32 + // + // These are all uint64 because they're cumulative, and could quickly wrap + // around otherwise. + tinyAllocCount uint64 // number of tiny allocations + largeAlloc uint64 // bytes allocated for large objects + largeAllocCount uint64 // number of large object allocations + smallAllocCount [_NumSizeClasses]uint64 // number of allocs for small objects + largeFree uint64 // bytes freed for large objects (>maxSmallSize) + largeFreeCount uint64 // number of frees for large objects (>maxSmallSize) + smallFreeCount [_NumSizeClasses]uint64 // number of frees for small objects (<=maxSmallSize) + + // NOTE: This struct must be a multiple of 8 bytes in size because it + // is stored in an array. If it's not, atomic accesses to the above + // fields may be unaligned and fail on 32-bit platforms. } // merge adds in the deltas from b into a. -- cgit v1.2.3-54-g00ecf From 9ef614f5aa03ed30664c982c706c98c8b78c99bf Mon Sep 17 00:00:00 2001 From: Keith Randall Date: Mon, 20 Jun 2022 17:06:09 -0700 Subject: [release-branch.go1.17] cmd/compile: allow 128-bit values to be spilled We sometimes use 16-byte load+store to move values around in memory. In rare circumstances, the loaded value must be spilled because the store can't happen yet. In that case, we need to be able to spill the 16-byte value. 
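For illustration only (not part of this CL, using an invented example): a plain assignment of a 16-byte struct is the sort of code where the amd64 backend may move the value with a single 16-byte load plus a 16-byte store, and, rarely, the loaded value must be spilled before the store can happen.

    package main

    import "fmt"

    // pair is 16 bytes; copying it through pointers may be lowered on amd64
    // to one 16-byte (MOVUPS-style) load and one 16-byte store.
    type pair struct{ x, y float64 }

    func copyPair(dst, src *pair) {
        *dst = *src
    }

    func main() {
        a, b := pair{1, 2}, pair{}
        copyPair(&b, &a)
        fmt.Println(b.x, b.y) // 1 2
    }

The diff below only has to teach the spill path (store selection, the TypeInt128 size and alignment, and DWARF generation) to accept such a 16-byte slot.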
Fixes #53470 Change-Id: I09fd08e11a63c6ba3ef781d3f5ede237e9b0132e Reviewed-on: https://go-review.googlesource.com/c/go/+/413294 Reviewed-by: Keith Randall TryBot-Result: Gopher Robot Reviewed-by: David Chase Run-TryBot: Keith Randall Reviewed-by: Keith Randall (cherry picked from commit c2d373d5d1802d7479f3c81dcf01d41bef3646dd) Reviewed-on: https://go-review.googlesource.com/c/go/+/413456 Reviewed-by: Carlos Amedee --- src/cmd/compile/internal/amd64/ssa.go | 2 + src/cmd/compile/internal/dwarfgen/dwarf.go | 5 ++ src/cmd/compile/internal/types/size.go | 6 ++ src/cmd/compile/internal/types/type.go | 5 ++ test/fixedbugs/issue53454.go | 89 ++++++++++++++++++++++++++++++ 5 files changed, 107 insertions(+) create mode 100644 test/fixedbugs/issue53454.go diff --git a/src/cmd/compile/internal/amd64/ssa.go b/src/cmd/compile/internal/amd64/ssa.go index ca5f36e775..52f4937d04 100644 --- a/src/cmd/compile/internal/amd64/ssa.go +++ b/src/cmd/compile/internal/amd64/ssa.go @@ -78,6 +78,8 @@ func storeByType(t *types.Type) obj.As { return x86.AMOVL case 8: return x86.AMOVQ + case 16: + return x86.AMOVUPS } } panic(fmt.Sprintf("bad store type %v", t)) diff --git a/src/cmd/compile/internal/dwarfgen/dwarf.go b/src/cmd/compile/internal/dwarfgen/dwarf.go index 0e22b61bc3..3ecadb9b9d 100644 --- a/src/cmd/compile/internal/dwarfgen/dwarf.go +++ b/src/cmd/compile/internal/dwarfgen/dwarf.go @@ -91,6 +91,11 @@ func Info(fnsym *obj.LSym, infosym *obj.LSym, curfn interface{}) ([]dwarf.Scope, continue } apdecls = append(apdecls, n) + if n.Type().Kind() == types.TSSA { + // Can happen for TypeInt128 types. This only happens for + // spill locations, so not a huge deal. + continue + } fnsym.Func().RecordAutoType(reflectdata.TypeLinksym(n.Type())) } } diff --git a/src/cmd/compile/internal/types/size.go b/src/cmd/compile/internal/types/size.go index f0e695ab96..f2c23887f9 100644 --- a/src/cmd/compile/internal/types/size.go +++ b/src/cmd/compile/internal/types/size.go @@ -625,6 +625,12 @@ func PtrDataSize(t *Type) int64 { } return lastPtrField.Offset + PtrDataSize(lastPtrField.Type) + case TSSA: + if t != TypeInt128 { + base.Fatalf("PtrDataSize: unexpected ssa type %v", t) + } + return 0 + default: base.Fatalf("PtrDataSize: unexpected type, %v", t) return 0 diff --git a/src/cmd/compile/internal/types/type.go b/src/cmd/compile/internal/types/type.go index 1a9aa6916a..15d40e753f 100644 --- a/src/cmd/compile/internal/types/type.go +++ b/src/cmd/compile/internal/types/type.go @@ -1661,6 +1661,11 @@ var ( TypeResultMem = newResults([]*Type{TypeMem}) ) +func init() { + TypeInt128.Width = 16 + TypeInt128.Align = 8 +} + // NewNamed returns a new named type for the given type name. obj should be an // ir.Name. The new type is incomplete (marked as TFORW kind), and the underlying // type should be set later via SetUnderlying(). References to the type are diff --git a/test/fixedbugs/issue53454.go b/test/fixedbugs/issue53454.go new file mode 100644 index 0000000000..8b16d81839 --- /dev/null +++ b/test/fixedbugs/issue53454.go @@ -0,0 +1,89 @@ +// compile + +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package main + +type T1 struct { + A T5 + B T2 + C T7 + D T4 +} + +type T2 struct { + T3 + A float64 + E float64 + C float64 +} + +type T3 struct { + F float64 + G float64 + H float64 + I float64 + J float64 + K float64 + L float64 +} + +type T4 struct { + M float64 + N float64 + O float64 + P float64 +} + +type T5 struct { + Q float64 + R float64 + S float64 + T float64 + U float64 + V float64 +} + +type T6 struct { + T9 + C T10 +} + +type T7 struct { + T10 + T11 +} + +type T8 struct { + T9 + C T7 +} + +type T9 struct { + A T5 + B T3 + D T4 +} + +type T10 struct { + W float64 +} + +type T11 struct { + X float64 + Y float64 +} + +func MainTest(x T1, y T8, z T6) float64 { + return Test(x.B, x.A, x.D, x.C, y.B, y.A, y.D, y.C, z.B, z.A, z.D, + T7{ + T10: T10{ + W: z.C.W, + }, + T11: T11{}, + }, + ) +} +func Test(a T2, b T5, c T4, d T7, e T3, f T5, g T4, h T7, i T3, j T5, k T4, l T7) float64 -- cgit v1.2.3-54-g00ecf From fc07039e2339a80f53e7db5e214b5be504bc1df6 Mon Sep 17 00:00:00 2001 From: Michael Pratt Date: Tue, 28 Jun 2022 15:17:12 -0400 Subject: [release-branch.go1.17] runtime: add race annotations to metricsSema metricsSema protects the metrics map. The map implementation is race instrumented regardless of which package is it called from. semacquire/semrelease are not automatically race instrumented, so we can trigger race false positives without manually annotating our lock acquire and release. See similar instrumentation on trace.shutdownSema and reflectOffs.lock. Fixes #53589. For #53542. Change-Id: Ia3fd239ac860e037d09c7cb9c4ad267391e70705 Reviewed-on: https://go-review.googlesource.com/c/go/+/414517 Run-TryBot: Michael Pratt TryBot-Result: Gopher Robot Reviewed-by: Cherry Mui (cherry picked from commit d6481d5b9662b29453004204746945a93a6b4eb2) Reviewed-on: https://go-review.googlesource.com/c/go/+/415196 --- src/runtime/export_test.go | 4 ++-- src/runtime/metrics.go | 33 +++++++++++++++++++++++++-------- 2 files changed, 27 insertions(+), 10 deletions(-) diff --git a/src/runtime/export_test.go b/src/runtime/export_test.go index 03ccfe10a8..3cee4dec57 100644 --- a/src/runtime/export_test.go +++ b/src/runtime/export_test.go @@ -321,9 +321,9 @@ func ReadMetricsSlow(memStats *MemStats, samplesp unsafe.Pointer, len, cap int) // Initialize the metrics beforehand because this could // allocate and skew the stats. - semacquire(&metricsSema) + metricsLock() initMetrics() - semrelease(&metricsSema) + metricsUnlock() systemstack(func() { // Read memstats first. It's going to flush diff --git a/src/runtime/metrics.go b/src/runtime/metrics.go index 4af2b0aef9..922dd2f814 100644 --- a/src/runtime/metrics.go +++ b/src/runtime/metrics.go @@ -12,9 +12,12 @@ import ( ) var ( - // metrics is a map of runtime/metrics keys to - // data used by the runtime to sample each metric's - // value. + // metrics is a map of runtime/metrics keys to data used by the runtime + // to sample each metric's value. metricsInit indicates it has been + // initialized. + // + // These fields are protected by metricsSema which should be + // locked/unlocked with metricsLock() / metricsUnlock(). metricsSema uint32 = 1 metricsInit bool metrics map[string]metricData @@ -34,6 +37,23 @@ type metricData struct { compute func(in *statAggregate, out *metricValue) } +func metricsLock() { + // Acquire the metricsSema but with handoff. Operations are typically + // expensive enough that queueing up goroutines and handing off between + // them will be noticeably better-behaved. 
+ semacquire1(&metricsSema, true, 0, 0) + if raceenabled { + raceacquire(unsafe.Pointer(&metricsSema)) + } +} + +func metricsUnlock() { + if raceenabled { + racerelease(unsafe.Pointer(&metricsSema)) + } + semrelease(&metricsSema) +} + // initMetrics initializes the metrics map if it hasn't been yet. // // metricsSema must be held. @@ -546,10 +566,7 @@ func readMetrics(samplesp unsafe.Pointer, len int, cap int) { sl := slice{samplesp, len, cap} samples := *(*[]metricSample)(unsafe.Pointer(&sl)) - // Acquire the metricsSema but with handoff. This operation - // is expensive enough that queueing up goroutines and handing - // off between them will be noticeably better-behaved. - semacquire1(&metricsSema, true, 0, 0) + metricsLock() // Ensure the map is initialized. initMetrics() @@ -573,5 +590,5 @@ func readMetrics(samplesp unsafe.Pointer, len int, cap int) { data.compute(&agg, &sample.value) } - semrelease(&metricsSema) + metricsUnlock() } -- cgit v1.2.3-54-g00ecf From ae2dfcc1c8891a7610f2d31d457427b71ed9c6e0 Mon Sep 17 00:00:00 2001 From: Michael Pratt Date: Tue, 28 Jun 2022 16:32:50 -0400 Subject: [release-branch.go1.17] runtime: add race annotations to cbs.lock cbs.lock protects a map. The map implementation is race instrumented regardless of which package is it called from. lock/unlock are not automatically race instrumented, so we can trigger race false positives without manually annotating our lock acquire and release. compileCallback is used during initialization before the P is available, at which point raceacquire will crash during a racecallback to get the race proc. Thus we skip instrumentation until scheduler initialization is complete. Fixes #53612. For #50249. Change-Id: Ie49227c9e9210ffbf0aee65f86f2b7b6a2f64638 Reviewed-on: https://go-review.googlesource.com/c/go/+/414518 TryBot-Result: Gopher Robot Reviewed-by: Cherry Mui Run-TryBot: Michael Pratt (cherry picked from commit 20760cff001e9acc05627dfeab42ea50b57920e6) Reviewed-on: https://go-review.googlesource.com/c/go/+/415197 --- src/runtime/syscall_windows.go | 28 +++++++++++++++++++++++----- 1 file changed, 23 insertions(+), 5 deletions(-) diff --git a/src/runtime/syscall_windows.go b/src/runtime/syscall_windows.go index 94a61c8823..30ef72bce9 100644 --- a/src/runtime/syscall_windows.go +++ b/src/runtime/syscall_windows.go @@ -12,12 +12,30 @@ import ( // cbs stores all registered Go callbacks. var cbs struct { - lock mutex + lock mutex // use cbsLock / cbsUnlock for race instrumentation. ctxt [cb_max]winCallback index map[winCallbackKey]int n int } +func cbsLock() { + lock(&cbs.lock) + // compileCallback is used by goenvs prior to completion of schedinit. + // raceacquire involves a racecallback to get the proc, which is not + // safe prior to scheduler initialization. Thus avoid instrumentation + // until then. + if raceenabled && mainStarted { + raceacquire(unsafe.Pointer(&cbs.lock)) + } +} + +func cbsUnlock() { + if raceenabled && mainStarted { + racerelease(unsafe.Pointer(&cbs.lock)) + } + unlock(&cbs.lock) +} + // winCallback records information about a registered Go callback. type winCallback struct { fn *funcval // Go function @@ -302,11 +320,11 @@ func compileCallback(fn eface, cdecl bool) (code uintptr) { key := winCallbackKey{(*funcval)(fn.data), cdecl} - lock(&cbs.lock) // We don't unlock this in a defer because this is used from the system stack. + cbsLock() // Check if this callback is already registered. 
if n, ok := cbs.index[key]; ok { - unlock(&cbs.lock) + cbsUnlock() return callbackasmAddr(n) } @@ -316,7 +334,7 @@ func compileCallback(fn eface, cdecl bool) (code uintptr) { } n := cbs.n if n >= len(cbs.ctxt) { - unlock(&cbs.lock) + cbsUnlock() throw("too many callback functions") } c := winCallback{key.fn, retPop, abiMap} @@ -324,7 +342,7 @@ func compileCallback(fn eface, cdecl bool) (code uintptr) { cbs.index[key] = n cbs.n++ - unlock(&cbs.lock) + cbsUnlock() return callbackasmAddr(n) } -- cgit v1.2.3-54-g00ecf From d13431c37ab62f9755f705731536ff74e7165b08 Mon Sep 17 00:00:00 2001 From: Damien Neil Date: Wed, 1 Jun 2022 11:17:07 -0700 Subject: [release-branch.go1.17] net/http: don't strip whitespace from Transfer-Encoding headers Do not accept "Transfer-Encoding: \rchunked" as a valid TE header setting chunked encoding. Thanks to Zeyu Zhang (https://www.zeyu2001.com/) for identifying the issue. For #53188 For CVE-2022-1705 Fixes #53432 Change-Id: I1a16631425159267f2eca68056b057192a7edf6c Reviewed-on: https://go-review.googlesource.com/c/go/+/409874 Reviewed-by: Roland Shoemaker Reviewed-by: Brad Fitzpatrick (cherry picked from commit e5017a93fcde94f09836200bca55324af037ee5f) Reviewed-on: https://go-review.googlesource.com/c/go/+/415217 Reviewed-by: Dmitri Shuralyov Run-TryBot: Dmitri Shuralyov Reviewed-by: Dmitri Shuralyov TryBot-Result: Gopher Robot --- src/net/http/serve_test.go | 1 + src/net/http/transfer.go | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/src/net/http/serve_test.go b/src/net/http/serve_test.go index 6394da3bb7..bfac783e3a 100644 --- a/src/net/http/serve_test.go +++ b/src/net/http/serve_test.go @@ -6189,6 +6189,7 @@ func TestUnsupportedTransferEncodingsReturn501(t *testing.T) { "fugazi", "foo-bar", "unknown", + "\rchunked", } for _, badTE := range unsupportedTEs { diff --git a/src/net/http/transfer.go b/src/net/http/transfer.go index 85c2e5a360..3894007d30 100644 --- a/src/net/http/transfer.go +++ b/src/net/http/transfer.go @@ -639,7 +639,7 @@ func (t *transferReader) parseTransferEncoding() error { if len(raw) != 1 { return &unsupportedTEError{fmt.Sprintf("too many transfer encodings: %q", raw)} } - if !ascii.EqualFold(textproto.TrimString(raw[0]), "chunked") { + if !ascii.EqualFold(raw[0], "chunked") { return &unsupportedTEError{fmt.Sprintf("unsupported transfer encoding: %q", raw[0])} } -- cgit v1.2.3-54-g00ecf From ed2f33e1a7e0d18f61bd56f7ee067331d612c27e Mon Sep 17 00:00:00 2001 From: Damien Neil Date: Fri, 17 Jun 2022 10:09:45 -0700 Subject: [release-branch.go1.17] net/http: preserve nil values in Header.Clone ReverseProxy makes a distinction between nil and zero-length header values. Avoid losing nil-ness when cloning a request. Thanks to Christian Mehlmauer for discovering this. 
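As an illustration of the distinction being preserved (a snippet written for this note, not part of the CL): ReverseProxy treats a nil X-Forwarded-For value as "leave this header alone", so a clone must not lose the nil-ness.

    package main

    import (
        "fmt"
        "net/http"
    )

    func main() {
        h := http.Header{"X-Forwarded-For": nil} // nil, not merely empty
        h2 := h.Clone()
        fmt.Println(h2["X-Forwarded-For"] == nil) // true with this fix (previously the nil-ness was lost)
    }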
For #53423 For CVE-2022-32148 Fixes #53620 Change-Id: Ice369cdb4712e2d62e25bb881b080847aa4801f5 Reviewed-on: https://go-review.googlesource.com/c/go/+/412857 Reviewed-by: Ian Lance Taylor Reviewed-by: Brad Fitzpatrick (cherry picked from commit b2cc0fecc2ccd80e6d5d16542cc684f97b3a9c8a) Reviewed-on: https://go-review.googlesource.com/c/go/+/415221 Reviewed-by: Heschi Kreinick TryBot-Result: Gopher Robot Run-TryBot: Michael Knyszek Run-TryBot: Heschi Kreinick Reviewed-by: Michael Knyszek --- src/net/http/header.go | 6 ++++++ src/net/http/header_test.go | 5 +++++ 2 files changed, 11 insertions(+) diff --git a/src/net/http/header.go b/src/net/http/header.go index 4c72dcb2c8..ef4ee7ffa8 100644 --- a/src/net/http/header.go +++ b/src/net/http/header.go @@ -101,6 +101,12 @@ func (h Header) Clone() Header { sv := make([]string, nv) // shared backing array for headers' values h2 := make(Header, len(h)) for k, vv := range h { + if vv == nil { + // Preserve nil values. ReverseProxy distinguishes + // between nil and zero-length header values. + h2[k] = nil + continue + } n := copy(sv, vv) h2[k] = sv[:n:n] sv = sv[n:] diff --git a/src/net/http/header_test.go b/src/net/http/header_test.go index 4789362919..80c003551d 100644 --- a/src/net/http/header_test.go +++ b/src/net/http/header_test.go @@ -235,6 +235,11 @@ func TestCloneOrMakeHeader(t *testing.T) { in: Header{"foo": {"bar"}}, want: Header{"foo": {"bar"}}, }, + { + name: "nil value", + in: Header{"foo": nil}, + want: Header{"foo": nil}, + }, } for _, tt := range tests { -- cgit v1.2.3-54-g00ecf From 58facfbe7db2fbb9afed794b281a70bdb12a60ae Mon Sep 17 00:00:00 2001 From: Roland Shoemaker Date: Mon, 28 Mar 2022 18:41:26 -0700 Subject: [release-branch.go1.17] encoding/xml: use iterative Skip, rather than recursive Prevents exhausting the stack limit in _incredibly_ deeply nested structures. Fixes #53711 Updates #53614 Fixes CVE-2022-28131 Change-Id: I47db4595ce10cecc29fbd06afce7b299868599e6 Reviewed-on: https://team-review.git.corp.google.com/c/golang/go-private/+/1419912 Reviewed-by: Julie Qiu Reviewed-by: Damien Neil (cherry picked from commit 9278cb78443d2b4deb24cbb5b61c9ba5ac688d49) Reviewed-on: https://go-review.googlesource.com/c/go/+/417068 TryBot-Result: Gopher Robot Reviewed-by: Heschi Kreinick Run-TryBot: Michael Knyszek --- src/encoding/xml/read.go | 15 ++++++++------- src/encoding/xml/read_test.go | 18 ++++++++++++++++++ 2 files changed, 26 insertions(+), 7 deletions(-) diff --git a/src/encoding/xml/read.go b/src/encoding/xml/read.go index ef5df3f7f6..e9f9d2efa9 100644 --- a/src/encoding/xml/read.go +++ b/src/encoding/xml/read.go @@ -732,12 +732,12 @@ Loop: } // Skip reads tokens until it has consumed the end element -// matching the most recent start element already consumed. -// It recurs if it encounters a start element, so it can be used to -// skip nested structures. +// matching the most recent start element already consumed, +// skipping nested structures. // It returns nil if it finds an end element matching the start // element; otherwise it returns an error describing the problem. 
func (d *Decoder) Skip() error { + var depth int64 for { tok, err := d.Token() if err != nil { @@ -745,11 +745,12 @@ func (d *Decoder) Skip() error { } switch tok.(type) { case StartElement: - if err := d.Skip(); err != nil { - return err - } + depth++ case EndElement: - return nil + if depth == 0 { + return nil + } + depth-- } } } diff --git a/src/encoding/xml/read_test.go b/src/encoding/xml/read_test.go index 8c2e70fa22..4ccab3d010 100644 --- a/src/encoding/xml/read_test.go +++ b/src/encoding/xml/read_test.go @@ -5,8 +5,10 @@ package xml import ( + "bytes" "io" "reflect" + "runtime" "strings" "testing" "time" @@ -1079,3 +1081,19 @@ func TestUnmarshalWhitespaceAttrs(t *testing.T) { t.Fatalf("whitespace attrs: Unmarshal:\nhave: %#+v\nwant: %#+v", v, want) } } + +func TestCVE202230633(t *testing.T) { + if runtime.GOARCH == "wasm" { + t.Skip("causes memory exhaustion on js/wasm") + } + defer func() { + p := recover() + if p != nil { + t.Fatal("Unmarshal panicked") + } + }() + var example struct { + Things []string + } + Unmarshal(bytes.Repeat([]byte(""), 17_000_000), &example) +} -- cgit v1.2.3-54-g00ecf From 2678d0c957193dceef336c969a9da74dd716a827 Mon Sep 17 00:00:00 2001 From: Roland Shoemaker Date: Tue, 29 Mar 2022 15:52:09 -0700 Subject: [release-branch.go1.17] encoding/xml: limit depth of nesting in unmarshal Prevent exhausting the stack limit when unmarshalling extremely deeply nested structures into nested types. Fixes #53715 Updates #53611 Fixes CVE-2022-30633 Change-Id: Ic6c5d41674c93cfc9a316135a408db9156d39c59 Reviewed-on: https://team-review.git.corp.google.com/c/golang/go-private/+/1421319 Reviewed-by: Damien Neil Reviewed-by: Julie Qiu (cherry picked from commit ebee00a55e28931b2cad0e76207a73712b000432) Reviewed-on: https://go-review.googlesource.com/c/go/+/417069 Reviewed-by: Heschi Kreinick Run-TryBot: Michael Knyszek TryBot-Result: Gopher Robot --- src/encoding/xml/read.go | 27 +++++++++++++++++++-------- src/encoding/xml/read_test.go | 14 ++++++++++++++ 2 files changed, 33 insertions(+), 8 deletions(-) diff --git a/src/encoding/xml/read.go b/src/encoding/xml/read.go index e9f9d2efa9..c77579880c 100644 --- a/src/encoding/xml/read.go +++ b/src/encoding/xml/read.go @@ -148,7 +148,7 @@ func (d *Decoder) DecodeElement(v interface{}, start *StartElement) error { if val.Kind() != reflect.Ptr { return errors.New("non-pointer passed to Unmarshal") } - return d.unmarshal(val.Elem(), start) + return d.unmarshal(val.Elem(), start, 0) } // An UnmarshalError represents an error in the unmarshaling process. @@ -304,8 +304,15 @@ var ( textUnmarshalerType = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem() ) +const maxUnmarshalDepth = 10000 + +var errExeceededMaxUnmarshalDepth = errors.New("exceeded max depth") + // Unmarshal a single XML element into val. -func (d *Decoder) unmarshal(val reflect.Value, start *StartElement) error { +func (d *Decoder) unmarshal(val reflect.Value, start *StartElement, depth int) error { + if depth >= maxUnmarshalDepth { + return errExeceededMaxUnmarshalDepth + } // Find start element if we need it. if start == nil { for { @@ -398,7 +405,7 @@ func (d *Decoder) unmarshal(val reflect.Value, start *StartElement) error { v.Set(reflect.Append(val, reflect.Zero(v.Type().Elem()))) // Recur to read element into slice. 
- if err := d.unmarshal(v.Index(n), start); err != nil { + if err := d.unmarshal(v.Index(n), start, depth+1); err != nil { v.SetLen(n) return err } @@ -521,13 +528,15 @@ Loop: case StartElement: consumed := false if sv.IsValid() { - consumed, err = d.unmarshalPath(tinfo, sv, nil, &t) + // unmarshalPath can call unmarshal, so we need to pass the depth through so that + // we can continue to enforce the maximum recusion limit. + consumed, err = d.unmarshalPath(tinfo, sv, nil, &t, depth) if err != nil { return err } if !consumed && saveAny.IsValid() { consumed = true - if err := d.unmarshal(saveAny, &t); err != nil { + if err := d.unmarshal(saveAny, &t, depth+1); err != nil { return err } } @@ -672,7 +681,7 @@ func copyValue(dst reflect.Value, src []byte) (err error) { // The consumed result tells whether XML elements have been consumed // from the Decoder until start's matching end element, or if it's // still untouched because start is uninteresting for sv's fields. -func (d *Decoder) unmarshalPath(tinfo *typeInfo, sv reflect.Value, parents []string, start *StartElement) (consumed bool, err error) { +func (d *Decoder) unmarshalPath(tinfo *typeInfo, sv reflect.Value, parents []string, start *StartElement, depth int) (consumed bool, err error) { recurse := false Loop: for i := range tinfo.fields { @@ -687,7 +696,7 @@ Loop: } if len(finfo.parents) == len(parents) && finfo.name == start.Name.Local { // It's a perfect match, unmarshal the field. - return true, d.unmarshal(finfo.value(sv, initNilPointers), start) + return true, d.unmarshal(finfo.value(sv, initNilPointers), start, depth+1) } if len(finfo.parents) > len(parents) && finfo.parents[len(parents)] == start.Name.Local { // It's a prefix for the field. Break and recurse @@ -716,7 +725,9 @@ Loop: } switch t := tok.(type) { case StartElement: - consumed2, err := d.unmarshalPath(tinfo, sv, parents, &t) + // the recursion depth of unmarshalPath is limited to the path length specified + // by the struct field tag, so we don't increment the depth here. + consumed2, err := d.unmarshalPath(tinfo, sv, parents, &t, depth) if err != nil { return true, err } diff --git a/src/encoding/xml/read_test.go b/src/encoding/xml/read_test.go index 4ccab3d010..8c940aefb8 100644 --- a/src/encoding/xml/read_test.go +++ b/src/encoding/xml/read_test.go @@ -6,6 +6,7 @@ package xml import ( "bytes" + "errors" "io" "reflect" "runtime" @@ -1097,3 +1098,16 @@ func TestCVE202230633(t *testing.T) { } Unmarshal(bytes.Repeat([]byte(""), 17_000_000), &example) } + +func TestCVE202228131(t *testing.T) { + type nested struct { + Parent *nested `xml:",any"` + } + var n nested + err := Unmarshal(bytes.Repeat([]byte(""), maxUnmarshalDepth+1), &n) + if err == nil { + t.Fatal("Unmarshal did not fail") + } else if !errors.Is(err, errExeceededMaxUnmarshalDepth) { + t.Fatalf("Unmarshal unexpected error: got %q, want %q", err, errExeceededMaxUnmarshalDepth) + } +} -- cgit v1.2.3-54-g00ecf From ba8788ebcead55e99e631c6a1157ad7b35535d11 Mon Sep 17 00:00:00 2001 From: Roland Shoemaker Date: Wed, 15 Jun 2022 10:43:05 -0700 Subject: [release-branch.go1.17] go/parser: limit recursion depth Limit nested parsing to 100,000, which prevents stack exhaustion when parsing deeply nested statements, types, and expressions. Also limit the scope depth to 1,000 during object resolution. Thanks to Juho Nurminen of Mattermost for reporting this issue. 
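To show the effect (an illustrative program, not part of the CL): pathological nesting now surfaces as an ordinary parse error instead of exhausting the stack.

    package main

    import (
        "fmt"
        "go/parser"
        "strings"
    )

    func main() {
        // 200,000 nested parentheses are well past the new 100,000 limit.
        src := strings.Repeat("(", 200000) + "1" + strings.Repeat(")", 200000)
        _, err := parser.ParseExpr(src)
        fmt.Println(err) // expect an "exceeded max nesting depth" error, not a crash
    }

The 100,000 parse-depth and 1,000 scope-depth bounds sit far above anything hand-written source reaches, so legitimate programs are unaffected.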
Fixes #53707 Updates #53616 Fixes CVE-2022-1962 Change-Id: I4d7b86c1d75d0bf3c7af1fdea91582aa74272c64 Reviewed-on: https://team-review.git.corp.google.com/c/golang/go-private/+/1491025 Reviewed-by: Russ Cox Reviewed-by: Damien Neil (cherry picked from commit 6a856f08d58e4b6705c0c337d461c540c1235c83) Reviewed-on: https://go-review.googlesource.com/c/go/+/417070 Reviewed-by: Heschi Kreinick TryBot-Result: Gopher Robot Run-TryBot: Michael Knyszek --- src/go/parser/interface.go | 10 ++- src/go/parser/parser.go | 54 +++++++++++++- src/go/parser/parser_test.go | 169 +++++++++++++++++++++++++++++++++++++++++++ src/go/parser/resolver.go | 9 +++ 4 files changed, 236 insertions(+), 6 deletions(-) diff --git a/src/go/parser/interface.go b/src/go/parser/interface.go index 85486d2f4b..eae429e6ef 100644 --- a/src/go/parser/interface.go +++ b/src/go/parser/interface.go @@ -97,8 +97,11 @@ func ParseFile(fset *token.FileSet, filename string, src interface{}, mode Mode) defer func() { if e := recover(); e != nil { // resume same panic if it's not a bailout - if _, ok := e.(bailout); !ok { + bail, ok := e.(bailout) + if !ok { panic(e) + } else if bail.msg != "" { + p.errors.Add(p.file.Position(bail.pos), bail.msg) } } @@ -203,8 +206,11 @@ func ParseExprFrom(fset *token.FileSet, filename string, src interface{}, mode M defer func() { if e := recover(); e != nil { // resume same panic if it's not a bailout - if _, ok := e.(bailout); !ok { + bail, ok := e.(bailout) + if !ok { panic(e) + } else if bail.msg != "" { + p.errors.Add(p.file.Position(bail.pos), bail.msg) } } p.errors.Sort() diff --git a/src/go/parser/parser.go b/src/go/parser/parser.go index f10c8650af..2c42b9f8cc 100644 --- a/src/go/parser/parser.go +++ b/src/go/parser/parser.go @@ -60,6 +60,10 @@ type parser struct { inRhs bool // if set, the parser is parsing a rhs expression imports []*ast.ImportSpec // list of imports + + // nestLev is used to track and limit the recursion depth + // during parsing. + nestLev int } func (p *parser) init(fset *token.FileSet, filename string, src []byte, mode Mode) { @@ -110,6 +114,24 @@ func un(p *parser) { p.printTrace(")") } +// maxNestLev is the deepest we're willing to recurse during parsing +const maxNestLev int = 1e5 + +func incNestLev(p *parser) *parser { + p.nestLev++ + if p.nestLev > maxNestLev { + p.error(p.pos, "exceeded max nesting depth") + panic(bailout{}) + } + return p +} + +// decNestLev is used to track nesting depth during parsing to prevent stack exhaustion. +// It is used along with incNestLev in a similar fashion to how un and trace are used. +func decNestLev(p *parser) { + p.nestLev-- +} + // Advance to the next token. func (p *parser) next0() { // Because of one-token look-ahead, print the previous token @@ -222,8 +244,12 @@ func (p *parser) next() { } } -// A bailout panic is raised to indicate early termination. -type bailout struct{} +// A bailout panic is raised to indicate early termination. pos and msg are +// only populated when bailing out of object resolution. 
+type bailout struct { + pos token.Pos + msg string +} func (p *parser) error(pos token.Pos, msg string) { if p.trace { @@ -1119,6 +1145,8 @@ func (p *parser) parseTypeInstance(typ ast.Expr) ast.Expr { } func (p *parser) tryIdentOrType() ast.Expr { + defer decNestLev(incNestLev(p)) + switch p.tok { case token.IDENT: typ := p.parseTypeName(nil) @@ -1531,7 +1559,13 @@ func (p *parser) parsePrimaryExpr() (x ast.Expr) { } x = p.parseOperand() - for { + // We track the nesting here rather than at the entry for the function, + // since it can iteratively produce a nested output, and we want to + // limit how deep a structure we generate. + var n int + defer func() { p.nestLev -= n }() + for n = 1; ; n++ { + incNestLev(p) switch p.tok { case token.PERIOD: p.next() @@ -1591,6 +1625,8 @@ func (p *parser) parsePrimaryExpr() (x ast.Expr) { } func (p *parser) parseUnaryExpr() ast.Expr { + defer decNestLev(incNestLev(p)) + if p.trace { defer un(trace(p, "UnaryExpr")) } @@ -1673,7 +1709,13 @@ func (p *parser) parseBinaryExpr(prec1 int) ast.Expr { } x := p.parseUnaryExpr() - for { + // We track the nesting here rather than at the entry for the function, + // since it can iteratively produce a nested output, and we want to + // limit how deep a structure we generate. + var n int + defer func() { p.nestLev -= n }() + for n = 1; ; n++ { + incNestLev(p) op, oprec := p.tokPrec() if oprec < prec1 { return x @@ -1962,6 +2004,8 @@ func (p *parser) parseIfHeader() (init ast.Stmt, cond ast.Expr) { } func (p *parser) parseIfStmt() *ast.IfStmt { + defer decNestLev(incNestLev(p)) + if p.trace { defer un(trace(p, "IfStmt")) } @@ -2265,6 +2309,8 @@ func (p *parser) parseForStmt() ast.Stmt { } func (p *parser) parseStmt() (s ast.Stmt) { + defer decNestLev(incNestLev(p)) + if p.trace { defer un(trace(p, "Statement")) } diff --git a/src/go/parser/parser_test.go b/src/go/parser/parser_test.go index a4f882d368..1a46c87866 100644 --- a/src/go/parser/parser_test.go +++ b/src/go/parser/parser_test.go @@ -10,6 +10,7 @@ import ( "go/ast" "go/token" "io/fs" + "runtime" "strings" "testing" ) @@ -577,3 +578,171 @@ type x int // comment t.Errorf("got %q, want %q", comment, "// comment") } } + +var parseDepthTests = []struct { + name string + format string + // multipler is used when a single statement may result in more than one + // change in the depth level, for instance "1+(..." produces a BinaryExpr + // followed by a UnaryExpr, which increments the depth twice. The test + // case comment explains which nodes are triggering the multiple depth + // changes. + parseMultiplier int + // scope is true if we should also test the statement for the resolver scope + // depth limit. + scope bool + // scopeMultiplier does the same as parseMultiplier, but for the scope + // depths. + scopeMultiplier int +}{ + // The format expands the part inside « » many times. + // A second set of brackets nested inside the first stops the repetition, + // so that for example «(«1»)» expands to (((...((((1))))...))). 
+ {name: "array", format: "package main; var x «[1]»int"}, + {name: "slice", format: "package main; var x «[]»int"}, + {name: "struct", format: "package main; var x «struct { X «int» }»", scope: true}, + {name: "pointer", format: "package main; var x «*»int"}, + {name: "func", format: "package main; var x «func()»int", scope: true}, + {name: "chan", format: "package main; var x «chan »int"}, + {name: "chan2", format: "package main; var x «<-chan »int"}, + {name: "interface", format: "package main; var x «interface { M() «int» }»", scope: true, scopeMultiplier: 2}, // Scopes: InterfaceType, FuncType + {name: "map", format: "package main; var x «map[int]»int"}, + {name: "slicelit", format: "package main; var x = «[]any{«»}»", parseMultiplier: 2}, // Parser nodes: UnaryExpr, CompositeLit + {name: "arraylit", format: "package main; var x = «[1]any{«nil»}»", parseMultiplier: 2}, // Parser nodes: UnaryExpr, CompositeLit + {name: "structlit", format: "package main; var x = «struct{x any}{«nil»}»", parseMultiplier: 2}, // Parser nodes: UnaryExpr, CompositeLit + {name: "maplit", format: "package main; var x = «map[int]any{1:«nil»}»", parseMultiplier: 2}, // Parser nodes: CompositeLit, KeyValueExpr + {name: "dot", format: "package main; var x = «x.»x"}, + {name: "index", format: "package main; var x = x«[1]»"}, + {name: "slice", format: "package main; var x = x«[1:2]»"}, + {name: "slice3", format: "package main; var x = x«[1:2:3]»"}, + {name: "dottype", format: "package main; var x = x«.(any)»"}, + {name: "callseq", format: "package main; var x = x«()»"}, + {name: "methseq", format: "package main; var x = x«.m()»", parseMultiplier: 2}, // Parser nodes: SelectorExpr, CallExpr + {name: "binary", format: "package main; var x = «1+»1"}, + {name: "binaryparen", format: "package main; var x = «1+(«1»)»", parseMultiplier: 2}, // Parser nodes: BinaryExpr, ParenExpr + {name: "unary", format: "package main; var x = «^»1"}, + {name: "addr", format: "package main; var x = «& »x"}, + {name: "star", format: "package main; var x = «*»x"}, + {name: "recv", format: "package main; var x = «<-»x"}, + {name: "call", format: "package main; var x = «f(«1»)»", parseMultiplier: 2}, // Parser nodes: Ident, CallExpr + {name: "conv", format: "package main; var x = «(*T)(«1»)»", parseMultiplier: 2}, // Parser nodes: ParenExpr, CallExpr + {name: "label", format: "package main; func main() { «Label:» }"}, + {name: "if", format: "package main; func main() { «if true { «» }»}", parseMultiplier: 2, scope: true, scopeMultiplier: 2}, // Parser nodes: IfStmt, BlockStmt. 
Scopes: IfStmt, BlockStmt + {name: "ifelse", format: "package main; func main() { «if true {} else » {} }", scope: true}, + {name: "switch", format: "package main; func main() { «switch { default: «» }»}", scope: true, scopeMultiplier: 2}, // Scopes: TypeSwitchStmt, CaseClause + {name: "typeswitch", format: "package main; func main() { «switch x.(type) { default: «» }» }", scope: true, scopeMultiplier: 2}, // Scopes: TypeSwitchStmt, CaseClause + {name: "for0", format: "package main; func main() { «for { «» }» }", scope: true, scopeMultiplier: 2}, // Scopes: ForStmt, BlockStmt + {name: "for1", format: "package main; func main() { «for x { «» }» }", scope: true, scopeMultiplier: 2}, // Scopes: ForStmt, BlockStmt + {name: "for3", format: "package main; func main() { «for f(); g(); h() { «» }» }", scope: true, scopeMultiplier: 2}, // Scopes: ForStmt, BlockStmt + {name: "forrange0", format: "package main; func main() { «for range x { «» }» }", scope: true, scopeMultiplier: 2}, // Scopes: RangeStmt, BlockStmt + {name: "forrange1", format: "package main; func main() { «for x = range z { «» }» }", scope: true, scopeMultiplier: 2}, // Scopes: RangeStmt, BlockStmt + {name: "forrange2", format: "package main; func main() { «for x, y = range z { «» }» }", scope: true, scopeMultiplier: 2}, // Scopes: RangeStmt, BlockStmt + {name: "go", format: "package main; func main() { «go func() { «» }()» }", parseMultiplier: 2, scope: true}, // Parser nodes: GoStmt, FuncLit + {name: "defer", format: "package main; func main() { «defer func() { «» }()» }", parseMultiplier: 2, scope: true}, // Parser nodes: DeferStmt, FuncLit + {name: "select", format: "package main; func main() { «select { default: «» }» }", scope: true}, +} + +// split splits pre«mid»post into pre, mid, post. +// If the string does not have that form, split returns x, "", "". +func split(x string) (pre, mid, post string) { + start, end := strings.Index(x, "«"), strings.LastIndex(x, "»") + if start < 0 || end < 0 { + return x, "", "" + } + return x[:start], x[start+len("«") : end], x[end+len("»"):] +} + +func TestParseDepthLimit(t *testing.T) { + if runtime.GOARCH == "wasm" { + t.Skip("causes call stack exhaustion on js/wasm") + } + for _, tt := range parseDepthTests { + for _, size := range []string{"small", "big"} { + t.Run(tt.name+"/"+size, func(t *testing.T) { + n := maxNestLev + 1 + if tt.parseMultiplier > 0 { + n /= tt.parseMultiplier + } + if size == "small" { + // Decrease the number of statements by 10, in order to check + // that we do not fail when under the limit. 10 is used to + // provide some wiggle room for cases where the surrounding + // scaffolding syntax adds some noise to the depth that changes + // on a per testcase basis. + n -= 10 + } + + pre, mid, post := split(tt.format) + if strings.Contains(mid, "«") { + left, base, right := split(mid) + mid = strings.Repeat(left, n) + base + strings.Repeat(right, n) + } else { + mid = strings.Repeat(mid, n) + } + input := pre + mid + post + + fset := token.NewFileSet() + _, err := ParseFile(fset, "", input, ParseComments|SkipObjectResolution) + if size == "small" { + if err != nil { + t.Errorf("ParseFile(...): %v (want success)", err) + } + } else { + expected := "exceeded max nesting depth" + if err == nil || !strings.HasSuffix(err.Error(), expected) { + t.Errorf("ParseFile(...) 
= _, %v, want %q", err, expected) + } + } + }) + } + } +} + +func TestScopeDepthLimit(t *testing.T) { + if runtime.GOARCH == "wasm" { + t.Skip("causes call stack exhaustion on js/wasm") + } + for _, tt := range parseDepthTests { + if !tt.scope { + continue + } + for _, size := range []string{"small", "big"} { + t.Run(tt.name+"/"+size, func(t *testing.T) { + n := maxScopeDepth + 1 + if tt.scopeMultiplier > 0 { + n /= tt.scopeMultiplier + } + if size == "small" { + // Decrease the number of statements by 10, in order to check + // that we do not fail when under the limit. 10 is used to + // provide some wiggle room for cases where the surrounding + // scaffolding syntax adds some noise to the depth that changes + // on a per testcase basis. + n -= 10 + } + + pre, mid, post := split(tt.format) + if strings.Contains(mid, "«") { + left, base, right := split(mid) + mid = strings.Repeat(left, n) + base + strings.Repeat(right, n) + } else { + mid = strings.Repeat(mid, n) + } + input := pre + mid + post + + fset := token.NewFileSet() + _, err := ParseFile(fset, "", input, DeclarationErrors) + if size == "small" { + if err != nil { + t.Errorf("ParseFile(...): %v (want success)", err) + } + } else { + expected := "exceeded max scope depth during object resolution" + if err == nil || !strings.HasSuffix(err.Error(), expected) { + t.Errorf("ParseFile(...) = _, %v, want %q", err, expected) + } + } + }) + } + } +} diff --git a/src/go/parser/resolver.go b/src/go/parser/resolver.go index cf92c7e4f5..f55bdb7f17 100644 --- a/src/go/parser/resolver.go +++ b/src/go/parser/resolver.go @@ -25,6 +25,7 @@ func resolveFile(file *ast.File, handle *token.File, declErr func(token.Pos, str declErr: declErr, topScope: pkgScope, pkgScope: pkgScope, + depth: 1, } for _, decl := range file.Decls { @@ -53,6 +54,8 @@ func resolveFile(file *ast.File, handle *token.File, declErr func(token.Pos, str file.Unresolved = r.unresolved[0:i] } +const maxScopeDepth int = 1e3 + type resolver struct { handle *token.File declErr func(token.Pos, string) @@ -61,6 +64,7 @@ type resolver struct { pkgScope *ast.Scope // pkgScope.Outer == nil topScope *ast.Scope // top-most scope; may be pkgScope unresolved []*ast.Ident // unresolved identifiers + depth int // scope depth // Label scopes // (maintained by open/close LabelScope) @@ -83,6 +87,10 @@ func (r *resolver) sprintf(format string, args ...interface{}) string { } func (r *resolver) openScope(pos token.Pos) { + r.depth++ + if r.depth > maxScopeDepth { + panic(bailout{pos: pos, msg: "exceeded max scope depth during object resolution"}) + } if debugResolve { r.dump("opening scope @%v", pos) } @@ -90,6 +98,7 @@ func (r *resolver) openScope(pos token.Pos) { } func (r *resolver) closeScope() { + r.depth-- if debugResolve { r.dump("closing scope") } -- cgit v1.2.3-54-g00ecf From 0117dee7dccbbd7803d88f65a2ce8bd686219ad3 Mon Sep 17 00:00:00 2001 From: Tatiana Bradley Date: Fri, 6 May 2022 11:25:06 -0400 Subject: [release-branch.go1.17] compress/gzip: fix stack exhaustion bug in Reader.Read Replace recursion with iteration in Reader.Read to avoid stack exhaustion when there are a large number of files. 
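For context (an illustrative usage snippet, not part of the CL): the recursion lived on the multistream path, where Read advances from one gzip member to the next; with millions of empty members, each hop added a stack frame. A normal multistream read looks like this and is unchanged in behavior:

    package main

    import (
        "bytes"
        "compress/gzip"
        "fmt"
        "io"
    )

    func main() {
        // Two concatenated gzip members; Reader.Read walks from one to the
        // next internally, now with a loop rather than a recursive call.
        var buf bytes.Buffer
        for _, s := range []string{"hello ", "world"} {
            w := gzip.NewWriter(&buf)
            w.Write([]byte(s))
            w.Close()
        }
        r, _ := gzip.NewReader(&buf)
        out, _ := io.ReadAll(r)
        fmt.Println(string(out)) // hello world
    }

Iterating keeps the stack depth constant no matter how many members the input contains.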
Fixes CVE-2022-30631 Fixes #53717 Updates #53168 Change-Id: I47d8afe3f2d40b0213ab61431df9b221794dbfe0 Reviewed-on: https://team-review.git.corp.google.com/c/golang/go-private/+/1455673 Reviewed-by: Roland Shoemaker Reviewed-by: Julie Qiu (cherry picked from commit cf498969c8a0bae9d7a24b98fc1f66c824a4775d) Reviewed-on: https://go-review.googlesource.com/c/go/+/417071 Reviewed-by: Heschi Kreinick Run-TryBot: Michael Knyszek TryBot-Result: Gopher Robot --- src/compress/gzip/gunzip.go | 60 +++++++++++++++++++--------------------- src/compress/gzip/gunzip_test.go | 16 +++++++++++ 2 files changed, 45 insertions(+), 31 deletions(-) diff --git a/src/compress/gzip/gunzip.go b/src/compress/gzip/gunzip.go index 924bce10b7..237b2b928b 100644 --- a/src/compress/gzip/gunzip.go +++ b/src/compress/gzip/gunzip.go @@ -248,42 +248,40 @@ func (z *Reader) Read(p []byte) (n int, err error) { return 0, z.err } - n, z.err = z.decompressor.Read(p) - z.digest = crc32.Update(z.digest, crc32.IEEETable, p[:n]) - z.size += uint32(n) - if z.err != io.EOF { - // In the normal case we return here. - return n, z.err - } + for n == 0 { + n, z.err = z.decompressor.Read(p) + z.digest = crc32.Update(z.digest, crc32.IEEETable, p[:n]) + z.size += uint32(n) + if z.err != io.EOF { + // In the normal case we return here. + return n, z.err + } - // Finished file; check checksum and size. - if _, err := io.ReadFull(z.r, z.buf[:8]); err != nil { - z.err = noEOF(err) - return n, z.err - } - digest := le.Uint32(z.buf[:4]) - size := le.Uint32(z.buf[4:8]) - if digest != z.digest || size != z.size { - z.err = ErrChecksum - return n, z.err - } - z.digest, z.size = 0, 0 + // Finished file; check checksum and size. + if _, err := io.ReadFull(z.r, z.buf[:8]); err != nil { + z.err = noEOF(err) + return n, z.err + } + digest := le.Uint32(z.buf[:4]) + size := le.Uint32(z.buf[4:8]) + if digest != z.digest || size != z.size { + z.err = ErrChecksum + return n, z.err + } + z.digest, z.size = 0, 0 - // File is ok; check if there is another. - if !z.multistream { - return n, io.EOF - } - z.err = nil // Remove io.EOF + // File is ok; check if there is another. + if !z.multistream { + return n, io.EOF + } + z.err = nil // Remove io.EOF - if _, z.err = z.readHeader(); z.err != nil { - return n, z.err + if _, z.err = z.readHeader(); z.err != nil { + return n, z.err + } } - // Read from next file, if necessary. - if n > 0 { - return n, nil - } - return z.Read(p) + return n, nil } // Close closes the Reader. It does not close the underlying io.Reader. diff --git a/src/compress/gzip/gunzip_test.go b/src/compress/gzip/gunzip_test.go index 17c23e8a9b..6fe8ddcf55 100644 --- a/src/compress/gzip/gunzip_test.go +++ b/src/compress/gzip/gunzip_test.go @@ -515,3 +515,19 @@ func TestTruncatedStreams(t *testing.T) { } } } + +func TestCVE202230631(t *testing.T) { + var empty = []byte{0x1f, 0x8b, 0x08, 0x00, 0xa7, 0x8f, 0x43, 0x62, 0x00, + 0x03, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} + r := bytes.NewReader(bytes.Repeat(empty, 4e6)) + z, err := NewReader(r) + if err != nil { + t.Fatalf("NewReader: got %v, want nil", err) + } + // Prior to CVE-2022-30631 fix, this would cause an unrecoverable panic due + // to stack exhaustion. 
+ _, err = z.Read(make([]byte, 10)) + if err != io.EOF { + t.Errorf("Reader.Read: got %v, want %v", err, io.EOF) + } +} -- cgit v1.2.3-54-g00ecf From 8c1d8c836270615cfb5b229932269048ef59ac07 Mon Sep 17 00:00:00 2001 From: Julie Qiu Date: Thu, 23 Jun 2022 23:17:53 +0000 Subject: [release-branch.go1.17] io/fs: fix stack exhaustion in Glob A limit is added to the number of path separators allowed by an input to Glob, to prevent stack exhaustion issues. Thanks to Juho Nurminen of Mattermost who reported a similar issue in path/filepath. Fixes #53719 Updates #53415 Fixes CVE-2022-30630 Change-Id: I5a9d02591fed90cd3d52627f5945f1301e53465d Reviewed-on: https://team-review.git.corp.google.com/c/golang/go-private/+/1497588 Reviewed-by: Roland Shoemaker (cherry picked from commit fdccc5d7bd0f276d0a8de3a818ca844f0bed5d97) Reviewed-on: https://go-review.googlesource.com/c/go/+/417072 Reviewed-by: Heschi Kreinick TryBot-Result: Gopher Robot Run-TryBot: Michael Knyszek --- src/io/fs/glob.go | 14 ++++++++++++-- src/io/fs/glob_test.go | 10 ++++++++++ 2 files changed, 22 insertions(+), 2 deletions(-) diff --git a/src/io/fs/glob.go b/src/io/fs/glob.go index 45d9cb61b9..0e529cd05d 100644 --- a/src/io/fs/glob.go +++ b/src/io/fs/glob.go @@ -31,6 +31,16 @@ type GlobFS interface { // Otherwise, Glob uses ReadDir to traverse the directory tree // and look for matches for the pattern. func Glob(fsys FS, pattern string) (matches []string, err error) { + return globWithLimit(fsys, pattern, 0) +} + +func globWithLimit(fsys FS, pattern string, depth int) (matches []string, err error) { + // This limit is added to prevent stack exhaustion issues. See + // CVE-2022-30630. + const pathSeparatorsLimit = 10000 + if depth > pathSeparatorsLimit { + return nil, path.ErrBadPattern + } if fsys, ok := fsys.(GlobFS); ok { return fsys.Glob(pattern) } @@ -59,9 +69,9 @@ func Glob(fsys FS, pattern string) (matches []string, err error) { } var m []string - m, err = Glob(fsys, dir) + m, err = globWithLimit(fsys, dir, depth+1) if err != nil { - return + return nil, err } for _, d := range m { matches, err = glob(fsys, d, file, matches) diff --git a/src/io/fs/glob_test.go b/src/io/fs/glob_test.go index f19bebed77..d052eab371 100644 --- a/src/io/fs/glob_test.go +++ b/src/io/fs/glob_test.go @@ -8,6 +8,7 @@ import ( . "io/fs" "os" "path" + "strings" "testing" ) @@ -55,6 +56,15 @@ func TestGlobError(t *testing.T) { } } +func TestCVE202230630(t *testing.T) { + // Prior to CVE-2022-30630, a stack exhaustion would occur given a large + // number of separators. There is now a limit of 10,000. + _, err := Glob(os.DirFS("."), "/*"+strings.Repeat("/", 10001)) + if err != path.ErrBadPattern { + t.Fatalf("Glob returned err=%v, want %v", err, path.ErrBadPattern) + } +} + // contains reports whether vector contains the string s. func contains(vector []string, s string) bool { for _, elem := range vector { -- cgit v1.2.3-54-g00ecf From 76f8b7304d1f7c25834e2a0cc9e88c55276c47df Mon Sep 17 00:00:00 2001 From: Julie Qiu Date: Thu, 23 Jun 2022 23:18:56 +0000 Subject: [release-branch.go1.17] path/filepath: fix stack exhaustion in Glob A limit is added to the number of path separators allowed by an input to Glob, to prevent stack exhaustion issues. Thanks to Juho Nurminen of Mattermost who reported the issue. 
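The observable change, as a small program (illustrative, not part of the CL):

    package main

    import (
        "fmt"
        "path/filepath"
        "strings"
    )

    func main() {
        // More than 10,000 separators now reports ErrBadPattern up front
        // instead of recursing once per separator.
        _, err := filepath.Glob("/*" + strings.Repeat("/", 10001))
        fmt.Println(err == filepath.ErrBadPattern) // true with the limit in place
    }

Patterns produced by real code stay far below the 10,000-separator bound, so only hostile inputs are rejected.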
Fixes #53713
Updates #53416
Fixes CVE-2022-30632

Change-Id: I1b9fd4faa85411a05dbc91dceae1c0c8eb021f07
Reviewed-on: https://team-review.git.corp.google.com/c/golang/go-private/+/1498176
Reviewed-by: Roland Shoemaker
(cherry picked from commit d182a6d1217fd0d04c9babfa9a7ccd3515435c39)
Reviewed-on: https://go-review.googlesource.com/c/go/+/417073
Reviewed-by: Heschi Kreinick
TryBot-Result: Gopher Robot
Run-TryBot: Michael Knyszek
---
 src/path/filepath/match.go      | 12 +++++++++++-
 src/path/filepath/match_test.go | 10 ++++++++++
 2 files changed, 21 insertions(+), 1 deletion(-)

diff --git a/src/path/filepath/match.go b/src/path/filepath/match.go
index c77a26952a..55ed1d75ae 100644
--- a/src/path/filepath/match.go
+++ b/src/path/filepath/match.go
@@ -241,6 +241,16 @@ func getEsc(chunk string) (r rune, nchunk string, err error) {
 // The only possible returned error is ErrBadPattern, when pattern
 // is malformed.
 func Glob(pattern string) (matches []string, err error) {
+	return globWithLimit(pattern, 0)
+}
+
+func globWithLimit(pattern string, depth int) (matches []string, err error) {
+	// This limit is used prevent stack exhaustion issues. See CVE-2022-30632.
+	const pathSeparatorsLimit = 10000
+	if depth == pathSeparatorsLimit {
+		return nil, ErrBadPattern
+	}
+
 	// Check pattern is well-formed.
 	if _, err := Match(pattern, ""); err != nil {
 		return nil, err
@@ -270,7 +280,7 @@ func Glob(pattern string) (matches []string, err error) {
 	}
 
 	var m []string
-	m, err = Glob(dir)
+	m, err = globWithLimit(dir, depth+1)
 	if err != nil {
 		return
 	}
diff --git a/src/path/filepath/match_test.go b/src/path/filepath/match_test.go
index 375c41a7e9..d6282596fe 100644
--- a/src/path/filepath/match_test.go
+++ b/src/path/filepath/match_test.go
@@ -155,6 +155,16 @@ func TestGlob(t *testing.T) {
 	}
 }
 
+func TestCVE202230632(t *testing.T) {
+	// Prior to CVE-2022-30632, this would cause a stack exhaustion given a
+	// large number of separators (more than 4,000,000). There is now a limit
+	// of 10,000.
+	_, err := Glob("/*" + strings.Repeat("/", 10001))
+	if err != ErrBadPattern {
+		t.Fatalf("Glob returned err=%v, want ErrBadPattern", err)
+	}
+}
+
 func TestGlobError(t *testing.T) {
 	bad := []string{`[]`, `nonexist/[]`}
 	for _, pattern := range bad {
-- cgit v1.2.3-54-g00ecf

From cd54600b866db0ad068ab8df06c7f5f6cb55c9b3 Mon Sep 17 00:00:00 2001
From: Roland Shoemaker
Date: Tue, 7 Jun 2022 13:00:43 -0700
Subject: [release-branch.go1.17] encoding/gob: add a depth limit for ignored fields

Enforce a nesting limit of 10,000 for ignored fields during decoding
of messages. This prevents the possibility of triggering stack
exhaustion.
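
For readers unfamiliar with the term, a brief sketch of what an "ignored
field" is — illustrative only, with made-up type and field names, not taken
from the patch: it is a field present in the encoded stream but absent from
the value being decoded into, so the decoder walks its wire type without
storing anything. That walk is the recursion the new maxIgnoreNestingDepth
bounds.

package main

import (
	"bytes"
	"encoding/gob"
	"fmt"
)

// sender carries an extra field that receiver lacks; gob matches by field
// name, so Extra has no destination on decode and goes through the ignore path.
type sender struct {
	Keep  int
	Extra [][]string
}

type receiver struct {
	Keep int
}

func main() {
	var buf bytes.Buffer
	if err := gob.NewEncoder(&buf).Encode(sender{Keep: 7, Extra: [][]string{{"x"}}}); err != nil {
		panic(err)
	}
	var out receiver
	if err := gob.NewDecoder(&buf).Decode(&out); err != nil {
		panic(err)
	}
	fmt.Println(out.Keep) // 7; Extra was skipped, not decoded
}

Decoding succeeds and Extra is silently skipped; with the change below, a
stream whose ignored field nests deeper than the limit aborts with
"invalid nesting depth" instead of overflowing the stack.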
Fixes #53709
Updates #53615
Fixes CVE-2022-30635

Change-Id: I05103d06dd5ca3945fcba3c1f5d3b5a645e8fb0f
Reviewed-on: https://team-review.git.corp.google.com/c/golang/go-private/+/1484771
Reviewed-by: Damien Neil
Reviewed-by: Tatiana Bradley
(cherry picked from commit 55e8f938d22bfec29cc9dc9671044c5a41d1ea9c)
Reviewed-on: https://go-review.googlesource.com/c/go/+/417074
Run-TryBot: Heschi Kreinick
TryBot-Result: Gopher Robot
Reviewed-by: Heschi Kreinick
---
 src/encoding/gob/decode.go         | 19 ++++++++++++-------
 src/encoding/gob/gobencdec_test.go | 24 ++++++++++++++++++++++++
 2 files changed, 36 insertions(+), 7 deletions(-)

diff --git a/src/encoding/gob/decode.go b/src/encoding/gob/decode.go
index d2f6c749b1..0e0ec75ccc 100644
--- a/src/encoding/gob/decode.go
+++ b/src/encoding/gob/decode.go
@@ -871,8 +871,13 @@ func (dec *Decoder) decOpFor(wireId typeId, rt reflect.Type, name string, inProg
 	return &op
 }
 
+var maxIgnoreNestingDepth = 10000
+
 // decIgnoreOpFor returns the decoding op for a field that has no destination.
-func (dec *Decoder) decIgnoreOpFor(wireId typeId, inProgress map[typeId]*decOp) *decOp {
+func (dec *Decoder) decIgnoreOpFor(wireId typeId, inProgress map[typeId]*decOp, depth int) *decOp {
+	if depth > maxIgnoreNestingDepth {
+		error_(errors.New("invalid nesting depth"))
+	}
 	// If this type is already in progress, it's a recursive type (e.g. map[string]*T).
 	// Return the pointer to the op we're already building.
 	if opPtr := inProgress[wireId]; opPtr != nil {
@@ -896,7 +901,7 @@ func (dec *Decoder) decIgnoreOpFor(wireId typeId, inProgress map[typeId]*decOp)
 		errorf("bad data: undefined type %s", wireId.string())
 	case wire.ArrayT != nil:
 		elemId := wire.ArrayT.Elem
-		elemOp := dec.decIgnoreOpFor(elemId, inProgress)
+		elemOp := dec.decIgnoreOpFor(elemId, inProgress, depth+1)
 		op = func(i *decInstr, state *decoderState, value reflect.Value) {
 			state.dec.ignoreArray(state, *elemOp, wire.ArrayT.Len)
 		}
@@ -904,15 +909,15 @@
 	case wire.MapT != nil:
 		keyId := dec.wireType[wireId].MapT.Key
 		elemId := dec.wireType[wireId].MapT.Elem
-		keyOp := dec.decIgnoreOpFor(keyId, inProgress)
-		elemOp := dec.decIgnoreOpFor(elemId, inProgress)
+		keyOp := dec.decIgnoreOpFor(keyId, inProgress, depth+1)
+		elemOp := dec.decIgnoreOpFor(elemId, inProgress, depth+1)
 		op = func(i *decInstr, state *decoderState, value reflect.Value) {
 			state.dec.ignoreMap(state, *keyOp, *elemOp)
 		}
 
 	case wire.SliceT != nil:
 		elemId := wire.SliceT.Elem
-		elemOp := dec.decIgnoreOpFor(elemId, inProgress)
+		elemOp := dec.decIgnoreOpFor(elemId, inProgress, depth+1)
 		op = func(i *decInstr, state *decoderState, value reflect.Value) {
 			state.dec.ignoreSlice(state, *elemOp)
 		}
@@ -1073,7 +1078,7 @@ func (dec *Decoder) compileSingle(remoteId typeId, ut *userTypeInfo) (engine *de
 func (dec *Decoder) compileIgnoreSingle(remoteId typeId) *decEngine {
 	engine := new(decEngine)
 	engine.instr = make([]decInstr, 1) // one item
-	op := dec.decIgnoreOpFor(remoteId, make(map[typeId]*decOp))
+	op := dec.decIgnoreOpFor(remoteId, make(map[typeId]*decOp), 0)
 	ovfl := overflow(dec.typeString(remoteId))
 	engine.instr[0] = decInstr{*op, 0, nil, ovfl}
 	engine.numInstr = 1
@@ -1118,7 +1123,7 @@ func (dec *Decoder) compileDec(remoteId typeId, ut *userTypeInfo) (engine *decEn
 		localField, present := srt.FieldByName(wireField.Name)
 		// TODO(r): anonymous names
 		if !present || !isExported(wireField.Name) {
-			op := dec.decIgnoreOpFor(wireField.Id, make(map[typeId]*decOp))
+			op := dec.decIgnoreOpFor(wireField.Id, make(map[typeId]*decOp), 0)
 			engine.instr[fieldnum] = decInstr{*op, fieldnum, nil, ovfl}
 			continue
 		}
diff --git a/src/encoding/gob/gobencdec_test.go b/src/encoding/gob/gobencdec_test.go
index 6d2c8db42d..1b52ecc6c8 100644
--- a/src/encoding/gob/gobencdec_test.go
+++ b/src/encoding/gob/gobencdec_test.go
@@ -12,6 +12,7 @@ import (
 	"fmt"
 	"io"
 	"net"
+	"reflect"
 	"strings"
 	"testing"
 	"time"
@@ -796,3 +797,26 @@ func TestNetIP(t *testing.T) {
 		t.Errorf("decoded to %v, want 1.2.3.4", ip.String())
 	}
 }
+
+func TestIngoreDepthLimit(t *testing.T) {
+	// We don't test the actual depth limit because it requires building an
+	// extremely large message, which takes quite a while.
+	oldNestingDepth := maxIgnoreNestingDepth
+	maxIgnoreNestingDepth = 100
+	defer func() { maxIgnoreNestingDepth = oldNestingDepth }()
+	b := new(bytes.Buffer)
+	enc := NewEncoder(b)
+	typ := reflect.TypeOf(int(0))
+	nested := reflect.ArrayOf(1, typ)
+	for i := 0; i < 100; i++ {
+		nested = reflect.ArrayOf(1, nested)
+	}
+	badStruct := reflect.New(reflect.StructOf([]reflect.StructField{{Name: "F", Type: nested}}))
+	enc.Encode(badStruct.Interface())
+	dec := NewDecoder(b)
+	var output struct{ Hello int }
+	expectedErr := "invalid nesting depth"
+	if err := dec.Decode(&output); err == nil || err.Error() != expectedErr {
+		t.Errorf("Decode didn't fail with depth limit of 100: want %q, got %q", expectedErr, err)
+	}
+}
-- cgit v1.2.3-54-g00ecf

From 1ed3c127daceaffb9aadc806ba60f0b51b47421b Mon Sep 17 00:00:00 2001
From: Gopher Robot
Date: Tue, 12 Jul 2022 19:59:59 +0000
Subject: [release-branch.go1.17] go1.17.12

Change-Id: I12bfc6a625d61a7a25ecdaa10c8f78953c4c3bcf
Reviewed-on: https://go-review.googlesource.com/c/go/+/417178
Run-TryBot: Gopher Robot
Auto-Submit: Gopher Robot
TryBot-Result: Gopher Robot
Reviewed-by: Michael Knyszek
Reviewed-by: Heschi Kreinick
---
 VERSION | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/VERSION b/VERSION
index 3e9980643e..760e383235 100644
--- a/VERSION
+++ b/VERSION
@@ -1 +1 @@
-go1.17.11
\ No newline at end of file
+go1.17.12
\ No newline at end of file
-- cgit v1.2.3-54-g00ecf
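
Closing illustration for the compress/gzip change at the top of this series —
a sketch, not part of any patch, using an arbitrary member count rather than
the 4e6 the new test repeats: the rewritten Read consumes consecutive gzip
members iteratively, so a stream built from many concatenated empty members
(the shape of the CVE-2022-30631 reproducer) is drained without growing the
stack.

package main

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"io"
)

func main() {
	// One empty gzip member, repeated back to back many times.
	var member bytes.Buffer
	zw := gzip.NewWriter(&member)
	if err := zw.Close(); err != nil {
		panic(err)
	}
	stream := bytes.Repeat(member.Bytes(), 100000)

	// NewReader handles multistream input by default; with the loop-based
	// Read, each empty member is skipped in turn rather than via recursion.
	zr, err := gzip.NewReader(bytes.NewReader(stream))
	if err != nil {
		panic(err)
	}
	n, err := io.Copy(io.Discard, zr)
	fmt.Println(n, err) // 0 <nil>: every member was empty
}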