author     Michael Anthony Knyszek <mknyszek@google.com>  2020-04-22 21:36:11 +0000
committer  Ian Lance Taylor <iant@golang.org>             2020-05-01 17:37:02 +0000
commit     1b41fcbce503fd9f383ae15bfdd24720fde4a188 (patch)
tree       d7baa9cb0d58070479ce30112b8b6cf15d05834b
parent     c4b92691b021dbed5ab243ced91c030286dcb922 (diff)
download   go-1b41fcbce503fd9f383ae15bfdd24720fde4a188.tar.gz
           go-1b41fcbce503fd9f383ae15bfdd24720fde4a188.zip
[release-branch.go1.14] runtime: ensure allocToCache updates searchAddr in a valid way
Currently allocToCache assumes it can move the search address past the
block it allocated the cache from, which violates the property that
searchAddr should always point to mapped memory (i.e. memory represented
by pageAlloc.inUse).

This bug was already fixed once for pageAlloc.alloc in the Go 1.14
release via CL 216697, but that change failed to take into account
allocToCache.

For #38605.
Fixes #38606.

Change-Id: Id08180aa10d19dc0f9f551a1d9e327a295560dff
Reviewed-on: https://go-review.googlesource.com/c/go/+/229577
Run-TryBot: Michael Knyszek <mknyszek@google.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: David Chase <drchase@google.com>
(cherry picked from commit 287d1ec96c1271de532c6b1160cd9cbbe717ee34)
Reviewed-on: https://go-review.googlesource.com/c/go/+/230377
Reviewed-by: Austin Clements <austin@google.com>
Reviewed-by: Michael Pratt <mpratt@google.com>
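To make the off-by-one concrete, here is a minimal, self-contained Go
sketch, not the runtime's actual code: pageSize, pageCachePages, and the
mapped check below are simplified stand-ins for the runtime's constants
and for pageAlloc.inUse. It shows why the old update could leave
searchAddr pointing past mapped memory while the fixed one cannot.

package main

import "fmt"

const (
	pageSize       = 8192 // assumed runtime page size
	pageCachePages = 64   // pages tracked by one pageCache (64-bit bitmap)
)

// mapped reports whether addr falls inside the block the cache was just
// allocated from; a stand-in for checking pageAlloc.inUse.
func mapped(base, addr uintptr) bool {
	return addr >= base && addr < base+pageSize*pageCachePages
}

func main() {
	base := uintptr(0x100000) // hypothetical cache base address

	// Old behavior: one page past the cached block. If the block ends at
	// the edge of the mapped region, this address is unmapped.
	old := base + pageSize*pageCachePages

	// Fixed behavior: the last page of the cached block, which is always
	// mapped because the cache itself came from mapped memory.
	fixed := base + pageSize*(pageCachePages-1)

	fmt.Println(mapped(base, old))   // false
	fmt.Println(mapped(base, fixed)) // true
}

Pointing searchAddr at the last (already allocated) page is slightly
conservative: since searchAddr acts as a lower bound for the next search,
correctness is preserved at the cost of rescanning at most one page.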
-rw-r--r--  src/runtime/mpagecache.go       13
-rw-r--r--  src/runtime/mpagecache_test.go  33
2 files changed, 40 insertions(+), 6 deletions(-)
diff --git a/src/runtime/mpagecache.go b/src/runtime/mpagecache.go
index 9fc338bd8e..a074961840 100644
--- a/src/runtime/mpagecache.go
+++ b/src/runtime/mpagecache.go
@@ -148,9 +148,14 @@ func (s *pageAlloc) allocToCache() pageCache {
// Update as an allocation, but note that it's not contiguous.
s.update(c.base, pageCachePages, false, true)

- // We're always searching for the first free page, and we always know the
- // up to pageCache size bits will be allocated, so we can always move the
- // searchAddr past the cache.
- s.searchAddr = c.base + pageSize*pageCachePages
+ // Set the search address to the last page represented by the cache.
+ // Since all of the pages in this block are going to the cache, and we
+ // searched for the first free page, we can confidently start at the
+ // next page.
+ //
+ // However, s.searchAddr is not allowed to point into unmapped heap memory
+ // unless it is maxSearchAddr, so make it the last page as opposed to
+ // the page after.
+ s.searchAddr = c.base + pageSize*(pageCachePages-1)
return c
}
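The invariant the new comment refers to (searchAddr must point into memory
covered by pageAlloc.inUse unless it is maxSearchAddr) can be sketched as
follows. This is an illustration only: addrRange, inUse, and maxSearchAddr
are simplified stand-ins for the runtime's internal types and sentinel.

package main

import "fmt"

// addrRange is a simplified stand-in for the runtime's address-range type.
type addrRange struct{ base, limit uintptr }

// contains reports whether addr lies in [base, limit).
func (r addrRange) contains(addr uintptr) bool {
	return r.base <= addr && addr < r.limit
}

// maxSearchAddr stands in for the runtime's sentinel meaning "no free pages".
const maxSearchAddr = ^uintptr(0)

// validSearchAddr expresses the invariant: searchAddr must fall inside one
// of the mapped ranges (pageAlloc.inUse) or equal the sentinel.
func validSearchAddr(inUse []addrRange, searchAddr uintptr) bool {
	if searchAddr == maxSearchAddr {
		return true
	}
	for _, r := range inUse {
		if r.contains(searchAddr) {
			return true
		}
	}
	return false
}

func main() {
	inUse := []addrRange{{base: 0x100000, limit: 0x200000}} // hypothetical mapped range
	fmt.Println(validSearchAddr(inUse, 0x1ff000)) // true: inside the range
	fmt.Println(validSearchAddr(inUse, 0x200000)) // false: one past it, as the old code could produce
}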
diff --git a/src/runtime/mpagecache_test.go b/src/runtime/mpagecache_test.go
index b8cc0bd965..2ed0c0aa6a 100644
--- a/src/runtime/mpagecache_test.go
+++ b/src/runtime/mpagecache_test.go
@@ -260,12 +260,13 @@ func TestPageAllocAllocToCache(t *testing.T) {
if GOOS == "openbsd" && testing.Short() {
t.Skip("skipping because virtual memory is limited; see #36210")
}
- tests := map[string]struct {
+ type test struct {
before map[ChunkIdx][]BitRange
scav map[ChunkIdx][]BitRange
hits []PageCache // expected base addresses and patterns
after map[ChunkIdx][]BitRange
- }{
+ }
+ tests := map[string]test{
"AllFree": {
before: map[ChunkIdx][]BitRange{
BaseChunkIdx: {},
@@ -349,6 +350,34 @@ func TestPageAllocAllocToCache(t *testing.T) {
},
},
}
+ if PageAlloc64Bit != 0 {
+ const chunkIdxBigJump = 0x100000 // chunk index offset which translates to O(TiB)
+
+ // This test is similar to the one with the same name for
+ // pageAlloc.alloc and serves the same purpose.
+ // See mpagealloc_test.go for details.
+ sumsPerPhysPage := ChunkIdx(PhysPageSize / PallocSumBytes)
+ baseChunkIdx := BaseChunkIdx &^ (sumsPerPhysPage - 1)
+ tests["DiscontiguousMappedSumBoundary"] = test{
+ before: map[ChunkIdx][]BitRange{
+ baseChunkIdx + sumsPerPhysPage - 1: {{0, PallocChunkPages - 1}},
+ baseChunkIdx + chunkIdxBigJump: {{1, PallocChunkPages - 1}},
+ },
+ scav: map[ChunkIdx][]BitRange{
+ baseChunkIdx + sumsPerPhysPage - 1: {},
+ baseChunkIdx + chunkIdxBigJump: {},
+ },
+ hits: []PageCache{
+ NewPageCache(PageBase(baseChunkIdx+sumsPerPhysPage-1, PallocChunkPages-64), 1<<63, 0),
+ NewPageCache(PageBase(baseChunkIdx+chunkIdxBigJump, 0), 1, 0),
+ NewPageCache(0, 0, 0),
+ },
+ after: map[ChunkIdx][]BitRange{
+ baseChunkIdx + sumsPerPhysPage - 1: {{0, PallocChunkPages}},
+ baseChunkIdx + chunkIdxBigJump: {{0, PallocChunkPages}},
+ },
+ }
+ }
for name, v := range tests {
v := v
t.Run(name, func(t *testing.T) {