author    Michael Anthony Knyszek <mknyszek@google.com>  2020-09-09 16:52:18 +0000
committer Dmitri Shuralyov <dmitshur@golang.org>  2020-09-10 18:38:18 +0000
commit    b1be1428dc7d988c2be9006b1cbdf3e513d299b6
tree      ecac6792565547680e0a123403a88e3c5d59ca0d
parent    bbee5236cd64a11a2bfd194eb9f809f51cae0870
[release-branch.go1.15] runtime: fix ReadMemStatsSlow's and CheckScavengedBits' chunk iteration
Both ReadMemStatsSlow and CheckScavengedBits iterate over the page
allocator's chunks but don't actually check whether they exist. During
development the chunks index became sparse, so this became a real
possibility. If the runtime tests' heap is sparse, we might end up
segfaulting in either of these functions, though this will generally be
very rare.

The pattern here of returning nil for a nonexistent chunk is also useful
elsewhere, so this change introduces tryChunkOf, which won't throw but
might return nil. It also updates the documentation of chunkOf.

For #41296.
Fixes #41317.

Change-Id: Id5ae0ca3234480de1724fdf2e3677eeedcf76fa0
Reviewed-on: https://go-review.googlesource.com/c/go/+/253777
Run-TryBot: Michael Knyszek <mknyszek@google.com>
Reviewed-by: Keith Randall <khr@golang.org>
TryBot-Result: Gobot Gobot <gobot@golang.org>
(cherry picked from commit 34835df04891a1d54394888b763af88f9476101d)
Reviewed-on: https://go-review.googlesource.com/c/go/+/253917
-rw-r--r--  src/runtime/export_test.go  17
-rw-r--r--  src/runtime/mpagealloc.go   13
2 files changed, 23 insertions, 7 deletions
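
Before reading the diff, a minimal, self-contained Go sketch of the sparse
two-level index pattern involved may help. Every name and size in it
(l2Entries, the 8-slot L1 array, the pallocData stub) is an invented
stand-in for illustration, not the runtime's real structures; only the
two-level lookup and the nil check mirror the actual tryChunkOf below.

	package main

	import "fmt"

	const l2Entries = 4 // stand-in for the real chunks-per-L1-block count

	type pallocData struct{ scavenged uint64 } // stand-in chunk bitmap

	type chunkIdx int

	func (ci chunkIdx) l1() int { return int(ci) / l2Entries }
	func (ci chunkIdx) l2() int { return int(ci) % l2Entries }

	type pageAlloc struct {
		// chunks is sparse: an L1 slot stays nil until a chunk in it is mapped.
		chunks [8]*[l2Entries]pallocData
	}

	// tryChunkOf returns nil for an unmapped chunk, so callers can skip holes.
	func (s *pageAlloc) tryChunkOf(ci chunkIdx) *pallocData {
		l2 := s.chunks[ci.l1()]
		if l2 == nil {
			return nil
		}
		return &l2[ci.l2()]
	}

	// chunkOf assumes the chunk exists; on a nil L1 slot it dereferences nil
	// (a nil-pointer panic here, a segfault in the runtime proper), which is
	// the crash the patch removes from the test helpers.
	func (s *pageAlloc) chunkOf(ci chunkIdx) *pallocData {
		return &s.chunks[ci.l1()][ci.l2()]
	}

	func main() {
		var p pageAlloc
		p.chunks[1] = new([l2Entries]pallocData) // map only L1 slot 1

		// Iterate the way ReadMemStatsSlow now does: check, then use.
		for ci := chunkIdx(0); ci < 2*l2Entries; ci++ {
			chunk := p.tryChunkOf(ci)
			if chunk == nil {
				continue // sparse hole: skip instead of crashing
			}
			fmt.Printf("chunk %d is mapped (scavenged bits %#x)\n", ci, chunk.scavenged)
		}
	}

Running the sketch prints only the chunks under the one mapped L1 slot;
the unmapped indices are skipped rather than crashing, which is the
behavior the patch gives ReadMemStatsSlow and CheckScavengedBitsCleared.
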
diff --git a/src/runtime/export_test.go b/src/runtime/export_test.go
index 5ab03f3f99..e43813bcc4 100644
--- a/src/runtime/export_test.go
+++ b/src/runtime/export_test.go
@@ -360,7 +360,11 @@ func ReadMemStatsSlow() (base, slow MemStats) {
 		}
 
 		for i := mheap_.pages.start; i < mheap_.pages.end; i++ {
-			pg := mheap_.pages.chunkOf(i).scavenged.popcntRange(0, pallocChunkPages)
+			chunk := mheap_.pages.tryChunkOf(i)
+			if chunk == nil {
+				continue
+			}
+			pg := chunk.scavenged.popcntRange(0, pallocChunkPages)
 			slow.HeapReleased += uint64(pg) * pageSize
 		}
 		for _, p := range allp {
@@ -758,11 +762,7 @@ func (p *PageAlloc) InUse() []AddrRange {
 // Returns nil if the PallocData's L2 is missing.
 func (p *PageAlloc) PallocData(i ChunkIdx) *PallocData {
 	ci := chunkIdx(i)
-	l2 := (*pageAlloc)(p).chunks[ci.l1()]
-	if l2 == nil {
-		return nil
-	}
-	return (*PallocData)(&l2[ci.l2()])
+	return (*PallocData)((*pageAlloc)(p).tryChunkOf(ci))
 }
 
 // AddrRange represents a range over addresses.
@@ -902,7 +902,10 @@ func CheckScavengedBitsCleared(mismatches []BitsMismatch) (n int, ok bool) {
 		lock(&mheap_.lock)
 	chunkLoop:
 		for i := mheap_.pages.start; i < mheap_.pages.end; i++ {
-			chunk := mheap_.pages.chunkOf(i)
+			chunk := mheap_.pages.tryChunkOf(i)
+			if chunk == nil {
+				continue
+			}
 			for j := 0; j < pallocChunkPages/64; j++ {
 				// Run over each 64-bit bitmap section and ensure
 				// scavenged is being cleared properly on allocation.
diff --git a/src/runtime/mpagealloc.go b/src/runtime/mpagealloc.go
index 8b3c62c375..c90a6378bd 100644
--- a/src/runtime/mpagealloc.go
+++ b/src/runtime/mpagealloc.go
@@ -326,7 +326,20 @@ func (s *pageAlloc) init(mheapLock *mutex, sysStat *uint64) {
 	s.scav.scavLWM = maxSearchAddr
 }
 
+// tryChunkOf returns the bitmap data for the given chunk.
+//
+// Returns nil if the chunk data has not been mapped.
+func (s *pageAlloc) tryChunkOf(ci chunkIdx) *pallocData {
+	l2 := s.chunks[ci.l1()]
+	if l2 == nil {
+		return nil
+	}
+	return &l2[ci.l2()]
+}
+
 // chunkOf returns the chunk at the given chunk index.
+//
+// The chunk index must be valid or this method may throw.
 func (s *pageAlloc) chunkOf(ci chunkIdx) *pallocData {
 	return &s.chunks[ci.l1()][ci.l2()]
 }
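
Note the resulting division of labor: chunkOf stays a direct, branch-free
lookup for runtime paths that already know the chunk is mapped, while
tryChunkOf pays a single nil check so that diagnostic iterators like
ReadMemStatsSlow and CheckScavengedBitsCleared can walk the full
[start, end) range and skip holes in the sparse index.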