author     Michael Anthony Knyszek <mknyszek@google.com>  2020-09-09 16:52:18 +0000
committer  Michael Knyszek <mknyszek@google.com>  2020-09-09 17:48:56 +0000
commit     34835df04891a1d54394888b763af88f9476101d
tree       6e903f81e4021e9d149a80b398c16918edfb0fd9
parent     9ef3ee339634d1a349e7b9bb4cae32aacc326f61
runtime: fix ReadMemStatsSlow's and CheckScavengedBits' chunk iteration
Both ReadMemStatsSlow and CheckScavengedBits iterate over the page
allocator's chunks but don't actually check whether they exist. The
chunks index became sparse during development, so a missing chunk is
now a real possibility. If the runtime tests' heap is sparse we might
end up segfaulting in either of these functions, though this will
generally be very rare.

The pattern of returning nil for a nonexistent chunk is also useful
elsewhere, so this change introduces tryChunkOf, which won't throw but
might return nil. It also updates the documentation of chunkOf.

Fixes #41296.

Change-Id: Id5ae0ca3234480de1724fdf2e3677eeedcf76fa0
Reviewed-on: https://go-review.googlesource.com/c/go/+/253777
Run-TryBot: Michael Knyszek <mknyszek@google.com>
Reviewed-by: Keith Randall <khr@golang.org>
TryBot-Result: Gobot Gobot <gobot@golang.org>
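To make the pattern concrete, here is a minimal, self-contained sketch of a sparse two-level index with a non-throwing lookup. The names and sizes below (pageIndex, chunkData, l1Size, l2Size) are hypothetical stand-ins chosen for illustration; the runtime's real types are pageAlloc, pallocData, and chunkIdx in runtime/mpagealloc.go.

package main

import "fmt"

// Simplified stand-ins for the runtime's two-level chunks index; the
// real chunkIdx, pallocData, and level sizes live in runtime/mpagealloc.go.
const (
	l1Size = 4 // number of L1 entries (hypothetical)
	l2Size = 8 // chunks per L2 block (hypothetical)
)

type chunkData struct {
	scavenged uint64 // stand-in for the per-chunk scavenge bitmap
}

type pageIndex struct {
	// Sparse index: an L1 entry stays nil until the corresponding
	// range of chunks is mapped.
	chunks [l1Size]*[l2Size]chunkData
}

// tryChunkOf mirrors the pattern this change introduces: check the
// L1 entry and return nil for an unmapped chunk instead of faulting.
func (p *pageIndex) tryChunkOf(ci int) *chunkData {
	l2 := p.chunks[ci/l2Size]
	if l2 == nil {
		return nil
	}
	return &l2[ci%l2Size]
}

func main() {
	var p pageIndex
	p.chunks[1] = new([l2Size]chunkData) // map only one L1 entry

	// Iterate every possible chunk and skip holes, like the fixed
	// loops in ReadMemStatsSlow and CheckScavengedBitsCleared.
	for ci := 0; ci < l1Size*l2Size; ci++ {
		chunk := p.tryChunkOf(ci)
		if chunk == nil {
			continue // sparse hole: no chunk data mapped here
		}
		fmt.Printf("chunk %d: scavenged=%#x\n", ci, chunk.scavenged)
	}
}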
-rw-r--r--  src/runtime/export_test.go  17
-rw-r--r--  src/runtime/mpagealloc.go   13
2 files changed, 23 insertions(+), 7 deletions(-)
diff --git a/src/runtime/export_test.go b/src/runtime/export_test.go
index 3307000c51..929bb35db6 100644
--- a/src/runtime/export_test.go
+++ b/src/runtime/export_test.go
@@ -358,7 +358,11 @@ func ReadMemStatsSlow() (base, slow MemStats) {
 		}
 		for i := mheap_.pages.start; i < mheap_.pages.end; i++ {
-			pg := mheap_.pages.chunkOf(i).scavenged.popcntRange(0, pallocChunkPages)
+			chunk := mheap_.pages.tryChunkOf(i)
+			if chunk == nil {
+				continue
+			}
+			pg := chunk.scavenged.popcntRange(0, pallocChunkPages)
 			slow.HeapReleased += uint64(pg) * pageSize
 		}
 		for _, p := range allp {
@@ -756,11 +760,7 @@ func (p *PageAlloc) InUse() []AddrRange {
 // Returns nil if the PallocData's L2 is missing.
 func (p *PageAlloc) PallocData(i ChunkIdx) *PallocData {
 	ci := chunkIdx(i)
-	l2 := (*pageAlloc)(p).chunks[ci.l1()]
-	if l2 == nil {
-		return nil
-	}
-	return (*PallocData)(&l2[ci.l2()])
+	return (*PallocData)((*pageAlloc)(p).tryChunkOf(ci))
 }
 
 // AddrRange represents a range over addresses.
@@ -900,7 +900,10 @@ func CheckScavengedBitsCleared(mismatches []BitsMismatch) (n int, ok bool) {
 		lock(&mheap_.lock)
 	chunkLoop:
 		for i := mheap_.pages.start; i < mheap_.pages.end; i++ {
-			chunk := mheap_.pages.chunkOf(i)
+			chunk := mheap_.pages.tryChunkOf(i)
+			if chunk == nil {
+				continue
+			}
 			for j := 0; j < pallocChunkPages/64; j++ {
 				// Run over each 64-bit bitmap section and ensure
 				// scavenged is being cleared properly on allocation.
diff --git a/src/runtime/mpagealloc.go b/src/runtime/mpagealloc.go
index 8b3c62c375..c90a6378bd 100644
--- a/src/runtime/mpagealloc.go
+++ b/src/runtime/mpagealloc.go
@@ -326,7 +326,20 @@ func (s *pageAlloc) init(mheapLock *mutex, sysStat *uint64) {
 	s.scav.scavLWM = maxSearchAddr
 }
 
+// tryChunkOf returns the bitmap data for the given chunk.
+//
+// Returns nil if the chunk data has not been mapped.
+func (s *pageAlloc) tryChunkOf(ci chunkIdx) *pallocData {
+	l2 := s.chunks[ci.l1()]
+	if l2 == nil {
+		return nil
+	}
+	return &l2[ci.l2()]
+}
+
 // chunkOf returns the chunk at the given chunk index.
+//
+// The chunk index must be valid or this method may throw.
 func (s *pageAlloc) chunkOf(ci chunkIdx) *pallocData {
 	return &s.chunks[ci.l1()][ci.l2()]
 }
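A note on the design choice here: chunkOf keeps the nil check off what is presumably the allocator's hot path, where callers hold mheap_.lock and only pass indices for chunks that are known to be mapped, while tryChunkOf trades one extra branch for safety in code that may probe chunks the runtime never mapped, such as ReadMemStatsSlow, CheckScavengedBitsCleared, and the test-only PallocData accessor updated above.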