author    Michael Anthony Knyszek <mknyszek@google.com>  2020-09-09 16:52:18 +0000
committer Michael Knyszek <mknyszek@google.com>          2020-09-09 17:48:56 +0000
commit    34835df04891a1d54394888b763af88f9476101d (patch)
tree      6e903f81e4021e9d149a80b398c16918edfb0fd9 /src/runtime/export_test.go
parent    9ef3ee339634d1a349e7b9bb4cae32aacc326f61 (diff)
runtime: fix ReadMemStatsSlow's and CheckScavengedBits' chunk iteration
Both ReadMemStatsSlow and CheckScavengedBits iterate over the page
allocator's chunks but don't actually check if they exist. During the
development process the chunks index became sparse, so now this was a
possibility. If the runtime tests' heap is sparse we might end up
segfaulting in either one of these functions, though this will
generally be very rare.

The pattern here to return nil for a nonexistent chunk is also useful
elsewhere, so this change introduces tryChunkOf which won't throw, but
might return nil. It also updates the documentation of chunkOf.

Fixes #41296.

Change-Id: Id5ae0ca3234480de1724fdf2e3677eeedcf76fa0
Reviewed-on: https://go-review.googlesource.com/c/go/+/253777
Run-TryBot: Michael Knyszek <mknyszek@google.com>
Reviewed-by: Keith Randall <khr@golang.org>
TryBot-Result: Gobot Gobot <gobot@golang.org>
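The matching change in mpagealloc.go is not part of this page (the
diffstat below is limited to export_test.go), but the two-level lookup
this commit removes from PallocData shows the shape of the new helper.
A minimal sketch of tryChunkOf, assuming the pageAlloc receiver and
the sparse chunks index visible in the removed lines:

// tryChunkOf returns the bitmap data for the given chunk.
//
// Returns nil if the chunk's L2 slice of the sparse chunks index has
// not been mapped yet, instead of throwing the way chunkOf may on a
// nonexistent chunk. (A sketch only, not the committed code; the real
// definition lives alongside chunkOf in mpagealloc.go.)
func (p *pageAlloc) tryChunkOf(ci chunkIdx) *pallocData {
	l2 := p.chunks[ci.l1()]
	if l2 == nil {
		return nil
	}
	return &l2[ci.l2()]
}

Callers that iterate over the range [mheap_.pages.start,
mheap_.pages.end), as both fixed functions below do, simply skip a nil
result and move on to the next chunk index.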
Diffstat (limited to 'src/runtime/export_test.go')
-rw-r--r--  src/runtime/export_test.go  17
1 file changed, 10 insertions, 7 deletions
diff --git a/src/runtime/export_test.go b/src/runtime/export_test.go
index 3307000c51..929bb35db6 100644
--- a/src/runtime/export_test.go
+++ b/src/runtime/export_test.go
@@ -358,7 +358,11 @@ func ReadMemStatsSlow() (base, slow MemStats) {
 		}
 		for i := mheap_.pages.start; i < mheap_.pages.end; i++ {
-			pg := mheap_.pages.chunkOf(i).scavenged.popcntRange(0, pallocChunkPages)
+			chunk := mheap_.pages.tryChunkOf(i)
+			if chunk == nil {
+				continue
+			}
+			pg := chunk.scavenged.popcntRange(0, pallocChunkPages)
 			slow.HeapReleased += uint64(pg) * pageSize
 		}
 		for _, p := range allp {
@@ -756,11 +760,7 @@ func (p *PageAlloc) InUse() []AddrRange {
 // Returns nil if the PallocData's L2 is missing.
 func (p *PageAlloc) PallocData(i ChunkIdx) *PallocData {
 	ci := chunkIdx(i)
-	l2 := (*pageAlloc)(p).chunks[ci.l1()]
-	if l2 == nil {
-		return nil
-	}
-	return (*PallocData)(&l2[ci.l2()])
+	return (*PallocData)((*pageAlloc)(p).tryChunkOf(ci))
 }

 // AddrRange represents a range over addresses.
@@ -900,7 +900,10 @@ func CheckScavengedBitsCleared(mismatches []BitsMismatch) (n int, ok bool) {
 		lock(&mheap_.lock)
 	chunkLoop:
 		for i := mheap_.pages.start; i < mheap_.pages.end; i++ {
-			chunk := mheap_.pages.chunkOf(i)
+			chunk := mheap_.pages.tryChunkOf(i)
+			if chunk == nil {
+				continue
+			}
 			for j := 0; j < pallocChunkPages/64; j++ {
 				// Run over each 64-bit bitmap section and ensure
 				// scavenged is being cleared properly on allocation.