path: root/src/runtime/mheap.go
author     Michael Anthony Knyszek <mknyszek@google.com>  2019-11-21 17:05:14 +0000
committer  Michael Knyszek <mknyszek@google.com>          2020-05-08 16:24:40 +0000
commit  55ec5182d7b84eb2461c495a55984162b23f3df8 (patch)
tree    87d24203820c776f090d73e93fd931cbfdcffdb4 /src/runtime/mheap.go
parent  b1a48af7e8ee87cc46e1bbb07f81ac4853e0f27b (diff)
runtime: remove scavAddr in favor of address ranges
This change removes the concept of s.scavAddr in favor of explicitly
reserving and unreserving address ranges. s.scavAddr has several
raciness problems that can cause the scavenger to miss updates or be
moved back unnecessarily, forcing future scavenge calls to re-iterate
over already-searched address space.

This change achieves that by replacing scavAddr with a second
addrRanges which is cloned from s.inUse at the end of each sweep phase.
Ranges from this second addrRanges are then reserved by scavengers
(with the reservation size proportional to the heap size), which can
then safely iterate over those ranges without worrying about another
scavenger coming in.

Fixes #35788.

Change-Id: Ief01ae170384174875118742f6c26b2a41cbb66d
Reviewed-on: https://go-review.googlesource.com/c/go/+/208378
Run-TryBot: Michael Knyszek <mknyszek@google.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: David Chase <drchase@google.com>
Reviewed-by: Austin Clements <austin@google.com>
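As a rough illustration of the approach the message describes, where each scavenger reserves a whole address range instead of racing over a shared scavAddr cursor, here is a minimal Go sketch. The names addrRange, scavengeWork, reserve, and unreserve are invented for illustration and are not the runtime's actual API:

package main

import (
	"fmt"
	"sync"
)

// addrRange is a [base, limit) range of addresses.
type addrRange struct{ base, limit uintptr }

// scavengeWork holds the ranges still unsearched in the current
// scavenge generation, cloned from the heap's in-use ranges.
type scavengeWork struct {
	mu     sync.Mutex
	ranges []addrRange
}

// reserve hands an entire range to one scavenger, so no other
// scavenger can walk the same addresses concurrently.
func (w *scavengeWork) reserve() (addrRange, bool) {
	w.mu.Lock()
	defer w.mu.Unlock()
	if len(w.ranges) == 0 {
		return addrRange{}, false
	}
	r := w.ranges[len(w.ranges)-1]
	w.ranges = w.ranges[:len(w.ranges)-1]
	return r, true
}

// unreserve returns the unsearched remainder of a reservation so a
// later scavenger can pick up where this one stopped.
func (w *scavengeWork) unreserve(r addrRange) {
	if r.base >= r.limit {
		return // nothing left to return
	}
	w.mu.Lock()
	w.ranges = append(w.ranges, r)
	w.mu.Unlock()
}

func main() {
	w := &scavengeWork{ranges: []addrRange{{0x100000, 0x180000}}}
	if r, ok := w.reserve(); ok {
		// Pretend we scavenged the first 0x20000 bytes, then stopped.
		w.unreserve(addrRange{r.base + 0x20000, r.limit})
		fmt.Printf("reserved %#x-%#x, returned the tail\n", r.base, r.limit)
	}
}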
Diffstat (limited to 'src/runtime/mheap.go')
-rw-r--r--  src/runtime/mheap.go  20
1 file changed, 11 insertions(+), 9 deletions(-)
diff --git a/src/runtime/mheap.go b/src/runtime/mheap.go
index 558ff1f689..3f57b0b6e9 100644
--- a/src/runtime/mheap.go
+++ b/src/runtime/mheap.go
@@ -98,7 +98,7 @@ type mheap struct {
// For !go115NewMCentralImpl.
sweepSpans [2]gcSweepBuf
- // _ uint32 // align uint64 fields on 32-bit for atomics
+ _ uint32 // align uint64 fields on 32-bit for atomics
// Proportional sweep
//
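The first hunk re-enables a padding field. On 32-bit platforms, 64-bit atomic operations need 8-byte-aligned operands, but struct fields only get 4-byte alignment by default, so a uint32 pad keeps the following uint64 fields aligned. A hypothetical struct showing the same pattern (the field names are invented for illustration and are not mheap's):

package example

import "sync/atomic"

// counters mimics the padding pattern from this hunk.
type counters struct {
	sweepers   uint32
	_          uint32 // pad: the following uint64 lands on an 8-byte boundary
	pagesSwept uint64 // updated atomically, so it must be 8-byte aligned on 32-bit
}

func bump(c *counters) uint64 {
	return atomic.AddUint64(&c.pagesSwept, 1)
}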
@@ -1389,7 +1389,7 @@ func (h *mheap) grow(npage uintptr) bool {
if overage := uintptr(retained + uint64(totalGrowth) - h.scavengeGoal); todo > overage {
todo = overage
}
- h.pages.scavenge(todo, true)
+ h.pages.scavenge(todo, false)
}
return true
}
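The grow hunk only flips the final argument passed to scavenge; the surrounding context caps the amount to scavenge at the overage above the scavenge goal. A small, self-contained example of that arithmetic with made-up numbers (retained, totalGrowth, and goal stand in for the runtime's values):

package main

import "fmt"

func main() {
	// Made-up numbers standing in for the runtime's state.
	retained := uint64(96 << 20)   // bytes currently retained from the OS
	totalGrowth := uint64(8 << 20) // bytes just mapped in by this grow
	goal := uint64(100 << 20)      // analogue of h.scavengeGoal
	todo := uintptr(totalGrowth)
	if overage := uintptr(retained + totalGrowth - goal); todo > overage {
		todo = overage
	}
	fmt.Printf("scavenge %d MiB of the %d MiB growth\n", todo>>20, totalGrowth>>20)
}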
@@ -1473,9 +1473,9 @@ func (h *mheap) freeSpanLocked(s *mspan, acctinuse, acctidle bool) {
h.freeMSpanLocked(s)
}
-// scavengeAll visits each node in the free treap and scavenges the
-// treapNode's span. It then removes the scavenged span from
-// unscav and adds it into scav before continuing.
+// scavengeAll acquires the heap lock (blocking any additional
+// manipulation of the page allocator) and iterates over the whole
+// heap, scavenging every free page available.
func (h *mheap) scavengeAll() {
// Disallow malloc or panic while holding the heap lock. We do
// this here because this is a non-mallocgc entry-point to
@@ -1483,14 +1483,16 @@ func (h *mheap) scavengeAll() {
gp := getg()
gp.m.mallocing++
lock(&h.lock)
- // Reset the scavenger address so we have access to the whole heap.
- h.pages.resetScavengeAddr()
- released := h.pages.scavenge(^uintptr(0), true)
+ // Start a new scavenge generation so we have a chance to walk
+ // over the whole heap.
+ h.pages.scavengeStartGen()
+ released := h.pages.scavenge(^uintptr(0), false)
+ gen := h.pages.scav.gen
unlock(&h.lock)
gp.m.mallocing--
if debug.scavtrace > 0 {
- printScavTrace(released, true)
+ printScavTrace(gen, released, true)
}
}
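scavengeAll now starts a new scavenge generation before walking the heap and records the generation number for the trace line (enabled with GODEBUG=scavtrace=1). A hypothetical sketch of the generation bookkeeping; scavGen, startGen, and the printed format are invented for illustration and differ from the runtime's pageAlloc and printScavTrace:

package main

import "fmt"

// scavGen mimics the idea of scavenge generations: each full walk of
// the heap's address ranges gets a new generation number, and released
// bytes are reported against that generation.
type scavGen struct {
	gen      uint32
	released uintptr
}

// startGen begins a new generation, giving scavengers a fresh view of
// every range so the whole heap can be walked again.
func (s *scavGen) startGen() {
	s.gen++
	s.released = 0
}

func main() {
	var s scavGen
	s.startGen()
	s.released += 2 << 20 // pretend 2 MiB of free pages were returned to the OS
	// Illustrative trace output only; the real printScavTrace format differs.
	fmt.Printf("scav %d: %d KiB released (forced)\n", s.gen, s.released>>10)
}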