author    Michael Anthony Knyszek <mknyszek@google.com>    2019-09-03 19:54:32 +0000
committer Andrew Bonventre <andybons@golang.org>           2019-10-04 16:55:09 +0000
commit    cd951aeec4e658b3a1f05362c9655791427d6de6 (patch)
tree      5433bc8f62b8bbebcb3b20aefd27bfbe3ac7f055
parent    951dbb1e5b264a253fe99fe5543c6e626ea33f96 (diff)
[release-branch.go1.13] runtime: redefine scavenge goal in terms of heap_inuse
This change makes it so that the scavenge goal is defined primarily in
terms of heap_inuse at the end of the last GC rather than next_gc. The
reason behind this change is that next_gc doesn't take into account
fragmentation, and we can fall into a situation where the scavenger
thinks it should have work to do but there's no free and unscavenged
memory available.

In order to ensure the scavenge goal still tracks next_gc, we multiply
heap_inuse by the ratio between the current heap goal and the last heap
goal, which describes whether the heap is growing or shrinking, and by
how much.

Finally, this change updates the documentation for scavenging and
elaborates on why the scavenge goal is defined the way it is.

Fixes #34149

Change-Id: I8deaf87620b5dc12a40ab8a90bf27932868610da
Reviewed-on: https://go-review.googlesource.com/c/go/+/193040
Run-TryBot: Michael Knyszek <mknyszek@google.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Austin Clements <austin@google.com>
Reviewed-by: Keith Randall <khr@golang.org>
(cherry picked from commit 9b30811280427a6d50d2558f316d62210e948656)
Reviewed-on: https://go-review.googlesource.com/c/go/+/198487
Run-TryBot: Andrew Bonventre <andybons@golang.org>
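To make the new goal concrete, the following standalone Go sketch mirrors the
formula the message describes. The function name, parameters, and example
values are all illustrative assumptions, not the runtime's code; the real
computation lives in gcPaceScavenger in the mgcscavenge.go diff below.

package main

import "fmt"

// scavengeGoal sketches the goal formula from the commit message:
//
//	(retainExtraPercent+100) / 100 * (next_gc / last_next_gc) * last_heap_inuse
func scavengeGoal(nextGC, lastNextGC, lastHeapInuse uint64, retainExtraPercent float64) uint64 {
	if lastNextGC == 0 {
		// No completed GC yet, so there is no history to pace against.
		return 0
	}
	// Scale last cycle's in-use memory by how much the heap goal grew or
	// shrank, so the goal tracks next_gc while still reflecting the
	// fragmentation captured by heap_inuse.
	goalRatio := float64(nextGC) / float64(lastNextGC)
	goal := uint64(float64(lastHeapInuse) * goalRatio)
	// Keep retainExtraPercent of slack so allocations can usually be
	// served from unscavenged memory.
	goal += uint64(float64(goal) * retainExtraPercent / 100)
	return goal
}

func main() {
	// Heap goal grew from 64 MiB to 128 MiB; 80 MiB was in use at the
	// last mark termination; retain 10% extra.
	fmt.Println(scavengeGoal(128<<20, 64<<20, 80<<20, 10)) // 184549376 (176 MiB)
}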
-rw-r--r--  src/runtime/mgc.go           4
-rw-r--r--  src/runtime/mgcscavenge.go  40
-rw-r--r--  src/runtime/mstats.go        2
3 files changed, 42 insertions(+), 4 deletions(-)
diff --git a/src/runtime/mgc.go b/src/runtime/mgc.go
index 8ad2198e9f..8a6b860c96 100644
--- a/src/runtime/mgc.go
+++ b/src/runtime/mgc.go
@@ -1651,6 +1651,10 @@ func gcMarkTermination(nextTriggerRatio float64) {
throw("gc done but gcphase != _GCoff")
}
+ // Record next_gc and heap_inuse for scavenger.
+ memstats.last_next_gc = memstats.next_gc
+ memstats.last_heap_inuse = memstats.heap_inuse
+
// Update GC trigger and pacing for the next cycle.
gcSetTriggerRatio(nextTriggerRatio)
diff --git a/src/runtime/mgcscavenge.go b/src/runtime/mgcscavenge.go
index e9010eb98a..c4394a3d60 100644
--- a/src/runtime/mgcscavenge.go
+++ b/src/runtime/mgcscavenge.go
@@ -17,7 +17,29 @@
// scavenger's primary goal is to bring the estimated heap RSS of the
// application down to a goal.
//
-// That goal is defined as (retainExtraPercent+100) / 100 * next_gc.
+// That goal is defined as:
+// (retainExtraPercent+100) / 100 * (next_gc / last_next_gc) * last_heap_inuse
+//
+// Essentially, we wish to have the application's RSS track the heap goal, but
+// the heap goal is defined in terms of bytes of objects, rather than pages like
+// RSS. As a result, we need to account for fragmentation internal to spans.
+// next_gc / last_next_gc defines the ratio between the current heap goal and
+// the last heap goal, which tells us whether the heap is growing or shrinking,
+// and by how much. We estimate what the heap will grow to in terms of pages by
+// multiplying this ratio by heap_inuse at the end of the last GC, which allows
+// us to account for this additional fragmentation. Note that this procedure
+// makes the assumption that the degree of fragmentation won't change
+// dramatically over the next GC cycle. Overestimating the amount of
+// fragmentation simply results in higher memory use, which will be accounted
+// for by the next pacing update. Underestimating the fragmentation, however,
+// could lead to performance degradation. Handling this case is not within the
+// scope of the scavenger. Situations where the amount of fragmentation balloons
+// over the course of a single GC cycle should be considered pathologies,
+// flagged as bugs, and fixed appropriately.
+//
+// An additional factor of retainExtraPercent is added as a buffer to help ensure
+// that there's more unscavenged memory to allocate out of, since each allocation
+// out of scavenged memory incurs a potentially expensive page fault.
//
// The goal is updated after each GC and the scavenger's pacing parameters
// (which live in mheap_) are updated to match. The pacing parameters work much
@@ -81,14 +103,24 @@ func heapRetained() uint64 {
//
// mheap_.lock must be held or the world must be stopped.
func gcPaceScavenger() {
- // Compute our scavenging goal and align it to a physical page boundary
- // to make the following calculations more exact.
- retainedGoal := memstats.next_gc
+ // If we're called before the first GC completed, disable scavenging.
+ // We never scavenge before the 2nd GC cycle anyway (we don't have enough
+ // information about the heap yet) so this is fine, and avoids a fault
+ // or garbage data later.
+ if memstats.last_next_gc == 0 {
+ mheap_.scavengeBytesPerNS = 0
+ return
+ }
+ // Compute our scavenging goal.
+ goalRatio := float64(memstats.next_gc) / float64(memstats.last_next_gc)
+ retainedGoal := uint64(float64(memstats.last_heap_inuse) * goalRatio)
// Add retainExtraPercent overhead to retainedGoal. This calculation
// looks strange but the purpose is to arrive at an integer division
// (e.g. if retainExtraPercent = 12.5, then we get a divisor of 8)
// that also avoids the overflow from a multiplication.
retainedGoal += retainedGoal / (1.0 / (retainExtraPercent / 100.0))
+ // Align it to a physical page boundary to make the following calculations
+ // a bit more exact.
retainedGoal = (retainedGoal + uint64(physPageSize) - 1) &^ (uint64(physPageSize) - 1)
// Represents where we are now in the heap's contribution to RSS in bytes.
diff --git a/src/runtime/mstats.go b/src/runtime/mstats.go
index 421580eec3..09dbb26735 100644
--- a/src/runtime/mstats.go
+++ b/src/runtime/mstats.go
@@ -79,6 +79,8 @@ type mstats struct {
last_gc_nanotime uint64 // last gc (monotonic time)
tinyallocs uint64 // number of tiny allocations that didn't cause actual allocation; not exported to go directly
+ last_next_gc uint64 // next_gc for the previous GC
+ last_heap_inuse uint64 // heap_inuse at mark termination of the previous GC
// triggerRatio is the heap growth ratio that triggers marking.
//