author     Austin Clements <austin@google.com>    2015-08-03 09:25:23 -0400
committer  Austin Clements <austin@google.com>    2015-08-04 18:54:41 +0000
commit     e30c6d64bac1b5a31a7062dff89744332bebc23e
tree       15f062d0daa110781b4e090b78801d0451a14653
parent     fb5230af8a0d416fa69049f71eb1271c053e7b8c
runtime: always give concurrent sweep some heap distance
Currently it's possible for the next_gc heap size trigger computed for
the next GC cycle to be less than the current allocated heap size.
This means the next cycle will start immediately, which means there's
no time to perform the concurrent sweep between GC cycles. This places
responsibility for finishing the sweep on GC itself, which delays GC
start-up and hence delays mutator assist.

Fix this by ensuring that next_gc is always at least a little higher
than the allocated heap size, so we won't trigger the next cycle
instantly.

Updates #11911.

Change-Id: I74f0b887bf187518d5fedffc7989817cbcf30592
Reviewed-on: https://go-review.googlesource.com/13043
Reviewed-by: Rick Hudson <rlh@golang.org>
Reviewed-by: Russ Cox <rsc@golang.org>
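For illustration only (not part of the change): the standalone sketch below mirrors the clamp that the patch adds to gcMark, using plain uint64 values and a hypothetical clampNextGC helper in place of the runtime's internal memstats fields, to show how the trigger is pushed above the live heap.

package main

import "fmt"

// Same value as the constant added by the patch: 1 MiB lower bound on
// the heap distance reserved for concurrent sweep, scaled by gcpercent/100.
const sweepMinHeapDistance = 1024 * 1024

// clampNextGC sketches the fix: never let the next GC trigger sit at or
// below the current live heap, so concurrent sweep always has some
// allocation headroom before the next cycle starts.
func clampNextGC(nextGC, heapLive uint64, gcpercent int) uint64 {
	minNextGC := heapLive + sweepMinHeapDistance*uint64(gcpercent)/100
	if nextGC < minNextGC {
		nextGC = minNextGC
	}
	return nextGC
}

func main() {
	// Hypothetical numbers: the live heap (64 MiB) already exceeds the
	// computed trigger (60 MiB), which would start the next cycle
	// immediately. The clamp pushes the trigger 1 MiB above the live heap.
	heapLive := uint64(64 << 20)
	nextGC := uint64(60 << 20)
	fmt.Println(clampNextGC(nextGC, heapLive, 100) - heapLive) // prints 1048576
}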
-rw-r--r--  src/runtime/mgc.go  20
1 file changed, 20 insertions(+), 0 deletions(-)
diff --git a/src/runtime/mgc.go b/src/runtime/mgc.go
index de9f4f51fd..f308530c5c 100644
--- a/src/runtime/mgc.go
+++ b/src/runtime/mgc.go
@@ -145,6 +145,11 @@ const (
 	// bookkeeping will use a large amount of each stack.
 	firstStackBarrierOffset = 1024
 	debugStackBarrier = false
+
+	// sweepMinHeapDistance is a lower bound on the heap distance
+	// (in bytes) reserved for concurrent sweeping between GC
+	// cycles. This will be scaled by gcpercent/100.
+	sweepMinHeapDistance = 1024 * 1024
 )
 
 // heapminimum is the minimum heap size at which to trigger GC.
@@ -1489,6 +1494,21 @@ func gcMark(start_time int64) {
 	memstats.heap_marked = work.bytesMarked
 	memstats.heap_scan = uint64(gcController.scanWork)
 
+	minNextGC := memstats.heap_live + sweepMinHeapDistance*uint64(gcpercent)/100
+	if memstats.next_gc < minNextGC {
+		// The allocated heap is already past the trigger.
+		// This can happen if the triggerRatio is very low and
+		// the reachable heap estimate is less than the live
+		// heap size.
+		//
+		// Concurrent sweep happens in the heap growth from
+		// heap_live to next_gc, so bump next_gc up to ensure
+		// that concurrent sweep has some heap growth in which
+		// to perform sweeping before we start the next GC
+		// cycle.
+		memstats.next_gc = minNextGC
+	}
+
 	if trace.enabled {
 		traceHeapAlloc()
 		traceNextGC()
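For a rough sense of the reserved distance (illustrative arithmetic, not part of the change): sweepMinHeapDistance is 1 MiB and is scaled by gcpercent/100, so with the default GOGC=100 the clamp keeps next_gc at least 1 MiB above heap_live, while with GOGC=20 the reserve shrinks to 1 MiB * 20/100 = 200 KiB.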