path: root/src/runtime/mstats.go
author     Michael Anthony Knyszek <mknyszek@google.com>  2021-03-31 22:55:06 +0000
committer  Michael Knyszek <mknyszek@google.com>          2021-04-13 23:42:29 +0000
commit     f2d5bd1ad306e87804d600d92105dc37279af83f (patch)
tree       3469c5e88ec761a320877fba1e3aed6e21f0ae7f /src/runtime/mstats.go
parent     8c2a8b1771cd7ed2182f4d03b3c4bd09828315ce (diff)
runtime: move internal GC statistics from memstats to gcController
This change moves certain important but internal-only GC statistics
from memstats into gcController. These statistics are mainly used in
pacing the GC, so it makes sense to keep them in the pacer's state.

This CL was mostly generated via

rf '
    ex . {
        memstats.gc_trigger -> gcController.trigger
        memstats.triggerRatio -> gcController.triggerRatio
        memstats.heap_marked -> gcController.heapMarked
        memstats.heap_live -> gcController.heapLive
        memstats.heap_scan -> gcController.heapScan
    }
'

except for a few special cases, like updating names in comments and
when these fields are used within gcControllerState methods (at which
point they're accessed through the receiver).

For #44167.

Change-Id: I6bd1602585aeeb80818ded24c07d8e6fec992b93
Reviewed-on: https://go-review.googlesource.com/c/go/+/306598
Trust: Michael Knyszek <mknyszek@google.com>
Run-TryBot: Michael Knyszek <mknyszek@google.com>
TryBot-Result: Go Bot <gobot@golang.org>
Reviewed-by: Michael Pratt <mpratt@google.com>
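For orientation, a condensed sketch of where the moved state lands. The
field names come from the rf rewrite above and the comments are abbreviated
from the mstats doc comments removed in the diff below; the real
gcControllerState carries many more fields than shown here.

    // Condensed sketch only, not the full struct.
    type gcControllerState struct {
        // triggerRatio is the heap growth ratio that triggers marking.
        triggerRatio float64

        // trigger is the heap size that triggers marking.
        trigger uint64

        // heapLive is the number of bytes considered live by the GC.
        // Read and written atomically.
        heapLive uint64

        // heapScan is the number of bytes of "scannable" heap.
        // Read and written atomically.
        heapScan uint64

        // heapMarked is the number of bytes marked by the previous GC.
        heapMarked uint64
    }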
Diffstat (limited to 'src/runtime/mstats.go')
-rw-r--r--  src/runtime/mstats.go  65
1 file changed, 1 insertion(+), 64 deletions(-)
diff --git a/src/runtime/mstats.go b/src/runtime/mstats.go
index 6defaedabe..cd9359bc91 100644
--- a/src/runtime/mstats.go
+++ b/src/runtime/mstats.go
@@ -62,7 +62,7 @@ type mstats struct {
// Statistics about the garbage collector.
- // next_gc is the goal heap_live for when next GC ends.
+ // next_gc is the goal gcController.heapLive for when next GC ends.
// Set to ^uint64(0) if disabled.
//
// Read and written atomically, unless the world is stopped.
@@ -96,65 +96,6 @@ type mstats struct {
last_next_gc uint64 // next_gc for the previous GC
last_heap_inuse uint64 // heap_inuse at mark termination of the previous GC
- // triggerRatio is the heap growth ratio that triggers marking.
- //
- // E.g., if this is 0.6, then GC should start when the live
- // heap has reached 1.6 times the heap size marked by the
- // previous cycle. This should be ≤ GOGC/100 so the trigger
- // heap size is less than the goal heap size. This is set
- // during mark termination for the next cycle's trigger.
- triggerRatio float64
-
- // gc_trigger is the heap size that triggers marking.
- //
- // When heap_live ≥ gc_trigger, the mark phase will start.
- // This is also the heap size by which proportional sweeping
- // must be complete.
- //
- // This is computed from triggerRatio during mark termination
- // for the next cycle's trigger.
- gc_trigger uint64
-
- // heap_live is the number of bytes considered live by the GC.
- // That is: retained by the most recent GC plus allocated
- // since then. heap_live <= alloc, since alloc includes unmarked
- // objects that have not yet been swept (and hence goes up as we
- // allocate and down as we sweep) while heap_live excludes these
- // objects (and hence only goes up between GCs).
- //
- // This is updated atomically without locking. To reduce
- // contention, this is updated only when obtaining a span from
- // an mcentral and at this point it counts all of the
- // unallocated slots in that span (which will be allocated
- // before that mcache obtains another span from that
- // mcentral). Hence, it slightly overestimates the "true" live
- // heap size. It's better to overestimate than to
- // underestimate because 1) this triggers the GC earlier than
- // necessary rather than potentially too late and 2) this
- // leads to a conservative GC rate rather than a GC rate that
- // is potentially too low.
- //
- // Reads should likewise be atomic (or during STW).
- //
- // Whenever this is updated, call traceHeapAlloc() and
- // gcController.revise().
- heap_live uint64
-
- // heap_scan is the number of bytes of "scannable" heap. This
- // is the live heap (as counted by heap_live), but omitting
- // no-scan objects and no-scan tails of objects.
- //
- // Whenever this is updated, call gcController.revise().
- //
- // Read and written atomically or with the world stopped.
- heap_scan uint64
-
- // heap_marked is the number of bytes marked by the previous
- // GC. After mark termination, heap_live == heap_marked, but
- // unlike heap_live, heap_marked does not change until the
- // next mark termination.
- heap_marked uint64
-
// heapStats is a set of statistics
heapStats consistentHeapStats
@@ -443,10 +384,6 @@ type MemStats struct {
}
func init() {
- if offset := unsafe.Offsetof(memstats.heap_live); offset%8 != 0 {
- println(offset)
- throw("memstats.heap_live not aligned to 8 bytes")
- }
if offset := unsafe.Offsetof(memstats.heapStats); offset%8 != 0 {
println(offset)
throw("memstats.heapStats not aligned to 8 bytes")