author	Michael Anthony Knyszek <mknyszek@google.com>	2019-12-27 16:48:23 +0000
committer	Michael Knyszek <mknyszek@google.com>	2020-01-09 18:00:06 +0000
commit	8ac98e7b3fcadc497c4ca7d8637ba9578e8159be (patch)
tree	7590cfe506165a3232b6457afef41073f75f347d /src/runtime/mgcscavenge.go
parent	509592d188f6a30bb65b27a24f4c6f68716ea063 (diff)
runtime: add scavtrace debug flag and remove scavenge info from gctrace
Currently, scavenging information is printed if the gctrace debug variable
is >0. Scavenging information is also printed naively, for every page
scavenged, resulting in a lot of noise when the typical expectation for the
GC trace is one line per GC.

This change adds a new GODEBUG flag called scavtrace which prints scavenge
information roughly once per GC cycle and removes any scavenge information
from gctrace. The exception is debug.FreeOSMemory, which may force an
additional line to be printed.

Fixes #32952.

Change-Id: I4177dcb85fe3f9653fd74297ea93c97c389c1811
Reviewed-on: https://go-review.googlesource.com/c/go/+/212640
Run-TryBot: Michael Knyszek <mknyszek@google.com>
Reviewed-by: Austin Clements <austin@google.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
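For illustration only (this program is not part of the commit): a minimal way to exercise the new flag is to run a binary with GODEBUG=scavtrace=1 set. The forced scavenge performed by debug.FreeOSMemory should emit an extra trace line marked "(forced)"; the sample line in the comment below uses made-up numbers and real values will differ.

package main

import "runtime/debug"

var sink []byte

func main() {
	// Allocate a large slice, then drop the reference so the pages become
	// idle and eligible for scavenging. The size is arbitrary.
	sink = make([]byte, 1<<28) // 256 MiB
	sink = nil

	// debug.FreeOSMemory forces a GC followed by a scavenge. With
	// GODEBUG=scavtrace=1 this should print a line roughly of the form:
	//   scav 262144 KiB work, 262144 KiB total, 95% util (forced)
	debug.FreeOSMemory()
}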
Diffstat (limited to 'src/runtime/mgcscavenge.go')
-rw-r--r--  src/runtime/mgcscavenge.go  |  35
1 file changed, 28 insertions(+), 7 deletions(-)
diff --git a/src/runtime/mgcscavenge.go b/src/runtime/mgcscavenge.go
index 8015bf5d19..24c5554b0b 100644
--- a/src/runtime/mgcscavenge.go
+++ b/src/runtime/mgcscavenge.go
@@ -264,16 +264,10 @@ func bgscavenge(c chan int) {
 			// Scavenge one page, and measure the amount of time spent scavenging.
 			start := nanotime()
 			released = mheap_.pages.scavengeOne(physPageSize, false)
+			atomic.Xadduintptr(&mheap_.pages.scavReleased, released)
 			crit = nanotime() - start
 		})
 
-		if debug.gctrace > 0 {
-			if released > 0 {
-				print("scvg: ", released>>10, " KB released\n")
-			}
-			print("scvg: inuse: ", memstats.heap_inuse>>20, ", idle: ", memstats.heap_idle>>20, ", sys: ", memstats.heap_sys>>20, ", released: ", memstats.heap_released>>20, ", consumed: ", (memstats.heap_sys-memstats.heap_released)>>20, " (MB)\n")
-		}
-
 		if released == 0 {
 			lock(&scavenge.lock)
 			scavenge.parked = true
@@ -346,12 +340,39 @@ func (s *pageAlloc) scavenge(nbytes uintptr, locked bool) uintptr {
 	return released
 }
 
+// printScavTrace prints a scavenge trace line to standard error.
+//
+// released should be the amount of memory released since the last time this
+// was called, and forced indicates whether the scavenge was forced by the
+// application.
+func printScavTrace(released uintptr, forced bool) {
+	printlock()
+	print("scav ",
+		released>>10, " KiB work, ",
+		atomic.Load64(&memstats.heap_released)>>10, " KiB total, ",
+		(atomic.Load64(&memstats.heap_inuse)*100)/heapRetained(), "% util",
+	)
+	if forced {
+		print(" (forced)")
+	}
+	println()
+	printunlock()
+}
+
 // resetScavengeAddr sets the scavenge start address to the top of the heap's
 // address space. This should be called each time the scavenger's pacing
 // changes.
 //
 // s.mheapLock must be held.
 func (s *pageAlloc) resetScavengeAddr() {
+	released := atomic.Loaduintptr(&s.scavReleased)
+	if debug.scavtrace > 0 {
+		printScavTrace(released, false)
+	}
+	// Subtract from scavReleased instead of just setting it to zero because
+	// the scavenger could have increased scavReleased concurrently with the
+	// load above, and we may miss an update by just blindly zeroing the field.
+	atomic.Xadduintptr(&s.scavReleased, -released)
 	s.scavAddr = chunkBase(s.end) - 1
 }
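As an aside, here is a self-contained sketch (illustrative names, ordinary sync/atomic in place of the runtime-internal atomics) of the drain-by-subtracting pattern used in resetScavengeAddr above: the reader subtracts exactly the value it loaded rather than storing zero, so anything the worker adds between the load and the reset is carried over to the next report instead of being lost.

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

func main() {
	var released uint64 // bytes recorded since the last report
	var wg sync.WaitGroup

	// Worker: records released memory as it goes, like the background
	// scavenger adding to scavReleased.
	wg.Add(1)
	go func() {
		defer wg.Done()
		for i := 0; i < 1000; i++ {
			atomic.AddUint64(&released, 4096)
		}
	}()

	// Reporter: drain the counter without losing concurrent updates.
	// Subtract the observed value instead of storing zero; an increment
	// that lands between the Load and this Add stays in the counter.
	seen := atomic.LoadUint64(&released)
	fmt.Printf("scav %d KiB work\n", seen>>10)
	atomic.AddUint64(&released, ^(seen - 1)) // sync/atomic idiom for subtracting seen

	wg.Wait()
	fmt.Printf("carried over to the next report: %d KiB\n",
		atomic.LoadUint64(&released)>>10)
}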