author    Michael Anthony Knyszek <mknyszek@google.com>  2020-08-03 20:35:40 +0000
committer Michael Knyszek <mknyszek@google.com>          2020-10-26 18:28:20 +0000
commit    f77a9025f1e4bf4bb3e2b582d13cce5f19c1ca51 (patch)
tree      0d32f60f00704a0ce70dab1e1d6bebf2902bbd63
parent    fe7ff71185cf30f9bdee3e8d8897e8b6069ad02e (diff)
runtime: replace some memstats with consistent stats
This change replaces stacks_inuse, gcWorkBufInUse and
gcProgPtrScalarBitsInUse with their corresponding consistent stats. It
also adds checks to make sure the rest of the sharded stats line up
with existing stats in updatememstats.

Change-Id: I17d0bd181aedb5c55e09c8dff18cef5b2a3a14e3
Reviewed-on: https://go-review.googlesource.com/c/go/+/247038
Run-TryBot: Michael Knyszek <mknyszek@google.com>
TryBot-Result: Go Bot <gobot@golang.org>
Trust: Michael Knyszek <mknyszek@google.com>
Reviewed-by: Michael Pratt <mpratt@google.com>
 src/runtime/mheap.go  | 18
 src/runtime/mstats.go | 73
 2 files changed, 62 insertions(+), 29 deletions(-)
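For context, the heart of the change is that updatememstats now derives stacks_inuse, gcWorkBufInUse and gcProgPtrScalarBitsInUse from an aggregated snapshot of the runtime's sharded "consistent" stats, instead of maintaining per-type atomic globals at every span allocation and free. The following is a minimal standalone sketch of that aggregate-then-derive pattern; the field names mirror the ones the commit reads (inStacks, inWorkBufs, inPtrScalarBits), but the types and values are illustrative simplifications, not the runtime's actual heapStatsDelta or memstats.

package main

import "fmt"

// heapStatsDelta models the sharded per-P stat deltas (a simplification
// of the runtime's internal type of the same name).
type heapStatsDelta struct {
	inHeap, inStacks, inWorkBufs, inPtrScalarBits int64
}

// merge accumulates one shard's delta into an aggregate snapshot.
func (d *heapStatsDelta) merge(o *heapStatsDelta) {
	d.inHeap += o.inHeap
	d.inStacks += o.inStacks
	d.inWorkBufs += o.inWorkBufs
	d.inPtrScalarBits += o.inPtrScalarBits
}

func main() {
	// Two shards, as if two Ps had each allocated some spans.
	shards := []heapStatsDelta{
		{inHeap: 8192, inStacks: 4096},
		{inHeap: 4096, inWorkBufs: 2048, inPtrScalarBits: 1024},
	}

	// With the world stopped, aggregation is safe without atomics.
	var snapshot heapStatsDelta
	for i := range shards {
		snapshot.merge(&shards[i])
	}

	// The legacy fields become derived values, as in updatememstats.
	stacksInuse := uint64(snapshot.inStacks)
	gcWorkBufInUse := uint64(snapshot.inWorkBufs)
	gcProgPtrScalarBitsInUse := uint64(snapshot.inPtrScalarBits)
	fmt.Println(stacksInuse, gcWorkBufInUse, gcProgPtrScalarBitsInUse)
}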
diff --git a/src/runtime/mheap.go b/src/runtime/mheap.go
index d17b6fa284..14a73c0491 100644
--- a/src/runtime/mheap.go
+++ b/src/runtime/mheap.go
@@ -1225,15 +1225,8 @@ HaveSpan:
atomic.Xadd64(&memstats.heap_released, -int64(scav))
}
// Update stats.
- switch typ {
- case spanAllocHeap:
+ if typ == spanAllocHeap {
atomic.Xadd64(&memstats.heap_inuse, int64(nbytes))
- case spanAllocStack:
- atomic.Xadd64(&memstats.stacks_inuse, int64(nbytes))
- case spanAllocWorkBuf:
- atomic.Xadd64(&memstats.gcWorkBufInUse, int64(nbytes))
- case spanAllocPtrScalarBits:
- atomic.Xadd64(&memstats.gcProgPtrScalarBitsInUse, int64(nbytes))
}
if typ.manual() {
// Manually managed memory doesn't count toward heap_sys.
@@ -1421,15 +1414,8 @@ func (h *mheap) freeSpanLocked(s *mspan, typ spanAllocType) {
//
// Mirrors the code in allocSpan.
nbytes := s.npages * pageSize
- switch typ {
- case spanAllocHeap:
+ if typ == spanAllocHeap {
atomic.Xadd64(&memstats.heap_inuse, -int64(nbytes))
- case spanAllocStack:
- atomic.Xadd64(&memstats.stacks_inuse, -int64(nbytes))
- case spanAllocWorkBuf:
- atomic.Xadd64(&memstats.gcWorkBufInUse, -int64(nbytes))
- case spanAllocPtrScalarBits:
- atomic.Xadd64(&memstats.gcProgPtrScalarBitsInUse, -int64(nbytes))
}
if typ.manual() {
// Manually managed memory doesn't count toward heap_sys, so add it back.
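The two hunks above collapse a four-way switch to a single if: heap spans still update memstats.heap_inuse atomically at the allocation and free sites, while stack, work-buffer, and ptr/scalar-bits usage is no longer tracked in dedicated globals at all. Below is a rough standalone model of the post-commit shape; spanAllocType and manual() mirror the real runtime identifiers, but everything else is an illustrative assumption.

package main

import (
	"fmt"
	"sync/atomic"
)

// spanAllocType mimics the runtime's enum; the names match the real
// identifiers, but the concrete values here are illustrative.
type spanAllocType uint8

const (
	spanAllocHeap spanAllocType = iota
	spanAllocStack
	spanAllocWorkBuf
	spanAllocPtrScalarBits
)

// manual reports whether the span is manually managed, mirroring the
// real method: every type except heap spans is manual.
func (t spanAllocType) manual() bool { return t != spanAllocHeap }

var heapInuse uint64 // stands in for memstats.heap_inuse

// accountAlloc shows the post-commit shape of the accounting: only heap
// spans update the global directly; the other types are now tracked
// solely through the sharded consistent stats (not modeled here).
func accountAlloc(typ spanAllocType, nbytes uint64) {
	if typ == spanAllocHeap {
		atomic.AddUint64(&heapInuse, nbytes)
	}
	if typ.manual() {
		// In the runtime, manually managed memory is also moved out of
		// heap_sys at this point; that bookkeeping is omitted here.
		fmt.Printf("manual span (type %d): %d bytes\n", typ, nbytes)
	}
}

func main() {
	accountAlloc(spanAllocHeap, 8192)
	accountAlloc(spanAllocStack, 4096) // no per-type global update anymore
	fmt.Println("heap_inuse:", atomic.LoadUint64(&heapInuse))
}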
diff --git a/src/runtime/mstats.go b/src/runtime/mstats.go
index 76546c0f0c..4363eff1e0 100644
--- a/src/runtime/mstats.go
+++ b/src/runtime/mstats.go
@@ -40,19 +40,25 @@ type mstats struct {
// computed on the fly by updatememstats.
heap_objects uint64 // total number of allocated objects
+ // Statistics about stacks.
+ stacks_inuse uint64 // bytes in manually-managed stack spans; computed by updatememstats
+ stacks_sys sysMemStat // only counts newosproc0 stack in mstats; differs from MemStats.StackSys
+
// Statistics about allocation of low-level fixed-size structures.
// Protected by FixAlloc locks.
- stacks_inuse uint64 // bytes in manually-managed stack spans; updated atomically or during STW
- stacks_sys sysMemStat // only counts newosproc0 stack in mstats; differs from MemStats.StackSys
- mspan_inuse uint64 // mspan structures
- mspan_sys sysMemStat
- mcache_inuse uint64 // mcache structures
- mcache_sys sysMemStat
- buckhash_sys sysMemStat // profiling bucket hash table
- gcWorkBufInUse uint64 // updated atomically or during STW
- gcProgPtrScalarBitsInUse uint64 // updated atomically or during STW
+ mspan_inuse uint64 // mspan structures
+ mspan_sys sysMemStat
+ mcache_inuse uint64 // mcache structures
+ mcache_sys sysMemStat
+ buckhash_sys sysMemStat // profiling bucket hash table
+
+ // Statistics about GC overhead.
+ gcWorkBufInUse uint64 // computed by updatememstats
+ gcProgPtrScalarBitsInUse uint64 // computed by updatememstats
gcMiscSys sysMemStat // updated atomically or during STW
- other_sys sysMemStat // updated atomically or during STW
+
+ // Miscellaneous statistics.
+ other_sys sysMemStat // updated atomically or during STW
// Statistics about the garbage collector.
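sysMemStat, the type of the *_sys fields above, is the runtime's atomically updated byte counter. It lives inside the runtime and is not importable, but a standalone analogue of its load and signed-add behavior might look like the sketch below; this is an assumption about its shape based on how the diff uses it, not the real implementation.

package main

import (
	"fmt"
	"sync/atomic"
)

// sysStat is a rough analogue of the runtime's sysMemStat: a byte
// count with atomic load and signed-add operations.
type sysStat uint64

func (s *sysStat) load() uint64 { return atomic.LoadUint64((*uint64)(s)) }

func (s *sysStat) add(n int64) {
	// Two's-complement wraparound makes this correct for n < 0 as well.
	atomic.AddUint64((*uint64)(s), uint64(n))
}

func main() {
	var stacksSys sysStat
	stacksSys.add(8192)
	stacksSys.add(-4096)
	fmt.Println(stacksSys.load()) // 4096
}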
@@ -577,6 +583,10 @@ func readGCStats_m(pauses *[]uint64) {
*pauses = p[:n+n+3]
}
+// Updates the memstats structure.
+//
+// The world must be stopped.
+//
//go:nowritebarrier
func updatememstats() {
// Flush mcaches to mcentral before doing anything else.
@@ -591,9 +601,6 @@ func updatememstats() {
memstats.mcache_sys.load() + memstats.buckhash_sys.load() + memstats.gcMiscSys.load() +
memstats.other_sys.load()
- // We also count stacks_inuse, gcWorkBufInUse, and gcProgPtrScalarBitsInUse as sys memory.
- memstats.sys += memstats.stacks_inuse + memstats.gcWorkBufInUse + memstats.gcProgPtrScalarBitsInUse
-
// Calculate memory allocator stats.
// During program execution we only count number of frees and amount of freed memory.
// Current number of alive objects in the heap and amount of alive heap memory
@@ -641,6 +648,9 @@ func updatememstats() {
smallFree += uint64(c.smallFreeCount[i]) * uint64(class_to_size[i])
}
}
+ // Collect consistent stats, which are the source of truth in some cases.
+ var consStats heapStatsDelta
+ memstats.heapStats.unsafeRead(&consStats)
totalFree += smallFree
@@ -651,6 +661,43 @@ func updatememstats() {
memstats.total_alloc = totalAlloc
memstats.alloc = totalAlloc - totalFree
memstats.heap_objects = memstats.nmalloc - memstats.nfree
+
+ memstats.stacks_inuse = uint64(consStats.inStacks)
+ memstats.gcWorkBufInUse = uint64(consStats.inWorkBufs)
+ memstats.gcProgPtrScalarBitsInUse = uint64(consStats.inPtrScalarBits)
+
+ // We also count stacks_inuse, gcWorkBufInUse, and gcProgPtrScalarBitsInUse as sys memory.
+ memstats.sys += memstats.stacks_inuse + memstats.gcWorkBufInUse + memstats.gcProgPtrScalarBitsInUse
+
+ // The world is stopped, so the consistent stats (after aggregation)
+ // should be identical to some combination of memstats. In particular:
+ //
+ // * heap_inuse == inHeap
+ // * heap_released == released
+ // * heap_sys - heap_released == committed - inStacks - inWorkBufs - inPtrScalarBits
+ //
+ // Check if that's actually true.
+ //
+ // TODO(mknyszek): Maybe don't throw here. It would be bad if a
+ // bug in otherwise benign accounting caused the whole application
+ // to crash.
+ if memstats.heap_inuse != uint64(consStats.inHeap) {
+ print("runtime: heap_inuse=", memstats.heap_inuse, "\n")
+ print("runtime: consistent value=", consStats.inHeap, "\n")
+ throw("heap_inuse and consistent stats are not equal")
+ }
+ if memstats.heap_released != uint64(consStats.released) {
+ print("runtime: heap_released=", memstats.heap_released, "\n")
+ print("runtime: consistent value=", consStats.released, "\n")
+ throw("heap_released and consistent stats are not equal")
+ }
+ globalRetained := memstats.heap_sys.load() - memstats.heap_released
+ consRetained := uint64(consStats.committed - consStats.inStacks - consStats.inWorkBufs - consStats.inPtrScalarBits)
+ if globalRetained != consRetained {
+ print("runtime: global value=", globalRetained, "\n")
+ print("runtime: consistent value=", consRetained, "\n")
+ throw("measures of the retained heap are not equal")
+ }
}
// flushmcache flushes the mcache of allp[i].
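The new checks at the end of updatememstats all follow the same pattern: with the world stopped, two independently maintained accountings of the same quantity must agree exactly, and any mismatch is treated as an accounting bug (currently via throw, with a TODO questioning whether crashing the whole application is appropriate). A hedged standalone sketch of that cross-check pattern, using panic in place of the runtime's throw and made-up values in place of memstats and the aggregated snapshot:

package main

import (
	"fmt"
	"os"
)

// checkEqual mirrors the cross-check pattern the commit adds: compare a
// legacy global counter against the consistent-stats view of the same
// quantity, and fail loudly if they diverge.
func checkEqual(name string, global, consistent uint64) {
	if global != consistent {
		fmt.Fprintf(os.Stderr, "runtime: %s=%d\n", name, global)
		fmt.Fprintf(os.Stderr, "runtime: consistent value=%d\n", consistent)
		panic(name + " and consistent stats are not equal")
	}
}

func main() {
	// Illustrative values; in the runtime these come from memstats and
	// from the aggregated heapStatsDelta snapshot.
	heapInuse, consInHeap := uint64(1<<20), uint64(1<<20)
	checkEqual("heap_inuse", heapInuse, consInHeap)

	heapReleased, consReleased := uint64(1<<16), uint64(1<<16)
	checkEqual("heap_released", heapReleased, consReleased)

	fmt.Println("stats agree")
}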