aboutsummaryrefslogtreecommitdiff
path: root/src/runtime/mheap.go
diff options
context:
space:
mode:
authorMichael Anthony Knyszek <mknyszek@google.com>2020-11-02 19:03:16 +0000
committerMichael Knyszek <mknyszek@google.com>2020-11-02 21:21:46 +0000
commit39a5ee52b9b41b1e4f4cf821c78ef5b7be68d181 (patch)
tree7c1929e087a1d27118dcad9f607391868dbb32a0 /src/runtime/mheap.go
parentac766e37182f36cd0a3247e44a4143d2d2132e42 (diff)
downloadgo-39a5ee52b9b41b1e4f4cf821c78ef5b7be68d181.tar.gz
go-39a5ee52b9b41b1e4f4cf821c78ef5b7be68d181.zip
runtime: decouple consistent stats from mcache and allow P-less update
This change modifies the consistent stats implementation to keep the per-P sequence counter on each P instead of each mcache. A valid mcache is not available everywhere that we want to call e.g. allocSpan, as per issue #42339. By decoupling these two, we can add a mechanism to allow contexts without a P to update stats consistently. In this CL, we achieve that with a mutex. In practice, it will be very rare for an M to update these stats without a P. Furthermore, the stats reader also only needs to hold the mutex across the update to "gen" since once that changes, writers are free to continue updating the new stats generation. Contention could thus only arise between writers without a P, and as mentioned earlier, those should be rare. A nice side-effect of this change is that the consistent stats acquire and release API becomes simpler. Fixes #42339. Change-Id: Ied74ab256f69abd54b550394c8ad7c4c40a5fe34 Reviewed-on: https://go-review.googlesource.com/c/go/+/267158 Run-TryBot: Michael Knyszek <mknyszek@google.com> Trust: Michael Knyszek <mknyszek@google.com> Reviewed-by: Michael Pratt <mpratt@google.com>
Diffstat (limited to 'src/runtime/mheap.go')
-rw-r--r--src/runtime/mheap.go27
1 file changed, 6 insertions(+), 21 deletions(-)
diff --git a/src/runtime/mheap.go b/src/runtime/mheap.go
index 6b29f34a82..b8429eee94 100644
--- a/src/runtime/mheap.go
+++ b/src/runtime/mheap.go
@@ -1246,12 +1246,7 @@ HaveSpan:
memstats.heap_sys.add(-int64(nbytes))
}
// Update consistent stats.
- c := getMCache()
- if c == nil {
- // TODO(mknyszek): Remove this and handle this case to fix #42339.
- throw("allocSpan called without P or outside bootstrapping")
- }
- stats := memstats.heapStats.acquire(c)
+ stats := memstats.heapStats.acquire()
atomic.Xaddint64(&stats.committed, int64(scav))
atomic.Xaddint64(&stats.released, -int64(scav))
switch typ {
@@ -1264,7 +1259,7 @@ HaveSpan:
case spanAllocWorkBuf:
atomic.Xaddint64(&stats.inWorkBufs, int64(nbytes))
}
- memstats.heapStats.release(c)
+ memstats.heapStats.release()
// Publish the span in various locations.
@@ -1344,14 +1339,9 @@ func (h *mheap) grow(npage uintptr) bool {
// size which is always > physPageSize, so its safe to
// just add directly to heap_released.
atomic.Xadd64(&memstats.heap_released, int64(asize))
- c := getMCache()
- if c == nil {
- // TODO(mknyszek): Remove this and handle this case to fix #42339.
- throw("grow called without P or outside bootstrapping")
- }
- stats := memstats.heapStats.acquire(c)
+ stats := memstats.heapStats.acquire()
atomic.Xaddint64(&stats.released, int64(asize))
- memstats.heapStats.release(c)
+ memstats.heapStats.release()
// Recalculate nBase.
// We know this won't overflow, because sysAlloc returned
@@ -1447,12 +1437,7 @@ func (h *mheap) freeSpanLocked(s *mspan, typ spanAllocType) {
memstats.heap_sys.add(int64(nbytes))
}
// Update consistent stats.
- c := getMCache()
- if c == nil {
- // TODO(mknyszek): Remove this and handle this case to fix #42339.
- throw("freeSpanLocked called without P or outside bootstrapping")
- }
- stats := memstats.heapStats.acquire(c)
+ stats := memstats.heapStats.acquire()
switch typ {
case spanAllocHeap:
atomic.Xaddint64(&stats.inHeap, -int64(nbytes))
@@ -1463,7 +1448,7 @@ func (h *mheap) freeSpanLocked(s *mspan, typ spanAllocType) {
case spanAllocWorkBuf:
atomic.Xaddint64(&stats.inWorkBufs, -int64(nbytes))
}
- memstats.heapStats.release(c)
+ memstats.heapStats.release()
// Mark the space as free.
h.pages.free(s.base(), s.npages)