author     Michael Anthony Knyszek <mknyszek@google.com>  2020-11-02 19:03:16 +0000
committer  Michael Knyszek <mknyszek@google.com>          2020-11-02 21:21:46 +0000
commit     39a5ee52b9b41b1e4f4cf821c78ef5b7be68d181 (patch)
tree       7c1929e087a1d27118dcad9f607391868dbb32a0 /src/runtime/mcache.go
parent     ac766e37182f36cd0a3247e44a4143d2d2132e42 (diff)
runtime: decouple consistent stats from mcache and allow P-less update
This change modifies the consistent stats implementation to keep the
per-P sequence counter on each P instead of each mcache. A valid mcache
is not available everywhere that we want to call e.g. allocSpan, as per
issue #42339. By decoupling these two, we can add a mechanism to allow
contexts without a P to update stats consistently.

In this CL, we achieve that with a mutex. In practice, it will be very
rare for an M to update these stats without a P. Furthermore, the stats
reader also only needs to hold the mutex across the update to "gen",
since once that changes, writers are free to continue updating the new
stats generation. Contention could thus only arise between writers
without a P, and as mentioned earlier, those should be rare.

A nice side-effect of this change is that the consistent stats acquire
and release API becomes simpler.

Fixes #42339.

Change-Id: Ied74ab256f69abd54b550394c8ad7c4c40a5fe34
Reviewed-on: https://go-review.googlesource.com/c/go/+/267158
Run-TryBot: Michael Knyszek <mknyszek@google.com>
Trust: Michael Knyszek <mknyszek@google.com>
Reviewed-by: Michael Pratt <mpratt@google.com>
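The scheme the message describes is easiest to see in miniature. Below is a
minimal, self-contained Go sketch of the idea, not the runtime's actual code:
heapStatsDelta, fakeP, and the explicit *fakeP parameter on acquire/release
are illustrative stand-ins (the real implementation locates the current P
itself and lives in the runtime's internal stats code), and the field and
method shapes here are simplified assumptions.

	package main

	import (
		"fmt"
		"runtime"
		"sync"
		"sync/atomic"
	)

	// heapStatsDelta stands in for the runtime's block of pending
	// stat deltas; the single field is illustrative.
	type heapStatsDelta struct {
		largeAlloc uint64
	}

	// fakeP stands in for a runtime P; only the per-P sequence
	// counter that this commit moves onto the P matters here.
	type fakeP struct {
		statsSeq uint32 // even: no write in progress; odd: writing
	}

	type consistentHeapStats struct {
		stats   [3]heapStatsDelta // rotating generations of deltas
		gen     uint32            // generation writers currently target
		noPLock sync.Mutex        // serializes writers that have no P
	}

	// acquire returns the generation writers should update. With a P
	// it is seqlock-style: flip the P's counter to odd. Without a P
	// it falls back to the mutex, which should be rare in practice.
	func (m *consistentHeapStats) acquire(p *fakeP) *heapStatsDelta {
		if p != nil {
			atomic.AddUint32(&p.statsSeq, 1) // now odd: writing
		} else {
			m.noPLock.Lock()
		}
		return &m.stats[atomic.LoadUint32(&m.gen)%3]
	}

	// release marks the write complete.
	func (m *consistentHeapStats) release(p *fakeP) {
		if p != nil {
			atomic.AddUint32(&p.statsSeq, 1) // now even: done
		} else {
			m.noPLock.Unlock()
		}
	}

	// read advances the generation, holding the mutex only across
	// the update to gen; once gen changes, writers continue against
	// the new generation while the reader drains the old one.
	func (m *consistentHeapStats) read(allp []*fakeP, out *heapStatsDelta) {
		m.noPLock.Lock()
		prev := atomic.LoadUint32(&m.gen)
		atomic.StoreUint32(&m.gen, (prev+1)%3)
		m.noPLock.Unlock()

		// Wait for writers still inside the previous generation.
		for _, p := range allp {
			for atomic.LoadUint32(&p.statsSeq)%2 != 0 {
				runtime.Gosched()
			}
		}
		out.largeAlloc += atomic.LoadUint64(&m.stats[prev%3].largeAlloc)
	}

	func main() {
		var m consistentHeapStats
		p := &fakeP{}

		s := m.acquire(p) // writer with a P: seqlock path
		atomic.AddUint64(&s.largeAlloc, 4096)
		m.release(p)

		s = m.acquire(nil) // writer without a P: mutex path
		atomic.AddUint64(&s.largeAlloc, 8192)
		m.release(nil)

		var out heapStatsDelta
		m.read([]*fakeP{p}, &out)
		fmt.Println(out.largeAlloc) // 12288
	}

Note how this explains why the diff below drops the mcache argument: once the
sequence counter lives on the P (or the mutex covers the P-less case), acquire
and release need nothing from the mcache, so the call sites simplify.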
Diffstat (limited to 'src/runtime/mcache.go')
-rw-r--r--  src/runtime/mcache.go | 16 ++++++----------
1 file changed, 6 insertions(+), 10 deletions(-)
diff --git a/src/runtime/mcache.go b/src/runtime/mcache.go
index 847a5dedf3..bb7475b6f3 100644
--- a/src/runtime/mcache.go
+++ b/src/runtime/mcache.go
@@ -50,10 +50,6 @@ type mcache struct {
// in this mcache are stale and need to be flushed so they
// can be swept. This is done in acquirep.
flushGen uint32
-
- // statsSeq is a counter indicating whether this P is currently
- // writing any stats. Its value is even when not, odd when it is.
- statsSeq uint32
}
// A gclink is a node in a linked list of blocks, like mlink,
@@ -178,9 +174,9 @@ func (c *mcache) refill(spc spanClass) {
// Assume all objects from this span will be allocated in the
// mcache. If it gets uncached, we'll adjust this.
- stats := memstats.heapStats.acquire(c)
+ stats := memstats.heapStats.acquire()
atomic.Xadduintptr(&stats.smallAllocCount[spc.sizeclass()], uintptr(s.nelems)-uintptr(s.allocCount))
- memstats.heapStats.release(c)
+ memstats.heapStats.release()
// Update heap_live with the same assumption.
usedBytes := uintptr(s.allocCount) * s.elemsize
@@ -229,10 +225,10 @@ func (c *mcache) allocLarge(size uintptr, needzero bool, noscan bool) *mspan {
if s == nil {
throw("out of memory")
}
- stats := memstats.heapStats.acquire(c)
+ stats := memstats.heapStats.acquire()
atomic.Xadduintptr(&stats.largeAlloc, npages*pageSize)
atomic.Xadduintptr(&stats.largeAllocCount, 1)
- memstats.heapStats.release(c)
+ memstats.heapStats.release()
// Update heap_live and revise pacing if needed.
atomic.Xadd64(&memstats.heap_live, int64(npages*pageSize))
@@ -263,9 +259,9 @@ func (c *mcache) releaseAll() {
if s != &emptymspan {
// Adjust nsmallalloc in case the span wasn't fully allocated.
n := uintptr(s.nelems) - uintptr(s.allocCount)
- stats := memstats.heapStats.acquire(c)
+ stats := memstats.heapStats.acquire()
atomic.Xadduintptr(&stats.smallAllocCount[spanClass(i).sizeclass()], -n)
- memstats.heapStats.release(c)
+ memstats.heapStats.release()
if s.sweepgen != sg+1 {
// refill conservatively counted unallocated slots in heap_live.
// Undo this.