author     Michael Anthony Knyszek <mknyszek@google.com>   2022-06-09 18:25:01 +0000
committer  Heschi Kreinick <heschi@google.com>             2022-07-06 18:37:56 +0000
commit     b1be664d64750bccd5081d51b585036c931b5cf0
tree       7b498af84a47647c6c750b13cad1c17b3ead0521 /src/runtime/mcache.go
parent     77cc1c0defaf4cd6c38221504b29ad125aae9ac7
[release-branch.go1.17] runtime: store consistent total allocation stats as uint64
Currently the consistent total allocation stats are managed as uintptrs,
which means they can easily overflow on 32-bit systems. Fix this by
storing these stats as uint64s. This will cause some minor performance
degradation on 32-bit systems, but there is no real way around it: the
overflow affects the correctness of the metrics we export.
For #52680.
Fixes #52688.
Change-Id: I8b1926116e899ae9f03d58e0320bcb9264945b3e
Reviewed-on: https://go-review.googlesource.com/c/go/+/411496
Run-TryBot: Michael Knyszek <mknyszek@google.com>
TryBot-Result: Gopher Robot <gobot@golang.org>
Reviewed-by: Michael Pratt <mpratt@google.com>
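
The overflow the message describes is easy to reproduce. Below is a minimal
sketch in plain Go, using a uint32 counter as a stand-in for uintptr on a
32-bit system; none of this is runtime code, and the variable names are
made up:

package main

import "fmt"

func main() {
	// On a 32-bit system uintptr is 32 bits wide, so a cumulative
	// allocation counter wraps once the running total passes ^uint32(0),
	// i.e. after roughly 4 GiB worth of allocations.
	var total32 uint32 = ^uint32(0) // counter already at its maximum
	total32++                       // one more allocation...
	fmt.Println(total32)            // 0 -- the exported metric is now wrong

	// Widening the stat to uint64 keeps the running total exact
	// on 32-bit and 64-bit platforms alike.
	var total64 uint64 = uint64(^uint32(0))
	total64++
	fmt.Println(total64) // 4294967296
}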
Diffstat (limited to 'src/runtime/mcache.go')
 src/runtime/mcache.go | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)
diff --git a/src/runtime/mcache.go b/src/runtime/mcache.go
index a9e959109a..99303358be 100644
--- a/src/runtime/mcache.go
+++ b/src/runtime/mcache.go
@@ -175,11 +175,11 @@ func (c *mcache) refill(spc spanClass) {
 	// Assume all objects from this span will be allocated in the
 	// mcache. If it gets uncached, we'll adjust this.
 	stats := memstats.heapStats.acquire()
-	atomic.Xadduintptr(&stats.smallAllocCount[spc.sizeclass()], uintptr(s.nelems)-uintptr(s.allocCount))
+	atomic.Xadd64(&stats.smallAllocCount[spc.sizeclass()], int64(s.nelems)-int64(s.allocCount))
 
 	// Flush tinyAllocs.
 	if spc == tinySpanClass {
-		atomic.Xadduintptr(&stats.tinyAllocCount, c.tinyAllocs)
+		atomic.Xadd64(&stats.tinyAllocCount, int64(c.tinyAllocs))
 		c.tinyAllocs = 0
 	}
 	memstats.heapStats.release()
@@ -229,8 +229,8 @@ func (c *mcache) allocLarge(size uintptr, needzero bool, noscan bool) (*mspan, b
 		throw("out of memory")
 	}
 	stats := memstats.heapStats.acquire()
-	atomic.Xadduintptr(&stats.largeAlloc, npages*pageSize)
-	atomic.Xadduintptr(&stats.largeAllocCount, 1)
+	atomic.Xadd64(&stats.largeAlloc, int64(npages*pageSize))
+	atomic.Xadd64(&stats.largeAllocCount, 1)
 	memstats.heapStats.release()
 
 	// Update gcController.heapLive and revise pacing if needed.
@@ -261,9 +261,9 @@ func (c *mcache) releaseAll() {
 		s := c.alloc[i]
 		if s != &emptymspan {
 			// Adjust nsmallalloc in case the span wasn't fully allocated.
-			n := uintptr(s.nelems) - uintptr(s.allocCount)
+			n := int64(s.nelems) - int64(s.allocCount)
 			stats := memstats.heapStats.acquire()
-			atomic.Xadduintptr(&stats.smallAllocCount[spanClass(i).sizeclass()], -n)
+			atomic.Xadd64(&stats.smallAllocCount[spanClass(i).sizeclass()], -n)
 			memstats.heapStats.release()
 			if s.sweepgen != sg+1 {
 				// refill conservatively counted unallocated slots in gcController.heapLive.
@@ -273,7 +273,7 @@
 				// gcController.heapLive was totally recomputed since
 				// caching this span, so we don't do this for
 				// stale spans.
-				atomic.Xadd64(&gcController.heapLive, -int64(n)*int64(s.elemsize))
+				atomic.Xadd64(&gcController.heapLive, -n*int64(s.elemsize))
 			}
 			// Release the span to the mcentral.
 			mheap_.central[i].mcentral.uncacheSpan(s)
@@ -286,7 +286,7 @@
 
 	// Flush tinyAllocs.
 	stats := memstats.heapStats.acquire()
-	atomic.Xadduintptr(&stats.tinyAllocCount, c.tinyAllocs)
+	atomic.Xadd64(&stats.tinyAllocCount, int64(c.tinyAllocs))
 	c.tinyAllocs = 0
 	memstats.heapStats.release()
 }
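
A second aspect of the change shows up in refill and releaseAll: the delta
is now computed as a signed int64, so the same Xadd64 call can both credit a
counter (refill optimistically counts every free slot in the span as
allocated) and debit it later (releaseAll subtracts the slots that were never
used). Below is a rough sketch of that pattern using the public sync/atomic
package; the runtime's internal atomic package, the heapStats acquire/release
protocol, and the real stats layout are elided, and all names here are
hypothetical:

package main

import (
	"fmt"
	"sync/atomic"
)

// heapCounters mirrors the shape of the runtime's consistent stats:
// unsigned 64-bit totals that are adjusted with signed deltas.
type heapCounters struct {
	smallAllocCount [68]uint64 // one slot per size class (68 in the runtime)
}

// add credits (positive delta) or debits (negative delta) one size class.
// Adding the two's-complement bit pattern of a negative int64 to a uint64
// subtracts, which is how a single Xadd64 call serves both directions.
func (h *heapCounters) add(sizeclass int, delta int64) {
	atomic.AddUint64(&h.smallAllocCount[sizeclass], uint64(delta))
}

func main() {
	var h heapCounters
	nelems, allocCount := int64(512), int64(500)

	// refill: assume every remaining free slot will be allocated.
	h.add(3, nelems-allocCount)

	// releaseAll: the span came back with those 12 slots still free,
	// so undo the optimistic count with a negative delta.
	h.add(3, -(nelems - allocCount))

	fmt.Println(h.smallAllocCount[3]) // 0
}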