author     Michael Anthony Knyszek <mknyszek@google.com>  2022-07-07 20:01:21 +0000
committer  Michael Knyszek <mknyszek@google.com>          2022-07-07 21:15:48 +0000
commit     1ebc983000ed411a1c06f6b8a61770be1392e707 (patch)
tree       4f6db80ea3b089a54ee34456e2ea5a50d07656b6
parent     c177d9d98a7bfb21346f6309c115d0a2bf3167e3 (diff)
runtime: overestimate the amount of allocated memory in heapLive
CL 377516 made it so that memory metrics are truly monotonic, but it
also updated how heapLive tracked allocated memory so that it, too,
was monotonic. The result is that cached spans with allocated memory
aren't fully accounted for by the GC, causing it to make a worse
assumption (the exact mechanism is at this time unknown), resulting
in a memory regression, especially for smaller heaps.

This change is a partial revert of CL 377516 that makes heapLive a
non-monotonic overestimate again, which appears to resolve the
regression.

For #53738.

Change-Id: I5c51067abc0b8e0a6b89dd8dbd4a0be2e8c0c1b2
Reviewed-on: https://go-review.googlesource.com/c/go/+/416417
Reviewed-by: Michael Pratt <mpratt@google.com>
Run-TryBot: Michael Knyszek <mknyszek@google.com>
TryBot-Result: Gopher Robot <gobot@golang.org>
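To make the accounting concrete, here is a small self-contained sketch of the idea described above, not the runtime's actual code: span and pacer are hypothetical stand-ins for mspan and gcController, and the field names only loosely mirror the real ones. Caching a span charges all of its remaining capacity to heapLive up front (the overestimate); uncaching credits back whatever never got allocated.

package main

import "fmt"

// span is a hypothetical stand-in for the runtime's mspan.
type span struct {
	npages, pageSize uint64 // span size in pages, bytes per page
	elemsize         uint64 // bytes per object slot
	nelems           uint64 // total object slots in the span
	allocCount       uint64 // slots currently allocated
}

// pacer is a hypothetical stand-in for gcController.
type pacer struct{ heapLive int64 }

// cacheSpan charges the span's entire remaining capacity to heapLive,
// as refill now does: heapLive becomes an overestimate because every
// unallocated slot is assumed to get used.
func (p *pacer) cacheSpan(s *span) {
	usedBytes := s.allocCount * s.elemsize
	p.heapLive += int64(s.npages*s.pageSize) - int64(usedBytes)
}

// uncacheSpan credits back the slots that never got allocated,
// mirroring the dHeapLive correction in releaseAll.
func (p *pacer) uncacheSpan(s *span) {
	p.heapLive -= int64((s.nelems - s.allocCount) * s.elemsize)
}

func main() {
	var p pacer
	s := &span{npages: 1, pageSize: 8192, elemsize: 64, nelems: 128}
	p.cacheSpan(s)          // all 128 slots charged up front: heapLive = 8192
	s.allocCount = 100      // only 100 slots actually get allocated
	p.uncacheSpan(s)        // credit back the 28 slots that went unused
	fmt.Println(p.heapLive) // 6400, i.e. 100 slots * 64 bytes truly allocated
}

Between cacheSpan and uncacheSpan the pacer sees 8192 bytes rather than the (at most) 6400 actually allocated; the patch deliberately prefers that overestimate, since an underestimate makes the pacer believe it is in better shape than it is.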
-rw-r--r--  src/runtime/mcache.go  36
1 file changed, 30 insertions(+), 6 deletions(-)
diff --git a/src/runtime/mcache.go b/src/runtime/mcache.go
index 7c785900db..1f484fb9b6 100644
--- a/src/runtime/mcache.go
+++ b/src/runtime/mcache.go
@@ -173,10 +173,6 @@ func (c *mcache) refill(spc spanClass) {
 		bytesAllocated := slotsUsed * int64(s.elemsize)
 		gcController.totalAlloc.Add(bytesAllocated)
 
-		// Update heapLive and flush scanAlloc.
-		gcController.update(bytesAllocated, int64(c.scanAlloc))
-		c.scanAlloc = 0
-
 		// Clear the second allocCount just to be safe.
 		s.allocCountBeforeCache = 0
 	}
@@ -198,6 +194,23 @@ func (c *mcache) refill(spc spanClass) {
 	// Store the current alloc count for accounting later.
 	s.allocCountBeforeCache = s.allocCount
 
+	// Update heapLive and flush scanAlloc.
+	//
+	// We have not yet allocated anything new into the span, but we
+	// assume that all of its slots will get used, so this makes
+	// heapLive an overestimate.
+	//
+	// When the span gets uncached, we'll fix up this overestimate
+	// if necessary (see releaseAll).
+	//
+	// We pick an overestimate here because an underestimate leads
+	// the pacer to believe that it's in better shape than it is,
+	// which appears to lead to more memory used. See #53738 for
+	// more details.
+	usedBytes := uintptr(s.allocCount) * s.elemsize
+	gcController.update(int64(s.npages*pageSize)-int64(usedBytes), int64(c.scanAlloc))
+	c.scanAlloc = 0
+
 	c.alloc[spc] = s
 }
 
@@ -247,6 +260,8 @@ func (c *mcache) releaseAll() {
 	scanAlloc := int64(c.scanAlloc)
 	c.scanAlloc = 0
 
+	sg := mheap_.sweepgen
+	dHeapLive := int64(0)
 	for i := range c.alloc {
 		s := c.alloc[i]
 		if s != &emptymspan {
@@ -262,6 +277,15 @@ func (c *mcache) releaseAll() {
 			// We assumed earlier that the full span gets allocated.
 			gcController.totalAlloc.Add(slotsUsed * int64(s.elemsize))
 
+			if s.sweepgen != sg+1 {
+				// refill conservatively counted unallocated slots in gcController.heapLive.
+				// Undo this.
+				//
+				// If this span was cached before sweep, then gcController.heapLive was totally
+				// recomputed since caching this span, so we don't do this for stale spans.
+				dHeapLive -= int64(uintptr(s.nelems)-uintptr(s.allocCount)) * int64(s.elemsize)
+			}
+
 			// Release the span to the mcentral.
 			mheap_.central[i].mcentral.uncacheSpan(s)
 			c.alloc[i] = &emptymspan
@@ -277,8 +301,8 @@ func (c *mcache) releaseAll() {
 	c.tinyAllocs = 0
 	memstats.heapStats.release()
 
-	// Updated heapScan.
-	gcController.update(0, scanAlloc)
+	// Update heapLive and heapScan.
+	gcController.update(dHeapLive, scanAlloc)
 }
 
 // prepareForSweep flushes c if the system has entered a new sweep phase
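A note on the s.sweepgen != sg+1 guard above: by the sweepgen encoding documented in src/runtime/mheap.go, a cached span whose sweepgen equals mheap_.sweepgen+1 was cached before the current sweep phase began, and gcController.heapLive has been recomputed since then, so that span's overestimate is already gone and must not be subtracted again. A minimal sketch of just that predicate, with a hypothetical function name:

// staleCachedSpan reports whether a cached span predates the current
// sweep phase. A sketch, assuming the sweepgen encoding documented in
// src/runtime/mheap.go: spanSweepgen == heapSweepgen+1 means the span
// was cached before sweep began and is still cached.
func staleCachedSpan(spanSweepgen, heapSweepgen uint32) bool {
	return spanSweepgen == heapSweepgen+1
}

releaseAll applies the dHeapLive correction only when this predicate is false, matching the in-diff comment about stale spans.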