author     Michael Anthony Knyszek <mknyszek@google.com>  2019-06-25 19:06:57 +0000
committer  Andrew Bonventre <andybons@golang.org>         2019-10-04 17:11:45 +0000
commit     66b8787f6bb461838dbf482d021c30daeef6dfa6 (patch)
tree       7193e9a8cb4fb7c2acfc51108241d8f0611f3a48
parent     cd951aeec4e658b3a1f05362c9655791427d6de6 (diff)
[release-branch.go1.13] runtime: scavenge on growth instead of inline with allocation
Inline scavenging causes significant performance regressions in tail latency
for k8s and has relatively little benefit for RSS footprint.

We disabled inline scavenging in Go 1.12.5 (CL 174102) as well, but we thought
other changes in Go 1.13 had mitigated the issues with inline scavenging.
Apparently we were wrong.

This CL switches back to only doing foreground scavenging on heap growth,
rather than doing it when allocation tries to allocate from scavenged space.

Fixes #34556

Change-Id: I1f5df44046091f0b4f89fec73c2cde98bf9448cb
Reviewed-on: https://go-review.googlesource.com/c/go/+/183857
Run-TryBot: Austin Clements <austin@google.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Keith Randall <khr@golang.org>
Reviewed-by: Michael Knyszek <mknyszek@google.com>
(cherry picked from commit eb96f8a57444d174bba500b3a5d2a8b21b7e6d1e)
Reviewed-on: https://go-review.googlesource.com/c/go/+/198486
Reviewed-by: Austin Clements <austin@google.com>
Run-TryBot: Andrew Bonventre <andybons@golang.org>
-rw-r--r--  src/runtime/mheap.go  14
1 file changed, 4 insertions(+), 10 deletions(-)
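
The commit message above describes the policy change in prose; what follows is
a minimal, self-contained Go sketch of the idea, not the runtime's code. Every
name in it (toyHeap, its fields, the toy pageSize) is a hypothetical stand-in.
Only the shape of the logic mirrors the diff below: scavenge up front,
proportional to the newly mapped size, and only if the growth would push
retained memory past the goal, instead of scavenging inline on each allocation
out of a scavenged span.

package main

import "fmt"

const pageSize = 8 << 10 // toy page size, not the runtime's constant

// toyHeap stands in for mheap. retained is mapped memory we are holding on
// to, goal is the retained-memory target (derived from scavenge pacing in
// the real runtime, fixed here), and free is free memory that could be
// returned to the OS.
type toyHeap struct {
	retained uintptr
	goal     uintptr
	free     uintptr
}

// scavengeIfNeeded returns free memory to the OS, but only if growing by
// size bytes would push retained memory past the goal. It plays the role
// of scavengeIfNeededLocked in the patch, without its implementation.
func (h *toyHeap) scavengeIfNeeded(size uintptr) {
	if h.retained+size <= h.goal {
		return // the growth still fits under the goal
	}
	todo := h.retained + size - h.goal
	if todo > h.free {
		todo = h.free
	}
	h.free -= todo
	h.retained -= todo
	fmt.Printf("scavenged %d bytes ahead of a %d-byte heap growth\n", todo, size)
}

// growAddSpan models the patched behavior: pay the scavenging cost when the
// heap maps new memory, not on every allocation out of a scavenged span.
func (h *toyHeap) growAddSpan(size uintptr) {
	h.scavengeIfNeeded(size)
	h.retained += size
	h.free += size // the new span starts out free
}

func main() {
	h := &toyHeap{retained: 64 * pageSize, goal: 64 * pageSize, free: 32 * pageSize}
	h.growAddSpan(16 * pageSize)
}
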
diff --git a/src/runtime/mheap.go b/src/runtime/mheap.go
index 6721f3a8bb..3807050cbe 100644
--- a/src/runtime/mheap.go
+++ b/src/runtime/mheap.go
@@ -1227,16 +1227,6 @@ HaveSpan:
// heap_released since we already did so earlier.
sysUsed(unsafe.Pointer(s.base()), s.npages<<_PageShift)
s.scavenged = false
-
- // Since we allocated out of a scavenged span, we just
- // grew the RSS. Mitigate this by scavenging enough free
- // space to make up for it but only if we need to.
- //
- // scavengeLocked may cause coalescing, so prevent
- // coalescing with s by temporarily changing its state.
- s.state = mSpanManual
- h.scavengeIfNeededLocked(s.npages * pageSize)
- s.state = mSpanFree
}
h.setSpans(s.base(), npage, s)
@@ -1312,6 +1302,10 @@ func (h *mheap) grow(npage uintptr) bool {
//
// h must be locked.
func (h *mheap) growAddSpan(v unsafe.Pointer, size uintptr) {
+ // Scavenge some pages to make up for the virtual memory space
+ // we just allocated, but only if we need to.
+ h.scavengeIfNeededLocked(size)
+
s := (*mspan)(h.spanalloc.alloc())
s.init(uintptr(v), size/pageSize)
h.setSpans(s.base(), s.npages, s)
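
Taken together, the two hunks move foreground scavenging off the allocation
path (the HaveSpan block in the first hunk no longer scavenges when an
allocation is satisfied from a scavenged span) and onto the heap-growth path
in growAddSpan. The cost is now paid only when new virtual memory is mapped,
which is what the commit message is after: the per-allocation scavenging work
was what showed up as k8s tail latency, while its RSS benefit was small.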