path: root/src/runtime/mgcscavenge.go
author     Michael Anthony Knyszek <mknyszek@google.com>  2019-11-14 23:58:50 +0000
committer  Michael Knyszek <mknyszek@google.com>          2019-12-03 17:35:06 +0000
commit     acf3ff2e8a0ee777a35b42879c90a1d5a130988f (patch)
tree       0581b87e65f969de22999d9ebaafefddf22c9606 /src/runtime/mgcscavenge.go
parent     2ac1ca9160f52907ce1cd04738c80b1c055b5ba6 (diff)
runtime: convert page allocator bitmap to sparse array
Currently the page allocator bitmap is implemented as a single giant memory mapping which is reserved at init time and committed as needed. This causes problems on systems that don't handle large uncommitted mappings well, or that institute low virtual address space defaults as a memory limiting mechanism.

This change modifies the implementation of the page allocator bitmap away from a directly-mapped set of bytes to a sparse array in the same vein as mheap.arenas. This will hurt performance a little, but the biggest gains come from the lockless allocation possible with the page allocator, so the impact of this extra layer of indirection should be minimal. In fact, this is exactly what we see:

    https://perf.golang.org/search?q=upload:20191125.5

This reduces the amount of mapped (PROT_NONE) memory needed on systems with 48-bit address spaces to ~600 MiB, down from almost 9 GiB. The bulk of this remaining memory is used by the summaries.

Go processes with 32-bit address spaces now always commit 128 KiB of memory for the bitmap. Previously they would only commit the pages in the bitmap which represented the range of addresses (lowest address to highest address, even if there are unused regions in that range) used by the heap.

Updates #35568.
Updates #35451.

Change-Id: I0ff10380156568642b80c366001eefd0a4e6c762
Reviewed-on: https://go-review.googlesource.com/c/go/+/207497
Run-TryBot: Michael Knyszek <mknyszek@google.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Austin Clements <austin@google.com>
Reviewed-by: Cherry Zhang <cherryyz@google.com>
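For readers unfamiliar with the mheap.arenas layout, here is a minimal standalone sketch of the two-level ("sparse array") indexing scheme the change moves to. The constants, the ensureL2 helper, and the simplified pallocData are illustrative assumptions, not the runtime's real definitions; only the chunkOf accessor pattern (L1 index, then L2 index) mirrors what the diff below relies on.

package main

import "fmt"

// Illustrative constants; the runtime's real split depends on the platform.
const (
	l1Bits = 13
	l2Bits = 13
)

// chunkIdx identifies one palloc chunk of the address space.
type chunkIdx uint

func (i chunkIdx) l1() uint { return uint(i) >> l2Bits }
func (i chunkIdx) l2() uint { return uint(i) & (1<<l2Bits - 1) }

// pallocData is a stand-in for the per-chunk alloc/scavenge bitmaps.
type pallocData struct{ bits [8]uint64 }

// pageAlloc holds an L1 array of pointers to L2 blocks. Entries stay nil
// until the heap grows into that region, so only in-use parts of the
// address space cost committed memory.
type pageAlloc struct {
	chunks [1 << l1Bits]*[1 << l2Bits]pallocData
}

// ensureL2 models what heap growth does: map in the L2 block covering ci.
func (s *pageAlloc) ensureL2(ci chunkIdx) {
	if s.chunks[ci.l1()] == nil {
		s.chunks[ci.l1()] = new([1 << l2Bits]pallocData)
	}
}

// chunkOf is the accessor the diff below switches to: a two-level lookup
// replaces direct indexing into one giant bitmap.
func (s *pageAlloc) chunkOf(ci chunkIdx) *pallocData {
	return &s.chunks[ci.l1()][ci.l2()]
}

func main() {
	s := new(pageAlloc)
	ci := chunkIdx(42)
	s.ensureL2(ci)
	fmt.Printf("chunk %d -> l1=%d l2=%d %p\n", ci, ci.l1(), ci.l2(), s.chunkOf(ci))
}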
Diffstat (limited to 'src/runtime/mgcscavenge.go')
-rw-r--r--  src/runtime/mgcscavenge.go  |  14 +++++++++-----
1 file changed, 9 insertions(+), 5 deletions(-)
diff --git a/src/runtime/mgcscavenge.go b/src/runtime/mgcscavenge.go
index c7bab59fb7..1f8dff90d1 100644
--- a/src/runtime/mgcscavenge.go
+++ b/src/runtime/mgcscavenge.go
@@ -420,7 +420,7 @@ func (s *pageAlloc) scavengeOne(max uintptr, locked bool) uintptr {
// continue if the summary says we can because that's how
// we can tell if parts of the address space are unused.
// See the comment on s.chunks in mpagealloc.go.
- base, npages := s.chunks[ci].findScavengeCandidate(chunkPageIndex(s.scavAddr), minPages, maxPages)
+ base, npages := s.chunkOf(ci).findScavengeCandidate(chunkPageIndex(s.scavAddr), minPages, maxPages)
// If we found something, scavenge it and return!
if npages != 0 {
@@ -450,8 +450,12 @@ func (s *pageAlloc) scavengeOne(max uintptr, locked bool) uintptr {
// Run over the chunk looking harder for a candidate. Again, we could
// race with a lot of different pieces of code, but we're just being
- // optimistic.
- if !s.chunks[i].hasScavengeCandidate(minPages) {
+ // optimistic. Make sure we load the l2 pointer atomically though, to
+ // avoid races with heap growth. It may or may not be possible to also
+ // see a nil pointer in this case if we do race with heap growth, but
+ // just defensively ignore the nils. This operation is optimistic anyway.
+ l2 := (*[1 << pallocChunksL2Bits]pallocData)(atomic.Loadp(unsafe.Pointer(&s.chunks[i.l1()])))
+ if l2 == nil || !l2[i.l2()].hasScavengeCandidate(minPages) {
continue
}
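The hunk above is the subtle part of this file's change: the L2 block pointer may be installed concurrently by heap growth, so it has to be loaded atomically, and a nil result is simply skipped. The runtime uses its internal atomic.Loadp for this; the sketch below reproduces the same pattern with sync/atomic.Pointer in ordinary user code, and everything else (the constants, the trivial hasScavengeCandidate) is a made-up stand-in rather than the runtime's logic.

package main

import (
	"fmt"
	"sync/atomic"
)

const l2Bits = 13 // illustrative, as in the earlier sketch

// pallocData is again a stand-in; the real check scans the chunk's bitmaps.
type pallocData struct{ bits [8]uint64 }

func (d *pallocData) hasScavengeCandidate(minPages uintptr) bool {
	return d.bits[0] != 0
}

// sparseChunks models the L1 array when its L2 block pointers can be
// installed concurrently by heap growth. sync/atomic.Pointer stands in
// for the runtime-internal atomic.Loadp used in the hunk above.
type sparseChunks struct {
	l1 [4]atomic.Pointer[[1 << l2Bits]pallocData]
}

// hasCandidate loads the L2 pointer atomically and treats a nil entry as
// "nothing to scavenge here", mirroring the defensive nil check above.
func (s *sparseChunks) hasCandidate(l1, l2 int, minPages uintptr) bool {
	block := s.l1[l1].Load()
	if block == nil {
		return false // raced with (or ran ahead of) heap growth; just skip.
	}
	return block[l2].hasScavengeCandidate(minPages)
}

func main() {
	var s sparseChunks
	fmt.Println(s.hasCandidate(0, 7, 1)) // false: L2 block not installed yet

	blk := new([1 << l2Bits]pallocData)
	blk[7].bits[0] = 1
	s.l1[0].Store(blk)                   // "heap growth" publishes the block
	fmt.Println(s.hasCandidate(0, 7, 1)) // true
}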
@@ -459,7 +463,7 @@ func (s *pageAlloc) scavengeOne(max uintptr, locked bool) uintptr {
lockHeap()
// Find, verify, and scavenge if we can.
- chunk := &s.chunks[i]
+ chunk := s.chunkOf(i)
base, npages := chunk.findScavengeCandidate(pallocChunkPages-1, minPages, maxPages)
if npages > 0 {
// We found memory to scavenge! Mark the bits and report that up.
@@ -488,7 +492,7 @@ func (s *pageAlloc) scavengeOne(max uintptr, locked bool) uintptr {
//
// s.mheapLock must be held.
func (s *pageAlloc) scavengeRangeLocked(ci chunkIdx, base, npages uint) {
- s.chunks[ci].scavenged.setRange(base, npages)
+ s.chunkOf(ci).scavenged.setRange(base, npages)
// Compute the full address for the start of the range.
addr := chunkBase(ci) + uintptr(base)*pageSize
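Finally, a small worked example of the address arithmetic in the last hunk: the scavenge range starts at chunkBase(ci) plus the page offset scaled by pageSize. The page and chunk sizes below match the runtime's usual values (8 KiB pages, 512 pages per chunk), but the zero arena base offset is an assumption to keep the numbers easy to follow.

package main

import "fmt"

const (
	pageSize         = 8192                        // bytes per runtime page
	pallocChunkPages = 512                         // pages per chunk
	pallocChunkBytes = pallocChunkPages * pageSize // 4 MiB per chunk
	arenaBaseOffset  = 0                           // assumed zero for simplicity
)

// chunkBase returns the start address of chunk ci under the assumptions above.
func chunkBase(ci uint) uintptr {
	return uintptr(ci)*pallocChunkBytes + arenaBaseOffset
}

func main() {
	ci, base := uint(3), uint(10) // chunk 3, page 10 within that chunk
	addr := chunkBase(ci) + uintptr(base)*pageSize
	fmt.Printf("scavenge range starts at %#x\n", addr) // 3*4MiB + 10*8KiB = 0xc14000
}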