author     Michael Anthony Knyszek <mknyszek@google.com>  2019-08-21 00:24:25 +0000
committer  Michael Knyszek <mknyszek@google.com>          2019-11-07 19:14:27 +0000
commit     73317080e12234defb59f84e2b5b15f69650b5d5 (patch)
tree       299810e966e35857625c1323fe0da5d3c6cd4c88 /src/runtime/mpagealloc.go
parent     39e8cb0faac7785f89b21246a45e8cf8d5bc7d95 (diff)
runtime: add scavenging code for new page allocator
This change adds a scavenger for the new page allocator along with
tests. The scavenger walks over the heap backwards once per GC, looking
for memory to scavenge. It walks across the heap without any lock held,
searching optimistically. If it finds what appears to be a scavenging
candidate it acquires the heap lock and attempts to verify it. Upon
verification it then scavenges.

Notably, unlike the old scavenger, it doesn't show any preference for
huge pages and instead follows a more strict last-page-first policy.

Updates #35112.

Change-Id: I0621ef73c999a471843eab2d1307ae5679dd18d6
Reviewed-on: https://go-review.googlesource.com/c/go/+/195697
Reviewed-by: Keith Randall <khr@golang.org>
Reviewed-by: Austin Clements <austin@google.com>
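The commit message describes the scavenger's control flow in prose: an optimistic backwards walk with no lock held, followed by a locked re-check before anything is actually returned to the OS. Below is a minimal Go sketch of that shape only; the names (heapState, findCandidate, scavengeOne) are invented for illustration and are not the runtime's API, and the maps stand in for the per-chunk bitmaps the real scavenger consults under mheap_.lock.

package main

import (
	"fmt"
	"sync"
)

// heapState is a stand-in for the page allocator's metadata.
type heapState struct {
	mu    sync.Mutex
	free  map[uintptr]bool // pages that are free
	dirty map[uintptr]bool // free pages not yet returned to the OS
}

// findCandidate walks addresses from high to low, returning the first
// page that looks scavengeable. It models the optimistic, lock-free
// search; this sketch is single-threaded, so reading without the lock
// is safe here.
func (h *heapState) findCandidate(from uintptr) (uintptr, bool) {
	for addr := from; addr > 0; addr-- {
		if h.free[addr] && h.dirty[addr] {
			return addr, true
		}
	}
	return 0, false
}

// scavengeOne re-checks the candidate under the lock and, if it is
// still free and unscavenged, "returns" it to the OS.
func (h *heapState) scavengeOne(addr uintptr) bool {
	h.mu.Lock()
	defer h.mu.Unlock()
	if !h.free[addr] || !h.dirty[addr] {
		return false // candidate no longer valid; keep searching
	}
	h.dirty[addr] = false // stands in for actually unmapping the page
	return true
}

func main() {
	h := &heapState{
		free:  map[uintptr]bool{3: true, 7: true, 9: true},
		dirty: map[uintptr]bool{7: true, 9: true},
	}
	// Walk the "heap" backwards, last page first, like the new scavenger.
	scavAddr := uintptr(10)
	for {
		addr, ok := h.findCandidate(scavAddr)
		if !ok {
			break
		}
		if h.scavengeOne(addr) {
			fmt.Printf("scavenged page at %d\n", addr)
		}
		scavAddr = addr - 1
	}
}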
Diffstat (limited to 'src/runtime/mpagealloc.go')
-rw-r--r--  src/runtime/mpagealloc.go  25
1 file changed, 23 insertions(+), 2 deletions(-)
diff --git a/src/runtime/mpagealloc.go b/src/runtime/mpagealloc.go
index bca3cd8149..cc65921d39 100644
--- a/src/runtime/mpagealloc.go
+++ b/src/runtime/mpagealloc.go
@@ -80,6 +80,11 @@ const (
// value in the shifted address space, but searchAddr is stored as a regular
// memory address. See arenaBaseOffset for details.
maxSearchAddr = ^uintptr(0) - arenaBaseOffset
+
+ // Minimum scavAddr value, which indicates that the scavenger is done.
+ //
+ // minScavAddr + arenaBaseOffset == 0
+ minScavAddr = (^arenaBaseOffset + 1) & uintptrMask
)
// Global chunk index.
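The minScavAddr expression is just the two's-complement negation of arenaBaseOffset masked to pointer width, which is why the documented invariant minScavAddr + arenaBaseOffset == 0 holds. A standalone check of that identity; the offset value below is a stand-in rather than the real arenaBaseOffset, and uintptrMask is modelled here as all ones:

package main

import "fmt"

func main() {
	// Stand-in value; the real arenaBaseOffset is platform-dependent.
	const arenaBaseOffset uintptr = 0x800000000000
	const uintptrMask = ^uintptr(0)

	// Two's-complement negation: ^x + 1 == -x (mod 2^pointer-width).
	minScavAddr := (^arenaBaseOffset + 1) & uintptrMask

	// The invariant from the comment above.
	fmt.Println(minScavAddr+arenaBaseOffset == 0) // true
}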
@@ -171,7 +176,7 @@ type pageAlloc struct {
// TODO(mknyszek): Consider changing the definition of the bitmap
// such that 1 means free and 0 means in-use so that summaries and
// the bitmaps align better on zero-values.
- chunks []pallocBits
+ chunks []pallocData
// The address to start an allocation search with.
//
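The chunks element type changes from pallocBits to pallocData, which pairs each chunk's allocation bitmap with a parallel "scavenged" bitmap. A rough, self-contained sketch of that pairing; treat the exact field layout and the 512-page chunk size as illustrative rather than authoritative:

package main

// pallocChunkPages models the number of pages per chunk; the exact
// value does not matter for the shape shown here.
const pallocChunkPages = 512

// pageBits is a fixed-size bitmap with one bit per page in a chunk.
type pageBits [pallocChunkPages / 64]uint64

// pallocBits marks which pages in a chunk are allocated.
type pallocBits pageBits

// pallocData pairs the allocation bitmap with a scavenged bitmap so
// both can be stored and indexed together per chunk.
type pallocData struct {
	pallocBits
	scavenged pageBits
}

func main() {
	var d pallocData
	_ = d.scavenged // both bitmaps travel together per chunk
}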
@@ -185,6 +190,9 @@ type pageAlloc struct {
// space on architectures with segmented address spaces.
searchAddr uintptr
+ // The address to start a scavenge candidate search with.
+ scavAddr uintptr
+
// start and end represent the chunk indices
// which pageAlloc knows about. It assumes
// chunks in the range [start, end) are
@@ -198,6 +206,9 @@ type pageAlloc struct {
// sysStat is the runtime memstat to update when new system
// memory is committed by the pageAlloc for allocation metadata.
sysStat *uint64
+
+ // Whether or not this struct is being used in tests.
+ test bool
}
func (s *pageAlloc) init(mheapLock *mutex, sysStat *uint64) {
@@ -217,6 +228,9 @@ func (s *pageAlloc) init(mheapLock *mutex, sysStat *uint64) {
// Start with the searchAddr in a state indicating there's no free memory.
s.searchAddr = maxSearchAddr
+ // Start with the scavAddr in a state indicating there's nothing more to do.
+ s.scavAddr = minScavAddr
+
// Reserve space for the bitmap and put this reservation
// into the chunks slice.
const maxChunks = (1 << heapAddrBits) / pallocChunkBytes
@@ -225,7 +239,7 @@ func (s *pageAlloc) init(mheapLock *mutex, sysStat *uint64) {
throw("failed to reserve page bitmap memory")
}
sl := notInHeapSlice{(*notInHeap)(r), 0, maxChunks}
- s.chunks = *(*[]pallocBits)(unsafe.Pointer(&sl))
+ s.chunks = *(*[]pallocData)(unsafe.Pointer(&sl))
// Set the mheapLock.
s.mheapLock = mheapLock
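Only the element type changes on that line; the existing trick of viewing a raw reservation as a typed slice by hand-building a slice header stays the same. The sketch below shows the same pattern in ordinary Go with unsafe.Slice, using an invented chunkData element and make-allocated memory in place of the runtime's sysReserve/notInHeap machinery:

package main

import (
	"fmt"
	"unsafe"
)

// chunkData is a stand-in element type for the runtime's pallocData.
type chunkData struct {
	bits      [8]uint64
	scavenged [8]uint64
}

func main() {
	const maxChunks = 4

	// Stand-in for the reserved region r; the runtime reserves this
	// address space up front and maps pages lazily.
	backing := make([]uint64, maxChunks*int(unsafe.Sizeof(chunkData{}))/8)

	// View that memory as a []chunkData with length 0 and capacity
	// maxChunks, mirroring notInHeapSlice{(*notInHeap)(r), 0, maxChunks}.
	chunks := unsafe.Slice((*chunkData)(unsafe.Pointer(&backing[0])), maxChunks)[:0]

	fmt.Println(len(chunks), cap(chunks)) // 0 4
}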
@@ -350,6 +364,13 @@ func (s *pageAlloc) grow(base, size uintptr) {
s.searchAddr = base
}
+ // Newly-grown memory is always considered scavenged.
+ //
+ // Set all the bits in the scavenged bitmaps high.
+ for c := chunkIndex(base); c < chunkIndex(limit); c++ {
+ s.chunks[c].scavenged.setRange(0, pallocChunkPages)
+ }
+
// Update summaries accordingly. The grow acts like a free, so
// we need to ensure this newly-free memory is visible in the
// summaries.
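grow marks every page of a newly mapped chunk as scavenged by setting a run of bits in that chunk's scavenged bitmap. A minimal standalone version of that kind of range-set; the real pageBits.setRange works over a fixed-size array and sets whole words at a time, so this naive loop is only meant to show the effect:

package main

import "fmt"

// setRange sets npages consecutive bits starting at bit i, the same
// shape of operation grow performs on each new chunk's scavenged bitmap.
func setRange(bits []uint64, i, npages uint) {
	for j := i; j < i+npages; j++ {
		bits[j/64] |= 1 << (j % 64)
	}
}

func main() {
	// One chunk's worth of per-page bits (512 pages => 8 words).
	scavenged := make([]uint64, 8)

	// Mark every page in the newly grown chunk as scavenged.
	setRange(scavenged, 0, 512)
	fmt.Printf("%#x\n", scavenged[0]) // 0xffffffffffffffff
}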