aboutsummaryrefslogtreecommitdiff
path: root/src/runtime/mpagealloc.go
diff options
context:
space:
mode:
author    Michael Anthony Knyszek <mknyszek@google.com>  2019-11-21 17:05:14 +0000
committer Michael Knyszek <mknyszek@google.com>  2020-05-08 16:24:40 +0000
commit 55ec5182d7b84eb2461c495a55984162b23f3df8 (patch)
tree   87d24203820c776f090d73e93fd931cbfdcffdb4 /src/runtime/mpagealloc.go
parent b1a48af7e8ee87cc46e1bbb07f81ac4853e0f27b (diff)
downloadgo-55ec5182d7b84eb2461c495a55984162b23f3df8.tar.gz
go-55ec5182d7b84eb2461c495a55984162b23f3df8.zip
runtime: remove scavAddr in favor of address ranges
This change removes the concept of s.scavAddr in favor of explicitly reserving and unreserving address ranges. s.scavAddr has several problems with raciness that can cause the scavenger to miss updates, or move it back unnecessarily, forcing future scavenge calls to iterate over searched address space unnecessarily. This change achieves this by replacing scavAddr with a second addrRanges which is cloned from s.inUse at the end of each sweep phase. Ranges from this second addrRanges are then reserved by scavengers (with the reservation size proportional to the heap size) who are then able to safely iterate over those ranges without worry of another scavenger coming in. Fixes #35788. Change-Id: Ief01ae170384174875118742f6c26b2a41cbb66d Reviewed-on: https://go-review.googlesource.com/c/go/+/208378 Run-TryBot: Michael Knyszek <mknyszek@google.com> TryBot-Result: Gobot Gobot <gobot@golang.org> Reviewed-by: David Chase <drchase@google.com> Reviewed-by: Austin Clements <austin@google.com>
Diffstat (limited to 'src/runtime/mpagealloc.go')
-rw-r--r--  src/runtime/mpagealloc.go  36
1 file changed, 19 insertions, 17 deletions
diff --git a/src/runtime/mpagealloc.go b/src/runtime/mpagealloc.go
index 60926fbebf..771cb3a3ba 100644
--- a/src/runtime/mpagealloc.go
+++ b/src/runtime/mpagealloc.go
@@ -88,11 +88,6 @@ const (
// value in the shifted address space, but searchAddr is stored as a regular
// memory address. See arenaBaseOffset for details.
maxSearchAddr = ^uintptr(0) - arenaBaseOffset
-
- // Minimum scavAddr value, which indicates that the scavenger is done.
- //
- // minScavAddr + arenaBaseOffset == 0
- minScavAddr = (^arenaBaseOffset + 1) & uintptrMask
)
// Global chunk index.
@@ -239,15 +234,6 @@ type pageAlloc struct {
// space on architectures with segmented address spaces.
searchAddr uintptr
- // The address to start a scavenge candidate search with. It
- // need not point to memory contained in inUse.
- scavAddr uintptr
-
- // The amount of memory scavenged since the last scavtrace print.
- //
- // Read and updated atomically.
- scavReleased uintptr
-
// start and end represent the chunk indices
// which pageAlloc knows about. It assumes
// chunks in the range [start, end) are
@@ -267,6 +253,25 @@ type pageAlloc struct {
// All access is protected by the mheapLock.
inUse addrRanges
+ // scav stores the scavenger state.
+ //
+ // All fields are protected by mheapLock.
+ scav struct {
+ // inUse is a slice of ranges of address space which have not
+ // yet been looked at by the scavenger.
+ inUse addrRanges
+
+ // gen is the scavenge generation number.
+ gen uint32
+
+ // reservationBytes is how large of a reservation should be made
+ // in bytes of address space for each scavenge iteration.
+ reservationBytes uintptr
+
+ // released is the amount of memory released this generation.
+ released uintptr
+ }
+
// mheap_.lock. This level of indirection makes it possible
// to test pageAlloc independently of the runtime allocator.
mheapLock *mutex
@@ -299,9 +304,6 @@ func (s *pageAlloc) init(mheapLock *mutex, sysStat *uint64) {
// Start with the searchAddr in a state indicating there's no free memory.
s.searchAddr = maxSearchAddr
- // Start with the scavAddr in a state indicating there's nothing more to do.
- s.scavAddr = minScavAddr
-
// Set the mheapLock.
s.mheapLock = mheapLock
}