diff options
author | Michael Anthony Knyszek <mknyszek@google.com> | 2019-11-21 17:05:14 +0000 |
---|---|---|
committer | Michael Knyszek <mknyszek@google.com> | 2020-05-08 16:24:40 +0000 |
commit | 55ec5182d7b84eb2461c495a55984162b23f3df8 (patch) | |
tree | 87d24203820c776f090d73e93fd931cbfdcffdb4 /src/runtime/export_test.go | |
parent | b1a48af7e8ee87cc46e1bbb07f81ac4853e0f27b (diff) | |
download | go-55ec5182d7b84eb2461c495a55984162b23f3df8.tar.gz go-55ec5182d7b84eb2461c495a55984162b23f3df8.zip |
runtime: remove scavAddr in favor of address ranges
This change removes the concept of s.scavAddr in favor of explicitly
reserving and unreserving address ranges. s.scavAddr has several
problems with raciness that can cause the scavenger to miss updates, or
move it back unnecessarily, forcing future scavenge calls to iterate
over already-searched address space.
It does so by replacing scavAddr with a second addrRanges
which is cloned from s.inUse at the end of each sweep phase. Ranges from
this second addrRanges are then reserved by scavengers (with the
reservation size proportional to the heap size), which can then
safely iterate over those ranges without worry of another scavenger
coming in.
Fixes #35788.
Change-Id: Ief01ae170384174875118742f6c26b2a41cbb66d
Reviewed-on: https://go-review.googlesource.com/c/go/+/208378
Run-TryBot: Michael Knyszek <mknyszek@google.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: David Chase <drchase@google.com>
Reviewed-by: Austin Clements <austin@google.com>
Diffstat (limited to 'src/runtime/export_test.go')
-rw-r--r-- | src/runtime/export_test.go | 13 |
1 files changed, 10 insertions, 3 deletions
diff --git a/src/runtime/export_test.go b/src/runtime/export_test.go index f2461f0cb0..01e1d0dc9e 100644 --- a/src/runtime/export_test.go +++ b/src/runtime/export_test.go @@ -735,9 +735,12 @@ func (p *PageAlloc) Free(base, npages uintptr) { func (p *PageAlloc) Bounds() (ChunkIdx, ChunkIdx) { return ChunkIdx((*pageAlloc)(p).start), ChunkIdx((*pageAlloc)(p).end) } -func (p *PageAlloc) Scavenge(nbytes uintptr, locked bool) (r uintptr) { +func (p *PageAlloc) Scavenge(nbytes uintptr, mayUnlock bool) (r uintptr) { + pp := (*pageAlloc)(p) systemstack(func() { - r = (*pageAlloc)(p).scavenge(nbytes, locked) + lock(pp.mheapLock) + r = pp.scavenge(nbytes, mayUnlock) + unlock(pp.mheapLock) }) return } @@ -819,7 +822,6 @@ func NewPageAlloc(chunks, scav map[ChunkIdx][]BitRange) *PageAlloc { } } } - p.resetScavengeAddr() // Apply alloc state. for _, s := range init { @@ -833,6 +835,11 @@ func NewPageAlloc(chunks, scav map[ChunkIdx][]BitRange) *PageAlloc { // Update heap metadata for the allocRange calls above. p.update(addr, pallocChunkPages, false, false) } + systemstack(func() { + lock(p.mheapLock) + p.scavengeStartGen() + unlock(p.mheapLock) + }) return (*PageAlloc)(p) } |