author     Michael Anthony Knyszek <mknyszek@google.com>  2020-04-28 21:09:17 +0000
committer  Michael Knyszek <mknyszek@google.com>          2020-05-08 16:31:00 +0000
commit  d69509ff995bf3b92246365980e3d27eaf720e6a (patch)
tree    ab6ba85484feb81a8a9b850ce131e2bcd56bb55c /src/runtime/mgcscavenge.go
parent  dba1205b2fc458829e783bd0a4d1eff7231ae16c (diff)
runtime: make addrRange[s] operate on offset addresses
Currently addrRange and addrRanges operate on real addresses. That is, the
addresses they manipulate don't include arenaBaseOffset. When added to an
address, arenaBaseOffset makes the address space appear contiguous on
platforms where the address space is segmented.

While this is generally OK because even those platforms which have a
segmented address space usually don't give addresses in a different segment,
today it causes a mismatch between the scavenger and the rest of the page
allocator. The scavenger scavenges from the highest addresses first, but only
via real address, whereas the page allocator allocates memory in offset
address order.

So this change makes addrRange and addrRanges, i.e. what the scavenger
operates on, use offset addresses. However, lots of the page allocator relies
on an addrRange containing real addresses.

To make this transition less error-prone, this change introduces a new type,
offAddr, whose purpose is to make offset addresses a distinct type, so any
attempt to trivially mix real and offset addresses will trigger a compilation
error.

This change doesn't attempt to use offAddr in all of the runtime; a follow-up
change will look for and catch remaining uses of an offset address which
doesn't use the type.

Updates #35788.

Change-Id: I991d891ac8ace8339ca180daafdf6b261a4d43d1
Reviewed-on: https://go-review.googlesource.com/c/go/+/230717
Run-TryBot: Michael Knyszek <mknyszek@google.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Austin Clements <austin@google.com>
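
The heart of the change is the distinct-type trick the message describes:
wrapping an offset address in a one-field struct so that the compiler rejects
any trivial mixing with plain uintptr addresses. Below is a minimal,
self-contained sketch of that technique; the offAddr and addrRange definitions
here are illustrative stand-ins rather than the runtime's actual declarations,
and the 4096-byte alignment is just an example value.

package main

import "fmt"

// offAddr makes an offset address a distinct type. Because it is a
// struct rather than a defined uintptr, it cannot be added, compared,
// or assigned across the real/offset boundary without an explicit
// conversion; any trivial mixing is a compile-time error.
type offAddr struct {
	a uintptr
}

// addr unwraps the value. Funneling every use through one accessor
// makes each crossing of the real/offset boundary explicit and easy
// to audit.
func (l offAddr) addr() uintptr { return l.a }

// addrRange mirrors the shape the commit gives the scavenger's
// ranges: base and limit are offset addresses, and limit is exclusive.
type addrRange struct {
	base, limit offAddr
}

func (r addrRange) size() uintptr { return r.limit.addr() - r.base.addr() }

func main() {
	r := addrRange{base: offAddr{0x1000}, limit: offAddr{0x3000}}

	// _ = r.base % 4096 // does not compile: offAddr is not numeric
	aligned := r.base.addr()%4096 == 0 // explicit unwrap, as in the diff below
	fmt.Println(r.size(), aligned)
}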
Diffstat (limited to 'src/runtime/mgcscavenge.go')
-rw-r--r--  src/runtime/mgcscavenge.go  22
1 file changed, 11 insertions(+), 11 deletions(-)
diff --git a/src/runtime/mgcscavenge.go b/src/runtime/mgcscavenge.go
index 069f267130..4dacfa0a5c 100644
--- a/src/runtime/mgcscavenge.go
+++ b/src/runtime/mgcscavenge.go
@@ -508,11 +508,11 @@ func (s *pageAlloc) scavengeReserve() (addrRange, uint32) {
// palloc chunk because that's the unit of operation for
// the scavenger, so align down, potentially extending
// the range.
- newBase := alignDown(r.base, pallocChunkBytes)
+ newBase := alignDown(r.base.addr(), pallocChunkBytes)
// Remove from inUse however much extra we just pulled out.
s.scav.inUse.removeGreaterEqual(newBase)
- r.base = newBase
+ r.base = offAddr{newBase}
return r, s.scav.gen
}
@@ -528,7 +528,7 @@ func (s *pageAlloc) scavengeUnreserve(r addrRange, gen uint32) {
if r.size() == 0 || gen != s.scav.gen {
return
}
- if r.base%pallocChunkBytes != 0 {
+ if r.base.addr()%pallocChunkBytes != 0 {
throw("unreserving unaligned region")
}
s.scav.inUse.add(r)
@@ -559,7 +559,7 @@ func (s *pageAlloc) scavengeOne(work addrRange, max uintptr, mayUnlock bool) (ui
return 0, work
}
// Check the prerequisites of work.
- if work.base%pallocChunkBytes != 0 {
+ if work.base.addr()%pallocChunkBytes != 0 {
throw("scavengeOne called with unaligned work region")
}
// Calculate the maximum number of pages to scavenge.
@@ -598,9 +598,9 @@ func (s *pageAlloc) scavengeOne(work addrRange, max uintptr, mayUnlock bool) (ui
// Fast path: check the chunk containing the top-most address in work,
// starting at that address's page index in the chunk.
//
- // Note that work.limit is exclusive, so get the chunk we care about
+ // Note that work.end() is exclusive, so get the chunk we care about
// by subtracting 1.
- maxAddr := work.limit - 1
+ maxAddr := work.limit.addr() - 1
maxChunk := chunkIndex(maxAddr)
if s.summary[len(s.summary)-1][maxChunk].max() >= uint(minPages) {
// We only bother looking for a candidate if there at least
@@ -609,12 +609,12 @@ func (s *pageAlloc) scavengeOne(work addrRange, max uintptr, mayUnlock bool) (ui
// If we found something, scavenge it and return!
if npages != 0 {
- work.limit = s.scavengeRangeLocked(maxChunk, base, npages)
+ work.limit = offAddr{s.scavengeRangeLocked(maxChunk, base, npages)}
return uintptr(npages) * pageSize, work
}
}
// Update the limit to reflect the fact that we checked maxChunk already.
- work.limit = chunkBase(maxChunk)
+ work.limit = offAddr{chunkBase(maxChunk)}
// findCandidate finds the next scavenge candidate in work optimistically.
//
@@ -623,7 +623,7 @@ func (s *pageAlloc) scavengeOne(work addrRange, max uintptr, mayUnlock bool) (ui
// The heap need not be locked.
findCandidate := func(work addrRange) (chunkIdx, bool) {
// Iterate over this work's chunks.
- for i := chunkIndex(work.limit - 1); i >= chunkIndex(work.base); i-- {
+ for i := chunkIndex(work.limit.addr() - 1); i >= chunkIndex(work.base.addr()); i-- {
// If this chunk is totally in-use or has no unscavenged pages, don't bother
// doing a more sophisticated check.
//
@@ -673,12 +673,12 @@ func (s *pageAlloc) scavengeOne(work addrRange, max uintptr, mayUnlock bool) (ui
chunk := s.chunkOf(candidateChunkIdx)
base, npages := chunk.findScavengeCandidate(pallocChunkPages-1, minPages, maxPages)
if npages > 0 {
- work.limit = s.scavengeRangeLocked(candidateChunkIdx, base, npages)
+ work.limit = offAddr{s.scavengeRangeLocked(candidateChunkIdx, base, npages)}
return uintptr(npages) * pageSize, work
}
// We were fooled, so let's continue from where we left off.
- work.limit = chunkBase(candidateChunkIdx)
+ work.limit = offAddr{chunkBase(candidateChunkIdx)}
}
return 0, work
}
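
Every hunk above applies the same mechanical rewrite: call .addr() wherever a
raw uintptr is needed for arithmetic or comparison, and re-wrap the result
with offAddr{...} when storing a computed address back into the range.
Continuing the toy definitions from the sketch under the commit message, here
is a hedged version of the round-trip in the scavengeReserve hunk; alignDown
and the 4 MiB pallocChunkBytes value mirror the runtime, but the function body
is only an illustration.

// alignDown rounds addr down to a multiple of align, which must be
// a power of two; this mirrors the runtime helper used in the diff.
func alignDown(addr, align uintptr) uintptr {
	return addr &^ (align - 1)
}

// pallocChunkBytes is 4 MiB in the runtime (512 pages of 8 KiB).
const pallocChunkBytes uintptr = 512 * 8192

// reserveAligned shows the unwrap/re-wrap round-trip from the first
// hunk: the alignment arithmetic happens on the raw address, and the
// result goes back behind the offAddr type before it is stored.
func reserveAligned(r addrRange) addrRange {
	newBase := alignDown(r.base.addr(), pallocChunkBytes)
	r.base = offAddr{newBase}
	return r
}

Because every such conversion is now spelled out, the follow-up change the
message promises can find the remaining unconverted offset-address uses by
searching for exactly these seams.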