author    Michael Anthony Knyszek <mknyszek@google.com>  2019-09-10 18:53:51 +0000
committer Michael Knyszek <mknyszek@google.com>          2019-11-07 19:14:38 +0000
commit    e1ddf0507c6d6dcbe1a2ebe54b8728498edf0995 (patch)
tree      266bcd52947c4b4d79625e85a6f690f78c7059db /src/runtime/mpagealloc.go
parent    73317080e12234defb59f84e2b5b15f69650b5d5 (diff)
download  go-e1ddf0507c6d6dcbe1a2ebe54b8728498edf0995.tar.gz
          go-e1ddf0507c6d6dcbe1a2ebe54b8728498edf0995.zip
runtime: count scavenged bits for new allocation for new page allocator
This change makes it so that the new page allocator returns the number
of pages that are scavenged in a new allocation so that mheap can update
memstats appropriately.

The accounting could be embedded into pageAlloc, but that would make the
new allocator more difficult to test.

Updates #35112.

Change-Id: I0f94f563d7af2458e6d534f589d2e7dd6af26d12
Reviewed-on: https://go-review.googlesource.com/c/go/+/195698
Reviewed-by: Austin Clements <austin@google.com>
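As a rough illustration of the accounting described above (not code from this change: the allocator type, its fields, and the fixed "half the range was scavenged" behavior are invented for the sketch), a caller can take the scavenged byte count reported by an allocation and deduct it from its released-memory statistic:

package main

import "fmt"

const pageSize = 8192

// allocator is a toy stand-in for the page allocator plus the caller-side
// statistics it feeds.
type allocator struct {
	heapReleased uintptr // bytes believed to be returned to the OS
}

// allocRange stands in for an allocation primitive that reports how many
// bytes of the allocated range had been scavenged. Here we simply pretend
// half of the range was scavenged.
func (a *allocator) allocRange(base, npages uintptr) uintptr {
	return (npages / 2) * pageSize
}

// alloc allocates npages and updates the released-memory statistic; this
// is the bookkeeping the commit message says is kept in mheap rather than
// embedded in pageAlloc.
func (a *allocator) alloc(npages uintptr) {
	scav := a.allocRange(0, npages)
	a.heapReleased -= scav // scavenged pages are back in use now
}

func main() {
	a := &allocator{heapReleased: 1 << 20}
	a.alloc(16)
	fmt.Println("heapReleased:", a.heapReleased) // 1048576 - 8*8192 = 983040
}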
Diffstat (limited to 'src/runtime/mpagealloc.go')
-rw-r--r--  src/runtime/mpagealloc.go  29
1 file changed, 20 insertions(+), 9 deletions(-)
diff --git a/src/runtime/mpagealloc.go b/src/runtime/mpagealloc.go
index cc65921d39..21ea6a8525 100644
--- a/src/runtime/mpagealloc.go
+++ b/src/runtime/mpagealloc.go
@@ -467,24 +467,33 @@ func (s *pageAlloc) update(base, npages uintptr, contig, alloc bool) {
 // allocated. It also updates the summaries to reflect the newly-updated
 // bitmap.
 //
+// Returns the amount of scavenged memory in bytes present in the
+// allocated range.
+//
 // s.mheapLock must be held.
-func (s *pageAlloc) allocRange(base, npages uintptr) {
+func (s *pageAlloc) allocRange(base, npages uintptr) uintptr {
 	limit := base + npages*pageSize - 1
 	sc, ec := chunkIndex(base), chunkIndex(limit)
 	si, ei := chunkPageIndex(base), chunkPageIndex(limit)
+	scav := uint(0)
 	if sc == ec {
 		// The range doesn't cross any chunk boundaries.
+		scav += s.chunks[sc].scavenged.popcntRange(si, ei+1-si)
 		s.chunks[sc].allocRange(si, ei+1-si)
 	} else {
 		// The range crosses at least one chunk boundary.
+		scav += s.chunks[sc].scavenged.popcntRange(si, pallocChunkPages-si)
 		s.chunks[sc].allocRange(si, pallocChunkPages-si)
 		for c := sc + 1; c < ec; c++ {
+			scav += s.chunks[c].scavenged.popcntRange(0, pallocChunkPages)
 			s.chunks[c].allocAll()
 		}
+		scav += s.chunks[ec].scavenged.popcntRange(0, ei+1)
 		s.chunks[ec].allocRange(0, ei+1)
 	}
 	s.update(base, npages, true, true)
+	return uintptr(scav) * pageSize
 }

 // find searches for the first (address-ordered) contiguous free region of
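The hunk above computes the scavenged count by splitting the allocated range at chunk boundaries and popcounting the scavenged bitmap for each piece. Below is a self-contained sketch of that decomposition using a simplified bitmap layout (chunkPages, the chunk type, and popcntPages are assumptions for illustration, not the runtime's pallocData types):

package main

import "fmt"

const chunkPages = 512 // pages tracked per chunk (simplified)

// chunk is a per-page bitmap; bit p set means page p is scavenged.
type chunk [chunkPages / 64]uint64

// popcntRange counts set bits for pages [i, i+n) within one chunk.
func (c *chunk) popcntRange(i, n uint) uint {
	count := uint(0)
	for p := i; p < i+n; p++ {
		if c[p/64]&(1<<(p%64)) != 0 {
			count++
		}
	}
	return count
}

// popcntPages mirrors the structure of allocRange above: handle the
// single-chunk case, otherwise sum the partial first chunk, any whole
// chunks in between, and the partial last chunk.
func popcntPages(chunks []chunk, page, npages uint) uint {
	sc, ec := page/chunkPages, (page+npages-1)/chunkPages
	si, ei := page%chunkPages, (page+npages-1)%chunkPages
	if sc == ec {
		return chunks[sc].popcntRange(si, ei+1-si)
	}
	n := chunks[sc].popcntRange(si, chunkPages-si)
	for c := sc + 1; c < ec; c++ {
		n += chunks[c].popcntRange(0, chunkPages)
	}
	return n + chunks[ec].popcntRange(0, ei+1)
}

func main() {
	cs := make([]chunk, 2)
	cs[0][7] = 1 << 63 // page 511 scavenged in chunk 0
	cs[1][0] = 0b1011  // pages 0, 1, 3 scavenged in chunk 1
	fmt.Println(popcntPages(cs, 500, 200)) // range crosses the boundary: prints 4
}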
@@ -714,21 +723,23 @@ nextLevel:
 }

 // alloc allocates npages worth of memory from the page heap, returning the base
-// address for the allocation.
+// address for the allocation and the amount of scavenged memory in bytes
+// contained in the region [base address, base address + npages*pageSize).
 //
-// Returns 0 on failure.
+// Returns a 0 base address on failure, in which case other returned values
+// should be ignored.
 //
 // s.mheapLock must be held.
-func (s *pageAlloc) alloc(npages uintptr) uintptr {
+func (s *pageAlloc) alloc(npages uintptr) (addr uintptr, scav uintptr) {
 	// If the searchAddr refers to a region which has a higher address than
 	// any known chunk, then we know we're out of memory.
 	if chunkIndex(s.searchAddr) >= s.end {
-		return 0
+		return 0, 0
 	}

 	// If npages has a chance of fitting in the chunk where the searchAddr is,
 	// search it directly.
-	var addr, searchAddr uintptr
+	searchAddr := uintptr(0)
 	if pallocChunkPages-chunkPageIndex(s.searchAddr) >= uint(npages) {
 		// npages is guaranteed to be no greater than pallocChunkPages here.
 		i := chunkIndex(s.searchAddr)
@@ -756,11 +767,11 @@ func (s *pageAlloc) alloc(npages uintptr) uintptr {
 			// accommodate npages.
 			s.searchAddr = maxSearchAddr
 		}
-		return 0
+		return 0, 0
 	}
 Found:
 	// Go ahead and actually mark the bits now that we have an address.
-	s.allocRange(addr, npages)
+	scav = s.allocRange(addr, npages)

 	// If we found a higher (linearized) searchAddr, we know that all the
 	// heap memory before that searchAddr in a linear address space is
@@ -768,7 +779,7 @@ Found:
 	if s.compareSearchAddrTo(searchAddr) > 0 {
 		s.searchAddr = searchAddr
 	}
-	return addr
+	return addr, scav
 }

 // free returns npages worth of memory starting at base back to the page heap.
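Finally, a toy caller showing the contract stated in the updated alloc doc comment, namely that a zero base address means failure and the scavenged byte count should then be ignored (the pager type and its fields are invented for this sketch):

package main

import "fmt"

const pageSize = 8192

// pager is a toy allocator exposing the same two-value alloc contract.
type pager struct {
	free      uintptr // free pages remaining
	scavenged uintptr // of those, how many are currently scavenged
}

// alloc returns a base address and the scavenged bytes in the allocated
// range; a zero base signals failure, in which case scav is meaningless.
func (p *pager) alloc(npages uintptr) (base, scav uintptr) {
	if npages > p.free {
		return 0, 0
	}
	s := npages
	if p.scavenged < s {
		s = p.scavenged
	}
	p.free -= npages
	p.scavenged -= s
	return 0x1000, s * pageSize // fixed fake address for the sketch
}

func main() {
	p := &pager{free: 64, scavenged: 8}
	if base, scav := p.alloc(16); base != 0 {
		fmt.Printf("base=%#x scavenged=%d bytes\n", base, scav)
	} else {
		fmt.Println("allocation failed; scav ignored")
	}
}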