author    Michael Anthony Knyszek <mknyszek@google.com>  2019-11-14 23:58:50 +0000
committer Michael Knyszek <mknyszek@google.com>          2019-12-03 17:35:06 +0000
commit    acf3ff2e8a0ee777a35b42879c90a1d5a130988f (patch)
tree      0581b87e65f969de22999d9ebaafefddf22c9606 /src/runtime/export_test.go
parent    2ac1ca9160f52907ce1cd04738c80b1c055b5ba6 (diff)
runtime: convert page allocator bitmap to sparse array
Currently the page allocator bitmap is implemented as a single giant memory mapping which is reserved at init time and committed as needed. This causes problems on systems that don't handle large uncommitted mappings well, or that institute low virtual address space defaults as a memory limiting mechanism.

This change modifies the implementation of the page allocator bitmap away from a directly-mapped set of bytes to a sparse array, in the same vein as mheap.arenas. This will hurt performance a little, but the biggest gains come from the lockless allocation possible with the page allocator, so the impact of this extra layer of indirection should be minimal. In fact, this is exactly what we see: https://perf.golang.org/search?q=upload:20191125.5

This reduces the amount of mapped (PROT_NONE) memory needed on systems with 48-bit address spaces to ~600 MiB, down from almost 9 GiB. The bulk of this remaining memory is used by the summaries.

Go processes with 32-bit address spaces now always commit 128 KiB of memory for the bitmap. Previously they would only commit the pages in the bitmap which represented the range of addresses (lowest address to highest address, even if there are unused regions in that range) used by the heap.

Updates #35568.
Updates #35451.

Change-Id: I0ff10380156568642b80c366001eefd0a4e6c762
Reviewed-on: https://go-review.googlesource.com/c/go/+/207497
Run-TryBot: Michael Knyszek <mknyszek@google.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Austin Clements <austin@google.com>
Reviewed-by: Cherry Zhang <cherryyz@google.com>
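For context, the following is a minimal, self-contained sketch of the two-level sparse-array lookup pattern that the new chunkOf/PallocData accessors in the diff below rely on. It is not the runtime's actual code; the names sparseChunks, chunkData, l1Bits, and l2Bits (and their sizes) are illustrative only.

package main

import "fmt"

const (
	l1Bits = 8 // hypothetical: bits of the chunk index consumed by the first level
	l2Bits = 8 // hypothetical: bits of the chunk index consumed by the second level
)

// chunkData stands in for the per-chunk allocation/scavenge bitmap metadata.
type chunkData struct {
	bits [8]uint64
}

// sparseChunks is the two-level structure: a small, always-present first-level
// array of pointers, whose second-level arrays are allocated only when a chunk
// in their range is first needed.
type sparseChunks struct {
	l1 [1 << l1Bits]*[1 << l2Bits]chunkData
}

// chunkOf returns the chunk for index ci, or nil if its second-level array has
// not been allocated yet (mirroring the nil check in PallocData below).
func (s *sparseChunks) chunkOf(ci uint) *chunkData {
	l2 := s.l1[ci>>l2Bits]
	if l2 == nil {
		return nil
	}
	return &l2[ci&(1<<l2Bits-1)]
}

// ensure allocates the second-level array for ci on demand. The runtime maps
// this memory with sysAlloc during growth; a plain allocation is enough here.
func (s *sparseChunks) ensure(ci uint) *chunkData {
	if s.l1[ci>>l2Bits] == nil {
		s.l1[ci>>l2Bits] = new([1 << l2Bits]chunkData)
	}
	return s.chunkOf(ci)
}

func main() {
	var s sparseChunks
	fmt.Println(s.chunkOf(300) == nil) // true: nothing mapped yet
	s.ensure(300)
	fmt.Println(s.chunkOf(300) != nil) // true: exactly one second-level array was allocated
}

Only the second-level arrays that are actually touched get backed by memory, which is what shrinks the reserved mapping from a single giant contiguous bitmap to a set of small, independently mapped pieces.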
Diffstat (limited to 'src/runtime/export_test.go')
-rw-r--r--  src/runtime/export_test.go  28
1 file changed, 20 insertions(+), 8 deletions(-)
diff --git a/src/runtime/export_test.go b/src/runtime/export_test.go
index 47cefa1f3b..75882d02b6 100644
--- a/src/runtime/export_test.go
+++ b/src/runtime/export_test.go
@@ -355,7 +355,7 @@ func ReadMemStatsSlow() (base, slow MemStats) {
}
for i := mheap_.pages.start; i < mheap_.pages.end; i++ {
- pg := mheap_.pages.chunks[i].scavenged.popcntRange(0, pallocChunkPages)
+ pg := mheap_.pages.chunkOf(i).scavenged.popcntRange(0, pallocChunkPages)
slow.HeapReleased += uint64(pg) * pageSize
}
for _, p := range allp {
@@ -726,9 +726,6 @@ func (p *PageAlloc) Free(base, npages uintptr) {
func (p *PageAlloc) Bounds() (ChunkIdx, ChunkIdx) {
return ChunkIdx((*pageAlloc)(p).start), ChunkIdx((*pageAlloc)(p).end)
}
-func (p *PageAlloc) PallocData(i ChunkIdx) *PallocData {
- return (*PallocData)(&((*pageAlloc)(p).chunks[i]))
-}
func (p *PageAlloc) Scavenge(nbytes uintptr, locked bool) (r uintptr) {
systemstack(func() {
r = (*pageAlloc)(p).scavenge(nbytes, locked)
@@ -736,6 +733,16 @@ func (p *PageAlloc) Scavenge(nbytes uintptr, locked bool) (r uintptr) {
return
}
+// Returns nil if the PallocData's L2 is missing.
+func (p *PageAlloc) PallocData(i ChunkIdx) *PallocData {
+ ci := chunkIdx(i)
+ l2 := (*pageAlloc)(p).chunks[ci.l1()]
+ if l2 == nil {
+ return nil
+ }
+ return (*PallocData)(&l2[ci.l2()])
+}
+
// BitRange represents a range over a bitmap.
type BitRange struct {
I, N uint // bit index and length in bits
@@ -769,7 +776,7 @@ func NewPageAlloc(chunks, scav map[ChunkIdx][]BitRange) *PageAlloc {
p.grow(addr, pallocChunkBytes)
// Initialize the bitmap and update pageAlloc metadata.
- chunk := &p.chunks[chunkIndex(addr)]
+ chunk := p.chunkOf(chunkIndex(addr))
// Clear all the scavenged bits which grow set.
chunk.scavenged.clearRange(0, pallocChunkPages)
@@ -823,8 +830,13 @@ func FreePageAlloc(pp *PageAlloc) {
}
// Free the mapped space for chunks.
- chunksLen := uintptr(cap(p.chunks)) * unsafe.Sizeof(p.chunks[0])
- sysFree(unsafe.Pointer(&p.chunks[0]), alignUp(chunksLen, physPageSize), nil)
+ for i := range p.chunks {
+ if x := p.chunks[i]; x != nil {
+ p.chunks[i] = nil
+ // This memory comes from sysAlloc and will always be page-aligned.
+ sysFree(unsafe.Pointer(x), unsafe.Sizeof(*p.chunks[0]), nil)
+ }
+ }
}
// BaseChunkIdx is a convenient chunkIdx value which works on both
@@ -861,7 +873,7 @@ func CheckScavengedBitsCleared(mismatches []BitsMismatch) (n int, ok bool) {
lock(&mheap_.lock)
chunkLoop:
for i := mheap_.pages.start; i < mheap_.pages.end; i++ {
- chunk := &mheap_.pages.chunks[i]
+ chunk := mheap_.pages.chunkOf(i)
for j := 0; j < pallocChunkPages/64; j++ {
// Run over each 64-bit bitmap section and ensure
// scavenged is being cleared properly on allocation.