author	Michael Anthony Knyszek <mknyszek@google.com>	2019-10-17 17:42:15 +0000
committer	Michael Knyszek <mknyszek@google.com>	2019-11-07 20:14:02 +0000
commit	689f6f77f0d54b597ebc82e9bc4a8e1a59bce04d (patch)
tree	f1cdd32b56fa969b849d7f236dc8ebd05c219dc2 /src/runtime/export_test.go
parent	21445b091ec0a0625282603e2730d10b34396375 (diff)
runtime: integrate new page allocator into runtime
This change integrates all the bits and pieces of the new page allocator
into the runtime, behind a global constant.

Updates #35112.

Change-Id: I6696bde7bab098a498ab37ed2a2caad2a05d30ec
Reviewed-on: https://go-review.googlesource.com/c/go/+/201764
Run-TryBot: Michael Knyszek <mknyszek@google.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Austin Clements <austin@google.com>
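As a rough sketch of the gating pattern the commit message describes (one global constant selects the implementation, and the compiler eliminates the dead branch), consider the following standalone program. allocPagesOld and allocPagesNew are hypothetical stand-ins, not runtime functions:

	package main

	import "fmt"

	// oldPageAllocator mirrors the idea of the runtime's global gate:
	// flipping this one constant switches every call site between the
	// two implementations, and the unused branch is dead-code-eliminated.
	const oldPageAllocator = false

	// Hypothetical stand-ins for the two allocator paths.
	func allocPagesOld(n uintptr) uintptr { return 0x1000 }
	func allocPagesNew(n uintptr) uintptr { return 0x2000 }

	func allocPages(n uintptr) uintptr {
		if oldPageAllocator {
			return allocPagesOld(n)
		}
		return allocPagesNew(n)
	}

	func main() {
		fmt.Printf("pages allocated at %#x\n", allocPages(4))
	}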
Diffstat (limited to 'src/runtime/export_test.go')
-rw-r--r--	src/runtime/export_test.go	59
1 file changed, 57 insertions(+), 2 deletions(-)
diff --git a/src/runtime/export_test.go b/src/runtime/export_test.go
index 10066115b4..fa0a77e43b 100644
--- a/src/runtime/export_test.go
+++ b/src/runtime/export_test.go
@@ -12,6 +12,8 @@ import (
 	"unsafe"
 )
 
+const OldPageAllocator = oldPageAllocator
+
 var Fadd64 = fadd64
 var Fsub64 = fsub64
 var Fmul64 = fmul64
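The hunk above follows the runtime's export_test.go convention: internal identifiers are re-exported under capitalized names so that tests in package runtime_test can see them. As a purely hypothetical illustration (this test is not part of the CL), a test could branch on the exported gate like so:

	package runtime_test

	import (
		"runtime"
		"testing"
	)

	// Illustrative only: skip work that applies solely to the new
	// allocator when the build still uses the old one.
	func TestPageAllocatorGate(t *testing.T) {
		if runtime.OldPageAllocator {
			t.Skip("test exercises the new page allocator only")
		}
		// ... assertions against the new allocator would go here.
	}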
@@ -354,8 +356,15 @@ func ReadMemStatsSlow() (base, slow MemStats) {
 		slow.BySize[i].Frees = bySize[i].Frees
 	}
 
-	for i := mheap_.free.start(0, 0); i.valid(); i = i.next() {
-		slow.HeapReleased += uint64(i.span().released())
+	if oldPageAllocator {
+		for i := mheap_.free.start(0, 0); i.valid(); i = i.next() {
+			slow.HeapReleased += uint64(i.span().released())
+		}
+	} else {
+		for i := mheap_.pages.start; i < mheap_.pages.end; i++ {
+			pg := mheap_.pages.chunks[i].scavenged.popcntRange(0, pallocChunkPages)
+			slow.HeapReleased += uint64(pg) * pageSize
+		}
 	}
 
 	// Unused space in the current arena also counts as released space.
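In the new branch above, HeapReleased is recomputed by popcounting each chunk's scavenged bitmap rather than walking the old allocator's free-span structure. The following standalone sketch shows the same accounting idea with a toy chunk type; the []uint64 bitmap and the math/bits loop stand in for the runtime's chunk data and its popcntRange helper:

	package main

	import (
		"fmt"
		"math/bits"
	)

	const pageSize = 8192 // 8 KiB, the page size assumed for this sketch

	// chunk models one allocator chunk: a bitmap with one bit per page,
	// where a set bit means that page's memory was returned to the OS.
	type chunk struct {
		scavenged []uint64
	}

	// releasedBytes counts the scavenged pages in every chunk and
	// converts the total to bytes, mirroring the loop in the diff.
	func releasedBytes(chunks []chunk) uint64 {
		var total uint64
		for _, c := range chunks {
			for _, word := range c.scavenged {
				total += uint64(bits.OnesCount64(word))
			}
		}
		return total * pageSize
	}

	func main() {
		chunks := []chunk{{scavenged: []uint64{0xFF, 0}}} // 8 scavenged pages
		fmt.Println(releasedBytes(chunks))                // 8 * 8192 = 65536
	}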
@@ -974,3 +983,49 @@ var BaseChunkIdx = ChunkIdx(chunkIndex((0xc000*pageAlloc64Bit + 0x200*pageAlloc3
 func PageBase(c ChunkIdx, pageIdx uint) uintptr {
 	return chunkBase(chunkIdx(c)) + uintptr(pageIdx)*pageSize
 }
+
+type BitsMismatch struct {
+	Base      uintptr
+	Got, Want uint64
+}
+
+func CheckScavengedBitsCleared(mismatches []BitsMismatch) (n int, ok bool) {
+	ok = true
+
+	// Run on the system stack to avoid stack growth allocation.
+	systemstack(func() {
+		getg().m.mallocing++
+
+		// Lock so that we can safely access the bitmap.
+		lock(&mheap_.lock)
+	chunkLoop:
+		for i := mheap_.pages.start; i < mheap_.pages.end; i++ {
+			chunk := &mheap_.pages.chunks[i]
+			for j := 0; j < pallocChunkPages/64; j++ {
+				// Run over each 64-bit bitmap section and ensure
+				// scavenged is being cleared properly on allocation.
+				// If a used bit and scavenged bit are both set, that's
+				// an error, and could indicate a larger problem, or
+				// an accounting problem.
+				want := chunk.scavenged[j] &^ chunk.pallocBits[j]
+				got := chunk.scavenged[j]
+				if want != got {
+					ok = false
+					if n >= len(mismatches) {
+						break chunkLoop
+					}
+					mismatches[n] = BitsMismatch{
+						Base: chunkBase(i) + uintptr(j)*64*pageSize,
+						Got:  got,
+						Want: want,
+					}
+					n++
+				}
+			}
+		}
+		unlock(&mheap_.lock)
+
+		getg().m.mallocing--
+	})
+	return
+}
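The invariant CheckScavengedBitsCleared enforces is that allocating a page must clear its scavenged bit: no page may be simultaneously set in pallocBits and in scavenged. A minimal standalone sketch of the same bitwise check, with hypothetical toy bitmaps in place of the runtime's chunk structures:

	package main

	import "fmt"

	// findScavengeMismatch reports the first 64-page word in which some
	// page is both allocated (set in alloc) and still marked scavenged
	// (set in scav). Clearing the alloc bits out of scav is exactly the
	// want := scavenged &^ pallocBits computation in the diff above.
	func findScavengeMismatch(alloc, scav []uint64) (idx int, ok bool) {
		for j := range scav {
			want := scav[j] &^ alloc[j]
			if got := scav[j]; got != want {
				return j, false
			}
		}
		return 0, true
	}

	func main() {
		alloc := []uint64{0b1010}
		scav := []uint64{0b0010} // page 1 is allocated AND scavenged: a bug
		if j, ok := findScavengeMismatch(alloc, scav); !ok {
			fmt.Printf("mismatch in word %d\n", j)
		}
	}

The &^ (AND NOT) operator masks the allocated pages out of the scavenged word, so any difference between the masked and unmasked values pinpoints exactly the pages violating the invariant.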