path: root/src/runtime/export_test.go
author     Michael Pratt <mpratt@google.com>  2020-08-21 11:59:55 -0400
committer  Michael Pratt <mpratt@google.com>  2020-10-30 20:21:14 +0000
commit     9393b5bae5944acebed3ab6f995926b7de3ce429 (patch)
tree       f2b29ca5cbc839f575cc0011b425a3231895016f /src/runtime/export_test.go
parent     6abbfc17c255c07134a69c3ca305231db80530ec (diff)
download   go-9393b5bae5944acebed3ab6f995926b7de3ce429.tar.gz
           go-9393b5bae5944acebed3ab6f995926b7de3ce429.zip
runtime: add heap lock assertions
Some functions that required holding the heap lock _or_ world stop have
been simplified to simply requiring the heap lock. This is conceptually
simpler and taking the heap lock during world stop is guaranteed to not
contend.

This was only done on functions already called on the systemstack to
avoid too many extra systemstack calls in GC.

Updates #40677

Change-Id: I15aa1dadcdd1a81aac3d2a9ecad6e7d0377befdc
Reviewed-on: https://go-review.googlesource.com/c/go/+/250262
Run-TryBot: Michael Pratt <mpratt@google.com>
TryBot-Result: Go Bot <gobot@golang.org>
Reviewed-by: Austin Clements <austin@google.com>
Trust: Michael Pratt <mpratt@google.com>
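This file contains only the test-side half of the change: the exported wrappers below now take mheapLock on the system stack themselves, so the lock-held assertions added elsewhere in the runtime (not shown in this diff) are satisfied. The following is a minimal, self-contained analogue of that pattern; it uses an ordinary sync.Mutex and a hand-rolled "lock held" check rather than the runtime's real lock and assertion machinery, so it is illustrative only.

    package main

    import (
    	"fmt"
    	"sync"
    )

    // heapLike is a stand-in for mheap/pageAlloc: it owns a lock that inner
    // functions assert is held, mirroring the style of assertion the commit
    // message describes. Not runtime code.
    type heapLike struct {
    	mu     sync.Mutex
    	locked bool // stand-in for the runtime's lock bookkeeping
    	next   uintptr
    }

    func (h *heapLike) assertLockHeld() {
    	if !h.locked {
    		panic("heapLike lock not held")
    	}
    }

    // alloc plays the role of pageAlloc.alloc: callers must hold the lock.
    func (h *heapLike) alloc(npages uintptr) uintptr {
    	h.assertLockHeld()
    	addr := h.next
    	h.next += npages
    	return addr
    }

    // Alloc plays the role of the exported test wrappers in the diff: it
    // takes the lock internally so callers need no higher-level locking.
    // (The runtime versions additionally run the locked region on the
    // system stack via systemstack.)
    func (h *heapLike) Alloc(npages uintptr) uintptr {
    	h.mu.Lock()
    	h.locked = true
    	addr := h.alloc(npages)
    	h.locked = false
    	h.mu.Unlock()
    	return addr
    }

    func main() {
    	h := &heapLike{}
    	fmt.Println(h.Alloc(4)) // 0
    	fmt.Println(h.Alloc(2)) // 4
    }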
Diffstat (limited to 'src/runtime/export_test.go')
-rw-r--r--  src/runtime/export_test.go  61
1 file changed, 55 insertions(+), 6 deletions(-)
diff --git a/src/runtime/export_test.go b/src/runtime/export_test.go
index 4ca0420d2a..44551dcaf1 100644
--- a/src/runtime/export_test.go
+++ b/src/runtime/export_test.go
@@ -743,7 +743,16 @@ func (c *PageCache) Alloc(npages uintptr) (uintptr, uintptr) {
return (*pageCache)(c).alloc(npages)
}
func (c *PageCache) Flush(s *PageAlloc) {
- (*pageCache)(c).flush((*pageAlloc)(s))
+ cp := (*pageCache)(c)
+ sp := (*pageAlloc)(s)
+
+ systemstack(func() {
+ // None of the tests need any higher-level locking, so we just
+ // take the lock internally.
+ lock(sp.mheapLock)
+ cp.flush(sp)
+ unlock(sp.mheapLock)
+ })
}
// Expose chunk index type.
@@ -754,13 +763,41 @@ type ChunkIdx chunkIdx
type PageAlloc pageAlloc
func (p *PageAlloc) Alloc(npages uintptr) (uintptr, uintptr) {
- return (*pageAlloc)(p).alloc(npages)
+ pp := (*pageAlloc)(p)
+
+ var addr, scav uintptr
+ systemstack(func() {
+ // None of the tests need any higher-level locking, so we just
+ // take the lock internally.
+ lock(pp.mheapLock)
+ addr, scav = pp.alloc(npages)
+ unlock(pp.mheapLock)
+ })
+ return addr, scav
}
func (p *PageAlloc) AllocToCache() PageCache {
- return PageCache((*pageAlloc)(p).allocToCache())
+ pp := (*pageAlloc)(p)
+
+ var c PageCache
+ systemstack(func() {
+ // None of the tests need any higher-level locking, so we just
+ // take the lock internally.
+ lock(pp.mheapLock)
+ c = PageCache(pp.allocToCache())
+ unlock(pp.mheapLock)
+ })
+ return c
}
func (p *PageAlloc) Free(base, npages uintptr) {
- (*pageAlloc)(p).free(base, npages)
+ pp := (*pageAlloc)(p)
+
+ systemstack(func() {
+ // None of the tests need any higher-level locking, so we just
+ // take the lock internally.
+ lock(pp.mheapLock)
+ pp.free(base, npages)
+ unlock(pp.mheapLock)
+ })
}
func (p *PageAlloc) Bounds() (ChunkIdx, ChunkIdx) {
return ChunkIdx((*pageAlloc)(p).start), ChunkIdx((*pageAlloc)(p).end)
@@ -768,6 +805,8 @@ func (p *PageAlloc) Bounds() (ChunkIdx, ChunkIdx) {
func (p *PageAlloc) Scavenge(nbytes uintptr, mayUnlock bool) (r uintptr) {
pp := (*pageAlloc)(p)
systemstack(func() {
+ // None of the tests need any higher-level locking, so we just
+ // take the lock internally.
lock(pp.mheapLock)
r = pp.scavenge(nbytes, mayUnlock)
unlock(pp.mheapLock)
@@ -926,7 +965,11 @@ func NewPageAlloc(chunks, scav map[ChunkIdx][]BitRange) *PageAlloc {
addr := chunkBase(chunkIdx(i))
// Mark the chunk's existence in the pageAlloc.
- p.grow(addr, pallocChunkBytes)
+ systemstack(func() {
+ lock(p.mheapLock)
+ p.grow(addr, pallocChunkBytes)
+ unlock(p.mheapLock)
+ })
// Initialize the bitmap and update pageAlloc metadata.
chunk := p.chunkOf(chunkIndex(addr))
@@ -957,13 +1000,19 @@ func NewPageAlloc(chunks, scav map[ChunkIdx][]BitRange) *PageAlloc {
}
// Update heap metadata for the allocRange calls above.
- p.update(addr, pallocChunkPages, false, false)
+ systemstack(func() {
+ lock(p.mheapLock)
+ p.update(addr, pallocChunkPages, false, false)
+ unlock(p.mheapLock)
+ })
}
+
systemstack(func() {
lock(p.mheapLock)
p.scavengeStartGen()
unlock(p.mheapLock)
})
+
return (*PageAlloc)(p)
}
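For context, a test in package runtime_test might exercise the wrappers above roughly as follows. This is a sketch only: BitRange, BaseChunkIdx, and FreePageAlloc are assumed to be provided elsewhere in export_test.go and its test support code; they are not part of this diff and their exact shapes may differ.

    package runtime_test

    import (
    	"testing"

    	. "runtime"
    )

    // Sketch of a caller. Helper names other than NewPageAlloc, Alloc, and
    // Free are assumptions about the surrounding test support code.
    func TestPageAllocWrapperSketch(t *testing.T) {
    	// One chunk with nothing pre-allocated and nothing marked scavenged.
    	p := NewPageAlloc(map[ChunkIdx][]BitRange{BaseChunkIdx: {}}, nil)
    	defer FreePageAlloc(p)

    	// The wrapper locks mheapLock on the system stack internally, so the
    	// test body needs no explicit runtime locking.
    	addr, scav := p.Alloc(1)
    	t.Logf("allocated one page at %#x (scavenged %d bytes)", addr, scav)
    	p.Free(addr, 1)
    }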