path: root/src/runtime/mfinal.go
author     Austin Clements <austin@google.com>  2016-10-14 13:39:07 -0400
committer  Austin Clements <austin@google.com>  2016-10-28 19:13:54 +0000
commit     d3836aba316a6a5ee9410b9abd2f42120af4ffe6 (patch)
tree       bdcf971e9a3219937323b2877fc2e17a93957854 /src/runtime/mfinal.go
parent     db56a63547d7ae9f19afd2497c67f717154f9115 (diff)
runtime: ensure finalizers are zero-initialized before reuse
We reuse finalizers in finblocks, which are allocated off-heap. This
means they have to be zero-initialized before becoming visible to the
garbage collector. We actually already do this by clearing the
finalizer before returning it to the pool, but we're not careful to
enforce correct memory ordering. Fix this by manipulating the
finalizer count atomically so these writes synchronize properly with
the garbage collector.

Updates #17503.

Change-Id: I7797d31df3c656c9fe654bc6da287f66a9e2037d
Reviewed-on: https://go-review.googlesource.com/31454
Reviewed-by: Rick Hudson <rlh@golang.org>
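The ordering contract described above can be sketched outside the
runtime. The toy below is an illustration under stated assumptions,
not runtime code: sync/atomic stands in for runtime/internal/atomic,
and block, enqueue, dequeue, and scan are made-up names. It keeps the
same invariant the commit relies on: every slot at index cnt or above
is zero, so the count may grow before the new slot is filled in, but
may only shrink after the old slot has been cleared.

package main

import (
	"fmt"
	"sync/atomic"
	"unsafe"
)

// block mimics a finblock: an off-heap array whose live prefix is
// published to a concurrent scanner through an atomic count.
// Invariant: every slot at index >= cnt is zero.
type block struct {
	cnt  uint32
	slot [128]unsafe.Pointer
}

// enqueue may bump cnt before filling the slot: the slot is still
// zero from a previous dequeue (or from allocation), and the scanner
// is prepared to see a zero slot.
func enqueue(b *block, p unsafe.Pointer) {
	i := b.cnt // plain read: in the runtime, writers hold finlock
	atomic.AddUint32(&b.cnt, 1)
	b.slot[i] = p
}

// dequeue clears the slot *before* the atomic store that shrinks cnt,
// so by the time the slot can be handed out again it is already zero.
func dequeue(b *block) unsafe.Pointer {
	i := b.cnt
	p := b.slot[i-1]
	b.slot[i-1] = nil
	atomic.StoreUint32(&b.cnt, i-1)
	return p
}

// scan plays the part of markroot: it trusts only the atomically
// loaded count and may observe zero slots, never stale pointers.
func scan(b *block, visit func(unsafe.Pointer)) {
	for i, n := uint32(0), atomic.LoadUint32(&b.cnt); i < n; i++ {
		if p := b.slot[i]; p != nil {
			visit(p)
		}
	}
}

func main() {
	var b block
	x := 42
	enqueue(&b, unsafe.Pointer(&x))
	scan(&b, func(p unsafe.Pointer) { fmt.Println(*(*int)(p)) }) // 42
	_ = dequeue(&b)
}

In this scheme a racing scanner either observes a slot before it is
hidden (still valid, or already nil) or after, and in no interleaving
does it observe a half-initialized record.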
Diffstat (limited to 'src/runtime/mfinal.go')
-rw-r--r--  src/runtime/mfinal.go | 15 +++++++++------
1 file changed, 9 insertions(+), 6 deletions(-)
diff --git a/src/runtime/mfinal.go b/src/runtime/mfinal.go
index dae3956cd1..e0da4a3ac0 100644
--- a/src/runtime/mfinal.go
+++ b/src/runtime/mfinal.go
@@ -19,7 +19,7 @@ import (
 type finblock struct {
 	alllink *finblock
 	next    *finblock
-	cnt     int32
+	cnt     uint32
 	_       int32
 	fin     [(_FinBlockSize - 2*sys.PtrSize - 2*4) / unsafe.Sizeof(finalizer{})]finalizer
 }
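As a side note on the fin array declared above, its length is simply
whatever fits in the block after the header fields. A minimal,
self-contained sketch of that arithmetic, assuming a hypothetical
blockSize of 4096 (the real _FinBlockSize constant is defined
elsewhere in the runtime and may differ) and placeholder pointer types
sized like the runtime's:

package main

import (
	"fmt"
	"unsafe"
)

// Stand-in for the runtime's five-word finalizer record; the field
// types are placeholders with the same sizes.
type finalizer struct {
	fn   unsafe.Pointer // *funcval in the runtime
	arg  unsafe.Pointer
	nret uintptr
	fint unsafe.Pointer // *_type
	ot   unsafe.Pointer // *ptrtype
}

// blockSize is hypothetical; _FinBlockSize's value is not shown here.
const blockSize = 4096

func main() {
	ptrSize := unsafe.Sizeof(uintptr(0))
	// Header: two block pointers (alllink, next) plus the two 4-byte
	// fields (cnt, _); the fin array fills whatever space remains.
	n := (blockSize - 2*ptrSize - 2*4) / unsafe.Sizeof(finalizer{})
	fmt.Println("finalizers per block:", n) // 101 on 64-bit with these assumptions
}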
@@ -72,7 +72,7 @@ var finalizer1 = [...]byte{
 
 func queuefinalizer(p unsafe.Pointer, fn *funcval, nret uintptr, fint *_type, ot *ptrtype) {
 	lock(&finlock)
-	if finq == nil || finq.cnt == int32(len(finq.fin)) {
+	if finq == nil || finq.cnt == uint32(len(finq.fin)) {
 		if finc == nil {
 			finc = (*finblock)(persistentalloc(_FinBlockSize, 0, &memstats.gc_sys))
 			finc.alllink = allfin
@@ -99,7 +99,7 @@ func queuefinalizer(p unsafe.Pointer, fn *funcval, nret uintptr, fint *_type, ot
 		finq = block
 	}
 	f := &finq.fin[finq.cnt]
-	finq.cnt++
+	atomic.Xadd(&finq.cnt, +1) // Sync with markroots
 	f.fn = fn
 	f.nret = nret
 	f.fint = fint
@@ -112,7 +112,7 @@ func queuefinalizer(p unsafe.Pointer, fn *funcval, nret uintptr, fint *_type, ot
 //go:nowritebarrier
 func iterate_finq(callback func(*funcval, unsafe.Pointer, uintptr, *_type, *ptrtype)) {
 	for fb := allfin; fb != nil; fb = fb.alllink {
-		for i := int32(0); i < fb.cnt; i++ {
+		for i := uint32(0); i < fb.cnt; i++ {
 			f := &fb.fin[i]
 			callback(f.fn, f.arg, f.nret, f.fint, f.ot)
 		}
@@ -208,11 +208,14 @@ func runfinq() {
 				reflectcall(nil, unsafe.Pointer(f.fn), frame, uint32(framesz), uint32(framesz))
 				fingRunning = false
 
-				// drop finalizer queue references to finalized object
+				// Drop finalizer queue heap references
+				// before hiding them from markroot.
+				// This also ensures these will be
+				// clear if we reuse the finalizer.
 				f.fn = nil
 				f.arg = nil
 				f.ot = nil
-				fb.cnt = i - 1
+				atomic.Store(&fb.cnt, i-1)
 			}
 			next := fb.next
 			lock(&finlock)
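The ordering in this last hunk (nil out the heap pointers, then
publish the smaller count with an atomic store) is what lets a drained
block go back on the finc free list and be reused without re-clearing.
A minimal sketch of just that drain step, with made-up finBlock and
drain names and sync/atomic again standing in for the runtime's
internal atomics:

package main

import (
	"fmt"
	"sync/atomic"
	"unsafe"
)

// fin mirrors the fields runfinq nils out before shrinking cnt.
type fin struct {
	fn, arg, ot unsafe.Pointer
}

type finBlock struct {
	cnt uint32
	fin [4]fin
}

// drain mirrors the loop above: clear the record first, then publish
// the smaller count with an atomic store, exactly in that order.
func drain(fb *finBlock) {
	for i := atomic.LoadUint32(&fb.cnt); i > 0; i-- {
		f := &fb.fin[i-1]
		// ... the real loop runs the finalizer here ...
		f.fn, f.arg, f.ot = nil, nil, nil
		atomic.StoreUint32(&fb.cnt, i-1)
	}
}

func main() {
	var fb finBlock
	for i := range fb.fin {
		fb.fin[i] = fin{fn: unsafe.Pointer(&fb)} // dummy non-nil records
		fb.cnt++
	}
	drain(&fb)
	// A drained block is all zeros again, so it can be handed back to
	// the free list and reused without any further clearing.
	fmt.Println("cnt:", fb.cnt, "slot 0 zeroed:", fb.fin[0] == fin{})
}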