path: root/src/runtime/mfinal.go
author     Austin Clements <austin@google.com>    2017-01-31 11:46:36 -0500
committer  Austin Clements <austin@google.com>    2017-03-03 17:02:14 +0000
commit     f1ba75f8c577e1471f646ef3715fc2f41dd423ef (patch)
tree       f92e128a4737bbd93635e33325d4dd9045df1da5 /src/runtime/mfinal.go
parent     98da2d1f91a8f4e6bdaecd8a98fc77cbac211c80 (diff)
runtime: don't rescan finalizers queue during mark termination
Currently we scan the finalizers queue both during concurrent mark and
during mark termination. This costs roughly 20ns per queued finalizer
and about 1ns per unused finalizer queue slot (allocated queue length
never decreases), which can drive up STW time if there are many
finalizers. However, we only add finalizers to this queue during
sweeping, which means that the second scan will never find anything new.

Hence, we can fix this by simply not scanning the finalizers queue
during mark termination. This brings the STW time under the 100µs goal
even with 1,000,000 queued finalizers.

Fixes #18869.

Change-Id: I4ce5620c66fb7f13ebeb39ca313ce57047d1d0fb
Reviewed-on: https://go-review.googlesource.com/36013
Run-TryBot: Austin Clements <austin@google.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Rick Hudson <rlh@golang.org>
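
As a rough illustration of the cost described above (not part of the commit), the
sketch below queues a large number of no-op finalizers with runtime.SetFinalizer
and then reports recent GC pause times via runtime/debug.ReadGCStats. The count
of 1,000,000 mirrors the figure in the commit message, but the program, its names,
and the pauses it observes are illustrative only and depend on Go version and machine.

// repro.go: hypothetical sketch, not from the commit. Queues many
// finalizers and reports the most recent GC pause time.
package main

import (
	"fmt"
	"runtime"
	"runtime/debug"
)

func main() {
	const n = 1000000 // matches the count cited in the commit message
	objs := make([]*int, n)
	for i := range objs {
		p := new(int)
		// A no-op finalizer is enough to put the object on the
		// finalizer queue once it becomes unreachable and is swept.
		runtime.SetFinalizer(p, func(*int) {})
		objs[i] = p
	}
	objs = nil   // drop the references
	runtime.GC() // sweeping queues the finalizers
	runtime.GC() // later cycles scan the (now long) queue

	var st debug.GCStats
	debug.ReadGCStats(&st)
	if len(st.Pause) > 0 {
		fmt.Println("most recent GC pause:", st.Pause[0])
	}
}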
Diffstat (limited to 'src/runtime/mfinal.go')
-rw-r--r--  src/runtime/mfinal.go  16
1 file changed, 15 insertions(+), 1 deletion(-)
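
For orientation before reading the diff, here is a simplified, self-contained model
of the queue shape the new comment describes: fixed-size blocks of finalizer records
chained into a linked list, with emptied blocks kept on a free list for reuse, which
is why the allocated queue length never decreases. This is an illustrative sketch
only; the names, block size, and record contents do not match the runtime's actual
finblock and finalizer types.

// finqueue.go: illustrative model of a block-list finalizer queue,
// not the runtime's implementation.
package finqueue

// record stands in for a queued finalizer; the runtime also stores the
// argument pointer and type information needed to run it.
type record struct {
	fn func()
}

const blockLen = 128 // illustrative; the runtime sizes blocks in bytes

// block is a fixed-size array of records plus a link to the next block,
// loosely mirroring the finblock idea.
type block struct {
	next *block
	cnt  int
	fin  [blockLen]record
}

// Queue holds the active blocks (newest first) and a free list of empty
// blocks that are reused rather than released, so capacity only grows.
type Queue struct {
	head *block
	free *block
}

// Push appends one record, taking a fresh block from the free list (or
// allocating one) whenever the current head block is full.
func (q *Queue) Push(fn func()) {
	if q.head == nil || q.head.cnt == blockLen {
		b := q.free
		if b == nil {
			b = new(block)
		} else {
			q.free = b.next
		}
		b.cnt = 0
		b.next = q.head
		q.head = b
	}
	q.head.fin[q.head.cnt] = record{fn: fn}
	q.head.cnt++
}

// Drain runs every queued record and returns the spent blocks to the
// free list instead of freeing them, so a later scan of the queue's
// blocks still walks the full allocated capacity.
func (q *Queue) Drain() {
	for q.head != nil {
		b := q.head
		for i := 0; i < b.cnt; i++ {
			b.fin[i].fn()
		}
		q.head = b.next
		b.cnt = 0
		b.next = q.free
		q.free = b
	}
}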
diff --git a/src/runtime/mfinal.go b/src/runtime/mfinal.go
index 7e191d4e7b..6ba1322881 100644
--- a/src/runtime/mfinal.go
+++ b/src/runtime/mfinal.go
@@ -12,8 +12,12 @@ import (
"unsafe"
)
+// finblock is an array of finalizers to be executed. finblocks are
+// arranged in a linked list for the finalizer queue.
+//
// finblock is allocated from non-GC'd memory, so any heap pointers
-// must be specially handled.
+// must be specially handled. GC currently assumes that the finalizer
+// queue does not grow during marking (but it can shrink).
//
//go:notinheap
type finblock struct {
@@ -71,6 +75,16 @@ var finalizer1 = [...]byte{
}
func queuefinalizer(p unsafe.Pointer, fn *funcval, nret uintptr, fint *_type, ot *ptrtype) {
+ if gcphase != _GCoff {
+ // Currently we assume that the finalizer queue won't
+ // grow during marking so we don't have to rescan it
+ // during mark termination. If we ever need to lift
+ // this assumption, we can do it by adding the
+ // necessary barriers to queuefinalizer (which it may
+ // have automatically).
+ throw("queuefinalizer during GC")
+ }
+
lock(&finlock)
if finq == nil || finq.cnt == uint32(len(finq.fin)) {
if finc == nil {