path: root/src/runtime/mwbbuf.go
author    Austin Clements <austin@google.com>  2017-10-27 13:48:08 -0400
committer Austin Clements <austin@google.com>  2017-10-30 18:12:54 +0000
commit    877387e38a734db8a2a151ddd4af7ba53bcf6460 (patch)
tree      7d076bdf50a6b37f887c4d4b12ebb977f023d9c7
parent    6a5f1e58edfe55428d375be26c9c8eefbd67ad31 (diff)
runtime: use buffered write barrier for bulkBarrierPreWrite
This modifies bulkBarrierPreWrite to use the buffered write barrier
instead of the eager write barrier. This reduces the number of system
stack switches and sanity checks by a factor of the buffer size
(currently 256). This affects both typedmemmove and typedmemclr.

Since this is purely a runtime change, it applies to all arches
(unlike the pointer write barrier).

name                 old time/op  new time/op  delta
BulkWriteBarrier-12  7.33ns ± 6%  4.46ns ± 9%  -39.10%  (p=0.000 n=20+19)

Updates #22460.

Change-Id: I6a686a63bbf08be02b9b97250e37163c5a90cdd8
Reviewed-on: https://go-review.googlesource.com/73832
Run-TryBot: Austin Clements <austin@google.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Rick Hudson <rlh@golang.org>
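As a rough illustration of the claim in the message (this is a toy
model, not runtime code): the eager barrier pays its per-pointer cost
on every slot, while the buffered barrier pays the flush cost once per
full 256-entry buffer. The names bufEntries, eagerSwitches, and
bufferedSwitches below are illustrative, not runtime identifiers.

package main

import "fmt"

// Toy cost model: assume each eager barrier call costs one
// system-stack switch, while the buffered barrier pays one switch
// per flush of a full 256-entry buffer.

const bufEntries = 256 // buffer size cited in the commit message

func eagerSwitches(slots int) int {
	return slots // one system-stack switch per pointer slot
}

func bufferedSwitches(slots int) int {
	return slots / bufEntries // one switch per full-buffer flush
}

func main() {
	n := 4096
	fmt.Println("eager:   ", eagerSwitches(n))    // 4096
	fmt.Println("buffered:", bufferedSwitches(n)) // 16
}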
Diffstat (limited to 'src/runtime/mwbbuf.go')
-rw-r--r--  src/runtime/mwbbuf.go  34
1 file changed, 33 insertions(+), 1 deletion(-)
diff --git a/src/runtime/mwbbuf.go b/src/runtime/mwbbuf.go
index d1cd193665..2c06996210 100644
--- a/src/runtime/mwbbuf.go
+++ b/src/runtime/mwbbuf.go
@@ -20,6 +20,7 @@
package runtime
import (
+ "runtime/internal/sys"
"unsafe"
)
@@ -94,6 +95,37 @@ func (b *wbBuf) reset() {
}
}
+// putFast adds old and new to the write barrier buffer and returns
+// false if a flush is necessary. Callers should use this as:
+//
+// buf := &getg().m.p.ptr().wbBuf
+// if !buf.putFast(old, new) {
+// wbBufFlush(...)
+// }
+//
+// The arguments to wbBufFlush depend on whether the caller is doing
+// its own cgo pointer checks. If it is, then this can be
+// wbBufFlush(nil, 0). Otherwise, it must pass the slot address and
+// new.
+//
+// Since buf is a per-P resource, the caller must ensure there are no
+// preemption points while buf is in use.
+//
+// It must be nowritebarrierrec because write barriers here would
+// corrupt the write barrier buffer. It (and everything it calls, if
+// it called anything) has to be nosplit to avoid scheduling onto a
+// different P and a different buffer.
+//
+//go:nowritebarrierrec
+//go:nosplit
+func (b *wbBuf) putFast(old, new uintptr) bool {
+ p := (*[2]uintptr)(unsafe.Pointer(b.next))
+ p[0] = old
+ p[1] = new
+ b.next += 2 * sys.PtrSize
+ return b.next != b.end
+}
+
// wbBufFlush flushes the current P's write barrier buffer to the GC
// workbufs. It is passed the slot and value of the write barrier that
// caused the flush so that it can implement cgocheck.
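One detail of putFast above worth spelling out: the comparison runs
after the cursor bump, so the call that writes the final pair is the
one that returns false, and the flush therefore sees a completely full
buffer. A self-contained sketch of that convention, using an index
cursor instead of the runtime's raw next/end pointers (wbEntries and
toyBuf are illustrative stand-ins):

package main

import "fmt"

const wbEntries = 4 // illustrative; the real buffer holds 256 pairs

type toyBuf struct {
	n   int
	arr [2 * wbEntries]uintptr // old/new pairs, flattened
}

// putFast mirrors the diff's convention: store the pair, advance the
// cursor, and return false only once the buffer has just become full.
func (b *toyBuf) putFast(old, new uintptr) bool {
	b.arr[b.n] = old
	b.arr[b.n+1] = new
	b.n += 2
	return b.n != len(b.arr)
}

func main() {
	var b toyBuf
	for i := 0; i < wbEntries; i++ {
		fmt.Printf("put %d: room left = %v\n", i, b.putFast(uintptr(i), uintptr(i)))
	}
	// The final put prints "room left = false": the flush runs
	// with the buffer exactly full, never before.
}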
@@ -118,7 +150,7 @@ func wbBufFlush(dst *uintptr, src uintptr) {
return
}
- if writeBarrier.cgo {
+ if writeBarrier.cgo && dst != nil {
// This must be called from the stack that did the
// write. It's nosplit all the way down.
cgoCheckWriteBarrier(dst, src)
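The hunk above is why the guard matters: a caller such as
bulkBarrierPreWrite does its own cgo pointer checks and flushes with
wbBufFlush(nil, 0), so the flush path must skip cgoCheckWriteBarrier
when no slot address was supplied. A minimal sketch of the two call
shapes; cgoEnabled, cgoCheck, drainBuffer, and flush are illustrative
stand-ins for the runtime internals, not real identifiers.

package main

import "fmt"

var cgoEnabled = true

func cgoCheck(dst *uintptr, src uintptr) {
	fmt.Printf("cgo-checking write of %#x to %p\n", src, dst)
}

func drainBuffer() { fmt.Println("buffer drained to GC work queue") }

// flush mirrors the guarded check added above: the cgo pointer check
// runs only when the caller supplied a real slot address.
func flush(dst *uintptr, src uintptr) {
	if cgoEnabled && dst != nil {
		cgoCheck(dst, src)
	}
	drainBuffer()
}

func main() {
	var slot uintptr
	flush(&slot, 42) // caller delegates the cgo check to the flush
	flush(nil, 0)    // caller already checked (the bulkBarrierPreWrite case)
}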