about summary refs log tree commit diff
path: root/src/runtime/mbitmap.go
diff options
context:
space:
mode:
author   Austin Clements <austin@google.com>  2018-01-15 00:00:02 -0500
committer Austin Clements <austin@google.com>  2018-02-13 16:34:45 +0000
commit   245310883dcae717bb662b22d5b1fd07fdd59b76 (patch)
tree     4b4af5e2a49dc9ea1f5a9cb3eed3fc2231a3f526 /src/runtime/mbitmap.go
parent   2ae1e1ae2f8726057914f26d5360c3403b8f049a (diff)
download go-245310883dcae717bb662b22d5b1fd07fdd59b76.tar.gz
download go-245310883dcae717bb662b22d5b1fd07fdd59b76.zip
runtime: eliminate all writebarrierptr* calls
Calls to writebarrierptr can simply be actual pointer writes. Calls to
writebarrierptr_prewrite need to go through the write barrier buffer.

Updates #22460.

Change-Id: I92cee4da98c5baa499f1977563757c76f95bf0ca
Reviewed-on: https://go-review.googlesource.com/92704
Run-TryBot: Austin Clements <austin@google.com>
Reviewed-by: Rick Hudson <rlh@golang.org>
Diffstat (limited to 'src/runtime/mbitmap.go')
-rw-r--r--  src/runtime/mbitmap.go | 11
1 file changed, 9 insertions(+), 2 deletions(-)
diff --git a/src/runtime/mbitmap.go b/src/runtime/mbitmap.go
index 3a88f17788..8e03505124 100644
--- a/src/runtime/mbitmap.go
+++ b/src/runtime/mbitmap.go
@@ -550,6 +550,8 @@ func (h heapBits) setCheckmarked(size uintptr) {
// make sure the underlying allocation contains pointers, usually
// by checking typ.kind&kindNoPointers.
//
+// Callers must perform cgo checks if writeBarrier.cgo.
+//
//go:nosplit
func bulkBarrierPreWrite(dst, src, size uintptr) {
if (dst|src|size)&(sys.PtrSize-1) != 0 {
@@ -649,7 +651,7 @@ func bulkBarrierBitmap(dst, src, size, maskOffset uintptr, bits *uint8) {
}
}
-// typeBitsBulkBarrier executes writebarrierptr_prewrite for every
+// typeBitsBulkBarrier executes a write barrier for every
// pointer that would be copied from [src, src+size) to [dst,
// dst+size) by a memmove using the type bitmap to locate those
// pointer slots.
@@ -663,6 +665,8 @@ func bulkBarrierBitmap(dst, src, size, maskOffset uintptr, bits *uint8) {
// Must not be preempted because it typically runs right before memmove,
// and the GC must observe them as an atomic action.
//
+// Callers must perform cgo checks if writeBarrier.cgo.
+//
//go:nosplit
func typeBitsBulkBarrier(typ *_type, dst, src, size uintptr) {
if typ == nil {
@@ -680,6 +684,7 @@ func typeBitsBulkBarrier(typ *_type, dst, src, size uintptr) {
return
}
ptrmask := typ.gcdata
+ buf := &getg().m.p.ptr().wbBuf
var bits uint32
for i := uintptr(0); i < typ.ptrdata; i += sys.PtrSize {
if i&(sys.PtrSize*8-1) == 0 {
@@ -691,7 +696,9 @@ func typeBitsBulkBarrier(typ *_type, dst, src, size uintptr) {
if bits&1 != 0 {
dstx := (*uintptr)(unsafe.Pointer(dst + i))
srcx := (*uintptr)(unsafe.Pointer(src + i))
- writebarrierptr_prewrite(dstx, *srcx)
+ if !buf.putFast(*dstx, *srcx) {
+ wbBufFlush(nil, 0)
+ }
}
}
}