author    Russ Cox <rsc@golang.org>   2014-12-22 22:50:42 -0500
committer Russ Cox <rsc@golang.org>   2015-01-06 00:27:06 +0000
commit    7b4df8f018ec01df3ecbd9961c769e7199853363
tree      08d6f4c8dcd3d95cd2418fe94445edab7025f158  /src/runtime/atomic_pointer.go
parent    eafc482d4f091c4ddd2178098d94831d1e2f25ab
runtime, sync/atomic: add write barrier for atomic write of pointer
Add write barrier to atomic operations manipulating pointers.

In general an atomic write of a pointer word may indicate racy accesses,
so there is no strictly safe way to attempt to keep the shadow copy
in sync with the real one. Instead, mark the shadow copy as not used.

Redirect sync/atomic pointer routines back to the runtime ones,
so that there is only one copy of the write barrier and shadow logic.
In time we might consider doing this for most of the sync/atomic
functions, but for now only the pointer routines need that treatment.

Found with GODEBUG=wbshadow=1 mode. Eventually that will run
automatically, but right now it still detects other missing write
barriers.

Change-Id: I852936b9a111a6cb9079cfaf6bd78b43016c0242
Reviewed-on: https://go-review.googlesource.com/2066
Reviewed-by: Rick Hudson <rlh@golang.org>
Reviewed-by: Austin Clements <austin@google.com>
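
For illustration only, a minimal, self-contained example of the sync/atomic
pointer routines that this change redirects through the runtime wrappers.
The node type and head variable are invented for the sketch and are not part
of the change:

	package main

	import (
		"fmt"
		"sync/atomic"
		"unsafe"
	)

	type node struct{ v int }

	var head unsafe.Pointer // always holds a *node

	func main() {
		// StorePointer, SwapPointer, and CompareAndSwapPointer are the
		// sync/atomic entry points that now funnel through the runtime
		// wrappers, so the GC write barrier observes the stored pointer.
		atomic.StorePointer(&head, unsafe.Pointer(&node{v: 1}))

		old := atomic.SwapPointer(&head, unsafe.Pointer(&node{v: 2}))
		fmt.Println((*node)(old).v) // 1

		ok := atomic.CompareAndSwapPointer(&head,
			atomic.LoadPointer(&head), unsafe.Pointer(&node{v: 3}))
		fmt.Println(ok, (*node)(atomic.LoadPointer(&head)).v) // true 3
	}
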
Diffstat (limited to 'src/runtime/atomic_pointer.go')
-rw-r--r--  src/runtime/atomic_pointer.go | 96
1 file changed, 96 insertions(+), 0 deletions(-)
diff --git a/src/runtime/atomic_pointer.go b/src/runtime/atomic_pointer.go
new file mode 100644
index 0000000000..50a30242d9
--- /dev/null
+++ b/src/runtime/atomic_pointer.go
@@ -0,0 +1,96 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+import "unsafe"
+
+// These functions cannot have go:noescape annotations,
+// because while ptr does not escape, new does.
+// If new is marked as not escaping, the compiler will make incorrect
+// escape analysis decisions about the pointer value being stored.
+// Instead, these are wrappers around the actual atomics (xchgp1 and so on)
+// that use noescape to convey which arguments do not escape.
+//
+// Additionally, these functions must update the shadow heap for
+// write barrier checking.
+
+//go:nosplit
+func atomicstorep(ptr unsafe.Pointer, new unsafe.Pointer) {
+ atomicstorep1(noescape(ptr), new)
+ writebarrierptr_nostore((*uintptr)(ptr), uintptr(new))
+ if mheap_.shadow_enabled {
+ writebarrierptr_noshadow((*uintptr)(noescape(ptr)))
+ }
+}
+
+//go:nosplit
+func xchgp(ptr unsafe.Pointer, new unsafe.Pointer) unsafe.Pointer {
+ old := xchgp1(noescape(ptr), new)
+ writebarrierptr_nostore((*uintptr)(ptr), uintptr(new))
+ if mheap_.shadow_enabled {
+ writebarrierptr_noshadow((*uintptr)(noescape(ptr)))
+ }
+ return old
+}
+
+//go:nosplit
+func casp(ptr *unsafe.Pointer, old, new unsafe.Pointer) bool {
+ if !casp1((*unsafe.Pointer)(noescape(unsafe.Pointer(ptr))), noescape(old), new) {
+ return false
+ }
+ writebarrierptr_nostore((*uintptr)(unsafe.Pointer(ptr)), uintptr(new))
+ if mheap_.shadow_enabled {
+ writebarrierptr_noshadow((*uintptr)(noescape(unsafe.Pointer(ptr))))
+ }
+ return true
+}
+
+// Like above, but implement in terms of sync/atomic's uintptr operations.
+// We cannot just call the runtime routines, because the race detector expects
+// to be able to intercept the sync/atomic forms but not the runtime forms.
+
+//go:linkname sync_atomic_StoreUintptr sync/atomic.StoreUintptr
+func sync_atomic_StoreUintptr(ptr *uintptr, new uintptr)
+
+//go:linkname sync_atomic_StorePointer sync/atomic.StorePointer
+//go:nosplit
+func sync_atomic_StorePointer(ptr *unsafe.Pointer, new unsafe.Pointer) {
+ sync_atomic_StoreUintptr((*uintptr)(unsafe.Pointer(ptr)), uintptr(new))
+ atomicstorep1(noescape(unsafe.Pointer(ptr)), new)
+ writebarrierptr_nostore((*uintptr)(unsafe.Pointer(ptr)), uintptr(new))
+ if mheap_.shadow_enabled {
+ writebarrierptr_noshadow((*uintptr)(noescape(unsafe.Pointer(ptr))))
+ }
+}
+
+//go:linkname sync_atomic_SwapUintptr sync/atomic.SwapUintptr
+func sync_atomic_SwapUintptr(ptr *uintptr, new uintptr) uintptr
+
+//go:linkname sync_atomic_SwapPointer sync/atomic.SwapPointer
+//go:nosplit
+func sync_atomic_SwapPointer(ptr unsafe.Pointer, new unsafe.Pointer) unsafe.Pointer {
+ old := unsafe.Pointer(sync_atomic_SwapUintptr((*uintptr)(noescape(ptr)), uintptr(new)))
+ writebarrierptr_nostore((*uintptr)(ptr), uintptr(new))
+ if mheap_.shadow_enabled {
+ writebarrierptr_noshadow((*uintptr)(noescape(ptr)))
+ }
+ return old
+}
+
+//go:linkname sync_atomic_CompareAndSwapUintptr sync/atomic.CompareAndSwapUintptr
+func sync_atomic_CompareAndSwapUintptr(ptr *uintptr, old, new uintptr) bool
+
+//go:linkname sync_atomic_CompareAndSwapPointer sync/atomic.CompareAndSwapPointer
+//go:nosplit
+func sync_atomic_CompareAndSwapPointer(ptr *unsafe.Pointer, old, new unsafe.Pointer) bool {
+ if !sync_atomic_CompareAndSwapUintptr((*uintptr)(noescape(unsafe.Pointer(ptr))), uintptr(old), uintptr(new)) {
+ return false
+ }
+ writebarrierptr_nostore((*uintptr)(unsafe.Pointer(ptr)), uintptr(new))
+ if mheap_.shadow_enabled {
+ writebarrierptr_noshadow((*uintptr)(noescape(unsafe.Pointer(ptr))))
+ }
+ return true
+}
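
For reference, the wrappers above rely on the runtime's noescape helper to
hide the destination pointer from escape analysis while still letting the
stored value escape. A minimal sketch of how such a helper works; the runtime
defines its own copy elsewhere in the package, so this is illustrative rather
than part of the commit:

	//go:nosplit
	func noescape(p unsafe.Pointer) unsafe.Pointer {
		// The xor with 0 leaves the value unchanged but breaks the
		// compiler's view that the result aliases p, so p itself is
		// not marked as escaping.
		x := uintptr(p)
		return unsafe.Pointer(x ^ 0)
	}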