author    Michael Pratt <mpratt@google.com>       2021-10-14 18:18:49 -0400
committer Dmitri Shuralyov <dmitshur@golang.org>  2021-10-28 15:31:34 +0000
commit    8c9c1487727a5e7938ca6ee7440334123a467b25 (patch)
tree      cefb66c55305d8ba63bbfa0adf40281ca9064df9
parent    1a6281d9501763b1457abe99f142f0efe435fe29 (diff)
[release-branch.go1.16] runtime: consistently access pollDesc r/w Gs with atomics
Both netpollblock and netpollunblock read gpp using a non-atomic load.
When consuming a ready event, netpollblock clears gpp using a
non-atomic store, thus skipping a barrier. Thus, on systems with weak
memory ordering, a sequence like the following is possible:

    T1                                      T2
    1. netpollblock: read gpp -> pdReady
    2. netpollblock: store gpp -> 0
                                            3. netpollunblock: read gpp -> pdReady
                                            4. netpollunblock: return

i.e., without a happens-before edge between (2) and (3),
netpollunblock may read the stale value of gpp.

Switch these accesses to use atomic loads and stores in order to
create these edges.

For ease of future maintenance, I've simply changed rg and wg to
always be accessed atomically, though I don't believe pollOpen or
pollClose require atomics today.

For #48925
Fixes #49009

Change-Id: I903ea667eea320277610b4f969129935731520c3
Reviewed-on: https://go-review.googlesource.com/c/go/+/355952
Trust: Michael Pratt <mpratt@google.com>
Run-TryBot: Michael Pratt <mpratt@google.com>
TryBot-Result: Go Bot <gobot@golang.org>
Reviewed-by: Michael Knyszek <mknyszek@google.com>
Reviewed-by: David Chase <drchase@google.com>
(cherry picked from commit 1b072b3ed56c18619587354f499fcda5279718a2)
Reviewed-on: https://go-review.googlesource.com/c/go/+/356370
-rw-r--r--  src/runtime/netpoll.go | 43
1 file changed, 26 insertions(+), 17 deletions(-)
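
The race described in the commit message can be modeled outside the
runtime. Below is a minimal standalone sketch, not the runtime's own
code: it uses the standard sync/atomic package in place of the
runtime-internal atomic package, and the pdNil constant plus the
consume/peek helper names are illustrative stand-ins (the real code
parks and readies Gs via gopark/goready rather than printing).

// Sketch (not runtime code): pollDesc.rg/wg reduced to a uintptr
// cell with two of its sentinel states.
package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

const (
	pdNil   uintptr = 0 // no notification, no waiter
	pdReady uintptr = 1 // IO readiness notification pending
)

// consume models netpollblock's fixed fast path: the CAS both reads
// and clears the cell atomically, publishing the store to other CPUs.
func consume(gpp *uintptr) bool {
	return atomic.CompareAndSwapUintptr(gpp, pdReady, pdNil)
}

// peek models netpollunblock's fixed read: an atomic load orders
// against the CAS above, so it cannot observe a stale pdReady after
// the notification has been consumed.
func peek(gpp *uintptr) uintptr {
	return atomic.LoadUintptr(gpp)
}

func main() {
	gpp := pdReady // a notification is pending
	var done sync.WaitGroup
	done.Add(2)
	go func() { defer done.Done(); fmt.Println("T1 consumed:", consume(&gpp)) }()
	go func() { defer done.Done(); fmt.Println("T2 observed:", peek(&gpp)) }()
	done.Wait()
}

With the original non-atomic load and store, nothing ordered T1's
clear of the cell against T2's read, so on a weakly ordered machine T2
could still see pdReady after T1 had consumed it; the atomic
operations supply the missing happens-before edge.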
diff --git a/src/runtime/netpoll.go b/src/runtime/netpoll.go
index 77eb3aa4c6..f296b0a4db 100644
--- a/src/runtime/netpoll.go
+++ b/src/runtime/netpoll.go
@@ -74,6 +74,7 @@ type pollDesc struct {
// pollReset, pollWait, pollWaitCanceled and runtime·netpollready (IO readiness notification)
// proceed w/o taking the lock. So closing, everr, rg, rd, wg and wd are manipulated
// in a lock-free way by all operations.
+ // TODO(golang.org/issue/49008): audit these lock-free fields for continued correctness.
// NOTE(dvyukov): the following code uses uintptr to store *g (rg/wg),
// that will blow up when GC starts moving objects.
lock mutex // protects the following fields
@@ -82,11 +83,11 @@ type pollDesc struct {
everr bool // marks event scanning error happened
user uint32 // user settable cookie
rseq uintptr // protects from stale read timers
- rg uintptr // pdReady, pdWait, G waiting for read or nil
+ rg uintptr // pdReady, pdWait, G waiting for read or nil. Accessed atomically.
rt timer // read deadline timer (set if rt.f != nil)
rd int64 // read deadline
wseq uintptr // protects from stale write timers
- wg uintptr // pdReady, pdWait, G waiting for write or nil
+ wg uintptr // pdReady, pdWait, G waiting for write or nil. Accessed atomically.
wt timer // write deadline timer
wd int64 // write deadline
self *pollDesc // storage for indirect interface. See (*pollDesc).makeArg.
@@ -143,20 +144,22 @@ func poll_runtime_isPollServerDescriptor(fd uintptr) bool {
func poll_runtime_pollOpen(fd uintptr) (*pollDesc, int) {
pd := pollcache.alloc()
lock(&pd.lock)
- if pd.wg != 0 && pd.wg != pdReady {
+ wg := atomic.Loaduintptr(&pd.wg)
+ if wg != 0 && wg != pdReady {
throw("runtime: blocked write on free polldesc")
}
- if pd.rg != 0 && pd.rg != pdReady {
+ rg := atomic.Loaduintptr(&pd.rg)
+ if rg != 0 && rg != pdReady {
throw("runtime: blocked read on free polldesc")
}
pd.fd = fd
pd.closing = false
pd.everr = false
pd.rseq++
- pd.rg = 0
+ atomic.Storeuintptr(&pd.rg, 0)
pd.rd = 0
pd.wseq++
- pd.wg = 0
+ atomic.Storeuintptr(&pd.wg, 0)
pd.wd = 0
pd.self = pd
unlock(&pd.lock)
@@ -171,10 +174,12 @@ func poll_runtime_pollClose(pd *pollDesc) {
if !pd.closing {
throw("runtime: close polldesc w/o unblock")
}
- if pd.wg != 0 && pd.wg != pdReady {
+ wg := atomic.Loaduintptr(&pd.wg)
+ if wg != 0 && wg != pdReady {
throw("runtime: blocked write on closing polldesc")
}
- if pd.rg != 0 && pd.rg != pdReady {
+ rg := atomic.Loaduintptr(&pd.rg)
+ if rg != 0 && rg != pdReady {
throw("runtime: blocked read on closing polldesc")
}
netpollclose(pd.fd)
@@ -198,9 +203,9 @@ func poll_runtime_pollReset(pd *pollDesc, mode int) int {
return errcode
}
if mode == 'r' {
- pd.rg = 0
+ atomic.Storeuintptr(&pd.rg, 0)
} else if mode == 'w' {
- pd.wg = 0
+ atomic.Storeuintptr(&pd.wg, 0)
}
return pollNoError
}
@@ -410,6 +415,8 @@ func netpollgoready(gp *g, traceskip int) {
// returns true if IO is ready, or false if timedout or closed
// waitio - wait only for completed IO, ignore errors
+// Concurrent calls to netpollblock in the same mode are forbidden, as pollDesc
+// can hold only a single waiting goroutine for each mode.
func netpollblock(pd *pollDesc, mode int32, waitio bool) bool {
gpp := &pd.rg
if mode == 'w' {
@@ -418,17 +425,19 @@ func netpollblock(pd *pollDesc, mode int32, waitio bool) bool {
// set the gpp semaphore to pdWait
for {
- old := *gpp
- if old == pdReady {
- *gpp = 0
+ // Consume notification if already ready.
+ if atomic.Casuintptr(gpp, pdReady, 0) {
return true
}
- if old != 0 {
- throw("runtime: double wait")
- }
if atomic.Casuintptr(gpp, 0, pdWait) {
break
}
+
+ // Double check that this isn't corrupt; otherwise we'd loop
+ // forever.
+ if v := atomic.Loaduintptr(gpp); v != pdReady && v != 0 {
+ throw("runtime: double wait")
+ }
}
// need to recheck error states after setting gpp to pdWait
@@ -452,7 +461,7 @@ func netpollunblock(pd *pollDesc, mode int32, ioready bool) *g {
}
for {
- old := *gpp
+ old := atomic.Loaduintptr(gpp)
if old == pdReady {
return nil
}
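
The rewritten netpollblock wait loop from the hunk above can be
sketched the same way. This extends the earlier sketch with a pdWait
sentinel; block and its comments are illustrative, parking the
goroutine after a successful CAS to pdWait is elided (that requires
gopark inside the runtime), and panic stands in for the runtime's
throw.

// Sketch only: models the rewritten netpollblock CAS loop, reusing
// pdNil/pdReady and the sync/atomic import from the sketch above.
const pdWait uintptr = 2 // a waiter is announcing itself

func block(gpp *uintptr) bool {
	for {
		// Consume a pending notification if one is already there.
		if atomic.CompareAndSwapUintptr(gpp, pdReady, pdNil) {
			return true
		}
		// Announce intent to wait; on success the caller would park.
		if atomic.CompareAndSwapUintptr(gpp, pdNil, pdWait) {
			break
		}
		// Any value other than pdReady or pdNil means a second
		// concurrent waiter in the same mode, which is forbidden.
		if v := atomic.LoadUintptr(gpp); v != pdReady && v != pdNil {
			panic("double wait")
		}
	}
	// ... the runtime parks here until a wakeup replaces pdWait ...
	return false
}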