author     Russ Cox <rsc@golang.org>   2017-10-14 22:47:24 -0400
committer  Russ Cox <rsc@golang.org>   2017-10-25 20:24:03 +0000
commit     d93cb46280ae4710c5c6113159c7973a08a72249
tree       6b3cac482598e995b5fd727ea574a50611d12997
parent     78952c06c53c8455d03430b17b5a7fe2693b5d35
[release-branch.go1.9] runtime: use simple, more robust fastrandn
CL 36932 (speed up fastrandn) made it faster but introduced bad
interference with some properties of fastrand itself, making fastrandn
not very random in certain ways. In particular, certain selects are
demonstrably unfair.

For Go 1.10 the new faster fastrandn has induced a new fastrand, which
in turn has caused other follow-on bugs that are still being discovered
and fixed. For Go 1.9.2, just go back to the barely slower %
implementation that we used in Go 1.8 and earlier. This should restore
fairness in select and fix any other problems caused by the clever
fastrandn.

The test in this CL is copied from CL 62530.

Fixes #22253.

Change-Id: Ibcf948a7bce981452e05c90dbdac122043f6f813
Reviewed-on: https://go-review.googlesource.com/70991
Run-TryBot: Russ Cox <rsc@golang.org>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Ian Lance Taylor <iant@golang.org>
Reviewed-by: Keith Randall <khr@golang.org>
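For context, here is a side-by-side sketch of the two reduction strategies the message above contrasts. It is a minimal, self-contained illustration, not the runtime code: fastrandLocal and its xorshift state are hypothetical stand-ins for the unexported runtime fastrand, and only the bodies of the two fastrandn variants mirror what this CL actually changes (see the stubs.go hunk below).

package main

import "fmt"

// state is a hypothetical stand-in for the runtime's per-m random state;
// the real fastrand in src/runtime/stubs.go is unexported and different.
var state uint32 = 0x49f6428a

// fastrandLocal is a generic xorshift step, purely for illustration.
func fastrandLocal() uint32 {
	state ^= state << 13
	state ^= state >> 17
	state ^= state << 5
	return state
}

// fastrandnLemire is the reverted Go 1.9.0/1.9.1 reduction: multiply and
// keep the high 32 bits of the product (Lemire's alternative to modulo).
func fastrandnLemire(n uint32) uint32 {
	return uint32(uint64(fastrandLocal()) * uint64(n) >> 32)
}

// fastrandnMod is the Go 1.8-style reduction restored by this CL.
func fastrandnMod(n uint32) uint32 {
	return fastrandLocal() % n
}

func main() {
	var lemire, mod [2]int
	for i := 0; i < 10000; i++ {
		lemire[fastrandnLemire(2)]++
		mod[fastrandnMod(2)]++
	}
	fmt.Println("high-bits reduction:", lemire)
	fmt.Println("mod reduction:      ", mod)
}

With a reasonable generator both tallies come out roughly even; the problem described above was that the high-bits reduction interacted badly with patterns in the real fastrand's output sequence, which is why this CL falls back to %.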
-rw-r--r--  src/runtime/chan_test.go  57
-rw-r--r--  src/runtime/stubs.go       8
2 files changed, 62 insertions, 3 deletions
diff --git a/src/runtime/chan_test.go b/src/runtime/chan_test.go
index a75fa1b992..0c94cf1a63 100644
--- a/src/runtime/chan_test.go
+++ b/src/runtime/chan_test.go
@@ -5,6 +5,7 @@
 package runtime_test
 
 import (
+	"math"
 	"runtime"
 	"sync"
 	"sync/atomic"
@@ -430,6 +431,62 @@ func TestSelectStress(t *testing.T) {
 	wg.Wait()
 }
 
+func TestSelectFairness(t *testing.T) {
+	const trials = 10000
+	c1 := make(chan byte, trials+1)
+	c2 := make(chan byte, trials+1)
+	for i := 0; i < trials+1; i++ {
+		c1 <- 1
+		c2 <- 2
+	}
+	c3 := make(chan byte)
+	c4 := make(chan byte)
+	out := make(chan byte)
+	done := make(chan byte)
+	var wg sync.WaitGroup
+	wg.Add(1)
+	go func() {
+		defer wg.Done()
+		for {
+			var b byte
+			select {
+			case b = <-c3:
+			case b = <-c4:
+			case b = <-c1:
+			case b = <-c2:
+			}
+			select {
+			case out <- b:
+			case <-done:
+				return
+			}
+		}
+	}()
+	cnt1, cnt2 := 0, 0
+	for i := 0; i < trials; i++ {
+		switch b := <-out; b {
+		case 1:
+			cnt1++
+		case 2:
+			cnt2++
+		default:
+			t.Fatalf("unexpected value %d on channel", b)
+		}
+	}
+	// If the select in the goroutine is fair,
+	// cnt1 and cnt2 should be about the same value.
+	// With 10,000 trials, the expected margin of error at
+	// a confidence level of five nines is 4.4172 / (2 * Sqrt(10000)).
+	r := float64(cnt1) / trials
+	e := math.Abs(r - 0.5)
+	t.Log(cnt1, cnt2, r, e)
+	if e > 4.4172/(2*math.Sqrt(trials)) {
+		t.Errorf("unfair select: in %d trials, results were %d, %d", trials, cnt1, cnt2)
+	}
+	close(done)
+	wg.Wait()
+}
+
 func TestChanSendInterface(t *testing.T) {
 	type mt struct{}
 	m := &mt{}
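The bound used in TestSelectFairness above can be sanity-checked in isolation. A minimal sketch, assuming (as the test comment indicates) that 4.4172 is approximately the two-sided z-score for a "five nines" (99.999%) confidence level, and that under a fair select cnt1 is binomial with p = 0.5, so cnt1/trials has standard deviation 0.5/Sqrt(trials):

package main

import (
	"fmt"
	"math"
)

func main() {
	const trials = 10000
	const z = 4.4172 // approximate two-sided z-score at the "five nines" level

	// Allowed deviation of cnt1/trials from 0.5 is z * 0.5 / Sqrt(trials),
	// which is the test's 4.4172 / (2 * Sqrt(10000)).
	margin := z / (2 * math.Sqrt(trials))
	fmt.Printf("allowed deviation from 0.5: %.5f\n", margin) // about 0.02209
	fmt.Printf("acceptable cnt1 range: about %.0f to %.0f\n",
		trials*(0.5-margin), trials*(0.5+margin)) // roughly 4779 to 5221
}

In other words, a run whose split deviates from 5000/5000 by more than about 221 in 10,000 trials fails the test.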
diff --git a/src/runtime/stubs.go b/src/runtime/stubs.go
index c4f32a8482..72d21187ec 100644
--- a/src/runtime/stubs.go
+++ b/src/runtime/stubs.go
@@ -105,9 +105,11 @@ func fastrand() uint32 {
 
 //go:nosplit
 func fastrandn(n uint32) uint32 {
-	// This is similar to fastrand() % n, but faster.
-	// See http://lemire.me/blog/2016/06/27/a-fast-alternative-to-the-modulo-reduction/
-	return uint32(uint64(fastrand()) * uint64(n) >> 32)
+	// Don't be clever.
+	// fastrand is not good enough for cleverness.
+	// Just use mod.
+	// See golang.org/issue/21806.
+	return fastrand() % n
 }
 
 //go:linkname sync_fastrand sync.fastrand
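A side note on the trade-off: neither reduction is meaningfully biased on its own. The sketch below, which uses 16-bit inputs and n = 6 purely for illustration (not the runtime's actual types or values), sweeps every possible input through both reductions; the bucket counts differ by at most one, consistent with the commit message blaming interference with properties of fastrand itself rather than the reduction formula.

package main

import "fmt"

func main() {
	// Exhaustively map every 16-bit value through both reductions for n = 6.
	const n = 6
	var modCounts, highBitsCounts [n]int
	for x := 0; x < 1<<16; x++ {
		modCounts[x%n]++                  // x % n, as restored by this CL
		highBitsCounts[uint32(x)*n>>16]++ // 16-bit analogue of the reverted multiply-shift
	}
	fmt.Println("x % n:       ", modCounts)
	fmt.Println("x * n >> 16: ", highBitsCounts)
}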