author    Damien Neil <dneil@google.com>          2021-08-05 11:32:43 -0700
committer Dmitri Shuralyov <dmitshur@golang.org>  2022-05-02 16:21:47 +0000
commit    0fdca725c754dd48a3b84c94d7f147100f751677 (patch)
tree      e436ac7abb7ccf416d8b17d7b077cca22f02811b
parent    346b18ee9d15410ab08dd583787c64dbed0666d2 (diff)
download  go-0fdca725c754dd48a3b84c94d7f147100f751677.tar.gz
          go-0fdca725c754dd48a3b84c94d7f147100f751677.zip
[release-branch.go1.17] sync: remove TestWaitGroupMisuse2 and TestWaitGroupMisuse3
These tests are inherently nondeterministic: They exercise a racy code
path for up to one million iterations, and require that an error occur
at least once. TestWaitGroupMisuse2 in particular is an ongoing source
of trybot flakiness.

Updates #38163.
Fixes #52306.

Change-Id: Ibbbda2c998c915333487ad262d3df6829de01c2b
Reviewed-on: https://go-review.googlesource.com/c/go/+/340249
Trust: Damien Neil <dneil@google.com>
Trust: Dmitri Shuralyov <dmitshur@golang.org>
Run-TryBot: Damien Neil <dneil@google.com>
TryBot-Result: Go Bot <gobot@golang.org>
Reviewed-by: Dmitry Vyukov <dvyukov@google.com>
(cherry picked from commit 011fd002457da0823da5f06b099fcf6e21444b00)
Reviewed-on: https://go-review.googlesource.com/c/go/+/399821
Run-TryBot: Dmitri Shuralyov <dmitshur@google.com>
TryBot-Result: Gopher Robot <gobot@golang.org>
Reviewed-by: Dmitri Shuralyov <dmitshur@google.com>
Reviewed-by: Damien Neil <dneil@google.com>
-rw-r--r--  src/sync/waitgroup_test.go  126
1 file changed, 0 insertions(+), 126 deletions(-)
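For context, the pattern being removed can be distilled into the standalone sketch below (illustrative only, not part of this change; adapted from the body of TestWaitGroupMisuse3 shown in the diff). Three goroutines race Done, Wait, and an early reuse of the WaitGroup, and the test could only pass if the runtime's opportunistic misuse check happened to panic at least once within one million attempts; when the check never fired, the test failed, which is the nondeterminism described in the commit message. Package name and printed messages here are placeholders.

package main

import (
	"fmt"
	"sync"
)

func main() {
	for i := 0; i < 1e6; i++ {
		done := make(chan interface{}, 3)
		var wg sync.WaitGroup
		wg.Add(1)
		go func() {
			defer func() { done <- recover() }()
			wg.Done()
		}()
		go func() {
			defer func() { done <- recover() }()
			wg.Wait()
			// Start reusing the wg before the Wait in the goroutine below returns.
			wg.Add(1)
			go func() { wg.Done() }()
			wg.Wait()
		}()
		go func() {
			defer func() { done <- recover() }()
			wg.Wait()
		}()
		for j := 0; j < 3; j++ {
			if err := <-done; err != nil {
				// The misuse check fired; the original test treated this as success.
				fmt.Printf("misuse detected on iteration %d: %v\n", i+1, err)
				return
			}
		}
	}
	// Reaching this point is the nondeterministic failure mode the change removes.
	fmt.Println("misuse never detected after 1e6 iterations")
}

Note that the original tests were skipped outright under the race detector (via the knownRacy helper also deleted below), since these deliberate races would otherwise be reported as errors.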
diff --git a/src/sync/waitgroup_test.go b/src/sync/waitgroup_test.go
index c569e0faa2..4ded218d2d 100644
--- a/src/sync/waitgroup_test.go
+++ b/src/sync/waitgroup_test.go
@@ -5,8 +5,6 @@
package sync_test
import (
- "internal/race"
- "runtime"
. "sync"
"sync/atomic"
"testing"
@@ -48,12 +46,6 @@ func TestWaitGroup(t *testing.T) {
}
}
-func knownRacy(t *testing.T) {
- if race.Enabled {
- t.Skip("skipping known-racy test under the race detector")
- }
-}
-
func TestWaitGroupMisuse(t *testing.T) {
defer func() {
err := recover()
@@ -68,124 +60,6 @@ func TestWaitGroupMisuse(t *testing.T) {
t.Fatal("Should panic")
}
-// pollUntilEqual blocks until v, loaded atomically, is
-// equal to the target.
-func pollUntilEqual(v *uint32, target uint32) {
- for {
- for i := 0; i < 1e3; i++ {
- if atomic.LoadUint32(v) == target {
- return
- }
- }
- // yield to avoid deadlock with the garbage collector
- // see issue #20072
- runtime.Gosched()
- }
-}
-
-func TestWaitGroupMisuse2(t *testing.T) {
- knownRacy(t)
- if runtime.NumCPU() <= 4 {
- t.Skip("NumCPU<=4, skipping: this test requires parallelism")
- }
- defer func() {
- err := recover()
- if err != "sync: negative WaitGroup counter" &&
- err != "sync: WaitGroup misuse: Add called concurrently with Wait" &&
- err != "sync: WaitGroup is reused before previous Wait has returned" {
- t.Fatalf("Unexpected panic: %#v", err)
- }
- }()
- defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(4))
- done := make(chan interface{}, 2)
- // The detection is opportunistic, so we want it to panic
- // at least in one run out of a million.
- for i := 0; i < 1e6; i++ {
- var wg WaitGroup
- var here uint32
- wg.Add(1)
- go func() {
- defer func() {
- done <- recover()
- }()
- atomic.AddUint32(&here, 1)
- pollUntilEqual(&here, 3)
- wg.Wait()
- }()
- go func() {
- defer func() {
- done <- recover()
- }()
- atomic.AddUint32(&here, 1)
- pollUntilEqual(&here, 3)
- wg.Add(1) // This is the bad guy.
- wg.Done()
- }()
- atomic.AddUint32(&here, 1)
- pollUntilEqual(&here, 3)
- wg.Done()
- for j := 0; j < 2; j++ {
- if err := <-done; err != nil {
- panic(err)
- }
- }
- }
- t.Fatal("Should panic")
-}
-
-func TestWaitGroupMisuse3(t *testing.T) {
- knownRacy(t)
- if runtime.NumCPU() <= 1 {
- t.Skip("NumCPU==1, skipping: this test requires parallelism")
- }
- defer func() {
- err := recover()
- if err != "sync: negative WaitGroup counter" &&
- err != "sync: WaitGroup misuse: Add called concurrently with Wait" &&
- err != "sync: WaitGroup is reused before previous Wait has returned" {
- t.Fatalf("Unexpected panic: %#v", err)
- }
- }()
- defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(4))
- done := make(chan interface{}, 3)
- // The detection is opportunistic, so we want it to panic
- // at least in one run out of a million.
- for i := 0; i < 1e6; i++ {
- var wg WaitGroup
- wg.Add(1)
- go func() {
- defer func() {
- done <- recover()
- }()
- wg.Done()
- }()
- go func() {
- defer func() {
- done <- recover()
- }()
- wg.Wait()
- // Start reusing the wg before waiting for the Wait below to return.
- wg.Add(1)
- go func() {
- wg.Done()
- }()
- wg.Wait()
- }()
- go func() {
- defer func() {
- done <- recover()
- }()
- wg.Wait()
- }()
- for j := 0; j < 3; j++ {
- if err := <-done; err != nil {
- panic(err)
- }
- }
- }
- t.Fatal("Should panic")
-}
-
func TestWaitGroupRace(t *testing.T) {
// Run this test for about 1ms.
for i := 0; i < 1000; i++ {