author     Changkun Ou <hi@changkun.us>  2020-08-24 13:45:27 +0200
committer  Bryan C. Mills <bcmills@google.com>  2020-08-24 20:05:41 +0000
commit     94953d3e5928c8a577bad7911aabbf627269ef77 (patch)
tree       815794b61cee10f76b4678232d80d19e58bf1c47 /src/sync
parent     bb998747d6c5213e3a366936c482e149dce62720 (diff)
download   go-94953d3e5928c8a577bad7911aabbf627269ef77.tar.gz
           go-94953d3e5928c8a577bad7911aabbf627269ef77.zip
sync: delete dirty keys inside Map.LoadAndDelete
Fixes #40999

Change-Id: Ie32427e5cb5ed512b976b554850f50be156ce9f2
Reviewed-on: https://go-review.googlesource.com/c/go/+/250197
Run-TryBot: Emmanuel Odeke <emm.odeke@gmail.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Bryan C. Mills <bcmills@google.com>
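For context: Map.LoadAndDelete, added in Go 1.15, deletes the value stored for a key and returns that previous value; loaded reports whether the key was present. The leak behind #40999 is that a key removed through the method's slow path was left behind as a key of the internal dirty map, so it remained reachable even though the map was logically empty. A minimal, self-contained usage sketch of the API (not part of this CL):

package main

import (
	"fmt"
	"sync"
)

func main() {
	var m sync.Map
	m.Store("token", 42)

	// LoadAndDelete removes the key and returns the value it held.
	if v, loaded := m.LoadAndDelete("token"); loaded {
		fmt.Println("deleted:", v) // deleted: 42
	}

	// The key is gone, so a second call reports loaded == false.
	if _, loaded := m.LoadAndDelete("token"); !loaded {
		fmt.Println("already removed")
	}
}

With this change, a key passed to LoadAndDelete becomes collectible as soon as the caller drops its own references to it.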
Diffstat (limited to 'src/sync')
-rw-r--r--  src/sync/map.go       |  1
-rw-r--r--  src/sync/map_test.go  | 24
2 files changed, 25 insertions(+), 0 deletions(-)
diff --git a/src/sync/map.go b/src/sync/map.go
index a61e2ebdd6..9ad25353ff 100644
--- a/src/sync/map.go
+++ b/src/sync/map.go
@@ -274,6 +274,7 @@ func (m *Map) LoadAndDelete(key interface{}) (value interface{}, loaded bool) {
 		e, ok = read.m[key]
 		if !ok && read.amended {
 			e, ok = m.dirty[key]
+			delete(m.dirty, key)
 			// Regardless of whether the entry was present, record a miss: this key
 			// will take the slow path until the dirty map is promoted to the read
 			// map.
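To see where the new line sits, this is roughly how LoadAndDelete reads after the change; only the hunk above is verbatim from this diff, the rest is paraphrased from src/sync/map.go at this revision and is a sketch rather than an exact copy:

func (m *Map) LoadAndDelete(key interface{}) (value interface{}, loaded bool) {
	read, _ := m.read.Load().(readOnly)
	e, ok := read.m[key]
	if !ok && read.amended {
		m.mu.Lock()
		read, _ = m.read.Load().(readOnly)
		e, ok = read.m[key]
		if !ok && read.amended {
			e, ok = m.dirty[key]
			delete(m.dirty, key) // the fix: drop the key from the dirty map right away
			// Record a miss so this key takes the slow path until the dirty
			// map is promoted to the read map.
			m.missLocked()
		}
		m.mu.Unlock()
	}
	if ok {
		return e.delete()
	}
	return nil, false
}

The delete runs while m.mu is held, so it is safe against concurrent slow-path lookups. It also closes the leak: previously each slow-path delete recorded a miss via missLocked but left the dead key in m.dirty, so len(m.dirty) grew in step with the miss count and the promotion threshold in missLocked (the comparison against len(m.dirty) that the new test's comment mentions) could be pushed out indefinitely, keeping deleted keys reachable from m.dirty.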
diff --git a/src/sync/map_test.go b/src/sync/map_test.go
index 4ae989a6d5..7f163caa5c 100644
--- a/src/sync/map_test.go
+++ b/src/sync/map_test.go
@@ -9,6 +9,7 @@ import (
"reflect"
"runtime"
"sync"
+ "sync/atomic"
"testing"
"testing/quick"
)
@@ -171,3 +172,26 @@ func TestConcurrentRange(t *testing.T) {
 		}
 	}
 }
+
+func TestIssue40999(t *testing.T) {
+	var m sync.Map
+
+	// Since the miss-counting in missLocked (via Delete)
+	// compares the miss count with len(m.dirty),
+	// add an initial entry to bias len(m.dirty) above the miss count.
+	m.Store(nil, struct{}{})
+
+	var finalized uint32
+
+	// Set finalizers that count for collected keys. A non-zero count
+	// indicates that keys have not been leaked.
+	for atomic.LoadUint32(&finalized) == 0 {
+		p := new(int)
+		runtime.SetFinalizer(p, func(*int) {
+			atomic.AddUint32(&finalized, 1)
+		})
+		m.Store(p, struct{}{})
+		m.Delete(p)
+		runtime.GC()
+	}
+}
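A note on the test's technique: rather than inspecting sync.Map internals, it attaches a finalizer to each key and loops until some finalizer runs; if deleted keys were still being leaked, no key would ever become unreachable and the loop would spin forever. (The test calls Delete, which at this revision is a thin wrapper over LoadAndDelete, so it exercises the patched path.) The finalizer idiom on its own, as a standalone sketch with illustrative names such as allocate:

package main

import (
	"fmt"
	"runtime"
	"sync/atomic"
)

// allocate creates an object whose only reference dies when this function
// returns, and registers a finalizer that bumps the counter when the
// object is eventually collected.
func allocate(collected *uint32) {
	p := new(int)
	runtime.SetFinalizer(p, func(*int) {
		atomic.AddUint32(collected, 1)
	})
}

func main() {
	var collected uint32

	// Finalizers only become eligible after a collection and run
	// asynchronously, so keep allocating and forcing GC until at least
	// one finalizer has fired. If the objects were being leaked, this
	// loop would never terminate.
	for atomic.LoadUint32(&collected) == 0 {
		allocate(&collected)
		runtime.GC()
	}
	fmt.Println("an object was collected and its finalizer ran")
}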