path: root/src/sync
author     Changkun Ou <hi@changkun.us>           2019-11-08 11:23:58 +0100
committer  Bryan C. Mills <bcmills@google.com>    2020-02-25 14:31:55 +0000
commit     2e8dbae85ce88d02f651e53338984288057f14cb (patch)
tree       18c7cb61088e0ea0845484f895e08cb1cbf71d51 /src/sync
parent     450d0b2f30e820f402a638799de0b886c1da8dbe (diff)
sync: add new Map method LoadAndDelete
This CL implements a LoadAndDelete method in sync.Map.

Benchmark:

name                                              time/op
LoadAndDeleteBalanced/*sync_test.RWMutexMap-12    98.8ns ± 1%
LoadAndDeleteBalanced/*sync.Map-12                10.3ns ±11%
LoadAndDeleteUnique/*sync_test.RWMutexMap-12      99.2ns ± 2%
LoadAndDeleteUnique/*sync.Map-12                  6.63ns ±10%
LoadAndDeleteCollision/*sync_test.DeepCopyMap-12   140ns ± 0%
LoadAndDeleteCollision/*sync_test.RWMutexMap-12   75.2ns ± 2%
LoadAndDeleteCollision/*sync.Map-12               5.21ns ± 5%

In addition, Delete is bounded and more efficient if many collisions:

name                                       old time/op  new time/op  delta
DeleteCollision/*sync_test.DeepCopyMap-12   120ns ± 2%   125ns ± 1%   +3.80%  (p=0.000 n=10+9)
DeleteCollision/*sync_test.RWMutexMap-12   73.5ns ± 3%  79.5ns ± 1%   +8.03%  (p=0.000 n=10+9)
DeleteCollision/*sync.Map-12               97.8ns ± 3%   5.9ns ± 4%  -94.00%  (p=0.000 n=10+10)

Fixes #33762

Change-Id: Ic8469a7861d27ab0edeface0078aad8af9b26c2f
Reviewed-on: https://go-review.googlesource.com/c/go/+/205899
Reviewed-by: Bryan C. Mills <bcmills@google.com>
Run-TryBot: Bryan C. Mills <bcmills@google.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
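
For context while reading the diff below, a minimal usage sketch of the new method (illustrative only, not part of this change; the key and value are arbitrary):

package main

import (
	"fmt"
	"sync"
)

func main() {
	var m sync.Map
	m.Store("token", 42)

	// LoadAndDelete removes the key and reports the value it held, if any.
	if v, loaded := m.LoadAndDelete("token"); loaded {
		fmt.Println("deleted value:", v) // deleted value: 42
	}

	// The key is gone now, so loaded is false and v is nil.
	if _, loaded := m.LoadAndDelete("token"); !loaded {
		fmt.Println("nothing left to delete")
	}
}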
Diffstat (limited to 'src/sync')
-rw-r--r--  src/sync/map.go                 25
-rw-r--r--  src/sync/map_bench_test.go      74
-rw-r--r--  src/sync/map_reference_test.go  23
-rw-r--r--  src/sync/map_test.go            13
4 files changed, 123 insertions, 12 deletions
diff --git a/src/sync/map.go b/src/sync/map.go
index c6aa308856..a61e2ebdd6 100644
--- a/src/sync/map.go
+++ b/src/sync/map.go
@@ -263,8 +263,9 @@ func (e *entry) tryLoadOrStore(i interface{}) (actual interface{}, loaded, ok bo
}
}
-// Delete deletes the value for a key.
-func (m *Map) Delete(key interface{}) {
+// LoadAndDelete deletes the value for a key, returning the previous value if any.
+// The loaded result reports whether the key was present.
+func (m *Map) LoadAndDelete(key interface{}) (value interface{}, loaded bool) {
read, _ := m.read.Load().(readOnly)
e, ok := read.m[key]
if !ok && read.amended {
@@ -272,23 +273,33 @@ func (m *Map) Delete(key interface{}) {
read, _ = m.read.Load().(readOnly)
e, ok = read.m[key]
if !ok && read.amended {
- delete(m.dirty, key)
+ e, ok = m.dirty[key]
+ // Regardless of whether the entry was present, record a miss: this key
+ // will take the slow path until the dirty map is promoted to the read
+ // map.
+ m.missLocked()
}
m.mu.Unlock()
}
if ok {
- e.delete()
+ return e.delete()
}
+ return nil, false
}
-func (e *entry) delete() (hadValue bool) {
+// Delete deletes the value for a key.
+func (m *Map) Delete(key interface{}) {
+ m.LoadAndDelete(key)
+}
+
+func (e *entry) delete() (value interface{}, ok bool) {
for {
p := atomic.LoadPointer(&e.p)
if p == nil || p == expunged {
- return false
+ return nil, false
}
if atomic.CompareAndSwapPointer(&e.p, p, nil) {
- return true
+ return *(*interface{})(p), true
}
}
}
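
The reworked e.delete() is what lets LoadAndDelete report the removed value: the CompareAndSwapPointer both claims and clears the entry in one step, so at most one caller gets the old value back. A sketch of the consumer pattern this enables (illustrative only, not part of this change):

package main

import (
	"fmt"
	"sync"
)

func main() {
	var pending sync.Map
	pending.Store("job-1", "payload")

	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			// Exactly one goroutine observes loaded == true and "claims" the
			// value; a separate Load followed by Delete could let two
			// goroutines read the same value before either removed it.
			if v, loaded := pending.LoadAndDelete("job-1"); loaded {
				fmt.Println("claimed:", v)
			}
		}()
	}
	wg.Wait()
}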
diff --git a/src/sync/map_bench_test.go b/src/sync/map_bench_test.go
index e6a8badddb..cf0a3d7fde 100644
--- a/src/sync/map_bench_test.go
+++ b/src/sync/map_bench_test.go
@@ -144,6 +144,66 @@ func BenchmarkLoadOrStoreCollision(b *testing.B) {
})
}
+func BenchmarkLoadAndDeleteBalanced(b *testing.B) {
+ const hits, misses = 128, 128
+
+ benchMap(b, bench{
+ setup: func(b *testing.B, m mapInterface) {
+ if _, ok := m.(*DeepCopyMap); ok {
+ b.Skip("DeepCopyMap has quadratic running time.")
+ }
+ for i := 0; i < hits; i++ {
+ m.LoadOrStore(i, i)
+ }
+ // Prime the map to get it into a steady state.
+ for i := 0; i < hits*2; i++ {
+ m.Load(i % hits)
+ }
+ },
+
+ perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) {
+ for ; pb.Next(); i++ {
+ j := i % (hits + misses)
+ if j < hits {
+ m.LoadAndDelete(j)
+ } else {
+ m.LoadAndDelete(i)
+ }
+ }
+ },
+ })
+}
+
+func BenchmarkLoadAndDeleteUnique(b *testing.B) {
+ benchMap(b, bench{
+ setup: func(b *testing.B, m mapInterface) {
+ if _, ok := m.(*DeepCopyMap); ok {
+ b.Skip("DeepCopyMap has quadratic running time.")
+ }
+ },
+
+ perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) {
+ for ; pb.Next(); i++ {
+ m.LoadAndDelete(i)
+ }
+ },
+ })
+}
+
+func BenchmarkLoadAndDeleteCollision(b *testing.B) {
+ benchMap(b, bench{
+ setup: func(_ *testing.B, m mapInterface) {
+ m.LoadOrStore(0, 0)
+ },
+
+ perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) {
+ for ; pb.Next(); i++ {
+ m.LoadAndDelete(0)
+ }
+ },
+ })
+}
+
func BenchmarkRange(b *testing.B) {
const mapSize = 1 << 10
@@ -213,3 +273,17 @@ func BenchmarkAdversarialDelete(b *testing.B) {
},
})
}
+
+func BenchmarkDeleteCollision(b *testing.B) {
+ benchMap(b, bench{
+ setup: func(_ *testing.B, m mapInterface) {
+ m.LoadOrStore(0, 0)
+ },
+
+ perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) {
+ for ; pb.Next(); i++ {
+ m.Delete(0)
+ }
+ },
+ })
+}
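
The benchmarks above go through the package-internal benchMap/mapInterface harness. A rough standalone analogue of the collision case, written directly against sync.Map (names are illustrative; this is a sketch, not part of this change):

package syncbench_test

import (
	"sync"
	"testing"
)

// BenchmarkLoadAndDeleteCollisionStandalone mirrors the collision benchmark
// above, but without the mapInterface harness.
func BenchmarkLoadAndDeleteCollisionStandalone(b *testing.B) {
	var m sync.Map
	m.LoadOrStore(0, 0)

	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			// Most iterations find the key already gone, so this exercises
			// the read-only fast path added in map.go.
			m.LoadAndDelete(0)
		}
	})
}

Either form runs with go test -bench=LoadAndDelete.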
diff --git a/src/sync/map_reference_test.go b/src/sync/map_reference_test.go
index 9f27b07c32..d105a24e92 100644
--- a/src/sync/map_reference_test.go
+++ b/src/sync/map_reference_test.go
@@ -16,6 +16,7 @@ type mapInterface interface {
Load(interface{}) (interface{}, bool)
Store(key, value interface{})
LoadOrStore(key, value interface{}) (actual interface{}, loaded bool)
+ LoadAndDelete(key interface{}) (value interface{}, loaded bool)
Delete(interface{})
Range(func(key, value interface{}) (shouldContinue bool))
}
@@ -56,6 +57,18 @@ func (m *RWMutexMap) LoadOrStore(key, value interface{}) (actual interface{}, lo
return actual, loaded
}
+func (m *RWMutexMap) LoadAndDelete(key interface{}) (value interface{}, loaded bool) {
+ m.mu.Lock()
+ value, loaded = m.dirty[key]
+ if !loaded {
+ m.mu.Unlock()
+ return nil, false
+ }
+ delete(m.dirty, key)
+ m.mu.Unlock()
+ return value, loaded
+}
+
func (m *RWMutexMap) Delete(key interface{}) {
m.mu.Lock()
delete(m.dirty, key)
@@ -124,6 +137,16 @@ func (m *DeepCopyMap) LoadOrStore(key, value interface{}) (actual interface{}, l
return actual, loaded
}
+func (m *DeepCopyMap) LoadAndDelete(key interface{}) (value interface{}, loaded bool) {
+ m.mu.Lock()
+ dirty := m.dirty()
+ value, loaded = dirty[key]
+ delete(dirty, key)
+ m.clean.Store(dirty)
+ m.mu.Unlock()
+ return
+}
+
func (m *DeepCopyMap) Delete(key interface{}) {
m.mu.Lock()
dirty := m.dirty()
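
Both reference implementations perform the lookup and the deletion in a single critical section (RWMutexMap under the write lock, DeepCopyMap while replacing the whole copy). A hypothetical variant that drops the lock in between, shown only to illustrate why that matters and not part of this change, would be racy:

// loadThenDeleteRacy is a hypothetical (and incorrect) alternative: releasing
// the lock between the lookup and the delete opens a window in which another
// goroutine can Store or Delete the same key, so the value returned may no
// longer be the one that was actually removed.
func (m *RWMutexMap) loadThenDeleteRacy(key interface{}) (value interface{}, loaded bool) {
	m.mu.RLock()
	value, loaded = m.dirty[key]
	m.mu.RUnlock()
	// Window: concurrent writers can interleave here.
	m.mu.Lock()
	delete(m.dirty, key)
	m.mu.Unlock()
	return value, loaded
}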
diff --git a/src/sync/map_test.go b/src/sync/map_test.go
index b60a1c7bed..4ae989a6d5 100644
--- a/src/sync/map_test.go
+++ b/src/sync/map_test.go
@@ -16,13 +16,14 @@ import (
type mapOp string
const (
- opLoad = mapOp("Load")
- opStore = mapOp("Store")
- opLoadOrStore = mapOp("LoadOrStore")
- opDelete = mapOp("Delete")
+ opLoad = mapOp("Load")
+ opStore = mapOp("Store")
+ opLoadOrStore = mapOp("LoadOrStore")
+ opLoadAndDelete = mapOp("LoadAndDelete")
+ opDelete = mapOp("Delete")
)
-var mapOps = [...]mapOp{opLoad, opStore, opLoadOrStore, opDelete}
+var mapOps = [...]mapOp{opLoad, opStore, opLoadOrStore, opLoadAndDelete, opDelete}
// mapCall is a quick.Generator for calls on mapInterface.
type mapCall struct {
@@ -39,6 +40,8 @@ func (c mapCall) apply(m mapInterface) (interface{}, bool) {
return nil, false
case opLoadOrStore:
return m.LoadOrStore(c.k, c.v)
+ case opLoadAndDelete:
+ return m.LoadAndDelete(c.k)
case opDelete:
m.Delete(c.k)
return nil, false
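
With opLoadAndDelete in the op list, the randomized call sequences in map_test.go now exercise the new method against every reference implementation. The property being relied on can also be stated as a self-contained testing/quick check (a sketch under illustrative names, independent of the harness in this file):

package mapprop_test

import (
	"sync"
	"testing"
	"testing/quick"
)

// TestLoadAndDeleteMatchesReference checks that, after the same sequence of
// stores, sync.Map.LoadAndDelete returns the same (value, loaded) pair as a
// plain map would.
func TestLoadAndDeleteMatchesReference(t *testing.T) {
	withSyncMap := func(keys []uint8, probe uint8) (interface{}, bool) {
		var m sync.Map
		for _, k := range keys {
			m.Store(k, int(k))
		}
		return m.LoadAndDelete(probe)
	}
	withPlainMap := func(keys []uint8, probe uint8) (interface{}, bool) {
		ref := make(map[uint8]interface{})
		for _, k := range keys {
			ref[k] = int(k)
		}
		v, ok := ref[probe]
		delete(ref, probe)
		return v, ok
	}
	if err := quick.CheckEqual(withSyncMap, withPlainMap, nil); err != nil {
		t.Fatal(err)
	}
}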