author    Russ Cox <rsc@golang.org>  2021-12-01 12:15:45 -0500
committer Russ Cox <rsc@golang.org>  2021-12-13 18:45:54 +0000
commit    2580d0e08d5e9f979b943758d3c49877fb2324cb (patch)
tree      3aafccfd81087734156a1778ce2321adf345f271 /src/sync
parent    083ef5462494e81ee23316245c5d65085a3f62d9 (diff)
download  go-2580d0e08d5e9f979b943758d3c49877fb2324cb.tar.gz
          go-2580d0e08d5e9f979b943758d3c49877fb2324cb.zip
all: gofmt -w -r 'interface{} -> any' src

And then revert the bootstrap cmd directories and certain testdata.
And adjust tests as needed.

Not reverting the changes in std that are bootstrapped, because some of
those changes would appear in API docs, and we want to use any
consistently. Instead, rewrite 'any' to 'interface{}' in cmd/dist for
those directories when preparing the bootstrap copy.

A few files changed as a result of running gofmt -w not because of
interface{} -> any but because they hadn't been updated for the new
//go:build lines.

Fixes #49884.

Change-Id: Ie8045cba995f65bd79c694ec77a1b3d1fe01bb09
Reviewed-on: https://go-review.googlesource.com/c/go/+/368254
Trust: Russ Cox <rsc@golang.org>
Run-TryBot: Russ Cox <rsc@golang.org>
Reviewed-by: Robert Griesemer <gri@golang.org>
TryBot-Result: Gopher Robot <gobot@golang.org>
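
For readers unfamiliar with gofmt's rewrite flag: -r applies a purely syntactic pattern -> replacement rule to every file it formats, which is how this change was produced mechanically. A minimal, self-contained illustration of the rule used here (the file below is invented for demonstration and is not part of the patch):

package main

import "fmt"

// Before running `gofmt -w -r 'interface{} -> any'` this signature read
// func describe(v interface{}); the rewrite replaces each syntactic
// occurrence of interface{} with the alias any, leaving comments alone.
func describe(v any) {
	fmt.Printf("%T: %v\n", v, v)
}

func main() {
	describe(42)
	describe("hello")
}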
Diffstat (limited to 'src/sync')
-rw-r--r--  src/sync/atomic/atomic_test.go    2
-rw-r--r--  src/sync/atomic/value.go          12
-rw-r--r--  src/sync/atomic/value_test.go     18
-rw-r--r--  src/sync/example_pool_test.go     2
-rw-r--r--  src/sync/export_test.go           18
-rw-r--r--  src/sync/map.go                   40
-rw-r--r--  src/sync/map_bench_test.go        4
-rw-r--r--  src/sync/map_reference_test.go    58
-rw-r--r--  src/sync/map_test.go              28
-rw-r--r--  src/sync/pool.go                  14
-rw-r--r--  src/sync/pool_test.go             12
-rw-r--r--  src/sync/poolqueue.go             18
12 files changed, 113 insertions, 113 deletions
diff --git a/src/sync/atomic/atomic_test.go b/src/sync/atomic/atomic_test.go
index 4b8c2a58f3..8a53094cb7 100644
--- a/src/sync/atomic/atomic_test.go
+++ b/src/sync/atomic/atomic_test.go
@@ -32,7 +32,7 @@ const (
)
// Do the 64-bit functions panic? If so, don't bother testing.
-var test64err = func() (err interface{}) {
+var test64err = func() (err any) {
defer func() {
err = recover()
}()
diff --git a/src/sync/atomic/value.go b/src/sync/atomic/value.go
index af6295de91..f18b7ee095 100644
--- a/src/sync/atomic/value.go
+++ b/src/sync/atomic/value.go
@@ -14,7 +14,7 @@ import (
//
// A Value must not be copied after first use.
type Value struct {
- v interface{}
+ v any
}
// ifaceWords is interface{} internal representation.
@@ -25,7 +25,7 @@ type ifaceWords struct {
// Load returns the value set by the most recent Store.
// It returns nil if there has been no call to Store for this Value.
-func (v *Value) Load() (val interface{}) {
+func (v *Value) Load() (val any) {
vp := (*ifaceWords)(unsafe.Pointer(v))
typ := LoadPointer(&vp.typ)
if typ == nil || typ == unsafe.Pointer(&firstStoreInProgress) {
@@ -44,7 +44,7 @@ var firstStoreInProgress byte
// Store sets the value of the Value to x.
// All calls to Store for a given Value must use values of the same concrete type.
// Store of an inconsistent type panics, as does Store(nil).
-func (v *Value) Store(val interface{}) {
+func (v *Value) Store(val any) {
if val == nil {
panic("sync/atomic: store of nil value into Value")
}
@@ -87,7 +87,7 @@ func (v *Value) Store(val interface{}) {
//
// All calls to Swap for a given Value must use values of the same concrete
// type. Swap of an inconsistent type panics, as does Swap(nil).
-func (v *Value) Swap(new interface{}) (old interface{}) {
+func (v *Value) Swap(new any) (old any) {
if new == nil {
panic("sync/atomic: swap of nil value into Value")
}
@@ -132,7 +132,7 @@ func (v *Value) Swap(new interface{}) (old interface{}) {
// All calls to CompareAndSwap for a given Value must use values of the same
// concrete type. CompareAndSwap of an inconsistent type panics, as does
// CompareAndSwap(old, nil).
-func (v *Value) CompareAndSwap(old, new interface{}) (swapped bool) {
+func (v *Value) CompareAndSwap(old, new any) (swapped bool) {
if new == nil {
panic("sync/atomic: compare and swap of nil value into Value")
}
@@ -179,7 +179,7 @@ func (v *Value) CompareAndSwap(old, new interface{}) (swapped bool) {
// CompareAndSwapPointer below only ensures vp.data
// has not changed since LoadPointer.
data := LoadPointer(&vp.data)
- var i interface{}
+ var i any
(*ifaceWords)(unsafe.Pointer(&i)).typ = typ
(*ifaceWords)(unsafe.Pointer(&i)).data = data
if i != old {
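
As context for the Value methods rewritten above, here is a minimal usage sketch of Load/Store/CompareAndSwap/Swap under the documented constraints (every Store must use the same concrete type, and nil is rejected); it is illustrative only and not part of the patch:

package main

import (
	"fmt"
	"sync/atomic"
)

func main() {
	var v atomic.Value
	fmt.Println(v.Load() == nil) // true: no Store has happened yet
	v.Store("v1")                // Store(nil) or a different concrete type would panic
	fmt.Println(v.CompareAndSwap("v1", "v2")) // true: current value equaled old
	fmt.Println(v.Swap("v3"))                 // prints v2, the value being replaced
}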
diff --git a/src/sync/atomic/value_test.go b/src/sync/atomic/value_test.go
index a5e717d6e0..721da965e3 100644
--- a/src/sync/atomic/value_test.go
+++ b/src/sync/atomic/value_test.go
@@ -80,7 +80,7 @@ func TestValuePanic(t *testing.T) {
}
func TestValueConcurrent(t *testing.T) {
- tests := [][]interface{}{
+ tests := [][]any{
{uint16(0), ^uint16(0), uint16(1 + 2<<8), uint16(3 + 4<<8)},
{uint32(0), ^uint32(0), uint32(1 + 2<<16), uint32(3 + 4<<16)},
{uint64(0), ^uint64(0), uint64(1 + 2<<32), uint64(3 + 4<<32)},
@@ -138,10 +138,10 @@ func BenchmarkValueRead(b *testing.B) {
}
var Value_SwapTests = []struct {
- init interface{}
- new interface{}
- want interface{}
- err interface{}
+ init any
+ new any
+ want any
+ err any
}{
{init: nil, new: nil, err: "sync/atomic: swap of nil value into Value"},
{init: nil, new: true, want: nil, err: nil},
@@ -207,11 +207,11 @@ func TestValueSwapConcurrent(t *testing.T) {
var heapA, heapB = struct{ uint }{0}, struct{ uint }{0}
var Value_CompareAndSwapTests = []struct {
- init interface{}
- new interface{}
- old interface{}
+ init any
+ new any
+ old any
want bool
- err interface{}
+ err any
}{
{init: nil, new: nil, old: nil, err: "sync/atomic: compare and swap of nil value into Value"},
{init: nil, new: true, old: "", err: "sync/atomic: compare and swap of inconsistently typed values into Value"},
diff --git a/src/sync/example_pool_test.go b/src/sync/example_pool_test.go
index 8288d41e8c..2fb4c1e6b9 100644
--- a/src/sync/example_pool_test.go
+++ b/src/sync/example_pool_test.go
@@ -13,7 +13,7 @@ import (
)
var bufPool = sync.Pool{
- New: func() interface{} {
+ New: func() any {
// The Pool's New function should generally only return pointer
// types, since a pointer can be put into the return interface
// value without an allocation:
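
The comment above is the reason the stock pool example returns a pointer type from New; a rough, self-contained sketch of the corresponding Get/Put round trip (identifiers below are illustrative, not taken verbatim from the test file):

package main

import (
	"bytes"
	"fmt"
	"sync"
)

var bufPool = sync.Pool{
	// Returning a pointer lets the value be stored in the any return
	// without an extra allocation.
	New: func() any { return new(bytes.Buffer) },
}

func main() {
	buf := bufPool.Get().(*bytes.Buffer) // Get returns any; assert the concrete type
	buf.Reset()                          // the buffer may hold data from a previous user
	buf.WriteString("hello, pool")
	fmt.Println(buf.String())
	bufPool.Put(buf) // hand the buffer back for reuse
}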
diff --git a/src/sync/export_test.go b/src/sync/export_test.go
index ffbe567464..c020ef737d 100644
--- a/src/sync/export_test.go
+++ b/src/sync/export_test.go
@@ -12,9 +12,9 @@ var Runtime_procUnpin = runtime_procUnpin
// poolDequeue testing.
type PoolDequeue interface {
- PushHead(val interface{}) bool
- PopHead() (interface{}, bool)
- PopTail() (interface{}, bool)
+ PushHead(val any) bool
+ PopHead() (any, bool)
+ PopTail() (any, bool)
}
func NewPoolDequeue(n int) PoolDequeue {
@@ -27,15 +27,15 @@ func NewPoolDequeue(n int) PoolDequeue {
return d
}
-func (d *poolDequeue) PushHead(val interface{}) bool {
+func (d *poolDequeue) PushHead(val any) bool {
return d.pushHead(val)
}
-func (d *poolDequeue) PopHead() (interface{}, bool) {
+func (d *poolDequeue) PopHead() (any, bool) {
return d.popHead()
}
-func (d *poolDequeue) PopTail() (interface{}, bool) {
+func (d *poolDequeue) PopTail() (any, bool) {
return d.popTail()
}
@@ -43,15 +43,15 @@ func NewPoolChain() PoolDequeue {
return new(poolChain)
}
-func (c *poolChain) PushHead(val interface{}) bool {
+func (c *poolChain) PushHead(val any) bool {
c.pushHead(val)
return true
}
-func (c *poolChain) PopHead() (interface{}, bool) {
+func (c *poolChain) PopHead() (any, bool) {
return c.popHead()
}
-func (c *poolChain) PopTail() (interface{}, bool) {
+func (c *poolChain) PopTail() (any, bool) {
return c.popTail()
}
diff --git a/src/sync/map.go b/src/sync/map.go
index 7a6c82e5c3..2fa3253429 100644
--- a/src/sync/map.go
+++ b/src/sync/map.go
@@ -48,7 +48,7 @@ type Map struct {
//
// If the dirty map is nil, the next write to the map will initialize it by
// making a shallow copy of the clean map, omitting stale entries.
- dirty map[interface{}]*entry
+ dirty map[any]*entry
// misses counts the number of loads since the read map was last updated that
// needed to lock mu to determine whether the key was present.
@@ -61,13 +61,13 @@ type Map struct {
// readOnly is an immutable struct stored atomically in the Map.read field.
type readOnly struct {
- m map[interface{}]*entry
+ m map[any]*entry
amended bool // true if the dirty map contains some key not in m.
}
// expunged is an arbitrary pointer that marks entries which have been deleted
// from the dirty map.
-var expunged = unsafe.Pointer(new(interface{}))
+var expunged = unsafe.Pointer(new(any))
// An entry is a slot in the map corresponding to a particular key.
type entry struct {
@@ -93,14 +93,14 @@ type entry struct {
p unsafe.Pointer // *interface{}
}
-func newEntry(i interface{}) *entry {
+func newEntry(i any) *entry {
return &entry{p: unsafe.Pointer(&i)}
}
// Load returns the value stored in the map for a key, or nil if no
// value is present.
// The ok result indicates whether value was found in the map.
-func (m *Map) Load(key interface{}) (value interface{}, ok bool) {
+func (m *Map) Load(key any) (value any, ok bool) {
read, _ := m.read.Load().(readOnly)
e, ok := read.m[key]
if !ok && read.amended {
@@ -125,16 +125,16 @@ func (m *Map) Load(key interface{}) (value interface{}, ok bool) {
return e.load()
}
-func (e *entry) load() (value interface{}, ok bool) {
+func (e *entry) load() (value any, ok bool) {
p := atomic.LoadPointer(&e.p)
if p == nil || p == expunged {
return nil, false
}
- return *(*interface{})(p), true
+ return *(*any)(p), true
}
// Store sets the value for a key.
-func (m *Map) Store(key, value interface{}) {
+func (m *Map) Store(key, value any) {
read, _ := m.read.Load().(readOnly)
if e, ok := read.m[key]; ok && e.tryStore(&value) {
return
@@ -167,7 +167,7 @@ func (m *Map) Store(key, value interface{}) {
//
// If the entry is expunged, tryStore returns false and leaves the entry
// unchanged.
-func (e *entry) tryStore(i *interface{}) bool {
+func (e *entry) tryStore(i *any) bool {
for {
p := atomic.LoadPointer(&e.p)
if p == expunged {
@@ -190,14 +190,14 @@ func (e *entry) unexpungeLocked() (wasExpunged bool) {
// storeLocked unconditionally stores a value to the entry.
//
// The entry must be known not to be expunged.
-func (e *entry) storeLocked(i *interface{}) {
+func (e *entry) storeLocked(i *any) {
atomic.StorePointer(&e.p, unsafe.Pointer(i))
}
// LoadOrStore returns the existing value for the key if present.
// Otherwise, it stores and returns the given value.
// The loaded result is true if the value was loaded, false if stored.
-func (m *Map) LoadOrStore(key, value interface{}) (actual interface{}, loaded bool) {
+func (m *Map) LoadOrStore(key, value any) (actual any, loaded bool) {
// Avoid locking if it's a clean hit.
read, _ := m.read.Load().(readOnly)
if e, ok := read.m[key]; ok {
@@ -237,13 +237,13 @@ func (m *Map) LoadOrStore(key, value interface{}) (actual interface{}, loaded bo
//
// If the entry is expunged, tryLoadOrStore leaves the entry unchanged and
// returns with ok==false.
-func (e *entry) tryLoadOrStore(i interface{}) (actual interface{}, loaded, ok bool) {
+func (e *entry) tryLoadOrStore(i any) (actual any, loaded, ok bool) {
p := atomic.LoadPointer(&e.p)
if p == expunged {
return nil, false, false
}
if p != nil {
- return *(*interface{})(p), true, true
+ return *(*any)(p), true, true
}
// Copy the interface after the first load to make this method more amenable
@@ -259,14 +259,14 @@ func (e *entry) tryLoadOrStore(i interface{}) (actual interface{}, loaded, ok bo
return nil, false, false
}
if p != nil {
- return *(*interface{})(p), true, true
+ return *(*any)(p), true, true
}
}
}
// LoadAndDelete deletes the value for a key, returning the previous value if any.
// The loaded result reports whether the key was present.
-func (m *Map) LoadAndDelete(key interface{}) (value interface{}, loaded bool) {
+func (m *Map) LoadAndDelete(key any) (value any, loaded bool) {
read, _ := m.read.Load().(readOnly)
e, ok := read.m[key]
if !ok && read.amended {
@@ -290,18 +290,18 @@ func (m *Map) LoadAndDelete(key interface{}) (value interface{}, loaded bool) {
}
// Delete deletes the value for a key.
-func (m *Map) Delete(key interface{}) {
+func (m *Map) Delete(key any) {
m.LoadAndDelete(key)
}
-func (e *entry) delete() (value interface{}, ok bool) {
+func (e *entry) delete() (value any, ok bool) {
for {
p := atomic.LoadPointer(&e.p)
if p == nil || p == expunged {
return nil, false
}
if atomic.CompareAndSwapPointer(&e.p, p, nil) {
- return *(*interface{})(p), true
+ return *(*any)(p), true
}
}
}
@@ -317,7 +317,7 @@ func (e *entry) delete() (value interface{}, ok bool) {
//
// Range may be O(N) with the number of elements in the map even if f returns
// false after a constant number of calls.
-func (m *Map) Range(f func(key, value interface{}) bool) {
+func (m *Map) Range(f func(key, value any) bool) {
// We need to be able to iterate over all of the keys that were already
// present at the start of the call to Range.
// If read.amended is false, then read.m satisfies that property without
@@ -366,7 +366,7 @@ func (m *Map) dirtyLocked() {
}
read, _ := m.read.Load().(readOnly)
- m.dirty = make(map[interface{}]*entry, len(read.m))
+ m.dirty = make(map[any]*entry, len(read.m))
for k, e := range read.m {
if !e.tryExpungeLocked() {
m.dirty[k] = e
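
For orientation, the exported sync.Map surface rewritten in this file (Load, Store, LoadOrStore, LoadAndDelete, Delete, Range) now reads as below in caller code; a small illustrative sketch, not part of the patch:

package main

import (
	"fmt"
	"sync"
)

func main() {
	var m sync.Map
	m.Store("a", 1) // keys and values are any after the rewrite
	m.Store("b", 2)
	if v, ok := m.Load("a"); ok {
		fmt.Println("a =", v)
	}
	actual, loaded := m.LoadOrStore("c", 3) // "c" is absent, so 3 is stored
	fmt.Println(actual, loaded)             // 3 false
	if prev, ok := m.LoadAndDelete("b"); ok {
		fmt.Println("deleted b, was", prev)
	}
	m.Range(func(key, value any) bool {
		fmt.Println(key, value)
		return true // keep iterating
	})
}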
diff --git a/src/sync/map_bench_test.go b/src/sync/map_bench_test.go
index cf0a3d7fde..e7b0e6039c 100644
--- a/src/sync/map_bench_test.go
+++ b/src/sync/map_bench_test.go
@@ -216,7 +216,7 @@ func BenchmarkRange(b *testing.B) {
perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) {
for ; pb.Next(); i++ {
- m.Range(func(_, _ interface{}) bool { return true })
+ m.Range(func(_, _ any) bool { return true })
}
},
})
@@ -263,7 +263,7 @@ func BenchmarkAdversarialDelete(b *testing.B) {
m.Load(i)
if i%mapSize == 0 {
- m.Range(func(k, _ interface{}) bool {
+ m.Range(func(k, _ any) bool {
m.Delete(k)
return false
})
diff --git a/src/sync/map_reference_test.go b/src/sync/map_reference_test.go
index d105a24e92..1122b40b9b 100644
--- a/src/sync/map_reference_test.go
+++ b/src/sync/map_reference_test.go
@@ -13,43 +13,43 @@ import (
// mapInterface is the interface Map implements.
type mapInterface interface {
- Load(interface{}) (interface{}, bool)
- Store(key, value interface{})
- LoadOrStore(key, value interface{}) (actual interface{}, loaded bool)
- LoadAndDelete(key interface{}) (value interface{}, loaded bool)
- Delete(interface{})
- Range(func(key, value interface{}) (shouldContinue bool))
+ Load(any) (any, bool)
+ Store(key, value any)
+ LoadOrStore(key, value any) (actual any, loaded bool)
+ LoadAndDelete(key any) (value any, loaded bool)
+ Delete(any)
+ Range(func(key, value any) (shouldContinue bool))
}
// RWMutexMap is an implementation of mapInterface using a sync.RWMutex.
type RWMutexMap struct {
mu sync.RWMutex
- dirty map[interface{}]interface{}
+ dirty map[any]any
}
-func (m *RWMutexMap) Load(key interface{}) (value interface{}, ok bool) {
+func (m *RWMutexMap) Load(key any) (value any, ok bool) {
m.mu.RLock()
value, ok = m.dirty[key]
m.mu.RUnlock()
return
}
-func (m *RWMutexMap) Store(key, value interface{}) {
+func (m *RWMutexMap) Store(key, value any) {
m.mu.Lock()
if m.dirty == nil {
- m.dirty = make(map[interface{}]interface{})
+ m.dirty = make(map[any]any)
}
m.dirty[key] = value
m.mu.Unlock()
}
-func (m *RWMutexMap) LoadOrStore(key, value interface{}) (actual interface{}, loaded bool) {
+func (m *RWMutexMap) LoadOrStore(key, value any) (actual any, loaded bool) {
m.mu.Lock()
actual, loaded = m.dirty[key]
if !loaded {
actual = value
if m.dirty == nil {
- m.dirty = make(map[interface{}]interface{})
+ m.dirty = make(map[any]any)
}
m.dirty[key] = value
}
@@ -57,7 +57,7 @@ func (m *RWMutexMap) LoadOrStore(key, value interface{}) (actual interface{}, lo
return actual, loaded
}
-func (m *RWMutexMap) LoadAndDelete(key interface{}) (value interface{}, loaded bool) {
+func (m *RWMutexMap) LoadAndDelete(key any) (value any, loaded bool) {
m.mu.Lock()
value, loaded = m.dirty[key]
if !loaded {
@@ -69,15 +69,15 @@ func (m *RWMutexMap) LoadAndDelete(key interface{}) (value interface{}, loaded b
return value, loaded
}
-func (m *RWMutexMap) Delete(key interface{}) {
+func (m *RWMutexMap) Delete(key any) {
m.mu.Lock()
delete(m.dirty, key)
m.mu.Unlock()
}
-func (m *RWMutexMap) Range(f func(key, value interface{}) (shouldContinue bool)) {
+func (m *RWMutexMap) Range(f func(key, value any) (shouldContinue bool)) {
m.mu.RLock()
- keys := make([]interface{}, 0, len(m.dirty))
+ keys := make([]any, 0, len(m.dirty))
for k := range m.dirty {
keys = append(keys, k)
}
@@ -102,13 +102,13 @@ type DeepCopyMap struct {
clean atomic.Value
}
-func (m *DeepCopyMap) Load(key interface{}) (value interface{}, ok bool) {
- clean, _ := m.clean.Load().(map[interface{}]interface{})
+func (m *DeepCopyMap) Load(key any) (value any, ok bool) {
+ clean, _ := m.clean.Load().(map[any]any)
value, ok = clean[key]
return value, ok
}
-func (m *DeepCopyMap) Store(key, value interface{}) {
+func (m *DeepCopyMap) Store(key, value any) {
m.mu.Lock()
dirty := m.dirty()
dirty[key] = value
@@ -116,8 +116,8 @@ func (m *DeepCopyMap) Store(key, value interface{}) {
m.mu.Unlock()
}
-func (m *DeepCopyMap) LoadOrStore(key, value interface{}) (actual interface{}, loaded bool) {
- clean, _ := m.clean.Load().(map[interface{}]interface{})
+func (m *DeepCopyMap) LoadOrStore(key, value any) (actual any, loaded bool) {
+ clean, _ := m.clean.Load().(map[any]any)
actual, loaded = clean[key]
if loaded {
return actual, loaded
@@ -125,7 +125,7 @@ func (m *DeepCopyMap) LoadOrStore(key, value interface{}) (actual interface{}, l
m.mu.Lock()
// Reload clean in case it changed while we were waiting on m.mu.
- clean, _ = m.clean.Load().(map[interface{}]interface{})
+ clean, _ = m.clean.Load().(map[any]any)
actual, loaded = clean[key]
if !loaded {
dirty := m.dirty()
@@ -137,7 +137,7 @@ func (m *DeepCopyMap) LoadOrStore(key, value interface{}) (actual interface{}, l
return actual, loaded
}
-func (m *DeepCopyMap) LoadAndDelete(key interface{}) (value interface{}, loaded bool) {
+func (m *DeepCopyMap) LoadAndDelete(key any) (value any, loaded bool) {
m.mu.Lock()
dirty := m.dirty()
value, loaded = dirty[key]
@@ -147,7 +147,7 @@ func (m *DeepCopyMap) LoadAndDelete(key interface{}) (value interface{}, loaded
return
}
-func (m *DeepCopyMap) Delete(key interface{}) {
+func (m *DeepCopyMap) Delete(key any) {
m.mu.Lock()
dirty := m.dirty()
delete(dirty, key)
@@ -155,8 +155,8 @@ func (m *DeepCopyMap) Delete(key interface{}) {
m.mu.Unlock()
}
-func (m *DeepCopyMap) Range(f func(key, value interface{}) (shouldContinue bool)) {
- clean, _ := m.clean.Load().(map[interface{}]interface{})
+func (m *DeepCopyMap) Range(f func(key, value any) (shouldContinue bool)) {
+ clean, _ := m.clean.Load().(map[any]any)
for k, v := range clean {
if !f(k, v) {
break
@@ -164,9 +164,9 @@ func (m *DeepCopyMap) Range(f func(key, value interface{}) (shouldContinue bool)
}
}
-func (m *DeepCopyMap) dirty() map[interface{}]interface{} {
- clean, _ := m.clean.Load().(map[interface{}]interface{})
- dirty := make(map[interface{}]interface{}, len(clean)+1)
+func (m *DeepCopyMap) dirty() map[any]any {
+ clean, _ := m.clean.Load().(map[any]any)
+ dirty := make(map[any]any, len(clean)+1)
for k, v := range clean {
dirty[k] = v
}
diff --git a/src/sync/map_test.go b/src/sync/map_test.go
index c4a8f8b99a..8352471104 100644
--- a/src/sync/map_test.go
+++ b/src/sync/map_test.go
@@ -29,10 +29,10 @@ var mapOps = [...]mapOp{opLoad, opStore, opLoadOrStore, opLoadAndDelete, opDelet
// mapCall is a quick.Generator for calls on mapInterface.
type mapCall struct {
op mapOp
- k, v interface{}
+ k, v any
}
-func (c mapCall) apply(m mapInterface) (interface{}, bool) {
+func (c mapCall) apply(m mapInterface) (any, bool) {
switch c.op {
case opLoad:
return m.Load(c.k)
@@ -52,11 +52,11 @@ func (c mapCall) apply(m mapInterface) (interface{}, bool) {
}
type mapResult struct {
- value interface{}
+ value any
ok bool
}
-func randValue(r *rand.Rand) interface{} {
+func randValue(r *rand.Rand) any {
b := make([]byte, r.Intn(4))
for i := range b {
b[i] = 'a' + byte(rand.Intn(26))
@@ -73,14 +73,14 @@ func (mapCall) Generate(r *rand.Rand, size int) reflect.Value {
return reflect.ValueOf(c)
}
-func applyCalls(m mapInterface, calls []mapCall) (results []mapResult, final map[interface{}]interface{}) {
+func applyCalls(m mapInterface, calls []mapCall) (results []mapResult, final map[any]any) {
for _, c := range calls {
v, ok := c.apply(m)
results = append(results, mapResult{v, ok})
}
- final = make(map[interface{}]interface{})
- m.Range(func(k, v interface{}) bool {
+ final = make(map[any]any)
+ m.Range(func(k, v any) bool {
final[k] = v
return true
})
@@ -88,15 +88,15 @@ func applyCalls(m mapInterface, calls []mapCall) (results []mapResult, final map
return results, final
}
-func applyMap(calls []mapCall) ([]mapResult, map[interface{}]interface{}) {
+func applyMap(calls []mapCall) ([]mapResult, map[any]any) {
return applyCalls(new(sync.Map), calls)
}
-func applyRWMutexMap(calls []mapCall) ([]mapResult, map[interface{}]interface{}) {
+func applyRWMutexMap(calls []mapCall) ([]mapResult, map[any]any) {
return applyCalls(new(RWMutexMap), calls)
}
-func applyDeepCopyMap(calls []mapCall) ([]mapResult, map[interface{}]interface{}) {
+func applyDeepCopyMap(calls []mapCall) ([]mapResult, map[any]any) {
return applyCalls(new(DeepCopyMap), calls)
}
@@ -155,7 +155,7 @@ func TestConcurrentRange(t *testing.T) {
for n := iters; n > 0; n-- {
seen := make(map[int64]bool, mapSize)
- m.Range(func(ki, vi interface{}) bool {
+ m.Range(func(ki, vi any) bool {
k, v := ki.(int64), vi.(int64)
if v%k != 0 {
t.Fatalf("while Storing multiples of %v, Range saw value %v", k, v)
@@ -201,8 +201,8 @@ func TestMapRangeNestedCall(t *testing.T) { // Issue 46399
for i, v := range [3]string{"hello", "world", "Go"} {
m.Store(i, v)
}
- m.Range(func(key, value interface{}) bool {
- m.Range(func(key, value interface{}) bool {
+ m.Range(func(key, value any) bool {
+ m.Range(func(key, value any) bool {
// We should be able to load the key offered in the Range callback,
// because there are no concurrent Delete involved in this tested map.
if v, ok := m.Load(key); !ok || !reflect.DeepEqual(v, value) {
@@ -236,7 +236,7 @@ func TestMapRangeNestedCall(t *testing.T) { // Issue 46399
// After a Range of Delete, all keys should be removed and any
// further Range won't invoke the callback. Hence length remains 0.
length := 0
- m.Range(func(key, value interface{}) bool {
+ m.Range(func(key, value any) bool {
length++
return true
})
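
The tests above drive the map implementations through testing/quick: mapCall implements quick.Generator so random call sequences can be produced, and quick.CheckEqual compares the results of applying the same sequence to each implementation. A stripped-down sketch of that pattern, with made-up types for illustration:

package main

import (
	"fmt"
	"math/rand"
	"reflect"
	"sync"
	"testing/quick"
)

type op struct{ K, V int }

// Generate makes op a quick.Generator, so testing/quick can build
// random slices of operations for us.
func (op) Generate(r *rand.Rand, size int) reflect.Value {
	return reflect.ValueOf(op{K: r.Intn(8), V: r.Intn(8)})
}

func applyPlain(ops []op) map[int]int {
	m := make(map[int]int)
	for _, o := range ops {
		m[o.K] = o.V
	}
	return m
}

func applySync(ops []op) map[int]int {
	var s sync.Map
	for _, o := range ops {
		s.Store(o.K, o.V)
	}
	out := make(map[int]int)
	s.Range(func(k, v any) bool {
		out[k.(int)] = v.(int)
		return true
	})
	return out
}

func main() {
	// CheckEqual reports an error if the two functions ever disagree
	// on the same generated input.
	if err := quick.CheckEqual(applyPlain, applySync, nil); err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println("implementations agree")
}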
diff --git a/src/sync/pool.go b/src/sync/pool.go
index 9802f29d6f..d1abb6a8b7 100644
--- a/src/sync/pool.go
+++ b/src/sync/pool.go
@@ -53,13 +53,13 @@ type Pool struct {
// New optionally specifies a function to generate
// a value when Get would otherwise return nil.
// It may not be changed concurrently with calls to Get.
- New func() interface{}
+ New func() any
}
// Local per-P Pool appendix.
type poolLocalInternal struct {
- private interface{} // Can be used only by the respective P.
- shared poolChain // Local P can pushHead/popHead; any P can popTail.
+ private any // Can be used only by the respective P.
+ shared poolChain // Local P can pushHead/popHead; any P can popTail.
}
type poolLocal struct {
@@ -80,14 +80,14 @@ var poolRaceHash [128]uint64
// directly, for fear of conflicting with other synchronization on that address.
// Instead, we hash the pointer to get an index into poolRaceHash.
// See discussion on golang.org/cl/31589.
-func poolRaceAddr(x interface{}) unsafe.Pointer {
+func poolRaceAddr(x any) unsafe.Pointer {
ptr := uintptr((*[2]unsafe.Pointer)(unsafe.Pointer(&x))[1])
h := uint32((uint64(uint32(ptr)) * 0x85ebca6b) >> 16)
return unsafe.Pointer(&poolRaceHash[h%uint32(len(poolRaceHash))])
}
// Put adds x to the pool.
-func (p *Pool) Put(x interface{}) {
+func (p *Pool) Put(x any) {
if x == nil {
return
}
@@ -121,7 +121,7 @@ func (p *Pool) Put(x interface{}) {
//
// If Get would otherwise return nil and p.New is non-nil, Get returns
// the result of calling p.New.
-func (p *Pool) Get() interface{} {
+func (p *Pool) Get() any {
if race.Enabled {
race.Disable()
}
@@ -150,7 +150,7 @@ func (p *Pool) Get() interface{} {
return x
}
-func (p *Pool) getSlow(pid int) interface{} {
+func (p *Pool) getSlow(pid int) any {
// See the comment in pin regarding ordering of the loads.
size := runtime_LoadAcquintptr(&p.localSize) // load-acquire
locals := p.local // load-consume
diff --git a/src/sync/pool_test.go b/src/sync/pool_test.go
index d991621624..bb20043a54 100644
--- a/src/sync/pool_test.go
+++ b/src/sync/pool_test.go
@@ -64,7 +64,7 @@ func TestPoolNew(t *testing.T) {
i := 0
p := Pool{
- New: func() interface{} {
+ New: func() any {
i++
return i
},
@@ -143,7 +143,7 @@ func TestPoolStress(t *testing.T) {
done := make(chan bool)
for i := 0; i < P; i++ {
go func() {
- var v interface{} = 0
+ var v any = 0
for j := 0; j < N; j++ {
if v == nil {
v = 0
@@ -290,7 +290,7 @@ func BenchmarkPoolStarvation(b *testing.B) {
})
}
-var globalSink interface{}
+var globalSink any
func BenchmarkPoolSTW(b *testing.B) {
// Take control of GC.
@@ -303,7 +303,7 @@ func BenchmarkPoolSTW(b *testing.B) {
for i := 0; i < b.N; i++ {
// Put a large number of items into a pool.
const N = 100000
- var item interface{} = 42
+ var item any = 42
for i := 0; i < N; i++ {
p.Put(item)
}
@@ -338,7 +338,7 @@ func BenchmarkPoolExpensiveNew(b *testing.B) {
// Create a pool that's "expensive" to fill.
var p Pool
var nNew uint64
- p.New = func() interface{} {
+ p.New = func() any {
atomic.AddUint64(&nNew, 1)
time.Sleep(time.Millisecond)
return 42
@@ -348,7 +348,7 @@ func BenchmarkPoolExpensiveNew(b *testing.B) {
b.RunParallel(func(pb *testing.PB) {
// Simulate 100X the number of goroutines having items
// checked out from the Pool simultaneously.
- items := make([]interface{}, 100)
+ items := make([]any, 100)
var sink []byte
for pb.Next() {
// Stress the pool.
diff --git a/src/sync/poolqueue.go b/src/sync/poolqueue.go
index 9be83e9a43..631f2c15fd 100644
--- a/src/sync/poolqueue.go
+++ b/src/sync/poolqueue.go
@@ -77,7 +77,7 @@ func (d *poolDequeue) pack(head, tail uint32) uint64 {
// pushHead adds val at the head of the queue. It returns false if the
// queue is full. It must only be called by a single producer.
-func (d *poolDequeue) pushHead(val interface{}) bool {
+func (d *poolDequeue) pushHead(val any) bool {
ptrs := atomic.LoadUint64(&d.headTail)
head, tail := d.unpack(ptrs)
if (tail+uint32(len(d.vals)))&(1<<dequeueBits-1) == head {
@@ -98,7 +98,7 @@ func (d *poolDequeue) pushHead(val interface{}) bool {
if val == nil {
val = dequeueNil(nil)
}
- *(*interface{})(unsafe.Pointer(slot)) = val
+ *(*any)(unsafe.Pointer(slot)) = val
// Increment head. This passes ownership of slot to popTail
// and acts as a store barrier for writing the slot.
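
The full/empty check above works because head and tail are two 32-bit ring indices packed into the single headTail word that is read and updated atomically; a standalone sketch of that pack/unpack arithmetic (the constant name and exact layout below are illustrative, not copied from poolqueue.go):

package main

import "fmt"

// idxBits is the assumed width of each packed index; poolqueue.go keeps
// its own constant (dequeueBits) for this purpose.
const idxBits = 32

func pack(head, tail uint32) uint64 {
	const mask = 1<<idxBits - 1
	return uint64(head)<<idxBits | uint64(tail)&mask
}

func unpack(ptrs uint64) (head, tail uint32) {
	const mask = 1<<idxBits - 1
	head = uint32((ptrs >> idxBits) & mask)
	tail = uint32(ptrs & mask)
	return
}

func main() {
	ptrs := pack(7, 3)
	h, t := unpack(ptrs)
	fmt.Println(h, t) // 7 3
}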
@@ -109,7 +109,7 @@ func (d *poolDequeue) pushHead(val interface{}) bool {
// popHead removes and returns the element at the head of the queue.
// It returns false if the queue is empty. It must only be called by a
// single producer.
-func (d *poolDequeue) popHead() (interface{}, bool) {
+func (d *poolDequeue) popHead() (any, bool) {
var slot *eface
for {
ptrs := atomic.LoadUint64(&d.headTail)
@@ -131,7 +131,7 @@ func (d *poolDequeue) popHead() (interface{}, bool) {
}
}
- val := *(*interface{})(unsafe.Pointer(slot))
+ val := *(*any)(unsafe.Pointer(slot))
if val == dequeueNil(nil) {
val = nil
}
@@ -144,7 +144,7 @@ func (d *poolDequeue) popHead() (interface{}, bool) {
// popTail removes and returns the element at the tail of the queue.
// It returns false if the queue is empty. It may be called by any
// number of consumers.
-func (d *poolDequeue) popTail() (interface{}, bool) {
+func (d *poolDequeue) popTail() (any, bool) {
var slot *eface
for {
ptrs := atomic.LoadUint64(&d.headTail)
@@ -166,7 +166,7 @@ func (d *poolDequeue) popTail() (interface{}, bool) {
}
// We now own slot.
- val := *(*interface{})(unsafe.Pointer(slot))
+ val := *(*any)(unsafe.Pointer(slot))
if val == dequeueNil(nil) {
val = nil
}
@@ -225,7 +225,7 @@ func loadPoolChainElt(pp **poolChainElt) *poolChainElt {
return (*poolChainElt)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(pp))))
}
-func (c *poolChain) pushHead(val interface{}) {
+func (c *poolChain) pushHead(val any) {
d := c.head
if d == nil {
// Initialize the chain.
@@ -255,7 +255,7 @@ func (c *poolChain) pushHead(val interface{}) {
d2.pushHead(val)
}
-func (c *poolChain) popHead() (interface{}, bool) {
+func (c *poolChain) popHead() (any, bool) {
d := c.head
for d != nil {
if val, ok := d.popHead(); ok {
@@ -268,7 +268,7 @@ func (c *poolChain) popHead() (interface{}, bool) {
return nil, false
}
-func (c *poolChain) popTail() (interface{}, bool) {
+func (c *poolChain) popTail() (any, bool) {
d := loadPoolChainElt(&c.tail)
if d == nil {
return nil, false