 src/cmd/compile/internal/gc/builtin.go         |   2 ++
 src/cmd/compile/internal/gc/builtin/runtime.go |   2 ++
 src/cmd/compile/internal/gc/walk.go            |  31 ++++--
 src/runtime/hashmap_fast.go                    | 176 ++++++++++++++++++++
 test/fixedbugs/issue22781.go                   |  29 ++++
 5 files changed, 232 insertions(+), 8 deletions(-)
diff --git a/src/cmd/compile/internal/gc/builtin.go b/src/cmd/compile/internal/gc/builtin.go
index f21a4da491..fafed7cae8 100644
--- a/src/cmd/compile/internal/gc/builtin.go
+++ b/src/cmd/compile/internal/gc/builtin.go
@@ -86,7 +86,9 @@ var runtimeDecls = [...]struct {
	{"mapaccess2_fat", funcTag, 67},
	{"mapassign", funcTag, 62},
	{"mapassign_fast32", funcTag, 63},
+	{"mapassign_fast32ptr", funcTag, 63},
	{"mapassign_fast64", funcTag, 63},
+	{"mapassign_fast64ptr", funcTag, 63},
	{"mapassign_faststr", funcTag, 63},
	{"mapiterinit", funcTag, 68},
	{"mapdelete", funcTag, 68},
diff --git a/src/cmd/compile/internal/gc/builtin/runtime.go b/src/cmd/compile/internal/gc/builtin/runtime.go
index 7f4846db9d..c3994c5fa2 100644
--- a/src/cmd/compile/internal/gc/builtin/runtime.go
+++ b/src/cmd/compile/internal/gc/builtin/runtime.go
@@ -106,7 +106,9 @@ func mapaccess2_faststr(mapType *byte, hmap map[any]any, key any) (val *any, pres bool)
func mapaccess2_fat(mapType *byte, hmap map[any]any, key *any, zero *byte) (val *any, pres bool)
func mapassign(mapType *byte, hmap map[any]any, key *any) (val *any)
func mapassign_fast32(mapType *byte, hmap map[any]any, key any) (val *any)
+func mapassign_fast32ptr(mapType *byte, hmap map[any]any, key any) (val *any)
func mapassign_fast64(mapType *byte, hmap map[any]any, key any) (val *any)
+func mapassign_fast64ptr(mapType *byte, hmap map[any]any, key any) (val *any)
func mapassign_faststr(mapType *byte, hmap map[any]any, key any) (val *any)
func mapiterinit(mapType *byte, hmap map[any]any, hiter *any)
func mapdelete(mapType *byte, hmap map[any]any, key *any)
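
In the compiler-facing declarations above, every fast-path key is typed any;
builtin.go even reuses the same signature tag (63) for the new ptr variants.
The difference that matters to the garbage collector shows up only in the
runtime-side signatures added below in hashmap_fast.go. The 64-bit pair, quoted
from this diff:

	// Scalar variant: the key travels in an integer slot, so a pointer
	// smuggled through here is invisible to a stack scan.
	func mapassign_fast64(t *maptype, h *hmap, key uint64) unsafe.Pointer

	// New pointer variant: the key parameter is pointer-typed, so the GC
	// sees it and keeps the referent alive across the call.
	func mapassign_fast64ptr(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer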
diff --git a/src/cmd/compile/internal/gc/walk.go b/src/cmd/compile/internal/gc/walk.go
index 76031d160a..ab2e38208d 100644
--- a/src/cmd/compile/internal/gc/walk.go
+++ b/src/cmd/compile/internal/gc/walk.go
@@ -2785,21 +2785,23 @@ func mapfndel(name string, t *types.Type) *Node {
const (
	mapslow = iota
	mapfast32
+	mapfast32ptr
	mapfast64
+	mapfast64ptr
	mapfaststr
	nmapfast
)

type mapnames [nmapfast]string

-func mkmapnames(base string) mapnames {
-	return mapnames{base, base + "_fast32", base + "_fast64", base + "_faststr"}
+func mkmapnames(base string, ptr string) mapnames {
+	return mapnames{base, base + "_fast32", base + "_fast32" + ptr, base + "_fast64", base + "_fast64" + ptr, base + "_faststr"}
}

-var mapaccess1 mapnames = mkmapnames("mapaccess1")
-var mapaccess2 mapnames = mkmapnames("mapaccess2")
-var mapassign mapnames = mkmapnames("mapassign")
-var mapdelete mapnames = mkmapnames("mapdelete")
+var mapaccess1 mapnames = mkmapnames("mapaccess1", "")
+var mapaccess2 mapnames = mkmapnames("mapaccess2", "")
+var mapassign mapnames = mkmapnames("mapassign", "ptr")
+var mapdelete mapnames = mkmapnames("mapdelete", "")

func mapfast(t *types.Type) int {
	// Check ../../runtime/hashmap.go:maxValueSize before changing.
@@ -2808,9 +2810,22 @@ func mapfast(t *types.Type) int {
	}
	switch algtype(t.Key()) {
	case AMEM32:
-		return mapfast32
+		if !t.Key().HasPointer() {
+			return mapfast32
+		}
+		if Widthptr == 4 {
+			return mapfast32ptr
+		}
+		Fatalf("small pointer %v", t.Key())
	case AMEM64:
-		return mapfast64
+		if !t.Key().HasPointer() {
+			return mapfast64
+		}
+		if Widthptr == 8 {
+			return mapfast64ptr
+		}
+		// Two-word object, at least one of which is a pointer.
+		// Use the slow path.
	case ASTRING:
		return mapfaststr
	}
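
To make the new dispatch concrete, here is an illustrative sketch (not compiler
code; the type T is made up) of which assignment helper m[k] = v lowers to
after this change, following the mapfast switch above:

	package main

	type T struct{ a, b int }

	func main() {
		_ = map[uint32]int{} // AMEM32, no pointers -> mapassign_fast32
		_ = map[uint64]int{} // AMEM64, no pointers -> mapassign_fast64
		_ = map[*T]int{}     // pointer key -> mapassign_fast64ptr on 64-bit
		                     // targets, mapassign_fast32ptr on 32-bit ones
		_ = map[string]int{} // ASTRING -> mapassign_faststr
		// On a 32-bit target, an 8-byte key containing a pointer is a
		// two-word object (AMEM64 with Widthptr == 4), so it keeps using
		// the generic mapassign slow path.
	}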
diff --git a/src/runtime/hashmap_fast.go b/src/runtime/hashmap_fast.go
index 67b9787909..c07992b0f3 100644
--- a/src/runtime/hashmap_fast.go
+++ b/src/runtime/hashmap_fast.go
@@ -507,6 +507,94 @@ done:
	return val
}

+func mapassign_fast32ptr(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
+	if h == nil {
+		panic(plainError("assignment to entry in nil map"))
+	}
+	if raceenabled {
+		callerpc := getcallerpc(unsafe.Pointer(&t))
+		racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapassign_fast32))
+	}
+	if h.flags&hashWriting != 0 {
+		throw("concurrent map writes")
+	}
+	hash := t.key.alg.hash(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
+
+	// Set hashWriting after calling alg.hash for consistency with mapassign.
+	h.flags |= hashWriting
+
+	if h.buckets == nil {
+		h.buckets = newarray(t.bucket, 1)
+	}
+
+again:
+	bucket := hash & (uintptr(1)<<h.B - 1)
+	if h.growing() {
+		growWork(t, h, bucket)
+	}
+	b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + bucket*uintptr(t.bucketsize)))
+	top := uint8(hash >> (sys.PtrSize*8 - 8))
+	if top < minTopHash {
+		top += minTopHash
+	}
+
+	var inserti *uint8
+	var insertk unsafe.Pointer
+	var val unsafe.Pointer
+	for {
+		for i := uintptr(0); i < bucketCnt; i++ {
+			if b.tophash[i] != top {
+				if b.tophash[i] == empty && inserti == nil {
+					inserti = &b.tophash[i]
+					insertk = add(unsafe.Pointer(b), dataOffset+i*4)
+					val = add(unsafe.Pointer(b), dataOffset+bucketCnt*4+i*uintptr(t.valuesize))
+				}
+				continue
+			}
+			k := *((*unsafe.Pointer)(add(unsafe.Pointer(b), dataOffset+i*4)))
+			if k != key {
+				continue
+			}
+			val = add(unsafe.Pointer(b), dataOffset+bucketCnt*4+i*uintptr(t.valuesize))
+			goto done
+		}
+		ovf := b.overflow(t)
+		if ovf == nil {
+			break
+		}
+		b = ovf
+	}
+
+	// Did not find mapping for key. Allocate new cell & add entry.
+
+	// If we hit the max load factor or we have too many overflow buckets,
+	// and we're not already in the middle of growing, start growing.
+	if !h.growing() && (overLoadFactor(int64(h.count), h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
+		hashGrow(t, h)
+		goto again // Growing the table invalidates everything, so try again
+	}
+
+	if inserti == nil {
+		// all current buckets are full, allocate a new one.
+		newb := h.newoverflow(t, b)
+		inserti = &newb.tophash[0]
+		insertk = add(unsafe.Pointer(newb), dataOffset)
+		val = add(insertk, bucketCnt*4)
+	}
+
+	// store new key/value at insert position
+	typedmemmove(t.key, insertk, unsafe.Pointer(&key))
+	*inserti = top
+	h.count++
+
+done:
+	if h.flags&hashWriting == 0 {
+		throw("concurrent map writes")
+	}
+	h.flags &^= hashWriting
+	return val
+}
+
func mapassign_fast64(t *maptype, h *hmap, key uint64) unsafe.Pointer {
	if h == nil {
		panic(plainError("assignment to entry in nil map"))
@@ -595,6 +683,94 @@ done:
	return val
}

+func mapassign_fast64ptr(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
+	if h == nil {
+		panic(plainError("assignment to entry in nil map"))
+	}
+	if raceenabled {
+		callerpc := getcallerpc(unsafe.Pointer(&t))
+		racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapassign_fast64))
+	}
+	if h.flags&hashWriting != 0 {
+		throw("concurrent map writes")
+	}
+	hash := t.key.alg.hash(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
+
+	// Set hashWriting after calling alg.hash for consistency with mapassign.
+	h.flags |= hashWriting
+
+	if h.buckets == nil {
+		h.buckets = newarray(t.bucket, 1)
+	}
+
+again:
+	bucket := hash & (uintptr(1)<<h.B - 1)
+	if h.growing() {
+		growWork(t, h, bucket)
+	}
+	b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + bucket*uintptr(t.bucketsize)))
+	top := uint8(hash >> (sys.PtrSize*8 - 8))
+	if top < minTopHash {
+		top += minTopHash
+	}
+
+	var inserti *uint8
+	var insertk unsafe.Pointer
+	var val unsafe.Pointer
+	for {
+		for i := uintptr(0); i < bucketCnt; i++ {
+			if b.tophash[i] != top {
+				if b.tophash[i] == empty && inserti == nil {
+					inserti = &b.tophash[i]
+					insertk = add(unsafe.Pointer(b), dataOffset+i*8)
+					val = add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.valuesize))
+				}
+				continue
+			}
+			k := *((*unsafe.Pointer)(add(unsafe.Pointer(b), dataOffset+i*8)))
+			if k != key {
+				continue
+			}
+			val = add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.valuesize))
+			goto done
+		}
+		ovf := b.overflow(t)
+		if ovf == nil {
+			break
+		}
+		b = ovf
+	}
+
+	// Did not find mapping for key. Allocate new cell & add entry.
+
+	// If we hit the max load factor or we have too many overflow buckets,
+	// and we're not already in the middle of growing, start growing.
+	if !h.growing() && (overLoadFactor(int64(h.count), h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
+		hashGrow(t, h)
+		goto again // Growing the table invalidates everything, so try again
+	}
+
+	if inserti == nil {
+		// all current buckets are full, allocate a new one.
+		newb := h.newoverflow(t, b)
+		inserti = &newb.tophash[0]
+		insertk = add(unsafe.Pointer(newb), dataOffset)
+		val = add(insertk, bucketCnt*8)
+	}
+
+	// store new key/value at insert position
+	typedmemmove(t.key, insertk, unsafe.Pointer(&key))
+	*inserti = top
+	h.count++
+
+done:
+	if h.flags&hashWriting == 0 {
+		throw("concurrent map writes")
+	}
+	h.flags &^= hashWriting
+	return val
+}
+
func mapassign_faststr(t *maptype, h *hmap, ky string) unsafe.Pointer {
	if h == nil {
		panic(plainError("assignment to entry in nil map"))
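
Both inserted functions locate a slot the same way as the existing fast paths:
the low-order h.B bits of the hash pick a bucket, and the hash's top byte,
nudged past a reserved sentinel range, is cached as the tophash used for cheap
comparisons. A standalone sketch of those two expressions, assuming a 64-bit
platform and mirroring the runtime's minTopHash constant:

	package main

	import "fmt"

	const minTopHash = 4 // values below this are reserved markers (e.g. empty)

	func main() {
		hash := uintptr(0xdeadbeefcafe1234) // sample hash (64-bit platforms)
		B := uint8(5)                       // h.B: log2 of the bucket count

		bucket := hash & (uintptr(1)<<B - 1) // low B bits select the bucket
		top := uint8(hash >> (64 - 8))       // top byte becomes the tophash
		if top < minTopHash {
			top += minTopHash // skip the reserved sentinel range
		}
		fmt.Printf("bucket=%d top=%#x\n", bucket, top) // bucket=20 top=0xde
	}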
diff --git a/test/fixedbugs/issue22781.go b/test/fixedbugs/issue22781.go
new file mode 100644
index 0000000000..5ad82398bb
--- /dev/null
+++ b/test/fixedbugs/issue22781.go
@@ -0,0 +1,29 @@
+// run
+
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import "runtime/debug"
+
+type T struct {
+	// >= 16 bytes to avoid tiny alloc.
+	a, b int
+}
+
+func main() {
+	debug.SetGCPercent(1)
+	for i := 0; i < 100000; i++ {
+		m := make(map[*T]struct{}, 0)
+		for j := 0; j < 20; j++ {
+			// During the call to mapassign_fast64, the key argument
+			// was incorrectly treated as a uint64. If the stack was
+			// scanned during that call, the only pointer to k was
+			// missed, leading to *k being collected prematurely.
+			k := new(T)
+			m[k] = struct{}{}
+		}
+	}
+}
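
For anyone adapting this regression test: debug.SetGCPercent(1) makes
collections nearly continuous, the 100000 outer iterations give a stack scan
many chances to land inside the assign call, and T's two int fields keep each
key out of the tiny allocator, so a missed mark dooms that object alone. A
trimmed, runnable variant of the same shape (the smaller iteration count and
the key type name are arbitrary choices, not part of the original test):

	package main

	import "runtime/debug"

	type key struct{ a, b int } // >= 16 bytes: stays out of the tiny allocator

	func main() {
		debug.SetGCPercent(1) // collect as aggressively as possible
		for i := 0; i < 10000; i++ {
			m := make(map[*key]struct{})
			for j := 0; j < 20; j++ {
				// The pointer returned by new(key) is live only as the map
				// key; before this fix it could be collected mid-assign.
				m[new(key)] = struct{}{}
			}
		}
	}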