author    Keith Randall <khr@google.com>  2018-12-28 14:34:48 -0800
committer Keith Randall <khr@golang.org>  2018-12-29 01:00:54 +0000
commit    ed15e82413c7b16e21a493f5a647f68b46e965ee (patch)
tree      34dd85b90385fef2ec09b6082b372b560c3b7e4a /src/runtime/map.go
parent    14bdcc76fd9aa3edda64ef07a526fbeeed8b4326 (diff)
download  go-ed15e82413c7b16e21a493f5a647f68b46e965ee.tar.gz
          go-ed15e82413c7b16e21a493f5a647f68b46e965ee.zip
runtime: panic on uncomparable map key, even if map is empty
Reorg map flags a bit so we don't need any extra space for the extra flag.

Fixes #23734

Change-Id: I436812156240ae90de53d0943fe1aabf3ea37417
Reviewed-on: https://go-review.googlesource.com/c/155918
Run-TryBot: Keith Randall <khr@golang.org>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Ian Lance Taylor <iant@golang.org>
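For illustration only (not part of the commit): a minimal program showing the behavior this change enforces. Before this fix, looking up or deleting an uncomparable key in a nil or empty map was a silent no-op, because the empty-map fast path returned before the key was ever hashed; with the t.hashMightPanic() checks added in the diff below, the hash now runs and panics just as it would on a non-empty map.

	package main

	func main() {
		m := map[interface{}]int{} // empty map with an interface key type
		k := []int{1, 2, 3}        // slices are uncomparable; hashing k panics

		// After this change, each of these panics with
		//   runtime error: hash of unhashable type []int
		// even though m is empty (execution stops at the first one).
		// Before it, both were silent no-ops on an empty map.
		_ = m[k]
		delete(m, k)
	}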
Diffstat (limited to 'src/runtime/map.go')
-rw-r--r--  src/runtime/map.go | 61
1 file changed, 35 insertions(+), 26 deletions(-)
diff --git a/src/runtime/map.go b/src/runtime/map.go
index d835cc831a..9c25b63348 100644
--- a/src/runtime/map.go
+++ b/src/runtime/map.go
@@ -404,6 +404,9 @@ func mapaccess1(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
msanread(key, t.key.size)
}
if h == nil || h.count == 0 {
+ if t.hashMightPanic() {
+ t.key.alg.hash(key, 0) // see issue 23734
+ }
return unsafe.Pointer(&zeroVal[0])
}
if h.flags&hashWriting != 0 {
@@ -434,12 +437,12 @@ bucketloop:
continue
}
k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
- if t.indirectkey {
+ if t.indirectkey() {
k = *((*unsafe.Pointer)(k))
}
if alg.equal(key, k) {
v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
- if t.indirectvalue {
+ if t.indirectvalue() {
v = *((*unsafe.Pointer)(v))
}
return v
@@ -460,6 +463,9 @@ func mapaccess2(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, bool)
msanread(key, t.key.size)
}
if h == nil || h.count == 0 {
+ if t.hashMightPanic() {
+ t.key.alg.hash(key, 0) // see issue 23734
+ }
return unsafe.Pointer(&zeroVal[0]), false
}
if h.flags&hashWriting != 0 {
@@ -490,12 +496,12 @@ bucketloop:
continue
}
k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
- if t.indirectkey {
+ if t.indirectkey() {
k = *((*unsafe.Pointer)(k))
}
if alg.equal(key, k) {
v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
- if t.indirectvalue {
+ if t.indirectvalue() {
v = *((*unsafe.Pointer)(v))
}
return v, true
@@ -535,12 +541,12 @@ bucketloop:
continue
}
k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
- if t.indirectkey {
+ if t.indirectkey() {
k = *((*unsafe.Pointer)(k))
}
if alg.equal(key, k) {
v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
- if t.indirectvalue {
+ if t.indirectvalue() {
v = *((*unsafe.Pointer)(v))
}
return k, v
@@ -620,14 +626,14 @@ bucketloop:
continue
}
k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
- if t.indirectkey {
+ if t.indirectkey() {
k = *((*unsafe.Pointer)(k))
}
if !alg.equal(key, k) {
continue
}
// already have a mapping for key. Update it.
- if t.needkeyupdate {
+ if t.needkeyupdate() {
typedmemmove(t.key, k, key)
}
val = add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
@@ -658,12 +664,12 @@ bucketloop:
}
// store new key/value at insert position
- if t.indirectkey {
+ if t.indirectkey() {
kmem := newobject(t.key)
*(*unsafe.Pointer)(insertk) = kmem
insertk = kmem
}
- if t.indirectvalue {
+ if t.indirectvalue() {
vmem := newobject(t.elem)
*(*unsafe.Pointer)(val) = vmem
}
@@ -676,7 +682,7 @@ done:
throw("concurrent map writes")
}
h.flags &^= hashWriting
- if t.indirectvalue {
+ if t.indirectvalue() {
val = *((*unsafe.Pointer)(val))
}
return val
@@ -693,6 +699,9 @@ func mapdelete(t *maptype, h *hmap, key unsafe.Pointer) {
msanread(key, t.key.size)
}
if h == nil || h.count == 0 {
+ if t.hashMightPanic() {
+ t.key.alg.hash(key, 0) // see issue 23734
+ }
return
}
if h.flags&hashWriting != 0 {
@@ -724,20 +733,20 @@ search:
}
k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
k2 := k
- if t.indirectkey {
+ if t.indirectkey() {
k2 = *((*unsafe.Pointer)(k2))
}
if !alg.equal(key, k2) {
continue
}
// Only clear key if there are pointers in it.
- if t.indirectkey {
+ if t.indirectkey() {
*(*unsafe.Pointer)(k) = nil
} else if t.key.kind&kindNoPointers == 0 {
memclrHasPointers(k, t.key.size)
}
v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
- if t.indirectvalue {
+ if t.indirectvalue() {
*(*unsafe.Pointer)(v) = nil
} else if t.elem.kind&kindNoPointers == 0 {
memclrHasPointers(v, t.elem.size)
@@ -897,7 +906,7 @@ next:
continue
}
k := add(unsafe.Pointer(b), dataOffset+uintptr(offi)*uintptr(t.keysize))
- if t.indirectkey {
+ if t.indirectkey() {
k = *((*unsafe.Pointer)(k))
}
v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+uintptr(offi)*uintptr(t.valuesize))
@@ -909,7 +918,7 @@ next:
// through the oldbucket, skipping any keys that will go
// to the other new bucket (each oldbucket expands to two
// buckets during a grow).
- if t.reflexivekey || alg.equal(k, k) {
+ if t.reflexivekey() || alg.equal(k, k) {
// If the item in the oldbucket is not destined for
// the current new bucket in the iteration, skip it.
hash := alg.hash(k, uintptr(h.hash0))
@@ -930,13 +939,13 @@ next:
}
}
if (b.tophash[offi] != evacuatedX && b.tophash[offi] != evacuatedY) ||
- !(t.reflexivekey || alg.equal(k, k)) {
+ !(t.reflexivekey() || alg.equal(k, k)) {
// This is the golden data, we can return it.
// OR
// key!=key, so the entry can't be deleted or updated, so we can just return it.
// That's lucky for us because when key!=key we can't look it up successfully.
it.key = k
- if t.indirectvalue {
+ if t.indirectvalue() {
v = *((*unsafe.Pointer)(v))
}
it.value = v
@@ -1160,7 +1169,7 @@ func evacuate(t *maptype, h *hmap, oldbucket uintptr) {
throw("bad map state")
}
k2 := k
- if t.indirectkey {
+ if t.indirectkey() {
k2 = *((*unsafe.Pointer)(k2))
}
var useY uint8
@@ -1168,7 +1177,7 @@ func evacuate(t *maptype, h *hmap, oldbucket uintptr) {
// Compute hash to make our evacuation decision (whether we need
// to send this key/value to bucket x or bucket y).
hash := t.key.alg.hash(k2, uintptr(h.hash0))
- if h.flags&iterator != 0 && !t.reflexivekey && !t.key.alg.equal(k2, k2) {
+ if h.flags&iterator != 0 && !t.reflexivekey() && !t.key.alg.equal(k2, k2) {
// If key != key (NaNs), then the hash could be (and probably
// will be) entirely different from the old hash. Moreover,
// it isn't reproducible. Reproducibility is required in the
@@ -1203,12 +1212,12 @@ func evacuate(t *maptype, h *hmap, oldbucket uintptr) {
dst.v = add(dst.k, bucketCnt*uintptr(t.keysize))
}
dst.b.tophash[dst.i&(bucketCnt-1)] = top // mask dst.i as an optimization, to avoid a bounds check
- if t.indirectkey {
+ if t.indirectkey() {
*(*unsafe.Pointer)(dst.k) = k2 // copy pointer
} else {
typedmemmove(t.key, dst.k, k) // copy value
}
- if t.indirectvalue {
+ if t.indirectvalue() {
*(*unsafe.Pointer)(dst.v) = *(*unsafe.Pointer)(v)
} else {
typedmemmove(t.elem, dst.v, v)
@@ -1274,12 +1283,12 @@ func reflect_makemap(t *maptype, cap int) *hmap {
if !ismapkey(t.key) {
throw("runtime.reflect_makemap: unsupported map key type")
}
- if t.key.size > maxKeySize && (!t.indirectkey || t.keysize != uint8(sys.PtrSize)) ||
- t.key.size <= maxKeySize && (t.indirectkey || t.keysize != uint8(t.key.size)) {
+ if t.key.size > maxKeySize && (!t.indirectkey() || t.keysize != uint8(sys.PtrSize)) ||
+ t.key.size <= maxKeySize && (t.indirectkey() || t.keysize != uint8(t.key.size)) {
throw("key size wrong")
}
- if t.elem.size > maxValueSize && (!t.indirectvalue || t.valuesize != uint8(sys.PtrSize)) ||
- t.elem.size <= maxValueSize && (t.indirectvalue || t.valuesize != uint8(t.elem.size)) {
+ if t.elem.size > maxValueSize && (!t.indirectvalue() || t.valuesize != uint8(sys.PtrSize)) ||
+ t.elem.size <= maxValueSize && (t.indirectvalue() || t.valuesize != uint8(t.elem.size)) {
throw("value size wrong")
}
if t.key.align > bucketCnt {
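The "reorg map flags" in the commit message happens in a companion change to src/runtime/type.go, which this diff (limited to src/runtime/map.go) does not show: maptype's former boolean fields indirectkey, indirectvalue, reflexivekey, and needkeyupdate become bits in a single flags field, which is why every call site above changes from a field access to a method call, and why the new hashMightPanic bit costs no extra space. Below is a sketch of the resulting accessors, with the struct reduced to a hypothetical stub and the bit assignments reconstructed from the Go 1.12 sources rather than taken from this diff:

	package runtimesketch

	// Hypothetical stub of the runtime-internal maptype; the real struct
	// embeds _type and the key/value layout fields seen in the diff above.
	type maptype struct {
		flags uint32
	}

	func (mt *maptype) indirectkey() bool { // store ptr to key instead of key itself
		return mt.flags&1 != 0
	}
	func (mt *maptype) indirectvalue() bool { // store ptr to value instead of value itself
		return mt.flags&2 != 0
	}
	func (mt *maptype) reflexivekey() bool { // true if k==k for all keys
		return mt.flags&4 != 0
	}
	func (mt *maptype) needkeyupdate() bool { // true if we need to update the key on an overwrite
		return mt.flags&8 != 0
	}
	func (mt *maptype) hashMightPanic() bool { // true if the hash function might panic
		return mt.flags&16 != 0
	}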