path: root/src/runtime/map_faststr.go
author      Keith Randall <keithr@alum.mit.edu>  2019-08-06 15:22:51 -0700
committer   Keith Randall <khr@golang.org>       2019-09-03 20:41:29 +0000
commit      36f30ba289e31df033d100b2adb4eaf557f05a34 (patch)
tree        17579106197e4c1d80b67cefdf9d8fdfd2ff2a2c /src/runtime/map_faststr.go
parent      671bcb59666c37cb32b154c36aa91b29fdbf0835 (diff)
download    go-36f30ba289e31df033d100b2adb4eaf557f05a34.tar.gz
            go-36f30ba289e31df033d100b2adb4eaf557f05a34.zip
cmd/compile,runtime: generate hash functions only for types which are map keys
Right now we generate hash functions for all types, just in case they are
used as map keys. That's a lot of wasted effort and binary size for types
which will never be used as a map key. Instead, generate hash functions
only for types that we know are map keys.

Just doing that is a bit too simple, since maps with an interface type as
a key might have to hash any concrete key type that implements that
interface. So for that case, implement hashing of such types at runtime
(instead of with generated code). It will be slower, but only for maps
with interface types as keys, and maybe only a bit slower as the aeshash
time probably dominates the dispatch time.

Reorg where we keep the equals and hash functions. Move the hash function
from the key type to the map type, saving a field in every non-map type.
That leaves only one function in the alg structure, so get rid of that and
just keep the equal function in the type descriptor itself.

cmd/go now has 10 generated hash functions, instead of 504. Makes cmd/go
1.0% smaller. Update #6853.

Speed on non-interface keys is unchanged. Speed on interface keys is ~20%
slower:

name                   old time/op  new time/op  delta
MapInterfaceString-8   23.0ns ±21%  27.6ns ±14%  +20.01%  (p=0.002 n=10+10)
MapInterfacePtr-8      19.4ns ±16%  23.7ns ± 7%  +22.48%  (p=0.000 n=10+8)

Change-Id: I7c2e42292a46b5d4e288aaec4029bdbb01089263
Reviewed-on: https://go-review.googlesource.com/c/go/+/191198
Run-TryBot: Keith Randall <khr@golang.org>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Martin Möhrmann <moehrmann@google.com>
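The heart of the change is where the hash function lives: before, it sat on
the key type's alg struct (t.key.alg.hash); after, it sits on the map type
itself (t.hasher), and the type descriptor keeps only the equal function.
Below is a minimal, standalone Go sketch of that layout change and of the
call pattern the diff switches to. The names rtype, maptype, algOld, and
bucketIndex are illustrative stand-ins, not the runtime's real definitions,
and hash/maphash stands in for the runtime's strhash/aeshash.

// sketch.go: an illustrative layout comparison, not runtime code.
package main

import (
	"fmt"
	"hash/maphash"
	"unsafe"
)

// Old layout (before this change): every type descriptor carried an alg
// struct with both a hash and an equal function, even if the type was
// never used as a map key.
type algOld struct {
	hash  func(p unsafe.Pointer, seed uintptr) uintptr
	equal func(p, q unsafe.Pointer) bool
}

// New layout (after this change): the type descriptor keeps only equal,
// and the hasher lives on the map type, so it only needs to exist for
// types that actually appear as map keys.
type rtype struct {
	equal func(p, q unsafe.Pointer) bool
}

type maptype struct {
	key    *rtype
	hasher func(key unsafe.Pointer, seed uintptr) uintptr
}

// bucketIndex shows the call pattern the diff below switches to:
// t.hasher(k, seed) instead of t.key.alg.hash(k, seed).
func bucketIndex(t *maptype, k unsafe.Pointer, seed, mask uintptr) uintptr {
	return t.hasher(k, seed) & mask
}

func main() {
	mhSeed := maphash.MakeSeed()
	stringType := &rtype{
		equal: func(p, q unsafe.Pointer) bool {
			return *(*string)(p) == *(*string)(q)
		},
	}
	stringMap := &maptype{
		key: stringType,
		hasher: func(key unsafe.Pointer, seed uintptr) uintptr {
			// Stand-in for the runtime's strhash/aeshash fast paths.
			return uintptr(maphash.String(mhSeed, *(*string)(key)))
		},
	}
	s := "hello"
	fmt.Println(bucketIndex(stringMap, unsafe.Pointer(&s), 0, 7))
}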
Diffstat (limited to 'src/runtime/map_faststr.go')
-rw-r--r--  src/runtime/map_faststr.go  14
1 file changed, 7 insertions, 7 deletions
diff --git a/src/runtime/map_faststr.go b/src/runtime/map_faststr.go
index 069994f1b7..069cda6554 100644
--- a/src/runtime/map_faststr.go
+++ b/src/runtime/map_faststr.go
@@ -76,7 +76,7 @@ func mapaccess1_faststr(t *maptype, h *hmap, ky string) unsafe.Pointer {
return unsafe.Pointer(&zeroVal[0])
}
dohash:
- hash := t.key.alg.hash(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))
+ hash := t.hasher(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))
m := bucketMask(h.B)
b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
if c := h.oldbuckets; c != nil {
@@ -171,7 +171,7 @@ func mapaccess2_faststr(t *maptype, h *hmap, ky string) (unsafe.Pointer, bool) {
return unsafe.Pointer(&zeroVal[0]), false
}
dohash:
- hash := t.key.alg.hash(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))
+ hash := t.hasher(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))
m := bucketMask(h.B)
b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
if c := h.oldbuckets; c != nil {
@@ -211,9 +211,9 @@ func mapassign_faststr(t *maptype, h *hmap, s string) unsafe.Pointer {
throw("concurrent map writes")
}
key := stringStructOf(&s)
- hash := t.key.alg.hash(noescape(unsafe.Pointer(&s)), uintptr(h.hash0))
+ hash := t.hasher(noescape(unsafe.Pointer(&s)), uintptr(h.hash0))
- // Set hashWriting after calling alg.hash for consistency with mapassign.
+ // Set hashWriting after calling t.hasher for consistency with mapassign.
h.flags ^= hashWriting
if h.buckets == nil {
@@ -307,9 +307,9 @@ func mapdelete_faststr(t *maptype, h *hmap, ky string) {
}
key := stringStructOf(&ky)
- hash := t.key.alg.hash(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))
+ hash := t.hasher(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))
- // Set hashWriting after calling alg.hash for consistency with mapdelete
+ // Set hashWriting after calling t.hasher for consistency with mapdelete
h.flags ^= hashWriting
bucket := hash & bucketMask(h.B)
@@ -429,7 +429,7 @@ func evacuate_faststr(t *maptype, h *hmap, oldbucket uintptr) {
if !h.sameSizeGrow() {
// Compute hash to make our evacuation decision (whether we need
// to send this key/elem to bucket x or bucket y).
- hash := t.key.alg.hash(k, uintptr(h.hash0))
+ hash := t.hasher(k, uintptr(h.hash0))
if hash&newbit != 0 {
useY = 1
}
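For context on the last hunk: during an incremental grow to twice as many
buckets, newbit is the old bucket count, and the single bit hash&newbit
decides whether an entry stays at its old bucket index ("bucket x") or
moves to oldIndex+oldBucketCount ("bucket y"). Here is a minimal,
self-contained sketch of that decision, with illustrative numbers only.

package main

import "fmt"

func main() {
	const oldB = 3                   // pretend h.B was 3: 2^3 = 8 old buckets
	oldBuckets := uintptr(1) << oldB // 8
	newbit := oldBuckets             // the bit that matters after growth

	// Two hashes that both landed in old bucket 5; newbit decides where
	// each one goes once the table has 16 buckets.
	for _, hash := range []uintptr{0x25, 0x2d} {
		oldIdx := hash & (oldBuckets - 1)
		useY := hash&newbit != 0
		newIdx := oldIdx
		if useY {
			newIdx += oldBuckets
		}
		fmt.Printf("hash %#x: old bucket %d -> new bucket %d (useY=%v)\n",
			hash, oldIdx, newIdx, useY)
	}
}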