author    Keith Randall <keithr@alum.mit.edu>  2019-08-06 15:22:51 -0700
committer Keith Randall <khr@golang.org>       2019-09-03 20:41:29 +0000
commit    36f30ba289e31df033d100b2adb4eaf557f05a34
tree      17579106197e4c1d80b67cefdf9d8fdfd2ff2a2c
parent    671bcb59666c37cb32b154c36aa91b29fdbf0835
cmd/compile,runtime: generate hash functions only for types which are map keys
Right now we generate hash functions for all types, just in case they
are used as map keys. That's a lot of wasted effort and binary size
for types which will never be used as a map key. Instead, generate
hash functions only for types that we know are map keys.

Just doing that is a bit too simple, since maps with an interface
type as a key might have to hash any concrete key type that
implements that interface. So for that case, implement hashing of
such types at runtime (instead of with generated code). It will be
slower, but only for maps with interface types as keys, and maybe
only a bit slower as the aeshash time probably dominates the dispatch
time.

Reorg where we keep the equals and hash functions. Move the hash
function from the key type to the map type, saving a field in every
non-map type. That leaves only one function in the alg structure, so
get rid of that and just keep the equal function in the type
descriptor itself.

cmd/go now has 10 generated hash functions, instead of 504. Makes
cmd/go 1.0% smaller. Update #6853.

Speed on non-interface keys is unchanged. Speed on interface keys is
~20% slower:

name                  old time/op  new time/op  delta
MapInterfaceString-8  23.0ns ±21%  27.6ns ±14%  +20.01%  (p=0.002 n=10+10)
MapInterfacePtr-8     19.4ns ±16%  23.7ns ± 7%  +22.48%  (p=0.000 n=10+8)

Change-Id: I7c2e42292a46b5d4e288aaec4029bdbb01089263
Reviewed-on: https://go-review.googlesource.com/c/go/+/191198
Run-TryBot: Keith Randall <khr@golang.org>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Martin Möhrmann <moehrmann@google.com>
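To make the tradeoff concrete, here is a minimal illustration (not part of
this CL) of why interface-keyed maps cannot rely on a single
compiler-generated hash function: the set of concrete key types stored
behind the interface is only known at runtime, so each insert dispatches
through the runtime's typehash instead.

package main

import "fmt"

type point struct{ X, Y int }

func main() {
	m := map[interface{}]string{}
	// Each concrete type stored behind the interface key must be
	// hashed by the runtime rather than by a hash function the
	// compiler could have generated for a single static key type.
	m[42] = "int key"
	m["hi"] = "string key"
	m[point{1, 2}] = "struct key"
	fmt.Println(len(m)) // 3
}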
Diffstat (limited to 'src/runtime/alg.go')
-rw-r--r--  src/runtime/alg.go  |  121
1 file changed, 78 insertions(+), 43 deletions(-)
diff --git a/src/runtime/alg.go b/src/runtime/alg.go
index 57306f81d9..935d45d503 100644
--- a/src/runtime/alg.go
+++ b/src/runtime/alg.go
@@ -34,17 +34,6 @@ const (
alg_max
)
-// typeAlg is also copied/used in reflect/type.go.
-// keep them in sync.
-type typeAlg struct {
- // function for hashing objects of this type
- // (ptr to object, seed) -> hash
- hash func(unsafe.Pointer, uintptr) uintptr
- // function for comparing objects of this type
- // (ptr to object A, ptr to object B) -> ==?
- equal func(unsafe.Pointer, unsafe.Pointer) bool
-}
-
func memhash0(p unsafe.Pointer, h uintptr) uintptr {
return h
}
@@ -68,23 +57,9 @@ func memhash_varlen(p unsafe.Pointer, h uintptr) uintptr {
return memhash(p, h, size)
}
-var algarray = [alg_max]typeAlg{
- alg_NOEQ: {nil, nil},
- alg_MEM0: {memhash0, memequal0},
- alg_MEM8: {memhash8, memequal8},
- alg_MEM16: {memhash16, memequal16},
- alg_MEM32: {memhash32, memequal32},
- alg_MEM64: {memhash64, memequal64},
- alg_MEM128: {memhash128, memequal128},
- alg_STRING: {strhash, strequal},
- alg_INTER: {interhash, interequal},
- alg_NILINTER: {nilinterhash, nilinterequal},
- alg_FLOAT32: {f32hash, f32equal},
- alg_FLOAT64: {f64hash, f64equal},
- alg_CPLX64: {c64hash, c64equal},
- alg_CPLX128: {c128hash, c128equal},
-}
-
+// runtime variable to check if the processor we're running on
+// actually supports the instructions used by the AES-based
+// hash implementation.
var useAeshash bool
// in asm_*.s
@@ -144,14 +119,17 @@ func interhash(p unsafe.Pointer, h uintptr) uintptr {
return h
}
t := tab._type
- fn := t.alg.hash
- if fn == nil {
+ if t.equal == nil {
+ // Check hashability here. We could do this check inside
+ // typehash, but we want to report the topmost type in
+ // the error text (e.g. in a struct with a field of slice type
+ // we want to report the struct, not the slice).
panic(errorString("hash of unhashable type " + t.string()))
}
if isDirectIface(t) {
- return c1 * fn(unsafe.Pointer(&a.data), h^c0)
+ return c1 * typehash(t, unsafe.Pointer(&a.data), h^c0)
} else {
- return c1 * fn(a.data, h^c0)
+ return c1 * typehash(t, a.data, h^c0)
}
}
@@ -161,15 +139,72 @@ func nilinterhash(p unsafe.Pointer, h uintptr) uintptr {
if t == nil {
return h
}
- fn := t.alg.hash
- if fn == nil {
+ if t.equal == nil {
+ // See comment in interhash above.
panic(errorString("hash of unhashable type " + t.string()))
}
if isDirectIface(t) {
- return c1 * fn(unsafe.Pointer(&a.data), h^c0)
+ return c1 * typehash(t, unsafe.Pointer(&a.data), h^c0)
} else {
- return c1 * fn(a.data, h^c0)
+ return c1 * typehash(t, a.data, h^c0)
+ }
+}
+
+// typehash computes the hash of the object of type t at address p.
+// h is the seed.
+// This function is seldom used. Most maps use for hashing either
+// fixed functions (e.g. f32hash) or compiler-generated functions
+// (e.g. for a type like struct { x, y string }). This implementation
+// is slower but more general and is used for hashing interface types
+// (called from interhash or nilinterhash, above) or for hashing in
+// maps generated by reflect.MapOf (reflect_typehash, below).
+func typehash(t *_type, p unsafe.Pointer, h uintptr) uintptr {
+ if t.tflag&tflagRegularMemory != 0 {
+ return memhash(p, h, t.size)
}
+ switch t.kind & kindMask {
+ case kindFloat32:
+ return f32hash(p, h)
+ case kindFloat64:
+ return f64hash(p, h)
+ case kindComplex64:
+ return c64hash(p, h)
+ case kindComplex128:
+ return c128hash(p, h)
+ case kindString:
+ return strhash(p, h)
+ case kindInterface:
+ i := (*interfacetype)(unsafe.Pointer(t))
+ if len(i.mhdr) == 0 {
+ return nilinterhash(p, h)
+ }
+ return interhash(p, h)
+ case kindArray:
+ a := (*arraytype)(unsafe.Pointer(t))
+ for i := uintptr(0); i < a.len; i++ {
+ h = typehash(a.elem, add(p, i*a.elem.size), h)
+ }
+ return h
+ case kindStruct:
+ s := (*structtype)(unsafe.Pointer(t))
+ for _, f := range s.fields {
+ // TODO: maybe we could hash several contiguous fields all at once.
+ if f.name.isBlank() {
+ continue
+ }
+ h = typehash(f.typ, add(p, f.offset()), h)
+ }
+ return h
+ default:
+ // Should never happen, as typehash should only be called
+ // with comparable types.
+ panic(errorString("hash of unhashable type " + t.string()))
+ }
+}
+
+//go:linkname reflect_typehash reflect.typehash
+func reflect_typehash(t *_type, p unsafe.Pointer, h uintptr) uintptr {
+ return typehash(t, p, h)
}
func memequal0(p, q unsafe.Pointer) bool {
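The typehash function added above threads the seed h through every
recursive call, so arrays and structs fold their element hashes in order.
As a rough userland analogue (not the runtime code), the sketch below
reproduces that dispatch shape with package reflect and hash/maphash;
walkHash and all other names in it are illustrative, and it assumes
Go 1.19+ for maphash.String.

package main

import (
	"fmt"
	"hash/maphash"
	"reflect"
)

var seed = maphash.MakeSeed()

// walkHash mirrors the shape of typehash: simple kinds hash directly,
// while arrays and structs fold element hashes into the running seed h,
// just as typehash does with h = typehash(a.elem, ..., h).
func walkHash(v reflect.Value, h uint64) uint64 {
	switch v.Kind() {
	case reflect.String:
		// Mix the running seed with this value's hash.
		return maphash.String(seed, fmt.Sprintf("%d|%s", h, v.String()))
	case reflect.Array:
		for i := 0; i < v.Len(); i++ {
			h = walkHash(v.Index(i), h)
		}
		return h
	case reflect.Struct:
		for i := 0; i < v.NumField(); i++ {
			h = walkHash(v.Field(i), h)
		}
		return h
	default:
		// typehash panics the same way for non-comparable kinds.
		panic("unhashable kind in this sketch: " + v.Kind().String())
	}
}

func main() {
	type key struct {
		Name string
		Tags [2]string
	}
	k := key{"a", [2]string{"b", "c"}}
	fmt.Printf("%#x\n", walkHash(reflect.ValueOf(k), 0))
}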
@@ -219,7 +254,7 @@ func efaceeq(t *_type, x, y unsafe.Pointer) bool {
if t == nil {
return true
}
- eq := t.alg.equal
+ eq := t.equal
if eq == nil {
panic(errorString("comparing uncomparable type " + t.string()))
}
@@ -236,7 +271,7 @@ func ifaceeq(tab *itab, x, y unsafe.Pointer) bool {
return true
}
t := tab._type
- eq := t.alg.equal
+ eq := t.equal
if eq == nil {
panic(errorString("comparing uncomparable type " + t.string()))
}
@@ -249,7 +284,7 @@ func ifaceeq(tab *itab, x, y unsafe.Pointer) bool {
// Testing adapters for hash quality tests (see hash_test.go)
func stringHash(s string, seed uintptr) uintptr {
- return algarray[alg_STRING].hash(noescape(unsafe.Pointer(&s)), seed)
+ return strhash(noescape(unsafe.Pointer(&s)), seed)
}
func bytesHash(b []byte, seed uintptr) uintptr {
@@ -258,21 +293,21 @@ func bytesHash(b []byte, seed uintptr) uintptr {
}
func int32Hash(i uint32, seed uintptr) uintptr {
- return algarray[alg_MEM32].hash(noescape(unsafe.Pointer(&i)), seed)
+ return memhash32(noescape(unsafe.Pointer(&i)), seed)
}
func int64Hash(i uint64, seed uintptr) uintptr {
- return algarray[alg_MEM64].hash(noescape(unsafe.Pointer(&i)), seed)
+ return memhash64(noescape(unsafe.Pointer(&i)), seed)
}
func efaceHash(i interface{}, seed uintptr) uintptr {
- return algarray[alg_NILINTER].hash(noescape(unsafe.Pointer(&i)), seed)
+ return nilinterhash(noescape(unsafe.Pointer(&i)), seed)
}
func ifaceHash(i interface {
F()
}, seed uintptr) uintptr {
- return algarray[alg_INTER].hash(noescape(unsafe.Pointer(&i)), seed)
+ return interhash(noescape(unsafe.Pointer(&i)), seed)
}
const hashRandomBytes = sys.PtrSize / 4 * 64
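Finally, a small example of the unhashable-type panic path that the
t.equal == nil checks in interhash and nilinterhash guard: storing a
non-comparable concrete value behind an interface key compiles, but
panics when the runtime tries to hash it at insert time.

package main

func main() {
	m := map[interface{}]int{}
	m["ok"] = 1 // fine: strings hash via strhash
	// A slice is not comparable, so there is no hash for it; this
	// compiles but panics at runtime with
	// "runtime error: hash of unhashable type []int".
	m[[]int{1, 2}] = 2
}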