path: root/src/runtime/asm_amd64.s
author    Keith Randall <keithr@alum.mit.edu>  2019-08-20 11:03:13 -0700
committer Keith Randall <khr@golang.org>  2019-08-29 21:16:09 +0000
commit    fbfb41e6389089b637562b41e05d40f5581b3bbd (patch)
tree      1a87c88aadc8eeb91cfaeaef2a442e9467882638 /src/runtime/asm_amd64.s
parent    9675f819288ae27ed4b95521303ec7ceb16686ab (diff)
runtime: switch default order of hashing algorithms
Currently the standard hasher is memhash, which checks whether aes
instructions are available, and if so redirects to aeshash.

With this CL, we call aeshash directly, which then redirects to the
fallback hash if aes instructions are not available. This reduces the
overhead for the hash function in the common case, as it requires just
one call instead of two. On architectures which have no assembly
hasher, it's a single jump slower.

Thanks to Martin for this idea.

name         old time/op  new time/op  delta
BigKeyMap-4  22.6ns ± 1%  21.1ns ± 2%  -6.55%  (p=0.000 n=9+10)

Change-Id: Ib7ca77b63d28222eb0189bc3d7130531949d853c
Reviewed-on: https://go-review.googlesource.com/c/go/+/190998
Reviewed-by: Martin Möhrmann <moehrmann@google.com>
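To make the change in call order concrete, here is a minimal Go sketch of the two dispatch schemes. It is illustrative only: useAES, hashAES, and hashFallback are hypothetical stand-ins for the runtime's useAeshash flag, the AES assembly body, and memhashFallback, and the mixing arithmetic is a toy, not either real algorithm.

package main

import "fmt"

// Hypothetical stand-in for runtime·useAeshash, set once at startup
// from CPU feature detection.
var useAES = true

// hashFallback stands in for memhashFallback (the portable hasher).
func hashFallback(data []byte, seed uintptr) uintptr {
	h := seed
	for _, b := range data {
		h = h*31 + uintptr(b) // toy mixing, not the real algorithm
	}
	return h
}

// hashAES stands in for the AES-instruction hasher.
func hashAES(data []byte, seed uintptr) uintptr {
	h := seed
	for _, b := range data {
		h = (h ^ uintptr(b)) * 0x9e3779b9 // placeholder for AESENC rounds
	}
	return h
}

// hashOld models the order before this CL: the generic entry point
// checks the flag and then calls the AES hasher -- two calls on the
// common path.
func hashOld(data []byte, seed uintptr) uintptr {
	if useAES {
		return hashAES(data, seed) // second call
	}
	return hashFallback(data, seed)
}

// hashNew models the order after this CL: callers invoke the AES
// entry point directly, and only the no-AES case branches away, so
// the common case costs one call plus a well-predicted branch.
func hashNew(data []byte, seed uintptr) uintptr {
	if !useAES {
		return hashFallback(data, seed) // rare path
	}
	h := seed // the AES body lives inline here
	for _, b := range data {
		h = (h ^ uintptr(b)) * 0x9e3779b9
	}
	return h
}

func main() {
	fmt.Println(hashOld([]byte("example"), 1), hashNew([]byte("example"), 1))
}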
Diffstat (limited to 'src/runtime/asm_amd64.s')
-rw-r--r--  src/runtime/asm_amd64.s  34
1 file changed, 25 insertions(+), 9 deletions(-)
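The benchmark figures in the commit message come from benchstat over end-to-end map benchmarks. To measure just the wrapper-call overhead this CL removes, one could write a microbenchmark along the following lines (a sketch under assumed names; this test file is not part of the CL):

package dispatch

import "testing"

var useFast = true

//go:noinline
func fastHash(x uint64) uint64 { return x * 0x9e3779b97f4a7c15 }

//go:noinline
func slowHash(x uint64) uint64 { return x*31 + 7 }

// Old order: a wrapper checks the flag, then makes a second call.
//go:noinline
func hashViaWrapper(x uint64) uint64 {
	if useFast {
		return fastHash(x)
	}
	return slowHash(x)
}

// New order: the fast body is the entry point; only the rare case
// branches to the fallback.
//go:noinline
func hashDirect(x uint64) uint64 {
	if !useFast {
		return slowHash(x)
	}
	return x * 0x9e3779b97f4a7c15
}

var sink uint64 // keeps results live so the calls are not optimized away

func BenchmarkWrapper(b *testing.B) {
	for i := 0; i < b.N; i++ {
		sink = hashViaWrapper(uint64(i))
	}
}

func BenchmarkDirect(b *testing.B) {
	for i := 0; i < b.N; i++ {
		sink = hashDirect(uint64(i))
	}
}

Run with go test -bench=. to compare the two shapes directly.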
diff --git a/src/runtime/asm_amd64.s b/src/runtime/asm_amd64.s
index fd3a9c3127..ba673432c1 100644
--- a/src/runtime/asm_amd64.s
+++ b/src/runtime/asm_amd64.s
@@ -885,21 +885,29 @@ done:
MOVQ AX, ret+0(FP)
RET
-// func aeshash(p unsafe.Pointer, h, s uintptr) uintptr
+// func memhash(p unsafe.Pointer, h, s uintptr) uintptr
// hash function using AES hardware instructions
-TEXT runtime·aeshash(SB),NOSPLIT,$0-32
+TEXT runtime·memhash(SB),NOSPLIT,$0-32
+ CMPB runtime·useAeshash(SB), $0
+ JEQ noaes
MOVQ p+0(FP), AX // ptr to data
MOVQ s+16(FP), CX // size
LEAQ ret+24(FP), DX
JMP aeshashbody<>(SB)
+noaes:
+ JMP runtime·memhashFallback(SB)
-// func aeshashstr(p unsafe.Pointer, h uintptr) uintptr
-TEXT runtime·aeshashstr(SB),NOSPLIT,$0-24
+// func strhash(p unsafe.Pointer, h uintptr) uintptr
+TEXT runtime·strhash(SB),NOSPLIT,$0-24
+ CMPB runtime·useAeshash(SB), $0
+ JEQ noaes
MOVQ p+0(FP), AX // ptr to string struct
MOVQ 8(AX), CX // length of string
MOVQ (AX), AX // string data
LEAQ ret+16(FP), DX
JMP aeshashbody<>(SB)
+noaes:
+ JMP runtime·strhashFallback(SB)
// AX: data
// CX: length
@@ -1232,8 +1240,10 @@ aesloop:
MOVQ X8, (DX)
RET
-// func aeshash32(p unsafe.Pointer, h uintptr) uintptr
-TEXT runtime·aeshash32(SB),NOSPLIT,$0-24
+// func memhash32(p unsafe.Pointer, h uintptr) uintptr
+TEXT runtime·memhash32(SB),NOSPLIT,$0-24
+ CMPB runtime·useAeshash(SB), $0
+ JEQ noaes
MOVQ p+0(FP), AX // ptr to data
MOVQ h+8(FP), X0 // seed
PINSRD $2, (AX), X0 // data
@@ -1242,9 +1252,13 @@ TEXT runtime·aeshash32(SB),NOSPLIT,$0-24
AESENC runtime·aeskeysched+32(SB), X0
MOVQ X0, ret+16(FP)
RET
-
-// func aeshash64(p unsafe.Pointer, h uintptr) uintptr
-TEXT runtime·aeshash64(SB),NOSPLIT,$0-24
+noaes:
+ JMP runtime·memhash32Fallback(SB)
+
+// func memhash64(p unsafe.Pointer, h uintptr) uintptr
+TEXT runtime·memhash64(SB),NOSPLIT,$0-24
+ CMPB runtime·useAeshash(SB), $0
+ JEQ noaes
MOVQ p+0(FP), AX // ptr to data
MOVQ h+8(FP), X0 // seed
PINSRQ $1, (AX), X0 // data
@@ -1253,6 +1267,8 @@ TEXT runtime·aeshash64(SB),NOSPLIT,$0-24
AESENC runtime·aeskeysched+32(SB), X0
MOVQ X0, ret+16(FP)
RET
+noaes:
+ JMP runtime·memhash64Fallback(SB)
// simple mask to get rid of data in the high part of the register.
DATA masks<>+0x00(SB)/8, $0x0000000000000000