author    Radu Berinde <radu@cockroachlabs.com>    2016-08-16 08:05:39 -0400
committer Keith Randall <khr@golang.org>           2016-08-17 21:20:50 +0000
commit    0c819b654f0e2b24f418aeac5c5627516905eda9 (patch)
tree      fffdf67f74c158bc7f8fb292dab818231cc99af3 /src/hash
parent    3d5cf72ca9beaedc5dcc8b094945de95fa35a670 (diff)
hash/crc32: improve the processing of the last bytes in the SSE4.2 code for AMD64
This commit improves the processing of the final few bytes in castagnoliSSE42: instead of processing one byte at a time, we use all variants of the CRC32 instruction to process 4 bytes, then 2, then 1. The difference is only noticeable for small buffers of "odd" sizes. We make a similar improvement for processing the first few bytes when the buffer is unaligned.

Also fix the test, which was not actually verifying the results for misaligned buffers (WriteString was creating an internal copy which was aligned).

Add benchmarks for length 15 (aligned and misaligned); results below.

name                          old time/op  new time/op  delta
CastagnoliCrc15B-4            25.1ns ± 0%  22.1ns ± 1%  -12.14%
CastagnoliCrc15BMisaligned-4  25.2ns ± 0%  22.9ns ± 1%   -9.03%
CastagnoliCrc40B-4            23.1ns ± 0%  23.4ns ± 0%   +1.08%
CastagnoliCrc1KB-4             127ns ± 0%   128ns ± 0%   +1.18%
CastagnoliCrc4KB-4             462ns ± 0%   464ns ± 0%        ~
CastagnoliCrc32KB-4           3.58µs ± 0%  3.60µs ± 0%   +0.58%

name                          old speed      new speed      delta
CastagnoliCrc15B-4             597MB/s ± 0%   679MB/s ± 1%  +13.77%
CastagnoliCrc15BMisaligned-4   596MB/s ± 0%   655MB/s ± 1%   +9.94%
CastagnoliCrc40B-4            1.73GB/s ± 0%  1.71GB/s ± 0%   -1.14%
CastagnoliCrc1KB-4            8.01GB/s ± 0%  7.93GB/s ± 1%   -1.06%
CastagnoliCrc4KB-4            8.86GB/s ± 0%  8.83GB/s ± 0%        ~
CastagnoliCrc32KB-4           9.14GB/s ± 0%  9.09GB/s ± 0%   -0.58%

Change-Id: I499e37af2241d28e3e5d522bbab836c1a718430a
Reviewed-on: https://go-review.googlesource.com/24470
Reviewed-by: Keith Randall <khr@golang.org>
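The tail handling can be modeled in Go as follows. This is an illustrative sketch, not part of the patch: crc32cBits and castagnoliTail are invented names, and crc32cBits is a software stand-in for the hardware CRC32B/CRC32W/CRC32L instructions. The point is the branch structure: the leftover length fits in 3 bits, and each bit selects exactly one fixed-width step.

package main

import (
	"encoding/binary"
	"fmt"
	"hash/crc32"
)

// crc32cBits models one hardware CRC32 step in software: it XORs the low n
// bits of v into crc and runs n rounds of reflected division by the
// Castagnoli polynomial. n=8 behaves like CRC32B, n=16 like CRC32W and
// n=32 like CRC32L.
func crc32cBits(crc, v uint32, n int) uint32 {
	crc ^= v
	for i := 0; i < n; i++ {
		if crc&1 != 0 {
			crc = crc>>1 ^ 0x82f63b78 // reversed Castagnoli polynomial
		} else {
			crc >>= 1
		}
	}
	return crc
}

// castagnoliTail folds the final 0..7 bytes into crc the way the patched
// assembly does: one 4-byte step, then one 2-byte step, then one 1-byte
// step, each taken only if the corresponding bit of the leftover length
// is set.
func castagnoliTail(crc uint32, p []byte) uint32 {
	if len(p)&4 != 0 {
		crc = crc32cBits(crc, binary.LittleEndian.Uint32(p), 32)
		p = p[4:]
	}
	if len(p)&2 != 0 {
		crc = crc32cBits(crc, uint32(binary.LittleEndian.Uint16(p)), 16)
		p = p[2:]
	}
	if len(p)&1 != 0 {
		crc = crc32cBits(crc, uint32(p[0]), 8)
	}
	return crc
}

func main() {
	data := []byte("Go hash/crc32 tail test") // 23 bytes: two full 8-byte chunks plus a 7-byte tail
	crc := ^uint32(0)
	i := 0
	for ; i+8 <= len(data); i += 8 {
		// The assembly uses a single 8-byte CRC32Q here; two 4-byte
		// steps are equivalent in this software model.
		crc = crc32cBits(crc, binary.LittleEndian.Uint32(data[i:]), 32)
		crc = crc32cBits(crc, binary.LittleEndian.Uint32(data[i+4:]), 32)
	}
	crc = ^castagnoliTail(crc, data[i:])

	want := crc32.Checksum(data, crc32.MakeTable(crc32.Castagnoli))
	fmt.Printf("model: %#x  stdlib: %#x\n", crc, want)
}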
Diffstat (limited to 'src/hash')
-rw-r--r--  src/hash/crc32/crc32_amd64.s  67
-rw-r--r--  src/hash/crc32/crc32_test.go  52
2 files changed, 85 insertions(+), 34 deletions(-)
diff --git a/src/hash/crc32/crc32_amd64.s b/src/hash/crc32/crc32_amd64.s
index caacfae21d..a775a194df 100644
--- a/src/hash/crc32/crc32_amd64.s
+++ b/src/hash/crc32/crc32_amd64.s
@@ -12,40 +12,79 @@ TEXT ·castagnoliSSE42(SB),NOSPLIT,$0
NOTL AX
- /* If there's less than 8 bytes to process, we do it byte-by-byte. */
+ // If there are fewer than 8 bytes to process, skip alignment.
CMPQ CX, $8
- JL cleanup
+ JL less_than_8
- /* Process individual bytes until the input is 8-byte aligned. */
-startup:
MOVQ SI, BX
ANDQ $7, BX
JZ aligned
+ // Process the first few bytes to 8-byte align the input.
+
+ // BX = 8 - BX. We need to process this many bytes to align.
+ SUBQ $1, BX
+ XORQ $7, BX
+
+ BTQ $0, BX
+ JNC align_2
+
CRC32B (SI), AX
DECQ CX
INCQ SI
- JMP startup
+
+align_2:
+ BTQ $1, BX
+ JNC align_4
+
+ // CRC32W (SI), AX
+ BYTE $0x66; BYTE $0xf2; BYTE $0x0f; BYTE $0x38; BYTE $0xf1; BYTE $0x06
+
+ SUBQ $2, CX
+ ADDQ $2, SI
+
+align_4:
+ BTQ $2, BX
+ JNC aligned
+
+ // CRC32L (SI), AX
+ BYTE $0xf2; BYTE $0x0f; BYTE $0x38; BYTE $0xf1; BYTE $0x06
+
+ SUBQ $4, CX
+ ADDQ $4, SI
aligned:
- /* The input is now 8-byte aligned and we can process 8-byte chunks. */
+ // The input is now 8-byte aligned and we can process 8-byte chunks.
CMPQ CX, $8
- JL cleanup
+ JL less_than_8
CRC32Q (SI), AX
ADDQ $8, SI
SUBQ $8, CX
JMP aligned
-cleanup:
- /* We may have some bytes left over that we process one at a time. */
- CMPQ CX, $0
- JE done
+less_than_8:
+ // We may have some bytes left over; process 4 bytes, then 2, then 1.
+ BTQ $2, CX
+ JNC less_than_4
+
+ // CRC32L (SI), AX
+ BYTE $0xf2; BYTE $0x0f; BYTE $0x38; BYTE $0xf1; BYTE $0x06
+ ADDQ $4, SI
+
+less_than_4:
+ BTQ $1, CX
+ JNC less_than_2
+
+ // CRC32W (SI), AX
+ BYTE $0x66; BYTE $0xf2; BYTE $0x0f; BYTE $0x38; BYTE $0xf1; BYTE $0x06
+ ADDQ $2, SI
+
+less_than_2:
+ BTQ $0, CX
+ JNC done
CRC32B (SI), AX
- INCQ SI
- DECQ CX
- JMP cleanup
done:
NOTL AX
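The alignment prologue above mirrors the tail handling: it needs 8 - (addr & 7) bytes to reach 8-byte alignment, and because that count also fits in 3 bits, bit 0 selects a 1-byte step, bit 1 a 2-byte step and bit 2 a 4-byte step. The SUBQ $1 / XORQ $7 pair computes 8 - x without loading the constant 8: for 1 <= x <= 7, x-1 <= 7, so 7 - (x-1) never borrows and equals 7 XOR (x-1). A tiny sketch (not part of the patch) that checks the identity:

package main

import "fmt"

func main() {
	// For a pointer misaligned by x (1..7), the prologue must consume
	// 8-x bytes. The assembly computes this as (x-1) XOR 7, which is
	// the same value because 7-(x-1) never borrows when x-1 <= 7.
	for x := uint64(1); x <= 7; x++ {
		fmt.Printf("misalignment %d: (x-1)^7 = %d, 8-x = %d\n", x, (x-1)^7, 8-x)
	}
}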
diff --git a/src/hash/crc32/crc32_test.go b/src/hash/crc32/crc32_test.go
index e2b3557828..067c42adf0 100644
--- a/src/hash/crc32/crc32_test.go
+++ b/src/hash/crc32/crc32_test.go
@@ -67,56 +67,68 @@ func TestGolden(t *testing.T) {
t.Errorf("Castagnoli(%s) = 0x%x want 0x%x", g.in, s, g.castagnoli)
}
- if len(g.in) > 0 {
- // The SSE4.2 implementation of this has code to deal
- // with misaligned data so we ensure that we test that
- // too.
- castagnoli = New(castagnoliTab)
- io.WriteString(castagnoli, g.in[:1])
- io.WriteString(castagnoli, g.in[1:])
- s = castagnoli.Sum32()
- if s != g.castagnoli {
- t.Errorf("Castagnoli[misaligned](%s) = 0x%x want 0x%x", g.in, s, g.castagnoli)
+ // The SSE4.2 implementation of this has code to deal
+ // with misaligned data so we ensure that we test that
+ // too.
+ for delta := 1; delta <= 7; delta++ {
+ if len(g.in) > delta {
+ in := []byte(g.in)
+ castagnoli = New(castagnoliTab)
+ castagnoli.Write(in[:delta])
+ castagnoli.Write(in[delta:])
+ s = castagnoli.Sum32()
+ if s != g.castagnoli {
+ t.Errorf("Castagnoli[misaligned](%s) = 0x%x want 0x%x", g.in, s, g.castagnoli)
+ }
}
}
}
}
func BenchmarkIEEECrc40B(b *testing.B) {
- benchmark(b, NewIEEE(), 40)
+ benchmark(b, NewIEEE(), 40, 0)
}
func BenchmarkIEEECrc1KB(b *testing.B) {
- benchmark(b, NewIEEE(), 1<<10)
+ benchmark(b, NewIEEE(), 1<<10, 0)
}
func BenchmarkIEEECrc4KB(b *testing.B) {
- benchmark(b, NewIEEE(), 4<<10)
+ benchmark(b, NewIEEE(), 4<<10, 0)
}
func BenchmarkIEEECrc32KB(b *testing.B) {
- benchmark(b, NewIEEE(), 32<<10)
+ benchmark(b, NewIEEE(), 32<<10, 0)
+}
+
+func BenchmarkCastagnoliCrc15B(b *testing.B) {
+ benchmark(b, New(MakeTable(Castagnoli)), 15, 0)
+}
+
+func BenchmarkCastagnoliCrc15BMisaligned(b *testing.B) {
+ benchmark(b, New(MakeTable(Castagnoli)), 15, 1)
}
func BenchmarkCastagnoliCrc40B(b *testing.B) {
- benchmark(b, New(MakeTable(Castagnoli)), 40)
+ benchmark(b, New(MakeTable(Castagnoli)), 40, 0)
}
func BenchmarkCastagnoliCrc1KB(b *testing.B) {
- benchmark(b, New(MakeTable(Castagnoli)), 1<<10)
+ benchmark(b, New(MakeTable(Castagnoli)), 1<<10, 0)
}
func BenchmarkCastagnoliCrc4KB(b *testing.B) {
- benchmark(b, New(MakeTable(Castagnoli)), 4<<10)
+ benchmark(b, New(MakeTable(Castagnoli)), 4<<10, 0)
}
func BenchmarkCastagnoliCrc32KB(b *testing.B) {
- benchmark(b, New(MakeTable(Castagnoli)), 32<<10)
+ benchmark(b, New(MakeTable(Castagnoli)), 32<<10, 0)
}
-func benchmark(b *testing.B, h hash.Hash32, n int64) {
+func benchmark(b *testing.B, h hash.Hash32, n, alignment int64) {
b.SetBytes(n)
- data := make([]byte, n)
+ data := make([]byte, n+alignment)
+ data = data[alignment:]
for i := range data {
data[i] = byte(i)
}