author    fanzha02 <fannie.zhang@arm.com>    2020-10-21 18:51:42 +0800
committer fannie zhang <Fannie.Zhang@arm.com>    2021-09-10 02:44:36 +0000
commit    2091bd3f26e5143bd050833b3558893e1bc34625 (patch)
tree      e38140e685e10ec2e3e0ad8f24a363981f37562f /test
parent    b32209d22d0418594bd60af152b0f2c90c677941 (diff)
cmd/compile: simplify arm64 bitfield optimizations
In some of the rewrite rules for arm64 bitfield optimizations, the bitfield lsb value and the bitfield width value are related to datasize. Some rules use datasize directly to check that the bitfield lsb value is valid and to compute the bitfield width value, while others call the isARM64BFMask() and arm64BFWidth() functions. For consistency, this patch changes them all to use datasize.

In addition, this patch sorts the codegen test cases.

Running the "toolstash-check -all" command finds one piece of inconsistent generated code, shown below.

    new:    src/math/fma.go:104    BEQ 247
    master: src/math/fma.go:104    BEQ 248

The inconsistency comes from this patch changing the allowed range of the bitfield lsb value in the "UBFIZ" optimization rules from "lc+(32|16|8)<64" to "lc<64", so that the following code is now compiled to a "UBFIZ". The logic of the changed code is still correct.

The code of src/math/fma.go:160:

    const uvinf = 0x7FF0000000000000

    func FMA(a, b uint32) float64 {
        ps := a + b
        return Float64frombits(uint64(ps)<<63 | uvinf)
    }

The new assembly code:

    TEXT  "".FMA(SB), LEAF|NOFRAME|ABIInternal, $0-16
    MOVWU "".a(FP), R0
    MOVWU "".b+4(FP), R1
    ADD   R1, R0, R0
    UBFIZ $63, R0, $1, R0
    ORR   $9218868437227405312, R0, R0
    MOVD  R0, "".~r2+8(FP)
    RET   (R30)

The master assembly code:

    TEXT  "".FMA(SB), LEAF|NOFRAME|ABIInternal, $0-16
    MOVWU "".a(FP), R0
    MOVWU "".b+4(FP), R1
    ADD   R1, R0, R0
    MOVWU R0, R0
    LSL   $63, R0, R0
    ORR   $9218868437227405312, R0, R0
    MOVD  R0, "".~r2+8(FP)
    RET   (R30)

Change-Id: I9061104adfdfd3384d0525327ae1e5c8b0df5c35
Reviewed-on: https://go-review.googlesource.com/c/go/+/265038
Trust: fannie zhang <Fannie.Zhang@arm.com>
Run-TryBot: fannie zhang <Fannie.Zhang@arm.com>
TryBot-Result: Go Bot <gobot@golang.org>
Reviewed-by: Cherry Mui <cherryyz@google.com>
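
As an aside (not part of the CL above), here is a minimal Go sketch of the equivalence argument in the commit message: shifting a zero-extended 32-bit value left by 63 keeps only its lowest bit, which is exactly the single bit that UBFIZ $63, Rn, $1 inserts at position 63. The helper names lslForm and ubfizForm are illustrative only, not names used by the compiler.

    // Sketch only: compare the two instruction sequences in Go.
    package main

    import "fmt"

    // lslForm mirrors the master sequence: MOVWU (zero-extend) followed by LSL $63.
    func lslForm(ps uint32) uint64 { return uint64(ps) << 63 }

    // ubfizForm mirrors the new sequence: UBFIZ $63, R0, $1 takes bit 0 and places it at bit 63.
    func ubfizForm(ps uint32) uint64 { return (uint64(ps) & 1) << 63 }

    func main() {
        for _, ps := range []uint32{0, 1, 2, 3, 0xfffffffe, 0xffffffff} {
            fmt.Println(lslForm(ps) == ubfizForm(ps)) // prints true for every value
        }
    }

The ORR with uvinf that follows is identical in both sequences, so the final results agree bit for bit.
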
Diffstat (limited to 'test')
-rw-r--r--   test/codegen/bitfield.go   144
1 file changed, 100 insertions, 44 deletions
diff --git a/test/codegen/bitfield.go b/test/codegen/bitfield.go
index 3ed9cfe603..8327da6cf8 100644
--- a/test/codegen/bitfield.go
+++ b/test/codegen/bitfield.go
@@ -77,11 +77,13 @@ func bfxil2(x, y uint64) uint64 {
}
// sbfiz
+// merge shifts into sbfiz: (x << lc) >> rc && lc > rc.
func sbfiz1(x int64) int64 {
// arm64:"SBFIZ\t[$]1, R[0-9]+, [$]60",-"LSL",-"ASR"
return (x << 4) >> 3
}
+// merge shift and sign-extension into sbfiz.
func sbfiz2(x int32) int64 {
return int64(x << 3) // arm64:"SBFIZ\t[$]3, R[0-9]+, [$]29",-"LSL"
}
@@ -94,6 +96,8 @@ func sbfiz4(x int8) int64 {
return int64(x << 3) // arm64:"SBFIZ\t[$]3, R[0-9]+, [$]5",-"LSL"
}
+// sbfiz combinations.
+// merge shift with sbfiz into sbfiz.
func sbfiz5(x int32) int32 {
// arm64:"SBFIZ\t[$]1, R[0-9]+, [$]28",-"LSL",-"ASR"
return (x << 4) >> 3
@@ -112,6 +116,7 @@ func sbfiz8(x int32) int64 {
}
// sbfx
+// merge shifts into sbfx: (x << lc) >> rc && lc <= rc.
func sbfx1(x int64) int64 {
return (x << 3) >> 4 // arm64:"SBFX\t[$]1, R[0-9]+, [$]60",-"LSL",-"ASR"
}
@@ -120,6 +125,7 @@ func sbfx2(x int64) int64 {
return (x << 60) >> 60 // arm64:"SBFX\tZR, R[0-9]+, [$]4",-"LSL",-"ASR"
}
+// merge shift and sign-extension into sbfx.
func sbfx3(x int32) int64 {
return int64(x) >> 3 // arm64:"SBFX\t[$]3, R[0-9]+, [$]29",-"ASR"
}
@@ -132,131 +138,181 @@ func sbfx5(x int8) int64 {
return int64(x) >> 3 // arm64:"SBFX\t[$]3, R[0-9]+, [$]5",-"ASR"
}
-func sbfx6(x int32) int32 {
+func sbfx6(x int32) int64 {
+ return int64(x >> 30) // arm64:"SBFX\t[$]30, R[0-9]+, [$]2"
+}
+
+func sbfx7(x int16) int64 {
+ return int64(x >> 10) // arm64:"SBFX\t[$]10, R[0-9]+, [$]6"
+}
+
+func sbfx8(x int8) int64 {
+ return int64(x >> 5) // arm64:"SBFX\t[$]5, R[0-9]+, [$]3"
+}
+
+// sbfx combinations.
+// merge shifts with sbfiz into sbfx.
+func sbfx9(x int32) int32 {
return (x << 3) >> 4 // arm64:"SBFX\t[$]1, R[0-9]+, [$]28",-"LSL",-"ASR"
}
// merge sbfx and sign-extension into sbfx.
-func sbfx7(x int32) int64 {
+func sbfx10(x int32) int64 {
c := x + 5
return int64(c >> 20) // arm64:"SBFX\t[$]20, R[0-9]+, [$]12",-"MOVW\tR[0-9]+, R[0-9]+"
}
// ubfiz
+// merge shifts into ubfiz: (x<<lc)>>rc && lc>rc
func ubfiz1(x uint64) uint64 {
- // arm64:"UBFIZ\t[$]3, R[0-9]+, [$]12",-"LSL",-"AND"
- // s390x:"RISBGZ\t[$]49, [$]60, [$]3,",-"SLD",-"AND"
- return (x & 0xfff) << 3
-}
-
-func ubfiz2(x uint64) uint64 {
- // arm64:"UBFIZ\t[$]4, R[0-9]+, [$]12",-"LSL",-"AND"
- // s390x:"RISBGZ\t[$]48, [$]59, [$]4,",-"SLD",-"AND"
- return (x << 4) & 0xfff0
+ // arm64:"UBFIZ\t[$]1, R[0-9]+, [$]60",-"LSL",-"LSR"
+ // s390x:"RISBGZ\t[$]3, [$]62, [$]1, ",-"SLD",-"SRD"
+ return (x << 4) >> 3
}
-func ubfiz3(x uint32) uint64 {
+// merge shift and zero-extension into ubfiz.
+func ubfiz2(x uint32) uint64 {
return uint64(x+1) << 3 // arm64:"UBFIZ\t[$]3, R[0-9]+, [$]32",-"LSL"
}
-func ubfiz4(x uint16) uint64 {
+func ubfiz3(x uint16) uint64 {
return uint64(x+1) << 3 // arm64:"UBFIZ\t[$]3, R[0-9]+, [$]16",-"LSL"
}
-func ubfiz5(x uint8) uint64 {
+func ubfiz4(x uint8) uint64 {
return uint64(x+1) << 3 // arm64:"UBFIZ\t[$]3, R[0-9]+, [$]8",-"LSL"
}
-func ubfiz6(x uint64) uint64 {
- // arm64:"UBFIZ\t[$]1, R[0-9]+, [$]60",-"LSL",-"LSR"
- // s390x:"RISBGZ\t[$]3, [$]62, [$]1, ",-"SLD",-"SRD"
- return (x << 4) >> 3
+func ubfiz5(x uint8) uint64 {
+ return uint64(x) << 60 // arm64:"UBFIZ\t[$]60, R[0-9]+, [$]4",-"LSL"
+}
+
+func ubfiz6(x uint32) uint64 {
+ return uint64(x << 30) // arm64:"UBFIZ\t[$]30, R[0-9]+, [$]2",
+}
+
+func ubfiz7(x uint16) uint64 {
+ return uint64(x << 10) // arm64:"UBFIZ\t[$]10, R[0-9]+, [$]6",
+}
+
+func ubfiz8(x uint8) uint64 {
+ return uint64(x << 7) // arm64:"UBFIZ\t[$]7, R[0-9]+, [$]1",
+}
+
+// merge ANDconst into ubfiz.
+func ubfiz9(x uint64) uint64 {
+ // arm64:"UBFIZ\t[$]3, R[0-9]+, [$]12",-"LSL",-"AND"
+ // s390x:"RISBGZ\t[$]49, [$]60, [$]3,",-"SLD",-"AND"
+ return (x & 0xfff) << 3
+}
+
+func ubfiz10(x uint64) uint64 {
+ // arm64:"UBFIZ\t[$]4, R[0-9]+, [$]12",-"LSL",-"AND"
+ // s390x:"RISBGZ\t[$]48, [$]59, [$]4,",-"SLD",-"AND"
+ return (x << 4) & 0xfff0
}
-func ubfiz7(x uint32) uint32 {
+// ubfiz combinations
+func ubfiz11(x uint32) uint32 {
// arm64:"UBFIZ\t[$]1, R[0-9]+, [$]28",-"LSL",-"LSR"
return (x << 4) >> 3
}
-func ubfiz8(x uint64) uint64 {
+func ubfiz12(x uint64) uint64 {
// arm64:"UBFIZ\t[$]1, R[0-9]+, [$]20",-"LSL",-"LSR"
// s390x:"RISBGZ\t[$]43, [$]62, [$]1, ",-"SLD",-"SRD",-"AND"
return ((x & 0xfffff) << 4) >> 3
}
-func ubfiz9(x uint64) uint64 {
+func ubfiz13(x uint64) uint64 {
// arm64:"UBFIZ\t[$]5, R[0-9]+, [$]13",-"LSL",-"LSR",-"AND"
return ((x << 3) & 0xffff) << 2
}
-func ubfiz10(x uint64) uint64 {
+func ubfiz14(x uint64) uint64 {
// arm64:"UBFIZ\t[$]7, R[0-9]+, [$]12",-"LSL",-"LSR",-"AND"
// s390x:"RISBGZ\t[$]45, [$]56, [$]7, ",-"SLD",-"SRD",-"AND"
return ((x << 5) & (0xfff << 5)) << 2
}
// ubfx
+// merge shifts into ubfx: (x<<lc)>>rc && lc<rc
func ubfx1(x uint64) uint64 {
- // arm64:"UBFX\t[$]25, R[0-9]+, [$]10",-"LSR",-"AND"
- // s390x:"RISBGZ\t[$]54, [$]63, [$]39, ",-"SRD",-"AND"
- return (x >> 25) & 1023
-}
-
-func ubfx2(x uint64) uint64 {
- // arm64:"UBFX\t[$]4, R[0-9]+, [$]8",-"LSR",-"AND"
- // s390x:"RISBGZ\t[$]56, [$]63, [$]60, ",-"SRD",-"AND"
- return (x & 0x0ff0) >> 4
+ // arm64:"UBFX\t[$]1, R[0-9]+, [$]62",-"LSL",-"LSR"
+ // s390x:"RISBGZ\t[$]2, [$]63, [$]63,",-"SLD",-"SRD"
+ return (x << 1) >> 2
}
-func ubfx3(x uint32) uint64 {
+// merge shift and zero-extension into ubfx.
+func ubfx2(x uint32) uint64 {
return uint64(x >> 15) // arm64:"UBFX\t[$]15, R[0-9]+, [$]17",-"LSR"
}
-func ubfx4(x uint16) uint64 {
+func ubfx3(x uint16) uint64 {
return uint64(x >> 9) // arm64:"UBFX\t[$]9, R[0-9]+, [$]7",-"LSR"
}
-func ubfx5(x uint8) uint64 {
+func ubfx4(x uint8) uint64 {
return uint64(x >> 3) // arm64:"UBFX\t[$]3, R[0-9]+, [$]5",-"LSR"
}
-func ubfx6(x uint64) uint64 {
- // arm64:"UBFX\t[$]1, R[0-9]+, [$]62",-"LSL",-"LSR"
- // s390x:"RISBGZ\t[$]2, [$]63, [$]63,",-"SLD",-"SRD"
- return (x << 1) >> 2
+func ubfx5(x uint32) uint64 {
+ return uint64(x) >> 30 // arm64:"UBFX\t[$]30, R[0-9]+, [$]2"
+}
+
+func ubfx6(x uint16) uint64 {
+ return uint64(x) >> 10 // arm64:"UBFX\t[$]10, R[0-9]+, [$]6"
+}
+
+func ubfx7(x uint8) uint64 {
+ return uint64(x) >> 3 // arm64:"UBFX\t[$]3, R[0-9]+, [$]5"
+}
+
+// merge ANDconst into ubfx.
+func ubfx8(x uint64) uint64 {
+ // arm64:"UBFX\t[$]25, R[0-9]+, [$]10",-"LSR",-"AND"
+ // s390x:"RISBGZ\t[$]54, [$]63, [$]39, ",-"SRD",-"AND"
+ return (x >> 25) & 1023
}
-func ubfx7(x uint32) uint32 {
+func ubfx9(x uint64) uint64 {
+ // arm64:"UBFX\t[$]4, R[0-9]+, [$]8",-"LSR",-"AND"
+ // s390x:"RISBGZ\t[$]56, [$]63, [$]60, ",-"SRD",-"AND"
+ return (x & 0x0ff0) >> 4
+}
+
+// ubfx combinations.
+func ubfx10(x uint32) uint32 {
// arm64:"UBFX\t[$]1, R[0-9]+, [$]30",-"LSL",-"LSR"
return (x << 1) >> 2
}
-func ubfx8(x uint64) uint64 {
+func ubfx11(x uint64) uint64 {
// arm64:"UBFX\t[$]1, R[0-9]+, [$]12",-"LSL",-"LSR",-"AND"
// s390x:"RISBGZ\t[$]52, [$]63, [$]63,",-"SLD",-"SRD",-"AND"
return ((x << 1) >> 2) & 0xfff
}
-func ubfx9(x uint64) uint64 {
+func ubfx12(x uint64) uint64 {
// arm64:"UBFX\t[$]4, R[0-9]+, [$]11",-"LSL",-"LSR",-"AND"
// s390x:"RISBGZ\t[$]53, [$]63, [$]60, ",-"SLD",-"SRD",-"AND"
return ((x >> 3) & 0xfff) >> 1
}
-func ubfx10(x uint64) uint64 {
+func ubfx13(x uint64) uint64 {
// arm64:"UBFX\t[$]5, R[0-9]+, [$]56",-"LSL",-"LSR"
// s390x:"RISBGZ\t[$]8, [$]63, [$]59, ",-"SLD",-"SRD"
return ((x >> 2) << 5) >> 8
}
-func ubfx11(x uint64) uint64 {
+func ubfx14(x uint64) uint64 {
// arm64:"UBFX\t[$]1, R[0-9]+, [$]19",-"LSL",-"LSR"
// s390x:"RISBGZ\t[$]45, [$]63, [$]63, ",-"SLD",-"SRD",-"AND"
return ((x & 0xfffff) << 3) >> 4
}
// merge ubfx and zero-extension into ubfx.
-func ubfx12(x uint64) bool {
+func ubfx15(x uint64) bool {
midr := x + 10
part_num := uint16((midr >> 4) & 0xfff)
if part_num == 0xd0c { // arm64:"UBFX\t[$]4, R[0-9]+, [$]12",-"MOVHU\tR[0-9]+, R[0-9]+"