author    Keith Randall <khr@golang.org>      2022-12-09 19:49:53 -0800
committer David Chase <drchase@google.com>    2022-12-19 21:45:00 +0000
commit    da744b195587474281adc8f5678e61370de19fca (patch)
tree      6d02f365dd54414d32eaa8566499b64f896ea064
parent    7683c71ae1cfe016cc283d630460395a61174ad1 (diff)
[release-branch.go1.19] cmd/compile: fix conditional move rule on PPC64
Similar to CL 456556 but for ppc64 instead of arm64.

Change docs about how booleans are stored in registers for ppc64.
We now don't promise to keep the upper bits zeroed; they might be junk.

To test, I changed the boolean generation instructions (MOVBZload* and
ISEL* with boolean type) to OR in 0x100 to the result. all.bash still
passed, so I think nothing else is depending on the upper bits of booleans.

Update #57212

Change-Id: Ie66f8934a0dafa34d0a8c2a37324868d959a852c
Reviewed-on: https://go-review.googlesource.com/c/go/+/456437
Run-TryBot: Keith Randall <khr@golang.org>
Reviewed-by: Cherry Mui <cherryyz@google.com>
Reviewed-by: Keith Randall <khr@google.com>
TryBot-Result: Gopher Robot <gobot@golang.org>
Reviewed-by: KAMPANAT THUMWONG (KONG PC) <1992kongpc.kth@gmail.com>
Run-TryBot: Archana Ravindar <aravind5@in.ibm.com>
Reviewed-on: https://go-review.googlesource.com/c/go/+/456736
Reviewed-by: Than McIntosh <thanm@google.com>
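[Editorial note: the kind of Go source this lowering applies to looks like the sketch below. It is an illustrative example only, not taken from the CL; on ppc64 the compiler may turn the branch into a CondSelect and then an ISEL, which is where the rule being fixed applies. The function name sel is made up.]

package main

//go:noinline
func sel(b bool, x, y int64) int64 {
	r := y
	if b { // on ppc64 this branch may be lowered to CondSelect and then ISEL
		r = x
	}
	return r
}

func main() {
	println(sel(true, 1, 2), sel(false, 1, 2)) // expect: 1 2
}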
-rw-r--r--   src/cmd/compile/internal/ssa/gen/PPC64.rules    4
-rw-r--r--   src/cmd/compile/internal/ssa/gen/PPC64Ops.go    4
-rw-r--r--   src/cmd/compile/internal/ssa/rewritePPC64.go   25
3 files changed, 20 insertions, 13 deletions
diff --git a/src/cmd/compile/internal/ssa/gen/PPC64.rules b/src/cmd/compile/internal/ssa/gen/PPC64.rules
index 834bf4ab8a..1ecef32f09 100644
--- a/src/cmd/compile/internal/ssa/gen/PPC64.rules
+++ b/src/cmd/compile/internal/ssa/gen/PPC64.rules
@@ -578,9 +578,9 @@
 ((EQ|NE|LT|LE|GT|GE) (CMPconst [0] z:(XOR x y)) yes no) && z.Uses == 1 => ((EQ|NE|LT|LE|GT|GE) (XORCC x y) yes no)
 
 // Only lower after bool is lowered. It should always lower. This helps ensure the folding below happens reliably.
-(CondSelect x y bool) && flagArg(bool) == nil => (ISEL [6] x y (CMPWconst [0] bool))
+(CondSelect x y bool) && flagArg(bool) == nil => (ISEL [6] x y (Select1 <types.TypeFlags> (ANDCCconst [1] bool)))
 // Fold any CR -> GPR -> CR transfers when applying the above rule.
-(ISEL [6] x y (CMPWconst [0] (ISELB [c] one cmp))) => (ISEL [c] x y cmp)
+(ISEL [6] x y (Select1 (ANDCCconst [1] (ISELB [c] one cmp)))) => (ISEL [c] x y cmp)
 
 // Lowering loads
 (Load <t> ptr mem) && (is64BitInt(t) || isPtr(t)) => (MOVDload ptr mem)
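[Editorial note: the behavioral difference between the old and new right-hand sides can be sketched in plain Go. This is an illustration, not compiler code: the boolean's register is modeled as a uint64 whose upper bits may hold junk; CMPWconst [0] corresponds to testing the whole low 32 bits, while ANDCCconst [1] tests only bit 0.]

package main

// iselOld mirrors the old lowering ISEL [6] x y (CMPWconst [0] b):
// x is selected whenever the low 32 bits of b are non-zero, so junk
// above bit 0 can flip the result.
func iselOld(b uint64, x, y int64) int64 {
	if uint32(b) != 0 {
		return x
	}
	return y
}

// iselNew mirrors the new lowering ISEL [6] x y (Select1 (ANDCCconst [1] b)):
// only bit 0 of b decides the selection.
func iselNew(b uint64, x, y int64) int64 {
	if b&1 != 0 {
		return x
	}
	return y
}

func main() {
	junkFalse := uint64(0x100)        // a "false" boolean whose upper bits hold junk
	println(iselOld(junkFalse, 1, 2)) // 1: the junk bit wrongly selects x
	println(iselNew(junkFalse, 1, 2)) // 2: correct, junk above bit 0 is ignored
}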
diff --git a/src/cmd/compile/internal/ssa/gen/PPC64Ops.go b/src/cmd/compile/internal/ssa/gen/PPC64Ops.go
index a4d906ddd3..166065fc45 100644
--- a/src/cmd/compile/internal/ssa/gen/PPC64Ops.go
+++ b/src/cmd/compile/internal/ssa/gen/PPC64Ops.go
@@ -11,8 +11,8 @@ import "strings"
 
 // Notes:
 //  - Less-than-64-bit integer types live in the low portion of registers.
-//    For now, the upper portion is junk; sign/zero-extension might be optimized in the future, but not yet.
-//  - Boolean types are zero or 1; stored in a byte, but loaded with AMOVBZ so the upper bytes of a register are zero.
+//    The upper portion is junk.
+//  - Boolean types are zero or 1; stored in a byte, with upper bytes of the register containing junk.
 //  - *const instructions may use a constant larger than the instruction can encode.
 //    In this case the assembler expands to multiple instructions and uses tmp
 //    register (R31).
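[Editorial note: the commit message's smoke test — OR 0x100 into every boolean-producing instruction and see whether anything downstream still assumes zeroed upper bits — can be modeled in plain Go as below. This is a hypothetical sketch of the idea, not the compiler change itself: materialize and consume are made-up names, and the register is modeled as a uint64.]

package main

// materialize models a boolean-producing instruction under the revised
// convention: bit 0 carries the value and the upper bits may be junk.
// ORing in 0x100 imitates the stress test described in the commit message.
func materialize(b bool) uint64 {
	v := uint64(0)
	if b {
		v = 1
	}
	return v | 0x100 // deliberately poison the upper bits
}

// consume models a correct consumer of such a value: it looks only at bit 0.
func consume(reg uint64, x, y int64) int64 {
	if reg&1 != 0 {
		return x
	}
	return y
}

func main() {
	println(consume(materialize(true), 1, 2))  // 1
	println(consume(materialize(false), 1, 2)) // 2: junk upper bits are ignored
}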
diff --git a/src/cmd/compile/internal/ssa/rewritePPC64.go b/src/cmd/compile/internal/ssa/rewritePPC64.go
index 8d6f976d74..2ade69f91b 100644
--- a/src/cmd/compile/internal/ssa/rewritePPC64.go
+++ b/src/cmd/compile/internal/ssa/rewritePPC64.go
@@ -1175,9 +1175,10 @@ func rewriteValuePPC64_OpCondSelect(v *Value) bool {
 	v_1 := v.Args[1]
 	v_0 := v.Args[0]
 	b := v.Block
+	typ := &b.Func.Config.Types
 	// match: (CondSelect x y bool)
 	// cond: flagArg(bool) == nil
-	// result: (ISEL [6] x y (CMPWconst [0] bool))
+	// result: (ISEL [6] x y (Select1 <types.TypeFlags> (ANDCCconst [1] bool)))
 	for {
 		x := v_0
 		y := v_1
@@ -1187,9 +1188,11 @@ func rewriteValuePPC64_OpCondSelect(v *Value) bool {
 		}
 		v.reset(OpPPC64ISEL)
 		v.AuxInt = int32ToAuxInt(6)
-		v0 := b.NewValue0(v.Pos, OpPPC64CMPWconst, types.TypeFlags)
-		v0.AuxInt = int32ToAuxInt(0)
-		v0.AddArg(bool)
+		v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
+		v1 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags))
+		v1.AuxInt = int64ToAuxInt(1)
+		v1.AddArg(bool)
+		v0.AddArg(v1)
 		v.AddArg3(x, y, v0)
 		return true
 	}
@@ -5931,7 +5934,7 @@ func rewriteValuePPC64_OpPPC64ISEL(v *Value) bool {
 		v.AddArg(y)
 		return true
 	}
-	// match: (ISEL [6] x y (CMPWconst [0] (ISELB [c] one cmp)))
+	// match: (ISEL [6] x y (Select1 (ANDCCconst [1] (ISELB [c] one cmp))))
 	// result: (ISEL [c] x y cmp)
 	for {
 		if auxIntToInt32(v.AuxInt) != 6 {
@@ -5939,15 +5942,19 @@ func rewriteValuePPC64_OpPPC64ISEL(v *Value) bool {
 		}
 		x := v_0
 		y := v_1
-		if v_2.Op != OpPPC64CMPWconst || auxIntToInt32(v_2.AuxInt) != 0 {
+		if v_2.Op != OpSelect1 {
 			break
 		}
 		v_2_0 := v_2.Args[0]
-		if v_2_0.Op != OpPPC64ISELB {
+		if v_2_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_2_0.AuxInt) != 1 {
+			break
+		}
+		v_2_0_0 := v_2_0.Args[0]
+		if v_2_0_0.Op != OpPPC64ISELB {
 			break
 		}
-		c := auxIntToInt32(v_2_0.AuxInt)
-		cmp := v_2_0.Args[1]
+		c := auxIntToInt32(v_2_0_0.AuxInt)
+		cmp := v_2_0_0.Args[1]
 		v.reset(OpPPC64ISEL)
 		v.AuxInt = int32ToAuxInt(c)
 		v.AddArg3(x, y, cmp)