-rw-r--r--  src/cmd/compile/internal/amd64/ssa.go            78
-rw-r--r--  src/cmd/compile/internal/ssa/branchelim.go       27
-rw-r--r--  src/cmd/compile/internal/ssa/branchelim_test.go  244
-rw-r--r--  src/cmd/compile/internal/ssa/export_test.go      2
-rw-r--r--  src/cmd/compile/internal/ssa/gen/AMD64.rules     50
-rw-r--r--  src/cmd/compile/internal/ssa/gen/AMD64Ops.go     57
-rw-r--r--  src/cmd/compile/internal/ssa/opGen.go            641
-rw-r--r--  src/cmd/compile/internal/ssa/rewriteAMD64.go     2578
-rw-r--r--  test/codegen/condmove.go                         178
9 files changed, 3738 insertions(+), 117 deletions(-)
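The new test/codegen/condmove.go appears in the diffstat but its contents are not reproduced below. As a rough sketch of the shape such a codegen test takes (the function body and the asmcheck annotation here are illustrative assumptions, not copied from the CL), it asserts that a simple conditional assignment compiles to a CMOV rather than a branch:

// asmcheck

package codegen

// Illustrative sketch only: a single-assignment diamond that branchelim
// should turn into a CondSelect, which then lowers to a CMOV on amd64.
func cmovint(c int) int {
	x := c + 4
	if c < 0 {
		x = 182
	}
	// amd64:"CMOVQLT"
	return x
}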
diff --git a/src/cmd/compile/internal/amd64/ssa.go b/src/cmd/compile/internal/amd64/ssa.go
index 92d3ec22fc..5becdd018e 100644
--- a/src/cmd/compile/internal/amd64/ssa.go
+++ b/src/cmd/compile/internal/amd64/ssa.go
@@ -398,7 +398,18 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.To.Type = obj.TYPE_REG
p.To.Reg = r
- case ssa.OpAMD64CMOVQEQ, ssa.OpAMD64CMOVLEQ:
+ case ssa.OpAMD64CMOVQEQ, ssa.OpAMD64CMOVLEQ, ssa.OpAMD64CMOVWEQ,
+ ssa.OpAMD64CMOVQLT, ssa.OpAMD64CMOVLLT, ssa.OpAMD64CMOVWLT,
+ ssa.OpAMD64CMOVQNE, ssa.OpAMD64CMOVLNE, ssa.OpAMD64CMOVWNE,
+ ssa.OpAMD64CMOVQGT, ssa.OpAMD64CMOVLGT, ssa.OpAMD64CMOVWGT,
+ ssa.OpAMD64CMOVQLE, ssa.OpAMD64CMOVLLE, ssa.OpAMD64CMOVWLE,
+ ssa.OpAMD64CMOVQGE, ssa.OpAMD64CMOVLGE, ssa.OpAMD64CMOVWGE,
+ ssa.OpAMD64CMOVQHI, ssa.OpAMD64CMOVLHI, ssa.OpAMD64CMOVWHI,
+ ssa.OpAMD64CMOVQLS, ssa.OpAMD64CMOVLLS, ssa.OpAMD64CMOVWLS,
+ ssa.OpAMD64CMOVQCC, ssa.OpAMD64CMOVLCC, ssa.OpAMD64CMOVWCC,
+ ssa.OpAMD64CMOVQCS, ssa.OpAMD64CMOVLCS, ssa.OpAMD64CMOVWCS,
+ ssa.OpAMD64CMOVQGTF, ssa.OpAMD64CMOVLGTF, ssa.OpAMD64CMOVWGTF,
+ ssa.OpAMD64CMOVQGEF, ssa.OpAMD64CMOVLGEF, ssa.OpAMD64CMOVWGEF:
r := v.Reg()
if r != v.Args[0].Reg() {
v.Fatalf("input[0] and output not in same register %s", v.LongString())
@@ -409,6 +420,71 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.To.Type = obj.TYPE_REG
p.To.Reg = r
+ case ssa.OpAMD64CMOVQNEF, ssa.OpAMD64CMOVLNEF, ssa.OpAMD64CMOVWNEF:
+ r := v.Reg()
+ if r != v.Args[0].Reg() {
+ v.Fatalf("input[0] and output not in same register %s", v.LongString())
+ }
+ // Flag condition: ^ZERO || PARITY
+ // Generate:
+ // CMOV*NE SRC,DST
+ // CMOV*PS SRC,DST
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[1].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ var q *obj.Prog
+ if v.Op == ssa.OpAMD64CMOVQNEF {
+ q = s.Prog(x86.ACMOVQPS)
+ } else if v.Op == ssa.OpAMD64CMOVLNEF {
+ q = s.Prog(x86.ACMOVLPS)
+ } else {
+ q = s.Prog(x86.ACMOVWPS)
+ }
+ q.From.Type = obj.TYPE_REG
+ q.From.Reg = v.Args[1].Reg()
+ q.To.Type = obj.TYPE_REG
+ q.To.Reg = r
+
+ case ssa.OpAMD64CMOVQEQF, ssa.OpAMD64CMOVLEQF, ssa.OpAMD64CMOVWEQF:
+ r := v.Reg()
+ if r != v.Args[0].Reg() {
+ v.Fatalf("input[0] and output not in same register %s", v.LongString())
+ }
+
+ // Flag condition: ZERO && !PARITY
+ // Generate:
+ // MOV SRC,AX
+ // CMOV*NE DST,AX
+ // CMOV*PC AX,DST
+ //
+ // TODO(rasky): we could generate:
+ // CMOV*NE DST,SRC
+ // CMOV*PC SRC,DST
+ // But this requires a way for regalloc to know that SRC might be
+ // clobbered by this instruction.
+ if v.Args[1].Reg() != x86.REG_AX {
+ opregreg(s, moveByType(v.Type), x86.REG_AX, v.Args[1].Reg())
+ }
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = r
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = x86.REG_AX
+ var q *obj.Prog
+ if v.Op == ssa.OpAMD64CMOVQEQF {
+ q = s.Prog(x86.ACMOVQPC)
+ } else if v.Op == ssa.OpAMD64CMOVLEQF {
+ q = s.Prog(x86.ACMOVLPC)
+ } else {
+ q = s.Prog(x86.ACMOVWPC)
+ }
+ q.From.Type = obj.TYPE_REG
+ q.From.Reg = x86.REG_AX
+ q.To.Type = obj.TYPE_REG
+ q.To.Reg = r
+
case ssa.OpAMD64MULQconst, ssa.OpAMD64MULLconst:
r := v.Reg()
if r != v.Args[0].Reg() {
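The two floating-point cases above exist because UCOMISS/UCOMISD report an unordered (NaN) comparison through the parity flag: equality requires ZF set and PF clear, while inequality is ZF clear or PF set, so each lowers to a pair of CMOVs as spelled out in the comments. A hedged Go-level sketch of the semantics the CMOV*EQF expansion must preserve (source-level illustration, not compiler code):

// selEqF must return y whenever a or b is NaN, since NaN == NaN is false.
// A single CMOV keyed on ZF alone would wrongly return x for unordered
// inputs, which is why the EQF expansion also consults the parity flag.
func selEqF(a, b float64, x, y int64) int64 {
	if a == b { // true only when the comparison is ordered and equal
		return x
	}
	return y
}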
diff --git a/src/cmd/compile/internal/ssa/branchelim.go b/src/cmd/compile/internal/ssa/branchelim.go
index 54508985b3..75a6b8238c 100644
--- a/src/cmd/compile/internal/ssa/branchelim.go
+++ b/src/cmd/compile/internal/ssa/branchelim.go
@@ -19,7 +19,10 @@ package ssa
// rewrite Phis in the postdominator as CondSelects.
func branchelim(f *Func) {
// FIXME: add support for lowering CondSelects on more architectures
- if f.Config.arch != "arm64" {
+ switch f.Config.arch {
+ case "arm64", "amd64":
+ // implemented
+ default:
return
}
@@ -32,10 +35,22 @@ func branchelim(f *Func) {
}
}
-func canCondSelect(v *Value) bool {
+func canCondSelect(v *Value, arch string) bool {
// For now, stick to simple scalars that fit in registers
- sz := v.Type.Size()
- return sz <= v.Block.Func.Config.RegSize && (v.Type.IsInteger() || v.Type.IsPtrShaped())
+ switch {
+ case v.Type.Size() > v.Block.Func.Config.RegSize:
+ return false
+ case v.Type.IsPtrShaped():
+ return true
+ case v.Type.IsInteger():
+ if arch == "amd64" && v.Type.Size() < 2 {
+ // amd64 doesn't support CMOV with byte registers
+ return false
+ }
+ return true
+ default:
+ return false
+ }
}
func elimIf(f *Func, dom *Block) bool {
@@ -68,7 +83,7 @@ func elimIf(f *Func, dom *Block) bool {
for _, v := range post.Values {
if v.Op == OpPhi {
hasphis = true
- if !canCondSelect(v) {
+ if !canCondSelect(v, f.Config.arch) {
return false
}
}
@@ -169,7 +184,7 @@ func elimIfElse(f *Func, b *Block) bool {
for _, v := range post.Values {
if v.Op == OpPhi {
hasphis = true
- if !canCondSelect(v) {
+ if !canCondSelect(v, f.Config.arch) {
return false
}
}
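For context, the diamond CFGs that branchelim rewrites are ordinary single-assignment conditionals. A sketch of the source shape (illustrative; per canCondSelect above, an int8 result would be rejected on amd64 because there is no byte-sized CMOV):

// A one-armed diamond with a single phi in the join block: after
// branchelim this becomes a CondSelect, which lowers to CMOVL* on amd64.
func max32(a, b int32) int32 {
	r := a
	if b > a {
		r = b
	}
	return r
}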
diff --git a/src/cmd/compile/internal/ssa/branchelim_test.go b/src/cmd/compile/internal/ssa/branchelim_test.go
index 979ba1d961..30bb133f8e 100644
--- a/src/cmd/compile/internal/ssa/branchelim_test.go
+++ b/src/cmd/compile/internal/ssa/branchelim_test.go
@@ -11,128 +11,162 @@ import (
// Test that a trivial 'if' is eliminated
func TestBranchElimIf(t *testing.T) {
- c := testConfig(t)
- c.config.arch = "arm64" // FIXME
- boolType := types.New(types.TBOOL)
- intType := types.New(types.TINT32)
- fun := c.Fun("entry",
- Bloc("entry",
- Valu("start", OpInitMem, types.TypeMem, 0, nil),
- Valu("sb", OpSB, types.TypeInvalid, 0, nil),
- Valu("const1", OpConst32, intType, 1, nil),
- Valu("const2", OpConst32, intType, 2, nil),
- Valu("addr", OpAddr, boolType.PtrTo(), 0, nil, "sb"),
- Valu("cond", OpLoad, boolType, 0, nil, "addr", "start"),
- If("cond", "b2", "b3")),
- Bloc("b2",
- Goto("b3")),
- Bloc("b3",
- Valu("phi", OpPhi, intType, 0, nil, "const1", "const2"),
- Valu("retstore", OpStore, types.TypeMem, 0, nil, "phi", "sb", "start"),
- Exit("retstore")))
+ var testData = []struct {
+ arch string
+ intType string
+ ok bool
+ }{
+ {"arm64", "int32", true},
+ {"amd64", "int32", true},
+ {"amd64", "int8", false},
+ }
- CheckFunc(fun.f)
- branchelim(fun.f)
- CheckFunc(fun.f)
- Deadcode(fun.f)
- CheckFunc(fun.f)
+ for _, data := range testData {
+ t.Run(data.arch+"/"+data.intType, func(t *testing.T) {
+ c := testConfigArch(t, data.arch)
+ boolType := c.config.Types.Bool
+ var intType *types.Type
+ switch data.intType {
+ case "int32":
+ intType = c.config.Types.Int32
+ case "int8":
+ intType = c.config.Types.Int8
+ default:
+ t.Fatal("invalid integer type:", data.intType)
+ }
+ fun := c.Fun("entry",
+ Bloc("entry",
+ Valu("start", OpInitMem, types.TypeMem, 0, nil),
+ Valu("sb", OpSB, types.TypeInvalid, 0, nil),
+ Valu("const1", OpConst32, intType, 1, nil),
+ Valu("const2", OpConst32, intType, 2, nil),
+ Valu("addr", OpAddr, boolType.PtrTo(), 0, nil, "sb"),
+ Valu("cond", OpLoad, boolType, 0, nil, "addr", "start"),
+ If("cond", "b2", "b3")),
+ Bloc("b2",
+ Goto("b3")),
+ Bloc("b3",
+ Valu("phi", OpPhi, intType, 0, nil, "const1", "const2"),
+ Valu("retstore", OpStore, types.TypeMem, 0, nil, "phi", "sb", "start"),
+ Exit("retstore")))
- if len(fun.f.Blocks) != 1 {
- t.Errorf("expected 1 block after branchelim and deadcode; found %d", len(fun.f.Blocks))
- }
- if fun.values["phi"].Op != OpCondSelect {
- t.Errorf("expected phi op to be CondSelect; found op %s", fun.values["phi"].Op)
- }
- if fun.values["phi"].Args[2] != fun.values["cond"] {
- t.Errorf("expected CondSelect condition to be %s; found %s", fun.values["cond"], fun.values["phi"].Args[2])
- }
- if fun.blocks["entry"].Kind != BlockExit {
- t.Errorf("expected entry to be BlockExit; found kind %s", fun.blocks["entry"].Kind.String())
+ CheckFunc(fun.f)
+ branchelim(fun.f)
+ CheckFunc(fun.f)
+ Deadcode(fun.f)
+ CheckFunc(fun.f)
+
+ if data.ok {
+
+ if len(fun.f.Blocks) != 1 {
+ t.Fatalf("expected 1 block after branchelim and deadcode; found %d", len(fun.f.Blocks))
+ }
+ if fun.values["phi"].Op != OpCondSelect {
+ t.Fatalf("expected phi op to be CondSelect; found op %s", fun.values["phi"].Op)
+ }
+ if fun.values["phi"].Args[2] != fun.values["cond"] {
+ t.Errorf("expected CondSelect condition to be %s; found %s", fun.values["cond"], fun.values["phi"].Args[2])
+ }
+ if fun.blocks["entry"].Kind != BlockExit {
+ t.Errorf("expected entry to be BlockExit; found kind %s", fun.blocks["entry"].Kind.String())
+ }
+ } else {
+ if len(fun.f.Blocks) != 3 {
+ t.Fatalf("expected 3 block after branchelim and deadcode; found %d", len(fun.f.Blocks))
+ }
+ }
+ })
}
}
// Test that a trivial if/else is eliminated
func TestBranchElimIfElse(t *testing.T) {
- c := testConfig(t)
- c.config.arch = "arm64" // FIXME
- boolType := types.New(types.TBOOL)
- intType := types.New(types.TINT32)
- fun := c.Fun("entry",
- Bloc("entry",
- Valu("start", OpInitMem, types.TypeMem, 0, nil),
- Valu("sb", OpSB, types.TypeInvalid, 0, nil),
- Valu("const1", OpConst32, intType, 1, nil),
- Valu("const2", OpConst32, intType, 2, nil),
- Valu("addr", OpAddr, boolType.PtrTo(), 0, nil, "sb"),
- Valu("cond", OpLoad, boolType, 0, nil, "addr", "start"),
- If("cond", "b2", "b3")),
- Bloc("b2",
- Goto("b4")),
- Bloc("b3",
- Goto("b4")),
- Bloc("b4",
- Valu("phi", OpPhi, intType, 0, nil, "const1", "const2"),
- Valu("retstore", OpStore, types.TypeMem, 0, nil, "phi", "sb", "start"),
- Exit("retstore")))
+ for _, arch := range []string{"arm64", "amd64"} {
+ t.Run(arch, func(t *testing.T) {
+ c := testConfigArch(t, arch)
+ boolType := c.config.Types.Bool
+ intType := c.config.Types.Int32
+ fun := c.Fun("entry",
+ Bloc("entry",
+ Valu("start", OpInitMem, types.TypeMem, 0, nil),
+ Valu("sb", OpSB, types.TypeInvalid, 0, nil),
+ Valu("const1", OpConst32, intType, 1, nil),
+ Valu("const2", OpConst32, intType, 2, nil),
+ Valu("addr", OpAddr, boolType.PtrTo(), 0, nil, "sb"),
+ Valu("cond", OpLoad, boolType, 0, nil, "addr", "start"),
+ If("cond", "b2", "b3")),
+ Bloc("b2",
+ Goto("b4")),
+ Bloc("b3",
+ Goto("b4")),
+ Bloc("b4",
+ Valu("phi", OpPhi, intType, 0, nil, "const1", "const2"),
+ Valu("retstore", OpStore, types.TypeMem, 0, nil, "phi", "sb", "start"),
+ Exit("retstore")))
- CheckFunc(fun.f)
- branchelim(fun.f)
- CheckFunc(fun.f)
- Deadcode(fun.f)
- CheckFunc(fun.f)
+ CheckFunc(fun.f)
+ branchelim(fun.f)
+ CheckFunc(fun.f)
+ Deadcode(fun.f)
+ CheckFunc(fun.f)
- if len(fun.f.Blocks) != 1 {
- t.Errorf("expected 1 block after branchelim; found %d", len(fun.f.Blocks))
- }
- if fun.values["phi"].Op != OpCondSelect {
- t.Errorf("expected phi op to be CondSelect; found op %s", fun.values["phi"].Op)
- }
- if fun.values["phi"].Args[2] != fun.values["cond"] {
- t.Errorf("expected CondSelect condition to be %s; found %s", fun.values["cond"], fun.values["phi"].Args[2])
- }
- if fun.blocks["entry"].Kind != BlockExit {
- t.Errorf("expected entry to be BlockExit; found kind %s", fun.blocks["entry"].Kind.String())
+ if len(fun.f.Blocks) != 1 {
+ t.Fatalf("expected 1 block after branchelim; found %d", len(fun.f.Blocks))
+ }
+ if fun.values["phi"].Op != OpCondSelect {
+ t.Fatalf("expected phi op to be CondSelect; found op %s", fun.values["phi"].Op)
+ }
+ if fun.values["phi"].Args[2] != fun.values["cond"] {
+ t.Errorf("expected CondSelect condition to be %s; found %s", fun.values["cond"], fun.values["phi"].Args[2])
+ }
+ if fun.blocks["entry"].Kind != BlockExit {
+ t.Errorf("expected entry to be BlockExit; found kind %s", fun.blocks["entry"].Kind.String())
+ }
+ })
}
}
// Test that an if/else CFG that loops back
// into itself does *not* get eliminated.
func TestNoBranchElimLoop(t *testing.T) {
- c := testConfig(t)
- c.config.arch = "arm64" // FIXME
- boolType := types.New(types.TBOOL)
- intType := types.New(types.TINT32)
+ for _, arch := range []string{"arm64", "amd64"} {
+ t.Run(arch, func(t *testing.T) {
+ c := testConfigArch(t, arch)
+ boolType := c.config.Types.Bool
+ intType := c.config.Types.Int32
- // The control flow here is totally bogus,
- // but a dead cycle seems like the only plausible
- // way to arrive at a diamond CFG that is also a loop.
- fun := c.Fun("entry",
- Bloc("entry",
- Valu("start", OpInitMem, types.TypeMem, 0, nil),
- Valu("sb", OpSB, types.TypeInvalid, 0, nil),
- Valu("const2", OpConst32, intType, 2, nil),
- Valu("const3", OpConst32, intType, 3, nil),
- Goto("b5")),
- Bloc("b2",
- Valu("addr", OpAddr, boolType.PtrTo(), 0, nil, "sb"),
- Valu("cond", OpLoad, boolType, 0, nil, "addr", "start"),
- Valu("phi", OpPhi, intType, 0, nil, "const2", "const3"),
- If("cond", "b3", "b4")),
- Bloc("b3",
- Goto("b2")),
- Bloc("b4",
- Goto("b2")),
- Bloc("b5",
- Exit("start")))
+ // The control flow here is totally bogus,
+ // but a dead cycle seems like the only plausible
+ // way to arrive at a diamond CFG that is also a loop.
+ fun := c.Fun("entry",
+ Bloc("entry",
+ Valu("start", OpInitMem, types.TypeMem, 0, nil),
+ Valu("sb", OpSB, types.TypeInvalid, 0, nil),
+ Valu("const2", OpConst32, intType, 2, nil),
+ Valu("const3", OpConst32, intType, 3, nil),
+ Goto("b5")),
+ Bloc("b2",
+ Valu("addr", OpAddr, boolType.PtrTo(), 0, nil, "sb"),
+ Valu("cond", OpLoad, boolType, 0, nil, "addr", "start"),
+ Valu("phi", OpPhi, intType, 0, nil, "const2", "const3"),
+ If("cond", "b3", "b4")),
+ Bloc("b3",
+ Goto("b2")),
+ Bloc("b4",
+ Goto("b2")),
+ Bloc("b5",
+ Exit("start")))
- CheckFunc(fun.f)
- branchelim(fun.f)
- CheckFunc(fun.f)
+ CheckFunc(fun.f)
+ branchelim(fun.f)
+ CheckFunc(fun.f)
- if len(fun.f.Blocks) != 5 {
- t.Errorf("expected 5 block after branchelim; found %d", len(fun.f.Blocks))
- }
- if fun.values["phi"].Op != OpPhi {
- t.Errorf("expected phi op to be CondSelect; found op %s", fun.values["phi"].Op)
+ if len(fun.f.Blocks) != 5 {
+ t.Errorf("expected 5 block after branchelim; found %d", len(fun.f.Blocks))
+ }
+ if fun.values["phi"].Op != OpPhi {
+ t.Errorf("expected phi op to be CondSelect; found op %s", fun.values["phi"].Op)
+ }
+ })
}
}
diff --git a/src/cmd/compile/internal/ssa/export_test.go b/src/cmd/compile/internal/ssa/export_test.go
index 1fe0bbe6ae..8d3bd74fa5 100644
--- a/src/cmd/compile/internal/ssa/export_test.go
+++ b/src/cmd/compile/internal/ssa/export_test.go
@@ -7,6 +7,7 @@ package ssa
import (
"cmd/compile/internal/types"
"cmd/internal/obj"
+ "cmd/internal/obj/arm64"
"cmd/internal/obj/s390x"
"cmd/internal/obj/x86"
"cmd/internal/src"
@@ -22,6 +23,7 @@ var Copyelim = copyelim
var testCtxts = map[string]*obj.Link{
"amd64": obj.Linknew(&x86.Linkamd64),
"s390x": obj.Linknew(&s390x.Links390x),
+ "arm64": obj.Linknew(&arm64.Linkarm64),
}
func testConfig(tb testing.TB) *Conf { return testConfigArch(tb, "amd64") }
diff --git a/src/cmd/compile/internal/ssa/gen/AMD64.rules b/src/cmd/compile/internal/ssa/gen/AMD64.rules
index 647a5d9cd1..ffac45bf66 100644
--- a/src/cmd/compile/internal/ssa/gen/AMD64.rules
+++ b/src/cmd/compile/internal/ssa/gen/AMD64.rules
@@ -475,6 +475,52 @@
(ClosureCall [argwid] entry closure mem) -> (CALLclosure [argwid] entry closure mem)
(InterCall [argwid] entry mem) -> (CALLinter [argwid] entry mem)
+// Lowering conditional moves
+// If the condition is a SETxx, we can just run a CMOV from the comparison that was
+// setting the flags.
+// Legend: HI=unsigned ABOVE, CS=unsigned BELOW, CC=unsigned ABOVE EQUAL, LS=unsigned BELOW EQUAL
+(CondSelect <t> x y (SET(EQ|NE|L|G|LE|GE|A|B|AE|BE|EQF|NEF|GF|GEF) cond)) && (is64BitInt(t) || isPtr(t))
+ -> (CMOVQ(EQ|NE|LT|GT|LE|GE|HI|CS|CC|LS|EQF|NEF|GTF|GEF) y x cond)
+(CondSelect <t> x y (SET(EQ|NE|L|G|LE|GE|A|B|AE|BE|EQF|NEF|GF|GEF) cond)) && is32BitInt(t)
+ -> (CMOVL(EQ|NE|LT|GT|LE|GE|HI|CS|CC|LS|EQF|NEF|GTF|GEF) y x cond)
+(CondSelect <t> x y (SET(EQ|NE|L|G|LE|GE|A|B|AE|BE|EQF|NEF|GF|GEF) cond)) && is16BitInt(t)
+ -> (CMOVW(EQ|NE|LT|GT|LE|GE|HI|CS|CC|LS|EQF|NEF|GTF|GEF) y x cond)
+
+// If the condition does not set the flags, we need to generate a comparison.
+(CondSelect <t> x y check) && !check.Type.IsFlags() && check.Type.Size() == 1
+ -> (CondSelect <t> x y (MOVBQZX <typ.UInt64> check))
+(CondSelect <t> x y check) && !check.Type.IsFlags() && check.Type.Size() == 2
+ -> (CondSelect <t> x y (MOVWQZX <typ.UInt64> check))
+(CondSelect <t> x y check) && !check.Type.IsFlags() && check.Type.Size() == 4
+ -> (CondSelect <t> x y (MOVLQZX <typ.UInt64> check))
+
+(CondSelect <t> x y check) && !check.Type.IsFlags() && check.Type.Size() == 8 && (is64BitInt(t) || isPtr(t))
+ -> (CMOVQNE y x (CMPQconst [0] check))
+(CondSelect <t> x y check) && !check.Type.IsFlags() && check.Type.Size() == 8 && is32BitInt(t)
+ -> (CMOVLNE y x (CMPQconst [0] check))
+(CondSelect <t> x y check) && !check.Type.IsFlags() && check.Type.Size() == 8 && is16BitInt(t)
+ -> (CMOVWNE y x (CMPQconst [0] check))
+
+// Absorb InvertFlags
+(CMOVQ(EQ|NE|LT|GT|LE|GE|HI|CS|CC|LS) x y (InvertFlags cond))
+ -> (CMOVQ(EQ|NE|GT|LT|GE|LE|CS|HI|LS|CC) x y cond)
+(CMOVL(EQ|NE|LT|GT|LE|GE|HI|CS|CC|LS) x y (InvertFlags cond))
+ -> (CMOVL(EQ|NE|GT|LT|GE|LE|CS|HI|LS|CC) x y cond)
+(CMOVW(EQ|NE|LT|GT|LE|GE|HI|CS|CC|LS) x y (InvertFlags cond))
+ -> (CMOVW(EQ|NE|GT|LT|GE|LE|CS|HI|LS|CC) x y cond)
+
+// Absorb constants generated during lower
+(CMOVQ(EQ|LE|GE|CC|LS) _ x (FlagEQ)) -> x
+(CMOVQ(NE|LT|GT|CS|HI) y _ (FlagEQ)) -> y
+(CMOVQ(NE|GT|GE|HI|CC) _ x (FlagGT_UGT)) -> x
+(CMOVQ(EQ|LE|LT|LS|CS) y _ (FlagGT_UGT)) -> y
+(CMOVQ(NE|GT|GE|LS|CS) _ x (FlagGT_ULT)) -> x
+(CMOVQ(EQ|LE|LT|HI|CC) y _ (FlagGT_ULT)) -> y
+(CMOVQ(NE|LT|LE|CS|LS) _ x (FlagLT_ULT)) -> x
+(CMOVQ(EQ|GT|GE|HI|CC) y _ (FlagLT_ULT)) -> y
+(CMOVQ(NE|LT|LE|HI|CC) _ x (FlagLT_UGT)) -> x
+(CMOVQ(EQ|GT|GE|CS|LS) y _ (FlagLT_UGT)) -> y
+
// Miscellaneous
(Convert <t> x mem) && config.PtrSize == 8 -> (MOVQconvert <t> x mem)
(Convert <t> x mem) && config.PtrSize == 4 -> (MOVLconvert <t> x mem)
@@ -1353,6 +1399,10 @@
(CMPLconst x [0]) -> (TESTL x x)
(CMPWconst x [0]) -> (TESTW x x)
(CMPBconst x [0]) -> (TESTB x x)
+(TESTQconst [-1] x) -> (TESTQ x x)
+(TESTLconst [-1] x) -> (TESTL x x)
+(TESTWconst [-1] x) -> (TESTW x x)
+(TESTBconst [-1] x) -> (TESTB x x)
// Combining byte loads into larger (unaligned) loads.
// There are many ways these combinations could occur. This is
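When the CondSelect condition is a materialized bool rather than a SETxx still attached to its flags, the rules above first widen the bool and then compare it against zero. Sketched for a 64-bit select on a bool b (the intermediate forms follow the rules above; the Go function is only an illustration):

// CondSelect <int64> x y b            // b is a 1-byte bool, not flags
//   -> CondSelect <int64> x y (MOVBQZX <uint64> b)   // widen the bool
//   -> CMOVQNE y x (CMPQconst [0] (MOVBQZX <uint64> b))
// The existing compare-with-zero rules then turn the CMPQconst [0]
// into a TESTQ of the widened bool against itself.
func sel64(b bool, x, y int64) int64 {
	if b {
		return x
	}
	return y
}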
diff --git a/src/cmd/compile/internal/ssa/gen/AMD64Ops.go b/src/cmd/compile/internal/ssa/gen/AMD64Ops.go
index c5c2e8ceac..9577890f9a 100644
--- a/src/cmd/compile/internal/ssa/gen/AMD64Ops.go
+++ b/src/cmd/compile/internal/ssa/gen/AMD64Ops.go
@@ -132,6 +132,7 @@ func init() {
gpload = regInfo{inputs: []regMask{gpspsb, 0}, outputs: gponly}
gp21load = regInfo{inputs: []regMask{gp, gpspsb, 0}, outputs: gponly}
gploadidx = regInfo{inputs: []regMask{gpspsb, gpsp, 0}, outputs: gponly}
+ gp21pax = regInfo{inputs: []regMask{gp &^ ax, gp}, outputs: []regMask{gp &^ ax}, clobbers: ax}
gpstore = regInfo{inputs: []regMask{gpspsb, gpsp, 0}}
gpstoreconst = regInfo{inputs: []regMask{gpspsb, 0}}
@@ -340,10 +341,57 @@ func init() {
{name: "BSRQ", argLength: 1, reg: gp11flags, asm: "BSRQ", typ: "(UInt64,Flags)"}, // # of high-order zeroes in 64-bit arg
{name: "BSRL", argLength: 1, reg: gp11flags, asm: "BSRL", typ: "(UInt32,Flags)"}, // # of high-order zeroes in 32-bit arg
- // Note ASM for ops moves whole register
- //
- {name: "CMOVQEQ", argLength: 3, reg: gp21, asm: "CMOVQEQ", resultInArg0: true}, // if arg2 encodes "equal" return arg1 else arg0
- {name: "CMOVLEQ", argLength: 3, reg: gp21, asm: "CMOVLEQ", resultInArg0: true}, // if arg2 encodes "equal" return arg1 else arg0
+ // CMOV instructions: 64, 32 and 16-bit sizes.
+ // if arg2 encodes a true result, return arg1, else arg0
+ {name: "CMOVQEQ", argLength: 3, reg: gp21, asm: "CMOVQEQ", resultInArg0: true},
+ {name: "CMOVQNE", argLength: 3, reg: gp21, asm: "CMOVQNE", resultInArg0: true},
+ {name: "CMOVQLT", argLength: 3, reg: gp21, asm: "CMOVQLT", resultInArg0: true},
+ {name: "CMOVQGT", argLength: 3, reg: gp21, asm: "CMOVQGT", resultInArg0: true},
+ {name: "CMOVQLE", argLength: 3, reg: gp21, asm: "CMOVQLE", resultInArg0: true},
+ {name: "CMOVQGE", argLength: 3, reg: gp21, asm: "CMOVQGE", resultInArg0: true},
+ {name: "CMOVQLS", argLength: 3, reg: gp21, asm: "CMOVQLS", resultInArg0: true},
+ {name: "CMOVQHI", argLength: 3, reg: gp21, asm: "CMOVQHI", resultInArg0: true},
+ {name: "CMOVQCC", argLength: 3, reg: gp21, asm: "CMOVQCC", resultInArg0: true},
+ {name: "CMOVQCS", argLength: 3, reg: gp21, asm: "CMOVQCS", resultInArg0: true},
+
+ {name: "CMOVLEQ", argLength: 3, reg: gp21, asm: "CMOVLEQ", resultInArg0: true},
+ {name: "CMOVLNE", argLength: 3, reg: gp21, asm: "CMOVLNE", resultInArg0: true},
+ {name: "CMOVLLT", argLength: 3, reg: gp21, asm: "CMOVLLT", resultInArg0: true},
+ {name: "CMOVLGT", argLength: 3, reg: gp21, asm: "CMOVLGT", resultInArg0: true},
+ {name: "CMOVLLE", argLength: 3, reg: gp21, asm: "CMOVLLE", resultInArg0: true},
+ {name: "CMOVLGE", argLength: 3, reg: gp21, asm: "CMOVLGE", resultInArg0: true},
+ {name: "CMOVLLS", argLength: 3, reg: gp21, asm: "CMOVLLS", resultInArg0: true},
+ {name: "CMOVLHI", argLength: 3, reg: gp21, asm: "CMOVLHI", resultInArg0: true},
+ {name: "CMOVLCC", argLength: 3, reg: gp21, asm: "CMOVLCC", resultInArg0: true},
+ {name: "CMOVLCS", argLength: 3, reg: gp21, asm: "CMOVLCS", resultInArg0: true},
+
+ {name: "CMOVWEQ", argLength: 3, reg: gp21, asm: "CMOVWEQ", resultInArg0: true},
+ {name: "CMOVWNE", argLength: 3, reg: gp21, asm: "CMOVWNE", resultInArg0: true},
+ {name: "CMOVWLT", argLength: 3, reg: gp21, asm: "CMOVWLT", resultInArg0: true},
+ {name: "CMOVWGT", argLength: 3, reg: gp21, asm: "CMOVWGT", resultInArg0: true},
+ {name: "CMOVWLE", argLength: 3, reg: gp21, asm: "CMOVWLE", resultInArg0: true},
+ {name: "CMOVWGE", argLength: 3, reg: gp21, asm: "CMOVWGE", resultInArg0: true},
+ {name: "CMOVWLS", argLength: 3, reg: gp21, asm: "CMOVWLS", resultInArg0: true},
+ {name: "CMOVWHI", argLength: 3, reg: gp21, asm: "CMOVWHI", resultInArg0: true},
+ {name: "CMOVWCC", argLength: 3, reg: gp21, asm: "CMOVWCC", resultInArg0: true},
+ {name: "CMOVWCS", argLength: 3, reg: gp21, asm: "CMOVWCS", resultInArg0: true},
+
+ // CMOV with floating point instructions. We need separate pseudo-op to handle
+ // InvertFlags correctly, and to generate special code that handles NaN (unordered flag).
+ // NOTE: the fact that CMOV*EQF here is marked to generate CMOV*NE is not a bug. See
+ // code generation in amd64/ssa.go.
+ {name: "CMOVQEQF", argLength: 3, reg: gp21pax, asm: "CMOVQNE", resultInArg0: true},
+ {name: "CMOVQNEF", argLength: 3, reg: gp21, asm: "CMOVQNE", resultInArg0: true},
+ {name: "CMOVQGTF", argLength: 3, reg: gp21, asm: "CMOVQHI", resultInArg0: true},
+ {name: "CMOVQGEF", argLength: 3, reg: gp21, asm: "CMOVQCC", resultInArg0: true},
+ {name: "CMOVLEQF", argLength: 3, reg: gp21, asm: "CMOVLNE", resultInArg0: true},
+ {name: "CMOVLNEF", argLength: 3, reg: gp21, asm: "CMOVLNE", resultInArg0: true},
+ {name: "CMOVLGTF", argLength: 3, reg: gp21, asm: "CMOVLHI", resultInArg0: true},
+ {name: "CMOVLGEF", argLength: 3, reg: gp21, asm: "CMOVLCC", resultInArg0: true},
+ {name: "CMOVWEQF", argLength: 3, reg: gp21, asm: "CMOVWNE", resultInArg0: true},
+ {name: "CMOVWNEF", argLength: 3, reg: gp21, asm: "CMOVWNE", resultInArg0: true},
+ {name: "CMOVWGTF", argLength: 3, reg: gp21, asm: "CMOVWHI", resultInArg0: true},
+ {name: "CMOVWGEF", argLength: 3, reg: gp21, asm: "CMOVWCC", resultInArg0: true},
{name: "BSWAPQ", argLength: 1, reg: gp11, asm: "BSWAPQ", resultInArg0: true, clobberFlags: true}, // arg0 swap bytes
{name: "BSWAPL", argLength: 1, reg: gp11, asm: "BSWAPL", resultInArg0: true, clobberFlags: true}, // arg0 swap bytes
@@ -578,7 +626,6 @@ func init() {
{name: "LoweredGetCallerSP", reg: gp01, rematerializeable: true},
//arg0=ptr,arg1=mem, returns void. Faults if ptr is nil.
{name: "LoweredNilCheck", argLength: 2, reg: regInfo{inputs: []regMask{gpsp}}, clobberFlags: true, nilCheck: true, faultOnNilArg0: true},
-
// LoweredWB invokes runtime.gcWriteBarrier. arg0=destptr, arg1=srcptr, arg2=mem, aux=runtime.gcWriteBarrier
// It saves all GP registers if necessary, but may clobber others.
{name: "LoweredWB", argLength: 3, reg: regInfo{inputs: []regMask{buildReg("DI"), ax}, clobbers: callerSave &^ gp}, clobberFlags: true, aux: "Sym", symEffect: "None"},
diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go
index 48ec74b3d2..2d73208623 100644
--- a/src/cmd/compile/internal/ssa/opGen.go
+++ b/src/cmd/compile/internal/ssa/opGen.go
@@ -560,7 +560,47 @@ const (
OpAMD64BSRQ
OpAMD64BSRL
OpAMD64CMOVQEQ
+ OpAMD64CMOVQNE
+ OpAMD64CMOVQLT
+ OpAMD64CMOVQGT
+ OpAMD64CMOVQLE
+ OpAMD64CMOVQGE
+ OpAMD64CMOVQLS
+ OpAMD64CMOVQHI
+ OpAMD64CMOVQCC
+ OpAMD64CMOVQCS
OpAMD64CMOVLEQ
+ OpAMD64CMOVLNE
+ OpAMD64CMOVLLT
+ OpAMD64CMOVLGT
+ OpAMD64CMOVLLE
+ OpAMD64CMOVLGE
+ OpAMD64CMOVLLS
+ OpAMD64CMOVLHI
+ OpAMD64CMOVLCC
+ OpAMD64CMOVLCS
+ OpAMD64CMOVWEQ
+ OpAMD64CMOVWNE
+ OpAMD64CMOVWLT
+ OpAMD64CMOVWGT
+ OpAMD64CMOVWLE
+ OpAMD64CMOVWGE
+ OpAMD64CMOVWLS
+ OpAMD64CMOVWHI
+ OpAMD64CMOVWCC
+ OpAMD64CMOVWCS
+ OpAMD64CMOVQEQF
+ OpAMD64CMOVQNEF
+ OpAMD64CMOVQGTF
+ OpAMD64CMOVQGEF
+ OpAMD64CMOVLEQF
+ OpAMD64CMOVLNEF
+ OpAMD64CMOVLGTF
+ OpAMD64CMOVLGEF
+ OpAMD64CMOVWEQF
+ OpAMD64CMOVWNEF
+ OpAMD64CMOVWGTF
+ OpAMD64CMOVWGEF
OpAMD64BSWAPQ
OpAMD64BSWAPL
OpAMD64POPCNTQ
@@ -6804,6 +6844,141 @@ var opcodeTable = [...]opInfo{
},
},
{
+ name: "CMOVQNE",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.ACMOVQNE,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "CMOVQLT",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.ACMOVQLT,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "CMOVQGT",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.ACMOVQGT,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "CMOVQLE",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.ACMOVQLE,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "CMOVQGE",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.ACMOVQGE,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "CMOVQLS",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.ACMOVQLS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "CMOVQHI",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.ACMOVQHI,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "CMOVQCC",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.ACMOVQCC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "CMOVQCS",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.ACMOVQCS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
name: "CMOVLEQ",
argLen: 3,
resultInArg0: true,
@@ -6819,6 +6994,472 @@ var opcodeTable = [...]opInfo{
},
},
{
+ name: "CMOVLNE",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.ACMOVLNE,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "CMOVLLT",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.ACMOVLLT,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "CMOVLGT",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.ACMOVLGT,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "CMOVLLE",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.ACMOVLLE,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "CMOVLGE",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.ACMOVLGE,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "CMOVLLS",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.ACMOVLLS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "CMOVLHI",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.ACMOVLHI,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "CMOVLCC",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.ACMOVLCC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "CMOVLCS",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.ACMOVLCS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "CMOVWEQ",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.ACMOVWEQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "CMOVWNE",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.ACMOVWNE,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "CMOVWLT",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.ACMOVWLT,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "CMOVWGT",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.ACMOVWGT,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "CMOVWLE",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.ACMOVWLE,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "CMOVWGE",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.ACMOVWGE,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "CMOVWLS",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.ACMOVWLS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "CMOVWHI",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.ACMOVWHI,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "CMOVWCC",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.ACMOVWCC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "CMOVWCS",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.ACMOVWCS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "CMOVQEQF",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.ACMOVQNE,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65518}, // CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ clobbers: 1, // AX
+ outputs: []outputInfo{
+ {0, 65518}, // CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "CMOVQNEF",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.ACMOVQNE,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "CMOVQGTF",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.ACMOVQHI,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "CMOVQGEF",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.ACMOVQCC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "CMOVLEQF",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.ACMOVLNE,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "CMOVLNEF",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.ACMOVLNE,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "CMOVLGTF",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.ACMOVLHI,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "CMOVLGEF",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.ACMOVLCC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "CMOVWEQF",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.ACMOVWNE,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "CMOVWNEF",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.ACMOVWNE,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "CMOVWGTF",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.ACMOVWHI,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "CMOVWGEF",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.ACMOVWCC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
name: "BSWAPQ",
argLen: 1,
resultInArg0: true,
diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go
index c500c757ef..eb2489ac77 100644
--- a/src/cmd/compile/internal/ssa/rewriteAMD64.go
+++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go
@@ -55,8 +55,66 @@ func rewriteValueAMD64(v *Value) bool {
return rewriteValueAMD64_OpAMD64BSFQ_0(v)
case OpAMD64BTQconst:
return rewriteValueAMD64_OpAMD64BTQconst_0(v)
+ case OpAMD64CMOVLCC:
+ return rewriteValueAMD64_OpAMD64CMOVLCC_0(v)
+ case OpAMD64CMOVLCS:
+ return rewriteValueAMD64_OpAMD64CMOVLCS_0(v)
+ case OpAMD64CMOVLEQ:
+ return rewriteValueAMD64_OpAMD64CMOVLEQ_0(v)
+ case OpAMD64CMOVLGE:
+ return rewriteValueAMD64_OpAMD64CMOVLGE_0(v)
+ case OpAMD64CMOVLGT:
+ return rewriteValueAMD64_OpAMD64CMOVLGT_0(v)
+ case OpAMD64CMOVLHI:
+ return rewriteValueAMD64_OpAMD64CMOVLHI_0(v)
+ case OpAMD64CMOVLLE:
+ return rewriteValueAMD64_OpAMD64CMOVLLE_0(v)
+ case OpAMD64CMOVLLS:
+ return rewriteValueAMD64_OpAMD64CMOVLLS_0(v)
+ case OpAMD64CMOVLLT:
+ return rewriteValueAMD64_OpAMD64CMOVLLT_0(v)
+ case OpAMD64CMOVLNE:
+ return rewriteValueAMD64_OpAMD64CMOVLNE_0(v)
+ case OpAMD64CMOVQCC:
+ return rewriteValueAMD64_OpAMD64CMOVQCC_0(v)
+ case OpAMD64CMOVQCS:
+ return rewriteValueAMD64_OpAMD64CMOVQCS_0(v)
case OpAMD64CMOVQEQ:
return rewriteValueAMD64_OpAMD64CMOVQEQ_0(v)
+ case OpAMD64CMOVQGE:
+ return rewriteValueAMD64_OpAMD64CMOVQGE_0(v)
+ case OpAMD64CMOVQGT:
+ return rewriteValueAMD64_OpAMD64CMOVQGT_0(v)
+ case OpAMD64CMOVQHI:
+ return rewriteValueAMD64_OpAMD64CMOVQHI_0(v)
+ case OpAMD64CMOVQLE:
+ return rewriteValueAMD64_OpAMD64CMOVQLE_0(v)
+ case OpAMD64CMOVQLS:
+ return rewriteValueAMD64_OpAMD64CMOVQLS_0(v)
+ case OpAMD64CMOVQLT:
+ return rewriteValueAMD64_OpAMD64CMOVQLT_0(v)
+ case OpAMD64CMOVQNE:
+ return rewriteValueAMD64_OpAMD64CMOVQNE_0(v)
+ case OpAMD64CMOVWCC:
+ return rewriteValueAMD64_OpAMD64CMOVWCC_0(v)
+ case OpAMD64CMOVWCS:
+ return rewriteValueAMD64_OpAMD64CMOVWCS_0(v)
+ case OpAMD64CMOVWEQ:
+ return rewriteValueAMD64_OpAMD64CMOVWEQ_0(v)
+ case OpAMD64CMOVWGE:
+ return rewriteValueAMD64_OpAMD64CMOVWGE_0(v)
+ case OpAMD64CMOVWGT:
+ return rewriteValueAMD64_OpAMD64CMOVWGT_0(v)
+ case OpAMD64CMOVWHI:
+ return rewriteValueAMD64_OpAMD64CMOVWHI_0(v)
+ case OpAMD64CMOVWLE:
+ return rewriteValueAMD64_OpAMD64CMOVWLE_0(v)
+ case OpAMD64CMOVWLS:
+ return rewriteValueAMD64_OpAMD64CMOVWLS_0(v)
+ case OpAMD64CMOVWLT:
+ return rewriteValueAMD64_OpAMD64CMOVWLT_0(v)
+ case OpAMD64CMOVWNE:
+ return rewriteValueAMD64_OpAMD64CMOVWNE_0(v)
case OpAMD64CMPB:
return rewriteValueAMD64_OpAMD64CMPB_0(v)
case OpAMD64CMPBconst:
@@ -391,12 +449,20 @@ func rewriteValueAMD64(v *Value) bool {
return rewriteValueAMD64_OpAMD64SUBSSmem_0(v)
case OpAMD64TESTB:
return rewriteValueAMD64_OpAMD64TESTB_0(v)
+ case OpAMD64TESTBconst:
+ return rewriteValueAMD64_OpAMD64TESTBconst_0(v)
case OpAMD64TESTL:
return rewriteValueAMD64_OpAMD64TESTL_0(v)
+ case OpAMD64TESTLconst:
+ return rewriteValueAMD64_OpAMD64TESTLconst_0(v)
case OpAMD64TESTQ:
return rewriteValueAMD64_OpAMD64TESTQ_0(v)
+ case OpAMD64TESTQconst:
+ return rewriteValueAMD64_OpAMD64TESTQconst_0(v)
case OpAMD64TESTW:
return rewriteValueAMD64_OpAMD64TESTW_0(v)
+ case OpAMD64TESTWconst:
+ return rewriteValueAMD64_OpAMD64TESTWconst_0(v)
case OpAMD64XADDLlock:
return rewriteValueAMD64_OpAMD64XADDLlock_0(v)
case OpAMD64XADDQlock:
@@ -493,6 +559,8 @@ func rewriteValueAMD64(v *Value) bool {
return rewriteValueAMD64_OpCom64_0(v)
case OpCom8:
return rewriteValueAMD64_OpCom8_0(v)
+ case OpCondSelect:
+ return rewriteValueAMD64_OpCondSelect_0(v) || rewriteValueAMD64_OpCondSelect_10(v) || rewriteValueAMD64_OpCondSelect_20(v) || rewriteValueAMD64_OpCondSelect_30(v) || rewriteValueAMD64_OpCondSelect_40(v)
case OpConst16:
return rewriteValueAMD64_OpConst16_0(v)
case OpConst32:
@@ -3285,7 +3353,502 @@ func rewriteValueAMD64_OpAMD64BTQconst_0(v *Value) bool {
}
return false
}
+func rewriteValueAMD64_OpAMD64CMOVLCC_0(v *Value) bool {
+ // match: (CMOVLCC x y (InvertFlags cond))
+ // cond:
+ // result: (CMOVLLS x y cond)
+ for {
+ _ = v.Args[2]
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpAMD64InvertFlags {
+ break
+ }
+ cond := v_2.Args[0]
+ v.reset(OpAMD64CMOVLLS)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(cond)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64CMOVLCS_0(v *Value) bool {
+ // match: (CMOVLCS x y (InvertFlags cond))
+ // cond:
+ // result: (CMOVLHI x y cond)
+ for {
+ _ = v.Args[2]
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpAMD64InvertFlags {
+ break
+ }
+ cond := v_2.Args[0]
+ v.reset(OpAMD64CMOVLHI)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(cond)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64CMOVLEQ_0(v *Value) bool {
+ // match: (CMOVLEQ x y (InvertFlags cond))
+ // cond:
+ // result: (CMOVLEQ x y cond)
+ for {
+ _ = v.Args[2]
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpAMD64InvertFlags {
+ break
+ }
+ cond := v_2.Args[0]
+ v.reset(OpAMD64CMOVLEQ)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(cond)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64CMOVLGE_0(v *Value) bool {
+ // match: (CMOVLGE x y (InvertFlags cond))
+ // cond:
+ // result: (CMOVLLE x y cond)
+ for {
+ _ = v.Args[2]
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpAMD64InvertFlags {
+ break
+ }
+ cond := v_2.Args[0]
+ v.reset(OpAMD64CMOVLLE)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(cond)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64CMOVLGT_0(v *Value) bool {
+ // match: (CMOVLGT x y (InvertFlags cond))
+ // cond:
+ // result: (CMOVLLT x y cond)
+ for {
+ _ = v.Args[2]
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpAMD64InvertFlags {
+ break
+ }
+ cond := v_2.Args[0]
+ v.reset(OpAMD64CMOVLLT)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(cond)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64CMOVLHI_0(v *Value) bool {
+ // match: (CMOVLHI x y (InvertFlags cond))
+ // cond:
+ // result: (CMOVLCS x y cond)
+ for {
+ _ = v.Args[2]
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpAMD64InvertFlags {
+ break
+ }
+ cond := v_2.Args[0]
+ v.reset(OpAMD64CMOVLCS)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(cond)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64CMOVLLE_0(v *Value) bool {
+ // match: (CMOVLLE x y (InvertFlags cond))
+ // cond:
+ // result: (CMOVLGE x y cond)
+ for {
+ _ = v.Args[2]
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpAMD64InvertFlags {
+ break
+ }
+ cond := v_2.Args[0]
+ v.reset(OpAMD64CMOVLGE)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(cond)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64CMOVLLS_0(v *Value) bool {
+ // match: (CMOVLLS x y (InvertFlags cond))
+ // cond:
+ // result: (CMOVLCC x y cond)
+ for {
+ _ = v.Args[2]
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpAMD64InvertFlags {
+ break
+ }
+ cond := v_2.Args[0]
+ v.reset(OpAMD64CMOVLCC)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(cond)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64CMOVLLT_0(v *Value) bool {
+ // match: (CMOVLLT x y (InvertFlags cond))
+ // cond:
+ // result: (CMOVLGT x y cond)
+ for {
+ _ = v.Args[2]
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpAMD64InvertFlags {
+ break
+ }
+ cond := v_2.Args[0]
+ v.reset(OpAMD64CMOVLGT)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(cond)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64CMOVLNE_0(v *Value) bool {
+ // match: (CMOVLNE x y (InvertFlags cond))
+ // cond:
+ // result: (CMOVLNE x y cond)
+ for {
+ _ = v.Args[2]
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpAMD64InvertFlags {
+ break
+ }
+ cond := v_2.Args[0]
+ v.reset(OpAMD64CMOVLNE)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(cond)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64CMOVQCC_0(v *Value) bool {
+ // match: (CMOVQCC x y (InvertFlags cond))
+ // cond:
+ // result: (CMOVQLS x y cond)
+ for {
+ _ = v.Args[2]
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpAMD64InvertFlags {
+ break
+ }
+ cond := v_2.Args[0]
+ v.reset(OpAMD64CMOVQLS)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(cond)
+ return true
+ }
+ // match: (CMOVQCC _ x (FlagEQ))
+ // cond:
+ // result: x
+ for {
+ _ = v.Args[2]
+ x := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpAMD64FlagEQ {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+ // match: (CMOVQCC _ x (FlagGT_UGT))
+ // cond:
+ // result: x
+ for {
+ _ = v.Args[2]
+ x := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpAMD64FlagGT_UGT {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+ // match: (CMOVQCC y _ (FlagGT_ULT))
+ // cond:
+ // result: y
+ for {
+ _ = v.Args[2]
+ y := v.Args[0]
+ v_2 := v.Args[2]
+ if v_2.Op != OpAMD64FlagGT_ULT {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = y.Type
+ v.AddArg(y)
+ return true
+ }
+ // match: (CMOVQCC y _ (FlagLT_ULT))
+ // cond:
+ // result: y
+ for {
+ _ = v.Args[2]
+ y := v.Args[0]
+ v_2 := v.Args[2]
+ if v_2.Op != OpAMD64FlagLT_ULT {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = y.Type
+ v.AddArg(y)
+ return true
+ }
+ // match: (CMOVQCC _ x (FlagLT_UGT))
+ // cond:
+ // result: x
+ for {
+ _ = v.Args[2]
+ x := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpAMD64FlagLT_UGT {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64CMOVQCS_0(v *Value) bool {
+ // match: (CMOVQCS x y (InvertFlags cond))
+ // cond:
+ // result: (CMOVQHI x y cond)
+ for {
+ _ = v.Args[2]
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpAMD64InvertFlags {
+ break
+ }
+ cond := v_2.Args[0]
+ v.reset(OpAMD64CMOVQHI)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(cond)
+ return true
+ }
+ // match: (CMOVQCS y _ (FlagEQ))
+ // cond:
+ // result: y
+ for {
+ _ = v.Args[2]
+ y := v.Args[0]
+ v_2 := v.Args[2]
+ if v_2.Op != OpAMD64FlagEQ {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = y.Type
+ v.AddArg(y)
+ return true
+ }
+ // match: (CMOVQCS y _ (FlagGT_UGT))
+ // cond:
+ // result: y
+ for {
+ _ = v.Args[2]
+ y := v.Args[0]
+ v_2 := v.Args[2]
+ if v_2.Op != OpAMD64FlagGT_UGT {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = y.Type
+ v.AddArg(y)
+ return true
+ }
+ // match: (CMOVQCS _ x (FlagGT_ULT))
+ // cond:
+ // result: x
+ for {
+ _ = v.Args[2]
+ x := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpAMD64FlagGT_ULT {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+ // match: (CMOVQCS _ x (FlagLT_ULT))
+ // cond:
+ // result: x
+ for {
+ _ = v.Args[2]
+ x := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpAMD64FlagLT_ULT {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+ // match: (CMOVQCS y _ (FlagLT_UGT))
+ // cond:
+ // result: y
+ for {
+ _ = v.Args[2]
+ y := v.Args[0]
+ v_2 := v.Args[2]
+ if v_2.Op != OpAMD64FlagLT_UGT {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = y.Type
+ v.AddArg(y)
+ return true
+ }
+ return false
+}
func rewriteValueAMD64_OpAMD64CMOVQEQ_0(v *Value) bool {
+ // match: (CMOVQEQ x y (InvertFlags cond))
+ // cond:
+ // result: (CMOVQEQ x y cond)
+ for {
+ _ = v.Args[2]
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpAMD64InvertFlags {
+ break
+ }
+ cond := v_2.Args[0]
+ v.reset(OpAMD64CMOVQEQ)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(cond)
+ return true
+ }
+ // match: (CMOVQEQ _ x (FlagEQ))
+ // cond:
+ // result: x
+ for {
+ _ = v.Args[2]
+ x := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpAMD64FlagEQ {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+ // match: (CMOVQEQ y _ (FlagGT_UGT))
+ // cond:
+ // result: y
+ for {
+ _ = v.Args[2]
+ y := v.Args[0]
+ v_2 := v.Args[2]
+ if v_2.Op != OpAMD64FlagGT_UGT {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = y.Type
+ v.AddArg(y)
+ return true
+ }
+ // match: (CMOVQEQ y _ (FlagGT_ULT))
+ // cond:
+ // result: y
+ for {
+ _ = v.Args[2]
+ y := v.Args[0]
+ v_2 := v.Args[2]
+ if v_2.Op != OpAMD64FlagGT_ULT {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = y.Type
+ v.AddArg(y)
+ return true
+ }
+ // match: (CMOVQEQ y _ (FlagLT_ULT))
+ // cond:
+ // result: y
+ for {
+ _ = v.Args[2]
+ y := v.Args[0]
+ v_2 := v.Args[2]
+ if v_2.Op != OpAMD64FlagLT_ULT {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = y.Type
+ v.AddArg(y)
+ return true
+ }
+ // match: (CMOVQEQ y _ (FlagLT_UGT))
+ // cond:
+ // result: y
+ for {
+ _ = v.Args[2]
+ y := v.Args[0]
+ v_2 := v.Args[2]
+ if v_2.Op != OpAMD64FlagLT_UGT {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = y.Type
+ v.AddArg(y)
+ return true
+ }
// match: (CMOVQEQ x _ (Select1 (BSFQ (ORQconst [c] _))))
// cond: c != 0
// result: x
@@ -3315,6 +3878,888 @@ func rewriteValueAMD64_OpAMD64CMOVQEQ_0(v *Value) bool {
}
return false
}
+func rewriteValueAMD64_OpAMD64CMOVQGE_0(v *Value) bool {
+ // match: (CMOVQGE x y (InvertFlags cond))
+ // cond:
+ // result: (CMOVQLE x y cond)
+ for {
+ _ = v.Args[2]
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpAMD64InvertFlags {
+ break
+ }
+ cond := v_2.Args[0]
+ v.reset(OpAMD64CMOVQLE)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(cond)
+ return true
+ }
+ // match: (CMOVQGE _ x (FlagEQ))
+ // cond:
+ // result: x
+ for {
+ _ = v.Args[2]
+ x := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpAMD64FlagEQ {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+ // match: (CMOVQGE _ x (FlagGT_UGT))
+ // cond:
+ // result: x
+ for {
+ _ = v.Args[2]
+ x := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpAMD64FlagGT_UGT {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+ // match: (CMOVQGE _ x (FlagGT_ULT))
+ // cond:
+ // result: x
+ for {
+ _ = v.Args[2]
+ x := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpAMD64FlagGT_ULT {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+ // match: (CMOVQGE y _ (FlagLT_ULT))
+ // cond:
+ // result: y
+ for {
+ _ = v.Args[2]
+ y := v.Args[0]
+ v_2 := v.Args[2]
+ if v_2.Op != OpAMD64FlagLT_ULT {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = y.Type
+ v.AddArg(y)
+ return true
+ }
+ // match: (CMOVQGE y _ (FlagLT_UGT))
+ // cond:
+ // result: y
+ for {
+ _ = v.Args[2]
+ y := v.Args[0]
+ v_2 := v.Args[2]
+ if v_2.Op != OpAMD64FlagLT_UGT {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = y.Type
+ v.AddArg(y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64CMOVQGT_0(v *Value) bool {
+ // match: (CMOVQGT x y (InvertFlags cond))
+ // cond:
+ // result: (CMOVQLT x y cond)
+ for {
+ _ = v.Args[2]
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpAMD64InvertFlags {
+ break
+ }
+ cond := v_2.Args[0]
+ v.reset(OpAMD64CMOVQLT)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(cond)
+ return true
+ }
+ // match: (CMOVQGT y _ (FlagEQ))
+ // cond:
+ // result: y
+ for {
+ _ = v.Args[2]
+ y := v.Args[0]
+ v_2 := v.Args[2]
+ if v_2.Op != OpAMD64FlagEQ {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = y.Type
+ v.AddArg(y)
+ return true
+ }
+ // match: (CMOVQGT _ x (FlagGT_UGT))
+ // cond:
+ // result: x
+ for {
+ _ = v.Args[2]
+ x := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpAMD64FlagGT_UGT {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+ // match: (CMOVQGT _ x (FlagGT_ULT))
+ // cond:
+ // result: x
+ for {
+ _ = v.Args[2]
+ x := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpAMD64FlagGT_ULT {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+ // match: (CMOVQGT y _ (FlagLT_ULT))
+ // cond:
+ // result: y
+ for {
+ _ = v.Args[2]
+ y := v.Args[0]
+ v_2 := v.Args[2]
+ if v_2.Op != OpAMD64FlagLT_ULT {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = y.Type
+ v.AddArg(y)
+ return true
+ }
+ // match: (CMOVQGT y _ (FlagLT_UGT))
+ // cond:
+ // result: y
+ for {
+ _ = v.Args[2]
+ y := v.Args[0]
+ v_2 := v.Args[2]
+ if v_2.Op != OpAMD64FlagLT_UGT {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = y.Type
+ v.AddArg(y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64CMOVQHI_0(v *Value) bool {
+ // match: (CMOVQHI x y (InvertFlags cond))
+ // cond:
+ // result: (CMOVQCS x y cond)
+ for {
+ _ = v.Args[2]
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpAMD64InvertFlags {
+ break
+ }
+ cond := v_2.Args[0]
+ v.reset(OpAMD64CMOVQCS)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(cond)
+ return true
+ }
+ // match: (CMOVQHI y _ (FlagEQ))
+ // cond:
+ // result: y
+ for {
+ _ = v.Args[2]
+ y := v.Args[0]
+ v_2 := v.Args[2]
+ if v_2.Op != OpAMD64FlagEQ {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = y.Type
+ v.AddArg(y)
+ return true
+ }
+ // match: (CMOVQHI _ x (FlagGT_UGT))
+ // cond:
+ // result: x
+ for {
+ _ = v.Args[2]
+ x := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpAMD64FlagGT_UGT {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+ // match: (CMOVQHI y _ (FlagGT_ULT))
+ // cond:
+ // result: y
+ for {
+ _ = v.Args[2]
+ y := v.Args[0]
+ v_2 := v.Args[2]
+ if v_2.Op != OpAMD64FlagGT_ULT {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = y.Type
+ v.AddArg(y)
+ return true
+ }
+ // match: (CMOVQHI y _ (FlagLT_ULT))
+ // cond:
+ // result: y
+ for {
+ _ = v.Args[2]
+ y := v.Args[0]
+ v_2 := v.Args[2]
+ if v_2.Op != OpAMD64FlagLT_ULT {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = y.Type
+ v.AddArg(y)
+ return true
+ }
+ // match: (CMOVQHI _ x (FlagLT_UGT))
+ // cond:
+ // result: x
+ for {
+ _ = v.Args[2]
+ x := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpAMD64FlagLT_UGT {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64CMOVQLE_0(v *Value) bool {
+ // match: (CMOVQLE x y (InvertFlags cond))
+ // cond:
+ // result: (CMOVQGE x y cond)
+ for {
+ _ = v.Args[2]
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpAMD64InvertFlags {
+ break
+ }
+ cond := v_2.Args[0]
+ v.reset(OpAMD64CMOVQGE)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(cond)
+ return true
+ }
+ // match: (CMOVQLE _ x (FlagEQ))
+ // cond:
+ // result: x
+ for {
+ _ = v.Args[2]
+ x := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpAMD64FlagEQ {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+ // match: (CMOVQLE y _ (FlagGT_UGT))
+ // cond:
+ // result: y
+ for {
+ _ = v.Args[2]
+ y := v.Args[0]
+ v_2 := v.Args[2]
+ if v_2.Op != OpAMD64FlagGT_UGT {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = y.Type
+ v.AddArg(y)
+ return true
+ }
+ // match: (CMOVQLE y _ (FlagGT_ULT))
+ // cond:
+ // result: y
+ for {
+ _ = v.Args[2]
+ y := v.Args[0]
+ v_2 := v.Args[2]
+ if v_2.Op != OpAMD64FlagGT_ULT {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = y.Type
+ v.AddArg(y)
+ return true
+ }
+ // match: (CMOVQLE _ x (FlagLT_ULT))
+ // cond:
+ // result: x
+ for {
+ _ = v.Args[2]
+ x := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpAMD64FlagLT_ULT {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+ // match: (CMOVQLE _ x (FlagLT_UGT))
+ // cond:
+ // result: x
+ for {
+ _ = v.Args[2]
+ x := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpAMD64FlagLT_UGT {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64CMOVQLS_0(v *Value) bool {
+ // match: (CMOVQLS x y (InvertFlags cond))
+ // cond:
+ // result: (CMOVQCC x y cond)
+ for {
+ _ = v.Args[2]
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpAMD64InvertFlags {
+ break
+ }
+ cond := v_2.Args[0]
+ v.reset(OpAMD64CMOVQCC)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(cond)
+ return true
+ }
+ // match: (CMOVQLS _ x (FlagEQ))
+ // cond:
+ // result: x
+ for {
+ _ = v.Args[2]
+ x := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpAMD64FlagEQ {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+ // match: (CMOVQLS y _ (FlagGT_UGT))
+ // cond:
+ // result: y
+ for {
+ _ = v.Args[2]
+ y := v.Args[0]
+ v_2 := v.Args[2]
+ if v_2.Op != OpAMD64FlagGT_UGT {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = y.Type
+ v.AddArg(y)
+ return true
+ }
+ // match: (CMOVQLS _ x (FlagGT_ULT))
+ // cond:
+ // result: x
+ for {
+ _ = v.Args[2]
+ x := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpAMD64FlagGT_ULT {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+ // match: (CMOVQLS _ x (FlagLT_ULT))
+ // cond:
+ // result: x
+ for {
+ _ = v.Args[2]
+ x := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpAMD64FlagLT_ULT {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+	// match: (CMOVQLS y _ (FlagLT_UGT))
+ // cond:
+ // result: y
+ for {
+ _ = v.Args[2]
+ y := v.Args[0]
+ v_2 := v.Args[2]
+		if v_2.Op != OpAMD64FlagLT_UGT {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = y.Type
+ v.AddArg(y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64CMOVQLT_0(v *Value) bool {
+ // match: (CMOVQLT x y (InvertFlags cond))
+ // cond:
+ // result: (CMOVQGT x y cond)
+ for {
+ _ = v.Args[2]
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpAMD64InvertFlags {
+ break
+ }
+ cond := v_2.Args[0]
+ v.reset(OpAMD64CMOVQGT)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(cond)
+ return true
+ }
+ // match: (CMOVQLT y _ (FlagEQ))
+ // cond:
+ // result: y
+ for {
+ _ = v.Args[2]
+ y := v.Args[0]
+ v_2 := v.Args[2]
+ if v_2.Op != OpAMD64FlagEQ {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = y.Type
+ v.AddArg(y)
+ return true
+ }
+ // match: (CMOVQLT y _ (FlagGT_UGT))
+ // cond:
+ // result: y
+ for {
+ _ = v.Args[2]
+ y := v.Args[0]
+ v_2 := v.Args[2]
+ if v_2.Op != OpAMD64FlagGT_UGT {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = y.Type
+ v.AddArg(y)
+ return true
+ }
+ // match: (CMOVQLT y _ (FlagGT_ULT))
+ // cond:
+ // result: y
+ for {
+ _ = v.Args[2]
+ y := v.Args[0]
+ v_2 := v.Args[2]
+ if v_2.Op != OpAMD64FlagGT_ULT {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = y.Type
+ v.AddArg(y)
+ return true
+ }
+ // match: (CMOVQLT _ x (FlagLT_ULT))
+ // cond:
+ // result: x
+ for {
+ _ = v.Args[2]
+ x := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpAMD64FlagLT_ULT {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+ // match: (CMOVQLT _ x (FlagLT_UGT))
+ // cond:
+ // result: x
+ for {
+ _ = v.Args[2]
+ x := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpAMD64FlagLT_UGT {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64CMOVQNE_0(v *Value) bool {
+ // match: (CMOVQNE x y (InvertFlags cond))
+ // cond:
+ // result: (CMOVQNE x y cond)
+ for {
+ _ = v.Args[2]
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpAMD64InvertFlags {
+ break
+ }
+ cond := v_2.Args[0]
+ v.reset(OpAMD64CMOVQNE)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(cond)
+ return true
+ }
+ // match: (CMOVQNE y _ (FlagEQ))
+ // cond:
+ // result: y
+ for {
+ _ = v.Args[2]
+ y := v.Args[0]
+ v_2 := v.Args[2]
+ if v_2.Op != OpAMD64FlagEQ {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = y.Type
+ v.AddArg(y)
+ return true
+ }
+ // match: (CMOVQNE _ x (FlagGT_UGT))
+ // cond:
+ // result: x
+ for {
+ _ = v.Args[2]
+ x := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpAMD64FlagGT_UGT {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+ // match: (CMOVQNE _ x (FlagGT_ULT))
+ // cond:
+ // result: x
+ for {
+ _ = v.Args[2]
+ x := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpAMD64FlagGT_ULT {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+ // match: (CMOVQNE _ x (FlagLT_ULT))
+ // cond:
+ // result: x
+ for {
+ _ = v.Args[2]
+ x := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpAMD64FlagLT_ULT {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+ // match: (CMOVQNE _ x (FlagLT_UGT))
+ // cond:
+ // result: x
+ for {
+ _ = v.Args[2]
+ x := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpAMD64FlagLT_UGT {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64CMOVWCC_0(v *Value) bool {
+ // match: (CMOVWCC x y (InvertFlags cond))
+ // cond:
+ // result: (CMOVWLS x y cond)
+ for {
+ _ = v.Args[2]
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpAMD64InvertFlags {
+ break
+ }
+ cond := v_2.Args[0]
+ v.reset(OpAMD64CMOVWLS)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(cond)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64CMOVWCS_0(v *Value) bool {
+ // match: (CMOVWCS x y (InvertFlags cond))
+ // cond:
+ // result: (CMOVWHI x y cond)
+ for {
+ _ = v.Args[2]
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpAMD64InvertFlags {
+ break
+ }
+ cond := v_2.Args[0]
+ v.reset(OpAMD64CMOVWHI)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(cond)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64CMOVWEQ_0(v *Value) bool {
+ // match: (CMOVWEQ x y (InvertFlags cond))
+ // cond:
+ // result: (CMOVWEQ x y cond)
+ for {
+ _ = v.Args[2]
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpAMD64InvertFlags {
+ break
+ }
+ cond := v_2.Args[0]
+ v.reset(OpAMD64CMOVWEQ)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(cond)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64CMOVWGE_0(v *Value) bool {
+ // match: (CMOVWGE x y (InvertFlags cond))
+ // cond:
+ // result: (CMOVWLE x y cond)
+ for {
+ _ = v.Args[2]
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpAMD64InvertFlags {
+ break
+ }
+ cond := v_2.Args[0]
+ v.reset(OpAMD64CMOVWLE)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(cond)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64CMOVWGT_0(v *Value) bool {
+ // match: (CMOVWGT x y (InvertFlags cond))
+ // cond:
+ // result: (CMOVWLT x y cond)
+ for {
+ _ = v.Args[2]
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpAMD64InvertFlags {
+ break
+ }
+ cond := v_2.Args[0]
+ v.reset(OpAMD64CMOVWLT)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(cond)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64CMOVWHI_0(v *Value) bool {
+ // match: (CMOVWHI x y (InvertFlags cond))
+ // cond:
+ // result: (CMOVWCS x y cond)
+ for {
+ _ = v.Args[2]
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpAMD64InvertFlags {
+ break
+ }
+ cond := v_2.Args[0]
+ v.reset(OpAMD64CMOVWCS)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(cond)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64CMOVWLE_0(v *Value) bool {
+ // match: (CMOVWLE x y (InvertFlags cond))
+ // cond:
+ // result: (CMOVWGE x y cond)
+ for {
+ _ = v.Args[2]
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpAMD64InvertFlags {
+ break
+ }
+ cond := v_2.Args[0]
+ v.reset(OpAMD64CMOVWGE)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(cond)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64CMOVWLS_0(v *Value) bool {
+ // match: (CMOVWLS x y (InvertFlags cond))
+ // cond:
+ // result: (CMOVWCC x y cond)
+ for {
+ _ = v.Args[2]
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpAMD64InvertFlags {
+ break
+ }
+ cond := v_2.Args[0]
+ v.reset(OpAMD64CMOVWCC)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(cond)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64CMOVWLT_0(v *Value) bool {
+ // match: (CMOVWLT x y (InvertFlags cond))
+ // cond:
+ // result: (CMOVWGT x y cond)
+ for {
+ _ = v.Args[2]
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpAMD64InvertFlags {
+ break
+ }
+ cond := v_2.Args[0]
+ v.reset(OpAMD64CMOVWGT)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(cond)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64CMOVWNE_0(v *Value) bool {
+ // match: (CMOVWNE x y (InvertFlags cond))
+ // cond:
+ // result: (CMOVWNE x y cond)
+ for {
+ _ = v.Args[2]
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpAMD64InvertFlags {
+ break
+ }
+ cond := v_2.Args[0]
+ v.reset(OpAMD64CMOVWNE)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(cond)
+ return true
+ }
+ return false
+}
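All of the InvertFlags rewrites above apply one fixed pairing of conditions under swapped comparison operands. The sketch below is illustrative only (invertCond is a hypothetical helper, not part of rewriteAMD64.go); it just records the mapping those rules encode: signed GE/LE and GT/LT swap, unsigned HI/CS and CC/LS swap, and EQ/NE map to themselves.

package main

import "fmt"

// invertCond maps a CMOV condition suffix to the one used once the operands
// of the generating comparison are swapped (InvertFlags). This mirrors the
// rewrites above; it is a reference sketch, not compiler code.
func invertCond(c string) string {
	m := map[string]string{
		"EQ": "EQ", "NE": "NE",
		"GE": "LE", "LE": "GE",
		"GT": "LT", "LT": "GT",
		"HI": "CS", "CS": "HI",
		"CC": "LS", "LS": "CC",
	}
	return m[c]
}

func main() {
	fmt.Println(invertCond("GE")) // LE, as in (CMOVQGE x y (InvertFlags c)) -> (CMOVQLE x y c)
	fmt.Println(invertCond("HI")) // CS, as in (CMOVQHI x y (InvertFlags c)) -> (CMOVQCS x y c)
}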
func rewriteValueAMD64_OpAMD64CMPB_0(v *Value) bool {
b := v.Block
_ = b
@@ -42931,6 +44376,22 @@ func rewriteValueAMD64_OpAMD64TESTB_0(v *Value) bool {
}
return false
}
+func rewriteValueAMD64_OpAMD64TESTBconst_0(v *Value) bool {
+ // match: (TESTBconst [-1] x)
+ // cond:
+ // result: (TESTB x x)
+ for {
+ if v.AuxInt != -1 {
+ break
+ }
+ x := v.Args[0]
+ v.reset(OpAMD64TESTB)
+ v.AddArg(x)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
func rewriteValueAMD64_OpAMD64TESTL_0(v *Value) bool {
b := v.Block
_ = b
@@ -43024,6 +44485,22 @@ func rewriteValueAMD64_OpAMD64TESTL_0(v *Value) bool {
}
return false
}
+func rewriteValueAMD64_OpAMD64TESTLconst_0(v *Value) bool {
+ // match: (TESTLconst [-1] x)
+ // cond:
+ // result: (TESTL x x)
+ for {
+ if v.AuxInt != -1 {
+ break
+ }
+ x := v.Args[0]
+ v.reset(OpAMD64TESTL)
+ v.AddArg(x)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
func rewriteValueAMD64_OpAMD64TESTQ_0(v *Value) bool {
b := v.Block
_ = b
@@ -43123,6 +44600,22 @@ func rewriteValueAMD64_OpAMD64TESTQ_0(v *Value) bool {
}
return false
}
+func rewriteValueAMD64_OpAMD64TESTQconst_0(v *Value) bool {
+ // match: (TESTQconst [-1] x)
+ // cond:
+ // result: (TESTQ x x)
+ for {
+ if v.AuxInt != -1 {
+ break
+ }
+ x := v.Args[0]
+ v.reset(OpAMD64TESTQ)
+ v.AddArg(x)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
func rewriteValueAMD64_OpAMD64TESTW_0(v *Value) bool {
b := v.Block
_ = b
@@ -43216,6 +44709,22 @@ func rewriteValueAMD64_OpAMD64TESTW_0(v *Value) bool {
}
return false
}
+func rewriteValueAMD64_OpAMD64TESTWconst_0(v *Value) bool {
+ // match: (TESTWconst [-1] x)
+ // cond:
+ // result: (TESTW x x)
+ for {
+ if v.AuxInt != -1 {
+ break
+ }
+ x := v.Args[0]
+ v.reset(OpAMD64TESTW)
+ v.AddArg(x)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
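The TEST*const [-1] rewrites added above rely on the identity x & -1 == x == x & x: testing a value against an all-ones immediate sets the same flags as testing the value against itself, so the immediate form can be dropped. A minimal standalone check of that identity (not compiler code):

package main

import "fmt"

// AND-ing x with -1 (all bits set) and AND-ing x with itself both yield x,
// so TESTQconst $-1, x and TESTQ x, x set identical flags. This only verifies
// the arithmetic identity underpinning the rewrite.
func main() {
	for _, x := range []int64{0, 1, -1, 42, -1 << 62} {
		fmt.Println(x&-1 == x&x) // always true
	}
}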
func rewriteValueAMD64_OpAMD64XADDLlock_0(v *Value) bool {
// match: (XADDLlock [off1] {sym} val (ADDQconst [off2] ptr) mem)
// cond: is32Bit(off1+off2)
@@ -44939,6 +46448,1075 @@ func rewriteValueAMD64_OpCom8_0(v *Value) bool {
return true
}
}
+func rewriteValueAMD64_OpCondSelect_0(v *Value) bool {
+ // match: (CondSelect <t> x y (SETEQ cond))
+ // cond: (is64BitInt(t) || isPtr(t))
+ // result: (CMOVQEQ y x cond)
+ for {
+ t := v.Type
+ _ = v.Args[2]
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpAMD64SETEQ {
+ break
+ }
+ cond := v_2.Args[0]
+ if !(is64BitInt(t) || isPtr(t)) {
+ break
+ }
+ v.reset(OpAMD64CMOVQEQ)
+ v.AddArg(y)
+ v.AddArg(x)
+ v.AddArg(cond)
+ return true
+ }
+ // match: (CondSelect <t> x y (SETNE cond))
+ // cond: (is64BitInt(t) || isPtr(t))
+ // result: (CMOVQNE y x cond)
+ for {
+ t := v.Type
+ _ = v.Args[2]
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpAMD64SETNE {
+ break
+ }
+ cond := v_2.Args[0]
+ if !(is64BitInt(t) || isPtr(t)) {
+ break
+ }
+ v.reset(OpAMD64CMOVQNE)
+ v.AddArg(y)
+ v.AddArg(x)
+ v.AddArg(cond)
+ return true
+ }
+ // match: (CondSelect <t> x y (SETL cond))
+ // cond: (is64BitInt(t) || isPtr(t))
+ // result: (CMOVQLT y x cond)
+ for {
+ t := v.Type
+ _ = v.Args[2]
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpAMD64SETL {
+ break
+ }
+ cond := v_2.Args[0]
+ if !(is64BitInt(t) || isPtr(t)) {
+ break
+ }
+ v.reset(OpAMD64CMOVQLT)
+ v.AddArg(y)
+ v.AddArg(x)
+ v.AddArg(cond)
+ return true
+ }
+ // match: (CondSelect <t> x y (SETG cond))
+ // cond: (is64BitInt(t) || isPtr(t))
+ // result: (CMOVQGT y x cond)
+ for {
+ t := v.Type
+ _ = v.Args[2]
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpAMD64SETG {
+ break
+ }
+ cond := v_2.Args[0]
+ if !(is64BitInt(t) || isPtr(t)) {
+ break
+ }
+ v.reset(OpAMD64CMOVQGT)
+ v.AddArg(y)
+ v.AddArg(x)
+ v.AddArg(cond)
+ return true
+ }
+ // match: (CondSelect <t> x y (SETLE cond))
+ // cond: (is64BitInt(t) || isPtr(t))
+ // result: (CMOVQLE y x cond)
+ for {
+ t := v.Type
+ _ = v.Args[2]
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpAMD64SETLE {
+ break
+ }
+ cond := v_2.Args[0]
+ if !(is64BitInt(t) || isPtr(t)) {
+ break
+ }
+ v.reset(OpAMD64CMOVQLE)
+ v.AddArg(y)
+ v.AddArg(x)
+ v.AddArg(cond)
+ return true
+ }
+ // match: (CondSelect <t> x y (SETGE cond))
+ // cond: (is64BitInt(t) || isPtr(t))
+ // result: (CMOVQGE y x cond)
+ for {
+ t := v.Type
+ _ = v.Args[2]
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpAMD64SETGE {
+ break
+ }
+ cond := v_2.Args[0]
+ if !(is64BitInt(t) || isPtr(t)) {
+ break
+ }
+ v.reset(OpAMD64CMOVQGE)
+ v.AddArg(y)
+ v.AddArg(x)
+ v.AddArg(cond)
+ return true
+ }
+ // match: (CondSelect <t> x y (SETA cond))
+ // cond: (is64BitInt(t) || isPtr(t))
+ // result: (CMOVQHI y x cond)
+ for {
+ t := v.Type
+ _ = v.Args[2]
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpAMD64SETA {
+ break
+ }
+ cond := v_2.Args[0]
+ if !(is64BitInt(t) || isPtr(t)) {
+ break
+ }
+ v.reset(OpAMD64CMOVQHI)
+ v.AddArg(y)
+ v.AddArg(x)
+ v.AddArg(cond)
+ return true
+ }
+ // match: (CondSelect <t> x y (SETB cond))
+ // cond: (is64BitInt(t) || isPtr(t))
+ // result: (CMOVQCS y x cond)
+ for {
+ t := v.Type
+ _ = v.Args[2]
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpAMD64SETB {
+ break
+ }
+ cond := v_2.Args[0]
+ if !(is64BitInt(t) || isPtr(t)) {
+ break
+ }
+ v.reset(OpAMD64CMOVQCS)
+ v.AddArg(y)
+ v.AddArg(x)
+ v.AddArg(cond)
+ return true
+ }
+ // match: (CondSelect <t> x y (SETAE cond))
+ // cond: (is64BitInt(t) || isPtr(t))
+ // result: (CMOVQCC y x cond)
+ for {
+ t := v.Type
+ _ = v.Args[2]
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpAMD64SETAE {
+ break
+ }
+ cond := v_2.Args[0]
+ if !(is64BitInt(t) || isPtr(t)) {
+ break
+ }
+ v.reset(OpAMD64CMOVQCC)
+ v.AddArg(y)
+ v.AddArg(x)
+ v.AddArg(cond)
+ return true
+ }
+ // match: (CondSelect <t> x y (SETBE cond))
+ // cond: (is64BitInt(t) || isPtr(t))
+ // result: (CMOVQLS y x cond)
+ for {
+ t := v.Type
+ _ = v.Args[2]
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpAMD64SETBE {
+ break
+ }
+ cond := v_2.Args[0]
+ if !(is64BitInt(t) || isPtr(t)) {
+ break
+ }
+ v.reset(OpAMD64CMOVQLS)
+ v.AddArg(y)
+ v.AddArg(x)
+ v.AddArg(cond)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpCondSelect_10(v *Value) bool {
+ // match: (CondSelect <t> x y (SETEQF cond))
+ // cond: (is64BitInt(t) || isPtr(t))
+ // result: (CMOVQEQF y x cond)
+ for {
+ t := v.Type
+ _ = v.Args[2]
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpAMD64SETEQF {
+ break
+ }
+ cond := v_2.Args[0]
+ if !(is64BitInt(t) || isPtr(t)) {
+ break
+ }
+ v.reset(OpAMD64CMOVQEQF)
+ v.AddArg(y)
+ v.AddArg(x)
+ v.AddArg(cond)
+ return true
+ }
+ // match: (CondSelect <t> x y (SETNEF cond))
+ // cond: (is64BitInt(t) || isPtr(t))
+ // result: (CMOVQNEF y x cond)
+ for {
+ t := v.Type
+ _ = v.Args[2]
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpAMD64SETNEF {
+ break
+ }
+ cond := v_2.Args[0]
+ if !(is64BitInt(t) || isPtr(t)) {
+ break
+ }
+ v.reset(OpAMD64CMOVQNEF)
+ v.AddArg(y)
+ v.AddArg(x)
+ v.AddArg(cond)
+ return true
+ }
+ // match: (CondSelect <t> x y (SETGF cond))
+ // cond: (is64BitInt(t) || isPtr(t))
+ // result: (CMOVQGTF y x cond)
+ for {
+ t := v.Type
+ _ = v.Args[2]
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpAMD64SETGF {
+ break
+ }
+ cond := v_2.Args[0]
+ if !(is64BitInt(t) || isPtr(t)) {
+ break
+ }
+ v.reset(OpAMD64CMOVQGTF)
+ v.AddArg(y)
+ v.AddArg(x)
+ v.AddArg(cond)
+ return true
+ }
+ // match: (CondSelect <t> x y (SETGEF cond))
+ // cond: (is64BitInt(t) || isPtr(t))
+ // result: (CMOVQGEF y x cond)
+ for {
+ t := v.Type
+ _ = v.Args[2]
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpAMD64SETGEF {
+ break
+ }
+ cond := v_2.Args[0]
+ if !(is64BitInt(t) || isPtr(t)) {
+ break
+ }
+ v.reset(OpAMD64CMOVQGEF)
+ v.AddArg(y)
+ v.AddArg(x)
+ v.AddArg(cond)
+ return true
+ }
+ // match: (CondSelect <t> x y (SETEQ cond))
+ // cond: is32BitInt(t)
+ // result: (CMOVLEQ y x cond)
+ for {
+ t := v.Type
+ _ = v.Args[2]
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpAMD64SETEQ {
+ break
+ }
+ cond := v_2.Args[0]
+ if !(is32BitInt(t)) {
+ break
+ }
+ v.reset(OpAMD64CMOVLEQ)
+ v.AddArg(y)
+ v.AddArg(x)
+ v.AddArg(cond)
+ return true
+ }
+ // match: (CondSelect <t> x y (SETNE cond))
+ // cond: is32BitInt(t)
+ // result: (CMOVLNE y x cond)
+ for {
+ t := v.Type
+ _ = v.Args[2]
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpAMD64SETNE {
+ break
+ }
+ cond := v_2.Args[0]
+ if !(is32BitInt(t)) {
+ break
+ }
+ v.reset(OpAMD64CMOVLNE)
+ v.AddArg(y)
+ v.AddArg(x)
+ v.AddArg(cond)
+ return true
+ }
+ // match: (CondSelect <t> x y (SETL cond))
+ // cond: is32BitInt(t)
+ // result: (CMOVLLT y x cond)
+ for {
+ t := v.Type
+ _ = v.Args[2]
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpAMD64SETL {
+ break
+ }
+ cond := v_2.Args[0]
+ if !(is32BitInt(t)) {
+ break
+ }
+ v.reset(OpAMD64CMOVLLT)
+ v.AddArg(y)
+ v.AddArg(x)
+ v.AddArg(cond)
+ return true
+ }
+ // match: (CondSelect <t> x y (SETG cond))
+ // cond: is32BitInt(t)
+ // result: (CMOVLGT y x cond)
+ for {
+ t := v.Type
+ _ = v.Args[2]
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpAMD64SETG {
+ break
+ }
+ cond := v_2.Args[0]
+ if !(is32BitInt(t)) {
+ break
+ }
+ v.reset(OpAMD64CMOVLGT)
+ v.AddArg(y)
+ v.AddArg(x)
+ v.AddArg(cond)
+ return true
+ }
+ // match: (CondSelect <t> x y (SETLE cond))
+ // cond: is32BitInt(t)
+ // result: (CMOVLLE y x cond)
+ for {
+ t := v.Type
+ _ = v.Args[2]
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpAMD64SETLE {
+ break
+ }
+ cond := v_2.Args[0]
+ if !(is32BitInt(t)) {
+ break
+ }
+ v.reset(OpAMD64CMOVLLE)
+ v.AddArg(y)
+ v.AddArg(x)
+ v.AddArg(cond)
+ return true
+ }
+ // match: (CondSelect <t> x y (SETGE cond))
+ // cond: is32BitInt(t)
+ // result: (CMOVLGE y x cond)
+ for {
+ t := v.Type
+ _ = v.Args[2]
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpAMD64SETGE {
+ break
+ }
+ cond := v_2.Args[0]
+ if !(is32BitInt(t)) {
+ break
+ }
+ v.reset(OpAMD64CMOVLGE)
+ v.AddArg(y)
+ v.AddArg(x)
+ v.AddArg(cond)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpCondSelect_20(v *Value) bool {
+ // match: (CondSelect <t> x y (SETA cond))
+ // cond: is32BitInt(t)
+ // result: (CMOVLHI y x cond)
+ for {
+ t := v.Type
+ _ = v.Args[2]
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpAMD64SETA {
+ break
+ }
+ cond := v_2.Args[0]
+ if !(is32BitInt(t)) {
+ break
+ }
+ v.reset(OpAMD64CMOVLHI)
+ v.AddArg(y)
+ v.AddArg(x)
+ v.AddArg(cond)
+ return true
+ }
+ // match: (CondSelect <t> x y (SETB cond))
+ // cond: is32BitInt(t)
+ // result: (CMOVLCS y x cond)
+ for {
+ t := v.Type
+ _ = v.Args[2]
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpAMD64SETB {
+ break
+ }
+ cond := v_2.Args[0]
+ if !(is32BitInt(t)) {
+ break
+ }
+ v.reset(OpAMD64CMOVLCS)
+ v.AddArg(y)
+ v.AddArg(x)
+ v.AddArg(cond)
+ return true
+ }
+ // match: (CondSelect <t> x y (SETAE cond))
+ // cond: is32BitInt(t)
+ // result: (CMOVLCC y x cond)
+ for {
+ t := v.Type
+ _ = v.Args[2]
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpAMD64SETAE {
+ break
+ }
+ cond := v_2.Args[0]
+ if !(is32BitInt(t)) {
+ break
+ }
+ v.reset(OpAMD64CMOVLCC)
+ v.AddArg(y)
+ v.AddArg(x)
+ v.AddArg(cond)
+ return true
+ }
+ // match: (CondSelect <t> x y (SETBE cond))
+ // cond: is32BitInt(t)
+ // result: (CMOVLLS y x cond)
+ for {
+ t := v.Type
+ _ = v.Args[2]
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpAMD64SETBE {
+ break
+ }
+ cond := v_2.Args[0]
+ if !(is32BitInt(t)) {
+ break
+ }
+ v.reset(OpAMD64CMOVLLS)
+ v.AddArg(y)
+ v.AddArg(x)
+ v.AddArg(cond)
+ return true
+ }
+ // match: (CondSelect <t> x y (SETEQF cond))
+ // cond: is32BitInt(t)
+ // result: (CMOVLEQF y x cond)
+ for {
+ t := v.Type
+ _ = v.Args[2]
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpAMD64SETEQF {
+ break
+ }
+ cond := v_2.Args[0]
+ if !(is32BitInt(t)) {
+ break
+ }
+ v.reset(OpAMD64CMOVLEQF)
+ v.AddArg(y)
+ v.AddArg(x)
+ v.AddArg(cond)
+ return true
+ }
+ // match: (CondSelect <t> x y (SETNEF cond))
+ // cond: is32BitInt(t)
+ // result: (CMOVLNEF y x cond)
+ for {
+ t := v.Type
+ _ = v.Args[2]
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpAMD64SETNEF {
+ break
+ }
+ cond := v_2.Args[0]
+ if !(is32BitInt(t)) {
+ break
+ }
+ v.reset(OpAMD64CMOVLNEF)
+ v.AddArg(y)
+ v.AddArg(x)
+ v.AddArg(cond)
+ return true
+ }
+ // match: (CondSelect <t> x y (SETGF cond))
+ // cond: is32BitInt(t)
+ // result: (CMOVLGTF y x cond)
+ for {
+ t := v.Type
+ _ = v.Args[2]
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpAMD64SETGF {
+ break
+ }
+ cond := v_2.Args[0]
+ if !(is32BitInt(t)) {
+ break
+ }
+ v.reset(OpAMD64CMOVLGTF)
+ v.AddArg(y)
+ v.AddArg(x)
+ v.AddArg(cond)
+ return true
+ }
+ // match: (CondSelect <t> x y (SETGEF cond))
+ // cond: is32BitInt(t)
+ // result: (CMOVLGEF y x cond)
+ for {
+ t := v.Type
+ _ = v.Args[2]
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpAMD64SETGEF {
+ break
+ }
+ cond := v_2.Args[0]
+ if !(is32BitInt(t)) {
+ break
+ }
+ v.reset(OpAMD64CMOVLGEF)
+ v.AddArg(y)
+ v.AddArg(x)
+ v.AddArg(cond)
+ return true
+ }
+ // match: (CondSelect <t> x y (SETEQ cond))
+ // cond: is16BitInt(t)
+ // result: (CMOVWEQ y x cond)
+ for {
+ t := v.Type
+ _ = v.Args[2]
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpAMD64SETEQ {
+ break
+ }
+ cond := v_2.Args[0]
+ if !(is16BitInt(t)) {
+ break
+ }
+ v.reset(OpAMD64CMOVWEQ)
+ v.AddArg(y)
+ v.AddArg(x)
+ v.AddArg(cond)
+ return true
+ }
+ // match: (CondSelect <t> x y (SETNE cond))
+ // cond: is16BitInt(t)
+ // result: (CMOVWNE y x cond)
+ for {
+ t := v.Type
+ _ = v.Args[2]
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpAMD64SETNE {
+ break
+ }
+ cond := v_2.Args[0]
+ if !(is16BitInt(t)) {
+ break
+ }
+ v.reset(OpAMD64CMOVWNE)
+ v.AddArg(y)
+ v.AddArg(x)
+ v.AddArg(cond)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpCondSelect_30(v *Value) bool {
+ // match: (CondSelect <t> x y (SETL cond))
+ // cond: is16BitInt(t)
+ // result: (CMOVWLT y x cond)
+ for {
+ t := v.Type
+ _ = v.Args[2]
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpAMD64SETL {
+ break
+ }
+ cond := v_2.Args[0]
+ if !(is16BitInt(t)) {
+ break
+ }
+ v.reset(OpAMD64CMOVWLT)
+ v.AddArg(y)
+ v.AddArg(x)
+ v.AddArg(cond)
+ return true
+ }
+ // match: (CondSelect <t> x y (SETG cond))
+ // cond: is16BitInt(t)
+ // result: (CMOVWGT y x cond)
+ for {
+ t := v.Type
+ _ = v.Args[2]
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpAMD64SETG {
+ break
+ }
+ cond := v_2.Args[0]
+ if !(is16BitInt(t)) {
+ break
+ }
+ v.reset(OpAMD64CMOVWGT)
+ v.AddArg(y)
+ v.AddArg(x)
+ v.AddArg(cond)
+ return true
+ }
+ // match: (CondSelect <t> x y (SETLE cond))
+ // cond: is16BitInt(t)
+ // result: (CMOVWLE y x cond)
+ for {
+ t := v.Type
+ _ = v.Args[2]
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpAMD64SETLE {
+ break
+ }
+ cond := v_2.Args[0]
+ if !(is16BitInt(t)) {
+ break
+ }
+ v.reset(OpAMD64CMOVWLE)
+ v.AddArg(y)
+ v.AddArg(x)
+ v.AddArg(cond)
+ return true
+ }
+ // match: (CondSelect <t> x y (SETGE cond))
+ // cond: is16BitInt(t)
+ // result: (CMOVWGE y x cond)
+ for {
+ t := v.Type
+ _ = v.Args[2]
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpAMD64SETGE {
+ break
+ }
+ cond := v_2.Args[0]
+ if !(is16BitInt(t)) {
+ break
+ }
+ v.reset(OpAMD64CMOVWGE)
+ v.AddArg(y)
+ v.AddArg(x)
+ v.AddArg(cond)
+ return true
+ }
+ // match: (CondSelect <t> x y (SETA cond))
+ // cond: is16BitInt(t)
+ // result: (CMOVWHI y x cond)
+ for {
+ t := v.Type
+ _ = v.Args[2]
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpAMD64SETA {
+ break
+ }
+ cond := v_2.Args[0]
+ if !(is16BitInt(t)) {
+ break
+ }
+ v.reset(OpAMD64CMOVWHI)
+ v.AddArg(y)
+ v.AddArg(x)
+ v.AddArg(cond)
+ return true
+ }
+ // match: (CondSelect <t> x y (SETB cond))
+ // cond: is16BitInt(t)
+ // result: (CMOVWCS y x cond)
+ for {
+ t := v.Type
+ _ = v.Args[2]
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpAMD64SETB {
+ break
+ }
+ cond := v_2.Args[0]
+ if !(is16BitInt(t)) {
+ break
+ }
+ v.reset(OpAMD64CMOVWCS)
+ v.AddArg(y)
+ v.AddArg(x)
+ v.AddArg(cond)
+ return true
+ }
+ // match: (CondSelect <t> x y (SETAE cond))
+ // cond: is16BitInt(t)
+ // result: (CMOVWCC y x cond)
+ for {
+ t := v.Type
+ _ = v.Args[2]
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpAMD64SETAE {
+ break
+ }
+ cond := v_2.Args[0]
+ if !(is16BitInt(t)) {
+ break
+ }
+ v.reset(OpAMD64CMOVWCC)
+ v.AddArg(y)
+ v.AddArg(x)
+ v.AddArg(cond)
+ return true
+ }
+ // match: (CondSelect <t> x y (SETBE cond))
+ // cond: is16BitInt(t)
+ // result: (CMOVWLS y x cond)
+ for {
+ t := v.Type
+ _ = v.Args[2]
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpAMD64SETBE {
+ break
+ }
+ cond := v_2.Args[0]
+ if !(is16BitInt(t)) {
+ break
+ }
+ v.reset(OpAMD64CMOVWLS)
+ v.AddArg(y)
+ v.AddArg(x)
+ v.AddArg(cond)
+ return true
+ }
+ // match: (CondSelect <t> x y (SETEQF cond))
+ // cond: is16BitInt(t)
+ // result: (CMOVWEQF y x cond)
+ for {
+ t := v.Type
+ _ = v.Args[2]
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpAMD64SETEQF {
+ break
+ }
+ cond := v_2.Args[0]
+ if !(is16BitInt(t)) {
+ break
+ }
+ v.reset(OpAMD64CMOVWEQF)
+ v.AddArg(y)
+ v.AddArg(x)
+ v.AddArg(cond)
+ return true
+ }
+ // match: (CondSelect <t> x y (SETNEF cond))
+ // cond: is16BitInt(t)
+ // result: (CMOVWNEF y x cond)
+ for {
+ t := v.Type
+ _ = v.Args[2]
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpAMD64SETNEF {
+ break
+ }
+ cond := v_2.Args[0]
+ if !(is16BitInt(t)) {
+ break
+ }
+ v.reset(OpAMD64CMOVWNEF)
+ v.AddArg(y)
+ v.AddArg(x)
+ v.AddArg(cond)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpCondSelect_40(v *Value) bool {
+ b := v.Block
+ _ = b
+ typ := &b.Func.Config.Types
+ _ = typ
+ // match: (CondSelect <t> x y (SETGF cond))
+ // cond: is16BitInt(t)
+ // result: (CMOVWGTF y x cond)
+ for {
+ t := v.Type
+ _ = v.Args[2]
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpAMD64SETGF {
+ break
+ }
+ cond := v_2.Args[0]
+ if !(is16BitInt(t)) {
+ break
+ }
+ v.reset(OpAMD64CMOVWGTF)
+ v.AddArg(y)
+ v.AddArg(x)
+ v.AddArg(cond)
+ return true
+ }
+ // match: (CondSelect <t> x y (SETGEF cond))
+ // cond: is16BitInt(t)
+ // result: (CMOVWGEF y x cond)
+ for {
+ t := v.Type
+ _ = v.Args[2]
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpAMD64SETGEF {
+ break
+ }
+ cond := v_2.Args[0]
+ if !(is16BitInt(t)) {
+ break
+ }
+ v.reset(OpAMD64CMOVWGEF)
+ v.AddArg(y)
+ v.AddArg(x)
+ v.AddArg(cond)
+ return true
+ }
+ // match: (CondSelect <t> x y check)
+ // cond: !check.Type.IsFlags() && check.Type.Size() == 1
+ // result: (CondSelect <t> x y (MOVBQZX <typ.UInt64> check))
+ for {
+ t := v.Type
+ _ = v.Args[2]
+ x := v.Args[0]
+ y := v.Args[1]
+ check := v.Args[2]
+ if !(!check.Type.IsFlags() && check.Type.Size() == 1) {
+ break
+ }
+ v.reset(OpCondSelect)
+ v.Type = t
+ v.AddArg(x)
+ v.AddArg(y)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVBQZX, typ.UInt64)
+ v0.AddArg(check)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CondSelect <t> x y check)
+ // cond: !check.Type.IsFlags() && check.Type.Size() == 2
+ // result: (CondSelect <t> x y (MOVWQZX <typ.UInt64> check))
+ for {
+ t := v.Type
+ _ = v.Args[2]
+ x := v.Args[0]
+ y := v.Args[1]
+ check := v.Args[2]
+ if !(!check.Type.IsFlags() && check.Type.Size() == 2) {
+ break
+ }
+ v.reset(OpCondSelect)
+ v.Type = t
+ v.AddArg(x)
+ v.AddArg(y)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVWQZX, typ.UInt64)
+ v0.AddArg(check)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CondSelect <t> x y check)
+ // cond: !check.Type.IsFlags() && check.Type.Size() == 4
+ // result: (CondSelect <t> x y (MOVLQZX <typ.UInt64> check))
+ for {
+ t := v.Type
+ _ = v.Args[2]
+ x := v.Args[0]
+ y := v.Args[1]
+ check := v.Args[2]
+ if !(!check.Type.IsFlags() && check.Type.Size() == 4) {
+ break
+ }
+ v.reset(OpCondSelect)
+ v.Type = t
+ v.AddArg(x)
+ v.AddArg(y)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLQZX, typ.UInt64)
+ v0.AddArg(check)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CondSelect <t> x y check)
+ // cond: !check.Type.IsFlags() && check.Type.Size() == 8 && (is64BitInt(t) || isPtr(t))
+ // result: (CMOVQNE y x (CMPQconst [0] check))
+ for {
+ t := v.Type
+ _ = v.Args[2]
+ x := v.Args[0]
+ y := v.Args[1]
+ check := v.Args[2]
+ if !(!check.Type.IsFlags() && check.Type.Size() == 8 && (is64BitInt(t) || isPtr(t))) {
+ break
+ }
+ v.reset(OpAMD64CMOVQNE)
+ v.AddArg(y)
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
+ v0.AuxInt = 0
+ v0.AddArg(check)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CondSelect <t> x y check)
+ // cond: !check.Type.IsFlags() && check.Type.Size() == 8 && is32BitInt(t)
+ // result: (CMOVLNE y x (CMPQconst [0] check))
+ for {
+ t := v.Type
+ _ = v.Args[2]
+ x := v.Args[0]
+ y := v.Args[1]
+ check := v.Args[2]
+ if !(!check.Type.IsFlags() && check.Type.Size() == 8 && is32BitInt(t)) {
+ break
+ }
+ v.reset(OpAMD64CMOVLNE)
+ v.AddArg(y)
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
+ v0.AuxInt = 0
+ v0.AddArg(check)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CondSelect <t> x y check)
+ // cond: !check.Type.IsFlags() && check.Type.Size() == 8 && is16BitInt(t)
+ // result: (CMOVWNE y x (CMPQconst [0] check))
+ for {
+ t := v.Type
+ _ = v.Args[2]
+ x := v.Args[0]
+ y := v.Args[1]
+ check := v.Args[2]
+ if !(!check.Type.IsFlags() && check.Type.Size() == 8 && is16BitInt(t)) {
+ break
+ }
+ v.reset(OpAMD64CMOVWNE)
+ v.AddArg(y)
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
+ v0.AuxInt = 0
+ v0.AddArg(check)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
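The last CondSelect rules cover a condition that is a materialized value rather than live flags: a 1-, 2- or 4-byte check is zero-extended to 64 bits, and an 8-byte check is compared against zero with CMPQconst feeding a CMOV*NE. A plausible source-level shape for that path is sketched below (selectByStoredBool is hypothetical, and whether branch elimination converts a given function into CondSelect depends on heuristics not shown in this diff):

package main

// When the condition reaching CondSelect is a stored bool (a 1-byte value,
// not a flags result), the rules above zero-extend it with MOVBQZX and then
// emit CMOVQNE y x (CMPQconst [0] check). This function is only an
// illustration of such a pattern.
//go:noinline
func selectByStoredBool(b bool, x, y int64) int64 {
	v := y
	if b {
		v = x
	}
	return v
}

func main() {
	println(selectByStoredBool(true, 1, 2), selectByStoredBool(false, 1, 2))
}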
func rewriteValueAMD64_OpConst16_0(v *Value) bool {
// match: (Const16 [val])
// cond:
diff --git a/test/codegen/condmove.go b/test/codegen/condmove.go
new file mode 100644
index 0000000000..1f51505f61
--- /dev/null
+++ b/test/codegen/condmove.go
@@ -0,0 +1,178 @@
+// asmcheck
+
+package codegen
+
+func cmovint(c int) int {
+ x := c + 4
+ if x < 0 {
+ x = 182
+ }
+ // amd64:"CMOVQLT"
+ // arm64:"CSEL\tLT"
+ return x
+}
+
+func cmovchan(x, y chan int) chan int {
+ if x != y {
+ x = y
+ }
+ // amd64:"CMOVQNE"
+ // arm64:"CSEL\tNE"
+ return x
+}
+
+func cmovuintptr(x, y uintptr) uintptr {
+ if x < y {
+ x = -y
+ }
+ // amd64:"CMOVQCS"
+ // arm64:"CSEL\tLO"
+ return x
+}
+
+func cmov32bit(x, y uint32) uint32 {
+ if x < y {
+ x = -y
+ }
+ // amd64:"CMOVLCS"
+ // arm64:"CSEL\tLO"
+ return x
+}
+
+func cmov16bit(x, y uint16) uint16 {
+ if x < y {
+ x = -y
+ }
+ // amd64:"CMOVWCS"
+ // arm64:"CSEL\tLO"
+ return x
+}
+
+// Floating point comparison. For EQ/NE, we must
+// generate special code to handle NaNs.
+func cmovfloateq(x, y float64) int {
+ a := 128
+ if x == y {
+ a = 256
+ }
+ // amd64:"CMOVQNE","CMOVQPC"
+ // arm64:"CSEL\tEQ"
+ return a
+}
+
+func cmovfloatne(x, y float64) int {
+ a := 128
+ if x != y {
+ a = 256
+ }
+ // amd64:"CMOVQNE","CMOVQPS"
+ // arm64:"CSEL\tNE"
+ return a
+}
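The two floating-point tests above depend on IEEE 754 unordered-compare semantics: every comparison involving NaN is false except !=, which is why the EQ/NE cases also assert on the parity-flag moves CMOVQPC/CMOVQPS. A standalone reminder of that behavior, separate from the test file:

package main

import (
	"fmt"
	"math"
)

// NaN never compares equal to anything, including itself, and != is the one
// comparison it satisfies; a float ==/!= therefore cannot be lowered to a
// single CMOV keyed on the zero flag alone.
func main() {
	nan := math.NaN()
	fmt.Println(nan == nan) // false
	fmt.Println(nan != nan) // true
	fmt.Println(nan < 1.0)  // false: unordered comparisons are false
}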
+
+//go:noinline
+func frexp(f float64) (frac float64, exp int) {
+ return 1.0, 4
+}
+
+//go:noinline
+func ldexp(frac float64, exp int) float64 {
+ return 1.0
+}
+
+// Generate a CMOV with a floating comparison and integer move.
+func cmovfloatint2(x, y float64) float64 {
+ yfr, yexp := 4.0, 5
+
+ r := x
+ for r >= y {
+ rfr, rexp := frexp(r)
+ if rfr < yfr {
+ rexp = rexp - 1
+ }
+ // amd64:"CMOVQHI"
+ // arm64:"CSEL\tGT"
+ r = r - ldexp(y, (rexp-yexp))
+ }
+ return r
+}
+
+func cmovloaded(x [4]int, y int) int {
+ if x[2] != 0 {
+ y = x[2]
+ } else {
+ y = y >> 2
+ }
+ // amd64:"CMOVQNE"
+ // arm64:"CSEL\tNE"
+ return y
+}
+
+func cmovuintptr2(x, y uintptr) uintptr {
+ a := x * 2
+ if a == 0 {
+ a = 256
+ }
+ // amd64:"CMOVQEQ"
+ // arm64:"CSEL\tEQ"
+ return a
+}
+
+// Floating point CMOVs are not supported by amd64/arm64.
+func cmovfloatmove(x, y int) float64 {
+ a := 1.0
+ if x <= y {
+ a = 2.0
+ }
+ // amd64:-"CMOV"
+ // arm64:-"CSEL"
+ return a
+}
+
+// On amd64, the following patterns trigger comparison inversion.
+// Test that we correctly invert the CMOV condition.
+var gsink int64
+var gusink uint64
+
+func cmovinvert1(x, y int64) int64 {
+ if x < gsink {
+ y = -y
+ }
+ // amd64:"CMOVQGT"
+ return y
+}
+func cmovinvert2(x, y int64) int64 {
+ if x <= gsink {
+ y = -y
+ }
+ // amd64:"CMOVQGE"
+ return y
+}
+func cmovinvert3(x, y int64) int64 {
+ if x == gsink {
+ y = -y
+ }
+ // amd64:"CMOVQEQ"
+ return y
+}
+func cmovinvert4(x, y int64) int64 {
+ if x != gsink {
+ y = -y
+ }
+ // amd64:"CMOVQNE"
+ return y
+}
+func cmovinvert5(x, y uint64) uint64 {
+ if x > gusink {
+ y = -y
+ }
+ // amd64:"CMOVQCS"
+ return y
+}
+func cmovinvert6(x, y uint64) uint64 {
+ if x >= gusink {
+ y = -y
+ }
+ // amd64:"CMOVQLS"
+ return y
+}