author    Cherry Zhang <cherryyz@google.com>  2016-06-17 10:34:06 -0400
committer Cherry Zhang <cherryyz@google.com>  2016-07-15 18:19:59 +0000
commit    8cc3f4a17e2f4d63e090fd7bd39bee697521fddf (patch)
tree      5e880cfe21e53973cbe855e5573165d3716b7c2e
parent    14cf6e20832dd64d79c345e0fd59169c5bd0eb35 (diff)
[dev.ssa] cmd/compile: use shifted and indexed ops in SSA for ARM
This CL implements the following optimizations for ARM:
- use shifted ops (e.g. ADD R1<<2, R2) and indexed load/stores
- break up shift ops. Shifts used to be one SSA op that generates
  multiple instructions. We break them up into multiple ops, which
  allows constant folding and CSE for comparisons. Conditional moves
  are introduced for this.
- simplify zero/sign-extension ops.

Updates #15365.

Change-Id: I55e262a776a7ef2a1505d75e04d1208913c35d39
Reviewed-on: https://go-review.googlesource.com/24512
Run-TryBot: Cherry Zhang <cherryyz@google.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: David Chase <drchase@google.com>
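To make the first bullet concrete: ARM data-processing instructions accept a shifted register as their second operand, so an expression whose operand is itself a shift can fold into a single instruction. A minimal sketch (the assembly in the comments is illustrative, in Go's ARM assembler spelling, not verbatim compiler output):

    //go:noinline
    func addShifted(a, b uint32) uint32 {
        // before this CL: SLL $3, Rb, Rtmp; ADD Rtmp, Ra, Rdst
        // after this CL:  ADD Rb<<3, Ra, Rdst
        return a + b<<3
    }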
-rw-r--r--  src/cmd/compile/internal/arm/ssa.go                  305
-rw-r--r--  src/cmd/compile/internal/gc/testdata/arith_ssa.go    440
-rw-r--r--  src/cmd/compile/internal/ssa/gen/ARM.rules           507
-rw-r--r--  src/cmd/compile/internal/ssa/gen/ARMOps.go           129
-rw-r--r--  src/cmd/compile/internal/ssa/op.go                     8
-rw-r--r--  src/cmd/compile/internal/ssa/opGen.go               1653
-rw-r--r--  src/cmd/compile/internal/ssa/rewriteARM.go          8746
7 files changed, 11560 insertions, 228 deletions
diff --git a/src/cmd/compile/internal/arm/ssa.go b/src/cmd/compile/internal/arm/ssa.go
index 51722c4f35..5dcd8ca1ac 100644
--- a/src/cmd/compile/internal/arm/ssa.go
+++ b/src/cmd/compile/internal/arm/ssa.go
@@ -5,6 +5,7 @@
package arm
import (
+ "fmt"
"math"
"cmd/compile/internal/gc"
@@ -108,6 +109,57 @@ func storeByType(t ssa.Type) obj.As {
panic("bad store type")
}
+// shift type is used as Offset in obj.TYPE_SHIFT operands to encode shifted register operands
+type shift int64
+
+// copied from ../../../internal/obj/util.go:/TYPE_SHIFT
+func (v shift) String() string {
+ op := "<<>>->@>"[((v>>5)&3)<<1:]
+ if v&(1<<4) != 0 {
+ // register shift
+ return fmt.Sprintf("R%d%c%cR%d", v&15, op[0], op[1], (v>>8)&15)
+ } else {
+ // constant shift
+ return fmt.Sprintf("R%d%c%c%d", v&15, op[0], op[1], (v>>7)&31)
+ }
+}
+
+// makeshift encodes a register shifted by a constant
+func makeshift(reg int16, typ int64, s int64) shift {
+ return shift(int64(reg&0xf) | typ | (s&31)<<7)
+}
+
+// genshift generates a Prog for r = r0 op (r1 shifted by s)
+func genshift(as obj.As, r0, r1, r int16, typ int64, s int64) *obj.Prog {
+ p := gc.Prog(as)
+ p.From.Type = obj.TYPE_SHIFT
+ p.From.Offset = int64(makeshift(r1, typ, s))
+ p.Reg = r0
+ if r != 0 {
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ }
+ return p
+}
+
+// makeregshift encodes a register shifted by a register
+func makeregshift(r1 int16, typ int64, r2 int16) shift {
+ return shift(int64(r1&0xf) | typ | int64(r2&0xf)<<8 | 1<<4)
+}
+
+// genregshift generates a Prog for r = r0 op (r1 shifted by r2)
+func genregshift(as obj.As, r0, r1, r2, r int16, typ int64) *obj.Prog {
+ p := gc.Prog(as)
+ p.From.Type = obj.TYPE_SHIFT
+ p.From.Offset = int64(makeregshift(r1, typ, r2))
+ p.Reg = r0
+ if r != 0 {
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ }
+ return p
+}
+
func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
s.SetLineno(v.Line)
switch v.Op {
@@ -237,45 +289,27 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.To.Type = obj.TYPE_REG
p.To.Reg = r
case ssa.OpARMSLL,
- ssa.OpARMSRL:
- // ARM shift instructions uses only the low-order byte of the shift amount
- // generate conditional instructions to deal with large shifts
- // CMP $32, Rarg1
- // SLL Rarg1, Rarg0, Rdst
- // MOVW.HS $0, Rdst
+ ssa.OpARMSRL,
+ ssa.OpARMSRA:
r := gc.SSARegNum(v)
r1 := gc.SSARegNum(v.Args[0])
r2 := gc.SSARegNum(v.Args[1])
- p := gc.Prog(arm.ACMP)
- p.From.Type = obj.TYPE_CONST
- p.From.Offset = 32
- p.Reg = r2
- p = gc.Prog(v.Op.Asm())
+ p := gc.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = r2
p.Reg = r1
p.To.Type = obj.TYPE_REG
p.To.Reg = r
- p = gc.Prog(arm.AMOVW)
- p.Scond = arm.C_SCOND_HS
- p.From.Type = obj.TYPE_CONST
- p.From.Offset = 0
- p.To.Type = obj.TYPE_REG
- p.To.Reg = r
- case ssa.OpARMSRA:
+ case ssa.OpARMSRAcond:
// ARM shift instructions use only the low-order byte of the shift amount
// generate conditional instructions to deal with large shifts
- // CMP $32, Rarg1
+ // flag is already set
// SRA.HS $31, Rarg0, Rdst // shift 31 bits to get the sign bit
// SRA.LO Rarg1, Rarg0, Rdst
r := gc.SSARegNum(v)
r1 := gc.SSARegNum(v.Args[0])
r2 := gc.SSARegNum(v.Args[1])
- p := gc.Prog(arm.ACMP)
- p.From.Type = obj.TYPE_CONST
- p.From.Offset = 32
- p.Reg = r2
- p = gc.Prog(arm.ASRA)
+ p := gc.Prog(arm.ASRA)
p.Scond = arm.C_SCOND_HS
p.From.Type = obj.TYPE_CONST
p.From.Offset = 31
@@ -319,11 +353,115 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.To.Type = obj.TYPE_REG
p.To.Reg = gc.SSARegNum(v)
case ssa.OpARMSRRconst:
- p := gc.Prog(arm.AMOVW)
- p.From.Type = obj.TYPE_SHIFT
- p.From.Offset = int64(gc.SSARegNum(v.Args[0])&0xf) | arm.SHIFT_RR | (v.AuxInt&31)<<7
- p.To.Type = obj.TYPE_REG
- p.To.Reg = gc.SSARegNum(v)
+ genshift(arm.AMOVW, 0, gc.SSARegNum(v.Args[0]), gc.SSARegNum(v), arm.SHIFT_RR, v.AuxInt)
+ case ssa.OpARMADDshiftLL,
+ ssa.OpARMADCshiftLL,
+ ssa.OpARMSUBshiftLL,
+ ssa.OpARMSBCshiftLL,
+ ssa.OpARMRSBshiftLL,
+ ssa.OpARMRSCshiftLL,
+ ssa.OpARMANDshiftLL,
+ ssa.OpARMORshiftLL,
+ ssa.OpARMXORshiftLL,
+ ssa.OpARMBICshiftLL:
+ genshift(v.Op.Asm(), gc.SSARegNum(v.Args[0]), gc.SSARegNum(v.Args[1]), gc.SSARegNum(v), arm.SHIFT_LL, v.AuxInt)
+ case ssa.OpARMADDSshiftLL,
+ ssa.OpARMSUBSshiftLL,
+ ssa.OpARMRSBSshiftLL:
+ p := genshift(v.Op.Asm(), gc.SSARegNum(v.Args[0]), gc.SSARegNum(v.Args[1]), gc.SSARegNum(v), arm.SHIFT_LL, v.AuxInt)
+ p.Scond = arm.C_SBIT
+ case ssa.OpARMADDshiftRL,
+ ssa.OpARMADCshiftRL,
+ ssa.OpARMSUBshiftRL,
+ ssa.OpARMSBCshiftRL,
+ ssa.OpARMRSBshiftRL,
+ ssa.OpARMRSCshiftRL,
+ ssa.OpARMANDshiftRL,
+ ssa.OpARMORshiftRL,
+ ssa.OpARMXORshiftRL,
+ ssa.OpARMBICshiftRL:
+ genshift(v.Op.Asm(), gc.SSARegNum(v.Args[0]), gc.SSARegNum(v.Args[1]), gc.SSARegNum(v), arm.SHIFT_LR, v.AuxInt)
+ case ssa.OpARMADDSshiftRL,
+ ssa.OpARMSUBSshiftRL,
+ ssa.OpARMRSBSshiftRL:
+ p := genshift(v.Op.Asm(), gc.SSARegNum(v.Args[0]), gc.SSARegNum(v.Args[1]), gc.SSARegNum(v), arm.SHIFT_LR, v.AuxInt)
+ p.Scond = arm.C_SBIT
+ case ssa.OpARMADDshiftRA,
+ ssa.OpARMADCshiftRA,
+ ssa.OpARMSUBshiftRA,
+ ssa.OpARMSBCshiftRA,
+ ssa.OpARMRSBshiftRA,
+ ssa.OpARMRSCshiftRA,
+ ssa.OpARMANDshiftRA,
+ ssa.OpARMORshiftRA,
+ ssa.OpARMXORshiftRA,
+ ssa.OpARMBICshiftRA:
+ genshift(v.Op.Asm(), gc.SSARegNum(v.Args[0]), gc.SSARegNum(v.Args[1]), gc.SSARegNum(v), arm.SHIFT_AR, v.AuxInt)
+ case ssa.OpARMADDSshiftRA,
+ ssa.OpARMSUBSshiftRA,
+ ssa.OpARMRSBSshiftRA:
+ p := genshift(v.Op.Asm(), gc.SSARegNum(v.Args[0]), gc.SSARegNum(v.Args[1]), gc.SSARegNum(v), arm.SHIFT_AR, v.AuxInt)
+ p.Scond = arm.C_SBIT
+ case ssa.OpARMMVNshiftLL:
+ genshift(v.Op.Asm(), 0, gc.SSARegNum(v.Args[0]), gc.SSARegNum(v), arm.SHIFT_LL, v.AuxInt)
+ case ssa.OpARMMVNshiftRL:
+ genshift(v.Op.Asm(), 0, gc.SSARegNum(v.Args[0]), gc.SSARegNum(v), arm.SHIFT_LR, v.AuxInt)
+ case ssa.OpARMMVNshiftRA:
+ genshift(v.Op.Asm(), 0, gc.SSARegNum(v.Args[0]), gc.SSARegNum(v), arm.SHIFT_AR, v.AuxInt)
+ case ssa.OpARMMVNshiftLLreg:
+ genregshift(v.Op.Asm(), 0, gc.SSARegNum(v.Args[0]), gc.SSARegNum(v.Args[1]), gc.SSARegNum(v), arm.SHIFT_LL)
+ case ssa.OpARMMVNshiftRLreg:
+ genregshift(v.Op.Asm(), 0, gc.SSARegNum(v.Args[0]), gc.SSARegNum(v.Args[1]), gc.SSARegNum(v), arm.SHIFT_LR)
+ case ssa.OpARMMVNshiftRAreg:
+ genregshift(v.Op.Asm(), 0, gc.SSARegNum(v.Args[0]), gc.SSARegNum(v.Args[1]), gc.SSARegNum(v), arm.SHIFT_AR)
+ case ssa.OpARMADDshiftLLreg,
+ ssa.OpARMADCshiftLLreg,
+ ssa.OpARMSUBshiftLLreg,
+ ssa.OpARMSBCshiftLLreg,
+ ssa.OpARMRSBshiftLLreg,
+ ssa.OpARMRSCshiftLLreg,
+ ssa.OpARMANDshiftLLreg,
+ ssa.OpARMORshiftLLreg,
+ ssa.OpARMXORshiftLLreg,
+ ssa.OpARMBICshiftLLreg:
+ genregshift(v.Op.Asm(), gc.SSARegNum(v.Args[0]), gc.SSARegNum(v.Args[1]), gc.SSARegNum(v.Args[2]), gc.SSARegNum(v), arm.SHIFT_LL)
+ case ssa.OpARMADDSshiftLLreg,
+ ssa.OpARMSUBSshiftLLreg,
+ ssa.OpARMRSBSshiftLLreg:
+ p := genregshift(v.Op.Asm(), gc.SSARegNum(v.Args[0]), gc.SSARegNum(v.Args[1]), gc.SSARegNum(v.Args[2]), gc.SSARegNum(v), arm.SHIFT_LL)
+ p.Scond = arm.C_SBIT
+ case ssa.OpARMADDshiftRLreg,
+ ssa.OpARMADCshiftRLreg,
+ ssa.OpARMSUBshiftRLreg,
+ ssa.OpARMSBCshiftRLreg,
+ ssa.OpARMRSBshiftRLreg,
+ ssa.OpARMRSCshiftRLreg,
+ ssa.OpARMANDshiftRLreg,
+ ssa.OpARMORshiftRLreg,
+ ssa.OpARMXORshiftRLreg,
+ ssa.OpARMBICshiftRLreg:
+ genregshift(v.Op.Asm(), gc.SSARegNum(v.Args[0]), gc.SSARegNum(v.Args[1]), gc.SSARegNum(v.Args[2]), gc.SSARegNum(v), arm.SHIFT_LR)
+ case ssa.OpARMADDSshiftRLreg,
+ ssa.OpARMSUBSshiftRLreg,
+ ssa.OpARMRSBSshiftRLreg:
+ p := genregshift(v.Op.Asm(), gc.SSARegNum(v.Args[0]), gc.SSARegNum(v.Args[1]), gc.SSARegNum(v.Args[2]), gc.SSARegNum(v), arm.SHIFT_LR)
+ p.Scond = arm.C_SBIT
+ case ssa.OpARMADDshiftRAreg,
+ ssa.OpARMADCshiftRAreg,
+ ssa.OpARMSUBshiftRAreg,
+ ssa.OpARMSBCshiftRAreg,
+ ssa.OpARMRSBshiftRAreg,
+ ssa.OpARMRSCshiftRAreg,
+ ssa.OpARMANDshiftRAreg,
+ ssa.OpARMORshiftRAreg,
+ ssa.OpARMXORshiftRAreg,
+ ssa.OpARMBICshiftRAreg:
+ genregshift(v.Op.Asm(), gc.SSARegNum(v.Args[0]), gc.SSARegNum(v.Args[1]), gc.SSARegNum(v.Args[2]), gc.SSARegNum(v), arm.SHIFT_AR)
+ case ssa.OpARMADDSshiftRAreg,
+ ssa.OpARMSUBSshiftRAreg,
+ ssa.OpARMRSBSshiftRAreg:
+ p := genregshift(v.Op.Asm(), gc.SSARegNum(v.Args[0]), gc.SSARegNum(v.Args[1]), gc.SSARegNum(v.Args[2]), gc.SSARegNum(v), arm.SHIFT_AR)
+ p.Scond = arm.C_SBIT
case ssa.OpARMHMUL,
ssa.OpARMHMULU:
// 32-bit high multiplication
@@ -385,6 +523,18 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.From.Type = obj.TYPE_CONST
p.From.Offset = v.AuxInt
p.Reg = gc.SSARegNum(v.Args[0])
+ case ssa.OpARMCMPshiftLL:
+ genshift(v.Op.Asm(), gc.SSARegNum(v.Args[0]), gc.SSARegNum(v.Args[1]), 0, arm.SHIFT_LL, v.AuxInt)
+ case ssa.OpARMCMPshiftRL:
+ genshift(v.Op.Asm(), gc.SSARegNum(v.Args[0]), gc.SSARegNum(v.Args[1]), 0, arm.SHIFT_LR, v.AuxInt)
+ case ssa.OpARMCMPshiftRA:
+ genshift(v.Op.Asm(), gc.SSARegNum(v.Args[0]), gc.SSARegNum(v.Args[1]), 0, arm.SHIFT_AR, v.AuxInt)
+ case ssa.OpARMCMPshiftLLreg:
+ genregshift(v.Op.Asm(), gc.SSARegNum(v.Args[0]), gc.SSARegNum(v.Args[1]), gc.SSARegNum(v.Args[2]), 0, arm.SHIFT_LL)
+ case ssa.OpARMCMPshiftRLreg:
+ genregshift(v.Op.Asm(), gc.SSARegNum(v.Args[0]), gc.SSARegNum(v.Args[1]), gc.SSARegNum(v.Args[2]), 0, arm.SHIFT_LR)
+ case ssa.OpARMCMPshiftRAreg:
+ genregshift(v.Op.Asm(), gc.SSARegNum(v.Args[0]), gc.SSARegNum(v.Args[1]), gc.SSARegNum(v.Args[2]), 0, arm.SHIFT_AR)
case ssa.OpARMMOVWaddr:
p := gc.Prog(arm.AMOVW)
p.From.Type = obj.TYPE_ADDR
@@ -440,11 +590,72 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.To.Type = obj.TYPE_MEM
p.To.Reg = gc.SSARegNum(v.Args[0])
gc.AddAux(&p.To, v)
+ case ssa.OpARMMOVWloadidx:
+ // this is just shift 0 bits
+ fallthrough
+ case ssa.OpARMMOVWloadshiftLL:
+ p := genshift(v.Op.Asm(), 0, gc.SSARegNum(v.Args[1]), gc.SSARegNum(v), arm.SHIFT_LL, v.AuxInt)
+ p.From.Reg = gc.SSARegNum(v.Args[0])
+ case ssa.OpARMMOVWloadshiftRL:
+ p := genshift(v.Op.Asm(), 0, gc.SSARegNum(v.Args[1]), gc.SSARegNum(v), arm.SHIFT_LR, v.AuxInt)
+ p.From.Reg = gc.SSARegNum(v.Args[0])
+ case ssa.OpARMMOVWloadshiftRA:
+ p := genshift(v.Op.Asm(), 0, gc.SSARegNum(v.Args[1]), gc.SSARegNum(v), arm.SHIFT_AR, v.AuxInt)
+ p.From.Reg = gc.SSARegNum(v.Args[0])
+ case ssa.OpARMMOVWstoreidx:
+ // this is just shift 0 bits
+ fallthrough
+ case ssa.OpARMMOVWstoreshiftLL:
+ p := gc.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = gc.SSARegNum(v.Args[2])
+ p.To.Type = obj.TYPE_SHIFT
+ p.To.Reg = gc.SSARegNum(v.Args[0])
+ p.To.Offset = int64(makeshift(gc.SSARegNum(v.Args[1]), arm.SHIFT_LL, v.AuxInt))
+ case ssa.OpARMMOVWstoreshiftRL:
+ p := gc.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = gc.SSARegNum(v.Args[2])
+ p.To.Type = obj.TYPE_SHIFT
+ p.To.Reg = gc.SSARegNum(v.Args[0])
+ p.To.Offset = int64(makeshift(gc.SSARegNum(v.Args[1]), arm.SHIFT_LR, v.AuxInt))
+ case ssa.OpARMMOVWstoreshiftRA:
+ p := gc.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = gc.SSARegNum(v.Args[2])
+ p.To.Type = obj.TYPE_SHIFT
+ p.To.Reg = gc.SSARegNum(v.Args[0])
+ p.To.Offset = int64(makeshift(gc.SSARegNum(v.Args[1]), arm.SHIFT_AR, v.AuxInt))
case ssa.OpARMMOVBreg,
ssa.OpARMMOVBUreg,
ssa.OpARMMOVHreg,
- ssa.OpARMMOVHUreg,
- ssa.OpARMMVN,
+ ssa.OpARMMOVHUreg:
+ a := v.Args[0]
+ for a.Op == ssa.OpCopy || a.Op == ssa.OpARMMOVWreg {
+ a = a.Args[0]
+ }
+ if a.Op == ssa.OpLoadReg {
+ t := a.Type
+ switch {
+ case v.Op == ssa.OpARMMOVBreg && t.Size() == 1 && t.IsSigned(),
+ v.Op == ssa.OpARMMOVBUreg && t.Size() == 1 && !t.IsSigned(),
+ v.Op == ssa.OpARMMOVHreg && t.Size() == 2 && t.IsSigned(),
+ v.Op == ssa.OpARMMOVHUreg && t.Size() == 2 && !t.IsSigned():
+ // arg is a proper-typed load, already zero/sign-extended, don't extend again
+ if gc.SSARegNum(v) == gc.SSARegNum(v.Args[0]) {
+ return
+ }
+ p := gc.Prog(arm.AMOVW)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = gc.SSARegNum(v.Args[0])
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = gc.SSARegNum(v)
+ return
+ default:
+ }
+ }
+ fallthrough
+ case ssa.OpARMMVN,
ssa.OpARMSQRTD,
ssa.OpARMMOVWF,
ssa.OpARMMOVWD,
@@ -467,6 +678,20 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.From.Reg = gc.SSARegNum(v.Args[0])
p.To.Type = obj.TYPE_REG
p.To.Reg = gc.SSARegNum(v)
+ case ssa.OpARMCMOVWHSconst:
+ p := gc.Prog(arm.AMOVW)
+ p.Scond = arm.C_SCOND_HS
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = v.AuxInt
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = gc.SSARegNum(v)
+ case ssa.OpARMCMOVWLSconst:
+ p := gc.Prog(arm.AMOVW)
+ p.Scond = arm.C_SCOND_LS
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = v.AuxInt
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = gc.SSARegNum(v)
case ssa.OpARMCALLstatic:
if v.Aux.(*gc.Sym) == gc.Deferreturn.Sym {
// Deferred calls will appear to be returning to
@@ -657,24 +882,6 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p4 := gc.Prog(arm.ABLT)
p4.To.Type = obj.TYPE_BRANCH
gc.Patch(p4, p)
- case ssa.OpARMLoweredZeromask:
- // int32(arg0>>1 - arg0) >> 31
- // RSB r0>>1, r0, r
- // SRA $31, r, r
- r0 := gc.SSARegNum(v.Args[0])
- r := gc.SSARegNum(v)
- p := gc.Prog(arm.ARSB)
- p.From.Type = obj.TYPE_SHIFT
- p.From.Offset = int64(r0&0xf) | arm.SHIFT_LR | 1<<7 // unsigned r0>>1
- p.Reg = r0
- p.To.Type = obj.TYPE_REG
- p.To.Reg = r
- p = gc.Prog(arm.ASRA)
- p.From.Type = obj.TYPE_CONST
- p.From.Offset = 31
- p.Reg = r
- p.To.Type = obj.TYPE_REG
- p.To.Reg = r
case ssa.OpVarDef:
gc.Gvardef(v.Aux.(*gc.Node))
case ssa.OpVarKill:
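The shift helpers added above pack a shifted-register operand into a single int64 stored in the Prog's Offset. As a self-contained sketch of that encoding (mirroring makeshift and shift.String above; the shiftLL etc. constants here stand in for the arm.SHIFT_* values, which this sketch assumes occupy bits 5-6):

    package main

    import "fmt"

    // Bits 0-3 hold the shifted register, bits 5-6 the shift kind
    // (<<, >>, ->, @>), bit 4 selects register- vs constant-shift,
    // bits 7-11 a constant amount, bits 8-11 a shift-amount register.
    const (
        shiftLL = 0 << 5 // logical left,     arm.SHIFT_LL
        shiftLR = 1 << 5 // logical right,    arm.SHIFT_LR
        shiftAR = 2 << 5 // arithmetic right, arm.SHIFT_AR
        shiftRR = 3 << 5 // rotate right,     arm.SHIFT_RR
    )

    // makeshift encodes a register shifted by a constant, as above.
    func makeshift(reg int16, typ int64, s int64) int64 {
        return int64(reg&0xf) | typ | (s&31)<<7
    }

    func main() {
        v := makeshift(1, shiftLL, 3) // R1<<3
        op := "<<>>->@>"[((v>>5)&3)<<1:]
        fmt.Printf("R%d%c%c%d\n", v&15, op[0], op[1], (v>>7)&31) // prints R1<<3
    }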
diff --git a/src/cmd/compile/internal/gc/testdata/arith_ssa.go b/src/cmd/compile/internal/gc/testdata/arith_ssa.go
index 7c82bbd6ce..d850ce27b2 100644
--- a/src/cmd/compile/internal/gc/testdata/arith_ssa.go
+++ b/src/cmd/compile/internal/gc/testdata/arith_ssa.go
@@ -553,6 +553,445 @@ func testOrPhi() {
}
}
+//go:noinline
+func addshiftLL_ssa(a, b uint32) uint32 {
+ return a + b<<3
+}
+
+//go:noinline
+func subshiftLL_ssa(a, b uint32) uint32 {
+ return a - b<<3
+}
+
+//go:noinline
+func rsbshiftLL_ssa(a, b uint32) uint32 {
+ return a<<3 - b
+}
+
+//go:noinline
+func andshiftLL_ssa(a, b uint32) uint32 {
+ return a & (b << 3)
+}
+
+//go:noinline
+func orshiftLL_ssa(a, b uint32) uint32 {
+ return a | b<<3
+}
+
+//go:noinline
+func xorshiftLL_ssa(a, b uint32) uint32 {
+ return a ^ b<<3
+}
+
+//go:noinline
+func bicshiftLL_ssa(a, b uint32) uint32 {
+ return a &^ (b << 3)
+}
+
+//go:noinline
+func notshiftLL_ssa(a uint32) uint32 {
+ return ^(a << 3)
+}
+
+//go:noinline
+func addshiftRL_ssa(a, b uint32) uint32 {
+ return a + b>>3
+}
+
+//go:noinline
+func subshiftRL_ssa(a, b uint32) uint32 {
+ return a - b>>3
+}
+
+//go:noinline
+func rsbshiftRL_ssa(a, b uint32) uint32 {
+ return a>>3 - b
+}
+
+//go:noinline
+func andshiftRL_ssa(a, b uint32) uint32 {
+ return a & (b >> 3)
+}
+
+//go:noinline
+func orshiftRL_ssa(a, b uint32) uint32 {
+ return a | b>>3
+}
+
+//go:noinline
+func xorshiftRL_ssa(a, b uint32) uint32 {
+ return a ^ b>>3
+}
+
+//go:noinline
+func bicshiftRL_ssa(a, b uint32) uint32 {
+ return a &^ (b >> 3)
+}
+
+//go:noinline
+func notshiftRL_ssa(a uint32) uint32 {
+ return ^(a >> 3)
+}
+
+//go:noinline
+func addshiftRA_ssa(a, b int32) int32 {
+ return a + b>>3
+}
+
+//go:noinline
+func subshiftRA_ssa(a, b int32) int32 {
+ return a - b>>3
+}
+
+//go:noinline
+func rsbshiftRA_ssa(a, b int32) int32 {
+ return a>>3 - b
+}
+
+//go:noinline
+func andshiftRA_ssa(a, b int32) int32 {
+ return a & (b >> 3)
+}
+
+//go:noinline
+func orshiftRA_ssa(a, b int32) int32 {
+ return a | b>>3
+}
+
+//go:noinline
+func xorshiftRA_ssa(a, b int32) int32 {
+ return a ^ b>>3
+}
+
+//go:noinline
+func bicshiftRA_ssa(a, b int32) int32 {
+ return a &^ (b >> 3)
+}
+
+//go:noinline
+func notshiftRA_ssa(a int32) int32 {
+ return ^(a >> 3)
+}
+
+//go:noinline
+func addshiftLLreg_ssa(a, b uint32, s uint8) uint32 {
+ return a + b<<s
+}
+
+//go:noinline
+func subshiftLLreg_ssa(a, b uint32, s uint8) uint32 {
+ return a - b<<s
+}
+
+//go:noinline
+func rsbshiftLLreg_ssa(a, b uint32, s uint8) uint32 {
+ return a<<s - b
+}
+
+//go:noinline
+func andshiftLLreg_ssa(a, b uint32, s uint8) uint32 {
+ return a & (b << s)
+}
+
+//go:noinline
+func orshiftLLreg_ssa(a, b uint32, s uint8) uint32 {
+ return a | b<<s
+}
+
+//go:noinline
+func xorshiftLLreg_ssa(a, b uint32, s uint8) uint32 {
+ return a ^ b<<s
+}
+
+//go:noinline
+func bicshiftLLreg_ssa(a, b uint32, s uint8) uint32 {
+ return a &^ (b << s)
+}
+
+//go:noinline
+func notshiftLLreg_ssa(a uint32, s uint8) uint32 {
+ return ^(a << s)
+}
+
+//go:noinline
+func addshiftRLreg_ssa(a, b uint32, s uint8) uint32 {
+ return a + b>>s
+}
+
+//go:noinline
+func subshiftRLreg_ssa(a, b uint32, s uint8) uint32 {
+ return a - b>>s
+}
+
+//go:noinline
+func rsbshiftRLreg_ssa(a, b uint32, s uint8) uint32 {
+ return a>>s - b
+}
+
+//go:noinline
+func andshiftRLreg_ssa(a, b uint32, s uint8) uint32 {
+ return a & (b >> s)
+}
+
+//go:noinline
+func orshiftRLreg_ssa(a, b uint32, s uint8) uint32 {
+ return a | b>>s
+}
+
+//go:noinline
+func xorshiftRLreg_ssa(a, b uint32, s uint8) uint32 {
+ return a ^ b>>s
+}
+
+//go:noinline
+func bicshiftRLreg_ssa(a, b uint32, s uint8) uint32 {
+ return a &^ (b >> s)
+}
+
+//go:noinline
+func notshiftRLreg_ssa(a uint32, s uint8) uint32 {
+ return ^(a >> s)
+}
+
+//go:noinline
+func addshiftRAreg_ssa(a, b int32, s uint8) int32 {
+ return a + b>>s
+}
+
+//go:noinline
+func subshiftRAreg_ssa(a, b int32, s uint8) int32 {
+ return a - b>>s
+}
+
+//go:noinline
+func rsbshiftRAreg_ssa(a, b int32, s uint8) int32 {
+ return a>>s - b
+}
+
+//go:noinline
+func andshiftRAreg_ssa(a, b int32, s uint8) int32 {
+ return a & (b >> s)
+}
+
+//go:noinline
+func orshiftRAreg_ssa(a, b int32, s uint8) int32 {
+ return a | b>>s
+}
+
+//go:noinline
+func xorshiftRAreg_ssa(a, b int32, s uint8) int32 {
+ return a ^ b>>s
+}
+
+//go:noinline
+func bicshiftRAreg_ssa(a, b int32, s uint8) int32 {
+ return a &^ (b >> s)
+}
+
+//go:noinline
+func notshiftRAreg_ssa(a int32, s uint8) int32 {
+ return ^(a >> s)
+}
+
+// test ARM shifted ops
+func testShiftedOps() {
+ a, b := uint32(10), uint32(42)
+ if want, got := a+b<<3, addshiftLL_ssa(a, b); got != want {
+ println("addshiftLL_ssa(10, 42) =", got, " want ", want)
+ failed = true
+ }
+ if want, got := a-b<<3, subshiftLL_ssa(a, b); got != want {
+ println("subshiftLL_ssa(10, 42) =", got, " want ", want)
+ failed = true
+ }
+ if want, got := a<<3-b, rsbshiftLL_ssa(a, b); got != want {
+ println("rsbshiftLL_ssa(10, 42) =", got, " want ", want)
+ failed = true
+ }
+ if want, got := a&(b<<3), andshiftLL_ssa(a, b); got != want {
+ println("andshiftLL_ssa(10, 42) =", got, " want ", want)
+ failed = true
+ }
+ if want, got := a|b<<3, orshiftLL_ssa(a, b); got != want {
+ println("orshiftLL_ssa(10, 42) =", got, " want ", want)
+ failed = true
+ }
+ if want, got := a^b<<3, xorshiftLL_ssa(a, b); got != want {
+ println("xorshiftLL_ssa(10, 42) =", got, " want ", want)
+ failed = true
+ }
+ if want, got := a&^(b<<3), bicshiftLL_ssa(a, b); got != want {
+ println("bicshiftLL_ssa(10, 42) =", got, " want ", want)
+ failed = true
+ }
+ if want, got := ^(a << 3), notshiftLL_ssa(a); got != want {
+ println("notshiftLL_ssa(10) =", got, " want ", want)
+ failed = true
+ }
+ if want, got := a+b>>3, addshiftRL_ssa(a, b); got != want {
+ println("addshiftRL_ssa(10, 42) =", got, " want ", want)
+ failed = true
+ }
+ if want, got := a-b>>3, subshiftRL_ssa(a, b); got != want {
+ println("subshiftRL_ssa(10, 42) =", got, " want ", want)
+ failed = true
+ }
+ if want, got := a>>3-b, rsbshiftRL_ssa(a, b); got != want {
+ println("rsbshiftRL_ssa(10, 42) =", got, " want ", want)
+ failed = true
+ }
+ if want, got := a&(b>>3), andshiftRL_ssa(a, b); got != want {
+ println("andshiftRL_ssa(10, 42) =", got, " want ", want)
+ failed = true
+ }
+ if want, got := a|b>>3, orshiftRL_ssa(a, b); got != want {
+ println("orshiftRL_ssa(10, 42) =", got, " want ", want)
+ failed = true
+ }
+ if want, got := a^b>>3, xorshiftRL_ssa(a, b); got != want {
+ println("xorshiftRL_ssa(10, 42) =", got, " want ", want)
+ failed = true
+ }
+ if want, got := a&^(b>>3), bicshiftRL_ssa(a, b); got != want {
+ println("bicshiftRL_ssa(10, 42) =", got, " want ", want)
+ failed = true
+ }
+ if want, got := ^(a >> 3), notshiftRL_ssa(a); got != want {
+ println("notshiftRL_ssa(10) =", got, " want ", want)
+ failed = true
+ }
+ c, d := int32(10), int32(-42)
+ if want, got := c+d>>3, addshiftRA_ssa(c, d); got != want {
+ println("addshiftRA_ssa(10, -42) =", got, " want ", want)
+ failed = true
+ }
+ if want, got := c-d>>3, subshiftRA_ssa(c, d); got != want {
+ println("subshiftRA_ssa(10, -42) =", got, " want ", want)
+ failed = true
+ }
+ if want, got := c>>3-d, rsbshiftRA_ssa(c, d); got != want {
+ println("rsbshiftRA_ssa(10, -42) =", got, " want ", want)
+ failed = true
+ }
+ if want, got := c&(d>>3), andshiftRA_ssa(c, d); got != want {
+ println("andshiftRA_ssa(10, -42) =", got, " want ", want)
+ failed = true
+ }
+ if want, got := c|d>>3, orshiftRA_ssa(c, d); got != want {
+ println("orshiftRA_ssa(10, -42) =", got, " want ", want)
+ failed = true
+ }
+ if want, got := c^d>>3, xorshiftRA_ssa(c, d); got != want {
+ println("xorshiftRA_ssa(10, -42) =", got, " want ", want)
+ failed = true
+ }
+ if want, got := c&^(d>>3), bicshiftRA_ssa(c, d); got != want {
+ println("bicshiftRA_ssa(10, -42) =", got, " want ", want)
+ failed = true
+ }
+ if want, got := ^(d >> 3), notshiftRA_ssa(d); got != want {
+ println("notshiftRA_ssa(-42) =", got, " want ", want)
+ failed = true
+ }
+ s := uint8(3)
+ if want, got := a+b<<s, addshiftLLreg_ssa(a, b, s); got != want {
+ println("addshiftLLreg_ssa(10, 42, 3) =", got, " want ", want)
+ failed = true
+ }
+ if want, got := a-b<<s, subshiftLLreg_ssa(a, b, s); got != want {
+ println("subshiftLLreg_ssa(10, 42, 3) =", got, " want ", want)
+ failed = true
+ }
+ if want, got := a<<s-b, rsbshiftLLreg_ssa(a, b, s); got != want {
+ println("rsbshiftLLreg_ssa(10, 42, 3) =", got, " want ", want)
+ failed = true
+ }
+ if want, got := a&(b<<s), andshiftLLreg_ssa(a, b, s); got != want {
+ println("andshiftLLreg_ssa(10, 42, 3) =", got, " want ", want)
+ failed = true
+ }
+ if want, got := a|b<<s, orshiftLLreg_ssa(a, b, s); got != want {
+ println("orshiftLLreg_ssa(10, 42, 3) =", got, " want ", want)
+ failed = true
+ }
+ if want, got := a^b<<s, xorshiftLLreg_ssa(a, b, s); got != want {
+ println("xorshiftLLreg_ssa(10, 42, 3) =", got, " want ", want)
+ failed = true
+ }
+ if want, got := a&^(b<<s), bicshiftLLreg_ssa(a, b, s); got != want {
+ println("bicshiftLLreg_ssa(10, 42, 3) =", got, " want ", want)
+ failed = true
+ }
+ if want, got := ^(a << s), notshiftLLreg_ssa(a, s); got != want {
+ println("notshiftLLreg_ssa(10) =", got, " want ", want)
+ failed = true
+ }
+ if want, got := a+b>>s, addshiftRLreg_ssa(a, b, s); got != want {
+ println("addshiftRLreg_ssa(10, 42, 3) =", got, " want ", want)
+ failed = true
+ }
+ if want, got := a-b>>s, subshiftRLreg_ssa(a, b, s); got != want {
+ println("subshiftRLreg_ssa(10, 42, 3) =", got, " want ", want)
+ failed = true
+ }
+ if want, got := a>>s-b, rsbshiftRLreg_ssa(a, b, s); got != want {
+ println("rsbshiftRLreg_ssa(10, 42, 3) =", got, " want ", want)
+ failed = true
+ }
+ if want, got := a&(b>>s), andshiftRLreg_ssa(a, b, s); got != want {
+ println("andshiftRLreg_ssa(10, 42, 3) =", got, " want ", want)
+ failed = true
+ }
+ if want, got := a|b>>s, orshiftRLreg_ssa(a, b, s); got != want {
+ println("orshiftRLreg_ssa(10, 42, 3) =", got, " want ", want)
+ failed = true
+ }
+ if want, got := a^b>>s, xorshiftRLreg_ssa(a, b, s); got != want {
+ println("xorshiftRLreg_ssa(10, 42, 3) =", got, " want ", want)
+ failed = true
+ }
+ if want, got := a&^(b>>s), bicshiftRLreg_ssa(a, b, s); got != want {
+ println("bicshiftRLreg_ssa(10, 42, 3) =", got, " want ", want)
+ failed = true
+ }
+ if want, got := ^(a >> s), notshiftRLreg_ssa(a, s); got != want {
+ println("notshiftRLreg_ssa(10) =", got, " want ", want)
+ failed = true
+ }
+ if want, got := c+d>>s, addshiftRAreg_ssa(c, d, s); got != want {
+ println("addshiftRAreg_ssa(10, -42, 3) =", got, " want ", want)
+ failed = true
+ }
+ if want, got := c-d>>s, subshiftRAreg_ssa(c, d, s); got != want {
+ println("subshiftRAreg_ssa(10, -42, 3) =", got, " want ", want)
+ failed = true
+ }
+ if want, got := c>>s-d, rsbshiftRAreg_ssa(c, d, s); got != want {
+ println("rsbshiftRAreg_ssa(10, -42, 3) =", got, " want ", want)
+ failed = true
+ }
+ if want, got := c&(d>>s), andshiftRAreg_ssa(c, d, s); got != want {
+ println("andshiftRAreg_ssa(10, -42, 3) =", got, " want ", want)
+ failed = true
+ }
+ if want, got := c|d>>s, orshiftRAreg_ssa(c, d, s); got != want {
+ println("orshiftRAreg_ssa(10, -42, 3) =", got, " want ", want)
+ failed = true
+ }
+ if want, got := c^d>>s, xorshiftRAreg_ssa(c, d, s); got != want {
+ println("xorshiftRAreg_ssa(10, -42, 3) =", got, " want ", want)
+ failed = true
+ }
+ if want, got := c&^(d>>s), bicshiftRAreg_ssa(c, d, s); got != want {
+ println("bicshiftRAreg_ssa(10, -42, 3) =", got, " want ", want)
+ failed = true
+ }
+ if want, got := ^(d >> s), notshiftRAreg_ssa(d, s); got != want {
+ println("notshiftRAreg_ssa(-42, 3) =", got, " want ", want)
+ failed = true
+ }
+}
+
var failed = false
func main() {
@@ -573,6 +1012,7 @@ func main() {
testLoadCombine()
testLoadSymCombine()
testShiftRemoval()
+ testShiftedOps()
if failed {
panic("failed")
diff --git a/src/cmd/compile/internal/ssa/gen/ARM.rules b/src/cmd/compile/internal/ssa/gen/ARM.rules
index ee68ad540f..7ec0e502ec 100644
--- a/src/cmd/compile/internal/ssa/gen/ARM.rules
+++ b/src/cmd/compile/internal/ssa/gen/ARM.rules
@@ -87,40 +87,42 @@
(Not x) -> (XORconst [1] x)
// shifts
-(Lsh32x32 x y) -> (SLL x y)
-(Lsh32x16 x y) -> (SLL x (ZeroExt16to32 y))
+// hardware instruction uses only the low byte of the shift
+// we compare to 256 to ensure Go semantics for large shifts
+(Lsh32x32 x y) -> (CMOVWHSconst (SLL <x.Type> x y) (CMPconst [256] y) [0])
+(Lsh32x16 x y) -> (CMOVWHSconst (SLL <x.Type> x (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0])
(Lsh32x8 x y) -> (SLL x (ZeroExt8to32 y))
-(Lsh16x32 x y) -> (SLL x y)
-(Lsh16x16 x y) -> (SLL x (ZeroExt16to32 y))
+(Lsh16x32 x y) -> (CMOVWHSconst (SLL <x.Type> x y) (CMPconst [256] y) [0])
+(Lsh16x16 x y) -> (CMOVWHSconst (SLL <x.Type> x (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0])
(Lsh16x8 x y) -> (SLL x (ZeroExt8to32 y))
-(Lsh8x32 x y) -> (SLL x y)
-(Lsh8x16 x y) -> (SLL x (ZeroExt16to32 y))
+(Lsh8x32 x y) -> (CMOVWHSconst (SLL <x.Type> x y) (CMPconst [256] y) [0])
+(Lsh8x16 x y) -> (CMOVWHSconst (SLL <x.Type> x (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0])
(Lsh8x8 x y) -> (SLL x (ZeroExt8to32 y))
-(Rsh32Ux32 x y) -> (SRL x y)
-(Rsh32Ux16 x y) -> (SRL x (ZeroExt16to32 y))
+(Rsh32Ux32 x y) -> (CMOVWHSconst (SRL <x.Type> x y) (CMPconst [256] y) [0])
+(Rsh32Ux16 x y) -> (CMOVWHSconst (SRL <x.Type> x (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0])
(Rsh32Ux8 x y) -> (SRL x (ZeroExt8to32 y))
-(Rsh16Ux32 x y) -> (SRL (ZeroExt16to32 x) y)
-(Rsh16Ux16 x y) -> (SRL (ZeroExt16to32 x) (ZeroExt16to32 y))
+(Rsh16Ux32 x y) -> (CMOVWHSconst (SRL <x.Type> (ZeroExt16to32 x) y) (CMPconst [256] y) [0])
+(Rsh16Ux16 x y) -> (CMOVWHSconst (SRL <x.Type> (ZeroExt16to32 x) (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0])
(Rsh16Ux8 x y) -> (SRL (ZeroExt16to32 x) (ZeroExt8to32 y))
-(Rsh8Ux32 x y) -> (SRL (ZeroExt8to32 x) y)
-(Rsh8Ux16 x y) -> (SRL (ZeroExt8to32 x) (ZeroExt16to32 y))
+(Rsh8Ux32 x y) -> (CMOVWHSconst (SRL <x.Type> (ZeroExt8to32 x) y) (CMPconst [256] y) [0])
+(Rsh8Ux16 x y) -> (CMOVWHSconst (SRL <x.Type> (ZeroExt8to32 x) (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0])
(Rsh8Ux8 x y) -> (SRL (ZeroExt8to32 x) (ZeroExt8to32 y))
-(Rsh32x32 x y) -> (SRA x y)
-(Rsh32x16 x y) -> (SRA x (ZeroExt16to32 y))
+(Rsh32x32 x y) -> (SRAcond x y (CMPconst [256] y))
+(Rsh32x16 x y) -> (SRAcond x (ZeroExt16to32 y) (CMPconst [256] (ZeroExt16to32 y)))
(Rsh32x8 x y) -> (SRA x (ZeroExt8to32 y))
-(Rsh16x32 x y) -> (SRA (SignExt16to32 x) y)
-(Rsh16x16 x y) -> (SRA (SignExt16to32 x) (ZeroExt16to32 y))
+(Rsh16x32 x y) -> (SRAcond (SignExt16to32 x) y (CMPconst [256] y))
+(Rsh16x16 x y) -> (SRAcond (SignExt16to32 x) (ZeroExt16to32 y) (CMPconst [256] (ZeroExt16to32 y)))
(Rsh16x8 x y) -> (SRA (SignExt16to32 x) (ZeroExt8to32 y))
-(Rsh8x32 x y) -> (SRA (SignExt8to32 x) y)
-(Rsh8x16 x y) -> (SRA (SignExt8to32 x) (ZeroExt16to32 y))
+(Rsh8x32 x y) -> (SRAcond (SignExt8to32 x) y (CMPconst [256] y))
+(Rsh8x16 x y) -> (SRAcond (SignExt8to32 x) (ZeroExt16to32 y) (CMPconst [256] (ZeroExt16to32 y)))
(Rsh8x8 x y) -> (SRA (SignExt8to32 x) (ZeroExt8to32 y))
// constant shifts
@@ -177,7 +179,7 @@
(SignExt16to32 x) -> (MOVHreg x)
(Signmask x) -> (SRAconst x [31])
-(Zeromask x) -> (LoweredZeromask x)
+(Zeromask x) -> (SRAconst (RSBshiftRL <config.fe.TypeInt32()> x x [1]) [31]) // sign bit of uint32(x)>>1 - x
// float <-> int conversion
(Cvt32to32F x) -> (MOVWF x)
@@ -452,6 +454,11 @@
(MOVFload [off] {sym} ptr (MOVFstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> x
(MOVDload [off] {sym} ptr (MOVDstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> x
+(MOVWloadidx ptr idx (MOVWstoreidx ptr2 idx x _)) && isSamePtr(ptr, ptr2) -> x
+(MOVWloadshiftLL ptr idx [c] (MOVWstoreshiftLL ptr2 idx [d] x _)) && c==d && isSamePtr(ptr, ptr2) -> x
+(MOVWloadshiftRL ptr idx [c] (MOVWstoreshiftRL ptr2 idx [d] x _)) && c==d && isSamePtr(ptr, ptr2) -> x
+(MOVWloadshiftRA ptr idx [c] (MOVWstoreshiftRA ptr2 idx [d] x _)) && c==d && isSamePtr(ptr, ptr2) -> x
+
// fold constant into arithmetic ops
(ADD (MOVWconst [c]) x) -> (ADDconst [c] x)
(ADD x (MOVWconst [c])) -> (ADDconst [c] x)
@@ -485,9 +492,6 @@
(CMP x (MOVWconst [c])) -> (CMPconst [c] x)
(CMP (MOVWconst [c]) x) -> (InvertFlags (CMPconst [c] x))
-(LoweredZeromask (MOVWconst [0])) -> (MOVWconst [0])
-(LoweredZeromask (MOVWconst [c])) && c != 0 -> (MOVWconst [0xffffffff])
-
// don't extend after proper load
// MOVWreg instruction is not emitted if src and dst registers are same, but it ensures the type.
(MOVBreg x:(MOVBload _ _)) -> (MOVWreg x)
@@ -504,6 +508,15 @@
(MOVBreg (ANDconst [c] x)) && c & 0x80 == 0 -> (ANDconst [c&0x7f] x)
(MOVHreg (ANDconst [c] x)) && c & 0x8000 == 0 -> (ANDconst [c&0x7fff] x)
+// fold double extensions
+(MOVBreg x:(MOVBreg _)) -> (MOVWreg x)
+(MOVBUreg x:(MOVBUreg _)) -> (MOVWreg x)
+(MOVHreg x:(MOVBreg _)) -> (MOVWreg x)
+(MOVHreg x:(MOVBUreg _)) -> (MOVWreg x)
+(MOVHreg x:(MOVHreg _)) -> (MOVWreg x)
+(MOVHUreg x:(MOVBUreg _)) -> (MOVWreg x)
+(MOVHUreg x:(MOVHUreg _)) -> (MOVWreg x)
+
// don't extend before store
(MOVBstore [off] {sym} ptr (MOVBreg x) mem) -> (MOVBstore [off] {sym} ptr x mem)
(MOVBstore [off] {sym} ptr (MOVBUreg x) mem) -> (MOVBstore [off] {sym} ptr x mem)
@@ -513,25 +526,49 @@
(MOVHstore [off] {sym} ptr (MOVHUreg x) mem) -> (MOVHstore [off] {sym} ptr x mem)
// mul by constant
-(MUL x (MOVWconst [-1])) -> (RSBconst [0] x)
+(MUL x (MOVWconst [c])) && int32(c) == -1 -> (RSBconst [0] x)
(MUL _ (MOVWconst [0])) -> (MOVWconst [0])
(MUL x (MOVWconst [1])) -> x
(MUL x (MOVWconst [c])) && isPowerOfTwo(c) -> (SLLconst [log2(c)] x)
-
-(MUL (MOVWconst [-1]) x) -> (RSBconst [0] x)
+(MUL x (MOVWconst [c])) && isPowerOfTwo(c-1) && int32(c) >= 3 -> (ADDshiftLL x x [log2(c-1)])
+(MUL x (MOVWconst [c])) && isPowerOfTwo(c+1) && int32(c) >= 7 -> (RSBshiftLL x x [log2(c+1)])
+(MUL x (MOVWconst [c])) && c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c) -> (SLLconst [log2(c/3)] (ADDshiftLL <x.Type> x x [1]))
+(MUL x (MOVWconst [c])) && c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c) -> (SLLconst [log2(c/5)] (ADDshiftLL <x.Type> x x [2]))
+(MUL x (MOVWconst [c])) && c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c) -> (SLLconst [log2(c/7)] (RSBshiftLL <x.Type> x x [3]))
+(MUL x (MOVWconst [c])) && c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c) -> (SLLconst [log2(c/9)] (ADDshiftLL <x.Type> x x [3]))
+
+(MUL (MOVWconst [c]) x) && int32(c) == -1 -> (RSBconst [0] x)
(MUL (MOVWconst [0]) _) -> (MOVWconst [0])
(MUL (MOVWconst [1]) x) -> x
(MUL (MOVWconst [c]) x) && isPowerOfTwo(c) -> (SLLconst [log2(c)] x)
-
-(MULA x (MOVWconst [-1]) a) -> (SUB a x)
+(MUL (MOVWconst [c]) x) && isPowerOfTwo(c-1) && int32(c) >= 3 -> (ADDshiftLL x x [log2(c-1)])
+(MUL (MOVWconst [c]) x) && isPowerOfTwo(c+1) && int32(c) >= 7 -> (RSBshiftLL x x [log2(c+1)])
+(MUL (MOVWconst [c]) x) && c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c) -> (SLLconst [log2(c/3)] (ADDshiftLL <x.Type> x x [1]))
+(MUL (MOVWconst [c]) x) && c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c) -> (SLLconst [log2(c/5)] (ADDshiftLL <x.Type> x x [2]))
+(MUL (MOVWconst [c]) x) && c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c) -> (SLLconst [log2(c/7)] (RSBshiftLL <x.Type> x x [3]))
+(MUL (MOVWconst [c]) x) && c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c) -> (SLLconst [log2(c/9)] (ADDshiftLL <x.Type> x x [3]))
+
+(MULA x (MOVWconst [c]) a) && int32(c) == -1 -> (SUB a x)
(MULA _ (MOVWconst [0]) a) -> a
(MULA x (MOVWconst [1]) a) -> (ADD x a)
(MULA x (MOVWconst [c]) a) && isPowerOfTwo(c) -> (ADD (SLLconst <x.Type> [log2(c)] x) a)
-
-(MULA (MOVWconst [-1]) x a) -> (SUB a x)
+(MULA x (MOVWconst [c]) a) && isPowerOfTwo(c-1) && int32(c) >= 3 -> (ADD (ADDshiftLL <x.Type> x x [log2(c-1)]) a)
+(MULA x (MOVWconst [c]) a) && isPowerOfTwo(c+1) && int32(c) >= 7 -> (ADD (RSBshiftLL <x.Type> x x [log2(c+1)]) a)
+(MULA x (MOVWconst [c]) a) && c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c) -> (ADD (SLLconst <x.Type> [log2(c/3)] (ADDshiftLL <x.Type> x x [1])) a)
+(MULA x (MOVWconst [c]) a) && c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c) -> (ADD (SLLconst <x.Type> [log2(c/5)] (ADDshiftLL <x.Type> x x [2])) a)
+(MULA x (MOVWconst [c]) a) && c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c) -> (ADD (SLLconst <x.Type> [log2(c/7)] (RSBshiftLL <x.Type> x x [3])) a)
+(MULA x (MOVWconst [c]) a) && c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c) -> (ADD (SLLconst <x.Type> [log2(c/9)] (ADDshiftLL <x.Type> x x [3])) a)
+
+(MULA (MOVWconst [c]) x a) && int32(c) == -1 -> (SUB a x)
(MULA (MOVWconst [0]) _ a) -> a
(MULA (MOVWconst [1]) x a) -> (ADD x a)
(MULA (MOVWconst [c]) x a) && isPowerOfTwo(c) -> (ADD (SLLconst <x.Type> [log2(c)] x) a)
+(MULA (MOVWconst [c]) x a) && isPowerOfTwo(c-1) && int32(c) >= 3 -> (ADD (ADDshiftLL <x.Type> x x [log2(c-1)]) a)
+(MULA (MOVWconst [c]) x a) && isPowerOfTwo(c+1) && int32(c) >= 7 -> (ADD (RSBshiftLL <x.Type> x x [log2(c+1)]) a)
+(MULA (MOVWconst [c]) x a) && c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c) -> (ADD (SLLconst <x.Type> [log2(c/3)] (ADDshiftLL <x.Type> x x [1])) a)
+(MULA (MOVWconst [c]) x a) && c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c) -> (ADD (SLLconst <x.Type> [log2(c/5)] (ADDshiftLL <x.Type> x x [2])) a)
+(MULA (MOVWconst [c]) x a) && c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c) -> (ADD (SLLconst <x.Type> [log2(c/7)] (RSBshiftLL <x.Type> x x [3])) a)
+(MULA (MOVWconst [c]) x a) && c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c) -> (ADD (SLLconst <x.Type> [log2(c/9)] (ADDshiftLL <x.Type> x x [3])) a)
// div by constant
(DIVU x (MOVWconst [1])) -> x
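The multiply-by-constant rules above extend strength reduction beyond powers of two using the new shifted ops: for example, x*7 becomes x<<3 - x (RSBshiftLL x x [3]) and x*10 becomes (x + x<<2)<<1 (SLLconst [1] (ADDshiftLL x x [2])). A quick plain-Go sketch checking those identities (not compiler code; the identities hold modulo 2^32, matching uint32 wraparound):

    func mul7(x uint32) uint32  { return x<<3 - x }        // RSBshiftLL x x [3]
    func mul10(x uint32) uint32 { return (x + x<<2) << 1 } // SLLconst [1] (ADDshiftLL x x [2])

    func main() {
        for _, x := range []uint32{0, 1, 3, 1 << 30} {
            if mul7(x) != 7*x || mul10(x) != 10*x {
                panic("identity mismatch")
            }
        }
    }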
@@ -696,6 +733,28 @@
(GreaterEqual (InvertFlags x)) -> (LessEqual x)
(GreaterEqualU (InvertFlags x)) -> (LessEqualU x)
+// absorb flag constants into conditional instructions
+(CMOVWLSconst _ (FlagEQ) [c]) -> (MOVWconst [c])
+(CMOVWLSconst _ (FlagLT_ULT) [c]) -> (MOVWconst [c])
+(CMOVWLSconst x (FlagLT_UGT)) -> x
+(CMOVWLSconst _ (FlagGT_ULT) [c]) -> (MOVWconst [c])
+(CMOVWLSconst x (FlagGT_UGT)) -> x
+
+(CMOVWHSconst _ (FlagEQ) [c]) -> (MOVWconst [c])
+(CMOVWHSconst x (FlagLT_ULT)) -> x
+(CMOVWHSconst _ (FlagLT_UGT) [c]) -> (MOVWconst [c])
+(CMOVWHSconst x (FlagGT_ULT)) -> x
+(CMOVWHSconst _ (FlagGT_UGT) [c]) -> (MOVWconst [c])
+
+(CMOVWLSconst x (InvertFlags flags) [c]) -> (CMOVWHSconst x flags [c])
+(CMOVWHSconst x (InvertFlags flags) [c]) -> (CMOVWLSconst x flags [c])
+
+(SRAcond x _ (FlagEQ)) -> (SRAconst x [31])
+(SRAcond x y (FlagLT_ULT)) -> (SRA x y)
+(SRAcond x _ (FlagLT_UGT)) -> (SRAconst x [31])
+(SRAcond x y (FlagGT_ULT)) -> (SRA x y)
+(SRAcond x _ (FlagGT_UGT)) -> (SRAconst x [31])
+
// remove redundant *const ops
(ADDconst [0] x) -> x
(SUBconst [0] x) -> x
@@ -741,10 +800,377 @@
(XORconst [c] (XORconst [d] x)) -> (XORconst [c^d] x)
(BICconst [c] (MOVWconst [d])) -> (MOVWconst [d&^c])
(MVN (MOVWconst [c])) -> (MOVWconst [^c])
+(MOVBreg (MOVWconst [c])) -> (MOVWconst [int64(int8(c))])
+(MOVBUreg (MOVWconst [c])) -> (MOVWconst [int64(uint8(c))])
+(MOVHreg (MOVWconst [c])) -> (MOVWconst [int64(int16(c))])
+(MOVHUreg (MOVWconst [c])) -> (MOVWconst [int64(uint16(c))])
+(MOVWreg (MOVWconst [c])) -> (MOVWconst [c])
+
+// absorb shifts into ops
+(ADD x (SLLconst [c] y)) -> (ADDshiftLL x y [c])
+(ADD (SLLconst [c] y) x) -> (ADDshiftLL x y [c])
+(ADD x (SRLconst [c] y)) -> (ADDshiftRL x y [c])
+(ADD (SRLconst [c] y) x) -> (ADDshiftRL x y [c])
+(ADD x (SRAconst [c] y)) -> (ADDshiftRA x y [c])
+(ADD (SRAconst [c] y) x) -> (ADDshiftRA x y [c])
+(ADD x (SLL y z)) -> (ADDshiftLLreg x y z)
+(ADD (SLL y z) x) -> (ADDshiftLLreg x y z)
+(ADD x (SRL y z)) -> (ADDshiftRLreg x y z)
+(ADD (SRL y z) x) -> (ADDshiftRLreg x y z)
+(ADD x (SRA y z)) -> (ADDshiftRAreg x y z)
+(ADD (SRA y z) x) -> (ADDshiftRAreg x y z)
+(ADC x (SLLconst [c] y) flags) -> (ADCshiftLL x y [c] flags)
+(ADC (SLLconst [c] y) x flags) -> (ADCshiftLL x y [c] flags)
+(ADC x (SRLconst [c] y) flags) -> (ADCshiftRL x y [c] flags)
+(ADC (SRLconst [c] y) x flags) -> (ADCshiftRL x y [c] flags)
+(ADC x (SRAconst [c] y) flags) -> (ADCshiftRA x y [c] flags)
+(ADC (SRAconst [c] y) x flags) -> (ADCshiftRA x y [c] flags)
+(ADC x (SLL y z) flags) -> (ADCshiftLLreg x y z flags)
+(ADC (SLL y z) x flags) -> (ADCshiftLLreg x y z flags)
+(ADC x (SRL y z) flags) -> (ADCshiftRLreg x y z flags)
+(ADC (SRL y z) x flags) -> (ADCshiftRLreg x y z flags)
+(ADC x (SRA y z) flags) -> (ADCshiftRAreg x y z flags)
+(ADC (SRA y z) x flags) -> (ADCshiftRAreg x y z flags)
+(ADDS x (SLLconst [c] y)) -> (ADDSshiftLL x y [c])
+(ADDS (SLLconst [c] y) x) -> (ADDSshiftLL x y [c])
+(ADDS x (SRLconst [c] y)) -> (ADDSshiftRL x y [c])
+(ADDS (SRLconst [c] y) x) -> (ADDSshiftRL x y [c])
+(ADDS x (SRAconst [c] y)) -> (ADDSshiftRA x y [c])
+(ADDS (SRAconst [c] y) x) -> (ADDSshiftRA x y [c])
+(ADDS x (SLL y z)) -> (ADDSshiftLLreg x y z)
+(ADDS (SLL y z) x) -> (ADDSshiftLLreg x y z)
+(ADDS x (SRL y z)) -> (ADDSshiftRLreg x y z)
+(ADDS (SRL y z) x) -> (ADDSshiftRLreg x y z)
+(ADDS x (SRA y z)) -> (ADDSshiftRAreg x y z)
+(ADDS (SRA y z) x) -> (ADDSshiftRAreg x y z)
+(SUB x (SLLconst [c] y)) -> (SUBshiftLL x y [c])
+(SUB (SLLconst [c] y) x) -> (RSBshiftLL x y [c])
+(SUB x (SRLconst [c] y)) -> (SUBshiftRL x y [c])
+(SUB (SRLconst [c] y) x) -> (RSBshiftRL x y [c])
+(SUB x (SRAconst [c] y)) -> (SUBshiftRA x y [c])
+(SUB (SRAconst [c] y) x) -> (RSBshiftRA x y [c])
+(SUB x (SLL y z)) -> (SUBshiftLLreg x y z)
+(SUB (SLL y z) x) -> (RSBshiftLLreg x y z)
+(SUB x (SRL y z)) -> (SUBshiftRLreg x y z)
+(SUB (SRL y z) x) -> (RSBshiftRLreg x y z)
+(SUB x (SRA y z)) -> (SUBshiftRAreg x y z)
+(SUB (SRA y z) x) -> (RSBshiftRAreg x y z)
+(SBC x (SLLconst [c] y) flags) -> (SBCshiftLL x y [c] flags)
+(SBC (SLLconst [c] y) x flags) -> (RSCshiftLL x y [c] flags)
+(SBC x (SRLconst [c] y) flags) -> (SBCshiftRL x y [c] flags)
+(SBC (SRLconst [c] y) x flags) -> (RSCshiftRL x y [c] flags)
+(SBC x (SRAconst [c] y) flags) -> (SBCshiftRA x y [c] flags)
+(SBC (SRAconst [c] y) x flags) -> (RSCshiftRA x y [c] flags)
+(SBC x (SLL y z) flags) -> (SBCshiftLLreg x y z flags)
+(SBC (SLL y z) x flags) -> (RSCshiftLLreg x y z flags)
+(SBC x (SRL y z) flags) -> (SBCshiftRLreg x y z flags)
+(SBC (SRL y z) x flags) -> (RSCshiftRLreg x y z flags)
+(SBC x (SRA y z) flags) -> (SBCshiftRAreg x y z flags)
+(SBC (SRA y z) x flags) -> (RSCshiftRAreg x y z flags)
+(SUBS x (SLLconst [c] y)) -> (SUBSshiftLL x y [c])
+(SUBS (SLLconst [c] y) x) -> (RSBSshiftLL x y [c])
+(SUBS x (SRLconst [c] y)) -> (SUBSshiftRL x y [c])
+(SUBS (SRLconst [c] y) x) -> (RSBSshiftRL x y [c])
+(SUBS x (SRAconst [c] y)) -> (SUBSshiftRA x y [c])
+(SUBS (SRAconst [c] y) x) -> (RSBSshiftRA x y [c])
+(SUBS x (SLL y z)) -> (SUBSshiftLLreg x y z)
+(SUBS (SLL y z) x) -> (RSBSshiftLLreg x y z)
+(SUBS x (SRL y z)) -> (SUBSshiftRLreg x y z)
+(SUBS (SRL y z) x) -> (RSBSshiftRLreg x y z)
+(SUBS x (SRA y z)) -> (SUBSshiftRAreg x y z)
+(SUBS (SRA y z) x) -> (RSBSshiftRAreg x y z)
+(RSB x (SLLconst [c] y)) -> (RSBshiftLL x y [c])
+(RSB (SLLconst [c] y) x) -> (SUBshiftLL x y [c])
+(RSB x (SRLconst [c] y)) -> (RSBshiftRL x y [c])
+(RSB (SRLconst [c] y) x) -> (SUBshiftRL x y [c])
+(RSB x (SRAconst [c] y)) -> (RSBshiftRA x y [c])
+(RSB (SRAconst [c] y) x) -> (SUBshiftRA x y [c])
+(RSB x (SLL y z)) -> (RSBshiftLLreg x y z)
+(RSB (SLL y z) x) -> (SUBshiftLLreg x y z)
+(RSB x (SRL y z)) -> (RSBshiftRLreg x y z)
+(RSB (SRL y z) x) -> (SUBshiftRLreg x y z)
+(RSB x (SRA y z)) -> (RSBshiftRAreg x y z)
+(RSB (SRA y z) x) -> (SUBshiftRAreg x y z)
+(AND x (SLLconst [c] y)) -> (ANDshiftLL x y [c])
+(AND (SLLconst [c] y) x) -> (ANDshiftLL x y [c])
+(AND x (SRLconst [c] y)) -> (ANDshiftRL x y [c])
+(AND (SRLconst [c] y) x) -> (ANDshiftRL x y [c])
+(AND x (SRAconst [c] y)) -> (ANDshiftRA x y [c])
+(AND (SRAconst [c] y) x) -> (ANDshiftRA x y [c])
+(AND x (SLL y z)) -> (ANDshiftLLreg x y z)
+(AND (SLL y z) x) -> (ANDshiftLLreg x y z)
+(AND x (SRL y z)) -> (ANDshiftRLreg x y z)
+(AND (SRL y z) x) -> (ANDshiftRLreg x y z)
+(AND x (SRA y z)) -> (ANDshiftRAreg x y z)
+(AND (SRA y z) x) -> (ANDshiftRAreg x y z)
+(OR x (SLLconst [c] y)) -> (ORshiftLL x y [c])
+(OR (SLLconst [c] y) x) -> (ORshiftLL x y [c])
+(OR x (SRLconst [c] y)) -> (ORshiftRL x y [c])
+(OR (SRLconst [c] y) x) -> (ORshiftRL x y [c])
+(OR x (SRAconst [c] y)) -> (ORshiftRA x y [c])
+(OR (SRAconst [c] y) x) -> (ORshiftRA x y [c])
+(OR x (SLL y z)) -> (ORshiftLLreg x y z)
+(OR (SLL y z) x) -> (ORshiftLLreg x y z)
+(OR x (SRL y z)) -> (ORshiftRLreg x y z)
+(OR (SRL y z) x) -> (ORshiftRLreg x y z)
+(OR x (SRA y z)) -> (ORshiftRAreg x y z)
+(OR (SRA y z) x) -> (ORshiftRAreg x y z)
+(XOR x (SLLconst [c] y)) -> (XORshiftLL x y [c])
+(XOR (SLLconst [c] y) x) -> (XORshiftLL x y [c])
+(XOR x (SRLconst [c] y)) -> (XORshiftRL x y [c])
+(XOR (SRLconst [c] y) x) -> (XORshiftRL x y [c])
+(XOR x (SRAconst [c] y)) -> (XORshiftRA x y [c])
+(XOR (SRAconst [c] y) x) -> (XORshiftRA x y [c])
+(XOR x (SLL y z)) -> (XORshiftLLreg x y z)
+(XOR (SLL y z) x) -> (XORshiftLLreg x y z)
+(XOR x (SRL y z)) -> (XORshiftRLreg x y z)
+(XOR (SRL y z) x) -> (XORshiftRLreg x y z)
+(XOR x (SRA y z)) -> (XORshiftRAreg x y z)
+(XOR (SRA y z) x) -> (XORshiftRAreg x y z)
+(BIC x (SLLconst [c] y)) -> (BICshiftLL x y [c])
+(BIC x (SRLconst [c] y)) -> (BICshiftRL x y [c])
+(BIC x (SRAconst [c] y)) -> (BICshiftRA x y [c])
+(BIC x (SLL y z)) -> (BICshiftLLreg x y z)
+(BIC x (SRL y z)) -> (BICshiftRLreg x y z)
+(BIC x (SRA y z)) -> (BICshiftRAreg x y z)
+(MVN (SLLconst [c] x)) -> (MVNshiftLL x [c])
+(MVN (SRLconst [c] x)) -> (MVNshiftRL x [c])
+(MVN (SRAconst [c] x)) -> (MVNshiftRA x [c])
+(MVN (SLL x y)) -> (MVNshiftLLreg x y)
+(MVN (SRL x y)) -> (MVNshiftRLreg x y)
+(MVN (SRA x y)) -> (MVNshiftRAreg x y)
+
+(CMP x (SLLconst [c] y)) -> (CMPshiftLL x y [c])
+(CMP (SLLconst [c] y) x) -> (InvertFlags (CMPshiftLL x y [c]))
+(CMP x (SRLconst [c] y)) -> (CMPshiftRL x y [c])
+(CMP (SRLconst [c] y) x) -> (InvertFlags (CMPshiftRL x y [c]))
+(CMP x (SRAconst [c] y)) -> (CMPshiftRA x y [c])
+(CMP (SRAconst [c] y) x) -> (InvertFlags (CMPshiftRA x y [c]))
+(CMP x (SLL y z)) -> (CMPshiftLLreg x y z)
+(CMP (SLL y z) x) -> (InvertFlags (CMPshiftLLreg x y z))
+(CMP x (SRL y z)) -> (CMPshiftRLreg x y z)
+(CMP (SRL y z) x) -> (InvertFlags (CMPshiftRLreg x y z))
+(CMP x (SRA y z)) -> (CMPshiftRAreg x y z)
+(CMP (SRA y z) x) -> (InvertFlags (CMPshiftRAreg x y z))
+
+// prefer *const ops to *shift ops
+(ADDshiftLL (MOVWconst [c]) x [d]) -> (ADDconst [c] (SLLconst <x.Type> x [d]))
+(ADDshiftRL (MOVWconst [c]) x [d]) -> (ADDconst [c] (SRLconst <x.Type> x [d]))
+(ADDshiftRA (MOVWconst [c]) x [d]) -> (ADDconst [c] (SRAconst <x.Type> x [d]))
+(ADCshiftLL (MOVWconst [c]) x [d] flags) -> (ADCconst [c] (SLLconst <x.Type> x [d]) flags)
+(ADCshiftRL (MOVWconst [c]) x [d] flags) -> (ADCconst [c] (SRLconst <x.Type> x [d]) flags)
+(ADCshiftRA (MOVWconst [c]) x [d] flags) -> (ADCconst [c] (SRAconst <x.Type> x [d]) flags)
+(ADDSshiftLL (MOVWconst [c]) x [d]) -> (ADDSconst [c] (SLLconst <x.Type> x [d]))
+(ADDSshiftRL (MOVWconst [c]) x [d]) -> (ADDSconst [c] (SRLconst <x.Type> x [d]))
+(ADDSshiftRA (MOVWconst [c]) x [d]) -> (ADDSconst [c] (SRAconst <x.Type> x [d]))
+(SUBshiftLL (MOVWconst [c]) x [d]) -> (RSBconst [c] (SLLconst <x.Type> x [d]))
+(SUBshiftRL (MOVWconst [c]) x [d]) -> (RSBconst [c] (SRLconst <x.Type> x [d]))
+(SUBshiftRA (MOVWconst [c]) x [d]) -> (RSBconst [c] (SRAconst <x.Type> x [d]))
+(SBCshiftLL (MOVWconst [c]) x [d] flags) -> (RSCconst [c] (SLLconst <x.Type> x [d]) flags)
+(SBCshiftRL (MOVWconst [c]) x [d] flags) -> (RSCconst [c] (SRLconst <x.Type> x [d]) flags)
+(SBCshiftRA (MOVWconst [c]) x [d] flags) -> (RSCconst [c] (SRAconst <x.Type> x [d]) flags)
+(SUBSshiftLL (MOVWconst [c]) x [d]) -> (RSBSconst [c] (SLLconst <x.Type> x [d]))
+(SUBSshiftRL (MOVWconst [c]) x [d]) -> (RSBSconst [c] (SRLconst <x.Type> x [d]))
+(SUBSshiftRA (MOVWconst [c]) x [d]) -> (RSBSconst [c] (SRAconst <x.Type> x [d]))
+(RSBshiftLL (MOVWconst [c]) x [d]) -> (SUBconst [c] (SLLconst <x.Type> x [d]))
+(RSBshiftRL (MOVWconst [c]) x [d]) -> (SUBconst [c] (SRLconst <x.Type> x [d]))
+(RSBshiftRA (MOVWconst [c]) x [d]) -> (SUBconst [c] (SRAconst <x.Type> x [d]))
+(RSCshiftLL (MOVWconst [c]) x [d] flags) -> (SBCconst [c] (SLLconst <x.Type> x [d]) flags)
+(RSCshiftRL (MOVWconst [c]) x [d] flags) -> (SBCconst [c] (SRLconst <x.Type> x [d]) flags)
+(RSCshiftRA (MOVWconst [c]) x [d] flags) -> (SBCconst [c] (SRAconst <x.Type> x [d]) flags)
+(RSBSshiftLL (MOVWconst [c]) x [d]) -> (SUBSconst [c] (SLLconst <x.Type> x [d]))
+(RSBSshiftRL (MOVWconst [c]) x [d]) -> (SUBSconst [c] (SRLconst <x.Type> x [d]))
+(RSBSshiftRA (MOVWconst [c]) x [d]) -> (SUBSconst [c] (SRAconst <x.Type> x [d]))
+(ANDshiftLL (MOVWconst [c]) x [d]) -> (ANDconst [c] (SLLconst <x.Type> x [d]))
+(ANDshiftRL (MOVWconst [c]) x [d]) -> (ANDconst [c] (SRLconst <x.Type> x [d]))
+(ANDshiftRA (MOVWconst [c]) x [d]) -> (ANDconst [c] (SRAconst <x.Type> x [d]))
+(ORshiftLL (MOVWconst [c]) x [d]) -> (ORconst [c] (SLLconst <x.Type> x [d]))
+(ORshiftRL (MOVWconst [c]) x [d]) -> (ORconst [c] (SRLconst <x.Type> x [d]))
+(ORshiftRA (MOVWconst [c]) x [d]) -> (ORconst [c] (SRAconst <x.Type> x [d]))
+(XORshiftLL (MOVWconst [c]) x [d]) -> (XORconst [c] (SLLconst <x.Type> x [d]))
+(XORshiftRL (MOVWconst [c]) x [d]) -> (XORconst [c] (SRLconst <x.Type> x [d]))
+(XORshiftRA (MOVWconst [c]) x [d]) -> (XORconst [c] (SRAconst <x.Type> x [d]))
+(CMPshiftLL (MOVWconst [c]) x [d]) -> (InvertFlags (CMPconst [c] (SLLconst <x.Type> x [d])))
+(CMPshiftRL (MOVWconst [c]) x [d]) -> (InvertFlags (CMPconst [c] (SRLconst <x.Type> x [d])))
+(CMPshiftRA (MOVWconst [c]) x [d]) -> (InvertFlags (CMPconst [c] (SRAconst <x.Type> x [d])))
+
+(ADDshiftLLreg (MOVWconst [c]) x y) -> (ADDconst [c] (SLL <x.Type> x y))
+(ADDshiftRLreg (MOVWconst [c]) x y) -> (ADDconst [c] (SRL <x.Type> x y))
+(ADDshiftRAreg (MOVWconst [c]) x y) -> (ADDconst [c] (SRA <x.Type> x y))
+(ADCshiftLLreg (MOVWconst [c]) x y flags) -> (ADCconst [c] (SLL <x.Type> x y) flags)
+(ADCshiftRLreg (MOVWconst [c]) x y flags) -> (ADCconst [c] (SRL <x.Type> x y) flags)
+(ADCshiftRAreg (MOVWconst [c]) x y flags) -> (ADCconst [c] (SRA <x.Type> x y) flags)
+(ADDSshiftLLreg (MOVWconst [c]) x y) -> (ADDSconst [c] (SLL <x.Type> x y))
+(ADDSshiftRLreg (MOVWconst [c]) x y) -> (ADDSconst [c] (SRL <x.Type> x y))
+(ADDSshiftRAreg (MOVWconst [c]) x y) -> (ADDSconst [c] (SRA <x.Type> x y))
+(SUBshiftLLreg (MOVWconst [c]) x y) -> (RSBconst [c] (SLL <x.Type> x y))
+(SUBshiftRLreg (MOVWconst [c]) x y) -> (RSBconst [c] (SRL <x.Type> x y))
+(SUBshiftRAreg (MOVWconst [c]) x y) -> (RSBconst [c] (SRA <x.Type> x y))
+(SBCshiftLLreg (MOVWconst [c]) x y flags) -> (RSCconst [c] (SLL <x.Type> x y) flags)
+(SBCshiftRLreg (MOVWconst [c]) x y flags) -> (RSCconst [c] (SRL <x.Type> x y) flags)
+(SBCshiftRAreg (MOVWconst [c]) x y flags) -> (RSCconst [c] (SRA <x.Type> x y) flags)
+(SUBSshiftLLreg (MOVWconst [c]) x y) -> (RSBSconst [c] (SLL <x.Type> x y))
+(SUBSshiftRLreg (MOVWconst [c]) x y) -> (RSBSconst [c] (SRL <x.Type> x y))
+(SUBSshiftRAreg (MOVWconst [c]) x y) -> (RSBSconst [c] (SRA <x.Type> x y))
+(RSBshiftLLreg (MOVWconst [c]) x y) -> (SUBconst [c] (SLL <x.Type> x y))
+(RSBshiftRLreg (MOVWconst [c]) x y) -> (SUBconst [c] (SRL <x.Type> x y))
+(RSBshiftRAreg (MOVWconst [c]) x y) -> (SUBconst [c] (SRA <x.Type> x y))
+(RSCshiftLLreg (MOVWconst [c]) x y flags) -> (SBCconst [c] (SLL <x.Type> x y) flags)
+(RSCshiftRLreg (MOVWconst [c]) x y flags) -> (SBCconst [c] (SRL <x.Type> x y) flags)
+(RSCshiftRAreg (MOVWconst [c]) x y flags) -> (SBCconst [c] (SRA <x.Type> x y) flags)
+(RSBSshiftLLreg (MOVWconst [c]) x y) -> (SUBSconst [c] (SLL <x.Type> x y))
+(RSBSshiftRLreg (MOVWconst [c]) x y) -> (SUBSconst [c] (SRL <x.Type> x y))
+(RSBSshiftRAreg (MOVWconst [c]) x y) -> (SUBSconst [c] (SRA <x.Type> x y))
+(ANDshiftLLreg (MOVWconst [c]) x y) -> (ANDconst [c] (SLL <x.Type> x y))
+(ANDshiftRLreg (MOVWconst [c]) x y) -> (ANDconst [c] (SRL <x.Type> x y))
+(ANDshiftRAreg (MOVWconst [c]) x y) -> (ANDconst [c] (SRA <x.Type> x y))
+(ORshiftLLreg (MOVWconst [c]) x y) -> (ORconst [c] (SLL <x.Type> x y))
+(ORshiftRLreg (MOVWconst [c]) x y) -> (ORconst [c] (SRL <x.Type> x y))
+(ORshiftRAreg (MOVWconst [c]) x y) -> (ORconst [c] (SRA <x.Type> x y))
+(XORshiftLLreg (MOVWconst [c]) x y) -> (XORconst [c] (SLL <x.Type> x y))
+(XORshiftRLreg (MOVWconst [c]) x y) -> (XORconst [c] (SRL <x.Type> x y))
+(XORshiftRAreg (MOVWconst [c]) x y) -> (XORconst [c] (SRA <x.Type> x y))
+(CMPshiftLLreg (MOVWconst [c]) x y) -> (InvertFlags (CMPconst [c] (SLL <x.Type> x y)))
+(CMPshiftRLreg (MOVWconst [c]) x y) -> (InvertFlags (CMPconst [c] (SRL <x.Type> x y)))
+(CMPshiftRAreg (MOVWconst [c]) x y) -> (InvertFlags (CMPconst [c] (SRA <x.Type> x y)))
+
+// constant folding in *shift ops
+(ADDshiftLL x (MOVWconst [c]) [d]) -> (ADDconst x [int64(uint32(c)<<uint64(d))])
+(ADDshiftRL x (MOVWconst [c]) [d]) -> (ADDconst x [int64(uint32(c)>>uint64(d))])
+(ADDshiftRA x (MOVWconst [c]) [d]) -> (ADDconst x [int64(int32(c)>>uint64(d))])
+(ADCshiftLL x (MOVWconst [c]) [d] flags) -> (ADCconst x [int64(uint32(c)<<uint64(d))] flags)
+(ADCshiftRL x (MOVWconst [c]) [d] flags) -> (ADCconst x [int64(uint32(c)>>uint64(d))] flags)
+(ADCshiftRA x (MOVWconst [c]) [d] flags) -> (ADCconst x [int64(int32(c)>>uint64(d))] flags)
+(ADDSshiftLL x (MOVWconst [c]) [d]) -> (ADDSconst x [int64(uint32(c)<<uint64(d))])
+(ADDSshiftRL x (MOVWconst [c]) [d]) -> (ADDSconst x [int64(uint32(c)>>uint64(d))])
+(ADDSshiftRA x (MOVWconst [c]) [d]) -> (ADDSconst x [int64(int32(c)>>uint64(d))])
+(SUBshiftLL x (MOVWconst [c]) [d]) -> (SUBconst x [int64(uint32(c)<<uint64(d))])
+(SUBshiftRL x (MOVWconst [c]) [d]) -> (SUBconst x [int64(uint32(c)>>uint64(d))])
+(SUBshiftRA x (MOVWconst [c]) [d]) -> (SUBconst x [int64(int32(c)>>uint64(d))])
+(SBCshiftLL x (MOVWconst [c]) [d] flags) -> (SBCconst x [int64(uint32(c)<<uint64(d))] flags)
+(SBCshiftRL x (MOVWconst [c]) [d] flags) -> (SBCconst x [int64(uint32(c)>>uint64(d))] flags)
+(SBCshiftRA x (MOVWconst [c]) [d] flags) -> (SBCconst x [int64(int32(c)>>uint64(d))] flags)
+(SUBSshiftLL x (MOVWconst [c]) [d]) -> (SUBSconst x [int64(uint32(c)<<uint64(d))])
+(SUBSshiftRL x (MOVWconst [c]) [d]) -> (SUBSconst x [int64(uint32(c)>>uint64(d))])
+(SUBSshiftRA x (MOVWconst [c]) [d]) -> (SUBSconst x [int64(int32(c)>>uint64(d))])
+(RSBshiftLL x (MOVWconst [c]) [d]) -> (RSBconst x [int64(uint32(c)<<uint64(d))])
+(RSBshiftRL x (MOVWconst [c]) [d]) -> (RSBconst x [int64(uint32(c)>>uint64(d))])
+(RSBshiftRA x (MOVWconst [c]) [d]) -> (RSBconst x [int64(int32(c)>>uint64(d))])
+(RSCshiftLL x (MOVWconst [c]) [d] flags) -> (RSCconst x [int64(uint32(c)<<uint64(d))] flags)
+(RSCshiftRL x (MOVWconst [c]) [d] flags) -> (RSCconst x [int64(uint32(c)>>uint64(d))] flags)
+(RSCshiftRA x (MOVWconst [c]) [d] flags) -> (RSCconst x [int64(int32(c)>>uint64(d))] flags)
+(RSBSshiftLL x (MOVWconst [c]) [d]) -> (RSBSconst x [int64(uint32(c)<<uint64(d))])
+(RSBSshiftRL x (MOVWconst [c]) [d]) -> (RSBSconst x [int64(uint32(c)>>uint64(d))])
+(RSBSshiftRA x (MOVWconst [c]) [d]) -> (RSBSconst x [int64(int32(c)>>uint64(d))])
+(ANDshiftLL x (MOVWconst [c]) [d]) -> (ANDconst x [int64(uint32(c)<<uint64(d))])
+(ANDshiftRL x (MOVWconst [c]) [d]) -> (ANDconst x [int64(uint32(c)>>uint64(d))])
+(ANDshiftRA x (MOVWconst [c]) [d]) -> (ANDconst x [int64(int32(c)>>uint64(d))])
+(ORshiftLL x (MOVWconst [c]) [d]) -> (ORconst x [int64(uint32(c)<<uint64(d))])
+(ORshiftRL x (MOVWconst [c]) [d]) -> (ORconst x [int64(uint32(c)>>uint64(d))])
+(ORshiftRA x (MOVWconst [c]) [d]) -> (ORconst x [int64(int32(c)>>uint64(d))])
+(XORshiftLL x (MOVWconst [c]) [d]) -> (XORconst x [int64(uint32(c)<<uint64(d))])
+(XORshiftRL x (MOVWconst [c]) [d]) -> (XORconst x [int64(uint32(c)>>uint64(d))])
+(XORshiftRA x (MOVWconst [c]) [d]) -> (XORconst x [int64(int32(c)>>uint64(d))])
+(BICshiftLL x (MOVWconst [c]) [d]) -> (BICconst x [int64(uint32(c)<<uint64(d))])
+(BICshiftRL x (MOVWconst [c]) [d]) -> (BICconst x [int64(uint32(c)>>uint64(d))])
+(BICshiftRA x (MOVWconst [c]) [d]) -> (BICconst x [int64(int32(c)>>uint64(d))])
+(MVNshiftLL (MOVWconst [c]) [d]) -> (MOVWconst [^int64(uint32(c)<<uint64(d))])
+(MVNshiftRL (MOVWconst [c]) [d]) -> (MOVWconst [^int64(uint32(c)>>uint64(d))])
+(MVNshiftRA (MOVWconst [c]) [d]) -> (MOVWconst [^int64(int32(c)>>uint64(d))])
+(CMPshiftLL x (MOVWconst [c]) [d]) -> (CMPconst x [int64(uint32(c)<<uint64(d))])
+(CMPshiftRL x (MOVWconst [c]) [d]) -> (CMPconst x [int64(uint32(c)>>uint64(d))])
+(CMPshiftRA x (MOVWconst [c]) [d]) -> (CMPconst x [int64(int32(c)>>uint64(d))])
+
+(ADDshiftLLreg x y (MOVWconst [c])) -> (ADDshiftLL x y [c])
+(ADDshiftRLreg x y (MOVWconst [c])) -> (ADDshiftRL x y [c])
+(ADDshiftRAreg x y (MOVWconst [c])) -> (ADDshiftRA x y [c])
+(ADCshiftLLreg x y (MOVWconst [c]) flags) -> (ADCshiftLL x y [c] flags)
+(ADCshiftRLreg x y (MOVWconst [c]) flags) -> (ADCshiftRL x y [c] flags)
+(ADCshiftRAreg x y (MOVWconst [c]) flags) -> (ADCshiftRA x y [c] flags)
+(ADDSshiftLLreg x y (MOVWconst [c])) -> (ADDSshiftLL x y [c])
+(ADDSshiftRLreg x y (MOVWconst [c])) -> (ADDSshiftRL x y [c])
+(ADDSshiftRAreg x y (MOVWconst [c])) -> (ADDSshiftRA x y [c])
+(SUBshiftLLreg x y (MOVWconst [c])) -> (SUBshiftLL x y [c])
+(SUBshiftRLreg x y (MOVWconst [c])) -> (SUBshiftRL x y [c])
+(SUBshiftRAreg x y (MOVWconst [c])) -> (SUBshiftRA x y [c])
+(SBCshiftLLreg x y (MOVWconst [c]) flags) -> (SBCshiftLL x y [c] flags)
+(SBCshiftRLreg x y (MOVWconst [c]) flags) -> (SBCshiftRL x y [c] flags)
+(SBCshiftRAreg x y (MOVWconst [c]) flags) -> (SBCshiftRA x y [c] flags)
+(SUBSshiftLLreg x y (MOVWconst [c])) -> (SUBSshiftLL x y [c])
+(SUBSshiftRLreg x y (MOVWconst [c])) -> (SUBSshiftRL x y [c])
+(SUBSshiftRAreg x y (MOVWconst [c])) -> (SUBSshiftRA x y [c])
+(RSBshiftLLreg x y (MOVWconst [c])) -> (RSBshiftLL x y [c])
+(RSBshiftRLreg x y (MOVWconst [c])) -> (RSBshiftRL x y [c])
+(RSBshiftRAreg x y (MOVWconst [c])) -> (RSBshiftRA x y [c])
+(RSCshiftLLreg x y (MOVWconst [c]) flags) -> (RSCshiftLL x y [c] flags)
+(RSCshiftRLreg x y (MOVWconst [c]) flags) -> (RSCshiftRL x y [c] flags)
+(RSCshiftRAreg x y (MOVWconst [c]) flags) -> (RSCshiftRA x y [c] flags)
+(RSBSshiftLLreg x y (MOVWconst [c])) -> (RSBSshiftLL x y [c])
+(RSBSshiftRLreg x y (MOVWconst [c])) -> (RSBSshiftRL x y [c])
+(RSBSshiftRAreg x y (MOVWconst [c])) -> (RSBSshiftRA x y [c])
+(ANDshiftLLreg x y (MOVWconst [c])) -> (ANDshiftLL x y [c])
+(ANDshiftRLreg x y (MOVWconst [c])) -> (ANDshiftRL x y [c])
+(ANDshiftRAreg x y (MOVWconst [c])) -> (ANDshiftRA x y [c])
+(ORshiftLLreg x y (MOVWconst [c])) -> (ORshiftLL x y [c])
+(ORshiftRLreg x y (MOVWconst [c])) -> (ORshiftRL x y [c])
+(ORshiftRAreg x y (MOVWconst [c])) -> (ORshiftRA x y [c])
+(XORshiftLLreg x y (MOVWconst [c])) -> (XORshiftLL x y [c])
+(XORshiftRLreg x y (MOVWconst [c])) -> (XORshiftRL x y [c])
+(XORshiftRAreg x y (MOVWconst [c])) -> (XORshiftRA x y [c])
+(BICshiftLLreg x y (MOVWconst [c])) -> (BICshiftLL x y [c])
+(BICshiftRLreg x y (MOVWconst [c])) -> (BICshiftRL x y [c])
+(BICshiftRAreg x y (MOVWconst [c])) -> (BICshiftRA x y [c])
+(MVNshiftLLreg x (MOVWconst [c])) -> (MVNshiftLL x [c])
+(MVNshiftRLreg x (MOVWconst [c])) -> (MVNshiftRL x [c])
+(MVNshiftRAreg x (MOVWconst [c])) -> (MVNshiftRA x [c])
+(CMPshiftLLreg x y (MOVWconst [c])) -> (CMPshiftLL x y [c])
+(CMPshiftRLreg x y (MOVWconst [c])) -> (CMPshiftRL x y [c])
+(CMPshiftRAreg x y (MOVWconst [c])) -> (CMPshiftRA x y [c])
+
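The reg-variant folds above collapse a register-shift op back into the immediate form whenever the shift amount turns out to be constant. That is what lets source like the following become a single shifted-operand instruction (a hypothetical example; the ADD R1<<2, R2 shape is the one named in the commit message):

// a + b<<2 is a candidate for one ADDshiftLL a b [2].
func addShifted(a, b uint32) uint32 {
	return a + b<<2
}
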
+// use indexed loads and stores
+(MOVWload [0] {sym} (ADD ptr idx) mem) && sym == nil -> (MOVWloadidx ptr idx mem)
+(MOVWstore [0] {sym} (ADD ptr idx) val mem) && sym == nil -> (MOVWstoreidx ptr idx val mem)
+(MOVWload [0] {sym} (ADDshiftLL ptr idx [c]) mem) && sym == nil -> (MOVWloadshiftLL ptr idx [c] mem)
+(MOVWload [0] {sym} (ADDshiftRL ptr idx [c]) mem) && sym == nil -> (MOVWloadshiftRL ptr idx [c] mem)
+(MOVWload [0] {sym} (ADDshiftRA ptr idx [c]) mem) && sym == nil -> (MOVWloadshiftRA ptr idx [c] mem)
+(MOVWstore [0] {sym} (ADDshiftLL ptr idx [c]) val mem) && sym == nil -> (MOVWstoreshiftLL ptr idx [c] val mem)
+(MOVWstore [0] {sym} (ADDshiftRL ptr idx [c]) val mem) && sym == nil -> (MOVWstoreshiftRL ptr idx [c] val mem)
+(MOVWstore [0] {sym} (ADDshiftRA ptr idx [c]) val mem) && sym == nil -> (MOVWstoreshiftRA ptr idx [c] val mem)
+
+// constant folding in indexed loads and stores
+(MOVWloadidx ptr (MOVWconst [c]) mem) -> (MOVWload [c] ptr mem)
+(MOVWloadidx (MOVWconst [c]) ptr mem) -> (MOVWload [c] ptr mem)
+
+(MOVWstoreidx ptr (MOVWconst [c]) val mem) -> (MOVWstore [c] ptr val mem)
+(MOVWstoreidx (MOVWconst [c]) ptr val mem) -> (MOVWstore [c] ptr val mem)
+
+(MOVWloadidx ptr (SLLconst idx [c]) mem) -> (MOVWloadshiftLL ptr idx [c] mem)
+(MOVWloadidx (SLLconst idx [c]) ptr mem) -> (MOVWloadshiftLL ptr idx [c] mem)
+(MOVWloadidx ptr (SRLconst idx [c]) mem) -> (MOVWloadshiftRL ptr idx [c] mem)
+(MOVWloadidx (SRLconst idx [c]) ptr mem) -> (MOVWloadshiftRL ptr idx [c] mem)
+(MOVWloadidx ptr (SRAconst idx [c]) mem) -> (MOVWloadshiftRA ptr idx [c] mem)
+(MOVWloadidx (SRAconst idx [c]) ptr mem) -> (MOVWloadshiftRA ptr idx [c] mem)
+
+(MOVWstoreidx ptr (SLLconst idx [c]) val mem) -> (MOVWstoreshiftLL ptr idx [c] val mem)
+(MOVWstoreidx (SLLconst idx [c]) ptr val mem) -> (MOVWstoreshiftLL ptr idx [c] val mem)
+(MOVWstoreidx ptr (SRLconst idx [c]) val mem) -> (MOVWstoreshiftRL ptr idx [c] val mem)
+(MOVWstoreidx (SRLconst idx [c]) ptr val mem) -> (MOVWstoreshiftRL ptr idx [c] val mem)
+(MOVWstoreidx ptr (SRAconst idx [c]) val mem) -> (MOVWstoreshiftRA ptr idx [c] val mem)
+(MOVWstoreidx (SRAconst idx [c]) ptr val mem) -> (MOVWstoreshiftRA ptr idx [c] val mem)
+
+(MOVWloadshiftLL ptr (MOVWconst [c]) [d] mem) -> (MOVWload [int64(uint32(c)<<uint64(d))] ptr mem)
+(MOVWloadshiftRL ptr (MOVWconst [c]) [d] mem) -> (MOVWload [int64(uint32(c)>>uint64(d))] ptr mem)
+(MOVWloadshiftRA ptr (MOVWconst [c]) [d] mem) -> (MOVWload [int64(int32(c)>>uint64(d))] ptr mem)
+
+(MOVWstoreshiftLL ptr (MOVWconst [c]) [d] val mem) -> (MOVWstore [int64(uint32(c)<<uint64(d))] ptr val mem)
+(MOVWstoreshiftRL ptr (MOVWconst [c]) [d] val mem) -> (MOVWstore [int64(uint32(c)>>uint64(d))] ptr val mem)
+(MOVWstoreshiftRA ptr (MOVWconst [c]) [d] val mem) -> (MOVWstore [int64(int32(c)>>uint64(d))] ptr val mem)
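These load/store folds complete the indexed addressing support: a scaled index (SLLconst) becomes a shift-typed addressing mode, and a constant index folds back into a plain offset. Assuming 4-byte elements, a hypothetical example whose address computation (ptr + idx<<2) matches the MOVWloadshiftLL pattern:

func loadIdx(p []uint32, i int) uint32 {
	return p[i] // address is ptr + idx<<2
}
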
// generic simplifications
(ADD x (RSBconst [0] y)) -> (SUB x y)
+(ADD (RSBconst [0] y) x) -> (SUB x y)
(SUB x x) -> (MOVWconst [0])
+(RSB x x) -> (MOVWconst [0])
(AND x x) -> x
(OR x x) -> x
(XOR x x) -> (MOVWconst [0])
@@ -754,3 +1180,26 @@
(ADD a (MUL x y)) -> (MULA x y a)
(AND x (MVN y)) -> (BIC x y)
+
+// simplification with *shift ops
+(SUBshiftLL x (SLLconst x [c]) [d]) && c==d -> (MOVWconst [0])
+(SUBshiftRL x (SRLconst x [c]) [d]) && c==d -> (MOVWconst [0])
+(SUBshiftRA x (SRAconst x [c]) [d]) && c==d -> (MOVWconst [0])
+(RSBshiftLL x (SLLconst x [c]) [d]) && c==d -> (MOVWconst [0])
+(RSBshiftRL x (SRLconst x [c]) [d]) && c==d -> (MOVWconst [0])
+(RSBshiftRA x (SRAconst x [c]) [d]) && c==d -> (MOVWconst [0])
+(ANDshiftLL x y:(SLLconst x [c]) [d]) && c==d -> y
+(ANDshiftRL x y:(SRLconst x [c]) [d]) && c==d -> y
+(ANDshiftRA x y:(SRAconst x [c]) [d]) && c==d -> y
+(ORshiftLL x y:(SLLconst x [c]) [d]) && c==d -> y
+(ORshiftRL x y:(SRLconst x [c]) [d]) && c==d -> y
+(ORshiftRA x y:(SRAconst x [c]) [d]) && c==d -> y
+(XORshiftLL x (SLLconst x [c]) [d]) && c==d -> (MOVWconst [0])
+(XORshiftRL x (SRLconst x [c]) [d]) && c==d -> (MOVWconst [0])
+(XORshiftRA x (SRAconst x [c]) [d]) && c==d -> (MOVWconst [0])
+(BICshiftLL x (SLLconst x [c]) [d]) && c==d -> (MOVWconst [0])
+(BICshiftRL x (SRLconst x [c]) [d]) && c==d -> (MOVWconst [0])
+(BICshiftRA x (SRAconst x [c]) [d]) && c==d -> (MOVWconst [0])
+(AND x (MVNshiftLL y [c])) -> (BICshiftLL x y [c])
+(AND x (MVNshiftRL y [c])) -> (BICshiftRL x y [c])
+(AND x (MVNshiftRA y [c])) -> (BICshiftRA x y [c])
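The last group fuses an AND with a complemented, shifted operand into a single bit-clear, extending the existing (AND x (MVN y)) -> (BIC x y) rule. A hypothetical example of source that can reach the AND x (MVNshiftLL y [c]) shape and rewrite to BICshiftLL:

func clearBits(x, y uint32) uint32 {
	return x &^ (y << 3)
}
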
diff --git a/src/cmd/compile/internal/ssa/gen/ARMOps.go b/src/cmd/compile/internal/ssa/gen/ARMOps.go
index a58bdf8b58..89576daf0e 100644
--- a/src/cmd/compile/internal/ssa/gen/ARMOps.go
+++ b/src/cmd/compile/internal/ssa/gen/ARMOps.go
@@ -110,8 +110,13 @@ func init() {
gp2flags = regInfo{inputs: []regMask{gpg, gpg}, outputs: []regMask{flags}}
gp2flags1 = regInfo{inputs: []regMask{gp, gp, flags}, outputs: []regMask{gp}}
gp31 = regInfo{inputs: []regMask{gp, gp, gp}, outputs: []regMask{gp}}
+ gp31cf = regInfo{inputs: []regMask{gp, gp, gp}, outputs: []regMask{gp}, clobbers: flags} // cf: clobbers flags
+ gp3flags = regInfo{inputs: []regMask{gp, gp, gp}, outputs: []regMask{flags}}
+ gp3flags1 = regInfo{inputs: []regMask{gp, gp, gp, flags}, outputs: []regMask{gp}}
gpload = regInfo{inputs: []regMask{gpspsbg}, outputs: []regMask{gp}}
gpstore = regInfo{inputs: []regMask{gpspsbg, gpg}, outputs: []regMask{}}
+ gp2load = regInfo{inputs: []regMask{gpspsbg, gpg}, outputs: []regMask{gp}}
+ gp2store = regInfo{inputs: []regMask{gpspsbg, gpg, gpg}, outputs: []regMask{}}
fp01 = regInfo{inputs: []regMask{}, outputs: []regMask{fp}}
fp11 = regInfo{inputs: []regMask{fp}, outputs: []regMask{fp}}
fpgp = regInfo{inputs: []regMask{fp}, outputs: []regMask{gp}}
@@ -176,14 +181,105 @@ func init() {
{name: "SQRTD", argLength: 1, reg: fp11, asm: "SQRTD"}, // sqrt(arg0), float64
// shifts
- {name: "SLL", argLength: 2, reg: gp21cf, asm: "SLL"}, // arg0 << arg1, results 0 for large shift
+ {name: "SLL", argLength: 2, reg: gp21, asm: "SLL"}, // arg0 << arg1, shift amount is mod 256
{name: "SLLconst", argLength: 1, reg: gp11, asm: "SLL", aux: "Int32"}, // arg0 << auxInt
- {name: "SRL", argLength: 2, reg: gp21cf, asm: "SRL"}, // arg0 >> arg1, unsigned, results 0 for large shift
+ {name: "SRL", argLength: 2, reg: gp21, asm: "SRL"}, // arg0 >> arg1, unsigned, shift amount is mod 256
{name: "SRLconst", argLength: 1, reg: gp11, asm: "SRL", aux: "Int32"}, // arg0 >> auxInt, unsigned
- {name: "SRA", argLength: 2, reg: gp21cf, asm: "SRA"}, // arg0 >> arg1, signed, results 0/-1 for large shift
+ {name: "SRA", argLength: 2, reg: gp21cf, asm: "SRA"}, // arg0 >> arg1, signed, shift amount is mod 256
{name: "SRAconst", argLength: 1, reg: gp11, asm: "SRA", aux: "Int32"}, // arg0 >> auxInt, signed
{name: "SRRconst", argLength: 1, reg: gp11, aux: "Int32"}, // arg0 right rotate by auxInt bits
+ {name: "ADDshiftLL", argLength: 2, reg: gp21, asm: "ADD", aux: "Int32"}, // arg0 + arg1<<auxInt
+ {name: "ADDshiftRL", argLength: 2, reg: gp21, asm: "ADD", aux: "Int32"}, // arg0 + arg1>>auxInt, unsigned shift
+ {name: "ADDshiftRA", argLength: 2, reg: gp21, asm: "ADD", aux: "Int32"}, // arg0 + arg1>>auxInt, signed shift
+ {name: "SUBshiftLL", argLength: 2, reg: gp21, asm: "SUB", aux: "Int32"}, // arg0 - arg1<<auxInt
+ {name: "SUBshiftRL", argLength: 2, reg: gp21, asm: "SUB", aux: "Int32"}, // arg0 - arg1>>auxInt, unsigned shift
+ {name: "SUBshiftRA", argLength: 2, reg: gp21, asm: "SUB", aux: "Int32"}, // arg0 - arg1>>auxInt, signed shift
+ {name: "RSBshiftLL", argLength: 2, reg: gp21, asm: "RSB", aux: "Int32"}, // arg1<<auxInt - arg0
+ {name: "RSBshiftRL", argLength: 2, reg: gp21, asm: "RSB", aux: "Int32"}, // arg1>>auxInt - arg0, unsigned shift
+ {name: "RSBshiftRA", argLength: 2, reg: gp21, asm: "RSB", aux: "Int32"}, // arg1>>auxInt - arg0, signed shift
+ {name: "ANDshiftLL", argLength: 2, reg: gp21, asm: "AND", aux: "Int32"}, // arg0 & (arg1<<auxInt)
+ {name: "ANDshiftRL", argLength: 2, reg: gp21, asm: "AND", aux: "Int32"}, // arg0 & (arg1>>auxInt), unsigned shift
+ {name: "ANDshiftRA", argLength: 2, reg: gp21, asm: "AND", aux: "Int32"}, // arg0 & (arg1>>auxInt), signed shift
+ {name: "ORshiftLL", argLength: 2, reg: gp21, asm: "ORR", aux: "Int32"}, // arg0 | arg1<<auxInt
+ {name: "ORshiftRL", argLength: 2, reg: gp21, asm: "ORR", aux: "Int32"}, // arg0 | arg1>>auxInt, unsigned shift
+ {name: "ORshiftRA", argLength: 2, reg: gp21, asm: "ORR", aux: "Int32"}, // arg0 | arg1>>auxInt, signed shift
+ {name: "XORshiftLL", argLength: 2, reg: gp21, asm: "EOR", aux: "Int32"}, // arg0 ^ arg1<<auxInt
+ {name: "XORshiftRL", argLength: 2, reg: gp21, asm: "EOR", aux: "Int32"}, // arg0 ^ arg1>>auxInt, unsigned shift
+ {name: "XORshiftRA", argLength: 2, reg: gp21, asm: "EOR", aux: "Int32"}, // arg0 ^ arg1>>auxInt, signed shift
+ {name: "BICshiftLL", argLength: 2, reg: gp21, asm: "BIC", aux: "Int32"}, // arg0 &^ (arg1<<auxInt)
+ {name: "BICshiftRL", argLength: 2, reg: gp21, asm: "BIC", aux: "Int32"}, // arg0 &^ (arg1>>auxInt), unsigned shift
+ {name: "BICshiftRA", argLength: 2, reg: gp21, asm: "BIC", aux: "Int32"}, // arg0 &^ (arg1>>auxInt), signed shift
+ {name: "MVNshiftLL", argLength: 1, reg: gp11, asm: "MVN", aux: "Int32"}, // ^(arg0<<auxInt)
+ {name: "MVNshiftRL", argLength: 1, reg: gp11, asm: "MVN", aux: "Int32"}, // ^(arg0>>auxInt), unsigned shift
+ {name: "MVNshiftRA", argLength: 1, reg: gp11, asm: "MVN", aux: "Int32"}, // ^(arg0>>auxInt), signed shift
+
+ {name: "ADCshiftLL", argLength: 3, reg: gp2flags1, asm: "ADC", aux: "Int32"}, // arg0 + arg1<<auxInt + carry, arg2=flags
+ {name: "ADCshiftRL", argLength: 3, reg: gp2flags1, asm: "ADC", aux: "Int32"}, // arg0 + arg1>>auxInt + carry, unsigned shift, arg2=flags
+ {name: "ADCshiftRA", argLength: 3, reg: gp2flags1, asm: "ADC", aux: "Int32"}, // arg0 + arg1>>auxInt + carry, signed shift, arg2=flags
+ {name: "SBCshiftLL", argLength: 3, reg: gp2flags1, asm: "SBC", aux: "Int32"}, // arg0 - arg1<<auxInt - carry, arg2=flags
+ {name: "SBCshiftRL", argLength: 3, reg: gp2flags1, asm: "SBC", aux: "Int32"}, // arg0 - arg1>>auxInt - carry, unsigned shift, arg2=flags
+ {name: "SBCshiftRA", argLength: 3, reg: gp2flags1, asm: "SBC", aux: "Int32"}, // arg0 - arg1>>auxInt - carry, signed shift, arg2=flags
+ {name: "RSCshiftLL", argLength: 3, reg: gp2flags1, asm: "RSC", aux: "Int32"}, // arg1<<auxInt - arg0 - carry, arg2=flags
+ {name: "RSCshiftRL", argLength: 3, reg: gp2flags1, asm: "RSC", aux: "Int32"}, // arg1>>auxInt - arg0 - carry, unsigned shift, arg2=flags
+ {name: "RSCshiftRA", argLength: 3, reg: gp2flags1, asm: "RSC", aux: "Int32"}, // arg1>>auxInt - arg0 - carry, signed shift, arg2=flags
+
+ {name: "ADDSshiftLL", argLength: 2, reg: gp21cf, asm: "ADD", aux: "Int32"}, // arg0 + arg1<<auxInt, set carry flag
+ {name: "ADDSshiftRL", argLength: 2, reg: gp21cf, asm: "ADD", aux: "Int32"}, // arg0 + arg1>>auxInt, unsigned shift, set carry flag
+ {name: "ADDSshiftRA", argLength: 2, reg: gp21cf, asm: "ADD", aux: "Int32"}, // arg0 + arg1>>auxInt, signed shift, set carry flag
+ {name: "SUBSshiftLL", argLength: 2, reg: gp21cf, asm: "SUB", aux: "Int32"}, // arg0 - arg1<<auxInt, set carry flag
+ {name: "SUBSshiftRL", argLength: 2, reg: gp21cf, asm: "SUB", aux: "Int32"}, // arg0 - arg1>>auxInt, unsigned shift, set carry flag
+ {name: "SUBSshiftRA", argLength: 2, reg: gp21cf, asm: "SUB", aux: "Int32"}, // arg0 - arg1>>auxInt, signed shift, set carry flag
+ {name: "RSBSshiftLL", argLength: 2, reg: gp21cf, asm: "RSB", aux: "Int32"}, // arg1<<auxInt - arg0, set carry flag
+ {name: "RSBSshiftRL", argLength: 2, reg: gp21cf, asm: "RSB", aux: "Int32"}, // arg1>>auxInt - arg0, unsigned shift, set carry flag
+ {name: "RSBSshiftRA", argLength: 2, reg: gp21cf, asm: "RSB", aux: "Int32"}, // arg1>>auxInt - arg0, signed shift, set carry flag
+
+ {name: "ADDshiftLLreg", argLength: 3, reg: gp31, asm: "ADD"}, // arg0 + arg1<<arg2
+ {name: "ADDshiftRLreg", argLength: 3, reg: gp31, asm: "ADD"}, // arg0 + arg1>>arg2, unsigned shift
+ {name: "ADDshiftRAreg", argLength: 3, reg: gp31, asm: "ADD"}, // arg0 + arg1>>arg2, signed shift
+ {name: "SUBshiftLLreg", argLength: 3, reg: gp31, asm: "SUB"}, // arg0 - arg1<<arg2
+ {name: "SUBshiftRLreg", argLength: 3, reg: gp31, asm: "SUB"}, // arg0 - arg1>>arg2, unsigned shift
+ {name: "SUBshiftRAreg", argLength: 3, reg: gp31, asm: "SUB"}, // arg0 - arg1>>arg2, signed shift
+ {name: "RSBshiftLLreg", argLength: 3, reg: gp31, asm: "RSB"}, // arg1<<arg2 - arg0
+ {name: "RSBshiftRLreg", argLength: 3, reg: gp31, asm: "RSB"}, // arg1>>arg2 - arg0, unsigned shift
+ {name: "RSBshiftRAreg", argLength: 3, reg: gp31, asm: "RSB"}, // arg1>>arg2 - arg0, signed shift
+ {name: "ANDshiftLLreg", argLength: 3, reg: gp31, asm: "AND"}, // arg0 & (arg1<<arg2)
+ {name: "ANDshiftRLreg", argLength: 3, reg: gp31, asm: "AND"}, // arg0 & (arg1>>arg2), unsigned shift
+ {name: "ANDshiftRAreg", argLength: 3, reg: gp31, asm: "AND"}, // arg0 & (arg1>>arg2), signed shift
+ {name: "ORshiftLLreg", argLength: 3, reg: gp31, asm: "ORR"}, // arg0 | arg1<<arg2
+ {name: "ORshiftRLreg", argLength: 3, reg: gp31, asm: "ORR"}, // arg0 | arg1>>arg2, unsigned shift
+ {name: "ORshiftRAreg", argLength: 3, reg: gp31, asm: "ORR"}, // arg0 | arg1>>arg2, signed shift
+ {name: "XORshiftLLreg", argLength: 3, reg: gp31, asm: "EOR"}, // arg0 ^ arg1<<arg2
+ {name: "XORshiftRLreg", argLength: 3, reg: gp31, asm: "EOR"}, // arg0 ^ arg1>>arg2, unsigned shift
+ {name: "XORshiftRAreg", argLength: 3, reg: gp31, asm: "EOR"}, // arg0 ^ arg1>>arg2, signed shift
+ {name: "BICshiftLLreg", argLength: 3, reg: gp31, asm: "BIC"}, // arg0 &^ (arg1<<arg2)
+ {name: "BICshiftRLreg", argLength: 3, reg: gp31, asm: "BIC"}, // arg0 &^ (arg1>>arg2), unsigned shift
+ {name: "BICshiftRAreg", argLength: 3, reg: gp31, asm: "BIC"}, // arg0 &^ (arg1>>arg2), signed shift
+ {name: "MVNshiftLLreg", argLength: 2, reg: gp21, asm: "MVN"}, // ^(arg0<<arg1)
+ {name: "MVNshiftRLreg", argLength: 2, reg: gp21, asm: "MVN"}, // ^(arg0>>arg1), unsigned shift
+ {name: "MVNshiftRAreg", argLength: 2, reg: gp21, asm: "MVN"}, // ^(arg0>>arg1), signed shift
+
+ {name: "ADCshiftLLreg", argLength: 4, reg: gp3flags1, asm: "ADC"}, // arg0 + arg1<<arg2 + carry, arg3=flags
+ {name: "ADCshiftRLreg", argLength: 4, reg: gp3flags1, asm: "ADC"}, // arg0 + arg1>>arg2 + carry, unsigned shift, arg3=flags
+ {name: "ADCshiftRAreg", argLength: 4, reg: gp3flags1, asm: "ADC"}, // arg0 + arg1>>arg2 + carry, signed shift, arg3=flags
+ {name: "SBCshiftLLreg", argLength: 4, reg: gp3flags1, asm: "SBC"}, // arg0 - arg1<<arg2 - carry, arg3=flags
+ {name: "SBCshiftRLreg", argLength: 4, reg: gp3flags1, asm: "SBC"}, // arg0 - arg1>>arg2 - carry, unsigned shift, arg3=flags
+ {name: "SBCshiftRAreg", argLength: 4, reg: gp3flags1, asm: "SBC"}, // arg0 - arg1>>arg2 - carry, signed shift, arg3=flags
+ {name: "RSCshiftLLreg", argLength: 4, reg: gp3flags1, asm: "RSC"}, // arg1<<arg2 - arg0 - carry, arg3=flags
+ {name: "RSCshiftRLreg", argLength: 4, reg: gp3flags1, asm: "RSC"}, // arg1>>arg2 - arg0 - carry, unsigned shift, arg3=flags
+ {name: "RSCshiftRAreg", argLength: 4, reg: gp3flags1, asm: "RSC"}, // arg1>>arg2 - arg0 - carry, signed shift, arg3=flags
+
+ {name: "ADDSshiftLLreg", argLength: 3, reg: gp31cf, asm: "ADD"}, // arg0 + arg1<<arg2, set carry flag
+ {name: "ADDSshiftRLreg", argLength: 3, reg: gp31cf, asm: "ADD"}, // arg0 + arg1>>arg2, unsigned shift, set carry flag
+ {name: "ADDSshiftRAreg", argLength: 3, reg: gp31cf, asm: "ADD"}, // arg0 + arg1>>arg2, signed shift, set carry flag
+ {name: "SUBSshiftLLreg", argLength: 3, reg: gp31cf, asm: "SUB"}, // arg0 - arg1<<arg2, set carry flag
+ {name: "SUBSshiftRLreg", argLength: 3, reg: gp31cf, asm: "SUB"}, // arg0 - arg1>>arg2, unsigned shift, set carry flag
+ {name: "SUBSshiftRAreg", argLength: 3, reg: gp31cf, asm: "SUB"}, // arg0 - arg1>>arg2, signed shift, set carry flag
+ {name: "RSBSshiftLLreg", argLength: 3, reg: gp31cf, asm: "RSB"}, // arg1<<arg2 - arg0, set carry flag
+ {name: "RSBSshiftRLreg", argLength: 3, reg: gp31cf, asm: "RSB"}, // arg1>>arg2 - arg0, unsigned shift, set carry flag
+ {name: "RSBSshiftRAreg", argLength: 3, reg: gp31cf, asm: "RSB"}, // arg1>>arg2 - arg0, signed shift, set carry flag
+
+ // comparisons
{name: "CMP", argLength: 2, reg: gp2flags, asm: "CMP", typ: "Flags"}, // arg0 compare to arg1
{name: "CMPconst", argLength: 1, reg: gp1flags, asm: "CMP", aux: "Int32", typ: "Flags"}, // arg0 compare to auxInt
{name: "CMN", argLength: 2, reg: gp2flags, asm: "CMN", typ: "Flags"}, // arg0 compare to -arg1
@@ -195,6 +291,15 @@ func init() {
{name: "CMPF", argLength: 2, reg: fp2flags, asm: "CMPF", typ: "Flags"}, // arg0 compare to arg1, float32
{name: "CMPD", argLength: 2, reg: fp2flags, asm: "CMPD", typ: "Flags"}, // arg0 compare to arg1, float64
+ {name: "CMPshiftLL", argLength: 2, reg: gp2flags, asm: "CMP", aux: "Int32", typ: "Flags"}, // arg0 compare to arg1<<auxInt
+ {name: "CMPshiftRL", argLength: 2, reg: gp2flags, asm: "CMP", aux: "Int32", typ: "Flags"}, // arg0 compare to arg1>>auxInt, unsigned shift
+ {name: "CMPshiftRA", argLength: 2, reg: gp2flags, asm: "CMP", aux: "Int32", typ: "Flags"}, // arg0 compare to arg1>>auxInt, signed shift
+
+ {name: "CMPshiftLLreg", argLength: 3, reg: gp3flags, asm: "CMP", typ: "Flags"}, // arg0 compare to arg1<<arg2
+ {name: "CMPshiftRLreg", argLength: 3, reg: gp3flags, asm: "CMP", typ: "Flags"}, // arg0 compare to arg1>>arg2, unsigned shift
+ {name: "CMPshiftRAreg", argLength: 3, reg: gp3flags, asm: "CMP", typ: "Flags"}, // arg0 compare to arg1>>arg2, signed shift
+
+ // moves
{name: "MOVWconst", argLength: 0, reg: gp01, aux: "Int32", asm: "MOVW", typ: "UInt32", rematerializeable: true}, // 32 low bits of auxint
{name: "MOVFconst", argLength: 0, reg: fp01, aux: "Float64", asm: "MOVF", typ: "Float32", rematerializeable: true}, // auxint as 64-bit float, convert to 32-bit float
{name: "MOVDconst", argLength: 0, reg: fp01, aux: "Float64", asm: "MOVD", typ: "Float64", rematerializeable: true}, // auxint as 64-bit float
@@ -215,6 +320,16 @@ func init() {
{name: "MOVFstore", argLength: 3, reg: fpstore, aux: "SymOff", asm: "MOVF", typ: "Mem"}, // store 4 bytes of arg1 to arg0 + auxInt + aux. arg2=mem.
{name: "MOVDstore", argLength: 3, reg: fpstore, aux: "SymOff", asm: "MOVD", typ: "Mem"}, // store 8 bytes of arg1 to arg0 + auxInt + aux. arg2=mem.
+ {name: "MOVWloadidx", argLength: 3, reg: gp2load, asm: "MOVW"}, // load from arg0 + arg1. arg2=mem
+ {name: "MOVWloadshiftLL", argLength: 3, reg: gp2load, asm: "MOVW", aux: "Int32"}, // load from arg0 + arg1<<auxInt. arg2=mem
+ {name: "MOVWloadshiftRL", argLength: 3, reg: gp2load, asm: "MOVW", aux: "Int32"}, // load from arg0 + arg1>>auxInt, unsigned shift. arg2=mem
+ {name: "MOVWloadshiftRA", argLength: 3, reg: gp2load, asm: "MOVW", aux: "Int32"}, // load from arg0 + arg1>>auxInt, signed shift. arg2=mem
+
+ {name: "MOVWstoreidx", argLength: 4, reg: gp2store, asm: "MOVW"}, // store arg2 to arg0 + arg1. arg3=mem
+ {name: "MOVWstoreshiftLL", argLength: 4, reg: gp2store, asm: "MOVW", aux: "Int32"}, // store arg2 to arg0 + arg1<<auxInt. arg3=mem
+ {name: "MOVWstoreshiftRL", argLength: 4, reg: gp2store, asm: "MOVW", aux: "Int32"}, // store arg2 to arg0 + arg1>>auxInt, unsigned shift. arg3=mem
+ {name: "MOVWstoreshiftRA", argLength: 4, reg: gp2store, asm: "MOVW", aux: "Int32"}, // store arg2 to arg0 + arg1>>auxInt, signed shift. arg3=mem
+
{name: "MOVBreg", argLength: 1, reg: gp11, asm: "MOVBS"}, // move from arg0, sign-extended from byte
{name: "MOVBUreg", argLength: 1, reg: gp11, asm: "MOVBU"}, // move from arg0, unsign-extended from byte
{name: "MOVHreg", argLength: 1, reg: gp11, asm: "MOVHS"}, // move from arg0, sign-extended from half
@@ -232,6 +347,12 @@ func init() {
{name: "MOVFD", argLength: 1, reg: fp11, asm: "MOVFD"}, // float32 -> float64
{name: "MOVDF", argLength: 1, reg: fp11, asm: "MOVDF"}, // float64 -> float32
+ // conditional instructions, for lowering shifts
+ {name: "CMOVWHSconst", argLength: 2, reg: gp1flags1, asm: "MOVW", aux: "Int32", resultInArg0: true}, // replace arg0 w/ const if flags indicates HS, arg1=flags
+ {name: "CMOVWLSconst", argLength: 2, reg: gp1flags1, asm: "MOVW", aux: "Int32", resultInArg0: true}, // replace arg0 w/ const if flags indicates LS, arg1=flags
+ {name: "SRAcond", argLength: 3, reg: gp2flags1, asm: "SRA"}, // arg0 >> 31 if flags indicates HS, arg0 >> arg1 otherwise, signed shift, arg2=flags
+
+ // function calls
{name: "CALLstatic", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "SymOff"}, // call static function aux.(*gc.Sym). arg0=mem, auxint=argsize, returns mem
{name: "CALLclosure", argLength: 3, reg: regInfo{inputs: []regMask{gpsp, buildReg("R7"), 0}, clobbers: callerSave}, aux: "Int64"}, // call function via closure. arg0=codeptr, arg1=closure, arg2=mem, auxint=argsize, returns mem
{name: "CALLdefer", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "Int64"}, // call deferproc. arg0=mem, auxint=argsize, returns mem
@@ -256,8 +377,6 @@ func init() {
{name: "LoweredSelect0", argLength: 1, reg: regInfo{inputs: []regMask{}, outputs: []regMask{buildReg("R0")}}}, // the first component of a tuple, implicitly in R0, arg0=tuple
{name: "LoweredSelect1", argLength: 1, reg: regInfo{inputs: []regMask{gp}, outputs: []regMask{gp}}, resultInArg0: true}, // the second component of a tuple, arg0=tuple
- {name: "LoweredZeromask", argLength: 1, reg: gp11}, // 0 if arg0 == 1, 0xffffffff if arg0 != 0
-
// duffzero (must be 4-byte aligned)
// arg0 = address of memory to zero (in R1, changed as side effect)
// arg1 = value to store (always zero)
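The conditional ops added above are what the commit message means by breaking up shifts: a variable shift becomes a compare plus a flag-conditional instruction instead of one opaque multi-instruction pseudo-op, so the compare is visible to constant folding and CSE. A hedged Go model of the behavior SRAcond encodes (arg0 >> 31 on HS, i.e. unsigned greater-or-equal, otherwise arg0 >> arg1; the exact compare constant comes from the lowering rules and is assumed here to be 32):

func sraCondModel(x int32, y uint32) int32 {
	if y >= 32 { // CMP sets HS for an out-of-range amount
		return x >> 31 // saturate to all sign bits
	}
	return x >> y
}
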
diff --git a/src/cmd/compile/internal/ssa/op.go b/src/cmd/compile/internal/ssa/op.go
index 07c439de4b..a7c2b6e3de 100644
--- a/src/cmd/compile/internal/ssa/op.go
+++ b/src/cmd/compile/internal/ssa/op.go
@@ -157,7 +157,13 @@ func (op Op) isTupleGenerator() bool {
switch op {
case OpAdd32carry, OpSub32carry, OpMul32uhilo,
OpARMADDS, OpARMSUBS, OpARMMULLU,
- OpARMADDSconst, OpARMSUBSconst, OpARMRSBSconst:
+ OpARMADDSconst, OpARMSUBSconst, OpARMRSBSconst,
+ OpARMADDSshiftLL, OpARMSUBSshiftLL, OpARMRSBSshiftLL,
+ OpARMADDSshiftRL, OpARMSUBSshiftRL, OpARMRSBSshiftRL,
+ OpARMADDSshiftRA, OpARMSUBSshiftRA, OpARMRSBSshiftRA,
+ OpARMADDSshiftLLreg, OpARMSUBSshiftLLreg, OpARMRSBSshiftLLreg,
+ OpARMADDSshiftRLreg, OpARMSUBSshiftRLreg, OpARMRSBSshiftRLreg,
+ OpARMADDSshiftRAreg, OpARMSUBSshiftRAreg, OpARMRSBSshiftRAreg:
return true
}
return false
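Every op added to isTupleGenerator here sets the carry flag, so each conceptually produces a (value, flags) pair that LoweredSelect0/LoweredSelect1 later split. A rough model of why that pairing matters, written as the 32-bit-halves addition the carry chain serves (hypothetical helper, not compiler code):

func add64via32(xlo, xhi, ylo, yhi uint32) (lo, hi uint32) {
	lo = xlo + ylo // the ADDS result half of the tuple
	var carry uint32
	if lo < xlo { // the flags half: carry out of the low word
		carry = 1
	}
	hi = xhi + yhi + carry // ADC consumes the carry
	return lo, hi
}
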
diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go
index 7ea67a99b9..159e1b26b4 100644
--- a/src/cmd/compile/internal/ssa/opGen.go
+++ b/src/cmd/compile/internal/ssa/opGen.go
@@ -596,6 +596,90 @@ const (
OpARMSRA
OpARMSRAconst
OpARMSRRconst
+ OpARMADDshiftLL
+ OpARMADDshiftRL
+ OpARMADDshiftRA
+ OpARMSUBshiftLL
+ OpARMSUBshiftRL
+ OpARMSUBshiftRA
+ OpARMRSBshiftLL
+ OpARMRSBshiftRL
+ OpARMRSBshiftRA
+ OpARMANDshiftLL
+ OpARMANDshiftRL
+ OpARMANDshiftRA
+ OpARMORshiftLL
+ OpARMORshiftRL
+ OpARMORshiftRA
+ OpARMXORshiftLL
+ OpARMXORshiftRL
+ OpARMXORshiftRA
+ OpARMBICshiftLL
+ OpARMBICshiftRL
+ OpARMBICshiftRA
+ OpARMMVNshiftLL
+ OpARMMVNshiftRL
+ OpARMMVNshiftRA
+ OpARMADCshiftLL
+ OpARMADCshiftRL
+ OpARMADCshiftRA
+ OpARMSBCshiftLL
+ OpARMSBCshiftRL
+ OpARMSBCshiftRA
+ OpARMRSCshiftLL
+ OpARMRSCshiftRL
+ OpARMRSCshiftRA
+ OpARMADDSshiftLL
+ OpARMADDSshiftRL
+ OpARMADDSshiftRA
+ OpARMSUBSshiftLL
+ OpARMSUBSshiftRL
+ OpARMSUBSshiftRA
+ OpARMRSBSshiftLL
+ OpARMRSBSshiftRL
+ OpARMRSBSshiftRA
+ OpARMADDshiftLLreg
+ OpARMADDshiftRLreg
+ OpARMADDshiftRAreg
+ OpARMSUBshiftLLreg
+ OpARMSUBshiftRLreg
+ OpARMSUBshiftRAreg
+ OpARMRSBshiftLLreg
+ OpARMRSBshiftRLreg
+ OpARMRSBshiftRAreg
+ OpARMANDshiftLLreg
+ OpARMANDshiftRLreg
+ OpARMANDshiftRAreg
+ OpARMORshiftLLreg
+ OpARMORshiftRLreg
+ OpARMORshiftRAreg
+ OpARMXORshiftLLreg
+ OpARMXORshiftRLreg
+ OpARMXORshiftRAreg
+ OpARMBICshiftLLreg
+ OpARMBICshiftRLreg
+ OpARMBICshiftRAreg
+ OpARMMVNshiftLLreg
+ OpARMMVNshiftRLreg
+ OpARMMVNshiftRAreg
+ OpARMADCshiftLLreg
+ OpARMADCshiftRLreg
+ OpARMADCshiftRAreg
+ OpARMSBCshiftLLreg
+ OpARMSBCshiftRLreg
+ OpARMSBCshiftRAreg
+ OpARMRSCshiftLLreg
+ OpARMRSCshiftRLreg
+ OpARMRSCshiftRAreg
+ OpARMADDSshiftLLreg
+ OpARMADDSshiftRLreg
+ OpARMADDSshiftRAreg
+ OpARMSUBSshiftLLreg
+ OpARMSUBSshiftRLreg
+ OpARMSUBSshiftRAreg
+ OpARMRSBSshiftLLreg
+ OpARMRSBSshiftRLreg
+ OpARMRSBSshiftRAreg
OpARMCMP
OpARMCMPconst
OpARMCMN
@@ -606,6 +690,12 @@ const (
OpARMTEQconst
OpARMCMPF
OpARMCMPD
+ OpARMCMPshiftLL
+ OpARMCMPshiftRL
+ OpARMCMPshiftRA
+ OpARMCMPshiftLLreg
+ OpARMCMPshiftRLreg
+ OpARMCMPshiftRAreg
OpARMMOVWconst
OpARMMOVFconst
OpARMMOVDconst
@@ -622,6 +712,14 @@ const (
OpARMMOVWstore
OpARMMOVFstore
OpARMMOVDstore
+ OpARMMOVWloadidx
+ OpARMMOVWloadshiftLL
+ OpARMMOVWloadshiftRL
+ OpARMMOVWloadshiftRA
+ OpARMMOVWstoreidx
+ OpARMMOVWstoreshiftLL
+ OpARMMOVWstoreshiftRL
+ OpARMMOVWstoreshiftRA
OpARMMOVBreg
OpARMMOVBUreg
OpARMMOVHreg
@@ -637,6 +735,9 @@ const (
OpARMMOVDWU
OpARMMOVFD
OpARMMOVDF
+ OpARMCMOVWHSconst
+ OpARMCMOVWLSconst
+ OpARMSRAcond
OpARMCALLstatic
OpARMCALLclosure
OpARMCALLdefer
@@ -656,7 +757,6 @@ const (
OpARMCarry
OpARMLoweredSelect0
OpARMLoweredSelect1
- OpARMLoweredZeromask
OpARMDUFFZERO
OpARMDUFFCOPY
OpARMLoweredZero
@@ -7155,7 +7255,6 @@ var opcodeTable = [...]opInfo{
{0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
{1, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
},
- clobbers: 4294967296, // FLAGS
outputs: []regMask{
5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
},
@@ -7184,7 +7283,6 @@ var opcodeTable = [...]opInfo{
{0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
{1, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
},
- clobbers: 4294967296, // FLAGS
outputs: []regMask{
5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
},
@@ -7247,6 +7345,1296 @@ var opcodeTable = [...]opInfo{
},
},
{
+ name: "ADDshiftLL",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.AADD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ {1, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ },
+ outputs: []regMask{
+ 5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "ADDshiftRL",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.AADD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ {1, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ },
+ outputs: []regMask{
+ 5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "ADDshiftRA",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.AADD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ {1, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ },
+ outputs: []regMask{
+ 5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "SUBshiftLL",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.ASUB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ {1, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ },
+ outputs: []regMask{
+ 5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "SUBshiftRL",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.ASUB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ {1, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ },
+ outputs: []regMask{
+ 5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "SUBshiftRA",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.ASUB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ {1, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ },
+ outputs: []regMask{
+ 5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "RSBshiftLL",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.ARSB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ {1, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ },
+ outputs: []regMask{
+ 5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "RSBshiftRL",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.ARSB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ {1, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ },
+ outputs: []regMask{
+ 5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "RSBshiftRA",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.ARSB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ {1, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ },
+ outputs: []regMask{
+ 5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "ANDshiftLL",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.AAND,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ {1, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ },
+ outputs: []regMask{
+ 5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "ANDshiftRL",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.AAND,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ {1, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ },
+ outputs: []regMask{
+ 5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "ANDshiftRA",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.AAND,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ {1, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ },
+ outputs: []regMask{
+ 5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "ORshiftLL",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.AORR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ {1, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ },
+ outputs: []regMask{
+ 5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "ORshiftRL",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.AORR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ {1, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ },
+ outputs: []regMask{
+ 5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "ORshiftRA",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.AORR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ {1, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ },
+ outputs: []regMask{
+ 5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "XORshiftLL",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.AEOR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ {1, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ },
+ outputs: []regMask{
+ 5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "XORshiftRL",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.AEOR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ {1, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ },
+ outputs: []regMask{
+ 5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "XORshiftRA",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.AEOR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ {1, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ },
+ outputs: []regMask{
+ 5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "BICshiftLL",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.ABIC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ {1, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ },
+ outputs: []regMask{
+ 5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "BICshiftRL",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.ABIC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ {1, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ },
+ outputs: []regMask{
+ 5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "BICshiftRA",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.ABIC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ {1, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ },
+ outputs: []regMask{
+ 5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "MVNshiftLL",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: arm.AMVN,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ },
+ outputs: []regMask{
+ 5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "MVNshiftRL",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: arm.AMVN,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ },
+ outputs: []regMask{
+ 5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "MVNshiftRA",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: arm.AMVN,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ },
+ outputs: []regMask{
+ 5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "ADCshiftLL",
+ auxType: auxInt32,
+ argLen: 3,
+ asm: arm.AADC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {2, 4294967296}, // FLAGS
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ outputs: []regMask{
+ 5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "ADCshiftRL",
+ auxType: auxInt32,
+ argLen: 3,
+ asm: arm.AADC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {2, 4294967296}, // FLAGS
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ outputs: []regMask{
+ 5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "ADCshiftRA",
+ auxType: auxInt32,
+ argLen: 3,
+ asm: arm.AADC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {2, 4294967296}, // FLAGS
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ outputs: []regMask{
+ 5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "SBCshiftLL",
+ auxType: auxInt32,
+ argLen: 3,
+ asm: arm.ASBC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {2, 4294967296}, // FLAGS
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ outputs: []regMask{
+ 5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "SBCshiftRL",
+ auxType: auxInt32,
+ argLen: 3,
+ asm: arm.ASBC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {2, 4294967296}, // FLAGS
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ outputs: []regMask{
+ 5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "SBCshiftRA",
+ auxType: auxInt32,
+ argLen: 3,
+ asm: arm.ASBC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {2, 4294967296}, // FLAGS
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ outputs: []regMask{
+ 5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "RSCshiftLL",
+ auxType: auxInt32,
+ argLen: 3,
+ asm: arm.ARSC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {2, 4294967296}, // FLAGS
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ outputs: []regMask{
+ 5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "RSCshiftRL",
+ auxType: auxInt32,
+ argLen: 3,
+ asm: arm.ARSC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {2, 4294967296}, // FLAGS
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ outputs: []regMask{
+ 5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "RSCshiftRA",
+ auxType: auxInt32,
+ argLen: 3,
+ asm: arm.ARSC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {2, 4294967296}, // FLAGS
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ outputs: []regMask{
+ 5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "ADDSshiftLL",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.AADD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ {1, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ },
+ clobbers: 4294967296, // FLAGS
+ outputs: []regMask{
+ 5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "ADDSshiftRL",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.AADD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ {1, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ },
+ clobbers: 4294967296, // FLAGS
+ outputs: []regMask{
+ 5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "ADDSshiftRA",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.AADD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ {1, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ },
+ clobbers: 4294967296, // FLAGS
+ outputs: []regMask{
+ 5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "SUBSshiftLL",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.ASUB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ {1, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ },
+ clobbers: 4294967296, // FLAGS
+ outputs: []regMask{
+ 5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "SUBSshiftRL",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.ASUB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ {1, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ },
+ clobbers: 4294967296, // FLAGS
+ outputs: []regMask{
+ 5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "SUBSshiftRA",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.ASUB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ {1, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ },
+ clobbers: 4294967296, // FLAGS
+ outputs: []regMask{
+ 5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "RSBSshiftLL",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.ARSB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ {1, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ },
+ clobbers: 4294967296, // FLAGS
+ outputs: []regMask{
+ 5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "RSBSshiftRL",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.ARSB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ {1, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ },
+ clobbers: 4294967296, // FLAGS
+ outputs: []regMask{
+ 5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "RSBSshiftRA",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.ARSB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ {1, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ },
+ clobbers: 4294967296, // FLAGS
+ outputs: []regMask{
+ 5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "ADDshiftLLreg",
+ argLen: 3,
+ asm: arm.AADD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {2, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ outputs: []regMask{
+ 5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "ADDshiftRLreg",
+ argLen: 3,
+ asm: arm.AADD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {2, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ outputs: []regMask{
+ 5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "ADDshiftRAreg",
+ argLen: 3,
+ asm: arm.AADD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {2, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ outputs: []regMask{
+ 5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "SUBshiftLLreg",
+ argLen: 3,
+ asm: arm.ASUB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {2, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ outputs: []regMask{
+ 5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "SUBshiftRLreg",
+ argLen: 3,
+ asm: arm.ASUB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {2, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ outputs: []regMask{
+ 5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "SUBshiftRAreg",
+ argLen: 3,
+ asm: arm.ASUB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {2, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ outputs: []regMask{
+ 5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "RSBshiftLLreg",
+ argLen: 3,
+ asm: arm.ARSB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {2, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ outputs: []regMask{
+ 5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "RSBshiftRLreg",
+ argLen: 3,
+ asm: arm.ARSB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {2, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ outputs: []regMask{
+ 5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "RSBshiftRAreg",
+ argLen: 3,
+ asm: arm.ARSB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {2, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ outputs: []regMask{
+ 5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "ANDshiftLLreg",
+ argLen: 3,
+ asm: arm.AAND,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {2, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ outputs: []regMask{
+ 5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "ANDshiftRLreg",
+ argLen: 3,
+ asm: arm.AAND,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {2, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ outputs: []regMask{
+ 5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "ANDshiftRAreg",
+ argLen: 3,
+ asm: arm.AAND,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {2, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ outputs: []regMask{
+ 5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "ORshiftLLreg",
+ argLen: 3,
+ asm: arm.AORR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {2, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ outputs: []regMask{
+ 5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "ORshiftRLreg",
+ argLen: 3,
+ asm: arm.AORR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {2, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ outputs: []regMask{
+ 5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "ORshiftRAreg",
+ argLen: 3,
+ asm: arm.AORR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {2, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ outputs: []regMask{
+ 5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "XORshiftLLreg",
+ argLen: 3,
+ asm: arm.AEOR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {2, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ outputs: []regMask{
+ 5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "XORshiftRLreg",
+ argLen: 3,
+ asm: arm.AEOR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {2, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ outputs: []regMask{
+ 5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "XORshiftRAreg",
+ argLen: 3,
+ asm: arm.AEOR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {2, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ outputs: []regMask{
+ 5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "BICshiftLLreg",
+ argLen: 3,
+ asm: arm.ABIC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {2, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ outputs: []regMask{
+ 5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "BICshiftRLreg",
+ argLen: 3,
+ asm: arm.ABIC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {2, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ outputs: []regMask{
+ 5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "BICshiftRAreg",
+ argLen: 3,
+ asm: arm.ABIC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {2, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ outputs: []regMask{
+ 5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "MVNshiftLLreg",
+ argLen: 2,
+ asm: arm.AMVN,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ {1, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ },
+ outputs: []regMask{
+ 5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "MVNshiftRLreg",
+ argLen: 2,
+ asm: arm.AMVN,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ {1, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ },
+ outputs: []regMask{
+ 5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "MVNshiftRAreg",
+ argLen: 2,
+ asm: arm.AMVN,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ {1, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ },
+ outputs: []regMask{
+ 5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "ADCshiftLLreg",
+ argLen: 4,
+ asm: arm.AADC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {3, 4294967296}, // FLAGS
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {2, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ outputs: []regMask{
+ 5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "ADCshiftRLreg",
+ argLen: 4,
+ asm: arm.AADC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {3, 4294967296}, // FLAGS
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {2, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ outputs: []regMask{
+ 5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "ADCshiftRAreg",
+ argLen: 4,
+ asm: arm.AADC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {3, 4294967296}, // FLAGS
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {2, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ outputs: []regMask{
+ 5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "SBCshiftLLreg",
+ argLen: 4,
+ asm: arm.ASBC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {3, 4294967296}, // FLAGS
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {2, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ outputs: []regMask{
+ 5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "SBCshiftRLreg",
+ argLen: 4,
+ asm: arm.ASBC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {3, 4294967296}, // FLAGS
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {2, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ outputs: []regMask{
+ 5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "SBCshiftRAreg",
+ argLen: 4,
+ asm: arm.ASBC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {3, 4294967296}, // FLAGS
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {2, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ outputs: []regMask{
+ 5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "RSCshiftLLreg",
+ argLen: 4,
+ asm: arm.ARSC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {3, 4294967296}, // FLAGS
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {2, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ outputs: []regMask{
+ 5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "RSCshiftRLreg",
+ argLen: 4,
+ asm: arm.ARSC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {3, 4294967296}, // FLAGS
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {2, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ outputs: []regMask{
+ 5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "RSCshiftRAreg",
+ argLen: 4,
+ asm: arm.ARSC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {3, 4294967296}, // FLAGS
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {2, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ outputs: []regMask{
+ 5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "ADDSshiftLLreg",
+ argLen: 3,
+ asm: arm.AADD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {2, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ clobbers: 4294967296, // FLAGS
+ outputs: []regMask{
+ 5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "ADDSshiftRLreg",
+ argLen: 3,
+ asm: arm.AADD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {2, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ clobbers: 4294967296, // FLAGS
+ outputs: []regMask{
+ 5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "ADDSshiftRAreg",
+ argLen: 3,
+ asm: arm.AADD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {2, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ clobbers: 4294967296, // FLAGS
+ outputs: []regMask{
+ 5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "SUBSshiftLLreg",
+ argLen: 3,
+ asm: arm.ASUB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {2, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ clobbers: 4294967296, // FLAGS
+ outputs: []regMask{
+ 5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "SUBSshiftRLreg",
+ argLen: 3,
+ asm: arm.ASUB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {2, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ clobbers: 4294967296, // FLAGS
+ outputs: []regMask{
+ 5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "SUBSshiftRAreg",
+ argLen: 3,
+ asm: arm.ASUB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {2, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ clobbers: 4294967296, // FLAGS
+ outputs: []regMask{
+ 5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "RSBSshiftLLreg",
+ argLen: 3,
+ asm: arm.ARSB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {2, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ clobbers: 4294967296, // FLAGS
+ outputs: []regMask{
+ 5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "RSBSshiftRLreg",
+ argLen: 3,
+ asm: arm.ARSB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {2, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ clobbers: 4294967296, // FLAGS
+ outputs: []regMask{
+ 5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "RSBSshiftRAreg",
+ argLen: 3,
+ asm: arm.ARSB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {2, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ clobbers: 4294967296, // FLAGS
+ outputs: []regMask{
+ 5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
name: "CMP",
argLen: 2,
asm: arm.ACMP,
@@ -7389,6 +8777,96 @@ var opcodeTable = [...]opInfo{
},
},
{
+ name: "CMPshiftLL",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.ACMP,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ {1, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ },
+ outputs: []regMask{
+ 4294967296, // FLAGS
+ },
+ },
+ },
+ {
+ name: "CMPshiftRL",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.ACMP,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ {1, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ },
+ outputs: []regMask{
+ 4294967296, // FLAGS
+ },
+ },
+ },
+ {
+ name: "CMPshiftRA",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.ACMP,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ {1, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ },
+ outputs: []regMask{
+ 4294967296, // FLAGS
+ },
+ },
+ },
+ {
+ name: "CMPshiftLLreg",
+ argLen: 3,
+ asm: arm.ACMP,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {2, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ outputs: []regMask{
+ 4294967296, // FLAGS
+ },
+ },
+ },
+ {
+ name: "CMPshiftRLreg",
+ argLen: 3,
+ asm: arm.ACMP,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {2, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ outputs: []regMask{
+ 4294967296, // FLAGS
+ },
+ },
+ },
+ {
+ name: "CMPshiftRAreg",
+ argLen: 3,
+ asm: arm.ACMP,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {2, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ outputs: []regMask{
+ 4294967296, // FLAGS
+ },
+ },
+ },
+ {
name: "MOVWconst",
auxType: auxInt32,
argLen: 0,
@@ -7598,6 +9076,116 @@ var opcodeTable = [...]opInfo{
},
},
{
+ name: "MOVWloadidx",
+ argLen: 3,
+ asm: arm.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ {0, 8589948927}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP SB
+ },
+ outputs: []regMask{
+ 5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "MOVWloadshiftLL",
+ auxType: auxInt32,
+ argLen: 3,
+ asm: arm.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ {0, 8589948927}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP SB
+ },
+ outputs: []regMask{
+ 5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "MOVWloadshiftRL",
+ auxType: auxInt32,
+ argLen: 3,
+ asm: arm.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ {0, 8589948927}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP SB
+ },
+ outputs: []regMask{
+ 5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "MOVWloadshiftRA",
+ auxType: auxInt32,
+ argLen: 3,
+ asm: arm.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ {0, 8589948927}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP SB
+ },
+ outputs: []regMask{
+ 5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "MOVWstoreidx",
+ argLen: 4,
+ asm: arm.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ {2, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ {0, 8589948927}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP SB
+ },
+ },
+ },
+ {
+ name: "MOVWstoreshiftLL",
+ auxType: auxInt32,
+ argLen: 4,
+ asm: arm.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ {2, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ {0, 8589948927}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP SB
+ },
+ },
+ },
+ {
+ name: "MOVWstoreshiftRL",
+ auxType: auxInt32,
+ argLen: 4,
+ asm: arm.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ {2, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ {0, 8589948927}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP SB
+ },
+ },
+ },
+ {
+ name: "MOVWstoreshiftRA",
+ auxType: auxInt32,
+ argLen: 4,
+ asm: arm.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ {2, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ {0, 8589948927}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP SB
+ },
+ },
+ },
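
[Editor's note] The indexed and shift-indexed load/store ops above correspond to ARM's register-offset addressing modes. A hedged sketch of the code shape they serve, with the slice bounds check left out of the discussion:

    // loadIdx indexes a word-sized slice: the element address is the base
    // plus the index scaled by 4, which matches the base+(index<<2) form
    // that MOVWloadshiftLL encodes (LDR Rd, [Rn, Rm, LSL #2] in ARM
    // syntax).
    func loadIdx(p []uint32, i int) uint32 {
        return p[i]
    }
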
+ {
name: "MOVBreg",
argLen: 1,
asm: arm.AMOVBS,
@@ -7793,6 +9381,53 @@ var opcodeTable = [...]opInfo{
},
},
{
+ name: "CMOVWHSconst",
+ auxType: auxInt32,
+ argLen: 2,
+ resultInArg0: true,
+ asm: arm.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 4294967296}, // FLAGS
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ outputs: []regMask{
+ 5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "CMOVWLSconst",
+ auxType: auxInt32,
+ argLen: 2,
+ resultInArg0: true,
+ asm: arm.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 4294967296}, // FLAGS
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ outputs: []regMask{
+ 5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "SRAcond",
+ argLen: 3,
+ asm: arm.ASRA,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {2, 4294967296}, // FLAGS
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ outputs: []regMask{
+ 5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
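
[Editor's note] SRAcond, together with the CMOVW*const ops above it, supports breaking variable shifts into ordinary SSA values: Go defines signed right shifts by large counts to fill with the sign bit, so the lowering must saturate the count rather than let the hardware wrap it. A sketch of the semantics, assuming the flags argument records an unsigned comparison of the shift count against 32:

    // sra mirrors what an SRAcond-based lowering computes for x >> s when
    // s is not known to be < 32: counts of 32 or more saturate to 31 so
    // every result bit is a copy of the sign bit, matching Go's shift
    // semantics.
    func sra(x int32, s uint32) int32 {
        if s >= 32 {
            s = 31
        }
        return x >> s
    }
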
+ {
name: "CALLstatic",
auxType: auxSymOff,
argLen: 1,
@@ -8000,18 +9635,6 @@ var opcodeTable = [...]opInfo{
},
},
{
- name: "LoweredZeromask",
- argLen: 1,
- reg: regInfo{
- inputs: []inputInfo{
- {0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
- },
- outputs: []regMask{
- 5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
- },
- },
- },
- {
name: "DUFFZERO",
auxType: auxInt64,
argLen: 3,
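
[Editor's note] The deleted LoweredZeromask entry was an opaque op that expanded to multiple instructions at code generation; this patch drops it in favor of explicit compares and the conditional moves added above, which the rewrite rules below can fold. A sketch of the mask computation it performed; the polarity is an assumption, since only the op's register constraints are visible in this hunk:

    // zeromask returns an all-ones mask for nonzero x and zero otherwise
    // (assumed polarity). Expressed as a compare plus conditional move,
    // the pieces become visible to constant folding and CSE.
    func zeromask(x uint32) uint32 {
        if x != 0 {
            return 0xFFFFFFFF
        }
        return 0
    }
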
diff --git a/src/cmd/compile/internal/ssa/rewriteARM.go b/src/cmd/compile/internal/ssa/rewriteARM.go
index c36976c953..ceac5839ef 100644
--- a/src/cmd/compile/internal/ssa/rewriteARM.go
+++ b/src/cmd/compile/internal/ssa/rewriteARM.go
@@ -12,16 +12,64 @@ func rewriteValueARM(v *Value, config *Config) bool {
return rewriteValueARM_OpARMADC(v, config)
case OpARMADCconst:
return rewriteValueARM_OpARMADCconst(v, config)
+ case OpARMADCshiftLL:
+ return rewriteValueARM_OpARMADCshiftLL(v, config)
+ case OpARMADCshiftLLreg:
+ return rewriteValueARM_OpARMADCshiftLLreg(v, config)
+ case OpARMADCshiftRA:
+ return rewriteValueARM_OpARMADCshiftRA(v, config)
+ case OpARMADCshiftRAreg:
+ return rewriteValueARM_OpARMADCshiftRAreg(v, config)
+ case OpARMADCshiftRL:
+ return rewriteValueARM_OpARMADCshiftRL(v, config)
+ case OpARMADCshiftRLreg:
+ return rewriteValueARM_OpARMADCshiftRLreg(v, config)
case OpARMADD:
return rewriteValueARM_OpARMADD(v, config)
case OpARMADDS:
return rewriteValueARM_OpARMADDS(v, config)
+ case OpARMADDSshiftLL:
+ return rewriteValueARM_OpARMADDSshiftLL(v, config)
+ case OpARMADDSshiftLLreg:
+ return rewriteValueARM_OpARMADDSshiftLLreg(v, config)
+ case OpARMADDSshiftRA:
+ return rewriteValueARM_OpARMADDSshiftRA(v, config)
+ case OpARMADDSshiftRAreg:
+ return rewriteValueARM_OpARMADDSshiftRAreg(v, config)
+ case OpARMADDSshiftRL:
+ return rewriteValueARM_OpARMADDSshiftRL(v, config)
+ case OpARMADDSshiftRLreg:
+ return rewriteValueARM_OpARMADDSshiftRLreg(v, config)
case OpARMADDconst:
return rewriteValueARM_OpARMADDconst(v, config)
+ case OpARMADDshiftLL:
+ return rewriteValueARM_OpARMADDshiftLL(v, config)
+ case OpARMADDshiftLLreg:
+ return rewriteValueARM_OpARMADDshiftLLreg(v, config)
+ case OpARMADDshiftRA:
+ return rewriteValueARM_OpARMADDshiftRA(v, config)
+ case OpARMADDshiftRAreg:
+ return rewriteValueARM_OpARMADDshiftRAreg(v, config)
+ case OpARMADDshiftRL:
+ return rewriteValueARM_OpARMADDshiftRL(v, config)
+ case OpARMADDshiftRLreg:
+ return rewriteValueARM_OpARMADDshiftRLreg(v, config)
case OpARMAND:
return rewriteValueARM_OpARMAND(v, config)
case OpARMANDconst:
return rewriteValueARM_OpARMANDconst(v, config)
+ case OpARMANDshiftLL:
+ return rewriteValueARM_OpARMANDshiftLL(v, config)
+ case OpARMANDshiftLLreg:
+ return rewriteValueARM_OpARMANDshiftLLreg(v, config)
+ case OpARMANDshiftRA:
+ return rewriteValueARM_OpARMANDshiftRA(v, config)
+ case OpARMANDshiftRAreg:
+ return rewriteValueARM_OpARMANDshiftRAreg(v, config)
+ case OpARMANDshiftRL:
+ return rewriteValueARM_OpARMANDshiftRL(v, config)
+ case OpARMANDshiftRLreg:
+ return rewriteValueARM_OpARMANDshiftRLreg(v, config)
case OpAdd16:
return rewriteValueARM_OpAdd16(v, config)
case OpAdd32:
@@ -52,10 +100,38 @@ func rewriteValueARM(v *Value, config *Config) bool {
return rewriteValueARM_OpARMBIC(v, config)
case OpARMBICconst:
return rewriteValueARM_OpARMBICconst(v, config)
+ case OpARMBICshiftLL:
+ return rewriteValueARM_OpARMBICshiftLL(v, config)
+ case OpARMBICshiftLLreg:
+ return rewriteValueARM_OpARMBICshiftLLreg(v, config)
+ case OpARMBICshiftRA:
+ return rewriteValueARM_OpARMBICshiftRA(v, config)
+ case OpARMBICshiftRAreg:
+ return rewriteValueARM_OpARMBICshiftRAreg(v, config)
+ case OpARMBICshiftRL:
+ return rewriteValueARM_OpARMBICshiftRL(v, config)
+ case OpARMBICshiftRLreg:
+ return rewriteValueARM_OpARMBICshiftRLreg(v, config)
+ case OpARMCMOVWHSconst:
+ return rewriteValueARM_OpARMCMOVWHSconst(v, config)
+ case OpARMCMOVWLSconst:
+ return rewriteValueARM_OpARMCMOVWLSconst(v, config)
case OpARMCMP:
return rewriteValueARM_OpARMCMP(v, config)
case OpARMCMPconst:
return rewriteValueARM_OpARMCMPconst(v, config)
+ case OpARMCMPshiftLL:
+ return rewriteValueARM_OpARMCMPshiftLL(v, config)
+ case OpARMCMPshiftLLreg:
+ return rewriteValueARM_OpARMCMPshiftLLreg(v, config)
+ case OpARMCMPshiftRA:
+ return rewriteValueARM_OpARMCMPshiftRA(v, config)
+ case OpARMCMPshiftRAreg:
+ return rewriteValueARM_OpARMCMPshiftRAreg(v, config)
+ case OpARMCMPshiftRL:
+ return rewriteValueARM_OpARMCMPshiftRL(v, config)
+ case OpARMCMPshiftRLreg:
+ return rewriteValueARM_OpARMCMPshiftRLreg(v, config)
case OpClosureCall:
return rewriteValueARM_OpClosureCall(v, config)
case OpCom16:
@@ -244,8 +320,6 @@ func rewriteValueARM(v *Value, config *Config) bool {
return rewriteValueARM_OpARMLessThanU(v, config)
case OpLoad:
return rewriteValueARM_OpLoad(v, config)
- case OpARMLoweredZeromask:
- return rewriteValueARM_OpARMLoweredZeromask(v, config)
case OpLrot16:
return rewriteValueARM_OpLrot16(v, config)
case OpLrot32:
@@ -306,14 +380,44 @@ func rewriteValueARM(v *Value, config *Config) bool {
return rewriteValueARM_OpARMMOVHstore(v, config)
case OpARMMOVWload:
return rewriteValueARM_OpARMMOVWload(v, config)
+ case OpARMMOVWloadidx:
+ return rewriteValueARM_OpARMMOVWloadidx(v, config)
+ case OpARMMOVWloadshiftLL:
+ return rewriteValueARM_OpARMMOVWloadshiftLL(v, config)
+ case OpARMMOVWloadshiftRA:
+ return rewriteValueARM_OpARMMOVWloadshiftRA(v, config)
+ case OpARMMOVWloadshiftRL:
+ return rewriteValueARM_OpARMMOVWloadshiftRL(v, config)
+ case OpARMMOVWreg:
+ return rewriteValueARM_OpARMMOVWreg(v, config)
case OpARMMOVWstore:
return rewriteValueARM_OpARMMOVWstore(v, config)
+ case OpARMMOVWstoreidx:
+ return rewriteValueARM_OpARMMOVWstoreidx(v, config)
+ case OpARMMOVWstoreshiftLL:
+ return rewriteValueARM_OpARMMOVWstoreshiftLL(v, config)
+ case OpARMMOVWstoreshiftRA:
+ return rewriteValueARM_OpARMMOVWstoreshiftRA(v, config)
+ case OpARMMOVWstoreshiftRL:
+ return rewriteValueARM_OpARMMOVWstoreshiftRL(v, config)
case OpARMMUL:
return rewriteValueARM_OpARMMUL(v, config)
case OpARMMULA:
return rewriteValueARM_OpARMMULA(v, config)
case OpARMMVN:
return rewriteValueARM_OpARMMVN(v, config)
+ case OpARMMVNshiftLL:
+ return rewriteValueARM_OpARMMVNshiftLL(v, config)
+ case OpARMMVNshiftLLreg:
+ return rewriteValueARM_OpARMMVNshiftLLreg(v, config)
+ case OpARMMVNshiftRA:
+ return rewriteValueARM_OpARMMVNshiftRA(v, config)
+ case OpARMMVNshiftRAreg:
+ return rewriteValueARM_OpARMMVNshiftRAreg(v, config)
+ case OpARMMVNshiftRL:
+ return rewriteValueARM_OpARMMVNshiftRL(v, config)
+ case OpARMMVNshiftRLreg:
+ return rewriteValueARM_OpARMMVNshiftRLreg(v, config)
case OpMod16:
return rewriteValueARM_OpMod16(v, config)
case OpMod16u:
@@ -374,6 +478,18 @@ func rewriteValueARM(v *Value, config *Config) bool {
return rewriteValueARM_OpARMOR(v, config)
case OpARMORconst:
return rewriteValueARM_OpARMORconst(v, config)
+ case OpARMORshiftLL:
+ return rewriteValueARM_OpARMORshiftLL(v, config)
+ case OpARMORshiftLLreg:
+ return rewriteValueARM_OpARMORshiftLLreg(v, config)
+ case OpARMORshiftRA:
+ return rewriteValueARM_OpARMORshiftRA(v, config)
+ case OpARMORshiftRAreg:
+ return rewriteValueARM_OpARMORshiftRAreg(v, config)
+ case OpARMORshiftRL:
+ return rewriteValueARM_OpARMORshiftRL(v, config)
+ case OpARMORshiftRLreg:
+ return rewriteValueARM_OpARMORshiftRLreg(v, config)
case OpOffPtr:
return rewriteValueARM_OpOffPtr(v, config)
case OpOr16:
@@ -386,10 +502,46 @@ func rewriteValueARM(v *Value, config *Config) bool {
return rewriteValueARM_OpOrB(v, config)
case OpARMRSB:
return rewriteValueARM_OpARMRSB(v, config)
+ case OpARMRSBSshiftLL:
+ return rewriteValueARM_OpARMRSBSshiftLL(v, config)
+ case OpARMRSBSshiftLLreg:
+ return rewriteValueARM_OpARMRSBSshiftLLreg(v, config)
+ case OpARMRSBSshiftRA:
+ return rewriteValueARM_OpARMRSBSshiftRA(v, config)
+ case OpARMRSBSshiftRAreg:
+ return rewriteValueARM_OpARMRSBSshiftRAreg(v, config)
+ case OpARMRSBSshiftRL:
+ return rewriteValueARM_OpARMRSBSshiftRL(v, config)
+ case OpARMRSBSshiftRLreg:
+ return rewriteValueARM_OpARMRSBSshiftRLreg(v, config)
case OpARMRSBconst:
return rewriteValueARM_OpARMRSBconst(v, config)
+ case OpARMRSBshiftLL:
+ return rewriteValueARM_OpARMRSBshiftLL(v, config)
+ case OpARMRSBshiftLLreg:
+ return rewriteValueARM_OpARMRSBshiftLLreg(v, config)
+ case OpARMRSBshiftRA:
+ return rewriteValueARM_OpARMRSBshiftRA(v, config)
+ case OpARMRSBshiftRAreg:
+ return rewriteValueARM_OpARMRSBshiftRAreg(v, config)
+ case OpARMRSBshiftRL:
+ return rewriteValueARM_OpARMRSBshiftRL(v, config)
+ case OpARMRSBshiftRLreg:
+ return rewriteValueARM_OpARMRSBshiftRLreg(v, config)
case OpARMRSCconst:
return rewriteValueARM_OpARMRSCconst(v, config)
+ case OpARMRSCshiftLL:
+ return rewriteValueARM_OpARMRSCshiftLL(v, config)
+ case OpARMRSCshiftLLreg:
+ return rewriteValueARM_OpARMRSCshiftLLreg(v, config)
+ case OpARMRSCshiftRA:
+ return rewriteValueARM_OpARMRSCshiftRA(v, config)
+ case OpARMRSCshiftRAreg:
+ return rewriteValueARM_OpARMRSCshiftRAreg(v, config)
+ case OpARMRSCshiftRL:
+ return rewriteValueARM_OpARMRSCshiftRL(v, config)
+ case OpARMRSCshiftRLreg:
+ return rewriteValueARM_OpARMRSCshiftRLreg(v, config)
case OpRsh16Ux16:
return rewriteValueARM_OpRsh16Ux16(v, config)
case OpRsh16Ux32:
@@ -442,12 +594,26 @@ func rewriteValueARM(v *Value, config *Config) bool {
return rewriteValueARM_OpARMSBC(v, config)
case OpARMSBCconst:
return rewriteValueARM_OpARMSBCconst(v, config)
+ case OpARMSBCshiftLL:
+ return rewriteValueARM_OpARMSBCshiftLL(v, config)
+ case OpARMSBCshiftLLreg:
+ return rewriteValueARM_OpARMSBCshiftLLreg(v, config)
+ case OpARMSBCshiftRA:
+ return rewriteValueARM_OpARMSBCshiftRA(v, config)
+ case OpARMSBCshiftRAreg:
+ return rewriteValueARM_OpARMSBCshiftRAreg(v, config)
+ case OpARMSBCshiftRL:
+ return rewriteValueARM_OpARMSBCshiftRL(v, config)
+ case OpARMSBCshiftRLreg:
+ return rewriteValueARM_OpARMSBCshiftRLreg(v, config)
case OpARMSLL:
return rewriteValueARM_OpARMSLL(v, config)
case OpARMSLLconst:
return rewriteValueARM_OpARMSLLconst(v, config)
case OpARMSRA:
return rewriteValueARM_OpARMSRA(v, config)
+ case OpARMSRAcond:
+ return rewriteValueARM_OpARMSRAcond(v, config)
case OpARMSRAconst:
return rewriteValueARM_OpARMSRAconst(v, config)
case OpARMSRL:
@@ -458,8 +624,32 @@ func rewriteValueARM(v *Value, config *Config) bool {
return rewriteValueARM_OpARMSUB(v, config)
case OpARMSUBS:
return rewriteValueARM_OpARMSUBS(v, config)
+ case OpARMSUBSshiftLL:
+ return rewriteValueARM_OpARMSUBSshiftLL(v, config)
+ case OpARMSUBSshiftLLreg:
+ return rewriteValueARM_OpARMSUBSshiftLLreg(v, config)
+ case OpARMSUBSshiftRA:
+ return rewriteValueARM_OpARMSUBSshiftRA(v, config)
+ case OpARMSUBSshiftRAreg:
+ return rewriteValueARM_OpARMSUBSshiftRAreg(v, config)
+ case OpARMSUBSshiftRL:
+ return rewriteValueARM_OpARMSUBSshiftRL(v, config)
+ case OpARMSUBSshiftRLreg:
+ return rewriteValueARM_OpARMSUBSshiftRLreg(v, config)
case OpARMSUBconst:
return rewriteValueARM_OpARMSUBconst(v, config)
+ case OpARMSUBshiftLL:
+ return rewriteValueARM_OpARMSUBshiftLL(v, config)
+ case OpARMSUBshiftLLreg:
+ return rewriteValueARM_OpARMSUBshiftLLreg(v, config)
+ case OpARMSUBshiftRA:
+ return rewriteValueARM_OpARMSUBshiftRA(v, config)
+ case OpARMSUBshiftRAreg:
+ return rewriteValueARM_OpARMSUBshiftRAreg(v, config)
+ case OpARMSUBshiftRL:
+ return rewriteValueARM_OpARMSUBshiftRL(v, config)
+ case OpARMSUBshiftRLreg:
+ return rewriteValueARM_OpARMSUBshiftRLreg(v, config)
case OpSelect0:
return rewriteValueARM_OpSelect0(v, config)
case OpSelect1:
@@ -504,6 +694,18 @@ func rewriteValueARM(v *Value, config *Config) bool {
return rewriteValueARM_OpARMXOR(v, config)
case OpARMXORconst:
return rewriteValueARM_OpARMXORconst(v, config)
+ case OpARMXORshiftLL:
+ return rewriteValueARM_OpARMXORshiftLL(v, config)
+ case OpARMXORshiftLLreg:
+ return rewriteValueARM_OpARMXORshiftLLreg(v, config)
+ case OpARMXORshiftRA:
+ return rewriteValueARM_OpARMXORshiftRA(v, config)
+ case OpARMXORshiftRAreg:
+ return rewriteValueARM_OpARMXORshiftRAreg(v, config)
+ case OpARMXORshiftRL:
+ return rewriteValueARM_OpARMXORshiftRL(v, config)
+ case OpARMXORshiftRLreg:
+ return rewriteValueARM_OpARMXORshiftRLreg(v, config)
case OpXor16:
return rewriteValueARM_OpXor16(v, config)
case OpXor32:
@@ -560,6 +762,234 @@ func rewriteValueARM_OpARMADC(v *Value, config *Config) bool {
v.AddArg(flags)
return true
}
+ // match: (ADC x (SLLconst [c] y) flags)
+ // cond:
+ // result: (ADCshiftLL x y [c] flags)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSLLconst {
+ break
+ }
+ c := v_1.AuxInt
+ y := v_1.Args[0]
+ flags := v.Args[2]
+ v.reset(OpARMADCshiftLL)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AuxInt = c
+ v.AddArg(flags)
+ return true
+ }
+ // match: (ADC (SLLconst [c] y) x flags)
+ // cond:
+ // result: (ADCshiftLL x y [c] flags)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSLLconst {
+ break
+ }
+ c := v_0.AuxInt
+ y := v_0.Args[0]
+ x := v.Args[1]
+ flags := v.Args[2]
+ v.reset(OpARMADCshiftLL)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AuxInt = c
+ v.AddArg(flags)
+ return true
+ }
+ // match: (ADC x (SRLconst [c] y) flags)
+ // cond:
+ // result: (ADCshiftRL x y [c] flags)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSRLconst {
+ break
+ }
+ c := v_1.AuxInt
+ y := v_1.Args[0]
+ flags := v.Args[2]
+ v.reset(OpARMADCshiftRL)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AuxInt = c
+ v.AddArg(flags)
+ return true
+ }
+ // match: (ADC (SRLconst [c] y) x flags)
+ // cond:
+ // result: (ADCshiftRL x y [c] flags)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSRLconst {
+ break
+ }
+ c := v_0.AuxInt
+ y := v_0.Args[0]
+ x := v.Args[1]
+ flags := v.Args[2]
+ v.reset(OpARMADCshiftRL)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AuxInt = c
+ v.AddArg(flags)
+ return true
+ }
+ // match: (ADC x (SRAconst [c] y) flags)
+ // cond:
+ // result: (ADCshiftRA x y [c] flags)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSRAconst {
+ break
+ }
+ c := v_1.AuxInt
+ y := v_1.Args[0]
+ flags := v.Args[2]
+ v.reset(OpARMADCshiftRA)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AuxInt = c
+ v.AddArg(flags)
+ return true
+ }
+ // match: (ADC (SRAconst [c] y) x flags)
+ // cond:
+ // result: (ADCshiftRA x y [c] flags)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSRAconst {
+ break
+ }
+ c := v_0.AuxInt
+ y := v_0.Args[0]
+ x := v.Args[1]
+ flags := v.Args[2]
+ v.reset(OpARMADCshiftRA)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AuxInt = c
+ v.AddArg(flags)
+ return true
+ }
+ // match: (ADC x (SLL y z) flags)
+ // cond:
+ // result: (ADCshiftLLreg x y z flags)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSLL {
+ break
+ }
+ y := v_1.Args[0]
+ z := v_1.Args[1]
+ flags := v.Args[2]
+ v.reset(OpARMADCshiftLLreg)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(z)
+ v.AddArg(flags)
+ return true
+ }
+ // match: (ADC (SLL y z) x flags)
+ // cond:
+ // result: (ADCshiftLLreg x y z flags)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSLL {
+ break
+ }
+ y := v_0.Args[0]
+ z := v_0.Args[1]
+ x := v.Args[1]
+ flags := v.Args[2]
+ v.reset(OpARMADCshiftLLreg)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(z)
+ v.AddArg(flags)
+ return true
+ }
+ // match: (ADC x (SRL y z) flags)
+ // cond:
+ // result: (ADCshiftRLreg x y z flags)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSRL {
+ break
+ }
+ y := v_1.Args[0]
+ z := v_1.Args[1]
+ flags := v.Args[2]
+ v.reset(OpARMADCshiftRLreg)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(z)
+ v.AddArg(flags)
+ return true
+ }
+ // match: (ADC (SRL y z) x flags)
+ // cond:
+ // result: (ADCshiftRLreg x y z flags)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSRL {
+ break
+ }
+ y := v_0.Args[0]
+ z := v_0.Args[1]
+ x := v.Args[1]
+ flags := v.Args[2]
+ v.reset(OpARMADCshiftRLreg)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(z)
+ v.AddArg(flags)
+ return true
+ }
+ // match: (ADC x (SRA y z) flags)
+ // cond:
+ // result: (ADCshiftRAreg x y z flags)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSRA {
+ break
+ }
+ y := v_1.Args[0]
+ z := v_1.Args[1]
+ flags := v.Args[2]
+ v.reset(OpARMADCshiftRAreg)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(z)
+ v.AddArg(flags)
+ return true
+ }
+ // match: (ADC (SRA y z) x flags)
+ // cond:
+ // result: (ADCshiftRAreg x y z flags)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSRA {
+ break
+ }
+ y := v_0.Args[0]
+ z := v_0.Args[1]
+ x := v.Args[1]
+ flags := v.Args[2]
+ v.reset(OpARMADCshiftRAreg)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(z)
+ v.AddArg(flags)
+ return true
+ }
return false
}
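
[Editor's note] Each generated rewrite function tries its rules in order; the match/cond/result comments reproduce the rule being implemented. For ADC the payoff is in multi-word arithmetic, where the carry chain meets shifted operands. A hedged example of a source shape that reaches ADC on 32-bit ARM:

    // add64 adds two 64-bit values on a 32-bit target: the low words add
    // with ADDS (setting carry) and the high words with ADC. If a high
    // word is a shifted value, the rules above fold the shift into the
    // ADC operand.
    func add64(a, b uint64) uint64 {
        return a + b
    }
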
func rewriteValueARM_OpARMADCconst(v *Value, config *Config) bool {
@@ -603,6 +1033,273 @@ func rewriteValueARM_OpARMADCconst(v *Value, config *Config) bool {
}
return false
}
+func rewriteValueARM_OpARMADCshiftLL(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ADCshiftLL (MOVWconst [c]) x [d] flags)
+ // cond:
+ // result: (ADCconst [c] (SLLconst <x.Type> x [d]) flags)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ d := v.AuxInt
+ flags := v.Args[2]
+ v.reset(OpARMADCconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
+ v0.AddArg(x)
+ v0.AuxInt = d
+ v.AddArg(v0)
+ v.AddArg(flags)
+ return true
+ }
+ // match: (ADCshiftLL x (MOVWconst [c]) [d] flags)
+ // cond:
+ // result: (ADCconst x [int64(uint32(c)<<uint64(d))] flags)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ d := v.AuxInt
+ flags := v.Args[2]
+ v.reset(OpARMADCconst)
+ v.AddArg(x)
+ v.AuxInt = int64(uint32(c) << uint64(d))
+ v.AddArg(flags)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMADCshiftLLreg(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ADCshiftLLreg (MOVWconst [c]) x y flags)
+ // cond:
+ // result: (ADCconst [c] (SLL <x.Type> x y) flags)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ y := v.Args[2]
+ flags := v.Args[3]
+ v.reset(OpARMADCconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSLL, x.Type)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v.AddArg(flags)
+ return true
+ }
+ // match: (ADCshiftLLreg x y (MOVWconst [c]) flags)
+ // cond:
+ // result: (ADCshiftLL x y [c] flags)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_2.AuxInt
+ flags := v.Args[3]
+ v.reset(OpARMADCshiftLL)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AuxInt = c
+ v.AddArg(flags)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMADCshiftRA(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ADCshiftRA (MOVWconst [c]) x [d] flags)
+ // cond:
+ // result: (ADCconst [c] (SRAconst <x.Type> x [d]) flags)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ d := v.AuxInt
+ flags := v.Args[2]
+ v.reset(OpARMADCconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSRAconst, x.Type)
+ v0.AddArg(x)
+ v0.AuxInt = d
+ v.AddArg(v0)
+ v.AddArg(flags)
+ return true
+ }
+ // match: (ADCshiftRA x (MOVWconst [c]) [d] flags)
+ // cond:
+ // result: (ADCconst x [int64(int32(c)>>uint64(d))] flags)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ d := v.AuxInt
+ flags := v.Args[2]
+ v.reset(OpARMADCconst)
+ v.AddArg(x)
+ v.AuxInt = int64(int32(c) >> uint64(d))
+ v.AddArg(flags)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMADCshiftRAreg(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ADCshiftRAreg (MOVWconst [c]) x y flags)
+ // cond:
+ // result: (ADCconst [c] (SRA <x.Type> x y) flags)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ y := v.Args[2]
+ flags := v.Args[3]
+ v.reset(OpARMADCconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSRA, x.Type)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v.AddArg(flags)
+ return true
+ }
+ // match: (ADCshiftRAreg x y (MOVWconst [c]) flags)
+ // cond:
+ // result: (ADCshiftRA x y [c] flags)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_2.AuxInt
+ flags := v.Args[3]
+ v.reset(OpARMADCshiftRA)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AuxInt = c
+ v.AddArg(flags)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMADCshiftRL(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ADCshiftRL (MOVWconst [c]) x [d] flags)
+ // cond:
+ // result: (ADCconst [c] (SRLconst <x.Type> x [d]) flags)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ d := v.AuxInt
+ flags := v.Args[2]
+ v.reset(OpARMADCconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSRLconst, x.Type)
+ v0.AddArg(x)
+ v0.AuxInt = d
+ v.AddArg(v0)
+ v.AddArg(flags)
+ return true
+ }
+ // match: (ADCshiftRL x (MOVWconst [c]) [d] flags)
+ // cond:
+ // result: (ADCconst x [int64(uint32(c)>>uint64(d))] flags)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ d := v.AuxInt
+ flags := v.Args[2]
+ v.reset(OpARMADCconst)
+ v.AddArg(x)
+ v.AuxInt = int64(uint32(c) >> uint64(d))
+ v.AddArg(flags)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMADCshiftRLreg(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ADCshiftRLreg (MOVWconst [c]) x y flags)
+ // cond:
+ // result: (ADCconst [c] (SRL <x.Type> x y) flags)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ y := v.Args[2]
+ flags := v.Args[3]
+ v.reset(OpARMADCconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSRL, x.Type)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v.AddArg(flags)
+ return true
+ }
+ // match: (ADCshiftRLreg x y (MOVWconst [c]) flags)
+ // cond:
+ // result: (ADCshiftRL x y [c] flags)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_2.AuxInt
+ flags := v.Args[3]
+ v.reset(OpARMADCshiftRL)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AuxInt = c
+ v.AddArg(flags)
+ return true
+ }
+ return false
+}
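
[Editor's note] Each *shiftXX rewrite has two constant arms: if the unshifted operand is a constant, the op becomes the corresponding *const op applied to a freshly built shift; if the shifted operand is a constant, the shift itself folds away at compile time. The fold is computed in 32-bit arithmetic and widened back, since AuxInt is an int64 field. A small demonstration of that arithmetic:

    // foldShiftLL evaluates the shift the way the rules above do: uint32
    // models the 32-bit ARM register, and the result is re-widened to
    // int64 for storage in AuxInt. The RL and RA folds differ only in
    // using uint32 or int32 right shifts.
    func foldShiftLL(c, d int64) int64 {
        return int64(uint32(c) << uint64(d))
    }
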
func rewriteValueARM_OpARMADD(v *Value, config *Config) bool {
b := v.Block
_ = b
@@ -636,6 +1333,210 @@ func rewriteValueARM_OpARMADD(v *Value, config *Config) bool {
v.AddArg(x)
return true
}
+ // match: (ADD x (SLLconst [c] y))
+ // cond:
+ // result: (ADDshiftLL x y [c])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSLLconst {
+ break
+ }
+ c := v_1.AuxInt
+ y := v_1.Args[0]
+ v.reset(OpARMADDshiftLL)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AuxInt = c
+ return true
+ }
+ // match: (ADD (SLLconst [c] y) x)
+ // cond:
+ // result: (ADDshiftLL x y [c])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSLLconst {
+ break
+ }
+ c := v_0.AuxInt
+ y := v_0.Args[0]
+ x := v.Args[1]
+ v.reset(OpARMADDshiftLL)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AuxInt = c
+ return true
+ }
+ // match: (ADD x (SRLconst [c] y))
+ // cond:
+ // result: (ADDshiftRL x y [c])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSRLconst {
+ break
+ }
+ c := v_1.AuxInt
+ y := v_1.Args[0]
+ v.reset(OpARMADDshiftRL)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AuxInt = c
+ return true
+ }
+ // match: (ADD (SRLconst [c] y) x)
+ // cond:
+ // result: (ADDshiftRL x y [c])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSRLconst {
+ break
+ }
+ c := v_0.AuxInt
+ y := v_0.Args[0]
+ x := v.Args[1]
+ v.reset(OpARMADDshiftRL)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AuxInt = c
+ return true
+ }
+ // match: (ADD x (SRAconst [c] y))
+ // cond:
+ // result: (ADDshiftRA x y [c])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSRAconst {
+ break
+ }
+ c := v_1.AuxInt
+ y := v_1.Args[0]
+ v.reset(OpARMADDshiftRA)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AuxInt = c
+ return true
+ }
+ // match: (ADD (SRAconst [c] y) x)
+ // cond:
+ // result: (ADDshiftRA x y [c])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSRAconst {
+ break
+ }
+ c := v_0.AuxInt
+ y := v_0.Args[0]
+ x := v.Args[1]
+ v.reset(OpARMADDshiftRA)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AuxInt = c
+ return true
+ }
+ // match: (ADD x (SLL y z))
+ // cond:
+ // result: (ADDshiftLLreg x y z)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSLL {
+ break
+ }
+ y := v_1.Args[0]
+ z := v_1.Args[1]
+ v.reset(OpARMADDshiftLLreg)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(z)
+ return true
+ }
+ // match: (ADD (SLL y z) x)
+ // cond:
+ // result: (ADDshiftLLreg x y z)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSLL {
+ break
+ }
+ y := v_0.Args[0]
+ z := v_0.Args[1]
+ x := v.Args[1]
+ v.reset(OpARMADDshiftLLreg)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(z)
+ return true
+ }
+ // match: (ADD x (SRL y z))
+ // cond:
+ // result: (ADDshiftRLreg x y z)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSRL {
+ break
+ }
+ y := v_1.Args[0]
+ z := v_1.Args[1]
+ v.reset(OpARMADDshiftRLreg)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(z)
+ return true
+ }
+ // match: (ADD (SRL y z) x)
+ // cond:
+ // result: (ADDshiftRLreg x y z)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSRL {
+ break
+ }
+ y := v_0.Args[0]
+ z := v_0.Args[1]
+ x := v.Args[1]
+ v.reset(OpARMADDshiftRLreg)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(z)
+ return true
+ }
+ // match: (ADD x (SRA y z))
+ // cond:
+ // result: (ADDshiftRAreg x y z)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSRA {
+ break
+ }
+ y := v_1.Args[0]
+ z := v_1.Args[1]
+ v.reset(OpARMADDshiftRAreg)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(z)
+ return true
+ }
+ // match: (ADD (SRA y z) x)
+ // cond:
+ // result: (ADDshiftRAreg x y z)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSRA {
+ break
+ }
+ y := v_0.Args[0]
+ z := v_0.Args[1]
+ x := v.Args[1]
+ v.reset(OpARMADDshiftRAreg)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(z)
+ return true
+ }
// match: (ADD x (RSBconst [0] y))
// cond:
// result: (SUB x y)
@@ -654,6 +1555,24 @@ func rewriteValueARM_OpARMADD(v *Value, config *Config) bool {
v.AddArg(y)
return true
}
+ // match: (ADD (RSBconst [0] y) x)
+ // cond:
+ // result: (SUB x y)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMRSBconst {
+ break
+ }
+ if v_0.AuxInt != 0 {
+ break
+ }
+ y := v_0.Args[0]
+ x := v.Args[1]
+ v.reset(OpARMSUB)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
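
[Editor's note] This new rule is the commuted form of the one just above it: (RSBconst [0] y) is ARM's reverse-subtract encoding of negation, so adding it on either side reduces to a plain SUB. A hedged source-level example:

    // addNeg adds a negated value; with both orderings of the rule in
    // place, x + (-y) lowers to a single SUB regardless of operand order.
    func addNeg(x, y int32) int32 {
        return x + (-y)
    }
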
// match: (ADD (MUL x y) a)
// cond:
// result: (MULA x y a)
@@ -723,6 +1642,453 @@ func rewriteValueARM_OpARMADDS(v *Value, config *Config) bool {
v.AddArg(x)
return true
}
+ // match: (ADDS x (SLLconst [c] y))
+ // cond:
+ // result: (ADDSshiftLL x y [c])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSLLconst {
+ break
+ }
+ c := v_1.AuxInt
+ y := v_1.Args[0]
+ v.reset(OpARMADDSshiftLL)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AuxInt = c
+ return true
+ }
+ // match: (ADDS (SLLconst [c] y) x)
+ // cond:
+ // result: (ADDSshiftLL x y [c])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSLLconst {
+ break
+ }
+ c := v_0.AuxInt
+ y := v_0.Args[0]
+ x := v.Args[1]
+ v.reset(OpARMADDSshiftLL)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AuxInt = c
+ return true
+ }
+ // match: (ADDS x (SRLconst [c] y))
+ // cond:
+ // result: (ADDSshiftRL x y [c])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSRLconst {
+ break
+ }
+ c := v_1.AuxInt
+ y := v_1.Args[0]
+ v.reset(OpARMADDSshiftRL)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AuxInt = c
+ return true
+ }
+ // match: (ADDS (SRLconst [c] y) x)
+ // cond:
+ // result: (ADDSshiftRL x y [c])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSRLconst {
+ break
+ }
+ c := v_0.AuxInt
+ y := v_0.Args[0]
+ x := v.Args[1]
+ v.reset(OpARMADDSshiftRL)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AuxInt = c
+ return true
+ }
+ // match: (ADDS x (SRAconst [c] y))
+ // cond:
+ // result: (ADDSshiftRA x y [c])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSRAconst {
+ break
+ }
+ c := v_1.AuxInt
+ y := v_1.Args[0]
+ v.reset(OpARMADDSshiftRA)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AuxInt = c
+ return true
+ }
+ // match: (ADDS (SRAconst [c] y) x)
+ // cond:
+ // result: (ADDSshiftRA x y [c])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSRAconst {
+ break
+ }
+ c := v_0.AuxInt
+ y := v_0.Args[0]
+ x := v.Args[1]
+ v.reset(OpARMADDSshiftRA)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AuxInt = c
+ return true
+ }
+ // match: (ADDS x (SLL y z))
+ // cond:
+ // result: (ADDSshiftLLreg x y z)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSLL {
+ break
+ }
+ y := v_1.Args[0]
+ z := v_1.Args[1]
+ v.reset(OpARMADDSshiftLLreg)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(z)
+ return true
+ }
+ // match: (ADDS (SLL y z) x)
+ // cond:
+ // result: (ADDSshiftLLreg x y z)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSLL {
+ break
+ }
+ y := v_0.Args[0]
+ z := v_0.Args[1]
+ x := v.Args[1]
+ v.reset(OpARMADDSshiftLLreg)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(z)
+ return true
+ }
+ // match: (ADDS x (SRL y z))
+ // cond:
+ // result: (ADDSshiftRLreg x y z)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSRL {
+ break
+ }
+ y := v_1.Args[0]
+ z := v_1.Args[1]
+ v.reset(OpARMADDSshiftRLreg)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(z)
+ return true
+ }
+ // match: (ADDS (SRL y z) x)
+ // cond:
+ // result: (ADDSshiftRLreg x y z)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSRL {
+ break
+ }
+ y := v_0.Args[0]
+ z := v_0.Args[1]
+ x := v.Args[1]
+ v.reset(OpARMADDSshiftRLreg)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(z)
+ return true
+ }
+ // match: (ADDS x (SRA y z))
+ // cond:
+ // result: (ADDSshiftRAreg x y z)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSRA {
+ break
+ }
+ y := v_1.Args[0]
+ z := v_1.Args[1]
+ v.reset(OpARMADDSshiftRAreg)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(z)
+ return true
+ }
+ // match: (ADDS (SRA y z) x)
+ // cond:
+ // result: (ADDSshiftRAreg x y z)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSRA {
+ break
+ }
+ y := v_0.Args[0]
+ z := v_0.Args[1]
+ x := v.Args[1]
+ v.reset(OpARMADDSshiftRAreg)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(z)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMADDSshiftLL(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ADDSshiftLL (MOVWconst [c]) x [d])
+ // cond:
+ // result: (ADDSconst [c] (SLLconst <x.Type> x [d]))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ d := v.AuxInt
+ v.reset(OpARMADDSconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
+ v0.AddArg(x)
+ v0.AuxInt = d
+ v.AddArg(v0)
+ return true
+ }
+ // match: (ADDSshiftLL x (MOVWconst [c]) [d])
+ // cond:
+ // result: (ADDSconst x [int64(uint32(c)<<uint64(d))])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ d := v.AuxInt
+ v.reset(OpARMADDSconst)
+ v.AddArg(x)
+ v.AuxInt = int64(uint32(c) << uint64(d))
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMADDSshiftLLreg(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ADDSshiftLLreg (MOVWconst [c]) x y)
+ // cond:
+ // result: (ADDSconst [c] (SLL <x.Type> x y))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ y := v.Args[2]
+ v.reset(OpARMADDSconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSLL, x.Type)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (ADDSshiftLLreg x y (MOVWconst [c]))
+ // cond:
+ // result: (ADDSshiftLL x y [c])
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_2.AuxInt
+ v.reset(OpARMADDSshiftLL)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AuxInt = c
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMADDSshiftRA(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ADDSshiftRA (MOVWconst [c]) x [d])
+ // cond:
+ // result: (ADDSconst [c] (SRAconst <x.Type> x [d]))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ d := v.AuxInt
+ v.reset(OpARMADDSconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSRAconst, x.Type)
+ v0.AddArg(x)
+ v0.AuxInt = d
+ v.AddArg(v0)
+ return true
+ }
+ // match: (ADDSshiftRA x (MOVWconst [c]) [d])
+ // cond:
+ // result: (ADDSconst x [int64(int32(c)>>uint64(d))])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ d := v.AuxInt
+ v.reset(OpARMADDSconst)
+ v.AddArg(x)
+ v.AuxInt = int64(int32(c) >> uint64(d))
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMADDSshiftRAreg(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ADDSshiftRAreg (MOVWconst [c]) x y)
+ // cond:
+ // result: (ADDSconst [c] (SRA <x.Type> x y))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ y := v.Args[2]
+ v.reset(OpARMADDSconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSRA, x.Type)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (ADDSshiftRAreg x y (MOVWconst [c]))
+ // cond:
+ // result: (ADDSshiftRA x y [c])
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_2.AuxInt
+ v.reset(OpARMADDSshiftRA)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AuxInt = c
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMADDSshiftRL(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ADDSshiftRL (MOVWconst [c]) x [d])
+ // cond:
+ // result: (ADDSconst [c] (SRLconst <x.Type> x [d]))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ d := v.AuxInt
+ v.reset(OpARMADDSconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSRLconst, x.Type)
+ v0.AddArg(x)
+ v0.AuxInt = d
+ v.AddArg(v0)
+ return true
+ }
+ // match: (ADDSshiftRL x (MOVWconst [c]) [d])
+ // cond:
+ // result: (ADDSconst x [int64(uint32(c)>>uint64(d))])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ d := v.AuxInt
+ v.reset(OpARMADDSconst)
+ v.AddArg(x)
+ v.AuxInt = int64(uint32(c) >> uint64(d))
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMADDSshiftRLreg(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ADDSshiftRLreg (MOVWconst [c]) x y)
+ // cond:
+ // result: (ADDSconst [c] (SRL <x.Type> x y))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ y := v.Args[2]
+ v.reset(OpARMADDSconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSRL, x.Type)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (ADDSshiftRLreg x y (MOVWconst [c]))
+ // cond:
+ // result: (ADDSshiftRL x y [c])
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_2.AuxInt
+ v.reset(OpARMADDSshiftRL)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AuxInt = c
+ return true
+ }
return false
}
func rewriteValueARM_OpARMADDconst(v *Value, config *Config) bool {
@@ -823,6 +2189,249 @@ func rewriteValueARM_OpARMADDconst(v *Value, config *Config) bool {
}
return false
}
+func rewriteValueARM_OpARMADDshiftLL(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ADDshiftLL (MOVWconst [c]) x [d])
+ // cond:
+ // result: (ADDconst [c] (SLLconst <x.Type> x [d]))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ d := v.AuxInt
+ v.reset(OpARMADDconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
+ v0.AddArg(x)
+ v0.AuxInt = d
+ v.AddArg(v0)
+ return true
+ }
+ // match: (ADDshiftLL x (MOVWconst [c]) [d])
+ // cond:
+ // result: (ADDconst x [int64(uint32(c)<<uint64(d))])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ d := v.AuxInt
+ v.reset(OpARMADDconst)
+ v.AddArg(x)
+ v.AuxInt = int64(uint32(c) << uint64(d))
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMADDshiftLLreg(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ADDshiftLLreg (MOVWconst [c]) x y)
+ // cond:
+ // result: (ADDconst [c] (SLL <x.Type> x y))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ y := v.Args[2]
+ v.reset(OpARMADDconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSLL, x.Type)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (ADDshiftLLreg x y (MOVWconst [c]))
+ // cond:
+ // result: (ADDshiftLL x y [c])
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_2.AuxInt
+ v.reset(OpARMADDshiftLL)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AuxInt = c
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMADDshiftRA(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ADDshiftRA (MOVWconst [c]) x [d])
+ // cond:
+ // result: (ADDconst [c] (SRAconst <x.Type> x [d]))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ d := v.AuxInt
+ v.reset(OpARMADDconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSRAconst, x.Type)
+ v0.AddArg(x)
+ v0.AuxInt = d
+ v.AddArg(v0)
+ return true
+ }
+ // match: (ADDshiftRA x (MOVWconst [c]) [d])
+ // cond:
+ // result: (ADDconst x [int64(int32(c)>>uint64(d))])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ d := v.AuxInt
+ v.reset(OpARMADDconst)
+ v.AddArg(x)
+ v.AuxInt = int64(int32(c) >> uint64(d))
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMADDshiftRAreg(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ADDshiftRAreg (MOVWconst [c]) x y)
+ // cond:
+ // result: (ADDconst [c] (SRA <x.Type> x y))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ y := v.Args[2]
+ v.reset(OpARMADDconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSRA, x.Type)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (ADDshiftRAreg x y (MOVWconst [c]))
+ // cond:
+ // result: (ADDshiftRA x y [c])
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_2.AuxInt
+ v.reset(OpARMADDshiftRA)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AuxInt = c
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMADDshiftRL(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ADDshiftRL (MOVWconst [c]) x [d])
+ // cond:
+ // result: (ADDconst [c] (SRLconst <x.Type> x [d]))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ d := v.AuxInt
+ v.reset(OpARMADDconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSRLconst, x.Type)
+ v0.AddArg(x)
+ v0.AuxInt = d
+ v.AddArg(v0)
+ return true
+ }
+ // match: (ADDshiftRL x (MOVWconst [c]) [d])
+ // cond:
+ // result: (ADDconst x [int64(uint32(c)>>uint64(d))])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ d := v.AuxInt
+ v.reset(OpARMADDconst)
+ v.AddArg(x)
+ v.AuxInt = int64(uint32(c) >> uint64(d))
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMADDshiftRLreg(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ADDshiftRLreg (MOVWconst [c]) x y)
+ // cond:
+ // result: (ADDconst [c] (SRL <x.Type> x y))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ y := v.Args[2]
+ v.reset(OpARMADDconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSRL, x.Type)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (ADDshiftRLreg x y (MOVWconst [c]))
+ // cond:
+ // result: (ADDshiftRL x y [c])
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_2.AuxInt
+ v.reset(OpARMADDshiftRL)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AuxInt = c
+ return true
+ }
+ return false
+}
func rewriteValueARM_OpARMAND(v *Value, config *Config) bool {
b := v.Block
_ = b
@@ -856,6 +2465,210 @@ func rewriteValueARM_OpARMAND(v *Value, config *Config) bool {
v.AddArg(x)
return true
}
+ // match: (AND x (SLLconst [c] y))
+ // cond:
+ // result: (ANDshiftLL x y [c])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSLLconst {
+ break
+ }
+ c := v_1.AuxInt
+ y := v_1.Args[0]
+ v.reset(OpARMANDshiftLL)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AuxInt = c
+ return true
+ }
+ // match: (AND (SLLconst [c] y) x)
+ // cond:
+ // result: (ANDshiftLL x y [c])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSLLconst {
+ break
+ }
+ c := v_0.AuxInt
+ y := v_0.Args[0]
+ x := v.Args[1]
+ v.reset(OpARMANDshiftLL)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AuxInt = c
+ return true
+ }
+ // match: (AND x (SRLconst [c] y))
+ // cond:
+ // result: (ANDshiftRL x y [c])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSRLconst {
+ break
+ }
+ c := v_1.AuxInt
+ y := v_1.Args[0]
+ v.reset(OpARMANDshiftRL)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AuxInt = c
+ return true
+ }
+ // match: (AND (SRLconst [c] y) x)
+ // cond:
+ // result: (ANDshiftRL x y [c])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSRLconst {
+ break
+ }
+ c := v_0.AuxInt
+ y := v_0.Args[0]
+ x := v.Args[1]
+ v.reset(OpARMANDshiftRL)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AuxInt = c
+ return true
+ }
+ // match: (AND x (SRAconst [c] y))
+ // cond:
+ // result: (ANDshiftRA x y [c])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSRAconst {
+ break
+ }
+ c := v_1.AuxInt
+ y := v_1.Args[0]
+ v.reset(OpARMANDshiftRA)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AuxInt = c
+ return true
+ }
+ // match: (AND (SRAconst [c] y) x)
+ // cond:
+ // result: (ANDshiftRA x y [c])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSRAconst {
+ break
+ }
+ c := v_0.AuxInt
+ y := v_0.Args[0]
+ x := v.Args[1]
+ v.reset(OpARMANDshiftRA)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AuxInt = c
+ return true
+ }
+ // match: (AND x (SLL y z))
+ // cond:
+ // result: (ANDshiftLLreg x y z)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSLL {
+ break
+ }
+ y := v_1.Args[0]
+ z := v_1.Args[1]
+ v.reset(OpARMANDshiftLLreg)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(z)
+ return true
+ }
+ // match: (AND (SLL y z) x)
+ // cond:
+ // result: (ANDshiftLLreg x y z)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSLL {
+ break
+ }
+ y := v_0.Args[0]
+ z := v_0.Args[1]
+ x := v.Args[1]
+ v.reset(OpARMANDshiftLLreg)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(z)
+ return true
+ }
+ // match: (AND x (SRL y z))
+ // cond:
+ // result: (ANDshiftRLreg x y z)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSRL {
+ break
+ }
+ y := v_1.Args[0]
+ z := v_1.Args[1]
+ v.reset(OpARMANDshiftRLreg)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(z)
+ return true
+ }
+ // match: (AND (SRL y z) x)
+ // cond:
+ // result: (ANDshiftRLreg x y z)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSRL {
+ break
+ }
+ y := v_0.Args[0]
+ z := v_0.Args[1]
+ x := v.Args[1]
+ v.reset(OpARMANDshiftRLreg)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(z)
+ return true
+ }
+ // match: (AND x (SRA y z))
+ // cond:
+ // result: (ANDshiftRAreg x y z)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSRA {
+ break
+ }
+ y := v_1.Args[0]
+ z := v_1.Args[1]
+ v.reset(OpARMANDshiftRAreg)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(z)
+ return true
+ }
+ // match: (AND (SRA y z) x)
+ // cond:
+ // result: (ANDshiftRAreg x y z)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSRA {
+ break
+ }
+ y := v_0.Args[0]
+ z := v_0.Args[1]
+ x := v.Args[1]
+ v.reset(OpARMANDshiftRAreg)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(z)
+ return true
+ }
// match: (AND x x)
// cond:
// result: x
@@ -884,6 +2697,57 @@ func rewriteValueARM_OpARMAND(v *Value, config *Config) bool {
v.AddArg(y)
return true
}
+ // match: (AND x (MVNshiftLL y [c]))
+ // cond:
+ // result: (BICshiftLL x y [c])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMVNshiftLL {
+ break
+ }
+ y := v_1.Args[0]
+ c := v_1.AuxInt
+ v.reset(OpARMBICshiftLL)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AuxInt = c
+ return true
+ }
+ // match: (AND x (MVNshiftRL y [c]))
+ // cond:
+ // result: (BICshiftRL x y [c])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMVNshiftRL {
+ break
+ }
+ y := v_1.Args[0]
+ c := v_1.AuxInt
+ v.reset(OpARMBICshiftRL)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AuxInt = c
+ return true
+ }
+ // match: (AND x (MVNshiftRA y [c]))
+ // cond:
+ // result: (BICshiftRA x y [c])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMVNshiftRA {
+ break
+ }
+ y := v_1.Args[0]
+ c := v_1.AuxInt
+ v.reset(OpARMBICshiftRA)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AuxInt = c
+ return true
+ }
return false
}
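
[Editor's note] The three MVNshift rules at the end of the AND case extend the existing AND-of-complement fold: conjunction with a complemented, shifted value becomes BIC with a shifted operand. At the source level this is Go's and-not operator applied to a shift; a hedged example:

    // andNotShifted clears the bits of a shifted value: x &^ (y<<3),
    // which ARM can express as one BIC with a shifted second operand
    // rather than an MVN followed by an AND.
    func andNotShifted(x, y uint32) uint32 {
        return x &^ (y << 3)
    }
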
func rewriteValueARM_OpARMANDconst(v *Value, config *Config) bool {
@@ -946,6 +2810,315 @@ func rewriteValueARM_OpARMANDconst(v *Value, config *Config) bool {
}
return false
}
+func rewriteValueARM_OpARMANDshiftLL(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ANDshiftLL (MOVWconst [c]) x [d])
+ // cond:
+ // result: (ANDconst [c] (SLLconst <x.Type> x [d]))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ d := v.AuxInt
+ v.reset(OpARMANDconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
+ v0.AddArg(x)
+ v0.AuxInt = d
+ v.AddArg(v0)
+ return true
+ }
+ // match: (ANDshiftLL x (MOVWconst [c]) [d])
+ // cond:
+ // result: (ANDconst x [int64(uint32(c)<<uint64(d))])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ d := v.AuxInt
+ v.reset(OpARMANDconst)
+ v.AddArg(x)
+ v.AuxInt = int64(uint32(c) << uint64(d))
+ return true
+ }
+ // match: (ANDshiftLL x y:(SLLconst x [c]) [d])
+ // cond: c==d
+ // result: y
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ if y.Op != OpARMSLLconst {
+ break
+ }
+ if x != y.Args[0] {
+ break
+ }
+ c := y.AuxInt
+ d := v.AuxInt
+ if !(c == d) {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = y.Type
+ v.AddArg(y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMANDshiftLLreg(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ANDshiftLLreg (MOVWconst [c]) x y)
+ // cond:
+ // result: (ANDconst [c] (SLL <x.Type> x y))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ y := v.Args[2]
+ v.reset(OpARMANDconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSLL, x.Type)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (ANDshiftLLreg x y (MOVWconst [c]))
+ // cond:
+ // result: (ANDshiftLL x y [c])
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_2.AuxInt
+ v.reset(OpARMANDshiftLL)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AuxInt = c
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMANDshiftRA(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ANDshiftRA (MOVWconst [c]) x [d])
+ // cond:
+ // result: (ANDconst [c] (SRAconst <x.Type> x [d]))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ d := v.AuxInt
+ v.reset(OpARMANDconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSRAconst, x.Type)
+ v0.AddArg(x)
+ v0.AuxInt = d
+ v.AddArg(v0)
+ return true
+ }
+ // match: (ANDshiftRA x (MOVWconst [c]) [d])
+ // cond:
+ // result: (ANDconst x [int64(int32(c)>>uint64(d))])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ d := v.AuxInt
+ v.reset(OpARMANDconst)
+ v.AddArg(x)
+ v.AuxInt = int64(int32(c) >> uint64(d))
+ return true
+ }
+ // match: (ANDshiftRA x y:(SRAconst x [c]) [d])
+ // cond: c==d
+ // result: y
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ if y.Op != OpARMSRAconst {
+ break
+ }
+ if x != y.Args[0] {
+ break
+ }
+ c := y.AuxInt
+ d := v.AuxInt
+ if !(c == d) {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = y.Type
+ v.AddArg(y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMANDshiftRAreg(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ANDshiftRAreg (MOVWconst [c]) x y)
+ // cond:
+ // result: (ANDconst [c] (SRA <x.Type> x y))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ y := v.Args[2]
+ v.reset(OpARMANDconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSRA, x.Type)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (ANDshiftRAreg x y (MOVWconst [c]))
+ // cond:
+ // result: (ANDshiftRA x y [c])
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_2.AuxInt
+ v.reset(OpARMANDshiftRA)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AuxInt = c
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMANDshiftRL(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ANDshiftRL (MOVWconst [c]) x [d])
+ // cond:
+ // result: (ANDconst [c] (SRLconst <x.Type> x [d]))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ d := v.AuxInt
+ v.reset(OpARMANDconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSRLconst, x.Type)
+ v0.AddArg(x)
+ v0.AuxInt = d
+ v.AddArg(v0)
+ return true
+ }
+ // match: (ANDshiftRL x (MOVWconst [c]) [d])
+ // cond:
+ // result: (ANDconst x [int64(uint32(c)>>uint64(d))])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ d := v.AuxInt
+ v.reset(OpARMANDconst)
+ v.AddArg(x)
+ v.AuxInt = int64(uint32(c) >> uint64(d))
+ return true
+ }
+ // match: (ANDshiftRL x y:(SRLconst x [c]) [d])
+ // cond: c==d
+ // result: y
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ if y.Op != OpARMSRLconst {
+ break
+ }
+ if x != y.Args[0] {
+ break
+ }
+ c := y.AuxInt
+ d := v.AuxInt
+ if !(c == d) {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = y.Type
+ v.AddArg(y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMANDshiftRLreg(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ANDshiftRLreg (MOVWconst [c]) x y)
+ // cond:
+ // result: (ANDconst [c] (SRL <x.Type> x y))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ y := v.Args[2]
+ v.reset(OpARMANDconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSRL, x.Type)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (ANDshiftRLreg x y (MOVWconst [c]))
+ // cond:
+ // result: (ANDshiftRL x y [c])
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_2.AuxInt
+ v.reset(OpARMANDshiftRL)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AuxInt = c
+ return true
+ }
+ return false
+}
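
[Editor's note] The y:(SLLconst x [c]) arms in ANDshiftLL, ANDshiftRA, and ANDshiftRL above encode the identity z & z == z for a shifted z: when the plain operand is the same x that feeds the shift and the shift amounts agree, the op is just the shift. A speculative sketch of a source shape that could reach these rules before CSE merges the shifts:

    // andSelfShift ands a shifted value with an identical shift of the
    // same operand; the rules above collapse the AND to the existing
    // shift. Whether a given program reaches this form depends on rewrite
    // ordering, so treat this as an illustration only.
    func andSelfShift(x uint32) uint32 {
        z := x << 5
        return z & (x << 5)
    }
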
func rewriteValueARM_OpAdd16(v *Value, config *Config) bool {
b := v.Block
_ = b
@@ -1161,6 +3334,108 @@ func rewriteValueARM_OpARMBIC(v *Value, config *Config) bool {
v.AddArg(x)
return true
}
+ // match: (BIC x (SLLconst [c] y))
+ // cond:
+ // result: (BICshiftLL x y [c])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSLLconst {
+ break
+ }
+ c := v_1.AuxInt
+ y := v_1.Args[0]
+ v.reset(OpARMBICshiftLL)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AuxInt = c
+ return true
+ }
+ // match: (BIC x (SRLconst [c] y))
+ // cond:
+ // result: (BICshiftRL x y [c])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSRLconst {
+ break
+ }
+ c := v_1.AuxInt
+ y := v_1.Args[0]
+ v.reset(OpARMBICshiftRL)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AuxInt = c
+ return true
+ }
+ // match: (BIC x (SRAconst [c] y))
+ // cond:
+ // result: (BICshiftRA x y [c])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSRAconst {
+ break
+ }
+ c := v_1.AuxInt
+ y := v_1.Args[0]
+ v.reset(OpARMBICshiftRA)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AuxInt = c
+ return true
+ }
+ // match: (BIC x (SLL y z))
+ // cond:
+ // result: (BICshiftLLreg x y z)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSLL {
+ break
+ }
+ y := v_1.Args[0]
+ z := v_1.Args[1]
+ v.reset(OpARMBICshiftLLreg)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(z)
+ return true
+ }
+ // match: (BIC x (SRL y z))
+ // cond:
+ // result: (BICshiftRLreg x y z)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSRL {
+ break
+ }
+ y := v_1.Args[0]
+ z := v_1.Args[1]
+ v.reset(OpARMBICshiftRLreg)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(z)
+ return true
+ }
+ // match: (BIC x (SRA y z))
+ // cond:
+ // result: (BICshiftRAreg x y z)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSRA {
+ break
+ }
+ y := v_1.Args[0]
+ z := v_1.Args[1]
+ v.reset(OpARMBICshiftRAreg)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(z)
+ return true
+ }
// match: (BIC x x)
// cond:
// result: (MOVWconst [0])
@@ -1219,6 +3494,376 @@ func rewriteValueARM_OpARMBICconst(v *Value, config *Config) bool {
}
return false
}
+func rewriteValueARM_OpARMBICshiftLL(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (BICshiftLL x (MOVWconst [c]) [d])
+ // cond:
+ // result: (BICconst x [int64(uint32(c)<<uint64(d))])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ d := v.AuxInt
+ v.reset(OpARMBICconst)
+ v.AddArg(x)
+ v.AuxInt = int64(uint32(c) << uint64(d))
+ return true
+ }
+ // match: (BICshiftLL x (SLLconst x [c]) [d])
+ // cond: c==d
+ // result: (MOVWconst [0])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSLLconst {
+ break
+ }
+ if x != v_1.Args[0] {
+ break
+ }
+ c := v_1.AuxInt
+ d := v.AuxInt
+ if !(c == d) {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = 0
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMBICshiftLLreg(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (BICshiftLLreg x y (MOVWconst [c]))
+ // cond:
+ // result: (BICshiftLL x y [c])
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_2.AuxInt
+ v.reset(OpARMBICshiftLL)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AuxInt = c
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMBICshiftRA(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (BICshiftRA x (MOVWconst [c]) [d])
+ // cond:
+ // result: (BICconst x [int64(int32(c)>>uint64(d))])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ d := v.AuxInt
+ v.reset(OpARMBICconst)
+ v.AddArg(x)
+ v.AuxInt = int64(int32(c) >> uint64(d))
+ return true
+ }
+ // match: (BICshiftRA x (SRAconst x [c]) [d])
+ // cond: c==d
+ // result: (MOVWconst [0])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSRAconst {
+ break
+ }
+ if x != v_1.Args[0] {
+ break
+ }
+ c := v_1.AuxInt
+ d := v.AuxInt
+ if !(c == d) {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = 0
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMBICshiftRAreg(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (BICshiftRAreg x y (MOVWconst [c]))
+ // cond:
+ // result: (BICshiftRA x y [c])
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_2.AuxInt
+ v.reset(OpARMBICshiftRA)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AuxInt = c
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMBICshiftRL(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (BICshiftRL x (MOVWconst [c]) [d])
+ // cond:
+ // result: (BICconst x [int64(uint32(c)>>uint64(d))])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ d := v.AuxInt
+ v.reset(OpARMBICconst)
+ v.AddArg(x)
+ v.AuxInt = int64(uint32(c) >> uint64(d))
+ return true
+ }
+ // match: (BICshiftRL x (SRLconst x [c]) [d])
+ // cond: c==d
+ // result: (MOVWconst [0])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSRLconst {
+ break
+ }
+ if x != v_1.Args[0] {
+ break
+ }
+ c := v_1.AuxInt
+ d := v.AuxInt
+ if !(c == d) {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = 0
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMBICshiftRLreg(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (BICshiftRLreg x y (MOVWconst [c]))
+ // cond:
+ // result: (BICshiftRL x y [c])
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_2.AuxInt
+ v.reset(OpARMBICshiftRL)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AuxInt = c
+ return true
+ }
+ return false
+}
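All of the BICshift folds above compute the clear-mask at compile time once the shifted operand is a constant; the uint32 conversions in the AuxInt expressions mirror 32-bit ARM truncation. A minimal sketch of the arithmetic:

	// BICshiftLL x (MOVWconst [c]) [d] -> BICconst x [uint32(c)<<d], i.e.:
	func bicShiftLL(x, c, d uint32) uint32 {
		return x &^ (c << d) // the mask is a constant when c and d are known
	}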
+func rewriteValueARM_OpARMCMOVWHSconst(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (CMOVWHSconst _ (FlagEQ) [c])
+ // cond:
+ // result: (MOVWconst [c])
+ for {
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMFlagEQ {
+ break
+ }
+ c := v.AuxInt
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = c
+ return true
+ }
+ // match: (CMOVWHSconst x (FlagLT_ULT))
+ // cond:
+ // result: x
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMFlagLT_ULT {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+ // match: (CMOVWHSconst _ (FlagLT_UGT) [c])
+ // cond:
+ // result: (MOVWconst [c])
+ for {
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMFlagLT_UGT {
+ break
+ }
+ c := v.AuxInt
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = c
+ return true
+ }
+ // match: (CMOVWHSconst x (FlagGT_ULT))
+ // cond:
+ // result: x
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMFlagGT_ULT {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+ // match: (CMOVWHSconst _ (FlagGT_UGT) [c])
+ // cond:
+ // result: (MOVWconst [c])
+ for {
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMFlagGT_UGT {
+ break
+ }
+ c := v.AuxInt
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = c
+ return true
+ }
+ // match: (CMOVWHSconst x (InvertFlags flags) [c])
+ // cond:
+ // result: (CMOVWLSconst x flags [c])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMInvertFlags {
+ break
+ }
+ flags := v_1.Args[0]
+ c := v.AuxInt
+ v.reset(OpARMCMOVWLSconst)
+ v.AddArg(x)
+ v.AddArg(flags)
+ v.AuxInt = c
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMCMOVWLSconst(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (CMOVWLSconst _ (FlagEQ) [c])
+ // cond:
+ // result: (MOVWconst [c])
+ for {
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMFlagEQ {
+ break
+ }
+ c := v.AuxInt
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = c
+ return true
+ }
+ // match: (CMOVWLSconst _ (FlagLT_ULT) [c])
+ // cond:
+ // result: (MOVWconst [c])
+ for {
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMFlagLT_ULT {
+ break
+ }
+ c := v.AuxInt
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = c
+ return true
+ }
+ // match: (CMOVWLSconst x (FlagLT_UGT))
+ // cond:
+ // result: x
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMFlagLT_UGT {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+ // match: (CMOVWLSconst _ (FlagGT_ULT) [c])
+ // cond:
+ // result: (MOVWconst [c])
+ for {
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMFlagGT_ULT {
+ break
+ }
+ c := v.AuxInt
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = c
+ return true
+ }
+ // match: (CMOVWLSconst x (FlagGT_UGT))
+ // cond:
+ // result: x
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMFlagGT_UGT {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+ // match: (CMOVWLSconst x (InvertFlags flags) [c])
+ // cond:
+ // result: (CMOVWHSconst x flags [c])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMInvertFlags {
+ break
+ }
+ flags := v_1.Args[0]
+ c := v.AuxInt
+ v.reset(OpARMCMOVWHSconst)
+ v.AddArg(x)
+ v.AddArg(flags)
+ v.AuxInt = c
+ return true
+ }
+ return false
+}
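CMOVWHSconst x flags [c] selects c when flags record an unsigned higher-or-same comparison and x otherwise; CMOVWLSconst is its lower-or-same twin. When flags is one of the Flag* constants the select folds outright, and InvertFlags (a compare with swapped operands) exchanges the two variants, which is what the final rule in each function encodes. A behavioral sketch, not compiler code:

	// cmovWHSconst models the select; uge stands in for the carry flag
	// meaning "unsigned higher or same".
	func cmovWHSconst(x, c int32, uge bool) int32 {
		if uge {
			return c
		}
		return x
	}
	// With swapped compare operands, unsigned >= becomes unsigned <=, so
	// CMOVWHSconst over InvertFlags rewrites to CMOVWLSconst and vice versa.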
func rewriteValueARM_OpARMCMP(v *Value, config *Config) bool {
b := v.Block
_ = b
@@ -1254,6 +3899,222 @@ func rewriteValueARM_OpARMCMP(v *Value, config *Config) bool {
v.AddArg(v0)
return true
}
+ // match: (CMP x (SLLconst [c] y))
+ // cond:
+ // result: (CMPshiftLL x y [c])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSLLconst {
+ break
+ }
+ c := v_1.AuxInt
+ y := v_1.Args[0]
+ v.reset(OpARMCMPshiftLL)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AuxInt = c
+ return true
+ }
+ // match: (CMP (SLLconst [c] y) x)
+ // cond:
+ // result: (InvertFlags (CMPshiftLL x y [c]))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSLLconst {
+ break
+ }
+ c := v_0.AuxInt
+ y := v_0.Args[0]
+ x := v.Args[1]
+ v.reset(OpARMInvertFlags)
+ v0 := b.NewValue0(v.Line, OpARMCMPshiftLL, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v0.AuxInt = c
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMP x (SRLconst [c] y))
+ // cond:
+ // result: (CMPshiftRL x y [c])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSRLconst {
+ break
+ }
+ c := v_1.AuxInt
+ y := v_1.Args[0]
+ v.reset(OpARMCMPshiftRL)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AuxInt = c
+ return true
+ }
+ // match: (CMP (SRLconst [c] y) x)
+ // cond:
+ // result: (InvertFlags (CMPshiftRL x y [c]))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSRLconst {
+ break
+ }
+ c := v_0.AuxInt
+ y := v_0.Args[0]
+ x := v.Args[1]
+ v.reset(OpARMInvertFlags)
+ v0 := b.NewValue0(v.Line, OpARMCMPshiftRL, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v0.AuxInt = c
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMP x (SRAconst [c] y))
+ // cond:
+ // result: (CMPshiftRA x y [c])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSRAconst {
+ break
+ }
+ c := v_1.AuxInt
+ y := v_1.Args[0]
+ v.reset(OpARMCMPshiftRA)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AuxInt = c
+ return true
+ }
+ // match: (CMP (SRAconst [c] y) x)
+ // cond:
+ // result: (InvertFlags (CMPshiftRA x y [c]))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSRAconst {
+ break
+ }
+ c := v_0.AuxInt
+ y := v_0.Args[0]
+ x := v.Args[1]
+ v.reset(OpARMInvertFlags)
+ v0 := b.NewValue0(v.Line, OpARMCMPshiftRA, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v0.AuxInt = c
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMP x (SLL y z))
+ // cond:
+ // result: (CMPshiftLLreg x y z)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSLL {
+ break
+ }
+ y := v_1.Args[0]
+ z := v_1.Args[1]
+ v.reset(OpARMCMPshiftLLreg)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(z)
+ return true
+ }
+ // match: (CMP (SLL y z) x)
+ // cond:
+ // result: (InvertFlags (CMPshiftLLreg x y z))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSLL {
+ break
+ }
+ y := v_0.Args[0]
+ z := v_0.Args[1]
+ x := v.Args[1]
+ v.reset(OpARMInvertFlags)
+ v0 := b.NewValue0(v.Line, OpARMCMPshiftLLreg, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v0.AddArg(z)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMP x (SRL y z))
+ // cond:
+ // result: (CMPshiftRLreg x y z)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSRL {
+ break
+ }
+ y := v_1.Args[0]
+ z := v_1.Args[1]
+ v.reset(OpARMCMPshiftRLreg)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(z)
+ return true
+ }
+ // match: (CMP (SRL y z) x)
+ // cond:
+ // result: (InvertFlags (CMPshiftRLreg x y z))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSRL {
+ break
+ }
+ y := v_0.Args[0]
+ z := v_0.Args[1]
+ x := v.Args[1]
+ v.reset(OpARMInvertFlags)
+ v0 := b.NewValue0(v.Line, OpARMCMPshiftRLreg, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v0.AddArg(z)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMP x (SRA y z))
+ // cond:
+ // result: (CMPshiftRAreg x y z)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSRA {
+ break
+ }
+ y := v_1.Args[0]
+ z := v_1.Args[1]
+ v.reset(OpARMCMPshiftRAreg)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(z)
+ return true
+ }
+ // match: (CMP (SRA y z) x)
+ // cond:
+ // result: (InvertFlags (CMPshiftRAreg x y z))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSRA {
+ break
+ }
+ y := v_0.Args[0]
+ z := v_0.Args[1]
+ x := v.Args[1]
+ v.reset(OpARMInvertFlags)
+ v0 := b.NewValue0(v.Line, OpARMCMPshiftRAreg, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v0.AddArg(z)
+ v.AddArg(v0)
+ return true
+ }
return false
}
func rewriteValueARM_OpARMCMPconst(v *Value, config *Config) bool {
@@ -1403,6 +4264,261 @@ func rewriteValueARM_OpARMCMPconst(v *Value, config *Config) bool {
}
return false
}
+func rewriteValueARM_OpARMCMPshiftLL(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (CMPshiftLL (MOVWconst [c]) x [d])
+ // cond:
+ // result: (InvertFlags (CMPconst [c] (SLLconst <x.Type> x [d])))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ d := v.AuxInt
+ v.reset(OpARMInvertFlags)
+ v0 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
+ v0.AuxInt = c
+ v1 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
+ v1.AddArg(x)
+ v1.AuxInt = d
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMPshiftLL x (MOVWconst [c]) [d])
+ // cond:
+ // result: (CMPconst x [int64(uint32(c)<<uint64(d))])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ d := v.AuxInt
+ v.reset(OpARMCMPconst)
+ v.AddArg(x)
+ v.AuxInt = int64(uint32(c) << uint64(d))
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMCMPshiftLLreg(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (CMPshiftLLreg (MOVWconst [c]) x y)
+ // cond:
+ // result: (InvertFlags (CMPconst [c] (SLL <x.Type> x y)))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ y := v.Args[2]
+ v.reset(OpARMInvertFlags)
+ v0 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
+ v0.AuxInt = c
+ v1 := b.NewValue0(v.Line, OpARMSLL, x.Type)
+ v1.AddArg(x)
+ v1.AddArg(y)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMPshiftLLreg x y (MOVWconst [c]))
+ // cond:
+ // result: (CMPshiftLL x y [c])
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_2.AuxInt
+ v.reset(OpARMCMPshiftLL)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AuxInt = c
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMCMPshiftRA(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (CMPshiftRA (MOVWconst [c]) x [d])
+ // cond:
+ // result: (InvertFlags (CMPconst [c] (SRAconst <x.Type> x [d])))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ d := v.AuxInt
+ v.reset(OpARMInvertFlags)
+ v0 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
+ v0.AuxInt = c
+ v1 := b.NewValue0(v.Line, OpARMSRAconst, x.Type)
+ v1.AddArg(x)
+ v1.AuxInt = d
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMPshiftRA x (MOVWconst [c]) [d])
+ // cond:
+ // result: (CMPconst x [int64(int32(c)>>uint64(d))])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ d := v.AuxInt
+ v.reset(OpARMCMPconst)
+ v.AddArg(x)
+ v.AuxInt = int64(int32(c) >> uint64(d))
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMCMPshiftRAreg(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (CMPshiftRAreg (MOVWconst [c]) x y)
+ // cond:
+ // result: (InvertFlags (CMPconst [c] (SRA <x.Type> x y)))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ y := v.Args[2]
+ v.reset(OpARMInvertFlags)
+ v0 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
+ v0.AuxInt = c
+ v1 := b.NewValue0(v.Line, OpARMSRA, x.Type)
+ v1.AddArg(x)
+ v1.AddArg(y)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMPshiftRAreg x y (MOVWconst [c]))
+ // cond:
+ // result: (CMPshiftRA x y [c])
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_2.AuxInt
+ v.reset(OpARMCMPshiftRA)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AuxInt = c
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMCMPshiftRL(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (CMPshiftRL (MOVWconst [c]) x [d])
+ // cond:
+ // result: (InvertFlags (CMPconst [c] (SRLconst <x.Type> x [d])))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ d := v.AuxInt
+ v.reset(OpARMInvertFlags)
+ v0 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
+ v0.AuxInt = c
+ v1 := b.NewValue0(v.Line, OpARMSRLconst, x.Type)
+ v1.AddArg(x)
+ v1.AuxInt = d
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMPshiftRL x (MOVWconst [c]) [d])
+ // cond:
+ // result: (CMPconst x [int64(uint32(c)>>uint64(d))])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ d := v.AuxInt
+ v.reset(OpARMCMPconst)
+ v.AddArg(x)
+ v.AuxInt = int64(uint32(c) >> uint64(d))
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMCMPshiftRLreg(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (CMPshiftRLreg (MOVWconst [c]) x y)
+ // cond:
+ // result: (InvertFlags (CMPconst [c] (SRL <x.Type> x y)))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ y := v.Args[2]
+ v.reset(OpARMInvertFlags)
+ v0 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
+ v0.AuxInt = c
+ v1 := b.NewValue0(v.Line, OpARMSRL, x.Type)
+ v1.AddArg(x)
+ v1.AddArg(y)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMPshiftRLreg x y (MOVWconst [c]))
+ // cond:
+ // result: (CMPshiftRL x y [c])
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_2.AuxInt
+ v.reset(OpARMCMPshiftRL)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AuxInt = c
+ return true
+ }
+ return false
+}
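CMP is not commutative, so only the right-operand-shifted form exists; when the shifted value appears on the left, the rules emit the swapped compare and wrap it in InvertFlags so consumers test the mirrored condition. A sketch of why the swap is sound:

	// cmp models the sign of a-b, which is what the flags encode.
	func cmp(a, b int32) int {
		switch {
		case a < b:
			return -1
		case a > b:
			return 1
		}
		return 0
	}
	// cmp(a, b) == -cmp(b, a): every asymmetric condition flips under the
	// swap, and InvertFlags marks the result so later passes flip it back.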
func rewriteValueARM_OpClosureCall(v *Value, config *Config) bool {
b := v.Block
_ = b
@@ -3702,42 +6818,6 @@ func rewriteValueARM_OpLoad(v *Value, config *Config) bool {
}
return false
}
-func rewriteValueARM_OpARMLoweredZeromask(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (LoweredZeromask (MOVWconst [0]))
- // cond:
- // result: (MOVWconst [0])
- for {
- v_0 := v.Args[0]
- if v_0.Op != OpARMMOVWconst {
- break
- }
- if v_0.AuxInt != 0 {
- break
- }
- v.reset(OpARMMOVWconst)
- v.AuxInt = 0
- return true
- }
- // match: (LoweredZeromask (MOVWconst [c]))
- // cond: c != 0
- // result: (MOVWconst [0xffffffff])
- for {
- v_0 := v.Args[0]
- if v_0.Op != OpARMMOVWconst {
- break
- }
- c := v_0.AuxInt
- if !(c != 0) {
- break
- }
- v.reset(OpARMMOVWconst)
- v.AuxInt = 0xffffffff
- return true
- }
- return false
-}
func rewriteValueARM_OpLrot16(v *Value, config *Config) bool {
b := v.Block
_ = b
@@ -3802,15 +6882,24 @@ func rewriteValueARM_OpLsh16x16(v *Value, config *Config) bool {
_ = b
// match: (Lsh16x16 x y)
// cond:
- // result: (SLL x (ZeroExt16to32 y))
+ // result: (CMOVWHSconst (SLL <x.Type> x (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0])
for {
x := v.Args[0]
y := v.Args[1]
- v.reset(OpARMSLL)
- v.AddArg(x)
- v0 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
- v0.AddArg(y)
+ v.reset(OpARMCMOVWHSconst)
+ v0 := b.NewValue0(v.Line, OpARMSLL, x.Type)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v1.AddArg(y)
+ v0.AddArg(v1)
v.AddArg(v0)
+ v2 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
+ v2.AuxInt = 256
+ v3 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v.AddArg(v2)
+ v.AuxInt = 0
return true
}
}
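This is the shift break-up described in the commit message: Go requires x << y == 0 once y >= 32, while an ARM register shift only looks at the low byte of the count, so the lowering pairs a raw SLL with CMPconst [256] and a CMOVWHSconst that clamps out-of-range counts to 0. In plain Go the lowered expression behaves like this sketch (the 16-bit count is already widened by the ZeroExt16to32 above):

	func lsh32(x, y uint32) uint32 {
		var s uint32
		if y&0xff < 32 { // ARM SLL by register: counts 32..255 already yield 0
			s = x << (y & 0xff)
		}
		if y >= 256 { // CMPconst [256] + CMOVWHSconst [0] clamp the rest
			return 0
		}
		return s
	}

Splitting the shift into ordinary ops is what lets the CMPconst and CMOVW rules earlier in this file fold the clamp away when the count is a known constant.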
@@ -3819,13 +6908,20 @@ func rewriteValueARM_OpLsh16x32(v *Value, config *Config) bool {
_ = b
// match: (Lsh16x32 x y)
// cond:
- // result: (SLL x y)
+ // result: (CMOVWHSconst (SLL <x.Type> x y) (CMPconst [256] y) [0])
for {
x := v.Args[0]
y := v.Args[1]
- v.reset(OpARMSLL)
- v.AddArg(x)
- v.AddArg(y)
+ v.reset(OpARMCMOVWHSconst)
+ v0 := b.NewValue0(v.Line, OpARMSLL, x.Type)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
+ v1.AuxInt = 256
+ v1.AddArg(y)
+ v.AddArg(v1)
+ v.AuxInt = 0
return true
}
}
@@ -3890,15 +6986,24 @@ func rewriteValueARM_OpLsh32x16(v *Value, config *Config) bool {
_ = b
// match: (Lsh32x16 x y)
// cond:
- // result: (SLL x (ZeroExt16to32 y))
+ // result: (CMOVWHSconst (SLL <x.Type> x (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0])
for {
x := v.Args[0]
y := v.Args[1]
- v.reset(OpARMSLL)
- v.AddArg(x)
- v0 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
- v0.AddArg(y)
+ v.reset(OpARMCMOVWHSconst)
+ v0 := b.NewValue0(v.Line, OpARMSLL, x.Type)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v1.AddArg(y)
+ v0.AddArg(v1)
v.AddArg(v0)
+ v2 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
+ v2.AuxInt = 256
+ v3 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v.AddArg(v2)
+ v.AuxInt = 0
return true
}
}
@@ -3907,13 +7012,20 @@ func rewriteValueARM_OpLsh32x32(v *Value, config *Config) bool {
_ = b
// match: (Lsh32x32 x y)
// cond:
- // result: (SLL x y)
+ // result: (CMOVWHSconst (SLL <x.Type> x y) (CMPconst [256] y) [0])
for {
x := v.Args[0]
y := v.Args[1]
- v.reset(OpARMSLL)
- v.AddArg(x)
- v.AddArg(y)
+ v.reset(OpARMCMOVWHSconst)
+ v0 := b.NewValue0(v.Line, OpARMSLL, x.Type)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
+ v1.AuxInt = 256
+ v1.AddArg(y)
+ v.AddArg(v1)
+ v.AuxInt = 0
return true
}
}
@@ -3978,15 +7090,24 @@ func rewriteValueARM_OpLsh8x16(v *Value, config *Config) bool {
_ = b
// match: (Lsh8x16 x y)
// cond:
- // result: (SLL x (ZeroExt16to32 y))
+ // result: (CMOVWHSconst (SLL <x.Type> x (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0])
for {
x := v.Args[0]
y := v.Args[1]
- v.reset(OpARMSLL)
- v.AddArg(x)
- v0 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
- v0.AddArg(y)
+ v.reset(OpARMCMOVWHSconst)
+ v0 := b.NewValue0(v.Line, OpARMSLL, x.Type)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v1.AddArg(y)
+ v0.AddArg(v1)
v.AddArg(v0)
+ v2 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
+ v2.AuxInt = 256
+ v3 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v.AddArg(v2)
+ v.AuxInt = 0
return true
}
}
@@ -3995,13 +7116,20 @@ func rewriteValueARM_OpLsh8x32(v *Value, config *Config) bool {
_ = b
// match: (Lsh8x32 x y)
// cond:
- // result: (SLL x y)
+ // result: (CMOVWHSconst (SLL <x.Type> x y) (CMPconst [256] y) [0])
for {
x := v.Args[0]
y := v.Args[1]
- v.reset(OpARMSLL)
- v.AddArg(x)
- v.AddArg(y)
+ v.reset(OpARMCMOVWHSconst)
+ v0 := b.NewValue0(v.Line, OpARMSLL, x.Type)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
+ v1.AuxInt = 256
+ v1.AddArg(y)
+ v.AddArg(v1)
+ v.AuxInt = 0
return true
}
}
@@ -4163,6 +7291,31 @@ func rewriteValueARM_OpARMMOVBUreg(v *Value, config *Config) bool {
v.AddArg(x)
return true
}
+ // match: (MOVBUreg x:(MOVBUreg _))
+ // cond:
+ // result: (MOVWreg x)
+ for {
+ x := v.Args[0]
+ if x.Op != OpARMMOVBUreg {
+ break
+ }
+ v.reset(OpARMMOVWreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBUreg (MOVWconst [c]))
+ // cond:
+ // result: (MOVWconst [int64(uint8(c))])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int64(uint8(c))
+ return true
+ }
return false
}
func rewriteValueARM_OpARMMOVBload(v *Value, config *Config) bool {
@@ -4270,6 +7423,31 @@ func rewriteValueARM_OpARMMOVBreg(v *Value, config *Config) bool {
v.AddArg(x)
return true
}
+ // match: (MOVBreg x:(MOVBreg _))
+ // cond:
+ // result: (MOVWreg x)
+ for {
+ x := v.Args[0]
+ if x.Op != OpARMMOVBreg {
+ break
+ }
+ v.reset(OpARMMOVWreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBreg (MOVWconst [c]))
+ // cond:
+ // result: (MOVWconst [int64(int8(c))])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int64(int8(c))
+ return true
+ }
return false
}
func rewriteValueARM_OpARMMOVBstore(v *Value, config *Config) bool {
@@ -4773,6 +7951,43 @@ func rewriteValueARM_OpARMMOVHUreg(v *Value, config *Config) bool {
v.AddArg(x)
return true
}
+ // match: (MOVHUreg x:(MOVBUreg _))
+ // cond:
+ // result: (MOVWreg x)
+ for {
+ x := v.Args[0]
+ if x.Op != OpARMMOVBUreg {
+ break
+ }
+ v.reset(OpARMMOVWreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHUreg x:(MOVHUreg _))
+ // cond:
+ // result: (MOVWreg x)
+ for {
+ x := v.Args[0]
+ if x.Op != OpARMMOVHUreg {
+ break
+ }
+ v.reset(OpARMMOVWreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHUreg (MOVWconst [c]))
+ // cond:
+ // result: (MOVWconst [int64(uint16(c))])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int64(uint16(c))
+ return true
+ }
return false
}
func rewriteValueARM_OpARMMOVHload(v *Value, config *Config) bool {
@@ -4904,6 +8119,55 @@ func rewriteValueARM_OpARMMOVHreg(v *Value, config *Config) bool {
v.AddArg(x)
return true
}
+ // match: (MOVHreg x:(MOVBreg _))
+ // cond:
+ // result: (MOVWreg x)
+ for {
+ x := v.Args[0]
+ if x.Op != OpARMMOVBreg {
+ break
+ }
+ v.reset(OpARMMOVWreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg x:(MOVBUreg _))
+ // cond:
+ // result: (MOVWreg x)
+ for {
+ x := v.Args[0]
+ if x.Op != OpARMMOVBUreg {
+ break
+ }
+ v.reset(OpARMMOVWreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg x:(MOVHreg _))
+ // cond:
+ // result: (MOVWreg x)
+ for {
+ x := v.Args[0]
+ if x.Op != OpARMMOVHreg {
+ break
+ }
+ v.reset(OpARMMOVWreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg (MOVWconst [c]))
+ // cond:
+ // result: (MOVWconst [int64(int16(c))])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int64(int16(c))
+ return true
+ }
return false
}
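The MOVB/MOVH extension rules above collapse redundant widenings: extending a value whose upper bits are already known zero (or sign) bits degrades to a plain MOVWreg, and extensions of constants fold completely. A sketch of the source pattern they target:

	func widen(b uint8) uint32 {
		h := uint16(b)   // MOVBUreg: upper 24 bits are already zero
		return uint32(h) // MOVHUreg of a MOVBUreg result -> MOVWreg, no mask emitted
	}

	// Constants fold the same way: MOVHUreg (MOVWconst [0x12345]) becomes
	// MOVWconst [0x2345].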
func rewriteValueARM_OpARMMOVHstore(v *Value, config *Config) bool {
@@ -5071,6 +8335,446 @@ func rewriteValueARM_OpARMMOVWload(v *Value, config *Config) bool {
v.AddArg(x)
return true
}
+ // match: (MOVWload [0] {sym} (ADD ptr idx) mem)
+ // cond: sym == nil
+ // result: (MOVWloadidx ptr idx mem)
+ for {
+ if v.AuxInt != 0 {
+ break
+ }
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMADD {
+ break
+ }
+ ptr := v_0.Args[0]
+ idx := v_0.Args[1]
+ mem := v.Args[1]
+ if !(sym == nil) {
+ break
+ }
+ v.reset(OpARMMOVWloadidx)
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVWload [0] {sym} (ADDshiftLL ptr idx [c]) mem)
+ // cond: sym == nil
+ // result: (MOVWloadshiftLL ptr idx [c] mem)
+ for {
+ if v.AuxInt != 0 {
+ break
+ }
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMADDshiftLL {
+ break
+ }
+ ptr := v_0.Args[0]
+ idx := v_0.Args[1]
+ c := v_0.AuxInt
+ mem := v.Args[1]
+ if !(sym == nil) {
+ break
+ }
+ v.reset(OpARMMOVWloadshiftLL)
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AuxInt = c
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVWload [0] {sym} (ADDshiftRL ptr idx [c]) mem)
+ // cond: sym == nil
+ // result: (MOVWloadshiftRL ptr idx [c] mem)
+ for {
+ if v.AuxInt != 0 {
+ break
+ }
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMADDshiftRL {
+ break
+ }
+ ptr := v_0.Args[0]
+ idx := v_0.Args[1]
+ c := v_0.AuxInt
+ mem := v.Args[1]
+ if !(sym == nil) {
+ break
+ }
+ v.reset(OpARMMOVWloadshiftRL)
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AuxInt = c
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVWload [0] {sym} (ADDshiftRA ptr idx [c]) mem)
+ // cond: sym == nil
+ // result: (MOVWloadshiftRA ptr idx [c] mem)
+ for {
+ if v.AuxInt != 0 {
+ break
+ }
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMADDshiftRA {
+ break
+ }
+ ptr := v_0.Args[0]
+ idx := v_0.Args[1]
+ c := v_0.AuxInt
+ mem := v.Args[1]
+ if !(sym == nil) {
+ break
+ }
+ v.reset(OpARMMOVWloadshiftRA)
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AuxInt = c
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMMOVWloadidx(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MOVWloadidx ptr idx (MOVWstoreidx ptr2 idx x _))
+ // cond: isSamePtr(ptr, ptr2)
+ // result: x
+ for {
+ ptr := v.Args[0]
+ idx := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpARMMOVWstoreidx {
+ break
+ }
+ ptr2 := v_2.Args[0]
+ if idx != v_2.Args[1] {
+ break
+ }
+ x := v_2.Args[2]
+ if !(isSamePtr(ptr, ptr2)) {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWloadidx ptr (MOVWconst [c]) mem)
+ // cond:
+ // result: (MOVWload [c] ptr mem)
+ for {
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ mem := v.Args[2]
+ v.reset(OpARMMOVWload)
+ v.AuxInt = c
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVWloadidx (MOVWconst [c]) ptr mem)
+ // cond:
+ // result: (MOVWload [c] ptr mem)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ ptr := v.Args[1]
+ mem := v.Args[2]
+ v.reset(OpARMMOVWload)
+ v.AuxInt = c
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVWloadidx ptr (SLLconst idx [c]) mem)
+ // cond:
+ // result: (MOVWloadshiftLL ptr idx [c] mem)
+ for {
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSLLconst {
+ break
+ }
+ idx := v_1.Args[0]
+ c := v_1.AuxInt
+ mem := v.Args[2]
+ v.reset(OpARMMOVWloadshiftLL)
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AuxInt = c
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVWloadidx (SLLconst idx [c]) ptr mem)
+ // cond:
+ // result: (MOVWloadshiftLL ptr idx [c] mem)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSLLconst {
+ break
+ }
+ idx := v_0.Args[0]
+ c := v_0.AuxInt
+ ptr := v.Args[1]
+ mem := v.Args[2]
+ v.reset(OpARMMOVWloadshiftLL)
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AuxInt = c
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVWloadidx ptr (SRLconst idx [c]) mem)
+ // cond:
+ // result: (MOVWloadshiftRL ptr idx [c] mem)
+ for {
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSRLconst {
+ break
+ }
+ idx := v_1.Args[0]
+ c := v_1.AuxInt
+ mem := v.Args[2]
+ v.reset(OpARMMOVWloadshiftRL)
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AuxInt = c
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVWloadidx (SRLconst idx [c]) ptr mem)
+ // cond:
+ // result: (MOVWloadshiftRL ptr idx [c] mem)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSRLconst {
+ break
+ }
+ idx := v_0.Args[0]
+ c := v_0.AuxInt
+ ptr := v.Args[1]
+ mem := v.Args[2]
+ v.reset(OpARMMOVWloadshiftRL)
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AuxInt = c
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVWloadidx ptr (SRAconst idx [c]) mem)
+ // cond:
+ // result: (MOVWloadshiftRA ptr idx [c] mem)
+ for {
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSRAconst {
+ break
+ }
+ idx := v_1.Args[0]
+ c := v_1.AuxInt
+ mem := v.Args[2]
+ v.reset(OpARMMOVWloadshiftRA)
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AuxInt = c
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVWloadidx (SRAconst idx [c]) ptr mem)
+ // cond:
+ // result: (MOVWloadshiftRA ptr idx [c] mem)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSRAconst {
+ break
+ }
+ idx := v_0.Args[0]
+ c := v_0.AuxInt
+ ptr := v.Args[1]
+ mem := v.Args[2]
+ v.reset(OpARMMOVWloadshiftRA)
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AuxInt = c
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
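These are the indexed loads from the commit message: a zero-offset, symbol-free MOVWload whose address is an ADD or ADDshift collapses into one load that performs the add (and shift) itself, and a constant or shifted index keeps folding from there. After the bounds check, a word-slice read can compile to a single instruction; the register names below are illustrative only:

	func get(p []uint32, i int) uint32 {
		// MOVWloadshiftLL ptr idx [2] mem, roughly: MOVW (Rptr)(Ridx<<2), Rout
		return p[i]
	}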
+func rewriteValueARM_OpARMMOVWloadshiftLL(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MOVWloadshiftLL ptr idx [c] (MOVWstoreshiftLL ptr2 idx [d] x _))
+ // cond: c==d && isSamePtr(ptr, ptr2)
+ // result: x
+ for {
+ ptr := v.Args[0]
+ idx := v.Args[1]
+ c := v.AuxInt
+ v_2 := v.Args[2]
+ if v_2.Op != OpARMMOVWstoreshiftLL {
+ break
+ }
+ ptr2 := v_2.Args[0]
+ if idx != v_2.Args[1] {
+ break
+ }
+ d := v_2.AuxInt
+ x := v_2.Args[2]
+ if !(c == d && isSamePtr(ptr, ptr2)) {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWloadshiftLL ptr (MOVWconst [c]) [d] mem)
+ // cond:
+ // result: (MOVWload [int64(uint32(c)<<uint64(d))] ptr mem)
+ for {
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ d := v.AuxInt
+ mem := v.Args[2]
+ v.reset(OpARMMOVWload)
+ v.AuxInt = int64(uint32(c) << uint64(d))
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMMOVWloadshiftRA(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MOVWloadshiftRA ptr idx [c] (MOVWstoreshiftRA ptr2 idx [d] x _))
+ // cond: c==d && isSamePtr(ptr, ptr2)
+ // result: x
+ for {
+ ptr := v.Args[0]
+ idx := v.Args[1]
+ c := v.AuxInt
+ v_2 := v.Args[2]
+ if v_2.Op != OpARMMOVWstoreshiftRA {
+ break
+ }
+ ptr2 := v_2.Args[0]
+ if idx != v_2.Args[1] {
+ break
+ }
+ d := v_2.AuxInt
+ x := v_2.Args[2]
+ if !(c == d && isSamePtr(ptr, ptr2)) {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWloadshiftRA ptr (MOVWconst [c]) [d] mem)
+ // cond:
+ // result: (MOVWload [int64(int32(c)>>uint64(d))] ptr mem)
+ for {
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ d := v.AuxInt
+ mem := v.Args[2]
+ v.reset(OpARMMOVWload)
+ v.AuxInt = int64(int32(c) >> uint64(d))
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMMOVWloadshiftRL(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MOVWloadshiftRL ptr idx [c] (MOVWstoreshiftRL ptr2 idx [d] x _))
+ // cond: c==d && isSamePtr(ptr, ptr2)
+ // result: x
+ for {
+ ptr := v.Args[0]
+ idx := v.Args[1]
+ c := v.AuxInt
+ v_2 := v.Args[2]
+ if v_2.Op != OpARMMOVWstoreshiftRL {
+ break
+ }
+ ptr2 := v_2.Args[0]
+ if idx != v_2.Args[1] {
+ break
+ }
+ d := v_2.AuxInt
+ x := v_2.Args[2]
+ if !(c == d && isSamePtr(ptr, ptr2)) {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWloadshiftRL ptr (MOVWconst [c]) [d] mem)
+ // cond:
+ // result: (MOVWload [int64(uint32(c)>>uint64(d))] ptr mem)
+ for {
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ d := v.AuxInt
+ mem := v.Args[2]
+ v.reset(OpARMMOVWload)
+ v.AuxInt = int64(uint32(c) >> uint64(d))
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMMOVWreg(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MOVWreg (MOVWconst [c]))
+ // cond:
+ // result: (MOVWconst [c])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = c
+ return true
+ }
return false
}
func rewriteValueARM_OpARMMOVWstore(v *Value, config *Config) bool {
@@ -5124,13 +8828,367 @@ func rewriteValueARM_OpARMMOVWstore(v *Value, config *Config) bool {
v.AddArg(mem)
return true
}
+ // match: (MOVWstore [0] {sym} (ADD ptr idx) val mem)
+ // cond: sym == nil
+ // result: (MOVWstoreidx ptr idx val mem)
+ for {
+ if v.AuxInt != 0 {
+ break
+ }
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMADD {
+ break
+ }
+ ptr := v_0.Args[0]
+ idx := v_0.Args[1]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(sym == nil) {
+ break
+ }
+ v.reset(OpARMMOVWstoreidx)
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVWstore [0] {sym} (ADDshiftLL ptr idx [c]) val mem)
+ // cond: sym == nil
+ // result: (MOVWstoreshiftLL ptr idx [c] val mem)
+ for {
+ if v.AuxInt != 0 {
+ break
+ }
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMADDshiftLL {
+ break
+ }
+ ptr := v_0.Args[0]
+ idx := v_0.Args[1]
+ c := v_0.AuxInt
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(sym == nil) {
+ break
+ }
+ v.reset(OpARMMOVWstoreshiftLL)
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AuxInt = c
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVWstore [0] {sym} (ADDshiftRL ptr idx [c]) val mem)
+ // cond: sym == nil
+ // result: (MOVWstoreshiftRL ptr idx [c] val mem)
+ for {
+ if v.AuxInt != 0 {
+ break
+ }
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMADDshiftRL {
+ break
+ }
+ ptr := v_0.Args[0]
+ idx := v_0.Args[1]
+ c := v_0.AuxInt
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(sym == nil) {
+ break
+ }
+ v.reset(OpARMMOVWstoreshiftRL)
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AuxInt = c
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVWstore [0] {sym} (ADDshiftRA ptr idx [c]) val mem)
+ // cond: sym == nil
+ // result: (MOVWstoreshiftRA ptr idx [c] val mem)
+ for {
+ if v.AuxInt != 0 {
+ break
+ }
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMADDshiftRA {
+ break
+ }
+ ptr := v_0.Args[0]
+ idx := v_0.Args[1]
+ c := v_0.AuxInt
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(sym == nil) {
+ break
+ }
+ v.reset(OpARMMOVWstoreshiftRA)
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AuxInt = c
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
return false
}
-func rewriteValueARM_OpARMMUL(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMMOVWstoreidx(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MOVWstoreidx ptr (MOVWconst [c]) val mem)
+ // cond:
+ // result: (MOVWstore [c] ptr val mem)
+ for {
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ val := v.Args[2]
+ mem := v.Args[3]
+ v.reset(OpARMMOVWstore)
+ v.AuxInt = c
+ v.AddArg(ptr)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVWstoreidx (MOVWconst [c]) ptr val mem)
+ // cond:
+ // result: (MOVWstore [c] ptr val mem)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ ptr := v.Args[1]
+ val := v.Args[2]
+ mem := v.Args[3]
+ v.reset(OpARMMOVWstore)
+ v.AuxInt = c
+ v.AddArg(ptr)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVWstoreidx ptr (SLLconst idx [c]) val mem)
+ // cond:
+ // result: (MOVWstoreshiftLL ptr idx [c] val mem)
+ for {
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSLLconst {
+ break
+ }
+ idx := v_1.Args[0]
+ c := v_1.AuxInt
+ val := v.Args[2]
+ mem := v.Args[3]
+ v.reset(OpARMMOVWstoreshiftLL)
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AuxInt = c
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVWstoreidx (SLLconst idx [c]) ptr val mem)
+ // cond:
+ // result: (MOVWstoreshiftLL ptr idx [c] val mem)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSLLconst {
+ break
+ }
+ idx := v_0.Args[0]
+ c := v_0.AuxInt
+ ptr := v.Args[1]
+ val := v.Args[2]
+ mem := v.Args[3]
+ v.reset(OpARMMOVWstoreshiftLL)
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AuxInt = c
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVWstoreidx ptr (SRLconst idx [c]) val mem)
+ // cond:
+ // result: (MOVWstoreshiftRL ptr idx [c] val mem)
+ for {
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSRLconst {
+ break
+ }
+ idx := v_1.Args[0]
+ c := v_1.AuxInt
+ val := v.Args[2]
+ mem := v.Args[3]
+ v.reset(OpARMMOVWstoreshiftRL)
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AuxInt = c
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVWstoreidx (SRLconst idx [c]) ptr val mem)
+ // cond:
+ // result: (MOVWstoreshiftRL ptr idx [c] val mem)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSRLconst {
+ break
+ }
+ idx := v_0.Args[0]
+ c := v_0.AuxInt
+ ptr := v.Args[1]
+ val := v.Args[2]
+ mem := v.Args[3]
+ v.reset(OpARMMOVWstoreshiftRL)
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AuxInt = c
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVWstoreidx ptr (SRAconst idx [c]) val mem)
+ // cond:
+ // result: (MOVWstoreshiftRA ptr idx [c] val mem)
+ for {
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSRAconst {
+ break
+ }
+ idx := v_1.Args[0]
+ c := v_1.AuxInt
+ val := v.Args[2]
+ mem := v.Args[3]
+ v.reset(OpARMMOVWstoreshiftRA)
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AuxInt = c
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVWstoreidx (SRAconst idx [c]) ptr val mem)
+ // cond:
+ // result: (MOVWstoreshiftRA ptr idx [c] val mem)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSRAconst {
+ break
+ }
+ idx := v_0.Args[0]
+ c := v_0.AuxInt
+ ptr := v.Args[1]
+ val := v.Args[2]
+ mem := v.Args[3]
+ v.reset(OpARMMOVWstoreshiftRA)
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AuxInt = c
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMMOVWstoreshiftLL(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MOVWstoreshiftLL ptr (MOVWconst [c]) [d] val mem)
+ // cond:
+ // result: (MOVWstore [int64(uint32(c)<<uint64(d))] ptr val mem)
+ for {
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ d := v.AuxInt
+ val := v.Args[2]
+ mem := v.Args[3]
+ v.reset(OpARMMOVWstore)
+ v.AuxInt = int64(uint32(c) << uint64(d))
+ v.AddArg(ptr)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMMOVWstoreshiftRA(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MOVWstoreshiftRA ptr (MOVWconst [c]) [d] val mem)
+ // cond:
+ // result: (MOVWstore [int64(int32(c)>>uint64(d))] ptr val mem)
+ for {
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ d := v.AuxInt
+ val := v.Args[2]
+ mem := v.Args[3]
+ v.reset(OpARMMOVWstore)
+ v.AuxInt = int64(int32(c) >> uint64(d))
+ v.AddArg(ptr)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMMOVWstoreshiftRL(v *Value, config *Config) bool {
b := v.Block
_ = b
- // match: (MUL x (MOVWconst [-1]))
+ // match: (MOVWstoreshiftRL ptr (MOVWconst [c]) [d] val mem)
// cond:
+ // result: (MOVWstore [int64(uint32(c)>>uint64(d))] ptr val mem)
+ for {
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ d := v.AuxInt
+ val := v.Args[2]
+ mem := v.Args[3]
+ v.reset(OpARMMOVWstore)
+ v.AuxInt = int64(uint32(c) >> uint64(d))
+ v.AddArg(ptr)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
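The store-side shift folds mirror the loads: once the shifted index is a constant, the scaled offset is computed at compile time and the op decays to a plain MOVWstore. A sketch of the AuxInt arithmetic in the rule above:

	func foldedOffset(c, d uint32) int64 {
		return int64(c << d) // e.g. c=3, d=2 gives MOVWstore [12] ptr val mem
	}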
+func rewriteValueARM_OpARMMUL(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MUL x (MOVWconst [c]))
+ // cond: int32(c) == -1
// result: (RSBconst [0] x)
for {
x := v.Args[0]
@@ -5138,7 +9196,8 @@ func rewriteValueARM_OpARMMUL(v *Value, config *Config) bool {
if v_1.Op != OpARMMOVWconst {
break
}
- if v_1.AuxInt != -1 {
+ c := v_1.AuxInt
+ if !(int32(c) == -1) {
break
}
v.reset(OpARMRSBconst)
@@ -5196,18 +9255,145 @@ func rewriteValueARM_OpARMMUL(v *Value, config *Config) bool {
v.AddArg(x)
return true
}
- // match: (MUL (MOVWconst [-1]) x)
- // cond:
+ // match: (MUL x (MOVWconst [c]))
+ // cond: isPowerOfTwo(c-1) && int32(c) >= 3
+ // result: (ADDshiftLL x x [log2(c-1)])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ if !(isPowerOfTwo(c-1) && int32(c) >= 3) {
+ break
+ }
+ v.reset(OpARMADDshiftLL)
+ v.AddArg(x)
+ v.AddArg(x)
+ v.AuxInt = log2(c - 1)
+ return true
+ }
+ // match: (MUL x (MOVWconst [c]))
+ // cond: isPowerOfTwo(c+1) && int32(c) >= 7
+ // result: (RSBshiftLL x x [log2(c+1)])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ if !(isPowerOfTwo(c+1) && int32(c) >= 7) {
+ break
+ }
+ v.reset(OpARMRSBshiftLL)
+ v.AddArg(x)
+ v.AddArg(x)
+ v.AuxInt = log2(c + 1)
+ return true
+ }
+ // match: (MUL x (MOVWconst [c]))
+ // cond: c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c)
+ // result: (SLLconst [log2(c/3)] (ADDshiftLL <x.Type> x x [1]))
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ if !(c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c)) {
+ break
+ }
+ v.reset(OpARMSLLconst)
+ v.AuxInt = log2(c / 3)
+ v0 := b.NewValue0(v.Line, OpARMADDshiftLL, x.Type)
+ v0.AddArg(x)
+ v0.AddArg(x)
+ v0.AuxInt = 1
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MUL x (MOVWconst [c]))
+ // cond: c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c)
+ // result: (SLLconst [log2(c/5)] (ADDshiftLL <x.Type> x x [2]))
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ if !(c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c)) {
+ break
+ }
+ v.reset(OpARMSLLconst)
+ v.AuxInt = log2(c / 5)
+ v0 := b.NewValue0(v.Line, OpARMADDshiftLL, x.Type)
+ v0.AddArg(x)
+ v0.AddArg(x)
+ v0.AuxInt = 2
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MUL x (MOVWconst [c]))
+ // cond: c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c)
+ // result: (SLLconst [log2(c/7)] (RSBshiftLL <x.Type> x x [3]))
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ if !(c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c)) {
+ break
+ }
+ v.reset(OpARMSLLconst)
+ v.AuxInt = log2(c / 7)
+ v0 := b.NewValue0(v.Line, OpARMRSBshiftLL, x.Type)
+ v0.AddArg(x)
+ v0.AddArg(x)
+ v0.AuxInt = 3
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MUL x (MOVWconst [c]))
+ // cond: c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c)
+ // result: (SLLconst [log2(c/9)] (ADDshiftLL <x.Type> x x [3]))
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ if !(c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c)) {
+ break
+ }
+ v.reset(OpARMSLLconst)
+ v.AuxInt = log2(c / 9)
+ v0 := b.NewValue0(v.Line, OpARMADDshiftLL, x.Type)
+ v0.AddArg(x)
+ v0.AddArg(x)
+ v0.AuxInt = 3
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MUL (MOVWconst [c]) x)
+ // cond: int32(c) == -1
// result: (RSBconst [0] x)
for {
v_0 := v.Args[0]
if v_0.Op != OpARMMOVWconst {
break
}
- if v_0.AuxInt != -1 {
+ c := v_0.AuxInt
+ x := v.Args[1]
+ if !(int32(c) == -1) {
break
}
- x := v.Args[1]
v.reset(OpARMRSBconst)
v.AuxInt = 0
v.AddArg(x)
@@ -5263,6 +9449,132 @@ func rewriteValueARM_OpARMMUL(v *Value, config *Config) bool {
v.AddArg(x)
return true
}
+ // match: (MUL (MOVWconst [c]) x)
+ // cond: isPowerOfTwo(c-1) && int32(c) >= 3
+ // result: (ADDshiftLL x x [log2(c-1)])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ if !(isPowerOfTwo(c-1) && int32(c) >= 3) {
+ break
+ }
+ v.reset(OpARMADDshiftLL)
+ v.AddArg(x)
+ v.AddArg(x)
+ v.AuxInt = log2(c - 1)
+ return true
+ }
+ // match: (MUL (MOVWconst [c]) x)
+ // cond: isPowerOfTwo(c+1) && int32(c) >= 7
+ // result: (RSBshiftLL x x [log2(c+1)])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ if !(isPowerOfTwo(c+1) && int32(c) >= 7) {
+ break
+ }
+ v.reset(OpARMRSBshiftLL)
+ v.AddArg(x)
+ v.AddArg(x)
+ v.AuxInt = log2(c + 1)
+ return true
+ }
+ // match: (MUL (MOVWconst [c]) x)
+ // cond: c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c)
+ // result: (SLLconst [log2(c/3)] (ADDshiftLL <x.Type> x x [1]))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ if !(c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c)) {
+ break
+ }
+ v.reset(OpARMSLLconst)
+ v.AuxInt = log2(c / 3)
+ v0 := b.NewValue0(v.Line, OpARMADDshiftLL, x.Type)
+ v0.AddArg(x)
+ v0.AddArg(x)
+ v0.AuxInt = 1
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MUL (MOVWconst [c]) x)
+ // cond: c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c)
+ // result: (SLLconst [log2(c/5)] (ADDshiftLL <x.Type> x x [2]))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ if !(c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c)) {
+ break
+ }
+ v.reset(OpARMSLLconst)
+ v.AuxInt = log2(c / 5)
+ v0 := b.NewValue0(v.Line, OpARMADDshiftLL, x.Type)
+ v0.AddArg(x)
+ v0.AddArg(x)
+ v0.AuxInt = 2
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MUL (MOVWconst [c]) x)
+ // cond: c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c)
+ // result: (SLLconst [log2(c/7)] (RSBshiftLL <x.Type> x x [3]))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ if !(c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c)) {
+ break
+ }
+ v.reset(OpARMSLLconst)
+ v.AuxInt = log2(c / 7)
+ v0 := b.NewValue0(v.Line, OpARMRSBshiftLL, x.Type)
+ v0.AddArg(x)
+ v0.AddArg(x)
+ v0.AuxInt = 3
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MUL (MOVWconst [c]) x)
+ // cond: c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c)
+ // result: (SLLconst [log2(c/9)] (ADDshiftLL <x.Type> x x [3]))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ if !(c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c)) {
+ break
+ }
+ v.reset(OpARMSLLconst)
+ v.AuxInt = log2(c / 9)
+ v0 := b.NewValue0(v.Line, OpARMADDshiftLL, x.Type)
+ v0.AddArg(x)
+ v0.AddArg(x)
+ v0.AuxInt = 3
+ v.AddArg(v0)
+ return true
+ }
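Taken together, the MUL rules above replace a multiply by 2^n±1, or by 3, 5, 7, or 9 times a power of two, with one or two shifted ALU ops. Plain-Go sketches of the decompositions (RSB is ARM's reverse subtract, so RSBshiftLL x x [k] computes (x<<k) - x):

	func mul9(x int32) int32  { return x + x<<3 }        // ADDshiftLL x x [3]: 9 = 8+1
	func mul7(x int32) int32  { return x<<3 - x }        // RSBshiftLL x x [3]: 7 = 8-1
	func mul40(x int32) int32 { return (x + x<<2) << 3 } // SLLconst [3] (ADDshiftLL x x [2]): 40 = 5*8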
// match: (MUL (MOVWconst [c]) (MOVWconst [d]))
// cond:
// result: (MOVWconst [int64(int32(c*d))])
@@ -5286,8 +9598,8 @@ func rewriteValueARM_OpARMMUL(v *Value, config *Config) bool {
func rewriteValueARM_OpARMMULA(v *Value, config *Config) bool {
b := v.Block
_ = b
- // match: (MULA x (MOVWconst [-1]) a)
- // cond:
+ // match: (MULA x (MOVWconst [c]) a)
+ // cond: int32(c) == -1
// result: (SUB a x)
for {
x := v.Args[0]
@@ -5295,10 +9607,11 @@ func rewriteValueARM_OpARMMULA(v *Value, config *Config) bool {
if v_1.Op != OpARMMOVWconst {
break
}
- if v_1.AuxInt != -1 {
+ c := v_1.AuxInt
+ a := v.Args[2]
+ if !(int32(c) == -1) {
break
}
- a := v.Args[2]
v.reset(OpARMSUB)
v.AddArg(a)
v.AddArg(x)
@@ -5361,19 +9674,170 @@ func rewriteValueARM_OpARMMULA(v *Value, config *Config) bool {
v.AddArg(a)
return true
}
- // match: (MULA (MOVWconst [-1]) x a)
- // cond:
+ // match: (MULA x (MOVWconst [c]) a)
+ // cond: isPowerOfTwo(c-1) && int32(c) >= 3
+ // result: (ADD (ADDshiftLL <x.Type> x x [log2(c-1)]) a)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ a := v.Args[2]
+ if !(isPowerOfTwo(c-1) && int32(c) >= 3) {
+ break
+ }
+ v.reset(OpARMADD)
+ v0 := b.NewValue0(v.Line, OpARMADDshiftLL, x.Type)
+ v0.AddArg(x)
+ v0.AddArg(x)
+ v0.AuxInt = log2(c - 1)
+ v.AddArg(v0)
+ v.AddArg(a)
+ return true
+ }
+ // match: (MULA x (MOVWconst [c]) a)
+ // cond: isPowerOfTwo(c+1) && int32(c) >= 7
+ // result: (ADD (RSBshiftLL <x.Type> x x [log2(c+1)]) a)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ a := v.Args[2]
+ if !(isPowerOfTwo(c+1) && int32(c) >= 7) {
+ break
+ }
+ v.reset(OpARMADD)
+ v0 := b.NewValue0(v.Line, OpARMRSBshiftLL, x.Type)
+ v0.AddArg(x)
+ v0.AddArg(x)
+ v0.AuxInt = log2(c + 1)
+ v.AddArg(v0)
+ v.AddArg(a)
+ return true
+ }
+ // match: (MULA x (MOVWconst [c]) a)
+ // cond: c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c)
+ // result: (ADD (SLLconst <x.Type> [log2(c/3)] (ADDshiftLL <x.Type> x x [1])) a)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ a := v.Args[2]
+ if !(c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c)) {
+ break
+ }
+ v.reset(OpARMADD)
+ v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
+ v0.AuxInt = log2(c / 3)
+ v1 := b.NewValue0(v.Line, OpARMADDshiftLL, x.Type)
+ v1.AddArg(x)
+ v1.AddArg(x)
+ v1.AuxInt = 1
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ v.AddArg(a)
+ return true
+ }
+ // match: (MULA x (MOVWconst [c]) a)
+ // cond: c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c)
+ // result: (ADD (SLLconst <x.Type> [log2(c/5)] (ADDshiftLL <x.Type> x x [2])) a)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ a := v.Args[2]
+ if !(c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c)) {
+ break
+ }
+ v.reset(OpARMADD)
+ v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
+ v0.AuxInt = log2(c / 5)
+ v1 := b.NewValue0(v.Line, OpARMADDshiftLL, x.Type)
+ v1.AddArg(x)
+ v1.AddArg(x)
+ v1.AuxInt = 2
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ v.AddArg(a)
+ return true
+ }
+ // match: (MULA x (MOVWconst [c]) a)
+ // cond: c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c)
+ // result: (ADD (SLLconst <x.Type> [log2(c/7)] (RSBshiftLL <x.Type> x x [3])) a)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ a := v.Args[2]
+ if !(c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c)) {
+ break
+ }
+ v.reset(OpARMADD)
+ v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
+ v0.AuxInt = log2(c / 7)
+ v1 := b.NewValue0(v.Line, OpARMRSBshiftLL, x.Type)
+ v1.AddArg(x)
+ v1.AddArg(x)
+ v1.AuxInt = 3
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ v.AddArg(a)
+ return true
+ }
+ // match: (MULA x (MOVWconst [c]) a)
+ // cond: c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c)
+ // result: (ADD (SLLconst <x.Type> [log2(c/9)] (ADDshiftLL <x.Type> x x [3])) a)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ a := v.Args[2]
+ if !(c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c)) {
+ break
+ }
+ v.reset(OpARMADD)
+ v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
+ v0.AuxInt = log2(c / 9)
+ v1 := b.NewValue0(v.Line, OpARMADDshiftLL, x.Type)
+ v1.AddArg(x)
+ v1.AddArg(x)
+ v1.AuxInt = 3
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ v.AddArg(a)
+ return true
+ }
+ // match: (MULA (MOVWconst [c]) x a)
+ // cond: int32(c) == -1
// result: (SUB a x)
for {
v_0 := v.Args[0]
if v_0.Op != OpARMMOVWconst {
break
}
- if v_0.AuxInt != -1 {
- break
- }
+ c := v_0.AuxInt
x := v.Args[1]
a := v.Args[2]
+ if !(int32(c) == -1) {
+ break
+ }
v.reset(OpARMSUB)
v.AddArg(a)
v.AddArg(x)
@@ -5436,6 +9900,156 @@ func rewriteValueARM_OpARMMULA(v *Value, config *Config) bool {
v.AddArg(a)
return true
}
+ // match: (MULA (MOVWconst [c]) x a)
+ // cond: isPowerOfTwo(c-1) && int32(c) >= 3
+ // result: (ADD (ADDshiftLL <x.Type> x x [log2(c-1)]) a)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ a := v.Args[2]
+ if !(isPowerOfTwo(c-1) && int32(c) >= 3) {
+ break
+ }
+ v.reset(OpARMADD)
+ v0 := b.NewValue0(v.Line, OpARMADDshiftLL, x.Type)
+ v0.AddArg(x)
+ v0.AddArg(x)
+ v0.AuxInt = log2(c - 1)
+ v.AddArg(v0)
+ v.AddArg(a)
+ return true
+ }
+ // match: (MULA (MOVWconst [c]) x a)
+ // cond: isPowerOfTwo(c+1) && int32(c) >= 7
+ // result: (ADD (RSBshiftLL <x.Type> x x [log2(c+1)]) a)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ a := v.Args[2]
+ if !(isPowerOfTwo(c+1) && int32(c) >= 7) {
+ break
+ }
+ v.reset(OpARMADD)
+ v0 := b.NewValue0(v.Line, OpARMRSBshiftLL, x.Type)
+ v0.AddArg(x)
+ v0.AddArg(x)
+ v0.AuxInt = log2(c + 1)
+ v.AddArg(v0)
+ v.AddArg(a)
+ return true
+ }
+ // match: (MULA (MOVWconst [c]) x a)
+ // cond: c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c)
+ // result: (ADD (SLLconst <x.Type> [log2(c/3)] (ADDshiftLL <x.Type> x x [1])) a)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ a := v.Args[2]
+ if !(c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c)) {
+ break
+ }
+ v.reset(OpARMADD)
+ v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
+ v0.AuxInt = log2(c / 3)
+ v1 := b.NewValue0(v.Line, OpARMADDshiftLL, x.Type)
+ v1.AddArg(x)
+ v1.AddArg(x)
+ v1.AuxInt = 1
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ v.AddArg(a)
+ return true
+ }
+ // match: (MULA (MOVWconst [c]) x a)
+ // cond: c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c)
+ // result: (ADD (SLLconst <x.Type> [log2(c/5)] (ADDshiftLL <x.Type> x x [2])) a)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ a := v.Args[2]
+ if !(c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c)) {
+ break
+ }
+ v.reset(OpARMADD)
+ v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
+ v0.AuxInt = log2(c / 5)
+ v1 := b.NewValue0(v.Line, OpARMADDshiftLL, x.Type)
+ v1.AddArg(x)
+ v1.AddArg(x)
+ v1.AuxInt = 2
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ v.AddArg(a)
+ return true
+ }
+ // match: (MULA (MOVWconst [c]) x a)
+ // cond: c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c)
+ // result: (ADD (SLLconst <x.Type> [log2(c/7)] (RSBshiftLL <x.Type> x x [3])) a)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ a := v.Args[2]
+ if !(c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c)) {
+ break
+ }
+ v.reset(OpARMADD)
+ v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
+ v0.AuxInt = log2(c / 7)
+ v1 := b.NewValue0(v.Line, OpARMRSBshiftLL, x.Type)
+ v1.AddArg(x)
+ v1.AddArg(x)
+ v1.AuxInt = 3
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ v.AddArg(a)
+ return true
+ }
+ // match: (MULA (MOVWconst [c]) x a)
+ // cond: c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c)
+ // result: (ADD (SLLconst <x.Type> [log2(c/9)] (ADDshiftLL <x.Type> x x [3])) a)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ a := v.Args[2]
+ if !(c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c)) {
+ break
+ }
+ v.reset(OpARMADD)
+ v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
+ v0.AuxInt = log2(c / 9)
+ v1 := b.NewValue0(v.Line, OpARMADDshiftLL, x.Type)
+ v1.AddArg(x)
+ v1.AddArg(x)
+ v1.AuxInt = 3
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ v.AddArg(a)
+ return true
+ }
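MULA x y a computes x*y + a, so the same decompositions apply with the accumulator folded into a trailing ADD, as in this sketch:

	func mula9(x, a int32) int32 {
		return (x + x<<3) + a // ADD (ADDshiftLL x x [3]) a
	}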
// match: (MULA (MOVWconst [c]) (MOVWconst [d]) a)
// cond:
// result: (ADDconst [int64(int32(c*d))] a)
@@ -5474,6 +10088,213 @@ func rewriteValueARM_OpARMMVN(v *Value, config *Config) bool {
v.AuxInt = ^c
return true
}
+ // match: (MVN (SLLconst [c] x))
+ // cond:
+ // result: (MVNshiftLL x [c])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSLLconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v_0.Args[0]
+ v.reset(OpARMMVNshiftLL)
+ v.AddArg(x)
+ v.AuxInt = c
+ return true
+ }
+ // match: (MVN (SRLconst [c] x))
+ // cond:
+ // result: (MVNshiftRL x [c])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSRLconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v_0.Args[0]
+ v.reset(OpARMMVNshiftRL)
+ v.AddArg(x)
+ v.AuxInt = c
+ return true
+ }
+ // match: (MVN (SRAconst [c] x))
+ // cond:
+ // result: (MVNshiftRA x [c])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSRAconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v_0.Args[0]
+ v.reset(OpARMMVNshiftRA)
+ v.AddArg(x)
+ v.AuxInt = c
+ return true
+ }
+ // match: (MVN (SLL x y))
+ // cond:
+ // result: (MVNshiftLLreg x y)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSLL {
+ break
+ }
+ x := v_0.Args[0]
+ y := v_0.Args[1]
+ v.reset(OpARMMVNshiftLLreg)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (MVN (SRL x y))
+ // cond:
+ // result: (MVNshiftRLreg x y)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSRL {
+ break
+ }
+ x := v_0.Args[0]
+ y := v_0.Args[1]
+ v.reset(OpARMMVNshiftRLreg)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (MVN (SRA x y))
+ // cond:
+ // result: (MVNshiftRAreg x y)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSRA {
+ break
+ }
+ x := v_0.Args[0]
+ y := v_0.Args[1]
+ v.reset(OpARMMVNshiftRAreg)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ return false
+}
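
Each rule in this MVN function folds a shift feeding the bitwise complement into one of the new MVNshift ops, so the whole expression becomes a single MVN with a shifted operand. A scalar sketch of the identity for the constant-shift case (illustrative only):

	// ^(x << c) lowers to (MVNshiftLL x [c]): one MVN Rd, Rm, LSL #c
	func mvnShiftLL(x uint32, c uint) uint32 {
		return ^(x << c)
	}
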
+func rewriteValueARM_OpARMMVNshiftLL(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MVNshiftLL (MOVWconst [c]) [d])
+ // cond:
+ // result: (MOVWconst [^int64(uint32(c)<<uint64(d))])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ d := v.AuxInt
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = ^int64(uint32(c) << uint64(d))
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMMVNshiftLLreg(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MVNshiftLLreg x (MOVWconst [c]))
+ // cond:
+ // result: (MVNshiftLL x [c])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ v.reset(OpARMMVNshiftLL)
+ v.AddArg(x)
+ v.AuxInt = c
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMMVNshiftRA(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MVNshiftRA (MOVWconst [c]) [d])
+ // cond:
+ // result: (MOVWconst [^int64(int32(c)>>uint64(d))])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ d := v.AuxInt
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = ^int64(int32(c) >> uint64(d))
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMMVNshiftRAreg(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MVNshiftRAreg x (MOVWconst [c]))
+ // cond:
+ // result: (MVNshiftRA x [c])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ v.reset(OpARMMVNshiftRA)
+ v.AddArg(x)
+ v.AuxInt = c
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMMVNshiftRL(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MVNshiftRL (MOVWconst [c]) [d])
+ // cond:
+ // result: (MOVWconst [^int64(uint32(c)>>uint64(d))])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ d := v.AuxInt
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = ^int64(uint32(c) >> uint64(d))
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMMVNshiftRLreg(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MVNshiftRLreg x (MOVWconst [c]))
+ // cond:
+ // result: (MVNshiftRL x [c])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ v.reset(OpARMMVNshiftRL)
+ v.AddArg(x)
+ v.AuxInt = c
+ return true
+ }
return false
}
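
The six MVNshift helpers above share one template: if the shifted operand is a MOVWconst the whole value folds to a compile-time constant, and if a register-shift count is a MOVWconst the reg form decays to the constant-shift form. A worked instance of the MVNshiftLL fold, with hypothetical numbers:

	// (MVNshiftLL (MOVWconst [6]) [2]) folds to the constant below
	const mvnFolded = ^int64(uint32(6) << 2) // ^24 == -25
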
func rewriteValueARM_OpMod16(v *Value, config *Config) bool {
@@ -6293,6 +11114,210 @@ func rewriteValueARM_OpARMOR(v *Value, config *Config) bool {
v.AddArg(x)
return true
}
+ // match: (OR x (SLLconst [c] y))
+ // cond:
+ // result: (ORshiftLL x y [c])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSLLconst {
+ break
+ }
+ c := v_1.AuxInt
+ y := v_1.Args[0]
+ v.reset(OpARMORshiftLL)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AuxInt = c
+ return true
+ }
+ // match: (OR (SLLconst [c] y) x)
+ // cond:
+ // result: (ORshiftLL x y [c])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSLLconst {
+ break
+ }
+ c := v_0.AuxInt
+ y := v_0.Args[0]
+ x := v.Args[1]
+ v.reset(OpARMORshiftLL)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AuxInt = c
+ return true
+ }
+ // match: (OR x (SRLconst [c] y))
+ // cond:
+ // result: (ORshiftRL x y [c])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSRLconst {
+ break
+ }
+ c := v_1.AuxInt
+ y := v_1.Args[0]
+ v.reset(OpARMORshiftRL)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AuxInt = c
+ return true
+ }
+ // match: (OR (SRLconst [c] y) x)
+ // cond:
+ // result: (ORshiftRL x y [c])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSRLconst {
+ break
+ }
+ c := v_0.AuxInt
+ y := v_0.Args[0]
+ x := v.Args[1]
+ v.reset(OpARMORshiftRL)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AuxInt = c
+ return true
+ }
+ // match: (OR x (SRAconst [c] y))
+ // cond:
+ // result: (ORshiftRA x y [c])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSRAconst {
+ break
+ }
+ c := v_1.AuxInt
+ y := v_1.Args[0]
+ v.reset(OpARMORshiftRA)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AuxInt = c
+ return true
+ }
+ // match: (OR (SRAconst [c] y) x)
+ // cond:
+ // result: (ORshiftRA x y [c])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSRAconst {
+ break
+ }
+ c := v_0.AuxInt
+ y := v_0.Args[0]
+ x := v.Args[1]
+ v.reset(OpARMORshiftRA)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AuxInt = c
+ return true
+ }
+ // match: (OR x (SLL y z))
+ // cond:
+ // result: (ORshiftLLreg x y z)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSLL {
+ break
+ }
+ y := v_1.Args[0]
+ z := v_1.Args[1]
+ v.reset(OpARMORshiftLLreg)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(z)
+ return true
+ }
+ // match: (OR (SLL y z) x)
+ // cond:
+ // result: (ORshiftLLreg x y z)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSLL {
+ break
+ }
+ y := v_0.Args[0]
+ z := v_0.Args[1]
+ x := v.Args[1]
+ v.reset(OpARMORshiftLLreg)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(z)
+ return true
+ }
+ // match: (OR x (SRL y z))
+ // cond:
+ // result: (ORshiftRLreg x y z)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSRL {
+ break
+ }
+ y := v_1.Args[0]
+ z := v_1.Args[1]
+ v.reset(OpARMORshiftRLreg)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(z)
+ return true
+ }
+ // match: (OR (SRL y z) x)
+ // cond:
+ // result: (ORshiftRLreg x y z)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSRL {
+ break
+ }
+ y := v_0.Args[0]
+ z := v_0.Args[1]
+ x := v.Args[1]
+ v.reset(OpARMORshiftRLreg)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(z)
+ return true
+ }
+ // match: (OR x (SRA y z))
+ // cond:
+ // result: (ORshiftRAreg x y z)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSRA {
+ break
+ }
+ y := v_1.Args[0]
+ z := v_1.Args[1]
+ v.reset(OpARMORshiftRAreg)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(z)
+ return true
+ }
+ // match: (OR (SRA y z) x)
+ // cond:
+ // result: (ORshiftRAreg x y z)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSRA {
+ break
+ }
+ y := v_0.Args[0]
+ z := v_0.Args[1]
+ x := v.Args[1]
+ v.reset(OpARMORshiftRAreg)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(z)
+ return true
+ }
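
OR is commutative, so each shift fold above appears twice, once per operand order, and both orders normalize to the same ORshift op with the shifted value as the second argument. Schematically, in the rule syntax used by the comments (both rules appear above):

	(OR x (SLLconst [c] y)) -> (ORshiftLL x y [c])
	(OR (SLLconst [c] y) x) -> (ORshiftLL x y [c])
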
// match: (OR x x)
// cond:
// result: x
@@ -6368,6 +11393,315 @@ func rewriteValueARM_OpARMORconst(v *Value, config *Config) bool {
}
return false
}
+func rewriteValueARM_OpARMORshiftLL(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ORshiftLL (MOVWconst [c]) x [d])
+ // cond:
+ // result: (ORconst [c] (SLLconst <x.Type> x [d]))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ d := v.AuxInt
+ v.reset(OpARMORconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
+ v0.AddArg(x)
+ v0.AuxInt = d
+ v.AddArg(v0)
+ return true
+ }
+ // match: (ORshiftLL x (MOVWconst [c]) [d])
+ // cond:
+ // result: (ORconst x [int64(uint32(c)<<uint64(d))])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ d := v.AuxInt
+ v.reset(OpARMORconst)
+ v.AddArg(x)
+ v.AuxInt = int64(uint32(c) << uint64(d))
+ return true
+ }
+ // match: (ORshiftLL y:(SLLconst x [c]) x [d])
+ // cond: c==d
+ // result: y
+ for {
+ y := v.Args[0]
+ if y.Op != OpARMSLLconst {
+ break
+ }
+ c := y.AuxInt
+ x := y.Args[0]
+ if x != v.Args[1] {
+ break
+ }
+ d := v.AuxInt
+ if !(c == d) {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = y.Type
+ v.AddArg(y)
+ return true
+ }
+ return false
+}
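
The last ORshiftLL rule is a plain absorption identity: once the first operand is already x<<c, or-ing in x shifted by the same amount adds nothing. As scalar Go (a sketch):

	// (ORshiftLL y:(SLLconst x [c]) x [c]) == (x<<c) | (x<<c) == y
	func orAbsorb(x uint32, c uint) uint32 {
		y := x << c
		return y | x<<c // simplifies to y
	}
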
+func rewriteValueARM_OpARMORshiftLLreg(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ORshiftLLreg (MOVWconst [c]) x y)
+ // cond:
+ // result: (ORconst [c] (SLL <x.Type> x y))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ y := v.Args[2]
+ v.reset(OpARMORconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSLL, x.Type)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (ORshiftLLreg x y (MOVWconst [c]))
+ // cond:
+ // result: (ORshiftLL x y [c])
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_2.AuxInt
+ v.reset(OpARMORshiftLL)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AuxInt = c
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMORshiftRA(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ORshiftRA (MOVWconst [c]) x [d])
+ // cond:
+ // result: (ORconst [c] (SRAconst <x.Type> x [d]))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ d := v.AuxInt
+ v.reset(OpARMORconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSRAconst, x.Type)
+ v0.AddArg(x)
+ v0.AuxInt = d
+ v.AddArg(v0)
+ return true
+ }
+ // match: (ORshiftRA x (MOVWconst [c]) [d])
+ // cond:
+ // result: (ORconst x [int64(int32(c)>>uint64(d))])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ d := v.AuxInt
+ v.reset(OpARMORconst)
+ v.AddArg(x)
+ v.AuxInt = int64(int32(c) >> uint64(d))
+ return true
+ }
+ // match: (ORshiftRA y:(SRAconst x [c]) x [d])
+ // cond: c==d
+ // result: y
+ for {
+ y := v.Args[0]
+ if y.Op != OpARMSRAconst {
+ break
+ }
+ c := y.AuxInt
+ x := y.Args[0]
+ if x != v.Args[1] {
+ break
+ }
+ d := v.AuxInt
+ if !(c == d) {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = y.Type
+ v.AddArg(y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMORshiftRAreg(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ORshiftRAreg (MOVWconst [c]) x y)
+ // cond:
+ // result: (ORconst [c] (SRA <x.Type> x y))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ y := v.Args[2]
+ v.reset(OpARMORconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSRA, x.Type)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (ORshiftRAreg x y (MOVWconst [c]))
+ // cond:
+ // result: (ORshiftRA x y [c])
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_2.AuxInt
+ v.reset(OpARMORshiftRA)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AuxInt = c
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMORshiftRL(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ORshiftRL (MOVWconst [c]) x [d])
+ // cond:
+ // result: (ORconst [c] (SRLconst <x.Type> x [d]))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ d := v.AuxInt
+ v.reset(OpARMORconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSRLconst, x.Type)
+ v0.AddArg(x)
+ v0.AuxInt = d
+ v.AddArg(v0)
+ return true
+ }
+ // match: (ORshiftRL x (MOVWconst [c]) [d])
+ // cond:
+ // result: (ORconst x [int64(uint32(c)>>uint64(d))])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ d := v.AuxInt
+ v.reset(OpARMORconst)
+ v.AddArg(x)
+ v.AuxInt = int64(uint32(c) >> uint64(d))
+ return true
+ }
+ // match: (ORshiftRL y:(SRLconst x [c]) x [d])
+ // cond: c==d
+ // result: y
+ for {
+ y := v.Args[0]
+ if y.Op != OpARMSRLconst {
+ break
+ }
+ c := y.AuxInt
+ x := y.Args[0]
+ if x != v.Args[1] {
+ break
+ }
+ d := v.AuxInt
+ if !(c == d) {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = y.Type
+ v.AddArg(y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMORshiftRLreg(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ORshiftRLreg (MOVWconst [c]) x y)
+ // cond:
+ // result: (ORconst [c] (SRL <x.Type> x y))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ y := v.Args[2]
+ v.reset(OpARMORconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSRL, x.Type)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (ORshiftRLreg x y (MOVWconst [c]))
+ // cond:
+ // result: (ORshiftRL x y [c])
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_2.AuxInt
+ v.reset(OpARMORshiftRL)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AuxInt = c
+ return true
+ }
+ return false
+}
func rewriteValueARM_OpOffPtr(v *Value, config *Config) bool {
b := v.Block
_ = b
@@ -6490,6 +11824,465 @@ func rewriteValueARM_OpARMRSB(v *Value, config *Config) bool {
v.AddArg(x)
return true
}
+ // match: (RSB x (SLLconst [c] y))
+ // cond:
+ // result: (RSBshiftLL x y [c])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSLLconst {
+ break
+ }
+ c := v_1.AuxInt
+ y := v_1.Args[0]
+ v.reset(OpARMRSBshiftLL)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AuxInt = c
+ return true
+ }
+ // match: (RSB (SLLconst [c] y) x)
+ // cond:
+ // result: (SUBshiftLL x y [c])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSLLconst {
+ break
+ }
+ c := v_0.AuxInt
+ y := v_0.Args[0]
+ x := v.Args[1]
+ v.reset(OpARMSUBshiftLL)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AuxInt = c
+ return true
+ }
+ // match: (RSB x (SRLconst [c] y))
+ // cond:
+ // result: (RSBshiftRL x y [c])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSRLconst {
+ break
+ }
+ c := v_1.AuxInt
+ y := v_1.Args[0]
+ v.reset(OpARMRSBshiftRL)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AuxInt = c
+ return true
+ }
+ // match: (RSB (SRLconst [c] y) x)
+ // cond:
+ // result: (SUBshiftRL x y [c])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSRLconst {
+ break
+ }
+ c := v_0.AuxInt
+ y := v_0.Args[0]
+ x := v.Args[1]
+ v.reset(OpARMSUBshiftRL)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AuxInt = c
+ return true
+ }
+ // match: (RSB x (SRAconst [c] y))
+ // cond:
+ // result: (RSBshiftRA x y [c])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSRAconst {
+ break
+ }
+ c := v_1.AuxInt
+ y := v_1.Args[0]
+ v.reset(OpARMRSBshiftRA)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AuxInt = c
+ return true
+ }
+ // match: (RSB (SRAconst [c] y) x)
+ // cond:
+ // result: (SUBshiftRA x y [c])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSRAconst {
+ break
+ }
+ c := v_0.AuxInt
+ y := v_0.Args[0]
+ x := v.Args[1]
+ v.reset(OpARMSUBshiftRA)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AuxInt = c
+ return true
+ }
+ // match: (RSB x (SLL y z))
+ // cond:
+ // result: (RSBshiftLLreg x y z)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSLL {
+ break
+ }
+ y := v_1.Args[0]
+ z := v_1.Args[1]
+ v.reset(OpARMRSBshiftLLreg)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(z)
+ return true
+ }
+ // match: (RSB (SLL y z) x)
+ // cond:
+ // result: (SUBshiftLLreg x y z)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSLL {
+ break
+ }
+ y := v_0.Args[0]
+ z := v_0.Args[1]
+ x := v.Args[1]
+ v.reset(OpARMSUBshiftLLreg)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(z)
+ return true
+ }
+ // match: (RSB x (SRL y z))
+ // cond:
+ // result: (RSBshiftRLreg x y z)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSRL {
+ break
+ }
+ y := v_1.Args[0]
+ z := v_1.Args[1]
+ v.reset(OpARMRSBshiftRLreg)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(z)
+ return true
+ }
+ // match: (RSB (SRL y z) x)
+ // cond:
+ // result: (SUBshiftRLreg x y z)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSRL {
+ break
+ }
+ y := v_0.Args[0]
+ z := v_0.Args[1]
+ x := v.Args[1]
+ v.reset(OpARMSUBshiftRLreg)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(z)
+ return true
+ }
+ // match: (RSB x (SRA y z))
+ // cond:
+ // result: (RSBshiftRAreg x y z)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSRA {
+ break
+ }
+ y := v_1.Args[0]
+ z := v_1.Args[1]
+ v.reset(OpARMRSBshiftRAreg)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(z)
+ return true
+ }
+ // match: (RSB (SRA y z) x)
+ // cond:
+ // result: (SUBshiftRAreg x y z)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSRA {
+ break
+ }
+ y := v_0.Args[0]
+ z := v_0.Args[1]
+ x := v.Args[1]
+ v.reset(OpARMSUBshiftRAreg)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(z)
+ return true
+ }
+ // match: (RSB x x)
+ // cond:
+ // result: (MOVWconst [0])
+ for {
+ x := v.Args[0]
+ if x != v.Args[1] {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = 0
+ return true
+ }
+ return false
+}
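
RSB is not commutative, so mirrored operand orders cannot share an op: with the shift on the right the value stays an RSBshift, while the shift on the left flips it to the complementary SUBshift. The underlying arithmetic, as a Go sketch:

	// RSB a b computes b - a, so with a shifted operand:
	//   (y<<c) - x  becomes RSBshiftLL x y [c]
	//   x - (y<<c)  becomes SUBshiftLL x y [c]
	func rsbBothWays(x, y uint32, c uint) (uint32, uint32) {
		return (y << c) - x, x - (y << c)
	}
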
+func rewriteValueARM_OpARMRSBSshiftLL(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (RSBSshiftLL (MOVWconst [c]) x [d])
+ // cond:
+ // result: (SUBSconst [c] (SLLconst <x.Type> x [d]))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ d := v.AuxInt
+ v.reset(OpARMSUBSconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
+ v0.AddArg(x)
+ v0.AuxInt = d
+ v.AddArg(v0)
+ return true
+ }
+ // match: (RSBSshiftLL x (MOVWconst [c]) [d])
+ // cond:
+ // result: (RSBSconst x [int64(uint32(c)<<uint64(d))])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ d := v.AuxInt
+ v.reset(OpARMRSBSconst)
+ v.AddArg(x)
+ v.AuxInt = int64(uint32(c) << uint64(d))
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMRSBSshiftLLreg(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (RSBSshiftLLreg (MOVWconst [c]) x y)
+ // cond:
+ // result: (SUBSconst [c] (SLL <x.Type> x y))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ y := v.Args[2]
+ v.reset(OpARMSUBSconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSLL, x.Type)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (RSBSshiftLLreg x y (MOVWconst [c]))
+ // cond:
+ // result: (RSBSshiftLL x y [c])
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_2.AuxInt
+ v.reset(OpARMRSBSshiftLL)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AuxInt = c
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMRSBSshiftRA(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (RSBSshiftRA (MOVWconst [c]) x [d])
+ // cond:
+ // result: (SUBSconst [c] (SRAconst <x.Type> x [d]))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ d := v.AuxInt
+ v.reset(OpARMSUBSconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSRAconst, x.Type)
+ v0.AddArg(x)
+ v0.AuxInt = d
+ v.AddArg(v0)
+ return true
+ }
+ // match: (RSBSshiftRA x (MOVWconst [c]) [d])
+ // cond:
+ // result: (RSBSconst x [int64(int32(c)>>uint64(d))])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ d := v.AuxInt
+ v.reset(OpARMRSBSconst)
+ v.AddArg(x)
+ v.AuxInt = int64(int32(c) >> uint64(d))
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMRSBSshiftRAreg(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (RSBSshiftRAreg (MOVWconst [c]) x y)
+ // cond:
+ // result: (SUBSconst [c] (SRA <x.Type> x y))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ y := v.Args[2]
+ v.reset(OpARMSUBSconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSRA, x.Type)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (RSBSshiftRAreg x y (MOVWconst [c]))
+ // cond:
+ // result: (RSBSshiftRA x y [c])
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_2.AuxInt
+ v.reset(OpARMRSBSshiftRA)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AuxInt = c
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMRSBSshiftRL(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (RSBSshiftRL (MOVWconst [c]) x [d])
+ // cond:
+ // result: (SUBSconst [c] (SRLconst <x.Type> x [d]))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ d := v.AuxInt
+ v.reset(OpARMSUBSconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSRLconst, x.Type)
+ v0.AddArg(x)
+ v0.AuxInt = d
+ v.AddArg(v0)
+ return true
+ }
+ // match: (RSBSshiftRL x (MOVWconst [c]) [d])
+ // cond:
+ // result: (RSBSconst x [int64(uint32(c)>>uint64(d))])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ d := v.AuxInt
+ v.reset(OpARMRSBSconst)
+ v.AddArg(x)
+ v.AuxInt = int64(uint32(c) >> uint64(d))
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMRSBSshiftRLreg(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (RSBSshiftRLreg (MOVWconst [c]) x y)
+ // cond:
+ // result: (SUBSconst [c] (SRL <x.Type> x y))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ y := v.Args[2]
+ v.reset(OpARMSUBSconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSRL, x.Type)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (RSBSshiftRLreg x y (MOVWconst [c]))
+ // cond:
+ // result: (RSBSshiftRL x y [c])
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_2.AuxInt
+ v.reset(OpARMRSBSshiftRL)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AuxInt = c
+ return true
+ }
return false
}
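
RSBS is the flag-setting form of RSB; it presumably exists for the cases where the borrow flows into a later SBC/RSC, as in 64-bit subtraction on 32-bit ARM. A rough scalar model of that borrow chain (a sketch under that assumption, not the backend's actual lowering code):

	// SUBS lo sets the carry (no-borrow) flag; SBC hi consumes it.
	func sub64(xlo, xhi, ylo, yhi uint32) (lo, hi uint32) {
		lo = xlo - ylo
		var borrow uint32
		if xlo < ylo { // SUBS would clear carry here
			borrow = 1
		}
		hi = xhi - yhi - borrow
		return
	}
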
func rewriteValueARM_OpARMRSBconst(v *Value, config *Config) bool {
@@ -6559,6 +12352,312 @@ func rewriteValueARM_OpARMRSBconst(v *Value, config *Config) bool {
}
return false
}
+func rewriteValueARM_OpARMRSBshiftLL(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (RSBshiftLL (MOVWconst [c]) x [d])
+ // cond:
+ // result: (SUBconst [c] (SLLconst <x.Type> x [d]))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ d := v.AuxInt
+ v.reset(OpARMSUBconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
+ v0.AddArg(x)
+ v0.AuxInt = d
+ v.AddArg(v0)
+ return true
+ }
+ // match: (RSBshiftLL x (MOVWconst [c]) [d])
+ // cond:
+ // result: (RSBconst x [int64(uint32(c)<<uint64(d))])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ d := v.AuxInt
+ v.reset(OpARMRSBconst)
+ v.AddArg(x)
+ v.AuxInt = int64(uint32(c) << uint64(d))
+ return true
+ }
+ // match: (RSBshiftLL (SLLconst x [c]) x [d])
+ // cond: c==d
+ // result: (MOVWconst [0])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSLLconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v_0.Args[0]
+ if x != v.Args[1] {
+ break
+ }
+ d := v.AuxInt
+ if !(c == d) {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = 0
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMRSBshiftLLreg(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (RSBshiftLLreg (MOVWconst [c]) x y)
+ // cond:
+ // result: (SUBconst [c] (SLL <x.Type> x y))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ y := v.Args[2]
+ v.reset(OpARMSUBconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSLL, x.Type)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (RSBshiftLLreg x y (MOVWconst [c]))
+ // cond:
+ // result: (RSBshiftLL x y [c])
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_2.AuxInt
+ v.reset(OpARMRSBshiftLL)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AuxInt = c
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMRSBshiftRA(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (RSBshiftRA (MOVWconst [c]) x [d])
+ // cond:
+ // result: (SUBconst [c] (SRAconst <x.Type> x [d]))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ d := v.AuxInt
+ v.reset(OpARMSUBconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSRAconst, x.Type)
+ v0.AddArg(x)
+ v0.AuxInt = d
+ v.AddArg(v0)
+ return true
+ }
+ // match: (RSBshiftRA x (MOVWconst [c]) [d])
+ // cond:
+ // result: (RSBconst x [int64(int32(c)>>uint64(d))])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ d := v.AuxInt
+ v.reset(OpARMRSBconst)
+ v.AddArg(x)
+ v.AuxInt = int64(int32(c) >> uint64(d))
+ return true
+ }
+ // match: (RSBshiftRA (SRAconst x [c]) x [d])
+ // cond: c==d
+ // result: (MOVWconst [0])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSRAconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v_0.Args[0]
+ if x != v.Args[1] {
+ break
+ }
+ d := v.AuxInt
+ if !(c == d) {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = 0
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMRSBshiftRAreg(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (RSBshiftRAreg (MOVWconst [c]) x y)
+ // cond:
+ // result: (SUBconst [c] (SRA <x.Type> x y))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ y := v.Args[2]
+ v.reset(OpARMSUBconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSRA, x.Type)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (RSBshiftRAreg x y (MOVWconst [c]))
+ // cond:
+ // result: (RSBshiftRA x y [c])
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_2.AuxInt
+ v.reset(OpARMRSBshiftRA)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AuxInt = c
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMRSBshiftRL(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (RSBshiftRL (MOVWconst [c]) x [d])
+ // cond:
+ // result: (SUBconst [c] (SRLconst <x.Type> x [d]))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ d := v.AuxInt
+ v.reset(OpARMSUBconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSRLconst, x.Type)
+ v0.AddArg(x)
+ v0.AuxInt = d
+ v.AddArg(v0)
+ return true
+ }
+ // match: (RSBshiftRL x (MOVWconst [c]) [d])
+ // cond:
+ // result: (RSBconst x [int64(uint32(c)>>uint64(d))])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ d := v.AuxInt
+ v.reset(OpARMRSBconst)
+ v.AddArg(x)
+ v.AuxInt = int64(uint32(c) >> uint64(d))
+ return true
+ }
+ // match: (RSBshiftRL (SRLconst x [c]) x [d])
+ // cond: c==d
+ // result: (MOVWconst [0])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSRLconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v_0.Args[0]
+ if x != v.Args[1] {
+ break
+ }
+ d := v.AuxInt
+ if !(c == d) {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = 0
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMRSBshiftRLreg(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (RSBshiftRLreg (MOVWconst [c]) x y)
+ // cond:
+ // result: (SUBconst [c] (SRL <x.Type> x y))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ y := v.Args[2]
+ v.reset(OpARMSUBconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSRL, x.Type)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (RSBshiftRLreg x y (MOVWconst [c]))
+ // cond:
+ // result: (RSBshiftRL x y [c])
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_2.AuxInt
+ v.reset(OpARMRSBshiftRL)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AuxInt = c
+ return true
+ }
+ return false
+}
func rewriteValueARM_OpARMRSCconst(v *Value, config *Config) bool {
b := v.Block
_ = b
@@ -6600,22 +12699,298 @@ func rewriteValueARM_OpARMRSCconst(v *Value, config *Config) bool {
}
return false
}
-func rewriteValueARM_OpRsh16Ux16(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMRSCshiftLL(v *Value, config *Config) bool {
b := v.Block
_ = b
- // match: (Rsh16Ux16 x y)
+ // match: (RSCshiftLL (MOVWconst [c]) x [d] flags)
+ // cond:
+ // result: (SBCconst [c] (SLLconst <x.Type> x [d]) flags)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ d := v.AuxInt
+ flags := v.Args[2]
+ v.reset(OpARMSBCconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
+ v0.AddArg(x)
+ v0.AuxInt = d
+ v.AddArg(v0)
+ v.AddArg(flags)
+ return true
+ }
+ // match: (RSCshiftLL x (MOVWconst [c]) [d] flags)
// cond:
- // result: (SRL (ZeroExt16to32 x) (ZeroExt16to32 y))
+ // result: (RSCconst x [int64(uint32(c)<<uint64(d))] flags)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ d := v.AuxInt
+ flags := v.Args[2]
+ v.reset(OpARMRSCconst)
+ v.AddArg(x)
+ v.AuxInt = int64(uint32(c) << uint64(d))
+ v.AddArg(flags)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMRSCshiftLLreg(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (RSCshiftLLreg (MOVWconst [c]) x y flags)
+ // cond:
+ // result: (SBCconst [c] (SLL <x.Type> x y) flags)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ y := v.Args[2]
+ flags := v.Args[3]
+ v.reset(OpARMSBCconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSLL, x.Type)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v.AddArg(flags)
+ return true
+ }
+ // match: (RSCshiftLLreg x y (MOVWconst [c]) flags)
+ // cond:
+ // result: (RSCshiftLL x y [c] flags)
for {
x := v.Args[0]
y := v.Args[1]
- v.reset(OpARMSRL)
- v0 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v_2 := v.Args[2]
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_2.AuxInt
+ flags := v.Args[3]
+ v.reset(OpARMRSCshiftLL)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AuxInt = c
+ v.AddArg(flags)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMRSCshiftRA(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (RSCshiftRA (MOVWconst [c]) x [d] flags)
+ // cond:
+ // result: (SBCconst [c] (SRAconst <x.Type> x [d]) flags)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ d := v.AuxInt
+ flags := v.Args[2]
+ v.reset(OpARMSBCconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSRAconst, x.Type)
+ v0.AddArg(x)
+ v0.AuxInt = d
+ v.AddArg(v0)
+ v.AddArg(flags)
+ return true
+ }
+ // match: (RSCshiftRA x (MOVWconst [c]) [d] flags)
+ // cond:
+ // result: (RSCconst x [int64(int32(c)>>uint64(d))] flags)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ d := v.AuxInt
+ flags := v.Args[2]
+ v.reset(OpARMRSCconst)
+ v.AddArg(x)
+ v.AuxInt = int64(int32(c) >> uint64(d))
+ v.AddArg(flags)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMRSCshiftRAreg(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (RSCshiftRAreg (MOVWconst [c]) x y flags)
+ // cond:
+ // result: (SBCconst [c] (SRA <x.Type> x y) flags)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ y := v.Args[2]
+ flags := v.Args[3]
+ v.reset(OpARMSBCconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSRA, x.Type)
v0.AddArg(x)
+ v0.AddArg(y)
v.AddArg(v0)
+ v.AddArg(flags)
+ return true
+ }
+ // match: (RSCshiftRAreg x y (MOVWconst [c]) flags)
+ // cond:
+ // result: (RSCshiftRA x y [c] flags)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_2.AuxInt
+ flags := v.Args[3]
+ v.reset(OpARMRSCshiftRA)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AuxInt = c
+ v.AddArg(flags)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMRSCshiftRL(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (RSCshiftRL (MOVWconst [c]) x [d] flags)
+ // cond:
+ // result: (SBCconst [c] (SRLconst <x.Type> x [d]) flags)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ d := v.AuxInt
+ flags := v.Args[2]
+ v.reset(OpARMSBCconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSRLconst, x.Type)
+ v0.AddArg(x)
+ v0.AuxInt = d
+ v.AddArg(v0)
+ v.AddArg(flags)
+ return true
+ }
+ // match: (RSCshiftRL x (MOVWconst [c]) [d] flags)
+ // cond:
+ // result: (RSCconst x [int64(uint32(c)>>uint64(d))] flags)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ d := v.AuxInt
+ flags := v.Args[2]
+ v.reset(OpARMRSCconst)
+ v.AddArg(x)
+ v.AuxInt = int64(uint32(c) >> uint64(d))
+ v.AddArg(flags)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMRSCshiftRLreg(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (RSCshiftRLreg (MOVWconst [c]) x y flags)
+ // cond:
+ // result: (SBCconst [c] (SRL <x.Type> x y) flags)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ y := v.Args[2]
+ flags := v.Args[3]
+ v.reset(OpARMSBCconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSRL, x.Type)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v.AddArg(flags)
+ return true
+ }
+ // match: (RSCshiftRLreg x y (MOVWconst [c]) flags)
+ // cond:
+ // result: (RSCshiftRL x y [c] flags)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_2.AuxInt
+ flags := v.Args[3]
+ v.reset(OpARMRSCshiftRL)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AuxInt = c
+ v.AddArg(flags)
+ return true
+ }
+ return false
+}
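
RSC (reverse subtract with carry) takes a flags argument, so every rule in the RSCshift helpers threads that extra operand through unchanged; otherwise the shapes match the RSB rules exactly. Two representative rules from the matches above:

	(RSCshiftLL (MOVWconst [c]) x [d] flags) -> (SBCconst [c] (SLLconst <x.Type> x [d]) flags)
	(RSCshiftLLreg x y (MOVWconst [c]) flags) -> (RSCshiftLL x y [c] flags)
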
+func rewriteValueARM_OpRsh16Ux16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh16Ux16 x y)
+ // cond:
+ // result: (CMOVWHSconst (SRL <x.Type> (ZeroExt16to32 x) (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0])
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMCMOVWHSconst)
+ v0 := b.NewValue0(v.Line, OpARMSRL, x.Type)
v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
- v1.AddArg(y)
- v.AddArg(v1)
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ v3 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
+ v3.AuxInt = 256
+ v4 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v4.AddArg(y)
+ v3.AddArg(v4)
+ v.AddArg(v3)
+ v.AuxInt = 0
return true
}
}
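
This is the new conditional-move lowering for unsigned shifts by a variable amount. ARM register shifts consume only the low byte of the count, so counts 32..255 already yield 0, but a count of 256 or more would be reduced mod 256; CMPconst [256] plus CMOVWHSconst (move constant 0 when unsigned higher-or-same) patch that case back to the Go-defined result. A scalar model of what the lowered code computes (a sketch; the function name is illustrative):

	// Go defines x>>y == 0 for any y >= 32.
	func rsh32Ux32(x, y uint32) uint32 {
		srl := x >> (y & 255) // SRL: ARM uses the low 8 bits of y
		if y >= 256 {         // CMPconst [256]; HS = unsigned >=
			srl = 0 // CMOVWHSconst [0]
		}
		return srl
	}
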
@@ -6624,15 +12999,22 @@ func rewriteValueARM_OpRsh16Ux32(v *Value, config *Config) bool {
_ = b
// match: (Rsh16Ux32 x y)
// cond:
- // result: (SRL (ZeroExt16to32 x) y)
+ // result: (CMOVWHSconst (SRL <x.Type> (ZeroExt16to32 x) y) (CMPconst [256] y) [0])
for {
x := v.Args[0]
y := v.Args[1]
- v.reset(OpARMSRL)
- v0 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
- v0.AddArg(x)
+ v.reset(OpARMCMOVWHSconst)
+ v0 := b.NewValue0(v.Line, OpARMSRL, x.Type)
+ v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v0.AddArg(y)
v.AddArg(v0)
- v.AddArg(y)
+ v2 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
+ v2.AuxInt = 256
+ v2.AddArg(y)
+ v.AddArg(v2)
+ v.AuxInt = 0
return true
}
}
@@ -6702,17 +13084,23 @@ func rewriteValueARM_OpRsh16x16(v *Value, config *Config) bool {
_ = b
// match: (Rsh16x16 x y)
// cond:
- // result: (SRA (SignExt16to32 x) (ZeroExt16to32 y))
+ // result: (SRAcond (SignExt16to32 x) (ZeroExt16to32 y) (CMPconst [256] (ZeroExt16to32 y)))
for {
x := v.Args[0]
y := v.Args[1]
- v.reset(OpARMSRA)
+ v.reset(OpARMSRAcond)
v0 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
v0.AddArg(x)
v.AddArg(v0)
v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
v1.AddArg(y)
v.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
+ v2.AuxInt = 256
+ v3 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v.AddArg(v2)
return true
}
}
@@ -6721,15 +13109,19 @@ func rewriteValueARM_OpRsh16x32(v *Value, config *Config) bool {
_ = b
// match: (Rsh16x32 x y)
// cond:
- // result: (SRA (SignExt16to32 x) y)
+ // result: (SRAcond (SignExt16to32 x) y (CMPconst [256] y))
for {
x := v.Args[0]
y := v.Args[1]
- v.reset(OpARMSRA)
+ v.reset(OpARMSRAcond)
v0 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
v0.AddArg(x)
v.AddArg(v0)
v.AddArg(y)
+ v1 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
+ v1.AuxInt = 256
+ v1.AddArg(y)
+ v.AddArg(v1)
return true
}
}
@@ -6804,15 +13196,24 @@ func rewriteValueARM_OpRsh32Ux16(v *Value, config *Config) bool {
_ = b
// match: (Rsh32Ux16 x y)
// cond:
- // result: (SRL x (ZeroExt16to32 y))
+ // result: (CMOVWHSconst (SRL <x.Type> x (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0])
for {
x := v.Args[0]
y := v.Args[1]
- v.reset(OpARMSRL)
- v.AddArg(x)
- v0 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
- v0.AddArg(y)
+ v.reset(OpARMCMOVWHSconst)
+ v0 := b.NewValue0(v.Line, OpARMSRL, x.Type)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v1.AddArg(y)
+ v0.AddArg(v1)
v.AddArg(v0)
+ v2 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
+ v2.AuxInt = 256
+ v3 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v.AddArg(v2)
+ v.AuxInt = 0
return true
}
}
@@ -6821,13 +13222,20 @@ func rewriteValueARM_OpRsh32Ux32(v *Value, config *Config) bool {
_ = b
// match: (Rsh32Ux32 x y)
// cond:
- // result: (SRL x y)
+ // result: (CMOVWHSconst (SRL <x.Type> x y) (CMPconst [256] y) [0])
for {
x := v.Args[0]
y := v.Args[1]
- v.reset(OpARMSRL)
- v.AddArg(x)
- v.AddArg(y)
+ v.reset(OpARMCMOVWHSconst)
+ v0 := b.NewValue0(v.Line, OpARMSRL, x.Type)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
+ v1.AuxInt = 256
+ v1.AddArg(y)
+ v.AddArg(v1)
+ v.AuxInt = 0
return true
}
}
@@ -6892,15 +13300,21 @@ func rewriteValueARM_OpRsh32x16(v *Value, config *Config) bool {
_ = b
// match: (Rsh32x16 x y)
// cond:
- // result: (SRA x (ZeroExt16to32 y))
+ // result: (SRAcond x (ZeroExt16to32 y) (CMPconst [256] (ZeroExt16to32 y)))
for {
x := v.Args[0]
y := v.Args[1]
- v.reset(OpARMSRA)
+ v.reset(OpARMSRAcond)
v.AddArg(x)
v0 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
v0.AddArg(y)
v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
+ v1.AuxInt = 256
+ v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
return true
}
}
@@ -6909,13 +13323,17 @@ func rewriteValueARM_OpRsh32x32(v *Value, config *Config) bool {
_ = b
// match: (Rsh32x32 x y)
// cond:
- // result: (SRA x y)
+ // result: (SRAcond x y (CMPconst [256] y))
for {
x := v.Args[0]
y := v.Args[1]
- v.reset(OpARMSRA)
+ v.reset(OpARMSRAcond)
v.AddArg(x)
v.AddArg(y)
+ v0 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
+ v0.AuxInt = 256
+ v0.AddArg(y)
+ v.AddArg(v0)
return true
}
}
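
Signed shifts saturate instead of zeroing: Go specifies x >> y for y >= 32 as x >> 31 (all sign bits), so the signed cases use the new SRAcond op, selecting SRA x y when the count is unsigned-below 256 and SRAconst x [31] otherwise; counts 32..255 already saturate in the ARM shifter. Scalar model (a sketch):

	func rsh32x32(x int32, y uint32) int32 {
		if y >= 256 {
			return x >> 31 // SRAconst [31]: 0 or -1
		}
		return x >> (y & 255) // SRA
	}
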
@@ -6982,17 +13400,26 @@ func rewriteValueARM_OpRsh8Ux16(v *Value, config *Config) bool {
_ = b
// match: (Rsh8Ux16 x y)
// cond:
- // result: (SRL (ZeroExt8to32 x) (ZeroExt16to32 y))
+ // result: (CMOVWHSconst (SRL <x.Type> (ZeroExt8to32 x) (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0])
for {
x := v.Args[0]
y := v.Args[1]
- v.reset(OpARMSRL)
- v0 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
- v0.AddArg(x)
+ v.reset(OpARMCMOVWHSconst)
+ v0 := b.NewValue0(v.Line, OpARMSRL, x.Type)
+ v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
- v1.AddArg(y)
- v.AddArg(v1)
+ v3 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
+ v3.AuxInt = 256
+ v4 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v4.AddArg(y)
+ v3.AddArg(v4)
+ v.AddArg(v3)
+ v.AuxInt = 0
return true
}
}
@@ -7001,15 +13428,22 @@ func rewriteValueARM_OpRsh8Ux32(v *Value, config *Config) bool {
_ = b
// match: (Rsh8Ux32 x y)
// cond:
- // result: (SRL (ZeroExt8to32 x) y)
+ // result: (CMOVWHSconst (SRL <x.Type> (ZeroExt8to32 x) y) (CMPconst [256] y) [0])
for {
x := v.Args[0]
y := v.Args[1]
- v.reset(OpARMSRL)
- v0 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
- v0.AddArg(x)
+ v.reset(OpARMCMOVWHSconst)
+ v0 := b.NewValue0(v.Line, OpARMSRL, x.Type)
+ v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v0.AddArg(y)
v.AddArg(v0)
- v.AddArg(y)
+ v2 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
+ v2.AuxInt = 256
+ v2.AddArg(y)
+ v.AddArg(v2)
+ v.AuxInt = 0
return true
}
}
@@ -7079,17 +13513,23 @@ func rewriteValueARM_OpRsh8x16(v *Value, config *Config) bool {
_ = b
// match: (Rsh8x16 x y)
// cond:
- // result: (SRA (SignExt8to32 x) (ZeroExt16to32 y))
+ // result: (SRAcond (SignExt8to32 x) (ZeroExt16to32 y) (CMPconst [256] (ZeroExt16to32 y)))
for {
x := v.Args[0]
y := v.Args[1]
- v.reset(OpARMSRA)
+ v.reset(OpARMSRAcond)
v0 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
v0.AddArg(x)
v.AddArg(v0)
v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
v1.AddArg(y)
v.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
+ v2.AuxInt = 256
+ v3 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v.AddArg(v2)
return true
}
}
@@ -7098,15 +13538,19 @@ func rewriteValueARM_OpRsh8x32(v *Value, config *Config) bool {
_ = b
// match: (Rsh8x32 x y)
// cond:
- // result: (SRA (SignExt8to32 x) y)
+ // result: (SRAcond (SignExt8to32 x) y (CMPconst [256] y))
for {
x := v.Args[0]
y := v.Args[1]
- v.reset(OpARMSRA)
+ v.reset(OpARMSRAcond)
v0 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
v0.AddArg(x)
v.AddArg(v0)
v.AddArg(y)
+ v1 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
+ v1.AuxInt = 256
+ v1.AddArg(y)
+ v.AddArg(v1)
return true
}
}
@@ -7213,6 +13657,234 @@ func rewriteValueARM_OpARMSBC(v *Value, config *Config) bool {
v.AddArg(flags)
return true
}
+ // match: (SBC x (SLLconst [c] y) flags)
+ // cond:
+ // result: (SBCshiftLL x y [c] flags)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSLLconst {
+ break
+ }
+ c := v_1.AuxInt
+ y := v_1.Args[0]
+ flags := v.Args[2]
+ v.reset(OpARMSBCshiftLL)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AuxInt = c
+ v.AddArg(flags)
+ return true
+ }
+ // match: (SBC (SLLconst [c] y) x flags)
+ // cond:
+ // result: (RSCshiftLL x y [c] flags)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSLLconst {
+ break
+ }
+ c := v_0.AuxInt
+ y := v_0.Args[0]
+ x := v.Args[1]
+ flags := v.Args[2]
+ v.reset(OpARMRSCshiftLL)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AuxInt = c
+ v.AddArg(flags)
+ return true
+ }
+ // match: (SBC x (SRLconst [c] y) flags)
+ // cond:
+ // result: (SBCshiftRL x y [c] flags)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSRLconst {
+ break
+ }
+ c := v_1.AuxInt
+ y := v_1.Args[0]
+ flags := v.Args[2]
+ v.reset(OpARMSBCshiftRL)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AuxInt = c
+ v.AddArg(flags)
+ return true
+ }
+ // match: (SBC (SRLconst [c] y) x flags)
+ // cond:
+ // result: (RSCshiftRL x y [c] flags)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSRLconst {
+ break
+ }
+ c := v_0.AuxInt
+ y := v_0.Args[0]
+ x := v.Args[1]
+ flags := v.Args[2]
+ v.reset(OpARMRSCshiftRL)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AuxInt = c
+ v.AddArg(flags)
+ return true
+ }
+ // match: (SBC x (SRAconst [c] y) flags)
+ // cond:
+ // result: (SBCshiftRA x y [c] flags)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSRAconst {
+ break
+ }
+ c := v_1.AuxInt
+ y := v_1.Args[0]
+ flags := v.Args[2]
+ v.reset(OpARMSBCshiftRA)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AuxInt = c
+ v.AddArg(flags)
+ return true
+ }
+ // match: (SBC (SRAconst [c] y) x flags)
+ // cond:
+ // result: (RSCshiftRA x y [c] flags)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSRAconst {
+ break
+ }
+ c := v_0.AuxInt
+ y := v_0.Args[0]
+ x := v.Args[1]
+ flags := v.Args[2]
+ v.reset(OpARMRSCshiftRA)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AuxInt = c
+ v.AddArg(flags)
+ return true
+ }
+ // match: (SBC x (SLL y z) flags)
+ // cond:
+ // result: (SBCshiftLLreg x y z flags)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSLL {
+ break
+ }
+ y := v_1.Args[0]
+ z := v_1.Args[1]
+ flags := v.Args[2]
+ v.reset(OpARMSBCshiftLLreg)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(z)
+ v.AddArg(flags)
+ return true
+ }
+ // match: (SBC (SLL y z) x flags)
+ // cond:
+ // result: (RSCshiftLLreg x y z flags)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSLL {
+ break
+ }
+ y := v_0.Args[0]
+ z := v_0.Args[1]
+ x := v.Args[1]
+ flags := v.Args[2]
+ v.reset(OpARMRSCshiftLLreg)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(z)
+ v.AddArg(flags)
+ return true
+ }
+ // match: (SBC x (SRL y z) flags)
+ // cond:
+ // result: (SBCshiftRLreg x y z flags)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSRL {
+ break
+ }
+ y := v_1.Args[0]
+ z := v_1.Args[1]
+ flags := v.Args[2]
+ v.reset(OpARMSBCshiftRLreg)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(z)
+ v.AddArg(flags)
+ return true
+ }
+ // match: (SBC (SRL y z) x flags)
+ // cond:
+ // result: (RSCshiftRLreg x y z flags)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSRL {
+ break
+ }
+ y := v_0.Args[0]
+ z := v_0.Args[1]
+ x := v.Args[1]
+ flags := v.Args[2]
+ v.reset(OpARMRSCshiftRLreg)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(z)
+ v.AddArg(flags)
+ return true
+ }
+ // match: (SBC x (SRA y z) flags)
+ // cond:
+ // result: (SBCshiftRAreg x y z flags)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSRA {
+ break
+ }
+ y := v_1.Args[0]
+ z := v_1.Args[1]
+ flags := v.Args[2]
+ v.reset(OpARMSBCshiftRAreg)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(z)
+ v.AddArg(flags)
+ return true
+ }
+ // match: (SBC (SRA y z) x flags)
+ // cond:
+ // result: (RSCshiftRAreg x y z flags)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSRA {
+ break
+ }
+ y := v_0.Args[0]
+ z := v_0.Args[1]
+ x := v.Args[1]
+ flags := v.Args[2]
+ v.reset(OpARMRSCshiftRAreg)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(z)
+ v.AddArg(flags)
+ return true
+ }
return false
}
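
SBC is the borrow-consuming subtract, so its folds mirror SUB's with a trailing flags operand, and swapping which side holds the shift flips SBC to RSC just as SUB flips to RSB. Two representative rules from the matches above:

	(SBC x (SLLconst [c] y) flags) -> (SBCshiftLL x y [c] flags)
	(SBC (SLLconst [c] y) x flags) -> (RSCshiftLL x y [c] flags)
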
func rewriteValueARM_OpARMSBCconst(v *Value, config *Config) bool {
@@ -7256,6 +13928,273 @@ func rewriteValueARM_OpARMSBCconst(v *Value, config *Config) bool {
}
return false
}
+func rewriteValueARM_OpARMSBCshiftLL(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (SBCshiftLL (MOVWconst [c]) x [d] flags)
+ // cond:
+ // result: (RSCconst [c] (SLLconst <x.Type> x [d]) flags)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ d := v.AuxInt
+ flags := v.Args[2]
+ v.reset(OpARMRSCconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
+ v0.AddArg(x)
+ v0.AuxInt = d
+ v.AddArg(v0)
+ v.AddArg(flags)
+ return true
+ }
+ // match: (SBCshiftLL x (MOVWconst [c]) [d] flags)
+ // cond:
+ // result: (SBCconst x [int64(uint32(c)<<uint64(d))] flags)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ d := v.AuxInt
+ flags := v.Args[2]
+ v.reset(OpARMSBCconst)
+ v.AddArg(x)
+ v.AuxInt = int64(uint32(c) << uint64(d))
+ v.AddArg(flags)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMSBCshiftLLreg(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (SBCshiftLLreg (MOVWconst [c]) x y flags)
+ // cond:
+ // result: (RSCconst [c] (SLL <x.Type> x y) flags)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ y := v.Args[2]
+ flags := v.Args[3]
+ v.reset(OpARMRSCconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSLL, x.Type)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v.AddArg(flags)
+ return true
+ }
+ // match: (SBCshiftLLreg x y (MOVWconst [c]) flags)
+ // cond:
+ // result: (SBCshiftLL x y [c] flags)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_2.AuxInt
+ flags := v.Args[3]
+ v.reset(OpARMSBCshiftLL)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AuxInt = c
+ v.AddArg(flags)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMSBCshiftRA(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (SBCshiftRA (MOVWconst [c]) x [d] flags)
+ // cond:
+ // result: (RSCconst [c] (SRAconst <x.Type> x [d]) flags)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ d := v.AuxInt
+ flags := v.Args[2]
+ v.reset(OpARMRSCconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSRAconst, x.Type)
+ v0.AddArg(x)
+ v0.AuxInt = d
+ v.AddArg(v0)
+ v.AddArg(flags)
+ return true
+ }
+ // match: (SBCshiftRA x (MOVWconst [c]) [d] flags)
+ // cond:
+ // result: (SBCconst x [int64(int32(c)>>uint64(d))] flags)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ d := v.AuxInt
+ flags := v.Args[2]
+ v.reset(OpARMSBCconst)
+ v.AddArg(x)
+ v.AuxInt = int64(int32(c) >> uint64(d))
+ v.AddArg(flags)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMSBCshiftRAreg(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (SBCshiftRAreg (MOVWconst [c]) x y flags)
+ // cond:
+ // result: (RSCconst [c] (SRA <x.Type> x y) flags)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ y := v.Args[2]
+ flags := v.Args[3]
+ v.reset(OpARMRSCconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSRA, x.Type)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v.AddArg(flags)
+ return true
+ }
+ // match: (SBCshiftRAreg x y (MOVWconst [c]) flags)
+ // cond:
+ // result: (SBCshiftRA x y [c] flags)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_2.AuxInt
+ flags := v.Args[3]
+ v.reset(OpARMSBCshiftRA)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AuxInt = c
+ v.AddArg(flags)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMSBCshiftRL(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (SBCshiftRL (MOVWconst [c]) x [d] flags)
+ // cond:
+ // result: (RSCconst [c] (SRLconst <x.Type> x [d]) flags)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ d := v.AuxInt
+ flags := v.Args[2]
+ v.reset(OpARMRSCconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSRLconst, x.Type)
+ v0.AddArg(x)
+ v0.AuxInt = d
+ v.AddArg(v0)
+ v.AddArg(flags)
+ return true
+ }
+ // match: (SBCshiftRL x (MOVWconst [c]) [d] flags)
+ // cond:
+ // result: (SBCconst x [int64(uint32(c)>>uint64(d))] flags)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ d := v.AuxInt
+ flags := v.Args[2]
+ v.reset(OpARMSBCconst)
+ v.AddArg(x)
+ v.AuxInt = int64(uint32(c) >> uint64(d))
+ v.AddArg(flags)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMSBCshiftRLreg(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (SBCshiftRLreg (MOVWconst [c]) x y flags)
+ // cond:
+ // result: (RSCconst [c] (SRL <x.Type> x y) flags)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ y := v.Args[2]
+ flags := v.Args[3]
+ v.reset(OpARMRSCconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSRL, x.Type)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v.AddArg(flags)
+ return true
+ }
+ // match: (SBCshiftRLreg x y (MOVWconst [c]) flags)
+ // cond:
+ // result: (SBCshiftRL x y [c] flags)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_2.AuxInt
+ flags := v.Args[3]
+ v.reset(OpARMSBCshiftRL)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AuxInt = c
+ v.AddArg(flags)
+ return true
+ }
+ return false
+}
func rewriteValueARM_OpARMSLL(v *Value, config *Config) bool {
b := v.Block
_ = b
@@ -7315,6 +14254,83 @@ func rewriteValueARM_OpARMSRA(v *Value, config *Config) bool {
}
return false
}
+func rewriteValueARM_OpARMSRAcond(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (SRAcond x _ (FlagEQ))
+ // cond:
+ // result: (SRAconst x [31])
+ for {
+ x := v.Args[0]
+ v_2 := v.Args[2]
+ if v_2.Op != OpARMFlagEQ {
+ break
+ }
+ v.reset(OpARMSRAconst)
+ v.AddArg(x)
+ v.AuxInt = 31
+ return true
+ }
+ // match: (SRAcond x y (FlagLT_ULT))
+ // cond:
+ // result: (SRA x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpARMFlagLT_ULT {
+ break
+ }
+ v.reset(OpARMSRA)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (SRAcond x _ (FlagLT_UGT))
+ // cond:
+ // result: (SRAconst x [31])
+ for {
+ x := v.Args[0]
+ v_2 := v.Args[2]
+ if v_2.Op != OpARMFlagLT_UGT {
+ break
+ }
+ v.reset(OpARMSRAconst)
+ v.AddArg(x)
+ v.AuxInt = 31
+ return true
+ }
+ // match: (SRAcond x y (FlagGT_ULT))
+ // cond:
+ // result: (SRA x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpARMFlagGT_ULT {
+ break
+ }
+ v.reset(OpARMSRA)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (SRAcond x _ (FlagGT_UGT))
+ // cond:
+ // result: (SRAconst x [31])
+ for {
+ x := v.Args[0]
+ v_2 := v.Args[2]
+ if v_2.Op != OpARMFlagGT_UGT {
+ break
+ }
+ v.reset(OpARMSRAconst)
+ v.AddArg(x)
+ v.AuxInt = 31
+ return true
+ }
+ return false
+}
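
FlagEQ, FlagLT_ULT, FlagLT_UGT, FlagGT_ULT and FlagGT_UGT are pseudo-ops for comparisons whose outcome was determined by constant folding. SRAcond's flags come from (CMPconst [256] y), so the unsigned-lower cases mean y < 256 and keep a real SRA, while EQ and the unsigned-higher cases mean y >= 256 and clamp to SRAconst [31]. Two of the rules above, in rule syntax:

	(SRAcond x y (FlagLT_ULT)) -> (SRA x y)
	(SRAcond x _ (FlagGT_UGT)) -> (SRAconst x [31])
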
func rewriteValueARM_OpARMSRAconst(v *Value, config *Config) bool {
b := v.Block
_ = b
@@ -7406,6 +14422,210 @@ func rewriteValueARM_OpARMSUB(v *Value, config *Config) bool {
v.AddArg(x)
return true
}
+ // match: (SUB x (SLLconst [c] y))
+ // cond:
+ // result: (SUBshiftLL x y [c])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSLLconst {
+ break
+ }
+ c := v_1.AuxInt
+ y := v_1.Args[0]
+ v.reset(OpARMSUBshiftLL)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AuxInt = c
+ return true
+ }
+ // match: (SUB (SLLconst [c] y) x)
+ // cond:
+ // result: (RSBshiftLL x y [c])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSLLconst {
+ break
+ }
+ c := v_0.AuxInt
+ y := v_0.Args[0]
+ x := v.Args[1]
+ v.reset(OpARMRSBshiftLL)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AuxInt = c
+ return true
+ }
+ // match: (SUB x (SRLconst [c] y))
+ // cond:
+ // result: (SUBshiftRL x y [c])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSRLconst {
+ break
+ }
+ c := v_1.AuxInt
+ y := v_1.Args[0]
+ v.reset(OpARMSUBshiftRL)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AuxInt = c
+ return true
+ }
+ // match: (SUB (SRLconst [c] y) x)
+ // cond:
+ // result: (RSBshiftRL x y [c])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSRLconst {
+ break
+ }
+ c := v_0.AuxInt
+ y := v_0.Args[0]
+ x := v.Args[1]
+ v.reset(OpARMRSBshiftRL)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AuxInt = c
+ return true
+ }
+ // match: (SUB x (SRAconst [c] y))
+ // cond:
+ // result: (SUBshiftRA x y [c])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSRAconst {
+ break
+ }
+ c := v_1.AuxInt
+ y := v_1.Args[0]
+ v.reset(OpARMSUBshiftRA)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AuxInt = c
+ return true
+ }
+ // match: (SUB (SRAconst [c] y) x)
+ // cond:
+ // result: (RSBshiftRA x y [c])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSRAconst {
+ break
+ }
+ c := v_0.AuxInt
+ y := v_0.Args[0]
+ x := v.Args[1]
+ v.reset(OpARMRSBshiftRA)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AuxInt = c
+ return true
+ }
+ // match: (SUB x (SLL y z))
+ // cond:
+ // result: (SUBshiftLLreg x y z)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSLL {
+ break
+ }
+ y := v_1.Args[0]
+ z := v_1.Args[1]
+ v.reset(OpARMSUBshiftLLreg)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(z)
+ return true
+ }
+ // match: (SUB (SLL y z) x)
+ // cond:
+ // result: (RSBshiftLLreg x y z)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSLL {
+ break
+ }
+ y := v_0.Args[0]
+ z := v_0.Args[1]
+ x := v.Args[1]
+ v.reset(OpARMRSBshiftLLreg)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(z)
+ return true
+ }
+ // match: (SUB x (SRL y z))
+ // cond:
+ // result: (SUBshiftRLreg x y z)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSRL {
+ break
+ }
+ y := v_1.Args[0]
+ z := v_1.Args[1]
+ v.reset(OpARMSUBshiftRLreg)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(z)
+ return true
+ }
+ // match: (SUB (SRL y z) x)
+ // cond:
+ // result: (RSBshiftRLreg x y z)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSRL {
+ break
+ }
+ y := v_0.Args[0]
+ z := v_0.Args[1]
+ x := v.Args[1]
+ v.reset(OpARMRSBshiftRLreg)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(z)
+ return true
+ }
+ // match: (SUB x (SRA y z))
+ // cond:
+ // result: (SUBshiftRAreg x y z)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSRA {
+ break
+ }
+ y := v_1.Args[0]
+ z := v_1.Args[1]
+ v.reset(OpARMSUBshiftRAreg)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(z)
+ return true
+ }
+ // match: (SUB (SRA y z) x)
+ // cond:
+ // result: (RSBshiftRAreg x y z)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSRA {
+ break
+ }
+ y := v_0.Args[0]
+ z := v_0.Args[1]
+ x := v.Args[1]
+ v.reset(OpARMRSBshiftRAreg)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(z)
+ return true
+ }
// match: (SUB x x)
// cond:
// result: (MOVWconst [0])
@@ -7453,6 +14673,453 @@ func rewriteValueARM_OpARMSUBS(v *Value, config *Config) bool {
v.AddArg(x)
return true
}
+ // match: (SUBS x (SLLconst [c] y))
+ // cond:
+ // result: (SUBSshiftLL x y [c])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSLLconst {
+ break
+ }
+ c := v_1.AuxInt
+ y := v_1.Args[0]
+ v.reset(OpARMSUBSshiftLL)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AuxInt = c
+ return true
+ }
+ // match: (SUBS (SLLconst [c] y) x)
+ // cond:
+ // result: (RSBSshiftLL x y [c])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSLLconst {
+ break
+ }
+ c := v_0.AuxInt
+ y := v_0.Args[0]
+ x := v.Args[1]
+ v.reset(OpARMRSBSshiftLL)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AuxInt = c
+ return true
+ }
+ // match: (SUBS x (SRLconst [c] y))
+ // cond:
+ // result: (SUBSshiftRL x y [c])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSRLconst {
+ break
+ }
+ c := v_1.AuxInt
+ y := v_1.Args[0]
+ v.reset(OpARMSUBSshiftRL)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AuxInt = c
+ return true
+ }
+ // match: (SUBS (SRLconst [c] y) x)
+ // cond:
+ // result: (RSBSshiftRL x y [c])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSRLconst {
+ break
+ }
+ c := v_0.AuxInt
+ y := v_0.Args[0]
+ x := v.Args[1]
+ v.reset(OpARMRSBSshiftRL)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AuxInt = c
+ return true
+ }
+ // match: (SUBS x (SRAconst [c] y))
+ // cond:
+ // result: (SUBSshiftRA x y [c])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSRAconst {
+ break
+ }
+ c := v_1.AuxInt
+ y := v_1.Args[0]
+ v.reset(OpARMSUBSshiftRA)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AuxInt = c
+ return true
+ }
+ // match: (SUBS (SRAconst [c] y) x)
+ // cond:
+ // result: (RSBSshiftRA x y [c])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSRAconst {
+ break
+ }
+ c := v_0.AuxInt
+ y := v_0.Args[0]
+ x := v.Args[1]
+ v.reset(OpARMRSBSshiftRA)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AuxInt = c
+ return true
+ }
+ // match: (SUBS x (SLL y z))
+ // cond:
+ // result: (SUBSshiftLLreg x y z)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSLL {
+ break
+ }
+ y := v_1.Args[0]
+ z := v_1.Args[1]
+ v.reset(OpARMSUBSshiftLLreg)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(z)
+ return true
+ }
+ // match: (SUBS (SLL y z) x)
+ // cond:
+ // result: (RSBSshiftLLreg x y z)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSLL {
+ break
+ }
+ y := v_0.Args[0]
+ z := v_0.Args[1]
+ x := v.Args[1]
+ v.reset(OpARMRSBSshiftLLreg)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(z)
+ return true
+ }
+ // match: (SUBS x (SRL y z))
+ // cond:
+ // result: (SUBSshiftRLreg x y z)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSRL {
+ break
+ }
+ y := v_1.Args[0]
+ z := v_1.Args[1]
+ v.reset(OpARMSUBSshiftRLreg)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(z)
+ return true
+ }
+ // match: (SUBS (SRL y z) x)
+ // cond:
+ // result: (RSBSshiftRLreg x y z)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSRL {
+ break
+ }
+ y := v_0.Args[0]
+ z := v_0.Args[1]
+ x := v.Args[1]
+ v.reset(OpARMRSBSshiftRLreg)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(z)
+ return true
+ }
+ // match: (SUBS x (SRA y z))
+ // cond:
+ // result: (SUBSshiftRAreg x y z)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSRA {
+ break
+ }
+ y := v_1.Args[0]
+ z := v_1.Args[1]
+ v.reset(OpARMSUBSshiftRAreg)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(z)
+ return true
+ }
+ // match: (SUBS (SRA y z) x)
+ // cond:
+ // result: (RSBSshiftRAreg x y z)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSRA {
+ break
+ }
+ y := v_0.Args[0]
+ z := v_0.Args[1]
+ x := v.Args[1]
+ v.reset(OpARMRSBSshiftRAreg)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(z)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMSUBSshiftLL(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (SUBSshiftLL (MOVWconst [c]) x [d])
+ // cond:
+ // result: (RSBSconst [c] (SLLconst <x.Type> x [d]))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ d := v.AuxInt
+ v.reset(OpARMRSBSconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
+ v0.AddArg(x)
+ v0.AuxInt = d
+ v.AddArg(v0)
+ return true
+ }
+ // match: (SUBSshiftLL x (MOVWconst [c]) [d])
+ // cond:
+ // result: (SUBSconst x [int64(uint32(c)<<uint64(d))])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ d := v.AuxInt
+ v.reset(OpARMSUBSconst)
+ v.AddArg(x)
+ v.AuxInt = int64(uint32(c) << uint64(d))
+ return true
+ }
+ return false
+}
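
When the MOVWconst lands on the left of a flag-setting subtract, the rule above cannot simply swap operands (subtraction is not commutative); instead it re-expresses c − (x<<d) as a reverse subtract from the constant, materializing the shift as its own value. A hedged arithmetic sketch of the equivalence (the helper name is illustrative):

	package armshift

	// rsbsConstEquiv mirrors the shape of (SUBSshiftLL (MOVWconst [c]) x [d])
	// -> (RSBSconst [c] (SLLconst <x.Type> x [d])): both compute c - (x << d);
	// the rewrite only changes which machine op carries the constant.
	func rsbsConstEquiv(c, x int32, d uint) int32 {
		return c - x<<d
	}
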
+func rewriteValueARM_OpARMSUBSshiftLLreg(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (SUBSshiftLLreg (MOVWconst [c]) x y)
+ // cond:
+ // result: (RSBSconst [c] (SLL <x.Type> x y))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ y := v.Args[2]
+ v.reset(OpARMRSBSconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSLL, x.Type)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (SUBSshiftLLreg x y (MOVWconst [c]))
+ // cond:
+ // result: (SUBSshiftLL x y [c])
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_2.AuxInt
+ v.reset(OpARMSUBSshiftLL)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AuxInt = c
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMSUBSshiftRA(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (SUBSshiftRA (MOVWconst [c]) x [d])
+ // cond:
+ // result: (RSBSconst [c] (SRAconst <x.Type> x [d]))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ d := v.AuxInt
+ v.reset(OpARMRSBSconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSRAconst, x.Type)
+ v0.AddArg(x)
+ v0.AuxInt = d
+ v.AddArg(v0)
+ return true
+ }
+ // match: (SUBSshiftRA x (MOVWconst [c]) [d])
+ // cond:
+ // result: (SUBSconst x [int64(int32(c)>>uint64(d))])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ d := v.AuxInt
+ v.reset(OpARMSUBSconst)
+ v.AddArg(x)
+ v.AuxInt = int64(int32(c) >> uint64(d))
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMSUBSshiftRAreg(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (SUBSshiftRAreg (MOVWconst [c]) x y)
+ // cond:
+ // result: (RSBSconst [c] (SRA <x.Type> x y))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ y := v.Args[2]
+ v.reset(OpARMRSBSconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSRA, x.Type)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (SUBSshiftRAreg x y (MOVWconst [c]))
+ // cond:
+ // result: (SUBSshiftRA x y [c])
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_2.AuxInt
+ v.reset(OpARMSUBSshiftRA)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AuxInt = c
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMSUBSshiftRL(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (SUBSshiftRL (MOVWconst [c]) x [d])
+ // cond:
+ // result: (RSBSconst [c] (SRLconst <x.Type> x [d]))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ d := v.AuxInt
+ v.reset(OpARMRSBSconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSRLconst, x.Type)
+ v0.AddArg(x)
+ v0.AuxInt = d
+ v.AddArg(v0)
+ return true
+ }
+ // match: (SUBSshiftRL x (MOVWconst [c]) [d])
+ // cond:
+ // result: (SUBSconst x [int64(uint32(c)>>uint64(d))])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ d := v.AuxInt
+ v.reset(OpARMSUBSconst)
+ v.AddArg(x)
+ v.AuxInt = int64(uint32(c) >> uint64(d))
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMSUBSshiftRLreg(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (SUBSshiftRLreg (MOVWconst [c]) x y)
+ // cond:
+ // result: (RSBSconst [c] (SRL <x.Type> x y))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ y := v.Args[2]
+ v.reset(OpARMRSBSconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSRL, x.Type)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (SUBSshiftRLreg x y (MOVWconst [c]))
+ // cond:
+ // result: (SUBSshiftRL x y [c])
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_2.AuxInt
+ v.reset(OpARMSUBSshiftRL)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AuxInt = c
+ return true
+ }
return false
}
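
Throughout these folds the new immediate is computed in 32-bit precision before being widened back to int64 for AuxInt: left and logical-right shifts go through uint32, so vacated bits fill with zeros, while the arithmetic-right shift goes through int32, so the sign bit replicates. A standalone check of that arithmetic (not part of the CL):

	package main

	import "fmt"

	func main() {
		c, d := int64(-8), uint64(1)
		fmt.Println(int64(uint32(c) >> d)) // logical right: 2147483644 (0x7FFFFFFC)
		fmt.Println(int64(int32(c) >> d))  // arithmetic right: -4
		fmt.Println(int64(uint32(c) << d)) // left: 4294967280 (0xFFFFFFF0)
	}
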
func rewriteValueARM_OpARMSUBconst(v *Value, config *Config) bool {
@@ -7535,6 +15202,312 @@ func rewriteValueARM_OpARMSUBconst(v *Value, config *Config) bool {
}
return false
}
+func rewriteValueARM_OpARMSUBshiftLL(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (SUBshiftLL (MOVWconst [c]) x [d])
+ // cond:
+ // result: (RSBconst [c] (SLLconst <x.Type> x [d]))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ d := v.AuxInt
+ v.reset(OpARMRSBconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
+ v0.AddArg(x)
+ v0.AuxInt = d
+ v.AddArg(v0)
+ return true
+ }
+ // match: (SUBshiftLL x (MOVWconst [c]) [d])
+ // cond:
+ // result: (SUBconst x [int64(uint32(c)<<uint64(d))])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ d := v.AuxInt
+ v.reset(OpARMSUBconst)
+ v.AddArg(x)
+ v.AuxInt = int64(uint32(c) << uint64(d))
+ return true
+ }
+	// match: (SUBshiftLL (SLLconst x [c]) x [d])
+	// cond: c==d
+	// result: (MOVWconst [0])
+	for {
+		v_0 := v.Args[0]
+		if v_0.Op != OpARMSLLconst {
+			break
+		}
+		c := v_0.AuxInt
+		x := v_0.Args[0]
+		if x != v.Args[1] {
+			break
+		}
+		d := v.AuxInt
+		if !(c == d) {
+			break
+		}
+		v.reset(OpARMMOVWconst)
+		v.AuxInt = 0
+		return true
+	}
+ return false
+}
+func rewriteValueARM_OpARMSUBshiftLLreg(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (SUBshiftLLreg (MOVWconst [c]) x y)
+ // cond:
+ // result: (RSBconst [c] (SLL <x.Type> x y))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ y := v.Args[2]
+ v.reset(OpARMRSBconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSLL, x.Type)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (SUBshiftLLreg x y (MOVWconst [c]))
+ // cond:
+ // result: (SUBshiftLL x y [c])
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_2.AuxInt
+ v.reset(OpARMSUBshiftLL)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AuxInt = c
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMSUBshiftRA(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (SUBshiftRA (MOVWconst [c]) x [d])
+ // cond:
+ // result: (RSBconst [c] (SRAconst <x.Type> x [d]))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ d := v.AuxInt
+ v.reset(OpARMRSBconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSRAconst, x.Type)
+ v0.AddArg(x)
+ v0.AuxInt = d
+ v.AddArg(v0)
+ return true
+ }
+ // match: (SUBshiftRA x (MOVWconst [c]) [d])
+ // cond:
+ // result: (SUBconst x [int64(int32(c)>>uint64(d))])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ d := v.AuxInt
+ v.reset(OpARMSUBconst)
+ v.AddArg(x)
+ v.AuxInt = int64(int32(c) >> uint64(d))
+ return true
+ }
+	// match: (SUBshiftRA (SRAconst x [c]) x [d])
+	// cond: c==d
+	// result: (MOVWconst [0])
+	for {
+		v_0 := v.Args[0]
+		if v_0.Op != OpARMSRAconst {
+			break
+		}
+		c := v_0.AuxInt
+		x := v_0.Args[0]
+		if x != v.Args[1] {
+			break
+		}
+		d := v.AuxInt
+		if !(c == d) {
+			break
+		}
+		v.reset(OpARMMOVWconst)
+		v.AuxInt = 0
+		return true
+	}
+ return false
+}
+func rewriteValueARM_OpARMSUBshiftRAreg(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (SUBshiftRAreg (MOVWconst [c]) x y)
+ // cond:
+ // result: (RSBconst [c] (SRA <x.Type> x y))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ y := v.Args[2]
+ v.reset(OpARMRSBconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSRA, x.Type)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (SUBshiftRAreg x y (MOVWconst [c]))
+ // cond:
+ // result: (SUBshiftRA x y [c])
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_2.AuxInt
+ v.reset(OpARMSUBshiftRA)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AuxInt = c
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMSUBshiftRL(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (SUBshiftRL (MOVWconst [c]) x [d])
+ // cond:
+ // result: (RSBconst [c] (SRLconst <x.Type> x [d]))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ d := v.AuxInt
+ v.reset(OpARMRSBconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSRLconst, x.Type)
+ v0.AddArg(x)
+ v0.AuxInt = d
+ v.AddArg(v0)
+ return true
+ }
+ // match: (SUBshiftRL x (MOVWconst [c]) [d])
+ // cond:
+ // result: (SUBconst x [int64(uint32(c)>>uint64(d))])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ d := v.AuxInt
+ v.reset(OpARMSUBconst)
+ v.AddArg(x)
+ v.AuxInt = int64(uint32(c) >> uint64(d))
+ return true
+ }
+	// match: (SUBshiftRL (SRLconst x [c]) x [d])
+	// cond: c==d
+	// result: (MOVWconst [0])
+	for {
+		v_0 := v.Args[0]
+		if v_0.Op != OpARMSRLconst {
+			break
+		}
+		c := v_0.AuxInt
+		x := v_0.Args[0]
+		if x != v.Args[1] {
+			break
+		}
+		d := v.AuxInt
+		if !(c == d) {
+			break
+		}
+		v.reset(OpARMMOVWconst)
+		v.AuxInt = 0
+		return true
+	}
+ return false
+}
+func rewriteValueARM_OpARMSUBshiftRLreg(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (SUBshiftRLreg (MOVWconst [c]) x y)
+ // cond:
+ // result: (RSBconst [c] (SRL <x.Type> x y))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ y := v.Args[2]
+ v.reset(OpARMRSBconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSRL, x.Type)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (SUBshiftRLreg x y (MOVWconst [c]))
+ // cond:
+ // result: (SUBshiftRL x y [c])
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_2.AuxInt
+ v.reset(OpARMSUBshiftRL)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AuxInt = c
+ return true
+ }
+ return false
+}
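
The MOVWconst [0] cases above fire when the unshifted operand is subtracted, after shifting, from an already-shifted copy of itself: with equal shift amounts the two subtrees denote the same value, so the difference is identically zero and folds away at compile time. Illustrative only (Go itself may fold such an expression earlier; the SSA rule catches trees that only become identical after other rewrites):

	package armshift

	// With equal shift counts the two subtrees coincide, so
	// (SUBshiftLL (SLLconst x [c]) x [c]) is x<<c - x<<c == 0 for any x.
	func cancelsToZero(x uint32, c uint) uint32 {
		return x<<c - x<<c // the SSA rule folds this tree to MOVWconst [0]
	}
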
func rewriteValueARM_OpSelect0(v *Value, config *Config) bool {
b := v.Block
_ = b
@@ -7953,6 +15926,210 @@ func rewriteValueARM_OpARMXOR(v *Value, config *Config) bool {
v.AddArg(x)
return true
}
+ // match: (XOR x (SLLconst [c] y))
+ // cond:
+ // result: (XORshiftLL x y [c])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSLLconst {
+ break
+ }
+ c := v_1.AuxInt
+ y := v_1.Args[0]
+ v.reset(OpARMXORshiftLL)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AuxInt = c
+ return true
+ }
+ // match: (XOR (SLLconst [c] y) x)
+ // cond:
+ // result: (XORshiftLL x y [c])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSLLconst {
+ break
+ }
+ c := v_0.AuxInt
+ y := v_0.Args[0]
+ x := v.Args[1]
+ v.reset(OpARMXORshiftLL)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AuxInt = c
+ return true
+ }
+ // match: (XOR x (SRLconst [c] y))
+ // cond:
+ // result: (XORshiftRL x y [c])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSRLconst {
+ break
+ }
+ c := v_1.AuxInt
+ y := v_1.Args[0]
+ v.reset(OpARMXORshiftRL)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AuxInt = c
+ return true
+ }
+ // match: (XOR (SRLconst [c] y) x)
+ // cond:
+ // result: (XORshiftRL x y [c])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSRLconst {
+ break
+ }
+ c := v_0.AuxInt
+ y := v_0.Args[0]
+ x := v.Args[1]
+ v.reset(OpARMXORshiftRL)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AuxInt = c
+ return true
+ }
+ // match: (XOR x (SRAconst [c] y))
+ // cond:
+ // result: (XORshiftRA x y [c])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSRAconst {
+ break
+ }
+ c := v_1.AuxInt
+ y := v_1.Args[0]
+ v.reset(OpARMXORshiftRA)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AuxInt = c
+ return true
+ }
+ // match: (XOR (SRAconst [c] y) x)
+ // cond:
+ // result: (XORshiftRA x y [c])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSRAconst {
+ break
+ }
+ c := v_0.AuxInt
+ y := v_0.Args[0]
+ x := v.Args[1]
+ v.reset(OpARMXORshiftRA)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AuxInt = c
+ return true
+ }
+ // match: (XOR x (SLL y z))
+ // cond:
+ // result: (XORshiftLLreg x y z)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSLL {
+ break
+ }
+ y := v_1.Args[0]
+ z := v_1.Args[1]
+ v.reset(OpARMXORshiftLLreg)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(z)
+ return true
+ }
+ // match: (XOR (SLL y z) x)
+ // cond:
+ // result: (XORshiftLLreg x y z)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSLL {
+ break
+ }
+ y := v_0.Args[0]
+ z := v_0.Args[1]
+ x := v.Args[1]
+ v.reset(OpARMXORshiftLLreg)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(z)
+ return true
+ }
+ // match: (XOR x (SRL y z))
+ // cond:
+ // result: (XORshiftRLreg x y z)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSRL {
+ break
+ }
+ y := v_1.Args[0]
+ z := v_1.Args[1]
+ v.reset(OpARMXORshiftRLreg)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(z)
+ return true
+ }
+ // match: (XOR (SRL y z) x)
+ // cond:
+ // result: (XORshiftRLreg x y z)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSRL {
+ break
+ }
+ y := v_0.Args[0]
+ z := v_0.Args[1]
+ x := v.Args[1]
+ v.reset(OpARMXORshiftRLreg)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(z)
+ return true
+ }
+ // match: (XOR x (SRA y z))
+ // cond:
+ // result: (XORshiftRAreg x y z)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSRA {
+ break
+ }
+ y := v_1.Args[0]
+ z := v_1.Args[1]
+ v.reset(OpARMXORshiftRAreg)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(z)
+ return true
+ }
+ // match: (XOR (SRA y z) x)
+ // cond:
+ // result: (XORshiftRAreg x y z)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSRA {
+ break
+ }
+ y := v_0.Args[0]
+ z := v_0.Args[1]
+ x := v.Args[1]
+ v.reset(OpARMXORshiftRAreg)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(z)
+ return true
+ }
// match: (XOR x x)
// cond:
// result: (MOVWconst [0])
@@ -8015,6 +16192,312 @@ func rewriteValueARM_OpARMXORconst(v *Value, config *Config) bool {
}
return false
}
+func rewriteValueARM_OpARMXORshiftLL(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (XORshiftLL (MOVWconst [c]) x [d])
+ // cond:
+ // result: (XORconst [c] (SLLconst <x.Type> x [d]))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ d := v.AuxInt
+ v.reset(OpARMXORconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
+ v0.AddArg(x)
+ v0.AuxInt = d
+ v.AddArg(v0)
+ return true
+ }
+ // match: (XORshiftLL x (MOVWconst [c]) [d])
+ // cond:
+ // result: (XORconst x [int64(uint32(c)<<uint64(d))])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ d := v.AuxInt
+ v.reset(OpARMXORconst)
+ v.AddArg(x)
+ v.AuxInt = int64(uint32(c) << uint64(d))
+ return true
+ }
+	// match: (XORshiftLL (SLLconst x [c]) x [d])
+	// cond: c==d
+	// result: (MOVWconst [0])
+	for {
+		v_0 := v.Args[0]
+		if v_0.Op != OpARMSLLconst {
+			break
+		}
+		c := v_0.AuxInt
+		x := v_0.Args[0]
+		if x != v.Args[1] {
+			break
+		}
+		d := v.AuxInt
+		if !(c == d) {
+			break
+		}
+		v.reset(OpARMMOVWconst)
+		v.AuxInt = 0
+		return true
+	}
+ return false
+}
+func rewriteValueARM_OpARMXORshiftLLreg(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (XORshiftLLreg (MOVWconst [c]) x y)
+ // cond:
+ // result: (XORconst [c] (SLL <x.Type> x y))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ y := v.Args[2]
+ v.reset(OpARMXORconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSLL, x.Type)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (XORshiftLLreg x y (MOVWconst [c]))
+ // cond:
+ // result: (XORshiftLL x y [c])
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_2.AuxInt
+ v.reset(OpARMXORshiftLL)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AuxInt = c
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMXORshiftRA(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (XORshiftRA (MOVWconst [c]) x [d])
+ // cond:
+ // result: (XORconst [c] (SRAconst <x.Type> x [d]))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ d := v.AuxInt
+ v.reset(OpARMXORconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSRAconst, x.Type)
+ v0.AddArg(x)
+ v0.AuxInt = d
+ v.AddArg(v0)
+ return true
+ }
+ // match: (XORshiftRA x (MOVWconst [c]) [d])
+ // cond:
+ // result: (XORconst x [int64(int32(c)>>uint64(d))])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ d := v.AuxInt
+ v.reset(OpARMXORconst)
+ v.AddArg(x)
+ v.AuxInt = int64(int32(c) >> uint64(d))
+ return true
+ }
+	// match: (XORshiftRA (SRAconst x [c]) x [d])
+	// cond: c==d
+	// result: (MOVWconst [0])
+	for {
+		v_0 := v.Args[0]
+		if v_0.Op != OpARMSRAconst {
+			break
+		}
+		c := v_0.AuxInt
+		x := v_0.Args[0]
+		if x != v.Args[1] {
+			break
+		}
+		d := v.AuxInt
+		if !(c == d) {
+			break
+		}
+		v.reset(OpARMMOVWconst)
+		v.AuxInt = 0
+		return true
+	}
+ return false
+}
+func rewriteValueARM_OpARMXORshiftRAreg(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (XORshiftRAreg (MOVWconst [c]) x y)
+ // cond:
+ // result: (XORconst [c] (SRA <x.Type> x y))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ y := v.Args[2]
+ v.reset(OpARMXORconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSRA, x.Type)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (XORshiftRAreg x y (MOVWconst [c]))
+ // cond:
+ // result: (XORshiftRA x y [c])
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_2.AuxInt
+ v.reset(OpARMXORshiftRA)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AuxInt = c
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMXORshiftRL(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (XORshiftRL (MOVWconst [c]) x [d])
+ // cond:
+ // result: (XORconst [c] (SRLconst <x.Type> x [d]))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ d := v.AuxInt
+ v.reset(OpARMXORconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSRLconst, x.Type)
+ v0.AddArg(x)
+ v0.AuxInt = d
+ v.AddArg(v0)
+ return true
+ }
+ // match: (XORshiftRL x (MOVWconst [c]) [d])
+ // cond:
+ // result: (XORconst x [int64(uint32(c)>>uint64(d))])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ d := v.AuxInt
+ v.reset(OpARMXORconst)
+ v.AddArg(x)
+ v.AuxInt = int64(uint32(c) >> uint64(d))
+ return true
+ }
+	// match: (XORshiftRL (SRLconst x [c]) x [d])
+	// cond: c==d
+	// result: (MOVWconst [0])
+	for {
+		v_0 := v.Args[0]
+		if v_0.Op != OpARMSRLconst {
+			break
+		}
+		c := v_0.AuxInt
+		x := v_0.Args[0]
+		if x != v.Args[1] {
+			break
+		}
+		d := v.AuxInt
+		if !(c == d) {
+			break
+		}
+		v.reset(OpARMMOVWconst)
+		v.AuxInt = 0
+		return true
+	}
+ return false
+}
+func rewriteValueARM_OpARMXORshiftRLreg(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (XORshiftRLreg (MOVWconst [c]) x y)
+ // cond:
+ // result: (XORconst [c] (SRL <x.Type> x y))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ y := v.Args[2]
+ v.reset(OpARMXORconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSRL, x.Type)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (XORshiftRLreg x y (MOVWconst [c]))
+ // cond:
+ // result: (XORshiftRL x y [c])
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_2.AuxInt
+ v.reset(OpARMXORshiftRL)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AuxInt = c
+ return true
+ }
+ return false
+}
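
Unlike SUB, XOR is commutative, so both operand orders above rewrite to the same XORshift op and no RSB-style mirror is needed; XORing a value with an identically shifted copy of itself likewise folds to zero. Illustrative sketch (names are not from this CL):

	package armshift

	// Both x ^ y<<3 and y<<3 ^ x can become (XORshiftLL x y [3]),
	// a single EOR with a shifted second operand.
	func xorOfShift(x, y uint32) uint32 {
		return x ^ y<<3
	}
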
func rewriteValueARM_OpXor16(v *Value, config *Config) bool {
b := v.Block
_ = b
@@ -8365,11 +16848,16 @@ func rewriteValueARM_OpZeromask(v *Value, config *Config) bool {
_ = b
// match: (Zeromask x)
// cond:
- // result: (LoweredZeromask x)
+ // result: (SRAconst (RSBshiftRL <config.fe.TypeInt32()> x x [1]) [31])
for {
x := v.Args[0]
- v.reset(OpARMLoweredZeromask)
- v.AddArg(x)
+ v.reset(OpARMSRAconst)
+ v0 := b.NewValue0(v.Line, OpARMRSBshiftRL, config.fe.TypeInt32())
+ v0.AddArg(x)
+ v0.AddArg(x)
+ v0.AuxInt = 1
+ v.AddArg(v0)
+ v.AuxInt = 31
return true
}
}
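
The Zeromask change at the end replaces the LoweredZeromask pseudo-op with two ordinary instructions: RSBshiftRL x x [1] computes (x >>u 1) − x, which is zero when x is zero and negative otherwise (an unsigned halving of a nonzero value is strictly smaller than the value), and SRAconst [31] then copies that sign bit across the whole word, yielding 0 or 0xFFFFFFFF. A standalone check of the identity (not part of the CL):

	package main

	import "fmt"

	// zeromask mirrors (SRAconst (RSBshiftRL <int32> x x [1]) [31]).
	func zeromask(x uint32) uint32 {
		return uint32(int32((x>>1)-x) >> 31)
	}

	func main() {
		for _, x := range []uint32{0, 1, 2, 0x80000000, 0xFFFFFFFF} {
			fmt.Printf("zeromask(%#x) = %#x\n", x, zeromask(x))
		}
	}
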