aboutsummaryrefslogtreecommitdiff
path: root/src/cmd/compile/internal/ssa/rewriteARM.go
diff options
context:
space:
mode:
Diffstat (limited to 'src/cmd/compile/internal/ssa/rewriteARM.go')
-rw-r--r--src/cmd/compile/internal/ssa/rewriteARM.go9044
1 files changed, 4522 insertions, 4522 deletions
diff --git a/src/cmd/compile/internal/ssa/rewriteARM.go b/src/cmd/compile/internal/ssa/rewriteARM.go
index eb000d7460..a4659e40bd 100644
--- a/src/cmd/compile/internal/ssa/rewriteARM.go
+++ b/src/cmd/compile/internal/ssa/rewriteARM.go
@@ -70,32 +70,6 @@ func rewriteValueARM(v *Value, config *Config) bool {
return rewriteValueARM_OpARMANDshiftRL(v, config)
case OpARMANDshiftRLreg:
return rewriteValueARM_OpARMANDshiftRLreg(v, config)
- case OpAdd16:
- return rewriteValueARM_OpAdd16(v, config)
- case OpAdd32:
- return rewriteValueARM_OpAdd32(v, config)
- case OpAdd32F:
- return rewriteValueARM_OpAdd32F(v, config)
- case OpAdd32carry:
- return rewriteValueARM_OpAdd32carry(v, config)
- case OpAdd32withcarry:
- return rewriteValueARM_OpAdd32withcarry(v, config)
- case OpAdd64F:
- return rewriteValueARM_OpAdd64F(v, config)
- case OpAdd8:
- return rewriteValueARM_OpAdd8(v, config)
- case OpAddPtr:
- return rewriteValueARM_OpAddPtr(v, config)
- case OpAddr:
- return rewriteValueARM_OpAddr(v, config)
- case OpAnd16:
- return rewriteValueARM_OpAnd16(v, config)
- case OpAnd32:
- return rewriteValueARM_OpAnd32(v, config)
- case OpAnd8:
- return rewriteValueARM_OpAnd8(v, config)
- case OpAndB:
- return rewriteValueARM_OpAndB(v, config)
case OpARMBIC:
return rewriteValueARM_OpARMBIC(v, config)
case OpARMBICconst:
@@ -136,6 +110,258 @@ func rewriteValueARM(v *Value, config *Config) bool {
return rewriteValueARM_OpARMCMPshiftRL(v, config)
case OpARMCMPshiftRLreg:
return rewriteValueARM_OpARMCMPshiftRLreg(v, config)
+ case OpARMDIV:
+ return rewriteValueARM_OpARMDIV(v, config)
+ case OpARMDIVU:
+ return rewriteValueARM_OpARMDIVU(v, config)
+ case OpARMEqual:
+ return rewriteValueARM_OpARMEqual(v, config)
+ case OpARMGreaterEqual:
+ return rewriteValueARM_OpARMGreaterEqual(v, config)
+ case OpARMGreaterEqualU:
+ return rewriteValueARM_OpARMGreaterEqualU(v, config)
+ case OpARMGreaterThan:
+ return rewriteValueARM_OpARMGreaterThan(v, config)
+ case OpARMGreaterThanU:
+ return rewriteValueARM_OpARMGreaterThanU(v, config)
+ case OpARMLessEqual:
+ return rewriteValueARM_OpARMLessEqual(v, config)
+ case OpARMLessEqualU:
+ return rewriteValueARM_OpARMLessEqualU(v, config)
+ case OpARMLessThan:
+ return rewriteValueARM_OpARMLessThan(v, config)
+ case OpARMLessThanU:
+ return rewriteValueARM_OpARMLessThanU(v, config)
+ case OpARMMOVBUload:
+ return rewriteValueARM_OpARMMOVBUload(v, config)
+ case OpARMMOVBUreg:
+ return rewriteValueARM_OpARMMOVBUreg(v, config)
+ case OpARMMOVBload:
+ return rewriteValueARM_OpARMMOVBload(v, config)
+ case OpARMMOVBreg:
+ return rewriteValueARM_OpARMMOVBreg(v, config)
+ case OpARMMOVBstore:
+ return rewriteValueARM_OpARMMOVBstore(v, config)
+ case OpARMMOVDload:
+ return rewriteValueARM_OpARMMOVDload(v, config)
+ case OpARMMOVDstore:
+ return rewriteValueARM_OpARMMOVDstore(v, config)
+ case OpARMMOVFload:
+ return rewriteValueARM_OpARMMOVFload(v, config)
+ case OpARMMOVFstore:
+ return rewriteValueARM_OpARMMOVFstore(v, config)
+ case OpARMMOVHUload:
+ return rewriteValueARM_OpARMMOVHUload(v, config)
+ case OpARMMOVHUreg:
+ return rewriteValueARM_OpARMMOVHUreg(v, config)
+ case OpARMMOVHload:
+ return rewriteValueARM_OpARMMOVHload(v, config)
+ case OpARMMOVHreg:
+ return rewriteValueARM_OpARMMOVHreg(v, config)
+ case OpARMMOVHstore:
+ return rewriteValueARM_OpARMMOVHstore(v, config)
+ case OpARMMOVWload:
+ return rewriteValueARM_OpARMMOVWload(v, config)
+ case OpARMMOVWloadidx:
+ return rewriteValueARM_OpARMMOVWloadidx(v, config)
+ case OpARMMOVWloadshiftLL:
+ return rewriteValueARM_OpARMMOVWloadshiftLL(v, config)
+ case OpARMMOVWloadshiftRA:
+ return rewriteValueARM_OpARMMOVWloadshiftRA(v, config)
+ case OpARMMOVWloadshiftRL:
+ return rewriteValueARM_OpARMMOVWloadshiftRL(v, config)
+ case OpARMMOVWreg:
+ return rewriteValueARM_OpARMMOVWreg(v, config)
+ case OpARMMOVWstore:
+ return rewriteValueARM_OpARMMOVWstore(v, config)
+ case OpARMMOVWstoreidx:
+ return rewriteValueARM_OpARMMOVWstoreidx(v, config)
+ case OpARMMOVWstoreshiftLL:
+ return rewriteValueARM_OpARMMOVWstoreshiftLL(v, config)
+ case OpARMMOVWstoreshiftRA:
+ return rewriteValueARM_OpARMMOVWstoreshiftRA(v, config)
+ case OpARMMOVWstoreshiftRL:
+ return rewriteValueARM_OpARMMOVWstoreshiftRL(v, config)
+ case OpARMMUL:
+ return rewriteValueARM_OpARMMUL(v, config)
+ case OpARMMULA:
+ return rewriteValueARM_OpARMMULA(v, config)
+ case OpARMMVN:
+ return rewriteValueARM_OpARMMVN(v, config)
+ case OpARMMVNshiftLL:
+ return rewriteValueARM_OpARMMVNshiftLL(v, config)
+ case OpARMMVNshiftLLreg:
+ return rewriteValueARM_OpARMMVNshiftLLreg(v, config)
+ case OpARMMVNshiftRA:
+ return rewriteValueARM_OpARMMVNshiftRA(v, config)
+ case OpARMMVNshiftRAreg:
+ return rewriteValueARM_OpARMMVNshiftRAreg(v, config)
+ case OpARMMVNshiftRL:
+ return rewriteValueARM_OpARMMVNshiftRL(v, config)
+ case OpARMMVNshiftRLreg:
+ return rewriteValueARM_OpARMMVNshiftRLreg(v, config)
+ case OpARMNotEqual:
+ return rewriteValueARM_OpARMNotEqual(v, config)
+ case OpARMOR:
+ return rewriteValueARM_OpARMOR(v, config)
+ case OpARMORconst:
+ return rewriteValueARM_OpARMORconst(v, config)
+ case OpARMORshiftLL:
+ return rewriteValueARM_OpARMORshiftLL(v, config)
+ case OpARMORshiftLLreg:
+ return rewriteValueARM_OpARMORshiftLLreg(v, config)
+ case OpARMORshiftRA:
+ return rewriteValueARM_OpARMORshiftRA(v, config)
+ case OpARMORshiftRAreg:
+ return rewriteValueARM_OpARMORshiftRAreg(v, config)
+ case OpARMORshiftRL:
+ return rewriteValueARM_OpARMORshiftRL(v, config)
+ case OpARMORshiftRLreg:
+ return rewriteValueARM_OpARMORshiftRLreg(v, config)
+ case OpARMRSB:
+ return rewriteValueARM_OpARMRSB(v, config)
+ case OpARMRSBSshiftLL:
+ return rewriteValueARM_OpARMRSBSshiftLL(v, config)
+ case OpARMRSBSshiftLLreg:
+ return rewriteValueARM_OpARMRSBSshiftLLreg(v, config)
+ case OpARMRSBSshiftRA:
+ return rewriteValueARM_OpARMRSBSshiftRA(v, config)
+ case OpARMRSBSshiftRAreg:
+ return rewriteValueARM_OpARMRSBSshiftRAreg(v, config)
+ case OpARMRSBSshiftRL:
+ return rewriteValueARM_OpARMRSBSshiftRL(v, config)
+ case OpARMRSBSshiftRLreg:
+ return rewriteValueARM_OpARMRSBSshiftRLreg(v, config)
+ case OpARMRSBconst:
+ return rewriteValueARM_OpARMRSBconst(v, config)
+ case OpARMRSBshiftLL:
+ return rewriteValueARM_OpARMRSBshiftLL(v, config)
+ case OpARMRSBshiftLLreg:
+ return rewriteValueARM_OpARMRSBshiftLLreg(v, config)
+ case OpARMRSBshiftRA:
+ return rewriteValueARM_OpARMRSBshiftRA(v, config)
+ case OpARMRSBshiftRAreg:
+ return rewriteValueARM_OpARMRSBshiftRAreg(v, config)
+ case OpARMRSBshiftRL:
+ return rewriteValueARM_OpARMRSBshiftRL(v, config)
+ case OpARMRSBshiftRLreg:
+ return rewriteValueARM_OpARMRSBshiftRLreg(v, config)
+ case OpARMRSCconst:
+ return rewriteValueARM_OpARMRSCconst(v, config)
+ case OpARMRSCshiftLL:
+ return rewriteValueARM_OpARMRSCshiftLL(v, config)
+ case OpARMRSCshiftLLreg:
+ return rewriteValueARM_OpARMRSCshiftLLreg(v, config)
+ case OpARMRSCshiftRA:
+ return rewriteValueARM_OpARMRSCshiftRA(v, config)
+ case OpARMRSCshiftRAreg:
+ return rewriteValueARM_OpARMRSCshiftRAreg(v, config)
+ case OpARMRSCshiftRL:
+ return rewriteValueARM_OpARMRSCshiftRL(v, config)
+ case OpARMRSCshiftRLreg:
+ return rewriteValueARM_OpARMRSCshiftRLreg(v, config)
+ case OpARMSBC:
+ return rewriteValueARM_OpARMSBC(v, config)
+ case OpARMSBCconst:
+ return rewriteValueARM_OpARMSBCconst(v, config)
+ case OpARMSBCshiftLL:
+ return rewriteValueARM_OpARMSBCshiftLL(v, config)
+ case OpARMSBCshiftLLreg:
+ return rewriteValueARM_OpARMSBCshiftLLreg(v, config)
+ case OpARMSBCshiftRA:
+ return rewriteValueARM_OpARMSBCshiftRA(v, config)
+ case OpARMSBCshiftRAreg:
+ return rewriteValueARM_OpARMSBCshiftRAreg(v, config)
+ case OpARMSBCshiftRL:
+ return rewriteValueARM_OpARMSBCshiftRL(v, config)
+ case OpARMSBCshiftRLreg:
+ return rewriteValueARM_OpARMSBCshiftRLreg(v, config)
+ case OpARMSLL:
+ return rewriteValueARM_OpARMSLL(v, config)
+ case OpARMSLLconst:
+ return rewriteValueARM_OpARMSLLconst(v, config)
+ case OpARMSRA:
+ return rewriteValueARM_OpARMSRA(v, config)
+ case OpARMSRAcond:
+ return rewriteValueARM_OpARMSRAcond(v, config)
+ case OpARMSRAconst:
+ return rewriteValueARM_OpARMSRAconst(v, config)
+ case OpARMSRL:
+ return rewriteValueARM_OpARMSRL(v, config)
+ case OpARMSRLconst:
+ return rewriteValueARM_OpARMSRLconst(v, config)
+ case OpARMSUB:
+ return rewriteValueARM_OpARMSUB(v, config)
+ case OpARMSUBS:
+ return rewriteValueARM_OpARMSUBS(v, config)
+ case OpARMSUBSshiftLL:
+ return rewriteValueARM_OpARMSUBSshiftLL(v, config)
+ case OpARMSUBSshiftLLreg:
+ return rewriteValueARM_OpARMSUBSshiftLLreg(v, config)
+ case OpARMSUBSshiftRA:
+ return rewriteValueARM_OpARMSUBSshiftRA(v, config)
+ case OpARMSUBSshiftRAreg:
+ return rewriteValueARM_OpARMSUBSshiftRAreg(v, config)
+ case OpARMSUBSshiftRL:
+ return rewriteValueARM_OpARMSUBSshiftRL(v, config)
+ case OpARMSUBSshiftRLreg:
+ return rewriteValueARM_OpARMSUBSshiftRLreg(v, config)
+ case OpARMSUBconst:
+ return rewriteValueARM_OpARMSUBconst(v, config)
+ case OpARMSUBshiftLL:
+ return rewriteValueARM_OpARMSUBshiftLL(v, config)
+ case OpARMSUBshiftLLreg:
+ return rewriteValueARM_OpARMSUBshiftLLreg(v, config)
+ case OpARMSUBshiftRA:
+ return rewriteValueARM_OpARMSUBshiftRA(v, config)
+ case OpARMSUBshiftRAreg:
+ return rewriteValueARM_OpARMSUBshiftRAreg(v, config)
+ case OpARMSUBshiftRL:
+ return rewriteValueARM_OpARMSUBshiftRL(v, config)
+ case OpARMSUBshiftRLreg:
+ return rewriteValueARM_OpARMSUBshiftRLreg(v, config)
+ case OpARMXOR:
+ return rewriteValueARM_OpARMXOR(v, config)
+ case OpARMXORconst:
+ return rewriteValueARM_OpARMXORconst(v, config)
+ case OpARMXORshiftLL:
+ return rewriteValueARM_OpARMXORshiftLL(v, config)
+ case OpARMXORshiftLLreg:
+ return rewriteValueARM_OpARMXORshiftLLreg(v, config)
+ case OpARMXORshiftRA:
+ return rewriteValueARM_OpARMXORshiftRA(v, config)
+ case OpARMXORshiftRAreg:
+ return rewriteValueARM_OpARMXORshiftRAreg(v, config)
+ case OpARMXORshiftRL:
+ return rewriteValueARM_OpARMXORshiftRL(v, config)
+ case OpARMXORshiftRLreg:
+ return rewriteValueARM_OpARMXORshiftRLreg(v, config)
+ case OpAdd16:
+ return rewriteValueARM_OpAdd16(v, config)
+ case OpAdd32:
+ return rewriteValueARM_OpAdd32(v, config)
+ case OpAdd32F:
+ return rewriteValueARM_OpAdd32F(v, config)
+ case OpAdd32carry:
+ return rewriteValueARM_OpAdd32carry(v, config)
+ case OpAdd32withcarry:
+ return rewriteValueARM_OpAdd32withcarry(v, config)
+ case OpAdd64F:
+ return rewriteValueARM_OpAdd64F(v, config)
+ case OpAdd8:
+ return rewriteValueARM_OpAdd8(v, config)
+ case OpAddPtr:
+ return rewriteValueARM_OpAddPtr(v, config)
+ case OpAddr:
+ return rewriteValueARM_OpAddr(v, config)
+ case OpAnd16:
+ return rewriteValueARM_OpAnd16(v, config)
+ case OpAnd32:
+ return rewriteValueARM_OpAnd32(v, config)
+ case OpAnd8:
+ return rewriteValueARM_OpAnd8(v, config)
+ case OpAndB:
+ return rewriteValueARM_OpAndB(v, config)
case OpClosureCall:
return rewriteValueARM_OpClosureCall(v, config)
case OpCom16:
@@ -180,10 +406,6 @@ func rewriteValueARM(v *Value, config *Config) bool {
return rewriteValueARM_OpCvt64Fto32F(v, config)
case OpCvt64Fto32U:
return rewriteValueARM_OpCvt64Fto32U(v, config)
- case OpARMDIV:
- return rewriteValueARM_OpARMDIV(v, config)
- case OpARMDIVU:
- return rewriteValueARM_OpARMDIVU(v, config)
case OpDeferCall:
return rewriteValueARM_OpDeferCall(v, config)
case OpDiv16:
@@ -216,8 +438,6 @@ func rewriteValueARM(v *Value, config *Config) bool {
return rewriteValueARM_OpEqB(v, config)
case OpEqPtr:
return rewriteValueARM_OpEqPtr(v, config)
- case OpARMEqual:
- return rewriteValueARM_OpARMEqual(v, config)
case OpGeq16:
return rewriteValueARM_OpGeq16(v, config)
case OpGeq16U:
@@ -254,14 +474,6 @@ func rewriteValueARM(v *Value, config *Config) bool {
return rewriteValueARM_OpGreater8(v, config)
case OpGreater8U:
return rewriteValueARM_OpGreater8U(v, config)
- case OpARMGreaterEqual:
- return rewriteValueARM_OpARMGreaterEqual(v, config)
- case OpARMGreaterEqualU:
- return rewriteValueARM_OpARMGreaterEqualU(v, config)
- case OpARMGreaterThan:
- return rewriteValueARM_OpARMGreaterThan(v, config)
- case OpARMGreaterThanU:
- return rewriteValueARM_OpARMGreaterThanU(v, config)
case OpHmul16:
return rewriteValueARM_OpHmul16(v, config)
case OpHmul16u:
@@ -314,14 +526,6 @@ func rewriteValueARM(v *Value, config *Config) bool {
return rewriteValueARM_OpLess8(v, config)
case OpLess8U:
return rewriteValueARM_OpLess8U(v, config)
- case OpARMLessEqual:
- return rewriteValueARM_OpARMLessEqual(v, config)
- case OpARMLessEqualU:
- return rewriteValueARM_OpARMLessEqualU(v, config)
- case OpARMLessThan:
- return rewriteValueARM_OpARMLessThan(v, config)
- case OpARMLessThanU:
- return rewriteValueARM_OpARMLessThanU(v, config)
case OpLoad:
return rewriteValueARM_OpLoad(v, config)
case OpLrot16:
@@ -354,74 +558,6 @@ func rewriteValueARM(v *Value, config *Config) bool {
return rewriteValueARM_OpLsh8x64(v, config)
case OpLsh8x8:
return rewriteValueARM_OpLsh8x8(v, config)
- case OpARMMOVBUload:
- return rewriteValueARM_OpARMMOVBUload(v, config)
- case OpARMMOVBUreg:
- return rewriteValueARM_OpARMMOVBUreg(v, config)
- case OpARMMOVBload:
- return rewriteValueARM_OpARMMOVBload(v, config)
- case OpARMMOVBreg:
- return rewriteValueARM_OpARMMOVBreg(v, config)
- case OpARMMOVBstore:
- return rewriteValueARM_OpARMMOVBstore(v, config)
- case OpARMMOVDload:
- return rewriteValueARM_OpARMMOVDload(v, config)
- case OpARMMOVDstore:
- return rewriteValueARM_OpARMMOVDstore(v, config)
- case OpARMMOVFload:
- return rewriteValueARM_OpARMMOVFload(v, config)
- case OpARMMOVFstore:
- return rewriteValueARM_OpARMMOVFstore(v, config)
- case OpARMMOVHUload:
- return rewriteValueARM_OpARMMOVHUload(v, config)
- case OpARMMOVHUreg:
- return rewriteValueARM_OpARMMOVHUreg(v, config)
- case OpARMMOVHload:
- return rewriteValueARM_OpARMMOVHload(v, config)
- case OpARMMOVHreg:
- return rewriteValueARM_OpARMMOVHreg(v, config)
- case OpARMMOVHstore:
- return rewriteValueARM_OpARMMOVHstore(v, config)
- case OpARMMOVWload:
- return rewriteValueARM_OpARMMOVWload(v, config)
- case OpARMMOVWloadidx:
- return rewriteValueARM_OpARMMOVWloadidx(v, config)
- case OpARMMOVWloadshiftLL:
- return rewriteValueARM_OpARMMOVWloadshiftLL(v, config)
- case OpARMMOVWloadshiftRA:
- return rewriteValueARM_OpARMMOVWloadshiftRA(v, config)
- case OpARMMOVWloadshiftRL:
- return rewriteValueARM_OpARMMOVWloadshiftRL(v, config)
- case OpARMMOVWreg:
- return rewriteValueARM_OpARMMOVWreg(v, config)
- case OpARMMOVWstore:
- return rewriteValueARM_OpARMMOVWstore(v, config)
- case OpARMMOVWstoreidx:
- return rewriteValueARM_OpARMMOVWstoreidx(v, config)
- case OpARMMOVWstoreshiftLL:
- return rewriteValueARM_OpARMMOVWstoreshiftLL(v, config)
- case OpARMMOVWstoreshiftRA:
- return rewriteValueARM_OpARMMOVWstoreshiftRA(v, config)
- case OpARMMOVWstoreshiftRL:
- return rewriteValueARM_OpARMMOVWstoreshiftRL(v, config)
- case OpARMMUL:
- return rewriteValueARM_OpARMMUL(v, config)
- case OpARMMULA:
- return rewriteValueARM_OpARMMULA(v, config)
- case OpARMMVN:
- return rewriteValueARM_OpARMMVN(v, config)
- case OpARMMVNshiftLL:
- return rewriteValueARM_OpARMMVNshiftLL(v, config)
- case OpARMMVNshiftLLreg:
- return rewriteValueARM_OpARMMVNshiftLLreg(v, config)
- case OpARMMVNshiftRA:
- return rewriteValueARM_OpARMMVNshiftRA(v, config)
- case OpARMMVNshiftRAreg:
- return rewriteValueARM_OpARMMVNshiftRAreg(v, config)
- case OpARMMVNshiftRL:
- return rewriteValueARM_OpARMMVNshiftRL(v, config)
- case OpARMMVNshiftRLreg:
- return rewriteValueARM_OpARMMVNshiftRLreg(v, config)
case OpMod16:
return rewriteValueARM_OpMod16(v, config)
case OpMod16u:
@@ -476,24 +612,6 @@ func rewriteValueARM(v *Value, config *Config) bool {
return rewriteValueARM_OpNilCheck(v, config)
case OpNot:
return rewriteValueARM_OpNot(v, config)
- case OpARMNotEqual:
- return rewriteValueARM_OpARMNotEqual(v, config)
- case OpARMOR:
- return rewriteValueARM_OpARMOR(v, config)
- case OpARMORconst:
- return rewriteValueARM_OpARMORconst(v, config)
- case OpARMORshiftLL:
- return rewriteValueARM_OpARMORshiftLL(v, config)
- case OpARMORshiftLLreg:
- return rewriteValueARM_OpARMORshiftLLreg(v, config)
- case OpARMORshiftRA:
- return rewriteValueARM_OpARMORshiftRA(v, config)
- case OpARMORshiftRAreg:
- return rewriteValueARM_OpARMORshiftRAreg(v, config)
- case OpARMORshiftRL:
- return rewriteValueARM_OpARMORshiftRL(v, config)
- case OpARMORshiftRLreg:
- return rewriteValueARM_OpARMORshiftRLreg(v, config)
case OpOffPtr:
return rewriteValueARM_OpOffPtr(v, config)
case OpOr16:
@@ -504,48 +622,6 @@ func rewriteValueARM(v *Value, config *Config) bool {
return rewriteValueARM_OpOr8(v, config)
case OpOrB:
return rewriteValueARM_OpOrB(v, config)
- case OpARMRSB:
- return rewriteValueARM_OpARMRSB(v, config)
- case OpARMRSBSshiftLL:
- return rewriteValueARM_OpARMRSBSshiftLL(v, config)
- case OpARMRSBSshiftLLreg:
- return rewriteValueARM_OpARMRSBSshiftLLreg(v, config)
- case OpARMRSBSshiftRA:
- return rewriteValueARM_OpARMRSBSshiftRA(v, config)
- case OpARMRSBSshiftRAreg:
- return rewriteValueARM_OpARMRSBSshiftRAreg(v, config)
- case OpARMRSBSshiftRL:
- return rewriteValueARM_OpARMRSBSshiftRL(v, config)
- case OpARMRSBSshiftRLreg:
- return rewriteValueARM_OpARMRSBSshiftRLreg(v, config)
- case OpARMRSBconst:
- return rewriteValueARM_OpARMRSBconst(v, config)
- case OpARMRSBshiftLL:
- return rewriteValueARM_OpARMRSBshiftLL(v, config)
- case OpARMRSBshiftLLreg:
- return rewriteValueARM_OpARMRSBshiftLLreg(v, config)
- case OpARMRSBshiftRA:
- return rewriteValueARM_OpARMRSBshiftRA(v, config)
- case OpARMRSBshiftRAreg:
- return rewriteValueARM_OpARMRSBshiftRAreg(v, config)
- case OpARMRSBshiftRL:
- return rewriteValueARM_OpARMRSBshiftRL(v, config)
- case OpARMRSBshiftRLreg:
- return rewriteValueARM_OpARMRSBshiftRLreg(v, config)
- case OpARMRSCconst:
- return rewriteValueARM_OpARMRSCconst(v, config)
- case OpARMRSCshiftLL:
- return rewriteValueARM_OpARMRSCshiftLL(v, config)
- case OpARMRSCshiftLLreg:
- return rewriteValueARM_OpARMRSCshiftLLreg(v, config)
- case OpARMRSCshiftRA:
- return rewriteValueARM_OpARMRSCshiftRA(v, config)
- case OpARMRSCshiftRAreg:
- return rewriteValueARM_OpARMRSCshiftRAreg(v, config)
- case OpARMRSCshiftRL:
- return rewriteValueARM_OpARMRSCshiftRL(v, config)
- case OpARMRSCshiftRLreg:
- return rewriteValueARM_OpARMRSCshiftRLreg(v, config)
case OpRsh16Ux16:
return rewriteValueARM_OpRsh16Ux16(v, config)
case OpRsh16Ux32:
@@ -594,66 +670,6 @@ func rewriteValueARM(v *Value, config *Config) bool {
return rewriteValueARM_OpRsh8x64(v, config)
case OpRsh8x8:
return rewriteValueARM_OpRsh8x8(v, config)
- case OpARMSBC:
- return rewriteValueARM_OpARMSBC(v, config)
- case OpARMSBCconst:
- return rewriteValueARM_OpARMSBCconst(v, config)
- case OpARMSBCshiftLL:
- return rewriteValueARM_OpARMSBCshiftLL(v, config)
- case OpARMSBCshiftLLreg:
- return rewriteValueARM_OpARMSBCshiftLLreg(v, config)
- case OpARMSBCshiftRA:
- return rewriteValueARM_OpARMSBCshiftRA(v, config)
- case OpARMSBCshiftRAreg:
- return rewriteValueARM_OpARMSBCshiftRAreg(v, config)
- case OpARMSBCshiftRL:
- return rewriteValueARM_OpARMSBCshiftRL(v, config)
- case OpARMSBCshiftRLreg:
- return rewriteValueARM_OpARMSBCshiftRLreg(v, config)
- case OpARMSLL:
- return rewriteValueARM_OpARMSLL(v, config)
- case OpARMSLLconst:
- return rewriteValueARM_OpARMSLLconst(v, config)
- case OpARMSRA:
- return rewriteValueARM_OpARMSRA(v, config)
- case OpARMSRAcond:
- return rewriteValueARM_OpARMSRAcond(v, config)
- case OpARMSRAconst:
- return rewriteValueARM_OpARMSRAconst(v, config)
- case OpARMSRL:
- return rewriteValueARM_OpARMSRL(v, config)
- case OpARMSRLconst:
- return rewriteValueARM_OpARMSRLconst(v, config)
- case OpARMSUB:
- return rewriteValueARM_OpARMSUB(v, config)
- case OpARMSUBS:
- return rewriteValueARM_OpARMSUBS(v, config)
- case OpARMSUBSshiftLL:
- return rewriteValueARM_OpARMSUBSshiftLL(v, config)
- case OpARMSUBSshiftLLreg:
- return rewriteValueARM_OpARMSUBSshiftLLreg(v, config)
- case OpARMSUBSshiftRA:
- return rewriteValueARM_OpARMSUBSshiftRA(v, config)
- case OpARMSUBSshiftRAreg:
- return rewriteValueARM_OpARMSUBSshiftRAreg(v, config)
- case OpARMSUBSshiftRL:
- return rewriteValueARM_OpARMSUBSshiftRL(v, config)
- case OpARMSUBSshiftRLreg:
- return rewriteValueARM_OpARMSUBSshiftRLreg(v, config)
- case OpARMSUBconst:
- return rewriteValueARM_OpARMSUBconst(v, config)
- case OpARMSUBshiftLL:
- return rewriteValueARM_OpARMSUBshiftLL(v, config)
- case OpARMSUBshiftLLreg:
- return rewriteValueARM_OpARMSUBshiftLLreg(v, config)
- case OpARMSUBshiftRA:
- return rewriteValueARM_OpARMSUBshiftRA(v, config)
- case OpARMSUBshiftRAreg:
- return rewriteValueARM_OpARMSUBshiftRAreg(v, config)
- case OpARMSUBshiftRL:
- return rewriteValueARM_OpARMSUBshiftRL(v, config)
- case OpARMSUBshiftRLreg:
- return rewriteValueARM_OpARMSUBshiftRLreg(v, config)
case OpSignExt16to32:
return rewriteValueARM_OpSignExt16to32(v, config)
case OpSignExt8to16:
@@ -690,22 +706,6 @@ func rewriteValueARM(v *Value, config *Config) bool {
return rewriteValueARM_OpTrunc32to16(v, config)
case OpTrunc32to8:
return rewriteValueARM_OpTrunc32to8(v, config)
- case OpARMXOR:
- return rewriteValueARM_OpARMXOR(v, config)
- case OpARMXORconst:
- return rewriteValueARM_OpARMXORconst(v, config)
- case OpARMXORshiftLL:
- return rewriteValueARM_OpARMXORshiftLL(v, config)
- case OpARMXORshiftLLreg:
- return rewriteValueARM_OpARMXORshiftLLreg(v, config)
- case OpARMXORshiftRA:
- return rewriteValueARM_OpARMXORshiftRA(v, config)
- case OpARMXORshiftRAreg:
- return rewriteValueARM_OpARMXORshiftRAreg(v, config)
- case OpARMXORshiftRL:
- return rewriteValueARM_OpARMXORshiftRL(v, config)
- case OpARMXORshiftRLreg:
- return rewriteValueARM_OpARMXORshiftRLreg(v, config)
case OpXor16:
return rewriteValueARM_OpXor16(v, config)
case OpXor32:
@@ -775,9 +775,9 @@ func rewriteValueARM_OpARMADC(v *Value, config *Config) bool {
y := v_1.Args[0]
flags := v.Args[2]
v.reset(OpARMADCshiftLL)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
v.AddArg(flags)
return true
}
@@ -794,9 +794,9 @@ func rewriteValueARM_OpARMADC(v *Value, config *Config) bool {
x := v.Args[1]
flags := v.Args[2]
v.reset(OpARMADCshiftLL)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
v.AddArg(flags)
return true
}
@@ -813,9 +813,9 @@ func rewriteValueARM_OpARMADC(v *Value, config *Config) bool {
y := v_1.Args[0]
flags := v.Args[2]
v.reset(OpARMADCshiftRL)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
v.AddArg(flags)
return true
}
@@ -832,9 +832,9 @@ func rewriteValueARM_OpARMADC(v *Value, config *Config) bool {
x := v.Args[1]
flags := v.Args[2]
v.reset(OpARMADCshiftRL)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
v.AddArg(flags)
return true
}
@@ -851,9 +851,9 @@ func rewriteValueARM_OpARMADC(v *Value, config *Config) bool {
y := v_1.Args[0]
flags := v.Args[2]
v.reset(OpARMADCshiftRA)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
v.AddArg(flags)
return true
}
@@ -870,9 +870,9 @@ func rewriteValueARM_OpARMADC(v *Value, config *Config) bool {
x := v.Args[1]
flags := v.Args[2]
v.reset(OpARMADCshiftRA)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
v.AddArg(flags)
return true
}
@@ -1040,19 +1040,19 @@ func rewriteValueARM_OpARMADCshiftLL(v *Value, config *Config) bool {
// cond:
// result: (ADCconst [c] (SLLconst <x.Type> x [d]) flags)
for {
+ d := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpARMMOVWconst {
break
}
c := v_0.AuxInt
x := v.Args[1]
- d := v.AuxInt
flags := v.Args[2]
v.reset(OpARMADCconst)
v.AuxInt = c
v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
- v0.AddArg(x)
v0.AuxInt = d
+ v0.AddArg(x)
v.AddArg(v0)
v.AddArg(flags)
return true
@@ -1061,17 +1061,17 @@ func rewriteValueARM_OpARMADCshiftLL(v *Value, config *Config) bool {
// cond:
// result: (ADCconst x [int64(uint32(c)<<uint64(d))] flags)
for {
+ d := v.AuxInt
x := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpARMMOVWconst {
break
}
c := v_1.AuxInt
- d := v.AuxInt
flags := v.Args[2]
v.reset(OpARMADCconst)
- v.AddArg(x)
v.AuxInt = int64(uint32(c) << uint64(d))
+ v.AddArg(x)
v.AddArg(flags)
return true
}
@@ -1114,9 +1114,9 @@ func rewriteValueARM_OpARMADCshiftLLreg(v *Value, config *Config) bool {
c := v_2.AuxInt
flags := v.Args[3]
v.reset(OpARMADCshiftLL)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
v.AddArg(flags)
return true
}
@@ -1129,19 +1129,19 @@ func rewriteValueARM_OpARMADCshiftRA(v *Value, config *Config) bool {
// cond:
// result: (ADCconst [c] (SRAconst <x.Type> x [d]) flags)
for {
+ d := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpARMMOVWconst {
break
}
c := v_0.AuxInt
x := v.Args[1]
- d := v.AuxInt
flags := v.Args[2]
v.reset(OpARMADCconst)
v.AuxInt = c
v0 := b.NewValue0(v.Line, OpARMSRAconst, x.Type)
- v0.AddArg(x)
v0.AuxInt = d
+ v0.AddArg(x)
v.AddArg(v0)
v.AddArg(flags)
return true
@@ -1150,17 +1150,17 @@ func rewriteValueARM_OpARMADCshiftRA(v *Value, config *Config) bool {
// cond:
// result: (ADCconst x [int64(int32(c)>>uint64(d))] flags)
for {
+ d := v.AuxInt
x := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpARMMOVWconst {
break
}
c := v_1.AuxInt
- d := v.AuxInt
flags := v.Args[2]
v.reset(OpARMADCconst)
- v.AddArg(x)
v.AuxInt = int64(int32(c) >> uint64(d))
+ v.AddArg(x)
v.AddArg(flags)
return true
}
@@ -1203,9 +1203,9 @@ func rewriteValueARM_OpARMADCshiftRAreg(v *Value, config *Config) bool {
c := v_2.AuxInt
flags := v.Args[3]
v.reset(OpARMADCshiftRA)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
v.AddArg(flags)
return true
}
@@ -1218,19 +1218,19 @@ func rewriteValueARM_OpARMADCshiftRL(v *Value, config *Config) bool {
// cond:
// result: (ADCconst [c] (SRLconst <x.Type> x [d]) flags)
for {
+ d := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpARMMOVWconst {
break
}
c := v_0.AuxInt
x := v.Args[1]
- d := v.AuxInt
flags := v.Args[2]
v.reset(OpARMADCconst)
v.AuxInt = c
v0 := b.NewValue0(v.Line, OpARMSRLconst, x.Type)
- v0.AddArg(x)
v0.AuxInt = d
+ v0.AddArg(x)
v.AddArg(v0)
v.AddArg(flags)
return true
@@ -1239,17 +1239,17 @@ func rewriteValueARM_OpARMADCshiftRL(v *Value, config *Config) bool {
// cond:
// result: (ADCconst x [int64(uint32(c)>>uint64(d))] flags)
for {
+ d := v.AuxInt
x := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpARMMOVWconst {
break
}
c := v_1.AuxInt
- d := v.AuxInt
flags := v.Args[2]
v.reset(OpARMADCconst)
- v.AddArg(x)
v.AuxInt = int64(uint32(c) >> uint64(d))
+ v.AddArg(x)
v.AddArg(flags)
return true
}
@@ -1292,9 +1292,9 @@ func rewriteValueARM_OpARMADCshiftRLreg(v *Value, config *Config) bool {
c := v_2.AuxInt
flags := v.Args[3]
v.reset(OpARMADCshiftRL)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
v.AddArg(flags)
return true
}
@@ -1345,9 +1345,9 @@ func rewriteValueARM_OpARMADD(v *Value, config *Config) bool {
c := v_1.AuxInt
y := v_1.Args[0]
v.reset(OpARMADDshiftLL)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
// match: (ADD (SLLconst [c] y) x)
@@ -1362,9 +1362,9 @@ func rewriteValueARM_OpARMADD(v *Value, config *Config) bool {
y := v_0.Args[0]
x := v.Args[1]
v.reset(OpARMADDshiftLL)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
// match: (ADD x (SRLconst [c] y))
@@ -1379,9 +1379,9 @@ func rewriteValueARM_OpARMADD(v *Value, config *Config) bool {
c := v_1.AuxInt
y := v_1.Args[0]
v.reset(OpARMADDshiftRL)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
// match: (ADD (SRLconst [c] y) x)
@@ -1396,9 +1396,9 @@ func rewriteValueARM_OpARMADD(v *Value, config *Config) bool {
y := v_0.Args[0]
x := v.Args[1]
v.reset(OpARMADDshiftRL)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
// match: (ADD x (SRAconst [c] y))
@@ -1413,9 +1413,9 @@ func rewriteValueARM_OpARMADD(v *Value, config *Config) bool {
c := v_1.AuxInt
y := v_1.Args[0]
v.reset(OpARMADDshiftRA)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
// match: (ADD (SRAconst [c] y) x)
@@ -1430,9 +1430,9 @@ func rewriteValueARM_OpARMADD(v *Value, config *Config) bool {
y := v_0.Args[0]
x := v.Args[1]
v.reset(OpARMADDshiftRA)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
// match: (ADD x (SLL y z))
@@ -1654,9 +1654,9 @@ func rewriteValueARM_OpARMADDS(v *Value, config *Config) bool {
c := v_1.AuxInt
y := v_1.Args[0]
v.reset(OpARMADDSshiftLL)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
// match: (ADDS (SLLconst [c] y) x)
@@ -1671,9 +1671,9 @@ func rewriteValueARM_OpARMADDS(v *Value, config *Config) bool {
y := v_0.Args[0]
x := v.Args[1]
v.reset(OpARMADDSshiftLL)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
// match: (ADDS x (SRLconst [c] y))
@@ -1688,9 +1688,9 @@ func rewriteValueARM_OpARMADDS(v *Value, config *Config) bool {
c := v_1.AuxInt
y := v_1.Args[0]
v.reset(OpARMADDSshiftRL)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
// match: (ADDS (SRLconst [c] y) x)
@@ -1705,9 +1705,9 @@ func rewriteValueARM_OpARMADDS(v *Value, config *Config) bool {
y := v_0.Args[0]
x := v.Args[1]
v.reset(OpARMADDSshiftRL)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
// match: (ADDS x (SRAconst [c] y))
@@ -1722,9 +1722,9 @@ func rewriteValueARM_OpARMADDS(v *Value, config *Config) bool {
c := v_1.AuxInt
y := v_1.Args[0]
v.reset(OpARMADDSshiftRA)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
// match: (ADDS (SRAconst [c] y) x)
@@ -1739,9 +1739,9 @@ func rewriteValueARM_OpARMADDS(v *Value, config *Config) bool {
y := v_0.Args[0]
x := v.Args[1]
v.reset(OpARMADDSshiftRA)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
// match: (ADDS x (SLL y z))
@@ -1855,18 +1855,18 @@ func rewriteValueARM_OpARMADDSshiftLL(v *Value, config *Config) bool {
// cond:
// result: (ADDSconst [c] (SLLconst <x.Type> x [d]))
for {
+ d := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpARMMOVWconst {
break
}
c := v_0.AuxInt
x := v.Args[1]
- d := v.AuxInt
v.reset(OpARMADDSconst)
v.AuxInt = c
v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
- v0.AddArg(x)
v0.AuxInt = d
+ v0.AddArg(x)
v.AddArg(v0)
return true
}
@@ -1874,16 +1874,16 @@ func rewriteValueARM_OpARMADDSshiftLL(v *Value, config *Config) bool {
// cond:
// result: (ADDSconst x [int64(uint32(c)<<uint64(d))])
for {
+ d := v.AuxInt
x := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpARMMOVWconst {
break
}
c := v_1.AuxInt
- d := v.AuxInt
v.reset(OpARMADDSconst)
- v.AddArg(x)
v.AuxInt = int64(uint32(c) << uint64(d))
+ v.AddArg(x)
return true
}
return false
@@ -1922,9 +1922,9 @@ func rewriteValueARM_OpARMADDSshiftLLreg(v *Value, config *Config) bool {
}
c := v_2.AuxInt
v.reset(OpARMADDSshiftLL)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
return false
@@ -1936,18 +1936,18 @@ func rewriteValueARM_OpARMADDSshiftRA(v *Value, config *Config) bool {
// cond:
// result: (ADDSconst [c] (SRAconst <x.Type> x [d]))
for {
+ d := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpARMMOVWconst {
break
}
c := v_0.AuxInt
x := v.Args[1]
- d := v.AuxInt
v.reset(OpARMADDSconst)
v.AuxInt = c
v0 := b.NewValue0(v.Line, OpARMSRAconst, x.Type)
- v0.AddArg(x)
v0.AuxInt = d
+ v0.AddArg(x)
v.AddArg(v0)
return true
}
@@ -1955,16 +1955,16 @@ func rewriteValueARM_OpARMADDSshiftRA(v *Value, config *Config) bool {
// cond:
// result: (ADDSconst x [int64(int32(c)>>uint64(d))])
for {
+ d := v.AuxInt
x := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpARMMOVWconst {
break
}
c := v_1.AuxInt
- d := v.AuxInt
v.reset(OpARMADDSconst)
- v.AddArg(x)
v.AuxInt = int64(int32(c) >> uint64(d))
+ v.AddArg(x)
return true
}
return false
@@ -2003,9 +2003,9 @@ func rewriteValueARM_OpARMADDSshiftRAreg(v *Value, config *Config) bool {
}
c := v_2.AuxInt
v.reset(OpARMADDSshiftRA)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
return false
@@ -2017,18 +2017,18 @@ func rewriteValueARM_OpARMADDSshiftRL(v *Value, config *Config) bool {
// cond:
// result: (ADDSconst [c] (SRLconst <x.Type> x [d]))
for {
+ d := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpARMMOVWconst {
break
}
c := v_0.AuxInt
x := v.Args[1]
- d := v.AuxInt
v.reset(OpARMADDSconst)
v.AuxInt = c
v0 := b.NewValue0(v.Line, OpARMSRLconst, x.Type)
- v0.AddArg(x)
v0.AuxInt = d
+ v0.AddArg(x)
v.AddArg(v0)
return true
}
@@ -2036,16 +2036,16 @@ func rewriteValueARM_OpARMADDSshiftRL(v *Value, config *Config) bool {
// cond:
// result: (ADDSconst x [int64(uint32(c)>>uint64(d))])
for {
+ d := v.AuxInt
x := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpARMMOVWconst {
break
}
c := v_1.AuxInt
- d := v.AuxInt
v.reset(OpARMADDSconst)
- v.AddArg(x)
v.AuxInt = int64(uint32(c) >> uint64(d))
+ v.AddArg(x)
return true
}
return false
@@ -2084,9 +2084,9 @@ func rewriteValueARM_OpARMADDSshiftRLreg(v *Value, config *Config) bool {
}
c := v_2.AuxInt
v.reset(OpARMADDSshiftRL)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
return false
@@ -2196,18 +2196,18 @@ func rewriteValueARM_OpARMADDshiftLL(v *Value, config *Config) bool {
// cond:
// result: (ADDconst [c] (SLLconst <x.Type> x [d]))
for {
+ d := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpARMMOVWconst {
break
}
c := v_0.AuxInt
x := v.Args[1]
- d := v.AuxInt
v.reset(OpARMADDconst)
v.AuxInt = c
v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
- v0.AddArg(x)
v0.AuxInt = d
+ v0.AddArg(x)
v.AddArg(v0)
return true
}
@@ -2215,16 +2215,16 @@ func rewriteValueARM_OpARMADDshiftLL(v *Value, config *Config) bool {
// cond:
// result: (ADDconst x [int64(uint32(c)<<uint64(d))])
for {
+ d := v.AuxInt
x := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpARMMOVWconst {
break
}
c := v_1.AuxInt
- d := v.AuxInt
v.reset(OpARMADDconst)
- v.AddArg(x)
v.AuxInt = int64(uint32(c) << uint64(d))
+ v.AddArg(x)
return true
}
return false
@@ -2263,9 +2263,9 @@ func rewriteValueARM_OpARMADDshiftLLreg(v *Value, config *Config) bool {
}
c := v_2.AuxInt
v.reset(OpARMADDshiftLL)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
return false
@@ -2277,18 +2277,18 @@ func rewriteValueARM_OpARMADDshiftRA(v *Value, config *Config) bool {
// cond:
// result: (ADDconst [c] (SRAconst <x.Type> x [d]))
for {
+ d := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpARMMOVWconst {
break
}
c := v_0.AuxInt
x := v.Args[1]
- d := v.AuxInt
v.reset(OpARMADDconst)
v.AuxInt = c
v0 := b.NewValue0(v.Line, OpARMSRAconst, x.Type)
- v0.AddArg(x)
v0.AuxInt = d
+ v0.AddArg(x)
v.AddArg(v0)
return true
}
@@ -2296,16 +2296,16 @@ func rewriteValueARM_OpARMADDshiftRA(v *Value, config *Config) bool {
// cond:
// result: (ADDconst x [int64(int32(c)>>uint64(d))])
for {
+ d := v.AuxInt
x := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpARMMOVWconst {
break
}
c := v_1.AuxInt
- d := v.AuxInt
v.reset(OpARMADDconst)
- v.AddArg(x)
v.AuxInt = int64(int32(c) >> uint64(d))
+ v.AddArg(x)
return true
}
return false
@@ -2344,9 +2344,9 @@ func rewriteValueARM_OpARMADDshiftRAreg(v *Value, config *Config) bool {
}
c := v_2.AuxInt
v.reset(OpARMADDshiftRA)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
return false
@@ -2358,18 +2358,18 @@ func rewriteValueARM_OpARMADDshiftRL(v *Value, config *Config) bool {
// cond:
// result: (ADDconst [c] (SRLconst <x.Type> x [d]))
for {
+ d := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpARMMOVWconst {
break
}
c := v_0.AuxInt
x := v.Args[1]
- d := v.AuxInt
v.reset(OpARMADDconst)
v.AuxInt = c
v0 := b.NewValue0(v.Line, OpARMSRLconst, x.Type)
- v0.AddArg(x)
v0.AuxInt = d
+ v0.AddArg(x)
v.AddArg(v0)
return true
}
@@ -2377,16 +2377,16 @@ func rewriteValueARM_OpARMADDshiftRL(v *Value, config *Config) bool {
// cond:
// result: (ADDconst x [int64(uint32(c)>>uint64(d))])
for {
+ d := v.AuxInt
x := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpARMMOVWconst {
break
}
c := v_1.AuxInt
- d := v.AuxInt
v.reset(OpARMADDconst)
- v.AddArg(x)
v.AuxInt = int64(uint32(c) >> uint64(d))
+ v.AddArg(x)
return true
}
return false
@@ -2425,9 +2425,9 @@ func rewriteValueARM_OpARMADDshiftRLreg(v *Value, config *Config) bool {
}
c := v_2.AuxInt
v.reset(OpARMADDshiftRL)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
return false
@@ -2477,9 +2477,9 @@ func rewriteValueARM_OpARMAND(v *Value, config *Config) bool {
c := v_1.AuxInt
y := v_1.Args[0]
v.reset(OpARMANDshiftLL)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
// match: (AND (SLLconst [c] y) x)
@@ -2494,9 +2494,9 @@ func rewriteValueARM_OpARMAND(v *Value, config *Config) bool {
y := v_0.Args[0]
x := v.Args[1]
v.reset(OpARMANDshiftLL)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
// match: (AND x (SRLconst [c] y))
@@ -2511,9 +2511,9 @@ func rewriteValueARM_OpARMAND(v *Value, config *Config) bool {
c := v_1.AuxInt
y := v_1.Args[0]
v.reset(OpARMANDshiftRL)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
// match: (AND (SRLconst [c] y) x)
@@ -2528,9 +2528,9 @@ func rewriteValueARM_OpARMAND(v *Value, config *Config) bool {
y := v_0.Args[0]
x := v.Args[1]
v.reset(OpARMANDshiftRL)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
// match: (AND x (SRAconst [c] y))
@@ -2545,9 +2545,9 @@ func rewriteValueARM_OpARMAND(v *Value, config *Config) bool {
c := v_1.AuxInt
y := v_1.Args[0]
v.reset(OpARMANDshiftRA)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
// match: (AND (SRAconst [c] y) x)
@@ -2562,9 +2562,9 @@ func rewriteValueARM_OpARMAND(v *Value, config *Config) bool {
y := v_0.Args[0]
x := v.Args[1]
v.reset(OpARMANDshiftRA)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
// match: (AND x (SLL y z))
@@ -2706,12 +2706,12 @@ func rewriteValueARM_OpARMAND(v *Value, config *Config) bool {
if v_1.Op != OpARMMVNshiftLL {
break
}
- y := v_1.Args[0]
c := v_1.AuxInt
+ y := v_1.Args[0]
v.reset(OpARMBICshiftLL)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
// match: (AND x (MVNshiftRL y [c]))
@@ -2723,12 +2723,12 @@ func rewriteValueARM_OpARMAND(v *Value, config *Config) bool {
if v_1.Op != OpARMMVNshiftRL {
break
}
- y := v_1.Args[0]
c := v_1.AuxInt
+ y := v_1.Args[0]
v.reset(OpARMBICshiftRL)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
// match: (AND x (MVNshiftRA y [c]))
@@ -2740,12 +2740,12 @@ func rewriteValueARM_OpARMAND(v *Value, config *Config) bool {
if v_1.Op != OpARMMVNshiftRA {
break
}
- y := v_1.Args[0]
c := v_1.AuxInt
+ y := v_1.Args[0]
v.reset(OpARMBICshiftRA)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
return false
@@ -2817,18 +2817,18 @@ func rewriteValueARM_OpARMANDshiftLL(v *Value, config *Config) bool {
// cond:
// result: (ANDconst [c] (SLLconst <x.Type> x [d]))
for {
+ d := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpARMMOVWconst {
break
}
c := v_0.AuxInt
x := v.Args[1]
- d := v.AuxInt
v.reset(OpARMANDconst)
v.AuxInt = c
v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
- v0.AddArg(x)
v0.AuxInt = d
+ v0.AddArg(x)
v.AddArg(v0)
return true
}
@@ -2836,32 +2836,32 @@ func rewriteValueARM_OpARMANDshiftLL(v *Value, config *Config) bool {
// cond:
// result: (ANDconst x [int64(uint32(c)<<uint64(d))])
for {
+ d := v.AuxInt
x := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpARMMOVWconst {
break
}
c := v_1.AuxInt
- d := v.AuxInt
v.reset(OpARMANDconst)
- v.AddArg(x)
v.AuxInt = int64(uint32(c) << uint64(d))
+ v.AddArg(x)
return true
}
// match: (ANDshiftLL x y:(SLLconst x [c]) [d])
// cond: c==d
// result: y
for {
+ d := v.AuxInt
x := v.Args[0]
y := v.Args[1]
if y.Op != OpARMSLLconst {
break
}
+ c := y.AuxInt
if x != y.Args[0] {
break
}
- c := y.AuxInt
- d := v.AuxInt
if !(c == d) {
break
}
@@ -2906,9 +2906,9 @@ func rewriteValueARM_OpARMANDshiftLLreg(v *Value, config *Config) bool {
}
c := v_2.AuxInt
v.reset(OpARMANDshiftLL)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
return false
@@ -2920,18 +2920,18 @@ func rewriteValueARM_OpARMANDshiftRA(v *Value, config *Config) bool {
// cond:
// result: (ANDconst [c] (SRAconst <x.Type> x [d]))
for {
+ d := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpARMMOVWconst {
break
}
c := v_0.AuxInt
x := v.Args[1]
- d := v.AuxInt
v.reset(OpARMANDconst)
v.AuxInt = c
v0 := b.NewValue0(v.Line, OpARMSRAconst, x.Type)
- v0.AddArg(x)
v0.AuxInt = d
+ v0.AddArg(x)
v.AddArg(v0)
return true
}
@@ -2939,32 +2939,32 @@ func rewriteValueARM_OpARMANDshiftRA(v *Value, config *Config) bool {
// cond:
// result: (ANDconst x [int64(int32(c)>>uint64(d))])
for {
+ d := v.AuxInt
x := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpARMMOVWconst {
break
}
c := v_1.AuxInt
- d := v.AuxInt
v.reset(OpARMANDconst)
- v.AddArg(x)
v.AuxInt = int64(int32(c) >> uint64(d))
+ v.AddArg(x)
return true
}
// match: (ANDshiftRA x y:(SRAconst x [c]) [d])
// cond: c==d
// result: y
for {
+ d := v.AuxInt
x := v.Args[0]
y := v.Args[1]
if y.Op != OpARMSRAconst {
break
}
+ c := y.AuxInt
if x != y.Args[0] {
break
}
- c := y.AuxInt
- d := v.AuxInt
if !(c == d) {
break
}
@@ -3009,9 +3009,9 @@ func rewriteValueARM_OpARMANDshiftRAreg(v *Value, config *Config) bool {
}
c := v_2.AuxInt
v.reset(OpARMANDshiftRA)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
return false
@@ -3023,18 +3023,18 @@ func rewriteValueARM_OpARMANDshiftRL(v *Value, config *Config) bool {
// cond:
// result: (ANDconst [c] (SRLconst <x.Type> x [d]))
for {
+ d := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpARMMOVWconst {
break
}
c := v_0.AuxInt
x := v.Args[1]
- d := v.AuxInt
v.reset(OpARMANDconst)
v.AuxInt = c
v0 := b.NewValue0(v.Line, OpARMSRLconst, x.Type)
- v0.AddArg(x)
v0.AuxInt = d
+ v0.AddArg(x)
v.AddArg(v0)
return true
}
@@ -3042,32 +3042,32 @@ func rewriteValueARM_OpARMANDshiftRL(v *Value, config *Config) bool {
// cond:
// result: (ANDconst x [int64(uint32(c)>>uint64(d))])
for {
+ d := v.AuxInt
x := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpARMMOVWconst {
break
}
c := v_1.AuxInt
- d := v.AuxInt
v.reset(OpARMANDconst)
- v.AddArg(x)
v.AuxInt = int64(uint32(c) >> uint64(d))
+ v.AddArg(x)
return true
}
// match: (ANDshiftRL x y:(SRLconst x [c]) [d])
// cond: c==d
// result: y
for {
+ d := v.AuxInt
x := v.Args[0]
y := v.Args[1]
if y.Op != OpARMSRLconst {
break
}
+ c := y.AuxInt
if x != y.Args[0] {
break
}
- c := y.AuxInt
- d := v.AuxInt
if !(c == d) {
break
}
@@ -3112,209 +3112,12 @@ func rewriteValueARM_OpARMANDshiftRLreg(v *Value, config *Config) bool {
}
c := v_2.AuxInt
v.reset(OpARMANDshiftRL)
- v.AddArg(x)
- v.AddArg(y)
v.AuxInt = c
- return true
- }
- return false
-}
-func rewriteValueARM_OpAdd16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Add16 x y)
- // cond:
- // result: (ADD x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMADD)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueARM_OpAdd32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Add32 x y)
- // cond:
- // result: (ADD x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMADD)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueARM_OpAdd32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Add32F x y)
- // cond:
- // result: (ADDF x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMADDF)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueARM_OpAdd32carry(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Add32carry x y)
- // cond:
- // result: (ADDS x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMADDS)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueARM_OpAdd32withcarry(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Add32withcarry x y c)
- // cond:
- // result: (ADC x y c)
- for {
- x := v.Args[0]
- y := v.Args[1]
- c := v.Args[2]
- v.reset(OpARMADC)
- v.AddArg(x)
- v.AddArg(y)
- v.AddArg(c)
- return true
- }
-}
-func rewriteValueARM_OpAdd64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Add64F x y)
- // cond:
- // result: (ADDD x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMADDD)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueARM_OpAdd8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Add8 x y)
- // cond:
- // result: (ADD x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMADD)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueARM_OpAddPtr(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (AddPtr x y)
- // cond:
- // result: (ADD x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMADD)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueARM_OpAddr(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Addr {sym} base)
- // cond:
- // result: (MOVWaddr {sym} base)
- for {
- sym := v.Aux
- base := v.Args[0]
- v.reset(OpARMMOVWaddr)
- v.Aux = sym
- v.AddArg(base)
- return true
- }
-}
-func rewriteValueARM_OpAnd16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (And16 x y)
- // cond:
- // result: (AND x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMAND)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueARM_OpAnd32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (And32 x y)
- // cond:
- // result: (AND x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMAND)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueARM_OpAnd8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (And8 x y)
- // cond:
- // result: (AND x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMAND)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueARM_OpAndB(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (AndB x y)
- // cond:
- // result: (AND x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMAND)
v.AddArg(x)
v.AddArg(y)
return true
}
+ return false
}
func rewriteValueARM_OpARMBIC(v *Value, config *Config) bool {
b := v.Block
@@ -3346,9 +3149,9 @@ func rewriteValueARM_OpARMBIC(v *Value, config *Config) bool {
c := v_1.AuxInt
y := v_1.Args[0]
v.reset(OpARMBICshiftLL)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
// match: (BIC x (SRLconst [c] y))
@@ -3363,9 +3166,9 @@ func rewriteValueARM_OpARMBIC(v *Value, config *Config) bool {
c := v_1.AuxInt
y := v_1.Args[0]
v.reset(OpARMBICshiftRL)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
// match: (BIC x (SRAconst [c] y))
@@ -3380,9 +3183,9 @@ func rewriteValueARM_OpARMBIC(v *Value, config *Config) bool {
c := v_1.AuxInt
y := v_1.Args[0]
v.reset(OpARMBICshiftRA)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
// match: (BIC x (SLL y z))
@@ -3501,32 +3304,32 @@ func rewriteValueARM_OpARMBICshiftLL(v *Value, config *Config) bool {
// cond:
// result: (BICconst x [int64(uint32(c)<<uint64(d))])
for {
+ d := v.AuxInt
x := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpARMMOVWconst {
break
}
c := v_1.AuxInt
- d := v.AuxInt
v.reset(OpARMBICconst)
- v.AddArg(x)
v.AuxInt = int64(uint32(c) << uint64(d))
+ v.AddArg(x)
return true
}
// match: (BICshiftLL x (SLLconst x [c]) [d])
// cond: c==d
// result: (MOVWconst [0])
for {
+ d := v.AuxInt
x := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpARMSLLconst {
break
}
+ c := v_1.AuxInt
if x != v_1.Args[0] {
break
}
- c := v_1.AuxInt
- d := v.AuxInt
if !(c == d) {
break
}
@@ -3551,9 +3354,9 @@ func rewriteValueARM_OpARMBICshiftLLreg(v *Value, config *Config) bool {
}
c := v_2.AuxInt
v.reset(OpARMBICshiftLL)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
return false
@@ -3565,32 +3368,32 @@ func rewriteValueARM_OpARMBICshiftRA(v *Value, config *Config) bool {
// cond:
// result: (BICconst x [int64(int32(c)>>uint64(d))])
for {
+ d := v.AuxInt
x := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpARMMOVWconst {
break
}
c := v_1.AuxInt
- d := v.AuxInt
v.reset(OpARMBICconst)
- v.AddArg(x)
v.AuxInt = int64(int32(c) >> uint64(d))
+ v.AddArg(x)
return true
}
// match: (BICshiftRA x (SRAconst x [c]) [d])
// cond: c==d
// result: (MOVWconst [0])
for {
+ d := v.AuxInt
x := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpARMSRAconst {
break
}
+ c := v_1.AuxInt
if x != v_1.Args[0] {
break
}
- c := v_1.AuxInt
- d := v.AuxInt
if !(c == d) {
break
}
@@ -3615,9 +3418,9 @@ func rewriteValueARM_OpARMBICshiftRAreg(v *Value, config *Config) bool {
}
c := v_2.AuxInt
v.reset(OpARMBICshiftRA)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
return false
@@ -3629,32 +3432,32 @@ func rewriteValueARM_OpARMBICshiftRL(v *Value, config *Config) bool {
// cond:
// result: (BICconst x [int64(uint32(c)>>uint64(d))])
for {
+ d := v.AuxInt
x := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpARMMOVWconst {
break
}
c := v_1.AuxInt
- d := v.AuxInt
v.reset(OpARMBICconst)
- v.AddArg(x)
v.AuxInt = int64(uint32(c) >> uint64(d))
+ v.AddArg(x)
return true
}
// match: (BICshiftRL x (SRLconst x [c]) [d])
// cond: c==d
// result: (MOVWconst [0])
for {
+ d := v.AuxInt
x := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpARMSRLconst {
break
}
+ c := v_1.AuxInt
if x != v_1.Args[0] {
break
}
- c := v_1.AuxInt
- d := v.AuxInt
if !(c == d) {
break
}
@@ -3679,9 +3482,9 @@ func rewriteValueARM_OpARMBICshiftRLreg(v *Value, config *Config) bool {
}
c := v_2.AuxInt
v.reset(OpARMBICshiftRL)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
return false
@@ -3693,11 +3496,11 @@ func rewriteValueARM_OpARMCMOVWHSconst(v *Value, config *Config) bool {
// cond:
// result: (MOVWconst [c])
for {
+ c := v.AuxInt
v_1 := v.Args[1]
if v_1.Op != OpARMFlagEQ {
break
}
- c := v.AuxInt
v.reset(OpARMMOVWconst)
v.AuxInt = c
return true
@@ -3720,11 +3523,11 @@ func rewriteValueARM_OpARMCMOVWHSconst(v *Value, config *Config) bool {
// cond:
// result: (MOVWconst [c])
for {
+ c := v.AuxInt
v_1 := v.Args[1]
if v_1.Op != OpARMFlagLT_UGT {
break
}
- c := v.AuxInt
v.reset(OpARMMOVWconst)
v.AuxInt = c
return true
@@ -3747,11 +3550,11 @@ func rewriteValueARM_OpARMCMOVWHSconst(v *Value, config *Config) bool {
// cond:
// result: (MOVWconst [c])
for {
+ c := v.AuxInt
v_1 := v.Args[1]
if v_1.Op != OpARMFlagGT_UGT {
break
}
- c := v.AuxInt
v.reset(OpARMMOVWconst)
v.AuxInt = c
return true
@@ -3760,17 +3563,17 @@ func rewriteValueARM_OpARMCMOVWHSconst(v *Value, config *Config) bool {
// cond:
// result: (CMOVWLSconst x flags [c])
for {
+ c := v.AuxInt
x := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpARMInvertFlags {
break
}
flags := v_1.Args[0]
- c := v.AuxInt
v.reset(OpARMCMOVWLSconst)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(flags)
- v.AuxInt = c
return true
}
return false
@@ -3782,11 +3585,11 @@ func rewriteValueARM_OpARMCMOVWLSconst(v *Value, config *Config) bool {
// cond:
// result: (MOVWconst [c])
for {
+ c := v.AuxInt
v_1 := v.Args[1]
if v_1.Op != OpARMFlagEQ {
break
}
- c := v.AuxInt
v.reset(OpARMMOVWconst)
v.AuxInt = c
return true
@@ -3795,11 +3598,11 @@ func rewriteValueARM_OpARMCMOVWLSconst(v *Value, config *Config) bool {
// cond:
// result: (MOVWconst [c])
for {
+ c := v.AuxInt
v_1 := v.Args[1]
if v_1.Op != OpARMFlagLT_ULT {
break
}
- c := v.AuxInt
v.reset(OpARMMOVWconst)
v.AuxInt = c
return true
@@ -3822,11 +3625,11 @@ func rewriteValueARM_OpARMCMOVWLSconst(v *Value, config *Config) bool {
// cond:
// result: (MOVWconst [c])
for {
+ c := v.AuxInt
v_1 := v.Args[1]
if v_1.Op != OpARMFlagGT_ULT {
break
}
- c := v.AuxInt
v.reset(OpARMMOVWconst)
v.AuxInt = c
return true
@@ -3849,17 +3652,17 @@ func rewriteValueARM_OpARMCMOVWLSconst(v *Value, config *Config) bool {
// cond:
// result: (CMOVWHSconst x flags [c])
for {
+ c := v.AuxInt
x := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpARMInvertFlags {
break
}
flags := v_1.Args[0]
- c := v.AuxInt
v.reset(OpARMCMOVWHSconst)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(flags)
- v.AuxInt = c
return true
}
return false
@@ -3911,9 +3714,9 @@ func rewriteValueARM_OpARMCMP(v *Value, config *Config) bool {
c := v_1.AuxInt
y := v_1.Args[0]
v.reset(OpARMCMPshiftLL)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
// match: (CMP (SLLconst [c] y) x)
@@ -3929,9 +3732,9 @@ func rewriteValueARM_OpARMCMP(v *Value, config *Config) bool {
x := v.Args[1]
v.reset(OpARMInvertFlags)
v0 := b.NewValue0(v.Line, OpARMCMPshiftLL, TypeFlags)
+ v0.AuxInt = c
v0.AddArg(x)
v0.AddArg(y)
- v0.AuxInt = c
v.AddArg(v0)
return true
}
@@ -3947,9 +3750,9 @@ func rewriteValueARM_OpARMCMP(v *Value, config *Config) bool {
c := v_1.AuxInt
y := v_1.Args[0]
v.reset(OpARMCMPshiftRL)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
// match: (CMP (SRLconst [c] y) x)
@@ -3965,9 +3768,9 @@ func rewriteValueARM_OpARMCMP(v *Value, config *Config) bool {
x := v.Args[1]
v.reset(OpARMInvertFlags)
v0 := b.NewValue0(v.Line, OpARMCMPshiftRL, TypeFlags)
+ v0.AuxInt = c
v0.AddArg(x)
v0.AddArg(y)
- v0.AuxInt = c
v.AddArg(v0)
return true
}
@@ -3983,9 +3786,9 @@ func rewriteValueARM_OpARMCMP(v *Value, config *Config) bool {
c := v_1.AuxInt
y := v_1.Args[0]
v.reset(OpARMCMPshiftRA)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
// match: (CMP (SRAconst [c] y) x)
@@ -4001,9 +3804,9 @@ func rewriteValueARM_OpARMCMP(v *Value, config *Config) bool {
x := v.Args[1]
v.reset(OpARMInvertFlags)
v0 := b.NewValue0(v.Line, OpARMCMPshiftRA, TypeFlags)
+ v0.AuxInt = c
v0.AddArg(x)
v0.AddArg(y)
- v0.AuxInt = c
v.AddArg(v0)
return true
}
@@ -4166,12 +3969,12 @@ func rewriteValueARM_OpARMCMPconst(v *Value, config *Config) bool {
// cond: int32(x)==int32(y)
// result: (FlagEQ)
for {
+ y := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpARMMOVWconst {
break
}
x := v_0.AuxInt
- y := v.AuxInt
if !(int32(x) == int32(y)) {
break
}
@@ -4182,12 +3985,12 @@ func rewriteValueARM_OpARMCMPconst(v *Value, config *Config) bool {
// cond: int32(x)<int32(y) && uint32(x)<uint32(y)
// result: (FlagLT_ULT)
for {
+ y := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpARMMOVWconst {
break
}
x := v_0.AuxInt
- y := v.AuxInt
if !(int32(x) < int32(y) && uint32(x) < uint32(y)) {
break
}
@@ -4198,12 +4001,12 @@ func rewriteValueARM_OpARMCMPconst(v *Value, config *Config) bool {
// cond: int32(x)<int32(y) && uint32(x)>uint32(y)
// result: (FlagLT_UGT)
for {
+ y := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpARMMOVWconst {
break
}
x := v_0.AuxInt
- y := v.AuxInt
if !(int32(x) < int32(y) && uint32(x) > uint32(y)) {
break
}
@@ -4214,12 +4017,12 @@ func rewriteValueARM_OpARMCMPconst(v *Value, config *Config) bool {
// cond: int32(x)>int32(y) && uint32(x)<uint32(y)
// result: (FlagGT_ULT)
for {
+ y := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpARMMOVWconst {
break
}
x := v_0.AuxInt
- y := v.AuxInt
if !(int32(x) > int32(y) && uint32(x) < uint32(y)) {
break
}
@@ -4230,12 +4033,12 @@ func rewriteValueARM_OpARMCMPconst(v *Value, config *Config) bool {
// cond: int32(x)>int32(y) && uint32(x)>uint32(y)
// result: (FlagGT_UGT)
for {
+ y := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpARMMOVWconst {
break
}
x := v_0.AuxInt
- y := v.AuxInt
if !(int32(x) > int32(y) && uint32(x) > uint32(y)) {
break
}
@@ -4246,11 +4049,11 @@ func rewriteValueARM_OpARMCMPconst(v *Value, config *Config) bool {
// cond: 0xff < c
// result: (FlagLT_ULT)
for {
+ c := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpARMMOVBUreg {
break
}
- c := v.AuxInt
if !(0xff < c) {
break
}
@@ -4261,11 +4064,11 @@ func rewriteValueARM_OpARMCMPconst(v *Value, config *Config) bool {
// cond: 0xffff < c
// result: (FlagLT_ULT)
for {
+ c := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpARMMOVHUreg {
break
}
- c := v.AuxInt
if !(0xffff < c) {
break
}
@@ -4276,12 +4079,12 @@ func rewriteValueARM_OpARMCMPconst(v *Value, config *Config) bool {
// cond: 0 <= int32(m) && int32(m) < int32(n)
// result: (FlagLT_ULT)
for {
+ n := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpARMANDconst {
break
}
m := v_0.AuxInt
- n := v.AuxInt
if !(0 <= int32(m) && int32(m) < int32(n)) {
break
}
@@ -4292,12 +4095,12 @@ func rewriteValueARM_OpARMCMPconst(v *Value, config *Config) bool {
// cond: 0 <= n && 0 < c && c <= 32 && (1<<uint32(32-c)) <= uint32(n)
// result: (FlagLT_ULT)
for {
+ n := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpARMSRLconst {
break
}
c := v_0.AuxInt
- n := v.AuxInt
if !(0 <= n && 0 < c && c <= 32 && (1<<uint32(32-c)) <= uint32(n)) {
break
}
@@ -4313,19 +4116,19 @@ func rewriteValueARM_OpARMCMPshiftLL(v *Value, config *Config) bool {
// cond:
// result: (InvertFlags (CMPconst [c] (SLLconst <x.Type> x [d])))
for {
+ d := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpARMMOVWconst {
break
}
c := v_0.AuxInt
x := v.Args[1]
- d := v.AuxInt
v.reset(OpARMInvertFlags)
v0 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
v0.AuxInt = c
v1 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
- v1.AddArg(x)
v1.AuxInt = d
+ v1.AddArg(x)
v0.AddArg(v1)
v.AddArg(v0)
return true
@@ -4334,16 +4137,16 @@ func rewriteValueARM_OpARMCMPshiftLL(v *Value, config *Config) bool {
// cond:
// result: (CMPconst x [int64(uint32(c)<<uint64(d))])
for {
+ d := v.AuxInt
x := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpARMMOVWconst {
break
}
c := v_1.AuxInt
- d := v.AuxInt
v.reset(OpARMCMPconst)
- v.AddArg(x)
v.AuxInt = int64(uint32(c) << uint64(d))
+ v.AddArg(x)
return true
}
return false
@@ -4384,9 +4187,9 @@ func rewriteValueARM_OpARMCMPshiftLLreg(v *Value, config *Config) bool {
}
c := v_2.AuxInt
v.reset(OpARMCMPshiftLL)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
return false
@@ -4398,19 +4201,19 @@ func rewriteValueARM_OpARMCMPshiftRA(v *Value, config *Config) bool {
// cond:
// result: (InvertFlags (CMPconst [c] (SRAconst <x.Type> x [d])))
for {
+ d := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpARMMOVWconst {
break
}
c := v_0.AuxInt
x := v.Args[1]
- d := v.AuxInt
v.reset(OpARMInvertFlags)
v0 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
v0.AuxInt = c
v1 := b.NewValue0(v.Line, OpARMSRAconst, x.Type)
- v1.AddArg(x)
v1.AuxInt = d
+ v1.AddArg(x)
v0.AddArg(v1)
v.AddArg(v0)
return true
@@ -4419,16 +4222,16 @@ func rewriteValueARM_OpARMCMPshiftRA(v *Value, config *Config) bool {
// cond:
// result: (CMPconst x [int64(int32(c)>>uint64(d))])
for {
+ d := v.AuxInt
x := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpARMMOVWconst {
break
}
c := v_1.AuxInt
- d := v.AuxInt
v.reset(OpARMCMPconst)
- v.AddArg(x)
v.AuxInt = int64(int32(c) >> uint64(d))
+ v.AddArg(x)
return true
}
return false
@@ -4469,9 +4272,9 @@ func rewriteValueARM_OpARMCMPshiftRAreg(v *Value, config *Config) bool {
}
c := v_2.AuxInt
v.reset(OpARMCMPshiftRA)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
return false
@@ -4483,19 +4286,19 @@ func rewriteValueARM_OpARMCMPshiftRL(v *Value, config *Config) bool {
// cond:
// result: (InvertFlags (CMPconst [c] (SRLconst <x.Type> x [d])))
for {
+ d := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpARMMOVWconst {
break
}
c := v_0.AuxInt
x := v.Args[1]
- d := v.AuxInt
v.reset(OpARMInvertFlags)
v0 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
v0.AuxInt = c
v1 := b.NewValue0(v.Line, OpARMSRLconst, x.Type)
- v1.AddArg(x)
v1.AuxInt = d
+ v1.AddArg(x)
v0.AddArg(v1)
v.AddArg(v0)
return true
@@ -4504,16 +4307,16 @@ func rewriteValueARM_OpARMCMPshiftRL(v *Value, config *Config) bool {
// cond:
// result: (CMPconst x [int64(uint32(c)>>uint64(d))])
for {
+ d := v.AuxInt
x := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpARMMOVWconst {
break
}
c := v_1.AuxInt
- d := v.AuxInt
v.reset(OpARMCMPconst)
- v.AddArg(x)
v.AuxInt = int64(uint32(c) >> uint64(d))
+ v.AddArg(x)
return true
}
return false
@@ -4554,306 +4357,13 @@ func rewriteValueARM_OpARMCMPshiftRLreg(v *Value, config *Config) bool {
}
c := v_2.AuxInt
v.reset(OpARMCMPshiftRL)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
return false
}
-func rewriteValueARM_OpClosureCall(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (ClosureCall [argwid] entry closure mem)
- // cond:
- // result: (CALLclosure [argwid] entry closure mem)
- for {
- argwid := v.AuxInt
- entry := v.Args[0]
- closure := v.Args[1]
- mem := v.Args[2]
- v.reset(OpARMCALLclosure)
- v.AuxInt = argwid
- v.AddArg(entry)
- v.AddArg(closure)
- v.AddArg(mem)
- return true
- }
-}
-func rewriteValueARM_OpCom16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Com16 x)
- // cond:
- // result: (MVN x)
- for {
- x := v.Args[0]
- v.reset(OpARMMVN)
- v.AddArg(x)
- return true
- }
-}
-func rewriteValueARM_OpCom32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Com32 x)
- // cond:
- // result: (MVN x)
- for {
- x := v.Args[0]
- v.reset(OpARMMVN)
- v.AddArg(x)
- return true
- }
-}
-func rewriteValueARM_OpCom8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Com8 x)
- // cond:
- // result: (MVN x)
- for {
- x := v.Args[0]
- v.reset(OpARMMVN)
- v.AddArg(x)
- return true
- }
-}
-func rewriteValueARM_OpConst16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Const16 [val])
- // cond:
- // result: (MOVWconst [val])
- for {
- val := v.AuxInt
- v.reset(OpARMMOVWconst)
- v.AuxInt = val
- return true
- }
-}
-func rewriteValueARM_OpConst32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Const32 [val])
- // cond:
- // result: (MOVWconst [val])
- for {
- val := v.AuxInt
- v.reset(OpARMMOVWconst)
- v.AuxInt = val
- return true
- }
-}
-func rewriteValueARM_OpConst32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Const32F [val])
- // cond:
- // result: (MOVFconst [val])
- for {
- val := v.AuxInt
- v.reset(OpARMMOVFconst)
- v.AuxInt = val
- return true
- }
-}
-func rewriteValueARM_OpConst64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Const64F [val])
- // cond:
- // result: (MOVDconst [val])
- for {
- val := v.AuxInt
- v.reset(OpARMMOVDconst)
- v.AuxInt = val
- return true
- }
-}
-func rewriteValueARM_OpConst8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Const8 [val])
- // cond:
- // result: (MOVWconst [val])
- for {
- val := v.AuxInt
- v.reset(OpARMMOVWconst)
- v.AuxInt = val
- return true
- }
-}
-func rewriteValueARM_OpConstBool(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (ConstBool [b])
- // cond:
- // result: (MOVWconst [b])
- for {
- b := v.AuxInt
- v.reset(OpARMMOVWconst)
- v.AuxInt = b
- return true
- }
-}
-func rewriteValueARM_OpConstNil(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (ConstNil)
- // cond:
- // result: (MOVWconst [0])
- for {
- v.reset(OpARMMOVWconst)
- v.AuxInt = 0
- return true
- }
-}
-func rewriteValueARM_OpConvert(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Convert x mem)
- // cond:
- // result: (MOVWconvert x mem)
- for {
- x := v.Args[0]
- mem := v.Args[1]
- v.reset(OpARMMOVWconvert)
- v.AddArg(x)
- v.AddArg(mem)
- return true
- }
-}
-func rewriteValueARM_OpCvt32Fto32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Cvt32Fto32 x)
- // cond:
- // result: (MOVFW x)
- for {
- x := v.Args[0]
- v.reset(OpARMMOVFW)
- v.AddArg(x)
- return true
- }
-}
-func rewriteValueARM_OpCvt32Fto32U(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Cvt32Fto32U x)
- // cond:
- // result: (MOVFWU x)
- for {
- x := v.Args[0]
- v.reset(OpARMMOVFWU)
- v.AddArg(x)
- return true
- }
-}
-func rewriteValueARM_OpCvt32Fto64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Cvt32Fto64F x)
- // cond:
- // result: (MOVFD x)
- for {
- x := v.Args[0]
- v.reset(OpARMMOVFD)
- v.AddArg(x)
- return true
- }
-}
-func rewriteValueARM_OpCvt32Uto32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Cvt32Uto32F x)
- // cond:
- // result: (MOVWUF x)
- for {
- x := v.Args[0]
- v.reset(OpARMMOVWUF)
- v.AddArg(x)
- return true
- }
-}
-func rewriteValueARM_OpCvt32Uto64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Cvt32Uto64F x)
- // cond:
- // result: (MOVWUD x)
- for {
- x := v.Args[0]
- v.reset(OpARMMOVWUD)
- v.AddArg(x)
- return true
- }
-}
-func rewriteValueARM_OpCvt32to32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Cvt32to32F x)
- // cond:
- // result: (MOVWF x)
- for {
- x := v.Args[0]
- v.reset(OpARMMOVWF)
- v.AddArg(x)
- return true
- }
-}
-func rewriteValueARM_OpCvt32to64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Cvt32to64F x)
- // cond:
- // result: (MOVWD x)
- for {
- x := v.Args[0]
- v.reset(OpARMMOVWD)
- v.AddArg(x)
- return true
- }
-}
-func rewriteValueARM_OpCvt64Fto32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Cvt64Fto32 x)
- // cond:
- // result: (MOVDW x)
- for {
- x := v.Args[0]
- v.reset(OpARMMOVDW)
- v.AddArg(x)
- return true
- }
-}
-func rewriteValueARM_OpCvt64Fto32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Cvt64Fto32F x)
- // cond:
- // result: (MOVDF x)
- for {
- x := v.Args[0]
- v.reset(OpARMMOVDF)
- v.AddArg(x)
- return true
- }
-}
-func rewriteValueARM_OpCvt64Fto32U(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Cvt64Fto32U x)
- // cond:
- // result: (MOVDWU x)
- for {
- x := v.Args[0]
- v.reset(OpARMMOVDWU)
- v.AddArg(x)
- return true
- }
-}
func rewriteValueARM_OpARMDIV(v *Value, config *Config) bool {
b := v.Block
_ = b
@@ -4935,285 +4445,6 @@ func rewriteValueARM_OpARMDIVU(v *Value, config *Config) bool {
}
return false
}
-func rewriteValueARM_OpDeferCall(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (DeferCall [argwid] mem)
- // cond:
- // result: (CALLdefer [argwid] mem)
- for {
- argwid := v.AuxInt
- mem := v.Args[0]
- v.reset(OpARMCALLdefer)
- v.AuxInt = argwid
- v.AddArg(mem)
- return true
- }
-}
-func rewriteValueARM_OpDiv16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Div16 x y)
- // cond:
- // result: (DIV (SignExt16to32 x) (SignExt16to32 y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMDIV)
- v0 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
- v0.AddArg(x)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
- v1.AddArg(y)
- v.AddArg(v1)
- return true
- }
-}
-func rewriteValueARM_OpDiv16u(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Div16u x y)
- // cond:
- // result: (DIVU (ZeroExt16to32 x) (ZeroExt16to32 y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMDIVU)
- v0 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
- v0.AddArg(x)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
- v1.AddArg(y)
- v.AddArg(v1)
- return true
- }
-}
-func rewriteValueARM_OpDiv32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Div32 x y)
- // cond:
- // result: (DIV x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMDIV)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueARM_OpDiv32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Div32F x y)
- // cond:
- // result: (DIVF x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMDIVF)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueARM_OpDiv32u(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Div32u x y)
- // cond:
- // result: (DIVU x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMDIVU)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueARM_OpDiv64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Div64F x y)
- // cond:
- // result: (DIVD x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMDIVD)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueARM_OpDiv8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Div8 x y)
- // cond:
- // result: (DIV (SignExt8to32 x) (SignExt8to32 y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMDIV)
- v0 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
- v0.AddArg(x)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
- v1.AddArg(y)
- v.AddArg(v1)
- return true
- }
-}
-func rewriteValueARM_OpDiv8u(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Div8u x y)
- // cond:
- // result: (DIVU (ZeroExt8to32 x) (ZeroExt8to32 y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMDIVU)
- v0 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
- v0.AddArg(x)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
- v1.AddArg(y)
- v.AddArg(v1)
- return true
- }
-}
-func rewriteValueARM_OpEq16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Eq16 x y)
- // cond:
- // result: (Equal (CMP (ZeroExt16to32 x) (ZeroExt16to32 y)))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMEqual)
- v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
- v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
- v1.AddArg(x)
- v0.AddArg(v1)
- v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
- v2.AddArg(y)
- v0.AddArg(v2)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueARM_OpEq32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Eq32 x y)
- // cond:
- // result: (Equal (CMP x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMEqual)
- v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueARM_OpEq32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Eq32F x y)
- // cond:
- // result: (Equal (CMPF x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMEqual)
- v0 := b.NewValue0(v.Line, OpARMCMPF, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueARM_OpEq64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Eq64F x y)
- // cond:
- // result: (Equal (CMPD x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMEqual)
- v0 := b.NewValue0(v.Line, OpARMCMPD, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueARM_OpEq8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Eq8 x y)
- // cond:
- // result: (Equal (CMP (ZeroExt8to32 x) (ZeroExt8to32 y)))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMEqual)
- v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
- v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
- v1.AddArg(x)
- v0.AddArg(v1)
- v2 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
- v2.AddArg(y)
- v0.AddArg(v2)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueARM_OpEqB(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (EqB x y)
- // cond:
- // result: (XORconst [1] (XOR <config.fe.TypeBool()> x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMXORconst)
- v.AuxInt = 1
- v0 := b.NewValue0(v.Line, OpARMXOR, config.fe.TypeBool())
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueARM_OpEqPtr(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (EqPtr x y)
- // cond:
- // result: (Equal (CMP x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMEqual)
- v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
func rewriteValueARM_OpARMEqual(v *Value, config *Config) bool {
b := v.Block
_ = b
@@ -5292,336 +4523,6 @@ func rewriteValueARM_OpARMEqual(v *Value, config *Config) bool {
}
return false
}
-func rewriteValueARM_OpGeq16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Geq16 x y)
- // cond:
- // result: (GreaterEqual (CMP (SignExt16to32 x) (SignExt16to32 y)))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMGreaterEqual)
- v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
- v1 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
- v1.AddArg(x)
- v0.AddArg(v1)
- v2 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
- v2.AddArg(y)
- v0.AddArg(v2)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueARM_OpGeq16U(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Geq16U x y)
- // cond:
- // result: (GreaterEqualU (CMP (ZeroExt16to32 x) (ZeroExt16to32 y)))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMGreaterEqualU)
- v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
- v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
- v1.AddArg(x)
- v0.AddArg(v1)
- v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
- v2.AddArg(y)
- v0.AddArg(v2)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueARM_OpGeq32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Geq32 x y)
- // cond:
- // result: (GreaterEqual (CMP x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMGreaterEqual)
- v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueARM_OpGeq32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Geq32F x y)
- // cond:
- // result: (GreaterEqual (CMPF x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMGreaterEqual)
- v0 := b.NewValue0(v.Line, OpARMCMPF, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueARM_OpGeq32U(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Geq32U x y)
- // cond:
- // result: (GreaterEqualU (CMP x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMGreaterEqualU)
- v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueARM_OpGeq64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Geq64F x y)
- // cond:
- // result: (GreaterEqual (CMPD x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMGreaterEqual)
- v0 := b.NewValue0(v.Line, OpARMCMPD, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueARM_OpGeq8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Geq8 x y)
- // cond:
- // result: (GreaterEqual (CMP (SignExt8to32 x) (SignExt8to32 y)))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMGreaterEqual)
- v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
- v1 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
- v1.AddArg(x)
- v0.AddArg(v1)
- v2 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
- v2.AddArg(y)
- v0.AddArg(v2)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueARM_OpGeq8U(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Geq8U x y)
- // cond:
- // result: (GreaterEqualU (CMP (ZeroExt8to32 x) (ZeroExt8to32 y)))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMGreaterEqualU)
- v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
- v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
- v1.AddArg(x)
- v0.AddArg(v1)
- v2 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
- v2.AddArg(y)
- v0.AddArg(v2)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueARM_OpGetClosurePtr(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (GetClosurePtr)
- // cond:
- // result: (LoweredGetClosurePtr)
- for {
- v.reset(OpARMLoweredGetClosurePtr)
- return true
- }
-}
-func rewriteValueARM_OpGoCall(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (GoCall [argwid] mem)
- // cond:
- // result: (CALLgo [argwid] mem)
- for {
- argwid := v.AuxInt
- mem := v.Args[0]
- v.reset(OpARMCALLgo)
- v.AuxInt = argwid
- v.AddArg(mem)
- return true
- }
-}
-func rewriteValueARM_OpGreater16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Greater16 x y)
- // cond:
- // result: (GreaterThan (CMP (SignExt16to32 x) (SignExt16to32 y)))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMGreaterThan)
- v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
- v1 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
- v1.AddArg(x)
- v0.AddArg(v1)
- v2 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
- v2.AddArg(y)
- v0.AddArg(v2)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueARM_OpGreater16U(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Greater16U x y)
- // cond:
- // result: (GreaterThanU (CMP (ZeroExt16to32 x) (ZeroExt16to32 y)))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMGreaterThanU)
- v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
- v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
- v1.AddArg(x)
- v0.AddArg(v1)
- v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
- v2.AddArg(y)
- v0.AddArg(v2)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueARM_OpGreater32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Greater32 x y)
- // cond:
- // result: (GreaterThan (CMP x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMGreaterThan)
- v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueARM_OpGreater32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Greater32F x y)
- // cond:
- // result: (GreaterThan (CMPF x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMGreaterThan)
- v0 := b.NewValue0(v.Line, OpARMCMPF, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueARM_OpGreater32U(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Greater32U x y)
- // cond:
- // result: (GreaterThanU (CMP x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMGreaterThanU)
- v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueARM_OpGreater64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Greater64F x y)
- // cond:
- // result: (GreaterThan (CMPD x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMGreaterThan)
- v0 := b.NewValue0(v.Line, OpARMCMPD, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueARM_OpGreater8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Greater8 x y)
- // cond:
- // result: (GreaterThan (CMP (SignExt8to32 x) (SignExt8to32 y)))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMGreaterThan)
- v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
- v1 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
- v1.AddArg(x)
- v0.AddArg(v1)
- v2 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
- v2.AddArg(y)
- v0.AddArg(v2)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueARM_OpGreater8U(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Greater8U x y)
- // cond:
- // result: (GreaterThanU (CMP (ZeroExt8to32 x) (ZeroExt8to32 y)))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMGreaterThanU)
- v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
- v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
- v1.AddArg(x)
- v0.AddArg(v1)
- v2 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
- v2.AddArg(y)
- v0.AddArg(v2)
- v.AddArg(v0)
- return true
- }
-}
func rewriteValueARM_OpARMGreaterEqual(v *Value, config *Config) bool {
b := v.Block
_ = b
@@ -5934,495 +4835,6 @@ func rewriteValueARM_OpARMGreaterThanU(v *Value, config *Config) bool {
}
return false
}
-func rewriteValueARM_OpHmul16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Hmul16 x y)
- // cond:
- // result: (SRAconst (MUL <config.fe.TypeInt32()> (SignExt16to32 x) (SignExt16to32 y)) [16])
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMSRAconst)
- v0 := b.NewValue0(v.Line, OpARMMUL, config.fe.TypeInt32())
- v1 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
- v1.AddArg(x)
- v0.AddArg(v1)
- v2 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
- v2.AddArg(y)
- v0.AddArg(v2)
- v.AddArg(v0)
- v.AuxInt = 16
- return true
- }
-}
-func rewriteValueARM_OpHmul16u(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Hmul16u x y)
- // cond:
- // result: (SRLconst (MUL <config.fe.TypeUInt32()> (ZeroExt16to32 x) (ZeroExt16to32 y)) [16])
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMSRLconst)
- v0 := b.NewValue0(v.Line, OpARMMUL, config.fe.TypeUInt32())
- v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
- v1.AddArg(x)
- v0.AddArg(v1)
- v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
- v2.AddArg(y)
- v0.AddArg(v2)
- v.AddArg(v0)
- v.AuxInt = 16
- return true
- }
-}
-func rewriteValueARM_OpHmul32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Hmul32 x y)
- // cond:
- // result: (HMUL x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMHMUL)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueARM_OpHmul32u(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Hmul32u x y)
- // cond:
- // result: (HMULU x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMHMULU)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueARM_OpHmul8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Hmul8 x y)
- // cond:
- // result: (SRAconst (MUL <config.fe.TypeInt16()> (SignExt8to32 x) (SignExt8to32 y)) [8])
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMSRAconst)
- v0 := b.NewValue0(v.Line, OpARMMUL, config.fe.TypeInt16())
- v1 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
- v1.AddArg(x)
- v0.AddArg(v1)
- v2 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
- v2.AddArg(y)
- v0.AddArg(v2)
- v.AddArg(v0)
- v.AuxInt = 8
- return true
- }
-}
-func rewriteValueARM_OpHmul8u(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Hmul8u x y)
- // cond:
- // result: (SRLconst (MUL <config.fe.TypeUInt16()> (ZeroExt8to32 x) (ZeroExt8to32 y)) [8])
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMSRLconst)
- v0 := b.NewValue0(v.Line, OpARMMUL, config.fe.TypeUInt16())
- v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
- v1.AddArg(x)
- v0.AddArg(v1)
- v2 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
- v2.AddArg(y)
- v0.AddArg(v2)
- v.AddArg(v0)
- v.AuxInt = 8
- return true
- }
-}
-func rewriteValueARM_OpInterCall(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (InterCall [argwid] entry mem)
- // cond:
- // result: (CALLinter [argwid] entry mem)
- for {
- argwid := v.AuxInt
- entry := v.Args[0]
- mem := v.Args[1]
- v.reset(OpARMCALLinter)
- v.AuxInt = argwid
- v.AddArg(entry)
- v.AddArg(mem)
- return true
- }
-}
-func rewriteValueARM_OpIsInBounds(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (IsInBounds idx len)
- // cond:
- // result: (LessThanU (CMP idx len))
- for {
- idx := v.Args[0]
- len := v.Args[1]
- v.reset(OpARMLessThanU)
- v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
- v0.AddArg(idx)
- v0.AddArg(len)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueARM_OpIsNonNil(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (IsNonNil ptr)
- // cond:
- // result: (NotEqual (CMPconst [0] ptr))
- for {
- ptr := v.Args[0]
- v.reset(OpARMNotEqual)
- v0 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
- v0.AuxInt = 0
- v0.AddArg(ptr)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueARM_OpIsSliceInBounds(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (IsSliceInBounds idx len)
- // cond:
- // result: (LessEqualU (CMP idx len))
- for {
- idx := v.Args[0]
- len := v.Args[1]
- v.reset(OpARMLessEqualU)
- v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
- v0.AddArg(idx)
- v0.AddArg(len)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueARM_OpLeq16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Leq16 x y)
- // cond:
- // result: (LessEqual (CMP (SignExt16to32 x) (SignExt16to32 y)))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMLessEqual)
- v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
- v1 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
- v1.AddArg(x)
- v0.AddArg(v1)
- v2 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
- v2.AddArg(y)
- v0.AddArg(v2)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueARM_OpLeq16U(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Leq16U x y)
- // cond:
- // result: (LessEqualU (CMP (ZeroExt16to32 x) (ZeroExt16to32 y)))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMLessEqualU)
- v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
- v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
- v1.AddArg(x)
- v0.AddArg(v1)
- v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
- v2.AddArg(y)
- v0.AddArg(v2)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueARM_OpLeq32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Leq32 x y)
- // cond:
- // result: (LessEqual (CMP x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMLessEqual)
- v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueARM_OpLeq32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Leq32F x y)
- // cond:
- // result: (GreaterEqual (CMPF y x))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMGreaterEqual)
- v0 := b.NewValue0(v.Line, OpARMCMPF, TypeFlags)
- v0.AddArg(y)
- v0.AddArg(x)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueARM_OpLeq32U(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Leq32U x y)
- // cond:
- // result: (LessEqualU (CMP x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMLessEqualU)
- v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueARM_OpLeq64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Leq64F x y)
- // cond:
- // result: (GreaterEqual (CMPD y x))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMGreaterEqual)
- v0 := b.NewValue0(v.Line, OpARMCMPD, TypeFlags)
- v0.AddArg(y)
- v0.AddArg(x)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueARM_OpLeq8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Leq8 x y)
- // cond:
- // result: (LessEqual (CMP (SignExt8to32 x) (SignExt8to32 y)))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMLessEqual)
- v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
- v1 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
- v1.AddArg(x)
- v0.AddArg(v1)
- v2 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
- v2.AddArg(y)
- v0.AddArg(v2)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueARM_OpLeq8U(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Leq8U x y)
- // cond:
- // result: (LessEqualU (CMP (ZeroExt8to32 x) (ZeroExt8to32 y)))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMLessEqualU)
- v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
- v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
- v1.AddArg(x)
- v0.AddArg(v1)
- v2 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
- v2.AddArg(y)
- v0.AddArg(v2)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueARM_OpLess16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Less16 x y)
- // cond:
- // result: (LessThan (CMP (SignExt16to32 x) (SignExt16to32 y)))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMLessThan)
- v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
- v1 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
- v1.AddArg(x)
- v0.AddArg(v1)
- v2 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
- v2.AddArg(y)
- v0.AddArg(v2)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueARM_OpLess16U(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Less16U x y)
- // cond:
- // result: (LessThanU (CMP (ZeroExt16to32 x) (ZeroExt16to32 y)))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMLessThanU)
- v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
- v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
- v1.AddArg(x)
- v0.AddArg(v1)
- v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
- v2.AddArg(y)
- v0.AddArg(v2)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueARM_OpLess32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Less32 x y)
- // cond:
- // result: (LessThan (CMP x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMLessThan)
- v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueARM_OpLess32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Less32F x y)
- // cond:
- // result: (GreaterThan (CMPF y x))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMGreaterThan)
- v0 := b.NewValue0(v.Line, OpARMCMPF, TypeFlags)
- v0.AddArg(y)
- v0.AddArg(x)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueARM_OpLess32U(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Less32U x y)
- // cond:
- // result: (LessThanU (CMP x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMLessThanU)
- v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueARM_OpLess64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Less64F x y)
- // cond:
- // result: (GreaterThan (CMPD y x))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMGreaterThan)
- v0 := b.NewValue0(v.Line, OpARMCMPD, TypeFlags)
- v0.AddArg(y)
- v0.AddArg(x)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueARM_OpLess8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Less8 x y)
- // cond:
- // result: (LessThan (CMP (SignExt8to32 x) (SignExt8to32 y)))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMLessThan)
- v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
- v1 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
- v1.AddArg(x)
- v0.AddArg(v1)
- v2 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
- v2.AddArg(y)
- v0.AddArg(v2)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueARM_OpLess8U(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Less8U x y)
- // cond:
- // result: (LessThanU (CMP (ZeroExt8to32 x) (ZeroExt8to32 y)))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMLessThanU)
- v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
- v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
- v1.AddArg(x)
- v0.AddArg(v1)
- v2 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
- v2.AddArg(y)
- v0.AddArg(v2)
- v.AddArg(v0)
- return true
- }
-}
func rewriteValueARM_OpARMLessEqual(v *Value, config *Config) bool {
b := v.Block
_ = b
@@ -6735,502 +5147,6 @@ func rewriteValueARM_OpARMLessThanU(v *Value, config *Config) bool {
}
return false
}
-func rewriteValueARM_OpLoad(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Load <t> ptr mem)
- // cond: t.IsBoolean()
- // result: (MOVBUload ptr mem)
- for {
- t := v.Type
- ptr := v.Args[0]
- mem := v.Args[1]
- if !(t.IsBoolean()) {
- break
- }
- v.reset(OpARMMOVBUload)
- v.AddArg(ptr)
- v.AddArg(mem)
- return true
- }
- // match: (Load <t> ptr mem)
- // cond: (is8BitInt(t) && isSigned(t))
- // result: (MOVBload ptr mem)
- for {
- t := v.Type
- ptr := v.Args[0]
- mem := v.Args[1]
- if !(is8BitInt(t) && isSigned(t)) {
- break
- }
- v.reset(OpARMMOVBload)
- v.AddArg(ptr)
- v.AddArg(mem)
- return true
- }
- // match: (Load <t> ptr mem)
- // cond: (is8BitInt(t) && !isSigned(t))
- // result: (MOVBUload ptr mem)
- for {
- t := v.Type
- ptr := v.Args[0]
- mem := v.Args[1]
- if !(is8BitInt(t) && !isSigned(t)) {
- break
- }
- v.reset(OpARMMOVBUload)
- v.AddArg(ptr)
- v.AddArg(mem)
- return true
- }
- // match: (Load <t> ptr mem)
- // cond: (is16BitInt(t) && isSigned(t))
- // result: (MOVHload ptr mem)
- for {
- t := v.Type
- ptr := v.Args[0]
- mem := v.Args[1]
- if !(is16BitInt(t) && isSigned(t)) {
- break
- }
- v.reset(OpARMMOVHload)
- v.AddArg(ptr)
- v.AddArg(mem)
- return true
- }
- // match: (Load <t> ptr mem)
- // cond: (is16BitInt(t) && !isSigned(t))
- // result: (MOVHUload ptr mem)
- for {
- t := v.Type
- ptr := v.Args[0]
- mem := v.Args[1]
- if !(is16BitInt(t) && !isSigned(t)) {
- break
- }
- v.reset(OpARMMOVHUload)
- v.AddArg(ptr)
- v.AddArg(mem)
- return true
- }
- // match: (Load <t> ptr mem)
- // cond: (is32BitInt(t) || isPtr(t))
- // result: (MOVWload ptr mem)
- for {
- t := v.Type
- ptr := v.Args[0]
- mem := v.Args[1]
- if !(is32BitInt(t) || isPtr(t)) {
- break
- }
- v.reset(OpARMMOVWload)
- v.AddArg(ptr)
- v.AddArg(mem)
- return true
- }
- // match: (Load <t> ptr mem)
- // cond: is32BitFloat(t)
- // result: (MOVFload ptr mem)
- for {
- t := v.Type
- ptr := v.Args[0]
- mem := v.Args[1]
- if !(is32BitFloat(t)) {
- break
- }
- v.reset(OpARMMOVFload)
- v.AddArg(ptr)
- v.AddArg(mem)
- return true
- }
- // match: (Load <t> ptr mem)
- // cond: is64BitFloat(t)
- // result: (MOVDload ptr mem)
- for {
- t := v.Type
- ptr := v.Args[0]
- mem := v.Args[1]
- if !(is64BitFloat(t)) {
- break
- }
- v.reset(OpARMMOVDload)
- v.AddArg(ptr)
- v.AddArg(mem)
- return true
- }
- return false
-}
-func rewriteValueARM_OpLrot16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Lrot16 <t> x [c])
- // cond:
- // result: (OR (SLLconst <t> x [c&15]) (SRLconst <t> x [16-c&15]))
- for {
- t := v.Type
- x := v.Args[0]
- c := v.AuxInt
- v.reset(OpARMOR)
- v0 := b.NewValue0(v.Line, OpARMSLLconst, t)
- v0.AddArg(x)
- v0.AuxInt = c & 15
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpARMSRLconst, t)
- v1.AddArg(x)
- v1.AuxInt = 16 - c&15
- v.AddArg(v1)
- return true
- }
-}
-func rewriteValueARM_OpLrot32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Lrot32 x [c])
- // cond:
- // result: (SRRconst x [32-c&31])
- for {
- x := v.Args[0]
- c := v.AuxInt
- v.reset(OpARMSRRconst)
- v.AddArg(x)
- v.AuxInt = 32 - c&31
- return true
- }
-}
-func rewriteValueARM_OpLrot8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Lrot8 <t> x [c])
- // cond:
- // result: (OR (SLLconst <t> x [c&7]) (SRLconst <t> x [8-c&7]))
- for {
- t := v.Type
- x := v.Args[0]
- c := v.AuxInt
- v.reset(OpARMOR)
- v0 := b.NewValue0(v.Line, OpARMSLLconst, t)
- v0.AddArg(x)
- v0.AuxInt = c & 7
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpARMSRLconst, t)
- v1.AddArg(x)
- v1.AuxInt = 8 - c&7
- v.AddArg(v1)
- return true
- }
-}
-func rewriteValueARM_OpLsh16x16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Lsh16x16 x y)
- // cond:
- // result: (CMOVWHSconst (SLL <x.Type> x (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0])
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMCMOVWHSconst)
- v0 := b.NewValue0(v.Line, OpARMSLL, x.Type)
- v0.AddArg(x)
- v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
- v1.AddArg(y)
- v0.AddArg(v1)
- v.AddArg(v0)
- v2 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
- v2.AuxInt = 256
- v3 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
- v3.AddArg(y)
- v2.AddArg(v3)
- v.AddArg(v2)
- v.AuxInt = 0
- return true
- }
-}
-func rewriteValueARM_OpLsh16x32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Lsh16x32 x y)
- // cond:
- // result: (CMOVWHSconst (SLL <x.Type> x y) (CMPconst [256] y) [0])
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMCMOVWHSconst)
- v0 := b.NewValue0(v.Line, OpARMSLL, x.Type)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
- v1.AuxInt = 256
- v1.AddArg(y)
- v.AddArg(v1)
- v.AuxInt = 0
- return true
- }
-}
-func rewriteValueARM_OpLsh16x64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Lsh16x64 x (Const64 [c]))
- // cond: uint64(c) < 16
- // result: (SLLconst x [c])
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpConst64 {
- break
- }
- c := v_1.AuxInt
- if !(uint64(c) < 16) {
- break
- }
- v.reset(OpARMSLLconst)
- v.AddArg(x)
- v.AuxInt = c
- return true
- }
- // match: (Lsh16x64 _ (Const64 [c]))
- // cond: uint64(c) >= 16
- // result: (Const16 [0])
- for {
- v_1 := v.Args[1]
- if v_1.Op != OpConst64 {
- break
- }
- c := v_1.AuxInt
- if !(uint64(c) >= 16) {
- break
- }
- v.reset(OpConst16)
- v.AuxInt = 0
- return true
- }
- return false
-}
-func rewriteValueARM_OpLsh16x8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Lsh16x8 x y)
- // cond:
- // result: (SLL x (ZeroExt8to32 y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMSLL)
- v.AddArg(x)
- v0 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueARM_OpLsh32x16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Lsh32x16 x y)
- // cond:
- // result: (CMOVWHSconst (SLL <x.Type> x (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0])
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMCMOVWHSconst)
- v0 := b.NewValue0(v.Line, OpARMSLL, x.Type)
- v0.AddArg(x)
- v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
- v1.AddArg(y)
- v0.AddArg(v1)
- v.AddArg(v0)
- v2 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
- v2.AuxInt = 256
- v3 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
- v3.AddArg(y)
- v2.AddArg(v3)
- v.AddArg(v2)
- v.AuxInt = 0
- return true
- }
-}
-func rewriteValueARM_OpLsh32x32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Lsh32x32 x y)
- // cond:
- // result: (CMOVWHSconst (SLL <x.Type> x y) (CMPconst [256] y) [0])
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMCMOVWHSconst)
- v0 := b.NewValue0(v.Line, OpARMSLL, x.Type)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
- v1.AuxInt = 256
- v1.AddArg(y)
- v.AddArg(v1)
- v.AuxInt = 0
- return true
- }
-}
-func rewriteValueARM_OpLsh32x64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Lsh32x64 x (Const64 [c]))
- // cond: uint64(c) < 32
- // result: (SLLconst x [c])
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpConst64 {
- break
- }
- c := v_1.AuxInt
- if !(uint64(c) < 32) {
- break
- }
- v.reset(OpARMSLLconst)
- v.AddArg(x)
- v.AuxInt = c
- return true
- }
- // match: (Lsh32x64 _ (Const64 [c]))
- // cond: uint64(c) >= 32
- // result: (Const32 [0])
- for {
- v_1 := v.Args[1]
- if v_1.Op != OpConst64 {
- break
- }
- c := v_1.AuxInt
- if !(uint64(c) >= 32) {
- break
- }
- v.reset(OpConst32)
- v.AuxInt = 0
- return true
- }
- return false
-}
-func rewriteValueARM_OpLsh32x8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Lsh32x8 x y)
- // cond:
- // result: (SLL x (ZeroExt8to32 y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMSLL)
- v.AddArg(x)
- v0 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueARM_OpLsh8x16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Lsh8x16 x y)
- // cond:
- // result: (CMOVWHSconst (SLL <x.Type> x (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0])
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMCMOVWHSconst)
- v0 := b.NewValue0(v.Line, OpARMSLL, x.Type)
- v0.AddArg(x)
- v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
- v1.AddArg(y)
- v0.AddArg(v1)
- v.AddArg(v0)
- v2 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
- v2.AuxInt = 256
- v3 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
- v3.AddArg(y)
- v2.AddArg(v3)
- v.AddArg(v2)
- v.AuxInt = 0
- return true
- }
-}
-func rewriteValueARM_OpLsh8x32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Lsh8x32 x y)
- // cond:
- // result: (CMOVWHSconst (SLL <x.Type> x y) (CMPconst [256] y) [0])
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMCMOVWHSconst)
- v0 := b.NewValue0(v.Line, OpARMSLL, x.Type)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
- v1.AuxInt = 256
- v1.AddArg(y)
- v.AddArg(v1)
- v.AuxInt = 0
- return true
- }
-}
-func rewriteValueARM_OpLsh8x64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Lsh8x64 x (Const64 [c]))
- // cond: uint64(c) < 8
- // result: (SLLconst x [c])
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpConst64 {
- break
- }
- c := v_1.AuxInt
- if !(uint64(c) < 8) {
- break
- }
- v.reset(OpARMSLLconst)
- v.AddArg(x)
- v.AuxInt = c
- return true
- }
- // match: (Lsh8x64 _ (Const64 [c]))
- // cond: uint64(c) >= 8
- // result: (Const8 [0])
- for {
- v_1 := v.Args[1]
- if v_1.Op != OpConst64 {
- break
- }
- c := v_1.AuxInt
- if !(uint64(c) >= 8) {
- break
- }
- v.reset(OpConst8)
- v.AuxInt = 0
- return true
- }
- return false
-}
-func rewriteValueARM_OpLsh8x8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Lsh8x8 x y)
- // cond:
- // result: (SLL x (ZeroExt8to32 y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMSLL)
- v.AddArg(x)
- v0 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
func rewriteValueARM_OpARMMOVBUload(v *Value, config *Config) bool {
b := v.Block
_ = b
@@ -8413,17 +6329,17 @@ func rewriteValueARM_OpARMMOVWload(v *Value, config *Config) bool {
if v_0.Op != OpARMADDshiftLL {
break
}
+ c := v_0.AuxInt
ptr := v_0.Args[0]
idx := v_0.Args[1]
- c := v_0.AuxInt
mem := v.Args[1]
if !(sym == nil && !config.nacl) {
break
}
v.reset(OpARMMOVWloadshiftLL)
+ v.AuxInt = c
v.AddArg(ptr)
v.AddArg(idx)
- v.AuxInt = c
v.AddArg(mem)
return true
}
@@ -8439,17 +6355,17 @@ func rewriteValueARM_OpARMMOVWload(v *Value, config *Config) bool {
if v_0.Op != OpARMADDshiftRL {
break
}
+ c := v_0.AuxInt
ptr := v_0.Args[0]
idx := v_0.Args[1]
- c := v_0.AuxInt
mem := v.Args[1]
if !(sym == nil && !config.nacl) {
break
}
v.reset(OpARMMOVWloadshiftRL)
+ v.AuxInt = c
v.AddArg(ptr)
v.AddArg(idx)
- v.AuxInt = c
v.AddArg(mem)
return true
}
@@ -8465,17 +6381,17 @@ func rewriteValueARM_OpARMMOVWload(v *Value, config *Config) bool {
if v_0.Op != OpARMADDshiftRA {
break
}
+ c := v_0.AuxInt
ptr := v_0.Args[0]
idx := v_0.Args[1]
- c := v_0.AuxInt
mem := v.Args[1]
if !(sym == nil && !config.nacl) {
break
}
v.reset(OpARMMOVWloadshiftRA)
+ v.AuxInt = c
v.AddArg(ptr)
v.AddArg(idx)
- v.AuxInt = c
v.AddArg(mem)
return true
}
@@ -8550,13 +6466,13 @@ func rewriteValueARM_OpARMMOVWloadidx(v *Value, config *Config) bool {
if v_1.Op != OpARMSLLconst {
break
}
- idx := v_1.Args[0]
c := v_1.AuxInt
+ idx := v_1.Args[0]
mem := v.Args[2]
v.reset(OpARMMOVWloadshiftLL)
+ v.AuxInt = c
v.AddArg(ptr)
v.AddArg(idx)
- v.AuxInt = c
v.AddArg(mem)
return true
}
@@ -8568,14 +6484,14 @@ func rewriteValueARM_OpARMMOVWloadidx(v *Value, config *Config) bool {
if v_0.Op != OpARMSLLconst {
break
}
- idx := v_0.Args[0]
c := v_0.AuxInt
+ idx := v_0.Args[0]
ptr := v.Args[1]
mem := v.Args[2]
v.reset(OpARMMOVWloadshiftLL)
+ v.AuxInt = c
v.AddArg(ptr)
v.AddArg(idx)
- v.AuxInt = c
v.AddArg(mem)
return true
}
@@ -8588,13 +6504,13 @@ func rewriteValueARM_OpARMMOVWloadidx(v *Value, config *Config) bool {
if v_1.Op != OpARMSRLconst {
break
}
- idx := v_1.Args[0]
c := v_1.AuxInt
+ idx := v_1.Args[0]
mem := v.Args[2]
v.reset(OpARMMOVWloadshiftRL)
+ v.AuxInt = c
v.AddArg(ptr)
v.AddArg(idx)
- v.AuxInt = c
v.AddArg(mem)
return true
}
@@ -8606,14 +6522,14 @@ func rewriteValueARM_OpARMMOVWloadidx(v *Value, config *Config) bool {
if v_0.Op != OpARMSRLconst {
break
}
- idx := v_0.Args[0]
c := v_0.AuxInt
+ idx := v_0.Args[0]
ptr := v.Args[1]
mem := v.Args[2]
v.reset(OpARMMOVWloadshiftRL)
+ v.AuxInt = c
v.AddArg(ptr)
v.AddArg(idx)
- v.AuxInt = c
v.AddArg(mem)
return true
}
@@ -8626,13 +6542,13 @@ func rewriteValueARM_OpARMMOVWloadidx(v *Value, config *Config) bool {
if v_1.Op != OpARMSRAconst {
break
}
- idx := v_1.Args[0]
c := v_1.AuxInt
+ idx := v_1.Args[0]
mem := v.Args[2]
v.reset(OpARMMOVWloadshiftRA)
+ v.AuxInt = c
v.AddArg(ptr)
v.AddArg(idx)
- v.AuxInt = c
v.AddArg(mem)
return true
}
@@ -8644,14 +6560,14 @@ func rewriteValueARM_OpARMMOVWloadidx(v *Value, config *Config) bool {
if v_0.Op != OpARMSRAconst {
break
}
- idx := v_0.Args[0]
c := v_0.AuxInt
+ idx := v_0.Args[0]
ptr := v.Args[1]
mem := v.Args[2]
v.reset(OpARMMOVWloadshiftRA)
+ v.AuxInt = c
v.AddArg(ptr)
v.AddArg(idx)
- v.AuxInt = c
v.AddArg(mem)
return true
}
@@ -8664,18 +6580,18 @@ func rewriteValueARM_OpARMMOVWloadshiftLL(v *Value, config *Config) bool {
// cond: c==d && isSamePtr(ptr, ptr2)
// result: x
for {
+ c := v.AuxInt
ptr := v.Args[0]
idx := v.Args[1]
- c := v.AuxInt
v_2 := v.Args[2]
if v_2.Op != OpARMMOVWstoreshiftLL {
break
}
+ d := v_2.AuxInt
ptr2 := v_2.Args[0]
if idx != v_2.Args[1] {
break
}
- d := v_2.AuxInt
x := v_2.Args[2]
if !(c == d && isSamePtr(ptr, ptr2)) {
break
@@ -8689,13 +6605,13 @@ func rewriteValueARM_OpARMMOVWloadshiftLL(v *Value, config *Config) bool {
// cond:
// result: (MOVWload [int64(uint32(c)<<uint64(d))] ptr mem)
for {
+ d := v.AuxInt
ptr := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpARMMOVWconst {
break
}
c := v_1.AuxInt
- d := v.AuxInt
mem := v.Args[2]
v.reset(OpARMMOVWload)
v.AuxInt = int64(uint32(c) << uint64(d))
@@ -8712,18 +6628,18 @@ func rewriteValueARM_OpARMMOVWloadshiftRA(v *Value, config *Config) bool {
// cond: c==d && isSamePtr(ptr, ptr2)
// result: x
for {
+ c := v.AuxInt
ptr := v.Args[0]
idx := v.Args[1]
- c := v.AuxInt
v_2 := v.Args[2]
if v_2.Op != OpARMMOVWstoreshiftRA {
break
}
+ d := v_2.AuxInt
ptr2 := v_2.Args[0]
if idx != v_2.Args[1] {
break
}
- d := v_2.AuxInt
x := v_2.Args[2]
if !(c == d && isSamePtr(ptr, ptr2)) {
break
@@ -8737,13 +6653,13 @@ func rewriteValueARM_OpARMMOVWloadshiftRA(v *Value, config *Config) bool {
// cond:
// result: (MOVWload [int64(int32(c)>>uint64(d))] ptr mem)
for {
+ d := v.AuxInt
ptr := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpARMMOVWconst {
break
}
c := v_1.AuxInt
- d := v.AuxInt
mem := v.Args[2]
v.reset(OpARMMOVWload)
v.AuxInt = int64(int32(c) >> uint64(d))
@@ -8760,18 +6676,18 @@ func rewriteValueARM_OpARMMOVWloadshiftRL(v *Value, config *Config) bool {
// cond: c==d && isSamePtr(ptr, ptr2)
// result: x
for {
+ c := v.AuxInt
ptr := v.Args[0]
idx := v.Args[1]
- c := v.AuxInt
v_2 := v.Args[2]
if v_2.Op != OpARMMOVWstoreshiftRL {
break
}
+ d := v_2.AuxInt
ptr2 := v_2.Args[0]
if idx != v_2.Args[1] {
break
}
- d := v_2.AuxInt
x := v_2.Args[2]
if !(c == d && isSamePtr(ptr, ptr2)) {
break
@@ -8785,13 +6701,13 @@ func rewriteValueARM_OpARMMOVWloadshiftRL(v *Value, config *Config) bool {
// cond:
// result: (MOVWload [int64(uint32(c)>>uint64(d))] ptr mem)
for {
+ d := v.AuxInt
ptr := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpARMMOVWconst {
break
}
c := v_1.AuxInt
- d := v.AuxInt
mem := v.Args[2]
v.reset(OpARMMOVWload)
v.AuxInt = int64(uint32(c) >> uint64(d))
@@ -8920,18 +6836,18 @@ func rewriteValueARM_OpARMMOVWstore(v *Value, config *Config) bool {
if v_0.Op != OpARMADDshiftLL {
break
}
+ c := v_0.AuxInt
ptr := v_0.Args[0]
idx := v_0.Args[1]
- c := v_0.AuxInt
val := v.Args[1]
mem := v.Args[2]
if !(sym == nil && !config.nacl) {
break
}
v.reset(OpARMMOVWstoreshiftLL)
+ v.AuxInt = c
v.AddArg(ptr)
v.AddArg(idx)
- v.AuxInt = c
v.AddArg(val)
v.AddArg(mem)
return true
@@ -8948,18 +6864,18 @@ func rewriteValueARM_OpARMMOVWstore(v *Value, config *Config) bool {
if v_0.Op != OpARMADDshiftRL {
break
}
+ c := v_0.AuxInt
ptr := v_0.Args[0]
idx := v_0.Args[1]
- c := v_0.AuxInt
val := v.Args[1]
mem := v.Args[2]
if !(sym == nil && !config.nacl) {
break
}
v.reset(OpARMMOVWstoreshiftRL)
+ v.AuxInt = c
v.AddArg(ptr)
v.AddArg(idx)
- v.AuxInt = c
v.AddArg(val)
v.AddArg(mem)
return true
@@ -8976,18 +6892,18 @@ func rewriteValueARM_OpARMMOVWstore(v *Value, config *Config) bool {
if v_0.Op != OpARMADDshiftRA {
break
}
+ c := v_0.AuxInt
ptr := v_0.Args[0]
idx := v_0.Args[1]
- c := v_0.AuxInt
val := v.Args[1]
mem := v.Args[2]
if !(sym == nil && !config.nacl) {
break
}
v.reset(OpARMMOVWstoreshiftRA)
+ v.AuxInt = c
v.AddArg(ptr)
v.AddArg(idx)
- v.AuxInt = c
v.AddArg(val)
v.AddArg(mem)
return true
@@ -9044,14 +6960,14 @@ func rewriteValueARM_OpARMMOVWstoreidx(v *Value, config *Config) bool {
if v_1.Op != OpARMSLLconst {
break
}
- idx := v_1.Args[0]
c := v_1.AuxInt
+ idx := v_1.Args[0]
val := v.Args[2]
mem := v.Args[3]
v.reset(OpARMMOVWstoreshiftLL)
+ v.AuxInt = c
v.AddArg(ptr)
v.AddArg(idx)
- v.AuxInt = c
v.AddArg(val)
v.AddArg(mem)
return true
@@ -9064,15 +6980,15 @@ func rewriteValueARM_OpARMMOVWstoreidx(v *Value, config *Config) bool {
if v_0.Op != OpARMSLLconst {
break
}
- idx := v_0.Args[0]
c := v_0.AuxInt
+ idx := v_0.Args[0]
ptr := v.Args[1]
val := v.Args[2]
mem := v.Args[3]
v.reset(OpARMMOVWstoreshiftLL)
+ v.AuxInt = c
v.AddArg(ptr)
v.AddArg(idx)
- v.AuxInt = c
v.AddArg(val)
v.AddArg(mem)
return true
@@ -9086,14 +7002,14 @@ func rewriteValueARM_OpARMMOVWstoreidx(v *Value, config *Config) bool {
if v_1.Op != OpARMSRLconst {
break
}
- idx := v_1.Args[0]
c := v_1.AuxInt
+ idx := v_1.Args[0]
val := v.Args[2]
mem := v.Args[3]
v.reset(OpARMMOVWstoreshiftRL)
+ v.AuxInt = c
v.AddArg(ptr)
v.AddArg(idx)
- v.AuxInt = c
v.AddArg(val)
v.AddArg(mem)
return true
@@ -9106,15 +7022,15 @@ func rewriteValueARM_OpARMMOVWstoreidx(v *Value, config *Config) bool {
if v_0.Op != OpARMSRLconst {
break
}
- idx := v_0.Args[0]
c := v_0.AuxInt
+ idx := v_0.Args[0]
ptr := v.Args[1]
val := v.Args[2]
mem := v.Args[3]
v.reset(OpARMMOVWstoreshiftRL)
+ v.AuxInt = c
v.AddArg(ptr)
v.AddArg(idx)
- v.AuxInt = c
v.AddArg(val)
v.AddArg(mem)
return true
@@ -9128,14 +7044,14 @@ func rewriteValueARM_OpARMMOVWstoreidx(v *Value, config *Config) bool {
if v_1.Op != OpARMSRAconst {
break
}
- idx := v_1.Args[0]
c := v_1.AuxInt
+ idx := v_1.Args[0]
val := v.Args[2]
mem := v.Args[3]
v.reset(OpARMMOVWstoreshiftRA)
+ v.AuxInt = c
v.AddArg(ptr)
v.AddArg(idx)
- v.AuxInt = c
v.AddArg(val)
v.AddArg(mem)
return true
@@ -9148,15 +7064,15 @@ func rewriteValueARM_OpARMMOVWstoreidx(v *Value, config *Config) bool {
if v_0.Op != OpARMSRAconst {
break
}
- idx := v_0.Args[0]
c := v_0.AuxInt
+ idx := v_0.Args[0]
ptr := v.Args[1]
val := v.Args[2]
mem := v.Args[3]
v.reset(OpARMMOVWstoreshiftRA)
+ v.AuxInt = c
v.AddArg(ptr)
v.AddArg(idx)
- v.AuxInt = c
v.AddArg(val)
v.AddArg(mem)
return true
@@ -9170,13 +7086,13 @@ func rewriteValueARM_OpARMMOVWstoreshiftLL(v *Value, config *Config) bool {
// cond:
// result: (MOVWstore [int64(uint32(c)<<uint64(d))] ptr val mem)
for {
+ d := v.AuxInt
ptr := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpARMMOVWconst {
break
}
c := v_1.AuxInt
- d := v.AuxInt
val := v.Args[2]
mem := v.Args[3]
v.reset(OpARMMOVWstore)
@@ -9195,13 +7111,13 @@ func rewriteValueARM_OpARMMOVWstoreshiftRA(v *Value, config *Config) bool {
// cond:
// result: (MOVWstore [int64(int32(c)>>uint64(d))] ptr val mem)
for {
+ d := v.AuxInt
ptr := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpARMMOVWconst {
break
}
c := v_1.AuxInt
- d := v.AuxInt
val := v.Args[2]
mem := v.Args[3]
v.reset(OpARMMOVWstore)
@@ -9220,13 +7136,13 @@ func rewriteValueARM_OpARMMOVWstoreshiftRL(v *Value, config *Config) bool {
// cond:
// result: (MOVWstore [int64(uint32(c)>>uint64(d))] ptr val mem)
for {
+ d := v.AuxInt
ptr := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpARMMOVWconst {
break
}
c := v_1.AuxInt
- d := v.AuxInt
val := v.Args[2]
mem := v.Args[3]
v.reset(OpARMMOVWstore)
@@ -9323,9 +7239,9 @@ func rewriteValueARM_OpARMMUL(v *Value, config *Config) bool {
break
}
v.reset(OpARMADDshiftLL)
+ v.AuxInt = log2(c - 1)
v.AddArg(x)
v.AddArg(x)
- v.AuxInt = log2(c - 1)
return true
}
// match: (MUL x (MOVWconst [c]))
@@ -9342,9 +7258,9 @@ func rewriteValueARM_OpARMMUL(v *Value, config *Config) bool {
break
}
v.reset(OpARMRSBshiftLL)
+ v.AuxInt = log2(c + 1)
v.AddArg(x)
v.AddArg(x)
- v.AuxInt = log2(c + 1)
return true
}
// match: (MUL x (MOVWconst [c]))
@@ -9363,9 +7279,9 @@ func rewriteValueARM_OpARMMUL(v *Value, config *Config) bool {
v.reset(OpARMSLLconst)
v.AuxInt = log2(c / 3)
v0 := b.NewValue0(v.Line, OpARMADDshiftLL, x.Type)
+ v0.AuxInt = 1
v0.AddArg(x)
v0.AddArg(x)
- v0.AuxInt = 1
v.AddArg(v0)
return true
}
@@ -9385,9 +7301,9 @@ func rewriteValueARM_OpARMMUL(v *Value, config *Config) bool {
v.reset(OpARMSLLconst)
v.AuxInt = log2(c / 5)
v0 := b.NewValue0(v.Line, OpARMADDshiftLL, x.Type)
+ v0.AuxInt = 2
v0.AddArg(x)
v0.AddArg(x)
- v0.AuxInt = 2
v.AddArg(v0)
return true
}
@@ -9407,9 +7323,9 @@ func rewriteValueARM_OpARMMUL(v *Value, config *Config) bool {
v.reset(OpARMSLLconst)
v.AuxInt = log2(c / 7)
v0 := b.NewValue0(v.Line, OpARMRSBshiftLL, x.Type)
+ v0.AuxInt = 3
v0.AddArg(x)
v0.AddArg(x)
- v0.AuxInt = 3
v.AddArg(v0)
return true
}
@@ -9429,9 +7345,9 @@ func rewriteValueARM_OpARMMUL(v *Value, config *Config) bool {
v.reset(OpARMSLLconst)
v.AuxInt = log2(c / 9)
v0 := b.NewValue0(v.Line, OpARMADDshiftLL, x.Type)
+ v0.AuxInt = 3
v0.AddArg(x)
v0.AddArg(x)
- v0.AuxInt = 3
v.AddArg(v0)
return true
}
@@ -9517,9 +7433,9 @@ func rewriteValueARM_OpARMMUL(v *Value, config *Config) bool {
break
}
v.reset(OpARMADDshiftLL)
+ v.AuxInt = log2(c - 1)
v.AddArg(x)
v.AddArg(x)
- v.AuxInt = log2(c - 1)
return true
}
// match: (MUL (MOVWconst [c]) x)
@@ -9536,9 +7452,9 @@ func rewriteValueARM_OpARMMUL(v *Value, config *Config) bool {
break
}
v.reset(OpARMRSBshiftLL)
+ v.AuxInt = log2(c + 1)
v.AddArg(x)
v.AddArg(x)
- v.AuxInt = log2(c + 1)
return true
}
// match: (MUL (MOVWconst [c]) x)
@@ -9557,9 +7473,9 @@ func rewriteValueARM_OpARMMUL(v *Value, config *Config) bool {
v.reset(OpARMSLLconst)
v.AuxInt = log2(c / 3)
v0 := b.NewValue0(v.Line, OpARMADDshiftLL, x.Type)
+ v0.AuxInt = 1
v0.AddArg(x)
v0.AddArg(x)
- v0.AuxInt = 1
v.AddArg(v0)
return true
}
@@ -9579,9 +7495,9 @@ func rewriteValueARM_OpARMMUL(v *Value, config *Config) bool {
v.reset(OpARMSLLconst)
v.AuxInt = log2(c / 5)
v0 := b.NewValue0(v.Line, OpARMADDshiftLL, x.Type)
+ v0.AuxInt = 2
v0.AddArg(x)
v0.AddArg(x)
- v0.AuxInt = 2
v.AddArg(v0)
return true
}
@@ -9601,9 +7517,9 @@ func rewriteValueARM_OpARMMUL(v *Value, config *Config) bool {
v.reset(OpARMSLLconst)
v.AuxInt = log2(c / 7)
v0 := b.NewValue0(v.Line, OpARMRSBshiftLL, x.Type)
+ v0.AuxInt = 3
v0.AddArg(x)
v0.AddArg(x)
- v0.AuxInt = 3
v.AddArg(v0)
return true
}
@@ -9623,9 +7539,9 @@ func rewriteValueARM_OpARMMUL(v *Value, config *Config) bool {
v.reset(OpARMSLLconst)
v.AuxInt = log2(c / 9)
v0 := b.NewValue0(v.Line, OpARMADDshiftLL, x.Type)
+ v0.AuxInt = 3
v0.AddArg(x)
v0.AddArg(x)
- v0.AuxInt = 3
v.AddArg(v0)
return true
}
@@ -9744,9 +7660,9 @@ func rewriteValueARM_OpARMMULA(v *Value, config *Config) bool {
}
v.reset(OpARMADD)
v0 := b.NewValue0(v.Line, OpARMADDshiftLL, x.Type)
+ v0.AuxInt = log2(c - 1)
v0.AddArg(x)
v0.AddArg(x)
- v0.AuxInt = log2(c - 1)
v.AddArg(v0)
v.AddArg(a)
return true
@@ -9767,9 +7683,9 @@ func rewriteValueARM_OpARMMULA(v *Value, config *Config) bool {
}
v.reset(OpARMADD)
v0 := b.NewValue0(v.Line, OpARMRSBshiftLL, x.Type)
+ v0.AuxInt = log2(c + 1)
v0.AddArg(x)
v0.AddArg(x)
- v0.AuxInt = log2(c + 1)
v.AddArg(v0)
v.AddArg(a)
return true
@@ -9792,9 +7708,9 @@ func rewriteValueARM_OpARMMULA(v *Value, config *Config) bool {
v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
v0.AuxInt = log2(c / 3)
v1 := b.NewValue0(v.Line, OpARMADDshiftLL, x.Type)
+ v1.AuxInt = 1
v1.AddArg(x)
v1.AddArg(x)
- v1.AuxInt = 1
v0.AddArg(v1)
v.AddArg(v0)
v.AddArg(a)
@@ -9818,9 +7734,9 @@ func rewriteValueARM_OpARMMULA(v *Value, config *Config) bool {
v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
v0.AuxInt = log2(c / 5)
v1 := b.NewValue0(v.Line, OpARMADDshiftLL, x.Type)
+ v1.AuxInt = 2
v1.AddArg(x)
v1.AddArg(x)
- v1.AuxInt = 2
v0.AddArg(v1)
v.AddArg(v0)
v.AddArg(a)
@@ -9844,9 +7760,9 @@ func rewriteValueARM_OpARMMULA(v *Value, config *Config) bool {
v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
v0.AuxInt = log2(c / 7)
v1 := b.NewValue0(v.Line, OpARMRSBshiftLL, x.Type)
+ v1.AuxInt = 3
v1.AddArg(x)
v1.AddArg(x)
- v1.AuxInt = 3
v0.AddArg(v1)
v.AddArg(v0)
v.AddArg(a)
@@ -9870,9 +7786,9 @@ func rewriteValueARM_OpARMMULA(v *Value, config *Config) bool {
v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
v0.AuxInt = log2(c / 9)
v1 := b.NewValue0(v.Line, OpARMADDshiftLL, x.Type)
+ v1.AuxInt = 3
v1.AddArg(x)
v1.AddArg(x)
- v1.AuxInt = 3
v0.AddArg(v1)
v.AddArg(v0)
v.AddArg(a)
@@ -9970,9 +7886,9 @@ func rewriteValueARM_OpARMMULA(v *Value, config *Config) bool {
}
v.reset(OpARMADD)
v0 := b.NewValue0(v.Line, OpARMADDshiftLL, x.Type)
+ v0.AuxInt = log2(c - 1)
v0.AddArg(x)
v0.AddArg(x)
- v0.AuxInt = log2(c - 1)
v.AddArg(v0)
v.AddArg(a)
return true
@@ -9993,9 +7909,9 @@ func rewriteValueARM_OpARMMULA(v *Value, config *Config) bool {
}
v.reset(OpARMADD)
v0 := b.NewValue0(v.Line, OpARMRSBshiftLL, x.Type)
+ v0.AuxInt = log2(c + 1)
v0.AddArg(x)
v0.AddArg(x)
- v0.AuxInt = log2(c + 1)
v.AddArg(v0)
v.AddArg(a)
return true
@@ -10018,9 +7934,9 @@ func rewriteValueARM_OpARMMULA(v *Value, config *Config) bool {
v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
v0.AuxInt = log2(c / 3)
v1 := b.NewValue0(v.Line, OpARMADDshiftLL, x.Type)
+ v1.AuxInt = 1
v1.AddArg(x)
v1.AddArg(x)
- v1.AuxInt = 1
v0.AddArg(v1)
v.AddArg(v0)
v.AddArg(a)
@@ -10044,9 +7960,9 @@ func rewriteValueARM_OpARMMULA(v *Value, config *Config) bool {
v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
v0.AuxInt = log2(c / 5)
v1 := b.NewValue0(v.Line, OpARMADDshiftLL, x.Type)
+ v1.AuxInt = 2
v1.AddArg(x)
v1.AddArg(x)
- v1.AuxInt = 2
v0.AddArg(v1)
v.AddArg(v0)
v.AddArg(a)
@@ -10070,9 +7986,9 @@ func rewriteValueARM_OpARMMULA(v *Value, config *Config) bool {
v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
v0.AuxInt = log2(c / 7)
v1 := b.NewValue0(v.Line, OpARMRSBshiftLL, x.Type)
+ v1.AuxInt = 3
v1.AddArg(x)
v1.AddArg(x)
- v1.AuxInt = 3
v0.AddArg(v1)
v.AddArg(v0)
v.AddArg(a)
@@ -10096,9 +8012,9 @@ func rewriteValueARM_OpARMMULA(v *Value, config *Config) bool {
v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
v0.AuxInt = log2(c / 9)
v1 := b.NewValue0(v.Line, OpARMADDshiftLL, x.Type)
+ v1.AuxInt = 3
v1.AddArg(x)
v1.AddArg(x)
- v1.AuxInt = 3
v0.AddArg(v1)
v.AddArg(v0)
v.AddArg(a)
@@ -10153,8 +8069,8 @@ func rewriteValueARM_OpARMMVN(v *Value, config *Config) bool {
c := v_0.AuxInt
x := v_0.Args[0]
v.reset(OpARMMVNshiftLL)
- v.AddArg(x)
v.AuxInt = c
+ v.AddArg(x)
return true
}
// match: (MVN (SRLconst [c] x))
@@ -10168,8 +8084,8 @@ func rewriteValueARM_OpARMMVN(v *Value, config *Config) bool {
c := v_0.AuxInt
x := v_0.Args[0]
v.reset(OpARMMVNshiftRL)
- v.AddArg(x)
v.AuxInt = c
+ v.AddArg(x)
return true
}
// match: (MVN (SRAconst [c] x))
@@ -10183,8 +8099,8 @@ func rewriteValueARM_OpARMMVN(v *Value, config *Config) bool {
c := v_0.AuxInt
x := v_0.Args[0]
v.reset(OpARMMVNshiftRA)
- v.AddArg(x)
v.AuxInt = c
+ v.AddArg(x)
return true
}
// match: (MVN (SLL x y))
@@ -10241,12 +8157,12 @@ func rewriteValueARM_OpARMMVNshiftLL(v *Value, config *Config) bool {
// cond:
// result: (MOVWconst [^int64(uint32(c)<<uint64(d))])
for {
+ d := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpARMMOVWconst {
break
}
c := v_0.AuxInt
- d := v.AuxInt
v.reset(OpARMMOVWconst)
v.AuxInt = ^int64(uint32(c) << uint64(d))
return true
@@ -10267,8 +8183,8 @@ func rewriteValueARM_OpARMMVNshiftLLreg(v *Value, config *Config) bool {
}
c := v_1.AuxInt
v.reset(OpARMMVNshiftLL)
- v.AddArg(x)
v.AuxInt = c
+ v.AddArg(x)
return true
}
return false
@@ -10280,12 +8196,12 @@ func rewriteValueARM_OpARMMVNshiftRA(v *Value, config *Config) bool {
// cond:
// result: (MOVWconst [^int64(int32(c)>>uint64(d))])
for {
+ d := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpARMMOVWconst {
break
}
c := v_0.AuxInt
- d := v.AuxInt
v.reset(OpARMMOVWconst)
v.AuxInt = ^int64(int32(c) >> uint64(d))
return true
@@ -10306,8 +8222,8 @@ func rewriteValueARM_OpARMMVNshiftRAreg(v *Value, config *Config) bool {
}
c := v_1.AuxInt
v.reset(OpARMMVNshiftRA)
- v.AddArg(x)
v.AuxInt = c
+ v.AddArg(x)
return true
}
return false
@@ -10319,12 +8235,12 @@ func rewriteValueARM_OpARMMVNshiftRL(v *Value, config *Config) bool {
// cond:
// result: (MOVWconst [^int64(uint32(c)>>uint64(d))])
for {
+ d := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpARMMOVWconst {
break
}
c := v_0.AuxInt
- d := v.AuxInt
v.reset(OpARMMOVWconst)
v.AuxInt = ^int64(uint32(c) >> uint64(d))
return true
@@ -10345,692 +8261,12 @@ func rewriteValueARM_OpARMMVNshiftRLreg(v *Value, config *Config) bool {
}
c := v_1.AuxInt
v.reset(OpARMMVNshiftRL)
- v.AddArg(x)
v.AuxInt = c
- return true
- }
- return false
-}
-func rewriteValueARM_OpMod16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Mod16 x y)
- // cond:
- // result: (MOD (SignExt16to32 x) (SignExt16to32 y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMMOD)
- v0 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
- v0.AddArg(x)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
- v1.AddArg(y)
- v.AddArg(v1)
- return true
- }
-}
-func rewriteValueARM_OpMod16u(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Mod16u x y)
- // cond:
- // result: (MODU (ZeroExt16to32 x) (ZeroExt16to32 y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMMODU)
- v0 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
- v0.AddArg(x)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
- v1.AddArg(y)
- v.AddArg(v1)
- return true
- }
-}
-func rewriteValueARM_OpMod32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Mod32 x y)
- // cond:
- // result: (MOD x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMMOD)
v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueARM_OpMod32u(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Mod32u x y)
- // cond:
- // result: (MODU x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMMODU)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueARM_OpMod8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Mod8 x y)
- // cond:
- // result: (MOD (SignExt8to32 x) (SignExt8to32 y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMMOD)
- v0 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
- v0.AddArg(x)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
- v1.AddArg(y)
- v.AddArg(v1)
- return true
- }
-}
-func rewriteValueARM_OpMod8u(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Mod8u x y)
- // cond:
- // result: (MODU (ZeroExt8to32 x) (ZeroExt8to32 y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMMODU)
- v0 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
- v0.AddArg(x)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
- v1.AddArg(y)
- v.AddArg(v1)
- return true
- }
-}
-func rewriteValueARM_OpMove(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Move [s] _ _ mem)
- // cond: SizeAndAlign(s).Size() == 0
- // result: mem
- for {
- s := v.AuxInt
- mem := v.Args[2]
- if !(SizeAndAlign(s).Size() == 0) {
- break
- }
- v.reset(OpCopy)
- v.Type = mem.Type
- v.AddArg(mem)
- return true
- }
- // match: (Move [s] dst src mem)
- // cond: SizeAndAlign(s).Size() == 1
- // result: (MOVBstore dst (MOVBUload src mem) mem)
- for {
- s := v.AuxInt
- dst := v.Args[0]
- src := v.Args[1]
- mem := v.Args[2]
- if !(SizeAndAlign(s).Size() == 1) {
- break
- }
- v.reset(OpARMMOVBstore)
- v.AddArg(dst)
- v0 := b.NewValue0(v.Line, OpARMMOVBUload, config.fe.TypeUInt8())
- v0.AddArg(src)
- v0.AddArg(mem)
- v.AddArg(v0)
- v.AddArg(mem)
- return true
- }
- // match: (Move [s] dst src mem)
- // cond: SizeAndAlign(s).Size() == 2 && SizeAndAlign(s).Align()%2 == 0
- // result: (MOVHstore dst (MOVHUload src mem) mem)
- for {
- s := v.AuxInt
- dst := v.Args[0]
- src := v.Args[1]
- mem := v.Args[2]
- if !(SizeAndAlign(s).Size() == 2 && SizeAndAlign(s).Align()%2 == 0) {
- break
- }
- v.reset(OpARMMOVHstore)
- v.AddArg(dst)
- v0 := b.NewValue0(v.Line, OpARMMOVHUload, config.fe.TypeUInt16())
- v0.AddArg(src)
- v0.AddArg(mem)
- v.AddArg(v0)
- v.AddArg(mem)
- return true
- }
- // match: (Move [s] dst src mem)
- // cond: SizeAndAlign(s).Size() == 2
- // result: (MOVBstore [1] dst (MOVBUload [1] src mem) (MOVBstore dst (MOVBUload src mem) mem))
- for {
- s := v.AuxInt
- dst := v.Args[0]
- src := v.Args[1]
- mem := v.Args[2]
- if !(SizeAndAlign(s).Size() == 2) {
- break
- }
- v.reset(OpARMMOVBstore)
- v.AuxInt = 1
- v.AddArg(dst)
- v0 := b.NewValue0(v.Line, OpARMMOVBUload, config.fe.TypeUInt8())
- v0.AuxInt = 1
- v0.AddArg(src)
- v0.AddArg(mem)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpARMMOVBstore, TypeMem)
- v1.AddArg(dst)
- v2 := b.NewValue0(v.Line, OpARMMOVBUload, config.fe.TypeUInt8())
- v2.AddArg(src)
- v2.AddArg(mem)
- v1.AddArg(v2)
- v1.AddArg(mem)
- v.AddArg(v1)
- return true
- }
- // match: (Move [s] dst src mem)
- // cond: SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%4 == 0
- // result: (MOVWstore dst (MOVWload src mem) mem)
- for {
- s := v.AuxInt
- dst := v.Args[0]
- src := v.Args[1]
- mem := v.Args[2]
- if !(SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%4 == 0) {
- break
- }
- v.reset(OpARMMOVWstore)
- v.AddArg(dst)
- v0 := b.NewValue0(v.Line, OpARMMOVWload, config.fe.TypeUInt32())
- v0.AddArg(src)
- v0.AddArg(mem)
- v.AddArg(v0)
- v.AddArg(mem)
- return true
- }
- // match: (Move [s] dst src mem)
- // cond: SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%2 == 0
- // result: (MOVHstore [2] dst (MOVHUload [2] src mem) (MOVHstore dst (MOVHUload src mem) mem))
- for {
- s := v.AuxInt
- dst := v.Args[0]
- src := v.Args[1]
- mem := v.Args[2]
- if !(SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%2 == 0) {
- break
- }
- v.reset(OpARMMOVHstore)
- v.AuxInt = 2
- v.AddArg(dst)
- v0 := b.NewValue0(v.Line, OpARMMOVHUload, config.fe.TypeUInt16())
- v0.AuxInt = 2
- v0.AddArg(src)
- v0.AddArg(mem)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpARMMOVHstore, TypeMem)
- v1.AddArg(dst)
- v2 := b.NewValue0(v.Line, OpARMMOVHUload, config.fe.TypeUInt16())
- v2.AddArg(src)
- v2.AddArg(mem)
- v1.AddArg(v2)
- v1.AddArg(mem)
- v.AddArg(v1)
- return true
- }
- // match: (Move [s] dst src mem)
- // cond: SizeAndAlign(s).Size() == 4
- // result: (MOVBstore [3] dst (MOVBUload [3] src mem) (MOVBstore [2] dst (MOVBUload [2] src mem) (MOVBstore [1] dst (MOVBUload [1] src mem) (MOVBstore dst (MOVBUload src mem) mem))))
- for {
- s := v.AuxInt
- dst := v.Args[0]
- src := v.Args[1]
- mem := v.Args[2]
- if !(SizeAndAlign(s).Size() == 4) {
- break
- }
- v.reset(OpARMMOVBstore)
- v.AuxInt = 3
- v.AddArg(dst)
- v0 := b.NewValue0(v.Line, OpARMMOVBUload, config.fe.TypeUInt8())
- v0.AuxInt = 3
- v0.AddArg(src)
- v0.AddArg(mem)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpARMMOVBstore, TypeMem)
- v1.AuxInt = 2
- v1.AddArg(dst)
- v2 := b.NewValue0(v.Line, OpARMMOVBUload, config.fe.TypeUInt8())
- v2.AuxInt = 2
- v2.AddArg(src)
- v2.AddArg(mem)
- v1.AddArg(v2)
- v3 := b.NewValue0(v.Line, OpARMMOVBstore, TypeMem)
- v3.AuxInt = 1
- v3.AddArg(dst)
- v4 := b.NewValue0(v.Line, OpARMMOVBUload, config.fe.TypeUInt8())
- v4.AuxInt = 1
- v4.AddArg(src)
- v4.AddArg(mem)
- v3.AddArg(v4)
- v5 := b.NewValue0(v.Line, OpARMMOVBstore, TypeMem)
- v5.AddArg(dst)
- v6 := b.NewValue0(v.Line, OpARMMOVBUload, config.fe.TypeUInt8())
- v6.AddArg(src)
- v6.AddArg(mem)
- v5.AddArg(v6)
- v5.AddArg(mem)
- v3.AddArg(v5)
- v1.AddArg(v3)
- v.AddArg(v1)
- return true
- }
- // match: (Move [s] dst src mem)
- // cond: SizeAndAlign(s).Size() == 3
- // result: (MOVBstore [2] dst (MOVBUload [2] src mem) (MOVBstore [1] dst (MOVBUload [1] src mem) (MOVBstore dst (MOVBUload src mem) mem)))
- for {
- s := v.AuxInt
- dst := v.Args[0]
- src := v.Args[1]
- mem := v.Args[2]
- if !(SizeAndAlign(s).Size() == 3) {
- break
- }
- v.reset(OpARMMOVBstore)
- v.AuxInt = 2
- v.AddArg(dst)
- v0 := b.NewValue0(v.Line, OpARMMOVBUload, config.fe.TypeUInt8())
- v0.AuxInt = 2
- v0.AddArg(src)
- v0.AddArg(mem)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpARMMOVBstore, TypeMem)
- v1.AuxInt = 1
- v1.AddArg(dst)
- v2 := b.NewValue0(v.Line, OpARMMOVBUload, config.fe.TypeUInt8())
- v2.AuxInt = 1
- v2.AddArg(src)
- v2.AddArg(mem)
- v1.AddArg(v2)
- v3 := b.NewValue0(v.Line, OpARMMOVBstore, TypeMem)
- v3.AddArg(dst)
- v4 := b.NewValue0(v.Line, OpARMMOVBUload, config.fe.TypeUInt8())
- v4.AddArg(src)
- v4.AddArg(mem)
- v3.AddArg(v4)
- v3.AddArg(mem)
- v1.AddArg(v3)
- v.AddArg(v1)
- return true
- }
- // match: (Move [s] dst src mem)
- // cond: SizeAndAlign(s).Size()%4 == 0 && SizeAndAlign(s).Size() > 4 && SizeAndAlign(s).Size() <= 512 && SizeAndAlign(s).Align()%4 == 0 && !config.noDuffDevice
- // result: (DUFFCOPY [8 * (128 - int64(SizeAndAlign(s).Size()/4))] dst src mem)
- for {
- s := v.AuxInt
- dst := v.Args[0]
- src := v.Args[1]
- mem := v.Args[2]
- if !(SizeAndAlign(s).Size()%4 == 0 && SizeAndAlign(s).Size() > 4 && SizeAndAlign(s).Size() <= 512 && SizeAndAlign(s).Align()%4 == 0 && !config.noDuffDevice) {
- break
- }
- v.reset(OpARMDUFFCOPY)
- v.AuxInt = 8 * (128 - int64(SizeAndAlign(s).Size()/4))
- v.AddArg(dst)
- v.AddArg(src)
- v.AddArg(mem)
- return true
- }
- // match: (Move [s] dst src mem)
- // cond: (SizeAndAlign(s).Size() > 512 || config.noDuffDevice) || SizeAndAlign(s).Align()%4 != 0
- // result: (LoweredMove [SizeAndAlign(s).Align()] dst src (ADDconst <src.Type> src [SizeAndAlign(s).Size()-moveSize(SizeAndAlign(s).Align(), config)]) mem)
- for {
- s := v.AuxInt
- dst := v.Args[0]
- src := v.Args[1]
- mem := v.Args[2]
- if !((SizeAndAlign(s).Size() > 512 || config.noDuffDevice) || SizeAndAlign(s).Align()%4 != 0) {
- break
- }
- v.reset(OpARMLoweredMove)
- v.AuxInt = SizeAndAlign(s).Align()
- v.AddArg(dst)
- v.AddArg(src)
- v0 := b.NewValue0(v.Line, OpARMADDconst, src.Type)
- v0.AddArg(src)
- v0.AuxInt = SizeAndAlign(s).Size() - moveSize(SizeAndAlign(s).Align(), config)
- v.AddArg(v0)
- v.AddArg(mem)
return true
}
return false
}
-func rewriteValueARM_OpMul16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Mul16 x y)
- // cond:
- // result: (MUL x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMMUL)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueARM_OpMul32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Mul32 x y)
- // cond:
- // result: (MUL x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMMUL)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueARM_OpMul32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Mul32F x y)
- // cond:
- // result: (MULF x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMMULF)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueARM_OpMul32uhilo(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Mul32uhilo x y)
- // cond:
- // result: (MULLU x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMMULLU)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueARM_OpMul64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Mul64F x y)
- // cond:
- // result: (MULD x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMMULD)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueARM_OpMul8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Mul8 x y)
- // cond:
- // result: (MUL x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMMUL)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueARM_OpNeg16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Neg16 x)
- // cond:
- // result: (RSBconst [0] x)
- for {
- x := v.Args[0]
- v.reset(OpARMRSBconst)
- v.AuxInt = 0
- v.AddArg(x)
- return true
- }
-}
-func rewriteValueARM_OpNeg32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Neg32 x)
- // cond:
- // result: (RSBconst [0] x)
- for {
- x := v.Args[0]
- v.reset(OpARMRSBconst)
- v.AuxInt = 0
- v.AddArg(x)
- return true
- }
-}
-func rewriteValueARM_OpNeg32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Neg32F x)
- // cond:
- // result: (NEGF x)
- for {
- x := v.Args[0]
- v.reset(OpARMNEGF)
- v.AddArg(x)
- return true
- }
-}
-func rewriteValueARM_OpNeg64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Neg64F x)
- // cond:
- // result: (NEGD x)
- for {
- x := v.Args[0]
- v.reset(OpARMNEGD)
- v.AddArg(x)
- return true
- }
-}
-func rewriteValueARM_OpNeg8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Neg8 x)
- // cond:
- // result: (RSBconst [0] x)
- for {
- x := v.Args[0]
- v.reset(OpARMRSBconst)
- v.AuxInt = 0
- v.AddArg(x)
- return true
- }
-}
-func rewriteValueARM_OpNeq16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Neq16 x y)
- // cond:
- // result: (NotEqual (CMP (ZeroExt16to32 x) (ZeroExt16to32 y)))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMNotEqual)
- v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
- v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
- v1.AddArg(x)
- v0.AddArg(v1)
- v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
- v2.AddArg(y)
- v0.AddArg(v2)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueARM_OpNeq32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Neq32 x y)
- // cond:
- // result: (NotEqual (CMP x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMNotEqual)
- v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueARM_OpNeq32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Neq32F x y)
- // cond:
- // result: (NotEqual (CMPF x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMNotEqual)
- v0 := b.NewValue0(v.Line, OpARMCMPF, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueARM_OpNeq64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Neq64F x y)
- // cond:
- // result: (NotEqual (CMPD x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMNotEqual)
- v0 := b.NewValue0(v.Line, OpARMCMPD, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueARM_OpNeq8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Neq8 x y)
- // cond:
- // result: (NotEqual (CMP (ZeroExt8to32 x) (ZeroExt8to32 y)))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMNotEqual)
- v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
- v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
- v1.AddArg(x)
- v0.AddArg(v1)
- v2 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
- v2.AddArg(y)
- v0.AddArg(v2)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueARM_OpNeqB(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (NeqB x y)
- // cond:
- // result: (XOR x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMXOR)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueARM_OpNeqPtr(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (NeqPtr x y)
- // cond:
- // result: (NotEqual (CMP x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMNotEqual)
- v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueARM_OpNilCheck(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (NilCheck ptr mem)
- // cond:
- // result: (LoweredNilCheck ptr mem)
- for {
- ptr := v.Args[0]
- mem := v.Args[1]
- v.reset(OpARMLoweredNilCheck)
- v.AddArg(ptr)
- v.AddArg(mem)
- return true
- }
-}
-func rewriteValueARM_OpNot(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Not x)
- // cond:
- // result: (XORconst [1] x)
- for {
- x := v.Args[0]
- v.reset(OpARMXORconst)
- v.AuxInt = 1
- v.AddArg(x)
- return true
- }
-}
func rewriteValueARM_OpARMNotEqual(v *Value, config *Config) bool {
b := v.Block
_ = b
@@ -11154,9 +8390,9 @@ func rewriteValueARM_OpARMOR(v *Value, config *Config) bool {
c := v_1.AuxInt
y := v_1.Args[0]
v.reset(OpARMORshiftLL)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
// match: (OR (SLLconst [c] y) x)
@@ -11171,9 +8407,9 @@ func rewriteValueARM_OpARMOR(v *Value, config *Config) bool {
y := v_0.Args[0]
x := v.Args[1]
v.reset(OpARMORshiftLL)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
// match: (OR x (SRLconst [c] y))
@@ -11188,9 +8424,9 @@ func rewriteValueARM_OpARMOR(v *Value, config *Config) bool {
c := v_1.AuxInt
y := v_1.Args[0]
v.reset(OpARMORshiftRL)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
// match: (OR (SRLconst [c] y) x)
@@ -11205,9 +8441,9 @@ func rewriteValueARM_OpARMOR(v *Value, config *Config) bool {
y := v_0.Args[0]
x := v.Args[1]
v.reset(OpARMORshiftRL)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
// match: (OR x (SRAconst [c] y))
@@ -11222,9 +8458,9 @@ func rewriteValueARM_OpARMOR(v *Value, config *Config) bool {
c := v_1.AuxInt
y := v_1.Args[0]
v.reset(OpARMORshiftRA)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
// match: (OR (SRAconst [c] y) x)
@@ -11239,9 +8475,9 @@ func rewriteValueARM_OpARMOR(v *Value, config *Config) bool {
y := v_0.Args[0]
x := v.Args[1]
v.reset(OpARMORshiftRA)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
// match: (OR x (SLL y z))
@@ -11428,18 +8664,18 @@ func rewriteValueARM_OpARMORshiftLL(v *Value, config *Config) bool {
// cond:
// result: (ORconst [c] (SLLconst <x.Type> x [d]))
for {
+ d := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpARMMOVWconst {
break
}
c := v_0.AuxInt
x := v.Args[1]
- d := v.AuxInt
v.reset(OpARMORconst)
v.AuxInt = c
v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
- v0.AddArg(x)
v0.AuxInt = d
+ v0.AddArg(x)
v.AddArg(v0)
return true
}
@@ -11447,32 +8683,32 @@ func rewriteValueARM_OpARMORshiftLL(v *Value, config *Config) bool {
// cond:
// result: (ORconst x [int64(uint32(c)<<uint64(d))])
for {
+ d := v.AuxInt
x := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpARMMOVWconst {
break
}
c := v_1.AuxInt
- d := v.AuxInt
v.reset(OpARMORconst)
- v.AddArg(x)
v.AuxInt = int64(uint32(c) << uint64(d))
+ v.AddArg(x)
return true
}
// match: (ORshiftLL x y:(SLLconst x [c]) [d])
// cond: c==d
// result: y
for {
+ d := v.AuxInt
x := v.Args[0]
y := v.Args[1]
if y.Op != OpARMSLLconst {
break
}
+ c := y.AuxInt
if x != y.Args[0] {
break
}
- c := y.AuxInt
- d := v.AuxInt
if !(c == d) {
break
}
@@ -11517,9 +8753,9 @@ func rewriteValueARM_OpARMORshiftLLreg(v *Value, config *Config) bool {
}
c := v_2.AuxInt
v.reset(OpARMORshiftLL)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
return false
@@ -11531,18 +8767,18 @@ func rewriteValueARM_OpARMORshiftRA(v *Value, config *Config) bool {
// cond:
// result: (ORconst [c] (SRAconst <x.Type> x [d]))
for {
+ d := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpARMMOVWconst {
break
}
c := v_0.AuxInt
x := v.Args[1]
- d := v.AuxInt
v.reset(OpARMORconst)
v.AuxInt = c
v0 := b.NewValue0(v.Line, OpARMSRAconst, x.Type)
- v0.AddArg(x)
v0.AuxInt = d
+ v0.AddArg(x)
v.AddArg(v0)
return true
}
@@ -11550,32 +8786,32 @@ func rewriteValueARM_OpARMORshiftRA(v *Value, config *Config) bool {
// cond:
// result: (ORconst x [int64(int32(c)>>uint64(d))])
for {
+ d := v.AuxInt
x := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpARMMOVWconst {
break
}
c := v_1.AuxInt
- d := v.AuxInt
v.reset(OpARMORconst)
- v.AddArg(x)
v.AuxInt = int64(int32(c) >> uint64(d))
+ v.AddArg(x)
return true
}
// match: (ORshiftRA x y:(SRAconst x [c]) [d])
// cond: c==d
// result: y
for {
+ d := v.AuxInt
x := v.Args[0]
y := v.Args[1]
if y.Op != OpARMSRAconst {
break
}
+ c := y.AuxInt
if x != y.Args[0] {
break
}
- c := y.AuxInt
- d := v.AuxInt
if !(c == d) {
break
}
@@ -11620,9 +8856,9 @@ func rewriteValueARM_OpARMORshiftRAreg(v *Value, config *Config) bool {
}
c := v_2.AuxInt
v.reset(OpARMORshiftRA)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
return false
@@ -11634,18 +8870,18 @@ func rewriteValueARM_OpARMORshiftRL(v *Value, config *Config) bool {
// cond:
// result: (ORconst [c] (SRLconst <x.Type> x [d]))
for {
+ d := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpARMMOVWconst {
break
}
c := v_0.AuxInt
x := v.Args[1]
- d := v.AuxInt
v.reset(OpARMORconst)
v.AuxInt = c
v0 := b.NewValue0(v.Line, OpARMSRLconst, x.Type)
- v0.AddArg(x)
v0.AuxInt = d
+ v0.AddArg(x)
v.AddArg(v0)
return true
}
@@ -11653,32 +8889,32 @@ func rewriteValueARM_OpARMORshiftRL(v *Value, config *Config) bool {
// cond:
// result: (ORconst x [int64(uint32(c)>>uint64(d))])
for {
+ d := v.AuxInt
x := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpARMMOVWconst {
break
}
c := v_1.AuxInt
- d := v.AuxInt
v.reset(OpARMORconst)
- v.AddArg(x)
v.AuxInt = int64(uint32(c) >> uint64(d))
+ v.AddArg(x)
return true
}
// match: (ORshiftRL x y:(SRLconst x [c]) [d])
// cond: c==d
// result: y
for {
+ d := v.AuxInt
x := v.Args[0]
y := v.Args[1]
if y.Op != OpARMSRLconst {
break
}
+ c := y.AuxInt
if x != y.Args[0] {
break
}
- c := y.AuxInt
- d := v.AuxInt
if !(c == d) {
break
}
@@ -11723,101 +8959,12 @@ func rewriteValueARM_OpARMORshiftRLreg(v *Value, config *Config) bool {
}
c := v_2.AuxInt
v.reset(OpARMORshiftRL)
- v.AddArg(x)
- v.AddArg(y)
v.AuxInt = c
- return true
- }
- return false
-}
-func rewriteValueARM_OpOffPtr(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (OffPtr [off] ptr:(SP))
- // cond:
- // result: (MOVWaddr [off] ptr)
- for {
- off := v.AuxInt
- ptr := v.Args[0]
- if ptr.Op != OpSP {
- break
- }
- v.reset(OpARMMOVWaddr)
- v.AuxInt = off
- v.AddArg(ptr)
- return true
- }
- // match: (OffPtr [off] ptr)
- // cond:
- // result: (ADDconst [off] ptr)
- for {
- off := v.AuxInt
- ptr := v.Args[0]
- v.reset(OpARMADDconst)
- v.AuxInt = off
- v.AddArg(ptr)
- return true
- }
-}
-func rewriteValueARM_OpOr16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Or16 x y)
- // cond:
- // result: (OR x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMOR)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueARM_OpOr32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Or32 x y)
- // cond:
- // result: (OR x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMOR)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueARM_OpOr8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Or8 x y)
- // cond:
- // result: (OR x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMOR)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueARM_OpOrB(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (OrB x y)
- // cond:
- // result: (OR x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMOR)
v.AddArg(x)
v.AddArg(y)
return true
}
+ return false
}
func rewriteValueARM_OpARMRSB(v *Value, config *Config) bool {
b := v.Block
@@ -11864,9 +9011,9 @@ func rewriteValueARM_OpARMRSB(v *Value, config *Config) bool {
c := v_1.AuxInt
y := v_1.Args[0]
v.reset(OpARMRSBshiftLL)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
// match: (RSB (SLLconst [c] y) x)
@@ -11881,9 +9028,9 @@ func rewriteValueARM_OpARMRSB(v *Value, config *Config) bool {
y := v_0.Args[0]
x := v.Args[1]
v.reset(OpARMSUBshiftLL)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
// match: (RSB x (SRLconst [c] y))
@@ -11898,9 +9045,9 @@ func rewriteValueARM_OpARMRSB(v *Value, config *Config) bool {
c := v_1.AuxInt
y := v_1.Args[0]
v.reset(OpARMRSBshiftRL)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
// match: (RSB (SRLconst [c] y) x)
@@ -11915,9 +9062,9 @@ func rewriteValueARM_OpARMRSB(v *Value, config *Config) bool {
y := v_0.Args[0]
x := v.Args[1]
v.reset(OpARMSUBshiftRL)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
// match: (RSB x (SRAconst [c] y))
@@ -11932,9 +9079,9 @@ func rewriteValueARM_OpARMRSB(v *Value, config *Config) bool {
c := v_1.AuxInt
y := v_1.Args[0]
v.reset(OpARMRSBshiftRA)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
// match: (RSB (SRAconst [c] y) x)
@@ -11949,9 +9096,9 @@ func rewriteValueARM_OpARMRSB(v *Value, config *Config) bool {
y := v_0.Args[0]
x := v.Args[1]
v.reset(OpARMSUBshiftRA)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
// match: (RSB x (SLL y z))
@@ -12077,18 +9224,18 @@ func rewriteValueARM_OpARMRSBSshiftLL(v *Value, config *Config) bool {
// cond:
// result: (SUBSconst [c] (SLLconst <x.Type> x [d]))
for {
+ d := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpARMMOVWconst {
break
}
c := v_0.AuxInt
x := v.Args[1]
- d := v.AuxInt
v.reset(OpARMSUBSconst)
v.AuxInt = c
v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
- v0.AddArg(x)
v0.AuxInt = d
+ v0.AddArg(x)
v.AddArg(v0)
return true
}
@@ -12096,16 +9243,16 @@ func rewriteValueARM_OpARMRSBSshiftLL(v *Value, config *Config) bool {
// cond:
// result: (RSBSconst x [int64(uint32(c)<<uint64(d))])
for {
+ d := v.AuxInt
x := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpARMMOVWconst {
break
}
c := v_1.AuxInt
- d := v.AuxInt
v.reset(OpARMRSBSconst)
- v.AddArg(x)
v.AuxInt = int64(uint32(c) << uint64(d))
+ v.AddArg(x)
return true
}
return false
@@ -12144,9 +9291,9 @@ func rewriteValueARM_OpARMRSBSshiftLLreg(v *Value, config *Config) bool {
}
c := v_2.AuxInt
v.reset(OpARMRSBSshiftLL)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
return false
@@ -12158,18 +9305,18 @@ func rewriteValueARM_OpARMRSBSshiftRA(v *Value, config *Config) bool {
// cond:
// result: (SUBSconst [c] (SRAconst <x.Type> x [d]))
for {
+ d := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpARMMOVWconst {
break
}
c := v_0.AuxInt
x := v.Args[1]
- d := v.AuxInt
v.reset(OpARMSUBSconst)
v.AuxInt = c
v0 := b.NewValue0(v.Line, OpARMSRAconst, x.Type)
- v0.AddArg(x)
v0.AuxInt = d
+ v0.AddArg(x)
v.AddArg(v0)
return true
}
@@ -12177,16 +9324,16 @@ func rewriteValueARM_OpARMRSBSshiftRA(v *Value, config *Config) bool {
// cond:
// result: (RSBSconst x [int64(int32(c)>>uint64(d))])
for {
+ d := v.AuxInt
x := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpARMMOVWconst {
break
}
c := v_1.AuxInt
- d := v.AuxInt
v.reset(OpARMRSBSconst)
- v.AddArg(x)
v.AuxInt = int64(int32(c) >> uint64(d))
+ v.AddArg(x)
return true
}
return false
@@ -12225,9 +9372,9 @@ func rewriteValueARM_OpARMRSBSshiftRAreg(v *Value, config *Config) bool {
}
c := v_2.AuxInt
v.reset(OpARMRSBSshiftRA)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
return false
@@ -12239,18 +9386,18 @@ func rewriteValueARM_OpARMRSBSshiftRL(v *Value, config *Config) bool {
// cond:
// result: (SUBSconst [c] (SRLconst <x.Type> x [d]))
for {
+ d := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpARMMOVWconst {
break
}
c := v_0.AuxInt
x := v.Args[1]
- d := v.AuxInt
v.reset(OpARMSUBSconst)
v.AuxInt = c
v0 := b.NewValue0(v.Line, OpARMSRLconst, x.Type)
- v0.AddArg(x)
v0.AuxInt = d
+ v0.AddArg(x)
v.AddArg(v0)
return true
}
@@ -12258,16 +9405,16 @@ func rewriteValueARM_OpARMRSBSshiftRL(v *Value, config *Config) bool {
// cond:
// result: (RSBSconst x [int64(uint32(c)>>uint64(d))])
for {
+ d := v.AuxInt
x := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpARMMOVWconst {
break
}
c := v_1.AuxInt
- d := v.AuxInt
v.reset(OpARMRSBSconst)
- v.AddArg(x)
v.AuxInt = int64(uint32(c) >> uint64(d))
+ v.AddArg(x)
return true
}
return false
@@ -12306,9 +9453,9 @@ func rewriteValueARM_OpARMRSBSshiftRLreg(v *Value, config *Config) bool {
}
c := v_2.AuxInt
v.reset(OpARMRSBSshiftRL)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
return false
@@ -12387,18 +9534,18 @@ func rewriteValueARM_OpARMRSBshiftLL(v *Value, config *Config) bool {
// cond:
// result: (SUBconst [c] (SLLconst <x.Type> x [d]))
for {
+ d := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpARMMOVWconst {
break
}
c := v_0.AuxInt
x := v.Args[1]
- d := v.AuxInt
v.reset(OpARMSUBconst)
v.AuxInt = c
v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
- v0.AddArg(x)
v0.AuxInt = d
+ v0.AddArg(x)
v.AddArg(v0)
return true
}
@@ -12406,32 +9553,32 @@ func rewriteValueARM_OpARMRSBshiftLL(v *Value, config *Config) bool {
// cond:
// result: (RSBconst x [int64(uint32(c)<<uint64(d))])
for {
+ d := v.AuxInt
x := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpARMMOVWconst {
break
}
c := v_1.AuxInt
- d := v.AuxInt
v.reset(OpARMRSBconst)
- v.AddArg(x)
v.AuxInt = int64(uint32(c) << uint64(d))
+ v.AddArg(x)
return true
}
// match: (RSBshiftLL x (SLLconst x [c]) [d])
// cond: c==d
// result: (MOVWconst [0])
for {
+ d := v.AuxInt
x := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpARMSLLconst {
break
}
+ c := v_1.AuxInt
if x != v_1.Args[0] {
break
}
- c := v_1.AuxInt
- d := v.AuxInt
if !(c == d) {
break
}
@@ -12475,9 +9622,9 @@ func rewriteValueARM_OpARMRSBshiftLLreg(v *Value, config *Config) bool {
}
c := v_2.AuxInt
v.reset(OpARMRSBshiftLL)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
return false
@@ -12489,18 +9636,18 @@ func rewriteValueARM_OpARMRSBshiftRA(v *Value, config *Config) bool {
// cond:
// result: (SUBconst [c] (SRAconst <x.Type> x [d]))
for {
+ d := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpARMMOVWconst {
break
}
c := v_0.AuxInt
x := v.Args[1]
- d := v.AuxInt
v.reset(OpARMSUBconst)
v.AuxInt = c
v0 := b.NewValue0(v.Line, OpARMSRAconst, x.Type)
- v0.AddArg(x)
v0.AuxInt = d
+ v0.AddArg(x)
v.AddArg(v0)
return true
}
@@ -12508,32 +9655,32 @@ func rewriteValueARM_OpARMRSBshiftRA(v *Value, config *Config) bool {
// cond:
// result: (RSBconst x [int64(int32(c)>>uint64(d))])
for {
+ d := v.AuxInt
x := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpARMMOVWconst {
break
}
c := v_1.AuxInt
- d := v.AuxInt
v.reset(OpARMRSBconst)
- v.AddArg(x)
v.AuxInt = int64(int32(c) >> uint64(d))
+ v.AddArg(x)
return true
}
// match: (RSBshiftRA x (SRAconst x [c]) [d])
// cond: c==d
// result: (MOVWconst [0])
for {
+ d := v.AuxInt
x := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpARMSRAconst {
break
}
+ c := v_1.AuxInt
if x != v_1.Args[0] {
break
}
- c := v_1.AuxInt
- d := v.AuxInt
if !(c == d) {
break
}
@@ -12577,9 +9724,9 @@ func rewriteValueARM_OpARMRSBshiftRAreg(v *Value, config *Config) bool {
}
c := v_2.AuxInt
v.reset(OpARMRSBshiftRA)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
return false
@@ -12591,18 +9738,18 @@ func rewriteValueARM_OpARMRSBshiftRL(v *Value, config *Config) bool {
// cond:
// result: (SUBconst [c] (SRLconst <x.Type> x [d]))
for {
+ d := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpARMMOVWconst {
break
}
c := v_0.AuxInt
x := v.Args[1]
- d := v.AuxInt
v.reset(OpARMSUBconst)
v.AuxInt = c
v0 := b.NewValue0(v.Line, OpARMSRLconst, x.Type)
- v0.AddArg(x)
v0.AuxInt = d
+ v0.AddArg(x)
v.AddArg(v0)
return true
}
@@ -12610,32 +9757,32 @@ func rewriteValueARM_OpARMRSBshiftRL(v *Value, config *Config) bool {
// cond:
// result: (RSBconst x [int64(uint32(c)>>uint64(d))])
for {
+ d := v.AuxInt
x := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpARMMOVWconst {
break
}
c := v_1.AuxInt
- d := v.AuxInt
v.reset(OpARMRSBconst)
- v.AddArg(x)
v.AuxInt = int64(uint32(c) >> uint64(d))
+ v.AddArg(x)
return true
}
// match: (RSBshiftRL x (SRLconst x [c]) [d])
// cond: c==d
// result: (MOVWconst [0])
for {
+ d := v.AuxInt
x := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpARMSRLconst {
break
}
+ c := v_1.AuxInt
if x != v_1.Args[0] {
break
}
- c := v_1.AuxInt
- d := v.AuxInt
if !(c == d) {
break
}
@@ -12679,9 +9826,9 @@ func rewriteValueARM_OpARMRSBshiftRLreg(v *Value, config *Config) bool {
}
c := v_2.AuxInt
v.reset(OpARMRSBshiftRL)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
return false
@@ -12734,19 +9881,19 @@ func rewriteValueARM_OpARMRSCshiftLL(v *Value, config *Config) bool {
// cond:
// result: (SBCconst [c] (SLLconst <x.Type> x [d]) flags)
for {
+ d := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpARMMOVWconst {
break
}
c := v_0.AuxInt
x := v.Args[1]
- d := v.AuxInt
flags := v.Args[2]
v.reset(OpARMSBCconst)
v.AuxInt = c
v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
- v0.AddArg(x)
v0.AuxInt = d
+ v0.AddArg(x)
v.AddArg(v0)
v.AddArg(flags)
return true
@@ -12755,17 +9902,17 @@ func rewriteValueARM_OpARMRSCshiftLL(v *Value, config *Config) bool {
// cond:
// result: (RSCconst x [int64(uint32(c)<<uint64(d))] flags)
for {
+ d := v.AuxInt
x := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpARMMOVWconst {
break
}
c := v_1.AuxInt
- d := v.AuxInt
flags := v.Args[2]
v.reset(OpARMRSCconst)
- v.AddArg(x)
v.AuxInt = int64(uint32(c) << uint64(d))
+ v.AddArg(x)
v.AddArg(flags)
return true
}
@@ -12808,9 +9955,9 @@ func rewriteValueARM_OpARMRSCshiftLLreg(v *Value, config *Config) bool {
c := v_2.AuxInt
flags := v.Args[3]
v.reset(OpARMRSCshiftLL)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
v.AddArg(flags)
return true
}
@@ -12823,19 +9970,19 @@ func rewriteValueARM_OpARMRSCshiftRA(v *Value, config *Config) bool {
// cond:
// result: (SBCconst [c] (SRAconst <x.Type> x [d]) flags)
for {
+ d := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpARMMOVWconst {
break
}
c := v_0.AuxInt
x := v.Args[1]
- d := v.AuxInt
flags := v.Args[2]
v.reset(OpARMSBCconst)
v.AuxInt = c
v0 := b.NewValue0(v.Line, OpARMSRAconst, x.Type)
- v0.AddArg(x)
v0.AuxInt = d
+ v0.AddArg(x)
v.AddArg(v0)
v.AddArg(flags)
return true
@@ -12844,17 +9991,17 @@ func rewriteValueARM_OpARMRSCshiftRA(v *Value, config *Config) bool {
// cond:
// result: (RSCconst x [int64(int32(c)>>uint64(d))] flags)
for {
+ d := v.AuxInt
x := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpARMMOVWconst {
break
}
c := v_1.AuxInt
- d := v.AuxInt
flags := v.Args[2]
v.reset(OpARMRSCconst)
- v.AddArg(x)
v.AuxInt = int64(int32(c) >> uint64(d))
+ v.AddArg(x)
v.AddArg(flags)
return true
}
@@ -12897,9 +10044,9 @@ func rewriteValueARM_OpARMRSCshiftRAreg(v *Value, config *Config) bool {
c := v_2.AuxInt
flags := v.Args[3]
v.reset(OpARMRSCshiftRA)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
v.AddArg(flags)
return true
}
@@ -12912,19 +10059,19 @@ func rewriteValueARM_OpARMRSCshiftRL(v *Value, config *Config) bool {
// cond:
// result: (SBCconst [c] (SRLconst <x.Type> x [d]) flags)
for {
+ d := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpARMMOVWconst {
break
}
c := v_0.AuxInt
x := v.Args[1]
- d := v.AuxInt
flags := v.Args[2]
v.reset(OpARMSBCconst)
v.AuxInt = c
v0 := b.NewValue0(v.Line, OpARMSRLconst, x.Type)
- v0.AddArg(x)
v0.AuxInt = d
+ v0.AddArg(x)
v.AddArg(v0)
v.AddArg(flags)
return true
@@ -12933,17 +10080,17 @@ func rewriteValueARM_OpARMRSCshiftRL(v *Value, config *Config) bool {
// cond:
// result: (RSCconst x [int64(uint32(c)>>uint64(d))] flags)
for {
+ d := v.AuxInt
x := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpARMMOVWconst {
break
}
c := v_1.AuxInt
- d := v.AuxInt
flags := v.Args[2]
v.reset(OpARMRSCconst)
- v.AddArg(x)
v.AuxInt = int64(uint32(c) >> uint64(d))
+ v.AddArg(x)
v.AddArg(flags)
return true
}
@@ -12986,668 +10133,14 @@ func rewriteValueARM_OpARMRSCshiftRLreg(v *Value, config *Config) bool {
c := v_2.AuxInt
flags := v.Args[3]
v.reset(OpARMRSCshiftRL)
- v.AddArg(x)
- v.AddArg(y)
v.AuxInt = c
- v.AddArg(flags)
- return true
- }
- return false
-}
-func rewriteValueARM_OpRsh16Ux16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh16Ux16 x y)
- // cond:
- // result: (CMOVWHSconst (SRL <x.Type> (ZeroExt16to32 x) (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0])
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMCMOVWHSconst)
- v0 := b.NewValue0(v.Line, OpARMSRL, x.Type)
- v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
- v1.AddArg(x)
- v0.AddArg(v1)
- v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
- v2.AddArg(y)
- v0.AddArg(v2)
- v.AddArg(v0)
- v3 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
- v3.AuxInt = 256
- v4 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
- v4.AddArg(y)
- v3.AddArg(v4)
- v.AddArg(v3)
- v.AuxInt = 0
- return true
- }
-}
-func rewriteValueARM_OpRsh16Ux32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh16Ux32 x y)
- // cond:
- // result: (CMOVWHSconst (SRL <x.Type> (ZeroExt16to32 x) y) (CMPconst [256] y) [0])
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMCMOVWHSconst)
- v0 := b.NewValue0(v.Line, OpARMSRL, x.Type)
- v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
- v1.AddArg(x)
- v0.AddArg(v1)
- v0.AddArg(y)
- v.AddArg(v0)
- v2 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
- v2.AuxInt = 256
- v2.AddArg(y)
- v.AddArg(v2)
- v.AuxInt = 0
- return true
- }
-}
-func rewriteValueARM_OpRsh16Ux64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh16Ux64 x (Const64 [c]))
- // cond: uint64(c) < 16
- // result: (SRLconst (SLLconst <config.fe.TypeUInt32()> x [16]) [c+16])
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpConst64 {
- break
- }
- c := v_1.AuxInt
- if !(uint64(c) < 16) {
- break
- }
- v.reset(OpARMSRLconst)
- v0 := b.NewValue0(v.Line, OpARMSLLconst, config.fe.TypeUInt32())
- v0.AddArg(x)
- v0.AuxInt = 16
- v.AddArg(v0)
- v.AuxInt = c + 16
- return true
- }
- // match: (Rsh16Ux64 _ (Const64 [c]))
- // cond: uint64(c) >= 16
- // result: (Const16 [0])
- for {
- v_1 := v.Args[1]
- if v_1.Op != OpConst64 {
- break
- }
- c := v_1.AuxInt
- if !(uint64(c) >= 16) {
- break
- }
- v.reset(OpConst16)
- v.AuxInt = 0
- return true
- }
- return false
-}
-func rewriteValueARM_OpRsh16Ux8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh16Ux8 x y)
- // cond:
- // result: (SRL (ZeroExt16to32 x) (ZeroExt8to32 y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMSRL)
- v0 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
- v0.AddArg(x)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
- v1.AddArg(y)
- v.AddArg(v1)
- return true
- }
-}
-func rewriteValueARM_OpRsh16x16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh16x16 x y)
- // cond:
- // result: (SRAcond (SignExt16to32 x) (ZeroExt16to32 y) (CMPconst [256] (ZeroExt16to32 y)))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMSRAcond)
- v0 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
- v0.AddArg(x)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
- v1.AddArg(y)
- v.AddArg(v1)
- v2 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
- v2.AuxInt = 256
- v3 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
- v3.AddArg(y)
- v2.AddArg(v3)
- v.AddArg(v2)
- return true
- }
-}
-func rewriteValueARM_OpRsh16x32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh16x32 x y)
- // cond:
- // result: (SRAcond (SignExt16to32 x) y (CMPconst [256] y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMSRAcond)
- v0 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
- v0.AddArg(x)
- v.AddArg(v0)
- v.AddArg(y)
- v1 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
- v1.AuxInt = 256
- v1.AddArg(y)
- v.AddArg(v1)
- return true
- }
-}
-func rewriteValueARM_OpRsh16x64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh16x64 x (Const64 [c]))
- // cond: uint64(c) < 16
- // result: (SRAconst (SLLconst <config.fe.TypeUInt32()> x [16]) [c+16])
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpConst64 {
- break
- }
- c := v_1.AuxInt
- if !(uint64(c) < 16) {
- break
- }
- v.reset(OpARMSRAconst)
- v0 := b.NewValue0(v.Line, OpARMSLLconst, config.fe.TypeUInt32())
- v0.AddArg(x)
- v0.AuxInt = 16
- v.AddArg(v0)
- v.AuxInt = c + 16
- return true
- }
- // match: (Rsh16x64 x (Const64 [c]))
- // cond: uint64(c) >= 16
- // result: (SRAconst (SLLconst <config.fe.TypeUInt32()> x [16]) [31])
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpConst64 {
- break
- }
- c := v_1.AuxInt
- if !(uint64(c) >= 16) {
- break
- }
- v.reset(OpARMSRAconst)
- v0 := b.NewValue0(v.Line, OpARMSLLconst, config.fe.TypeUInt32())
- v0.AddArg(x)
- v0.AuxInt = 16
- v.AddArg(v0)
- v.AuxInt = 31
- return true
- }
- return false
-}
-func rewriteValueARM_OpRsh16x8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh16x8 x y)
- // cond:
- // result: (SRA (SignExt16to32 x) (ZeroExt8to32 y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMSRA)
- v0 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
- v0.AddArg(x)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
- v1.AddArg(y)
- v.AddArg(v1)
- return true
- }
-}
-func rewriteValueARM_OpRsh32Ux16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh32Ux16 x y)
- // cond:
- // result: (CMOVWHSconst (SRL <x.Type> x (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0])
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMCMOVWHSconst)
- v0 := b.NewValue0(v.Line, OpARMSRL, x.Type)
- v0.AddArg(x)
- v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
- v1.AddArg(y)
- v0.AddArg(v1)
- v.AddArg(v0)
- v2 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
- v2.AuxInt = 256
- v3 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
- v3.AddArg(y)
- v2.AddArg(v3)
- v.AddArg(v2)
- v.AuxInt = 0
- return true
- }
-}
-func rewriteValueARM_OpRsh32Ux32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh32Ux32 x y)
- // cond:
- // result: (CMOVWHSconst (SRL <x.Type> x y) (CMPconst [256] y) [0])
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMCMOVWHSconst)
- v0 := b.NewValue0(v.Line, OpARMSRL, x.Type)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
- v1.AuxInt = 256
- v1.AddArg(y)
- v.AddArg(v1)
- v.AuxInt = 0
- return true
- }
-}
-func rewriteValueARM_OpRsh32Ux64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh32Ux64 x (Const64 [c]))
- // cond: uint64(c) < 32
- // result: (SRLconst x [c])
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpConst64 {
- break
- }
- c := v_1.AuxInt
- if !(uint64(c) < 32) {
- break
- }
- v.reset(OpARMSRLconst)
- v.AddArg(x)
- v.AuxInt = c
- return true
- }
- // match: (Rsh32Ux64 _ (Const64 [c]))
- // cond: uint64(c) >= 32
- // result: (Const32 [0])
- for {
- v_1 := v.Args[1]
- if v_1.Op != OpConst64 {
- break
- }
- c := v_1.AuxInt
- if !(uint64(c) >= 32) {
- break
- }
- v.reset(OpConst32)
- v.AuxInt = 0
- return true
- }
- return false
-}
-func rewriteValueARM_OpRsh32Ux8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh32Ux8 x y)
- // cond:
- // result: (SRL x (ZeroExt8to32 y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMSRL)
- v.AddArg(x)
- v0 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueARM_OpRsh32x16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh32x16 x y)
- // cond:
- // result: (SRAcond x (ZeroExt16to32 y) (CMPconst [256] (ZeroExt16to32 y)))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMSRAcond)
- v.AddArg(x)
- v0 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
- v0.AddArg(y)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
- v1.AuxInt = 256
- v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
- v2.AddArg(y)
- v1.AddArg(v2)
- v.AddArg(v1)
- return true
- }
-}
-func rewriteValueARM_OpRsh32x32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh32x32 x y)
- // cond:
- // result: (SRAcond x y (CMPconst [256] y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMSRAcond)
- v.AddArg(x)
- v.AddArg(y)
- v0 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
- v0.AuxInt = 256
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueARM_OpRsh32x64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh32x64 x (Const64 [c]))
- // cond: uint64(c) < 32
- // result: (SRAconst x [c])
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpConst64 {
- break
- }
- c := v_1.AuxInt
- if !(uint64(c) < 32) {
- break
- }
- v.reset(OpARMSRAconst)
- v.AddArg(x)
- v.AuxInt = c
- return true
- }
- // match: (Rsh32x64 x (Const64 [c]))
- // cond: uint64(c) >= 32
- // result: (SRAconst x [31])
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpConst64 {
- break
- }
- c := v_1.AuxInt
- if !(uint64(c) >= 32) {
- break
- }
- v.reset(OpARMSRAconst)
v.AddArg(x)
- v.AuxInt = 31
- return true
- }
- return false
-}
-func rewriteValueARM_OpRsh32x8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh32x8 x y)
- // cond:
- // result: (SRA x (ZeroExt8to32 y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMSRA)
- v.AddArg(x)
- v0 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueARM_OpRsh8Ux16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh8Ux16 x y)
- // cond:
- // result: (CMOVWHSconst (SRL <x.Type> (ZeroExt8to32 x) (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0])
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMCMOVWHSconst)
- v0 := b.NewValue0(v.Line, OpARMSRL, x.Type)
- v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
- v1.AddArg(x)
- v0.AddArg(v1)
- v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
- v2.AddArg(y)
- v0.AddArg(v2)
- v.AddArg(v0)
- v3 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
- v3.AuxInt = 256
- v4 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
- v4.AddArg(y)
- v3.AddArg(v4)
- v.AddArg(v3)
- v.AuxInt = 0
- return true
- }
-}
-func rewriteValueARM_OpRsh8Ux32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh8Ux32 x y)
- // cond:
- // result: (CMOVWHSconst (SRL <x.Type> (ZeroExt8to32 x) y) (CMPconst [256] y) [0])
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMCMOVWHSconst)
- v0 := b.NewValue0(v.Line, OpARMSRL, x.Type)
- v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
- v1.AddArg(x)
- v0.AddArg(v1)
- v0.AddArg(y)
- v.AddArg(v0)
- v2 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
- v2.AuxInt = 256
- v2.AddArg(y)
- v.AddArg(v2)
- v.AuxInt = 0
- return true
- }
-}
-func rewriteValueARM_OpRsh8Ux64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh8Ux64 x (Const64 [c]))
- // cond: uint64(c) < 8
- // result: (SRLconst (SLLconst <config.fe.TypeUInt32()> x [24]) [c+24])
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpConst64 {
- break
- }
- c := v_1.AuxInt
- if !(uint64(c) < 8) {
- break
- }
- v.reset(OpARMSRLconst)
- v0 := b.NewValue0(v.Line, OpARMSLLconst, config.fe.TypeUInt32())
- v0.AddArg(x)
- v0.AuxInt = 24
- v.AddArg(v0)
- v.AuxInt = c + 24
- return true
- }
- // match: (Rsh8Ux64 _ (Const64 [c]))
- // cond: uint64(c) >= 8
- // result: (Const8 [0])
- for {
- v_1 := v.Args[1]
- if v_1.Op != OpConst64 {
- break
- }
- c := v_1.AuxInt
- if !(uint64(c) >= 8) {
- break
- }
- v.reset(OpConst8)
- v.AuxInt = 0
- return true
- }
- return false
-}
-func rewriteValueARM_OpRsh8Ux8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh8Ux8 x y)
- // cond:
- // result: (SRL (ZeroExt8to32 x) (ZeroExt8to32 y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMSRL)
- v0 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
- v0.AddArg(x)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
- v1.AddArg(y)
- v.AddArg(v1)
- return true
- }
-}
-func rewriteValueARM_OpRsh8x16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh8x16 x y)
- // cond:
- // result: (SRAcond (SignExt8to32 x) (ZeroExt16to32 y) (CMPconst [256] (ZeroExt16to32 y)))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMSRAcond)
- v0 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
- v0.AddArg(x)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
- v1.AddArg(y)
- v.AddArg(v1)
- v2 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
- v2.AuxInt = 256
- v3 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
- v3.AddArg(y)
- v2.AddArg(v3)
- v.AddArg(v2)
- return true
- }
-}
-func rewriteValueARM_OpRsh8x32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh8x32 x y)
- // cond:
- // result: (SRAcond (SignExt8to32 x) y (CMPconst [256] y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMSRAcond)
- v0 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
- v0.AddArg(x)
- v.AddArg(v0)
v.AddArg(y)
- v1 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
- v1.AuxInt = 256
- v1.AddArg(y)
- v.AddArg(v1)
- return true
- }
-}
-func rewriteValueARM_OpRsh8x64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh8x64 x (Const64 [c]))
- // cond: uint64(c) < 8
- // result: (SRAconst (SLLconst <config.fe.TypeUInt32()> x [24]) [c+24])
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpConst64 {
- break
- }
- c := v_1.AuxInt
- if !(uint64(c) < 8) {
- break
- }
- v.reset(OpARMSRAconst)
- v0 := b.NewValue0(v.Line, OpARMSLLconst, config.fe.TypeUInt32())
- v0.AddArg(x)
- v0.AuxInt = 24
- v.AddArg(v0)
- v.AuxInt = c + 24
- return true
- }
- // match: (Rsh8x64 x (Const64 [c]))
- // cond: uint64(c) >= 8
- // result: (SRAconst (SLLconst <config.fe.TypeUInt32()> x [24]) [31])
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpConst64 {
- break
- }
- c := v_1.AuxInt
- if !(uint64(c) >= 8) {
- break
- }
- v.reset(OpARMSRAconst)
- v0 := b.NewValue0(v.Line, OpARMSLLconst, config.fe.TypeUInt32())
- v0.AddArg(x)
- v0.AuxInt = 24
- v.AddArg(v0)
- v.AuxInt = 31
+ v.AddArg(flags)
return true
}
return false
}
-func rewriteValueARM_OpRsh8x8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh8x8 x y)
- // cond:
- // result: (SRA (SignExt8to32 x) (ZeroExt8to32 y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMSRA)
- v0 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
- v0.AddArg(x)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
- v1.AddArg(y)
- v.AddArg(v1)
- return true
- }
-}
func rewriteValueARM_OpARMSBC(v *Value, config *Config) bool {
b := v.Block
_ = b
@@ -13698,9 +10191,9 @@ func rewriteValueARM_OpARMSBC(v *Value, config *Config) bool {
y := v_1.Args[0]
flags := v.Args[2]
v.reset(OpARMSBCshiftLL)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
v.AddArg(flags)
return true
}
@@ -13717,9 +10210,9 @@ func rewriteValueARM_OpARMSBC(v *Value, config *Config) bool {
x := v.Args[1]
flags := v.Args[2]
v.reset(OpARMRSCshiftLL)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
v.AddArg(flags)
return true
}
@@ -13736,9 +10229,9 @@ func rewriteValueARM_OpARMSBC(v *Value, config *Config) bool {
y := v_1.Args[0]
flags := v.Args[2]
v.reset(OpARMSBCshiftRL)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
v.AddArg(flags)
return true
}
@@ -13755,9 +10248,9 @@ func rewriteValueARM_OpARMSBC(v *Value, config *Config) bool {
x := v.Args[1]
flags := v.Args[2]
v.reset(OpARMRSCshiftRL)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
v.AddArg(flags)
return true
}
@@ -13774,9 +10267,9 @@ func rewriteValueARM_OpARMSBC(v *Value, config *Config) bool {
y := v_1.Args[0]
flags := v.Args[2]
v.reset(OpARMSBCshiftRA)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
v.AddArg(flags)
return true
}
@@ -13793,9 +10286,9 @@ func rewriteValueARM_OpARMSBC(v *Value, config *Config) bool {
x := v.Args[1]
flags := v.Args[2]
v.reset(OpARMRSCshiftRA)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
v.AddArg(flags)
return true
}
@@ -13963,19 +10456,19 @@ func rewriteValueARM_OpARMSBCshiftLL(v *Value, config *Config) bool {
// cond:
// result: (RSCconst [c] (SLLconst <x.Type> x [d]) flags)
for {
+ d := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpARMMOVWconst {
break
}
c := v_0.AuxInt
x := v.Args[1]
- d := v.AuxInt
flags := v.Args[2]
v.reset(OpARMRSCconst)
v.AuxInt = c
v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
- v0.AddArg(x)
v0.AuxInt = d
+ v0.AddArg(x)
v.AddArg(v0)
v.AddArg(flags)
return true
@@ -13984,17 +10477,17 @@ func rewriteValueARM_OpARMSBCshiftLL(v *Value, config *Config) bool {
// cond:
// result: (SBCconst x [int64(uint32(c)<<uint64(d))] flags)
for {
+ d := v.AuxInt
x := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpARMMOVWconst {
break
}
c := v_1.AuxInt
- d := v.AuxInt
flags := v.Args[2]
v.reset(OpARMSBCconst)
- v.AddArg(x)
v.AuxInt = int64(uint32(c) << uint64(d))
+ v.AddArg(x)
v.AddArg(flags)
return true
}
@@ -14037,9 +10530,9 @@ func rewriteValueARM_OpARMSBCshiftLLreg(v *Value, config *Config) bool {
c := v_2.AuxInt
flags := v.Args[3]
v.reset(OpARMSBCshiftLL)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
v.AddArg(flags)
return true
}
@@ -14052,19 +10545,19 @@ func rewriteValueARM_OpARMSBCshiftRA(v *Value, config *Config) bool {
// cond:
// result: (RSCconst [c] (SRAconst <x.Type> x [d]) flags)
for {
+ d := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpARMMOVWconst {
break
}
c := v_0.AuxInt
x := v.Args[1]
- d := v.AuxInt
flags := v.Args[2]
v.reset(OpARMRSCconst)
v.AuxInt = c
v0 := b.NewValue0(v.Line, OpARMSRAconst, x.Type)
- v0.AddArg(x)
v0.AuxInt = d
+ v0.AddArg(x)
v.AddArg(v0)
v.AddArg(flags)
return true
@@ -14073,17 +10566,17 @@ func rewriteValueARM_OpARMSBCshiftRA(v *Value, config *Config) bool {
// cond:
// result: (SBCconst x [int64(int32(c)>>uint64(d))] flags)
for {
+ d := v.AuxInt
x := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpARMMOVWconst {
break
}
c := v_1.AuxInt
- d := v.AuxInt
flags := v.Args[2]
v.reset(OpARMSBCconst)
- v.AddArg(x)
v.AuxInt = int64(int32(c) >> uint64(d))
+ v.AddArg(x)
v.AddArg(flags)
return true
}
@@ -14126,9 +10619,9 @@ func rewriteValueARM_OpARMSBCshiftRAreg(v *Value, config *Config) bool {
c := v_2.AuxInt
flags := v.Args[3]
v.reset(OpARMSBCshiftRA)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
v.AddArg(flags)
return true
}
@@ -14141,19 +10634,19 @@ func rewriteValueARM_OpARMSBCshiftRL(v *Value, config *Config) bool {
// cond:
// result: (RSCconst [c] (SRLconst <x.Type> x [d]) flags)
for {
+ d := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpARMMOVWconst {
break
}
c := v_0.AuxInt
x := v.Args[1]
- d := v.AuxInt
flags := v.Args[2]
v.reset(OpARMRSCconst)
v.AuxInt = c
v0 := b.NewValue0(v.Line, OpARMSRLconst, x.Type)
- v0.AddArg(x)
v0.AuxInt = d
+ v0.AddArg(x)
v.AddArg(v0)
v.AddArg(flags)
return true
@@ -14162,17 +10655,17 @@ func rewriteValueARM_OpARMSBCshiftRL(v *Value, config *Config) bool {
// cond:
// result: (SBCconst x [int64(uint32(c)>>uint64(d))] flags)
for {
+ d := v.AuxInt
x := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpARMMOVWconst {
break
}
c := v_1.AuxInt
- d := v.AuxInt
flags := v.Args[2]
v.reset(OpARMSBCconst)
- v.AddArg(x)
v.AuxInt = int64(uint32(c) >> uint64(d))
+ v.AddArg(x)
v.AddArg(flags)
return true
}
@@ -14215,9 +10708,9 @@ func rewriteValueARM_OpARMSBCshiftRLreg(v *Value, config *Config) bool {
c := v_2.AuxInt
flags := v.Args[3]
v.reset(OpARMSBCshiftRL)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
v.AddArg(flags)
return true
}
@@ -14237,8 +10730,8 @@ func rewriteValueARM_OpARMSLL(v *Value, config *Config) bool {
}
c := v_1.AuxInt
v.reset(OpARMSLLconst)
- v.AddArg(x)
v.AuxInt = c & 31
+ v.AddArg(x)
return true
}
return false
@@ -14276,8 +10769,8 @@ func rewriteValueARM_OpARMSRA(v *Value, config *Config) bool {
}
c := v_1.AuxInt
v.reset(OpARMSRAconst)
- v.AddArg(x)
v.AuxInt = c & 31
+ v.AddArg(x)
return true
}
return false
@@ -14295,8 +10788,8 @@ func rewriteValueARM_OpARMSRAcond(v *Value, config *Config) bool {
break
}
v.reset(OpARMSRAconst)
- v.AddArg(x)
v.AuxInt = 31
+ v.AddArg(x)
return true
}
// match: (SRAcond x y (FlagLT_ULT))
@@ -14324,8 +10817,8 @@ func rewriteValueARM_OpARMSRAcond(v *Value, config *Config) bool {
break
}
v.reset(OpARMSRAconst)
- v.AddArg(x)
v.AuxInt = 31
+ v.AddArg(x)
return true
}
// match: (SRAcond x y (FlagGT_ULT))
@@ -14353,8 +10846,8 @@ func rewriteValueARM_OpARMSRAcond(v *Value, config *Config) bool {
break
}
v.reset(OpARMSRAconst)
- v.AddArg(x)
v.AuxInt = 31
+ v.AddArg(x)
return true
}
return false
@@ -14392,8 +10885,8 @@ func rewriteValueARM_OpARMSRL(v *Value, config *Config) bool {
}
c := v_1.AuxInt
v.reset(OpARMSRLconst)
- v.AddArg(x)
v.AuxInt = c & 31
+ v.AddArg(x)
return true
}
return false
@@ -14462,9 +10955,9 @@ func rewriteValueARM_OpARMSUB(v *Value, config *Config) bool {
c := v_1.AuxInt
y := v_1.Args[0]
v.reset(OpARMSUBshiftLL)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
// match: (SUB (SLLconst [c] y) x)
@@ -14479,9 +10972,9 @@ func rewriteValueARM_OpARMSUB(v *Value, config *Config) bool {
y := v_0.Args[0]
x := v.Args[1]
v.reset(OpARMRSBshiftLL)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
// match: (SUB x (SRLconst [c] y))
@@ -14496,9 +10989,9 @@ func rewriteValueARM_OpARMSUB(v *Value, config *Config) bool {
c := v_1.AuxInt
y := v_1.Args[0]
v.reset(OpARMSUBshiftRL)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
// match: (SUB (SRLconst [c] y) x)
@@ -14513,9 +11006,9 @@ func rewriteValueARM_OpARMSUB(v *Value, config *Config) bool {
y := v_0.Args[0]
x := v.Args[1]
v.reset(OpARMRSBshiftRL)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
// match: (SUB x (SRAconst [c] y))
@@ -14530,9 +11023,9 @@ func rewriteValueARM_OpARMSUB(v *Value, config *Config) bool {
c := v_1.AuxInt
y := v_1.Args[0]
v.reset(OpARMSUBshiftRA)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
// match: (SUB (SRAconst [c] y) x)
@@ -14547,9 +11040,9 @@ func rewriteValueARM_OpARMSUB(v *Value, config *Config) bool {
y := v_0.Args[0]
x := v.Args[1]
v.reset(OpARMRSBshiftRA)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
// match: (SUB x (SLL y z))
@@ -14713,9 +11206,9 @@ func rewriteValueARM_OpARMSUBS(v *Value, config *Config) bool {
c := v_1.AuxInt
y := v_1.Args[0]
v.reset(OpARMSUBSshiftLL)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
// match: (SUBS (SLLconst [c] y) x)
@@ -14730,9 +11223,9 @@ func rewriteValueARM_OpARMSUBS(v *Value, config *Config) bool {
y := v_0.Args[0]
x := v.Args[1]
v.reset(OpARMRSBSshiftLL)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
// match: (SUBS x (SRLconst [c] y))
@@ -14747,9 +11240,9 @@ func rewriteValueARM_OpARMSUBS(v *Value, config *Config) bool {
c := v_1.AuxInt
y := v_1.Args[0]
v.reset(OpARMSUBSshiftRL)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
// match: (SUBS (SRLconst [c] y) x)
@@ -14764,9 +11257,9 @@ func rewriteValueARM_OpARMSUBS(v *Value, config *Config) bool {
y := v_0.Args[0]
x := v.Args[1]
v.reset(OpARMRSBSshiftRL)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
// match: (SUBS x (SRAconst [c] y))
@@ -14781,9 +11274,9 @@ func rewriteValueARM_OpARMSUBS(v *Value, config *Config) bool {
c := v_1.AuxInt
y := v_1.Args[0]
v.reset(OpARMSUBSshiftRA)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
// match: (SUBS (SRAconst [c] y) x)
@@ -14798,9 +11291,9 @@ func rewriteValueARM_OpARMSUBS(v *Value, config *Config) bool {
y := v_0.Args[0]
x := v.Args[1]
v.reset(OpARMRSBSshiftRA)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
// match: (SUBS x (SLL y z))
@@ -14914,18 +11407,18 @@ func rewriteValueARM_OpARMSUBSshiftLL(v *Value, config *Config) bool {
// cond:
// result: (RSBSconst [c] (SLLconst <x.Type> x [d]))
for {
+ d := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpARMMOVWconst {
break
}
c := v_0.AuxInt
x := v.Args[1]
- d := v.AuxInt
v.reset(OpARMRSBSconst)
v.AuxInt = c
v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
- v0.AddArg(x)
v0.AuxInt = d
+ v0.AddArg(x)
v.AddArg(v0)
return true
}
@@ -14933,16 +11426,16 @@ func rewriteValueARM_OpARMSUBSshiftLL(v *Value, config *Config) bool {
// cond:
// result: (SUBSconst x [int64(uint32(c)<<uint64(d))])
for {
+ d := v.AuxInt
x := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpARMMOVWconst {
break
}
c := v_1.AuxInt
- d := v.AuxInt
v.reset(OpARMSUBSconst)
- v.AddArg(x)
v.AuxInt = int64(uint32(c) << uint64(d))
+ v.AddArg(x)
return true
}
return false
@@ -14981,9 +11474,9 @@ func rewriteValueARM_OpARMSUBSshiftLLreg(v *Value, config *Config) bool {
}
c := v_2.AuxInt
v.reset(OpARMSUBSshiftLL)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
return false
@@ -14995,18 +11488,18 @@ func rewriteValueARM_OpARMSUBSshiftRA(v *Value, config *Config) bool {
// cond:
// result: (RSBSconst [c] (SRAconst <x.Type> x [d]))
for {
+ d := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpARMMOVWconst {
break
}
c := v_0.AuxInt
x := v.Args[1]
- d := v.AuxInt
v.reset(OpARMRSBSconst)
v.AuxInt = c
v0 := b.NewValue0(v.Line, OpARMSRAconst, x.Type)
- v0.AddArg(x)
v0.AuxInt = d
+ v0.AddArg(x)
v.AddArg(v0)
return true
}
@@ -15014,16 +11507,16 @@ func rewriteValueARM_OpARMSUBSshiftRA(v *Value, config *Config) bool {
// cond:
// result: (SUBSconst x [int64(int32(c)>>uint64(d))])
for {
+ d := v.AuxInt
x := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpARMMOVWconst {
break
}
c := v_1.AuxInt
- d := v.AuxInt
v.reset(OpARMSUBSconst)
- v.AddArg(x)
v.AuxInt = int64(int32(c) >> uint64(d))
+ v.AddArg(x)
return true
}
return false
@@ -15062,9 +11555,9 @@ func rewriteValueARM_OpARMSUBSshiftRAreg(v *Value, config *Config) bool {
}
c := v_2.AuxInt
v.reset(OpARMSUBSshiftRA)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
return false
@@ -15076,18 +11569,18 @@ func rewriteValueARM_OpARMSUBSshiftRL(v *Value, config *Config) bool {
// cond:
// result: (RSBSconst [c] (SRLconst <x.Type> x [d]))
for {
+ d := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpARMMOVWconst {
break
}
c := v_0.AuxInt
x := v.Args[1]
- d := v.AuxInt
v.reset(OpARMRSBSconst)
v.AuxInt = c
v0 := b.NewValue0(v.Line, OpARMSRLconst, x.Type)
- v0.AddArg(x)
v0.AuxInt = d
+ v0.AddArg(x)
v.AddArg(v0)
return true
}
@@ -15095,16 +11588,16 @@ func rewriteValueARM_OpARMSUBSshiftRL(v *Value, config *Config) bool {
// cond:
// result: (SUBSconst x [int64(uint32(c)>>uint64(d))])
for {
+ d := v.AuxInt
x := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpARMMOVWconst {
break
}
c := v_1.AuxInt
- d := v.AuxInt
v.reset(OpARMSUBSconst)
- v.AddArg(x)
v.AuxInt = int64(uint32(c) >> uint64(d))
+ v.AddArg(x)
return true
}
return false
@@ -15143,9 +11636,9 @@ func rewriteValueARM_OpARMSUBSshiftRLreg(v *Value, config *Config) bool {
}
c := v_2.AuxInt
v.reset(OpARMSUBSshiftRL)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
return false
@@ -15237,18 +11730,18 @@ func rewriteValueARM_OpARMSUBshiftLL(v *Value, config *Config) bool {
// cond:
// result: (RSBconst [c] (SLLconst <x.Type> x [d]))
for {
+ d := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpARMMOVWconst {
break
}
c := v_0.AuxInt
x := v.Args[1]
- d := v.AuxInt
v.reset(OpARMRSBconst)
v.AuxInt = c
v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
- v0.AddArg(x)
v0.AuxInt = d
+ v0.AddArg(x)
v.AddArg(v0)
return true
}
@@ -15256,32 +11749,32 @@ func rewriteValueARM_OpARMSUBshiftLL(v *Value, config *Config) bool {
// cond:
// result: (SUBconst x [int64(uint32(c)<<uint64(d))])
for {
+ d := v.AuxInt
x := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpARMMOVWconst {
break
}
c := v_1.AuxInt
- d := v.AuxInt
v.reset(OpARMSUBconst)
- v.AddArg(x)
v.AuxInt = int64(uint32(c) << uint64(d))
+ v.AddArg(x)
return true
}
// match: (SUBshiftLL x (SLLconst x [c]) [d])
// cond: c==d
// result: (MOVWconst [0])
for {
+ d := v.AuxInt
x := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpARMSLLconst {
break
}
+ c := v_1.AuxInt
if x != v_1.Args[0] {
break
}
- c := v_1.AuxInt
- d := v.AuxInt
if !(c == d) {
break
}
@@ -15325,9 +11818,9 @@ func rewriteValueARM_OpARMSUBshiftLLreg(v *Value, config *Config) bool {
}
c := v_2.AuxInt
v.reset(OpARMSUBshiftLL)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
return false
@@ -15339,18 +11832,18 @@ func rewriteValueARM_OpARMSUBshiftRA(v *Value, config *Config) bool {
// cond:
// result: (RSBconst [c] (SRAconst <x.Type> x [d]))
for {
+ d := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpARMMOVWconst {
break
}
c := v_0.AuxInt
x := v.Args[1]
- d := v.AuxInt
v.reset(OpARMRSBconst)
v.AuxInt = c
v0 := b.NewValue0(v.Line, OpARMSRAconst, x.Type)
- v0.AddArg(x)
v0.AuxInt = d
+ v0.AddArg(x)
v.AddArg(v0)
return true
}
@@ -15358,32 +11851,32 @@ func rewriteValueARM_OpARMSUBshiftRA(v *Value, config *Config) bool {
// cond:
// result: (SUBconst x [int64(int32(c)>>uint64(d))])
for {
+ d := v.AuxInt
x := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpARMMOVWconst {
break
}
c := v_1.AuxInt
- d := v.AuxInt
v.reset(OpARMSUBconst)
- v.AddArg(x)
v.AuxInt = int64(int32(c) >> uint64(d))
+ v.AddArg(x)
return true
}
// match: (SUBshiftRA x (SRAconst x [c]) [d])
// cond: c==d
// result: (MOVWconst [0])
for {
+ d := v.AuxInt
x := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpARMSRAconst {
break
}
+ c := v_1.AuxInt
if x != v_1.Args[0] {
break
}
- c := v_1.AuxInt
- d := v.AuxInt
if !(c == d) {
break
}
@@ -15427,9 +11920,9 @@ func rewriteValueARM_OpARMSUBshiftRAreg(v *Value, config *Config) bool {
}
c := v_2.AuxInt
v.reset(OpARMSUBshiftRA)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
return false
@@ -15441,18 +11934,18 @@ func rewriteValueARM_OpARMSUBshiftRL(v *Value, config *Config) bool {
// cond:
// result: (RSBconst [c] (SRLconst <x.Type> x [d]))
for {
+ d := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpARMMOVWconst {
break
}
c := v_0.AuxInt
x := v.Args[1]
- d := v.AuxInt
v.reset(OpARMRSBconst)
v.AuxInt = c
v0 := b.NewValue0(v.Line, OpARMSRLconst, x.Type)
- v0.AddArg(x)
v0.AuxInt = d
+ v0.AddArg(x)
v.AddArg(v0)
return true
}
@@ -15460,32 +11953,32 @@ func rewriteValueARM_OpARMSUBshiftRL(v *Value, config *Config) bool {
// cond:
// result: (SUBconst x [int64(uint32(c)>>uint64(d))])
for {
+ d := v.AuxInt
x := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpARMMOVWconst {
break
}
c := v_1.AuxInt
- d := v.AuxInt
v.reset(OpARMSUBconst)
- v.AddArg(x)
v.AuxInt = int64(uint32(c) >> uint64(d))
+ v.AddArg(x)
return true
}
// match: (SUBshiftRL x (SRLconst x [c]) [d])
// cond: c==d
// result: (MOVWconst [0])
for {
+ d := v.AuxInt
x := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpARMSRLconst {
break
}
+ c := v_1.AuxInt
if x != v_1.Args[0] {
break
}
- c := v_1.AuxInt
- d := v.AuxInt
if !(c == d) {
break
}
@@ -15529,353 +12022,12 @@ func rewriteValueARM_OpARMSUBshiftRLreg(v *Value, config *Config) bool {
}
c := v_2.AuxInt
v.reset(OpARMSUBshiftRL)
- v.AddArg(x)
- v.AddArg(y)
v.AuxInt = c
- return true
- }
- return false
-}
-func rewriteValueARM_OpSignExt16to32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (SignExt16to32 x)
- // cond:
- // result: (MOVHreg x)
- for {
- x := v.Args[0]
- v.reset(OpARMMOVHreg)
- v.AddArg(x)
- return true
- }
-}
-func rewriteValueARM_OpSignExt8to16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (SignExt8to16 x)
- // cond:
- // result: (MOVBreg x)
- for {
- x := v.Args[0]
- v.reset(OpARMMOVBreg)
- v.AddArg(x)
- return true
- }
-}
-func rewriteValueARM_OpSignExt8to32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (SignExt8to32 x)
- // cond:
- // result: (MOVBreg x)
- for {
- x := v.Args[0]
- v.reset(OpARMMOVBreg)
- v.AddArg(x)
- return true
- }
-}
-func rewriteValueARM_OpSignmask(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Signmask x)
- // cond:
- // result: (SRAconst x [31])
- for {
- x := v.Args[0]
- v.reset(OpARMSRAconst)
- v.AddArg(x)
- v.AuxInt = 31
- return true
- }
-}
-func rewriteValueARM_OpSqrt(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Sqrt x)
- // cond:
- // result: (SQRTD x)
- for {
- x := v.Args[0]
- v.reset(OpARMSQRTD)
- v.AddArg(x)
- return true
- }
-}
-func rewriteValueARM_OpStaticCall(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (StaticCall [argwid] {target} mem)
- // cond:
- // result: (CALLstatic [argwid] {target} mem)
- for {
- argwid := v.AuxInt
- target := v.Aux
- mem := v.Args[0]
- v.reset(OpARMCALLstatic)
- v.AuxInt = argwid
- v.Aux = target
- v.AddArg(mem)
- return true
- }
-}
-func rewriteValueARM_OpStore(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Store [1] ptr val mem)
- // cond:
- // result: (MOVBstore ptr val mem)
- for {
- if v.AuxInt != 1 {
- break
- }
- ptr := v.Args[0]
- val := v.Args[1]
- mem := v.Args[2]
- v.reset(OpARMMOVBstore)
- v.AddArg(ptr)
- v.AddArg(val)
- v.AddArg(mem)
- return true
- }
- // match: (Store [2] ptr val mem)
- // cond:
- // result: (MOVHstore ptr val mem)
- for {
- if v.AuxInt != 2 {
- break
- }
- ptr := v.Args[0]
- val := v.Args[1]
- mem := v.Args[2]
- v.reset(OpARMMOVHstore)
- v.AddArg(ptr)
- v.AddArg(val)
- v.AddArg(mem)
- return true
- }
- // match: (Store [4] ptr val mem)
- // cond: !is32BitFloat(val.Type)
- // result: (MOVWstore ptr val mem)
- for {
- if v.AuxInt != 4 {
- break
- }
- ptr := v.Args[0]
- val := v.Args[1]
- mem := v.Args[2]
- if !(!is32BitFloat(val.Type)) {
- break
- }
- v.reset(OpARMMOVWstore)
- v.AddArg(ptr)
- v.AddArg(val)
- v.AddArg(mem)
- return true
- }
- // match: (Store [4] ptr val mem)
- // cond: is32BitFloat(val.Type)
- // result: (MOVFstore ptr val mem)
- for {
- if v.AuxInt != 4 {
- break
- }
- ptr := v.Args[0]
- val := v.Args[1]
- mem := v.Args[2]
- if !(is32BitFloat(val.Type)) {
- break
- }
- v.reset(OpARMMOVFstore)
- v.AddArg(ptr)
- v.AddArg(val)
- v.AddArg(mem)
- return true
- }
- // match: (Store [8] ptr val mem)
- // cond: is64BitFloat(val.Type)
- // result: (MOVDstore ptr val mem)
- for {
- if v.AuxInt != 8 {
- break
- }
- ptr := v.Args[0]
- val := v.Args[1]
- mem := v.Args[2]
- if !(is64BitFloat(val.Type)) {
- break
- }
- v.reset(OpARMMOVDstore)
- v.AddArg(ptr)
- v.AddArg(val)
- v.AddArg(mem)
- return true
- }
- return false
-}
-func rewriteValueARM_OpSub16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Sub16 x y)
- // cond:
- // result: (SUB x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMSUB)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueARM_OpSub32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Sub32 x y)
- // cond:
- // result: (SUB x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMSUB)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueARM_OpSub32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Sub32F x y)
- // cond:
- // result: (SUBF x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMSUBF)
v.AddArg(x)
v.AddArg(y)
return true
}
-}
-func rewriteValueARM_OpSub32carry(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Sub32carry x y)
- // cond:
- // result: (SUBS x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMSUBS)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueARM_OpSub32withcarry(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Sub32withcarry x y c)
- // cond:
- // result: (SBC x y c)
- for {
- x := v.Args[0]
- y := v.Args[1]
- c := v.Args[2]
- v.reset(OpARMSBC)
- v.AddArg(x)
- v.AddArg(y)
- v.AddArg(c)
- return true
- }
-}
-func rewriteValueARM_OpSub64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Sub64F x y)
- // cond:
- // result: (SUBD x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMSUBD)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueARM_OpSub8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Sub8 x y)
- // cond:
- // result: (SUB x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMSUB)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueARM_OpSubPtr(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (SubPtr x y)
- // cond:
- // result: (SUB x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMSUB)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueARM_OpTrunc16to8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Trunc16to8 x)
- // cond:
- // result: x
- for {
- x := v.Args[0]
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
- return true
- }
-}
-func rewriteValueARM_OpTrunc32to16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Trunc32to16 x)
- // cond:
- // result: x
- for {
- x := v.Args[0]
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
- return true
- }
-}
-func rewriteValueARM_OpTrunc32to8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Trunc32to8 x)
- // cond:
- // result: x
- for {
- x := v.Args[0]
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
- return true
- }
+ return false
}
func rewriteValueARM_OpARMXOR(v *Value, config *Config) bool {
b := v.Block
@@ -15922,9 +12074,9 @@ func rewriteValueARM_OpARMXOR(v *Value, config *Config) bool {
c := v_1.AuxInt
y := v_1.Args[0]
v.reset(OpARMXORshiftLL)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
// match: (XOR (SLLconst [c] y) x)
@@ -15939,9 +12091,9 @@ func rewriteValueARM_OpARMXOR(v *Value, config *Config) bool {
y := v_0.Args[0]
x := v.Args[1]
v.reset(OpARMXORshiftLL)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
// match: (XOR x (SRLconst [c] y))
@@ -15956,9 +12108,9 @@ func rewriteValueARM_OpARMXOR(v *Value, config *Config) bool {
c := v_1.AuxInt
y := v_1.Args[0]
v.reset(OpARMXORshiftRL)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
// match: (XOR (SRLconst [c] y) x)
@@ -15973,9 +12125,9 @@ func rewriteValueARM_OpARMXOR(v *Value, config *Config) bool {
y := v_0.Args[0]
x := v.Args[1]
v.reset(OpARMXORshiftRL)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
// match: (XOR x (SRAconst [c] y))
@@ -15990,9 +12142,9 @@ func rewriteValueARM_OpARMXOR(v *Value, config *Config) bool {
c := v_1.AuxInt
y := v_1.Args[0]
v.reset(OpARMXORshiftRA)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
// match: (XOR (SRAconst [c] y) x)
@@ -16007,9 +12159,9 @@ func rewriteValueARM_OpARMXOR(v *Value, config *Config) bool {
y := v_0.Args[0]
x := v.Args[1]
v.reset(OpARMXORshiftRA)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
// match: (XOR x (SLL y z))
@@ -16183,18 +12335,18 @@ func rewriteValueARM_OpARMXORshiftLL(v *Value, config *Config) bool {
// cond:
// result: (XORconst [c] (SLLconst <x.Type> x [d]))
for {
+ d := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpARMMOVWconst {
break
}
c := v_0.AuxInt
x := v.Args[1]
- d := v.AuxInt
v.reset(OpARMXORconst)
v.AuxInt = c
v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
- v0.AddArg(x)
v0.AuxInt = d
+ v0.AddArg(x)
v.AddArg(v0)
return true
}
@@ -16202,32 +12354,32 @@ func rewriteValueARM_OpARMXORshiftLL(v *Value, config *Config) bool {
// cond:
// result: (XORconst x [int64(uint32(c)<<uint64(d))])
for {
+ d := v.AuxInt
x := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpARMMOVWconst {
break
}
c := v_1.AuxInt
- d := v.AuxInt
v.reset(OpARMXORconst)
- v.AddArg(x)
v.AuxInt = int64(uint32(c) << uint64(d))
+ v.AddArg(x)
return true
}
// match: (XORshiftLL x (SLLconst x [c]) [d])
// cond: c==d
// result: (MOVWconst [0])
for {
+ d := v.AuxInt
x := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpARMSLLconst {
break
}
+ c := v_1.AuxInt
if x != v_1.Args[0] {
break
}
- c := v_1.AuxInt
- d := v.AuxInt
if !(c == d) {
break
}
@@ -16271,9 +12423,9 @@ func rewriteValueARM_OpARMXORshiftLLreg(v *Value, config *Config) bool {
}
c := v_2.AuxInt
v.reset(OpARMXORshiftLL)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
return false
@@ -16285,18 +12437,18 @@ func rewriteValueARM_OpARMXORshiftRA(v *Value, config *Config) bool {
// cond:
// result: (XORconst [c] (SRAconst <x.Type> x [d]))
for {
+ d := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpARMMOVWconst {
break
}
c := v_0.AuxInt
x := v.Args[1]
- d := v.AuxInt
v.reset(OpARMXORconst)
v.AuxInt = c
v0 := b.NewValue0(v.Line, OpARMSRAconst, x.Type)
- v0.AddArg(x)
v0.AuxInt = d
+ v0.AddArg(x)
v.AddArg(v0)
return true
}
@@ -16304,32 +12456,32 @@ func rewriteValueARM_OpARMXORshiftRA(v *Value, config *Config) bool {
// cond:
// result: (XORconst x [int64(int32(c)>>uint64(d))])
for {
+ d := v.AuxInt
x := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpARMMOVWconst {
break
}
c := v_1.AuxInt
- d := v.AuxInt
v.reset(OpARMXORconst)
- v.AddArg(x)
v.AuxInt = int64(int32(c) >> uint64(d))
+ v.AddArg(x)
return true
}
// match: (XORshiftRA x (SRAconst x [c]) [d])
// cond: c==d
// result: (MOVWconst [0])
for {
+ d := v.AuxInt
x := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpARMSRAconst {
break
}
+ c := v_1.AuxInt
if x != v_1.Args[0] {
break
}
- c := v_1.AuxInt
- d := v.AuxInt
if !(c == d) {
break
}
@@ -16373,9 +12525,9 @@ func rewriteValueARM_OpARMXORshiftRAreg(v *Value, config *Config) bool {
}
c := v_2.AuxInt
v.reset(OpARMXORshiftRA)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
return false
@@ -16387,18 +12539,18 @@ func rewriteValueARM_OpARMXORshiftRL(v *Value, config *Config) bool {
// cond:
// result: (XORconst [c] (SRLconst <x.Type> x [d]))
for {
+ d := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpARMMOVWconst {
break
}
c := v_0.AuxInt
x := v.Args[1]
- d := v.AuxInt
v.reset(OpARMXORconst)
v.AuxInt = c
v0 := b.NewValue0(v.Line, OpARMSRLconst, x.Type)
- v0.AddArg(x)
v0.AuxInt = d
+ v0.AddArg(x)
v.AddArg(v0)
return true
}
@@ -16406,32 +12558,32 @@ func rewriteValueARM_OpARMXORshiftRL(v *Value, config *Config) bool {
// cond:
// result: (XORconst x [int64(uint32(c)>>uint64(d))])
for {
+ d := v.AuxInt
x := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpARMMOVWconst {
break
}
c := v_1.AuxInt
- d := v.AuxInt
v.reset(OpARMXORconst)
- v.AddArg(x)
v.AuxInt = int64(uint32(c) >> uint64(d))
+ v.AddArg(x)
return true
}
// match: (XORshiftRL x (SRLconst x [c]) [d])
// cond: c==d
// result: (MOVWconst [0])
for {
+ d := v.AuxInt
x := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpARMSRLconst {
break
}
+ c := v_1.AuxInt
if x != v_1.Args[0] {
break
}
- c := v_1.AuxInt
- d := v.AuxInt
if !(c == d) {
break
}
@@ -16475,13 +12627,3861 @@ func rewriteValueARM_OpARMXORshiftRLreg(v *Value, config *Config) bool {
}
c := v_2.AuxInt
v.reset(OpARMXORshiftRL)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpAdd16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Add16 x y)
+ // cond:
+ // result: (ADD x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMADD)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM_OpAdd32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Add32 x y)
+ // cond:
+ // result: (ADD x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMADD)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM_OpAdd32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Add32F x y)
+ // cond:
+ // result: (ADDF x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMADDF)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM_OpAdd32carry(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Add32carry x y)
+ // cond:
+ // result: (ADDS x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMADDS)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM_OpAdd32withcarry(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Add32withcarry x y c)
+ // cond:
+ // result: (ADC x y c)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ c := v.Args[2]
+ v.reset(OpARMADC)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(c)
+ return true
+ }
+}
+func rewriteValueARM_OpAdd64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Add64F x y)
+ // cond:
+ // result: (ADDD x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMADDD)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM_OpAdd8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Add8 x y)
+ // cond:
+ // result: (ADD x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMADD)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM_OpAddPtr(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (AddPtr x y)
+ // cond:
+ // result: (ADD x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMADD)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM_OpAddr(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Addr {sym} base)
+ // cond:
+ // result: (MOVWaddr {sym} base)
+ for {
+ sym := v.Aux
+ base := v.Args[0]
+ v.reset(OpARMMOVWaddr)
+ v.Aux = sym
+ v.AddArg(base)
+ return true
+ }
+}
+func rewriteValueARM_OpAnd16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (And16 x y)
+ // cond:
+ // result: (AND x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMAND)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM_OpAnd32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (And32 x y)
+ // cond:
+ // result: (AND x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMAND)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM_OpAnd8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (And8 x y)
+ // cond:
+ // result: (AND x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMAND)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM_OpAndB(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (AndB x y)
+ // cond:
+ // result: (AND x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMAND)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM_OpClosureCall(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ClosureCall [argwid] entry closure mem)
+ // cond:
+ // result: (CALLclosure [argwid] entry closure mem)
+ for {
+ argwid := v.AuxInt
+ entry := v.Args[0]
+ closure := v.Args[1]
+ mem := v.Args[2]
+ v.reset(OpARMCALLclosure)
+ v.AuxInt = argwid
+ v.AddArg(entry)
+ v.AddArg(closure)
+ v.AddArg(mem)
+ return true
+ }
+}
+func rewriteValueARM_OpCom16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Com16 x)
+ // cond:
+ // result: (MVN x)
+ for {
+ x := v.Args[0]
+ v.reset(OpARMMVN)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM_OpCom32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Com32 x)
+ // cond:
+ // result: (MVN x)
+ for {
+ x := v.Args[0]
+ v.reset(OpARMMVN)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM_OpCom8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Com8 x)
+ // cond:
+ // result: (MVN x)
+ for {
+ x := v.Args[0]
+ v.reset(OpARMMVN)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM_OpConst16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Const16 [val])
+ // cond:
+ // result: (MOVWconst [val])
+ for {
+ val := v.AuxInt
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = val
+ return true
+ }
+}
+func rewriteValueARM_OpConst32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Const32 [val])
+ // cond:
+ // result: (MOVWconst [val])
+ for {
+ val := v.AuxInt
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = val
+ return true
+ }
+}
+func rewriteValueARM_OpConst32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Const32F [val])
+ // cond:
+ // result: (MOVFconst [val])
+ for {
+ val := v.AuxInt
+ v.reset(OpARMMOVFconst)
+ v.AuxInt = val
+ return true
+ }
+}
+func rewriteValueARM_OpConst64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Const64F [val])
+ // cond:
+ // result: (MOVDconst [val])
+ for {
+ val := v.AuxInt
+ v.reset(OpARMMOVDconst)
+ v.AuxInt = val
+ return true
+ }
+}
+func rewriteValueARM_OpConst8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Const8 [val])
+ // cond:
+ // result: (MOVWconst [val])
+ for {
+ val := v.AuxInt
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = val
+ return true
+ }
+}
+func rewriteValueARM_OpConstBool(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ConstBool [b])
+ // cond:
+ // result: (MOVWconst [b])
+ for {
+ b := v.AuxInt
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = b
+ return true
+ }
+}
+func rewriteValueARM_OpConstNil(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ConstNil)
+ // cond:
+ // result: (MOVWconst [0])
+ for {
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = 0
+ return true
+ }
+}
+func rewriteValueARM_OpConvert(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Convert x mem)
+ // cond:
+ // result: (MOVWconvert x mem)
+ for {
+ x := v.Args[0]
+ mem := v.Args[1]
+ v.reset(OpARMMOVWconvert)
+ v.AddArg(x)
+ v.AddArg(mem)
+ return true
+ }
+}
+func rewriteValueARM_OpCvt32Fto32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Cvt32Fto32 x)
+ // cond:
+ // result: (MOVFW x)
+ for {
+ x := v.Args[0]
+ v.reset(OpARMMOVFW)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM_OpCvt32Fto32U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Cvt32Fto32U x)
+ // cond:
+ // result: (MOVFWU x)
+ for {
+ x := v.Args[0]
+ v.reset(OpARMMOVFWU)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM_OpCvt32Fto64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Cvt32Fto64F x)
+ // cond:
+ // result: (MOVFD x)
+ for {
+ x := v.Args[0]
+ v.reset(OpARMMOVFD)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM_OpCvt32Uto32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Cvt32Uto32F x)
+ // cond:
+ // result: (MOVWUF x)
+ for {
+ x := v.Args[0]
+ v.reset(OpARMMOVWUF)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM_OpCvt32Uto64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Cvt32Uto64F x)
+ // cond:
+ // result: (MOVWUD x)
+ for {
+ x := v.Args[0]
+ v.reset(OpARMMOVWUD)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM_OpCvt32to32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Cvt32to32F x)
+ // cond:
+ // result: (MOVWF x)
+ for {
+ x := v.Args[0]
+ v.reset(OpARMMOVWF)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM_OpCvt32to64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Cvt32to64F x)
+ // cond:
+ // result: (MOVWD x)
+ for {
+ x := v.Args[0]
+ v.reset(OpARMMOVWD)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM_OpCvt64Fto32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Cvt64Fto32 x)
+ // cond:
+ // result: (MOVDW x)
+ for {
+ x := v.Args[0]
+ v.reset(OpARMMOVDW)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM_OpCvt64Fto32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Cvt64Fto32F x)
+ // cond:
+ // result: (MOVDF x)
+ for {
+ x := v.Args[0]
+ v.reset(OpARMMOVDF)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM_OpCvt64Fto32U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Cvt64Fto32U x)
+ // cond:
+ // result: (MOVDWU x)
+ for {
+ x := v.Args[0]
+ v.reset(OpARMMOVDWU)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM_OpDeferCall(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (DeferCall [argwid] mem)
+ // cond:
+ // result: (CALLdefer [argwid] mem)
+ for {
+ argwid := v.AuxInt
+ mem := v.Args[0]
+ v.reset(OpARMCALLdefer)
+ v.AuxInt = argwid
+ v.AddArg(mem)
+ return true
+ }
+}
+func rewriteValueARM_OpDiv16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Div16 x y)
+ // cond:
+ // result: (DIV (SignExt16to32 x) (SignExt16to32 y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMDIV)
+ v0 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
+ v1.AddArg(y)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueARM_OpDiv16u(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Div16u x y)
+ // cond:
+ // result: (DIVU (ZeroExt16to32 x) (ZeroExt16to32 y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMDIVU)
+ v0 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v1.AddArg(y)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueARM_OpDiv32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Div32 x y)
+ // cond:
+ // result: (DIV x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMDIV)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM_OpDiv32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Div32F x y)
+ // cond:
+ // result: (DIVF x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMDIVF)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM_OpDiv32u(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Div32u x y)
+ // cond:
+ // result: (DIVU x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMDIVU)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM_OpDiv64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Div64F x y)
+ // cond:
+ // result: (DIVD x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMDIVD)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM_OpDiv8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Div8 x y)
+ // cond:
+ // result: (DIV (SignExt8to32 x) (SignExt8to32 y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMDIV)
+ v0 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+ v1.AddArg(y)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueARM_OpDiv8u(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Div8u x y)
+ // cond:
+ // result: (DIVU (ZeroExt8to32 x) (ZeroExt8to32 y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMDIVU)
+ v0 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v1.AddArg(y)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueARM_OpEq16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Eq16 x y)
+ // cond:
+ // result: (Equal (CMP (ZeroExt16to32 x) (ZeroExt16to32 y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMEqual)
+ v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+ v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpEq32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Eq32 x y)
+ // cond:
+ // result: (Equal (CMP x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMEqual)
+ v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpEq32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Eq32F x y)
+ // cond:
+ // result: (Equal (CMPF x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMEqual)
+ v0 := b.NewValue0(v.Line, OpARMCMPF, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpEq64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Eq64F x y)
+ // cond:
+ // result: (Equal (CMPD x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMEqual)
+ v0 := b.NewValue0(v.Line, OpARMCMPD, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpEq8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Eq8 x y)
+ // cond:
+ // result: (Equal (CMP (ZeroExt8to32 x) (ZeroExt8to32 y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMEqual)
+ v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+ v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpEqB(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (EqB x y)
+ // cond:
+ // result: (XORconst [1] (XOR <config.fe.TypeBool()> x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMXORconst)
+ v.AuxInt = 1
+ v0 := b.NewValue0(v.Line, OpARMXOR, config.fe.TypeBool())
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpEqPtr(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (EqPtr x y)
+ // cond:
+ // result: (Equal (CMP x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMEqual)
+ v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpGeq16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Geq16 x y)
+ // cond:
+ // result: (GreaterEqual (CMP (SignExt16to32 x) (SignExt16to32 y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMGreaterEqual)
+ v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+ v1 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpGeq16U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Geq16U x y)
+ // cond:
+ // result: (GreaterEqualU (CMP (ZeroExt16to32 x) (ZeroExt16to32 y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMGreaterEqualU)
+ v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+ v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpGeq32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Geq32 x y)
+ // cond:
+ // result: (GreaterEqual (CMP x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMGreaterEqual)
+ v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpGeq32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Geq32F x y)
+ // cond:
+ // result: (GreaterEqual (CMPF x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMGreaterEqual)
+ v0 := b.NewValue0(v.Line, OpARMCMPF, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpGeq32U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Geq32U x y)
+ // cond:
+ // result: (GreaterEqualU (CMP x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMGreaterEqualU)
+ v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpGeq64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Geq64F x y)
+ // cond:
+ // result: (GreaterEqual (CMPD x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMGreaterEqual)
+ v0 := b.NewValue0(v.Line, OpARMCMPD, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpGeq8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Geq8 x y)
+ // cond:
+ // result: (GreaterEqual (CMP (SignExt8to32 x) (SignExt8to32 y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMGreaterEqual)
+ v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+ v1 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpGeq8U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Geq8U x y)
+ // cond:
+ // result: (GreaterEqualU (CMP (ZeroExt8to32 x) (ZeroExt8to32 y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMGreaterEqualU)
+ v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+ v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpGetClosurePtr(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (GetClosurePtr)
+ // cond:
+ // result: (LoweredGetClosurePtr)
+ for {
+ v.reset(OpARMLoweredGetClosurePtr)
+ return true
+ }
+}
+func rewriteValueARM_OpGoCall(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (GoCall [argwid] mem)
+ // cond:
+ // result: (CALLgo [argwid] mem)
+ for {
+ argwid := v.AuxInt
+ mem := v.Args[0]
+ v.reset(OpARMCALLgo)
+ v.AuxInt = argwid
+ v.AddArg(mem)
+ return true
+ }
+}
+func rewriteValueARM_OpGreater16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Greater16 x y)
+ // cond:
+ // result: (GreaterThan (CMP (SignExt16to32 x) (SignExt16to32 y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMGreaterThan)
+ v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+ v1 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpGreater16U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Greater16U x y)
+ // cond:
+ // result: (GreaterThanU (CMP (ZeroExt16to32 x) (ZeroExt16to32 y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMGreaterThanU)
+ v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+ v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpGreater32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Greater32 x y)
+ // cond:
+ // result: (GreaterThan (CMP x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMGreaterThan)
+ v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpGreater32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Greater32F x y)
+ // cond:
+ // result: (GreaterThan (CMPF x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMGreaterThan)
+ v0 := b.NewValue0(v.Line, OpARMCMPF, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpGreater32U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Greater32U x y)
+ // cond:
+ // result: (GreaterThanU (CMP x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMGreaterThanU)
+ v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpGreater64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Greater64F x y)
+ // cond:
+ // result: (GreaterThan (CMPD x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMGreaterThan)
+ v0 := b.NewValue0(v.Line, OpARMCMPD, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpGreater8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Greater8 x y)
+ // cond:
+ // result: (GreaterThan (CMP (SignExt8to32 x) (SignExt8to32 y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMGreaterThan)
+ v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+ v1 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpGreater8U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Greater8U x y)
+ // cond:
+ // result: (GreaterThanU (CMP (ZeroExt8to32 x) (ZeroExt8to32 y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMGreaterThanU)
+ v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+ v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpHmul16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Hmul16 x y)
+ // cond:
+ // result: (SRAconst (MUL <config.fe.TypeInt32()> (SignExt16to32 x) (SignExt16to32 y)) [16])
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMSRAconst)
+ v.AuxInt = 16
+ v0 := b.NewValue0(v.Line, OpARMMUL, config.fe.TypeInt32())
+ v1 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpHmul16u(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Hmul16u x y)
+ // cond:
+ // result: (SRLconst (MUL <config.fe.TypeUInt32()> (ZeroExt16to32 x) (ZeroExt16to32 y)) [16])
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMSRLconst)
+ v.AuxInt = 16
+ v0 := b.NewValue0(v.Line, OpARMMUL, config.fe.TypeUInt32())
+ v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpHmul32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Hmul32 x y)
+ // cond:
+ // result: (HMUL x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMHMUL)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM_OpHmul32u(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Hmul32u x y)
+ // cond:
+ // result: (HMULU x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMHMULU)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM_OpHmul8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Hmul8 x y)
+ // cond:
+ // result: (SRAconst (MUL <config.fe.TypeInt16()> (SignExt8to32 x) (SignExt8to32 y)) [8])
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMSRAconst)
+ v.AuxInt = 8
+ v0 := b.NewValue0(v.Line, OpARMMUL, config.fe.TypeInt16())
+ v1 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpHmul8u(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Hmul8u x y)
+ // cond:
+ // result: (SRLconst (MUL <config.fe.TypeUInt16()> (ZeroExt8to32 x) (ZeroExt8to32 y)) [8])
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMSRLconst)
+ v.AuxInt = 8
+ v0 := b.NewValue0(v.Line, OpARMMUL, config.fe.TypeUInt16())
+ v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpInterCall(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (InterCall [argwid] entry mem)
+ // cond:
+ // result: (CALLinter [argwid] entry mem)
+ for {
+ argwid := v.AuxInt
+ entry := v.Args[0]
+ mem := v.Args[1]
+ v.reset(OpARMCALLinter)
+ v.AuxInt = argwid
+ v.AddArg(entry)
+ v.AddArg(mem)
+ return true
+ }
+}
+func rewriteValueARM_OpIsInBounds(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (IsInBounds idx len)
+ // cond:
+ // result: (LessThanU (CMP idx len))
+ for {
+ idx := v.Args[0]
+ len := v.Args[1]
+ v.reset(OpARMLessThanU)
+ v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+ v0.AddArg(idx)
+ v0.AddArg(len)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpIsNonNil(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (IsNonNil ptr)
+ // cond:
+ // result: (NotEqual (CMPconst [0] ptr))
+ for {
+ ptr := v.Args[0]
+ v.reset(OpARMNotEqual)
+ v0 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
+ v0.AuxInt = 0
+ v0.AddArg(ptr)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpIsSliceInBounds(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (IsSliceInBounds idx len)
+ // cond:
+ // result: (LessEqualU (CMP idx len))
+ for {
+ idx := v.Args[0]
+ len := v.Args[1]
+ v.reset(OpARMLessEqualU)
+ v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+ v0.AddArg(idx)
+ v0.AddArg(len)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpLeq16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Leq16 x y)
+ // cond:
+ // result: (LessEqual (CMP (SignExt16to32 x) (SignExt16to32 y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMLessEqual)
+ v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+ v1 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpLeq16U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Leq16U x y)
+ // cond:
+ // result: (LessEqualU (CMP (ZeroExt16to32 x) (ZeroExt16to32 y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMLessEqualU)
+ v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+ v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpLeq32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Leq32 x y)
+ // cond:
+ // result: (LessEqual (CMP x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMLessEqual)
+ v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpLeq32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Leq32F x y)
+ // cond:
+ // result: (GreaterEqual (CMPF y x))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMGreaterEqual)
+ v0 := b.NewValue0(v.Line, OpARMCMPF, TypeFlags)
+ v0.AddArg(y)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpLeq32U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Leq32U x y)
+ // cond:
+ // result: (LessEqualU (CMP x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMLessEqualU)
+ v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpLeq64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Leq64F x y)
+ // cond:
+ // result: (GreaterEqual (CMPD y x))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMGreaterEqual)
+ v0 := b.NewValue0(v.Line, OpARMCMPD, TypeFlags)
+ v0.AddArg(y)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpLeq8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Leq8 x y)
+ // cond:
+ // result: (LessEqual (CMP (SignExt8to32 x) (SignExt8to32 y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMLessEqual)
+ v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+ v1 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpLeq8U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Leq8U x y)
+ // cond:
+ // result: (LessEqualU (CMP (ZeroExt8to32 x) (ZeroExt8to32 y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMLessEqualU)
+ v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+ v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpLess16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Less16 x y)
+ // cond:
+ // result: (LessThan (CMP (SignExt16to32 x) (SignExt16to32 y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMLessThan)
+ v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+ v1 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpLess16U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Less16U x y)
+ // cond:
+ // result: (LessThanU (CMP (ZeroExt16to32 x) (ZeroExt16to32 y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMLessThanU)
+ v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+ v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpLess32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Less32 x y)
+ // cond:
+ // result: (LessThan (CMP x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMLessThan)
+ v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpLess32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Less32F x y)
+ // cond:
+ // result: (GreaterThan (CMPF y x))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMGreaterThan)
+ v0 := b.NewValue0(v.Line, OpARMCMPF, TypeFlags)
+ v0.AddArg(y)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpLess32U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Less32U x y)
+ // cond:
+ // result: (LessThanU (CMP x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMLessThanU)
+ v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpLess64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Less64F x y)
+ // cond:
+ // result: (GreaterThan (CMPD y x))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMGreaterThan)
+ v0 := b.NewValue0(v.Line, OpARMCMPD, TypeFlags)
+ v0.AddArg(y)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpLess8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Less8 x y)
+ // cond:
+ // result: (LessThan (CMP (SignExt8to32 x) (SignExt8to32 y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMLessThan)
+ v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+ v1 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpLess8U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Less8U x y)
+ // cond:
+ // result: (LessThanU (CMP (ZeroExt8to32 x) (ZeroExt8to32 y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMLessThanU)
+ v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+ v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpLoad(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Load <t> ptr mem)
+ // cond: t.IsBoolean()
+ // result: (MOVBUload ptr mem)
+ for {
+ t := v.Type
+ ptr := v.Args[0]
+ mem := v.Args[1]
+ if !(t.IsBoolean()) {
+ break
+ }
+ v.reset(OpARMMOVBUload)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: (is8BitInt(t) && isSigned(t))
+ // result: (MOVBload ptr mem)
+ for {
+ t := v.Type
+ ptr := v.Args[0]
+ mem := v.Args[1]
+ if !(is8BitInt(t) && isSigned(t)) {
+ break
+ }
+ v.reset(OpARMMOVBload)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: (is8BitInt(t) && !isSigned(t))
+ // result: (MOVBUload ptr mem)
+ for {
+ t := v.Type
+ ptr := v.Args[0]
+ mem := v.Args[1]
+ if !(is8BitInt(t) && !isSigned(t)) {
+ break
+ }
+ v.reset(OpARMMOVBUload)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: (is16BitInt(t) && isSigned(t))
+ // result: (MOVHload ptr mem)
+ for {
+ t := v.Type
+ ptr := v.Args[0]
+ mem := v.Args[1]
+ if !(is16BitInt(t) && isSigned(t)) {
+ break
+ }
+ v.reset(OpARMMOVHload)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: (is16BitInt(t) && !isSigned(t))
+ // result: (MOVHUload ptr mem)
+ for {
+ t := v.Type
+ ptr := v.Args[0]
+ mem := v.Args[1]
+ if !(is16BitInt(t) && !isSigned(t)) {
+ break
+ }
+ v.reset(OpARMMOVHUload)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: (is32BitInt(t) || isPtr(t))
+ // result: (MOVWload ptr mem)
+ for {
+ t := v.Type
+ ptr := v.Args[0]
+ mem := v.Args[1]
+ if !(is32BitInt(t) || isPtr(t)) {
+ break
+ }
+ v.reset(OpARMMOVWload)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: is32BitFloat(t)
+ // result: (MOVFload ptr mem)
+ for {
+ t := v.Type
+ ptr := v.Args[0]
+ mem := v.Args[1]
+ if !(is32BitFloat(t)) {
+ break
+ }
+ v.reset(OpARMMOVFload)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: is64BitFloat(t)
+ // result: (MOVDload ptr mem)
+ for {
+ t := v.Type
+ ptr := v.Args[0]
+ mem := v.Args[1]
+ if !(is64BitFloat(t)) {
+ break
+ }
+ v.reset(OpARMMOVDload)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpLrot16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lrot16 <t> x [c])
+ // cond:
+ // result: (OR (SLLconst <t> x [c&15]) (SRLconst <t> x [16-c&15]))
+ for {
+ t := v.Type
+ c := v.AuxInt
+ x := v.Args[0]
+ v.reset(OpARMOR)
+ v0 := b.NewValue0(v.Line, OpARMSLLconst, t)
+ v0.AuxInt = c & 15
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpARMSRLconst, t)
+ v1.AuxInt = 16 - c&15
+ v1.AddArg(x)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueARM_OpLrot32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lrot32 x [c])
+ // cond:
+ // result: (SRRconst x [32-c&31])
+ for {
+ c := v.AuxInt
+ x := v.Args[0]
+ v.reset(OpARMSRRconst)
+ v.AuxInt = 32 - c&31
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM_OpLrot8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lrot8 <t> x [c])
+ // cond:
+ // result: (OR (SLLconst <t> x [c&7]) (SRLconst <t> x [8-c&7]))
+ for {
+ t := v.Type
+ c := v.AuxInt
+ x := v.Args[0]
+ v.reset(OpARMOR)
+ v0 := b.NewValue0(v.Line, OpARMSLLconst, t)
+ v0.AuxInt = c & 7
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpARMSRLconst, t)
+ v1.AuxInt = 8 - c&7
+ v1.AddArg(x)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueARM_OpLsh16x16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh16x16 x y)
+ // cond:
+ // result: (CMOVWHSconst (SLL <x.Type> x (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0])
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMCMOVWHSconst)
+ v.AuxInt = 0
+ v0 := b.NewValue0(v.Line, OpARMSLL, x.Type)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v1.AddArg(y)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ v2 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
+ v2.AuxInt = 256
+ v3 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v.AddArg(v2)
+ return true
+ }
+}
+func rewriteValueARM_OpLsh16x32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh16x32 x y)
+ // cond:
+ // result: (CMOVWHSconst (SLL <x.Type> x y) (CMPconst [256] y) [0])
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMCMOVWHSconst)
+ v.AuxInt = 0
+ v0 := b.NewValue0(v.Line, OpARMSLL, x.Type)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
+ v1.AuxInt = 256
+ v1.AddArg(y)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueARM_OpLsh16x64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh16x64 x (Const64 [c]))
+ // cond: uint64(c) < 16
+ // result: (SLLconst x [c])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := v_1.AuxInt
+ if !(uint64(c) < 16) {
+ break
+ }
+ v.reset(OpARMSLLconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ // match: (Lsh16x64 _ (Const64 [c]))
+ // cond: uint64(c) >= 16
+ // result: (Const16 [0])
+ for {
+ v_1 := v.Args[1]
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := v_1.AuxInt
+ if !(uint64(c) >= 16) {
+ break
+ }
+ v.reset(OpConst16)
+ v.AuxInt = 0
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpLsh16x8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh16x8 x y)
+ // cond:
+ // result: (SLL x (ZeroExt8to32 y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMSLL)
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpLsh32x16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh32x16 x y)
+ // cond:
+ // result: (CMOVWHSconst (SLL <x.Type> x (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0])
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMCMOVWHSconst)
+ v.AuxInt = 0
+ v0 := b.NewValue0(v.Line, OpARMSLL, x.Type)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v1.AddArg(y)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ v2 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
+ v2.AuxInt = 256
+ v3 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v.AddArg(v2)
+ return true
+ }
+}
+func rewriteValueARM_OpLsh32x32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh32x32 x y)
+ // cond:
+ // result: (CMOVWHSconst (SLL <x.Type> x y) (CMPconst [256] y) [0])
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMCMOVWHSconst)
+ v.AuxInt = 0
+ v0 := b.NewValue0(v.Line, OpARMSLL, x.Type)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
+ v1.AuxInt = 256
+ v1.AddArg(y)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueARM_OpLsh32x64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh32x64 x (Const64 [c]))
+ // cond: uint64(c) < 32
+ // result: (SLLconst x [c])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := v_1.AuxInt
+ if !(uint64(c) < 32) {
+ break
+ }
+ v.reset(OpARMSLLconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ // match: (Lsh32x64 _ (Const64 [c]))
+ // cond: uint64(c) >= 32
+ // result: (Const32 [0])
+ for {
+ v_1 := v.Args[1]
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := v_1.AuxInt
+ if !(uint64(c) >= 32) {
+ break
+ }
+ v.reset(OpConst32)
+ v.AuxInt = 0
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpLsh32x8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh32x8 x y)
+ // cond:
+ // result: (SLL x (ZeroExt8to32 y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMSLL)
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpLsh8x16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh8x16 x y)
+ // cond:
+ // result: (CMOVWHSconst (SLL <x.Type> x (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0])
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMCMOVWHSconst)
+ v.AuxInt = 0
+ v0 := b.NewValue0(v.Line, OpARMSLL, x.Type)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v1.AddArg(y)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ v2 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
+ v2.AuxInt = 256
+ v3 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v.AddArg(v2)
+ return true
+ }
+}
+func rewriteValueARM_OpLsh8x32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh8x32 x y)
+ // cond:
+ // result: (CMOVWHSconst (SLL <x.Type> x y) (CMPconst [256] y) [0])
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMCMOVWHSconst)
+ v.AuxInt = 0
+ v0 := b.NewValue0(v.Line, OpARMSLL, x.Type)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
+ v1.AuxInt = 256
+ v1.AddArg(y)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueARM_OpLsh8x64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh8x64 x (Const64 [c]))
+ // cond: uint64(c) < 8
+ // result: (SLLconst x [c])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := v_1.AuxInt
+ if !(uint64(c) < 8) {
+ break
+ }
+ v.reset(OpARMSLLconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ // match: (Lsh8x64 _ (Const64 [c]))
+ // cond: uint64(c) >= 8
+ // result: (Const8 [0])
+ for {
+ v_1 := v.Args[1]
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := v_1.AuxInt
+ if !(uint64(c) >= 8) {
+ break
+ }
+ v.reset(OpConst8)
+ v.AuxInt = 0
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpLsh8x8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh8x8 x y)
+ // cond:
+ // result: (SLL x (ZeroExt8to32 y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMSLL)
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpMod16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Mod16 x y)
+ // cond:
+ // result: (MOD (SignExt16to32 x) (SignExt16to32 y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMMOD)
+ v0 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
+ v1.AddArg(y)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueARM_OpMod16u(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Mod16u x y)
+ // cond:
+ // result: (MODU (ZeroExt16to32 x) (ZeroExt16to32 y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMMODU)
+ v0 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v1.AddArg(y)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueARM_OpMod32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Mod32 x y)
+ // cond:
+ // result: (MOD x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMMOD)
v.AddArg(x)
v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM_OpMod32u(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Mod32u x y)
+ // cond:
+ // result: (MODU x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMMODU)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM_OpMod8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Mod8 x y)
+ // cond:
+ // result: (MOD (SignExt8to32 x) (SignExt8to32 y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMMOD)
+ v0 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+ v1.AddArg(y)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueARM_OpMod8u(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Mod8u x y)
+ // cond:
+ // result: (MODU (ZeroExt8to32 x) (ZeroExt8to32 y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMMODU)
+ v0 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v1.AddArg(y)
+ v.AddArg(v1)
+ return true
+ }
+}
// rewriteValueARM_OpMove lowers the generic Move op (copy [s] bytes from src
// to dst) into ARM stores/loads. Rules are tried strictly in order, so the
// aligned small-size cases win before the byte-by-byte and bulk fallbacks.
// NOTE(review): this file looks machine-generated from rewrite rules; prefer
// changing the rule source and regenerating over hand-editing — TODO confirm.
func rewriteValueARM_OpMove(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Move [s] _ _ mem)
	// cond: SizeAndAlign(s).Size() == 0
	// result: mem
	for {
		s := v.AuxInt
		mem := v.Args[2]
		if !(SizeAndAlign(s).Size() == 0) {
			break
		}
		// zero-length move: just forward the memory state
		v.reset(OpCopy)
		v.Type = mem.Type
		v.AddArg(mem)
		return true
	}
	// match: (Move [s] dst src mem)
	// cond: SizeAndAlign(s).Size() == 1
	// result: (MOVBstore dst (MOVBUload src mem) mem)
	for {
		s := v.AuxInt
		dst := v.Args[0]
		src := v.Args[1]
		mem := v.Args[2]
		if !(SizeAndAlign(s).Size() == 1) {
			break
		}
		v.reset(OpARMMOVBstore)
		v.AddArg(dst)
		v0 := b.NewValue0(v.Line, OpARMMOVBUload, config.fe.TypeUInt8())
		v0.AddArg(src)
		v0.AddArg(mem)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (Move [s] dst src mem)
	// cond: SizeAndAlign(s).Size() == 2 && SizeAndAlign(s).Align()%2 == 0
	// result: (MOVHstore dst (MOVHUload src mem) mem)
	for {
		s := v.AuxInt
		dst := v.Args[0]
		src := v.Args[1]
		mem := v.Args[2]
		if !(SizeAndAlign(s).Size() == 2 && SizeAndAlign(s).Align()%2 == 0) {
			break
		}
		v.reset(OpARMMOVHstore)
		v.AddArg(dst)
		v0 := b.NewValue0(v.Line, OpARMMOVHUload, config.fe.TypeUInt16())
		v0.AddArg(src)
		v0.AddArg(mem)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (Move [s] dst src mem)
	// cond: SizeAndAlign(s).Size() == 2
	// result: (MOVBstore [1] dst (MOVBUload [1] src mem) (MOVBstore dst (MOVBUload src mem) mem))
	// (unaligned 2-byte move: two byte copies)
	for {
		s := v.AuxInt
		dst := v.Args[0]
		src := v.Args[1]
		mem := v.Args[2]
		if !(SizeAndAlign(s).Size() == 2) {
			break
		}
		v.reset(OpARMMOVBstore)
		v.AuxInt = 1
		v.AddArg(dst)
		v0 := b.NewValue0(v.Line, OpARMMOVBUload, config.fe.TypeUInt8())
		v0.AuxInt = 1
		v0.AddArg(src)
		v0.AddArg(mem)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Line, OpARMMOVBstore, TypeMem)
		v1.AddArg(dst)
		v2 := b.NewValue0(v.Line, OpARMMOVBUload, config.fe.TypeUInt8())
		v2.AddArg(src)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v1.AddArg(mem)
		v.AddArg(v1)
		return true
	}
	// match: (Move [s] dst src mem)
	// cond: SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%4 == 0
	// result: (MOVWstore dst (MOVWload src mem) mem)
	for {
		s := v.AuxInt
		dst := v.Args[0]
		src := v.Args[1]
		mem := v.Args[2]
		if !(SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%4 == 0) {
			break
		}
		v.reset(OpARMMOVWstore)
		v.AddArg(dst)
		v0 := b.NewValue0(v.Line, OpARMMOVWload, config.fe.TypeUInt32())
		v0.AddArg(src)
		v0.AddArg(mem)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (Move [s] dst src mem)
	// cond: SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%2 == 0
	// result: (MOVHstore [2] dst (MOVHUload [2] src mem) (MOVHstore dst (MOVHUload src mem) mem))
	// (2-aligned 4-byte move: two halfword copies)
	for {
		s := v.AuxInt
		dst := v.Args[0]
		src := v.Args[1]
		mem := v.Args[2]
		if !(SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%2 == 0) {
			break
		}
		v.reset(OpARMMOVHstore)
		v.AuxInt = 2
		v.AddArg(dst)
		v0 := b.NewValue0(v.Line, OpARMMOVHUload, config.fe.TypeUInt16())
		v0.AuxInt = 2
		v0.AddArg(src)
		v0.AddArg(mem)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Line, OpARMMOVHstore, TypeMem)
		v1.AddArg(dst)
		v2 := b.NewValue0(v.Line, OpARMMOVHUload, config.fe.TypeUInt16())
		v2.AddArg(src)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v1.AddArg(mem)
		v.AddArg(v1)
		return true
	}
	// match: (Move [s] dst src mem)
	// cond: SizeAndAlign(s).Size() == 4
	// result: (MOVBstore [3] dst (MOVBUload [3] src mem) (MOVBstore [2] dst (MOVBUload [2] src mem) (MOVBstore [1] dst (MOVBUload [1] src mem) (MOVBstore dst (MOVBUload src mem) mem))))
	// (unaligned 4-byte move: four byte copies, highest offset first)
	for {
		s := v.AuxInt
		dst := v.Args[0]
		src := v.Args[1]
		mem := v.Args[2]
		if !(SizeAndAlign(s).Size() == 4) {
			break
		}
		v.reset(OpARMMOVBstore)
		v.AuxInt = 3
		v.AddArg(dst)
		v0 := b.NewValue0(v.Line, OpARMMOVBUload, config.fe.TypeUInt8())
		v0.AuxInt = 3
		v0.AddArg(src)
		v0.AddArg(mem)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Line, OpARMMOVBstore, TypeMem)
		v1.AuxInt = 2
		v1.AddArg(dst)
		v2 := b.NewValue0(v.Line, OpARMMOVBUload, config.fe.TypeUInt8())
		v2.AuxInt = 2
		v2.AddArg(src)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v3 := b.NewValue0(v.Line, OpARMMOVBstore, TypeMem)
		v3.AuxInt = 1
		v3.AddArg(dst)
		v4 := b.NewValue0(v.Line, OpARMMOVBUload, config.fe.TypeUInt8())
		v4.AuxInt = 1
		v4.AddArg(src)
		v4.AddArg(mem)
		v3.AddArg(v4)
		v5 := b.NewValue0(v.Line, OpARMMOVBstore, TypeMem)
		v5.AddArg(dst)
		v6 := b.NewValue0(v.Line, OpARMMOVBUload, config.fe.TypeUInt8())
		v6.AddArg(src)
		v6.AddArg(mem)
		v5.AddArg(v6)
		v5.AddArg(mem)
		v3.AddArg(v5)
		v1.AddArg(v3)
		v.AddArg(v1)
		return true
	}
	// match: (Move [s] dst src mem)
	// cond: SizeAndAlign(s).Size() == 3
	// result: (MOVBstore [2] dst (MOVBUload [2] src mem) (MOVBstore [1] dst (MOVBUload [1] src mem) (MOVBstore dst (MOVBUload src mem) mem)))
	for {
		s := v.AuxInt
		dst := v.Args[0]
		src := v.Args[1]
		mem := v.Args[2]
		if !(SizeAndAlign(s).Size() == 3) {
			break
		}
		v.reset(OpARMMOVBstore)
		v.AuxInt = 2
		v.AddArg(dst)
		v0 := b.NewValue0(v.Line, OpARMMOVBUload, config.fe.TypeUInt8())
		v0.AuxInt = 2
		v0.AddArg(src)
		v0.AddArg(mem)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Line, OpARMMOVBstore, TypeMem)
		v1.AuxInt = 1
		v1.AddArg(dst)
		v2 := b.NewValue0(v.Line, OpARMMOVBUload, config.fe.TypeUInt8())
		v2.AuxInt = 1
		v2.AddArg(src)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v3 := b.NewValue0(v.Line, OpARMMOVBstore, TypeMem)
		v3.AddArg(dst)
		v4 := b.NewValue0(v.Line, OpARMMOVBUload, config.fe.TypeUInt8())
		v4.AddArg(src)
		v4.AddArg(mem)
		v3.AddArg(v4)
		v3.AddArg(mem)
		v1.AddArg(v3)
		v.AddArg(v1)
		return true
	}
	// match: (Move [s] dst src mem)
	// cond: SizeAndAlign(s).Size()%4 == 0 && SizeAndAlign(s).Size() > 4 && SizeAndAlign(s).Size() <= 512 && SizeAndAlign(s).Align()%4 == 0 && !config.noDuffDevice
	// result: (DUFFCOPY [8 * (128 - int64(SizeAndAlign(s).Size()/4))] dst src mem)
	// (word-aligned medium move: jump into the Duff's-device copy routine)
	for {
		s := v.AuxInt
		dst := v.Args[0]
		src := v.Args[1]
		mem := v.Args[2]
		if !(SizeAndAlign(s).Size()%4 == 0 && SizeAndAlign(s).Size() > 4 && SizeAndAlign(s).Size() <= 512 && SizeAndAlign(s).Align()%4 == 0 && !config.noDuffDevice) {
			break
		}
		v.reset(OpARMDUFFCOPY)
		v.AuxInt = 8 * (128 - int64(SizeAndAlign(s).Size()/4))
		v.AddArg(dst)
		v.AddArg(src)
		v.AddArg(mem)
		return true
	}
	// match: (Move [s] dst src mem)
	// cond: (SizeAndAlign(s).Size() > 512 || config.noDuffDevice) || SizeAndAlign(s).Align()%4 != 0
	// result: (LoweredMove [SizeAndAlign(s).Align()] dst src (ADDconst <src.Type> src [SizeAndAlign(s).Size()-moveSize(SizeAndAlign(s).Align(), config)]) mem)
	// (generic fallback: loop copy; the ADDconst is the source end address)
	for {
		s := v.AuxInt
		dst := v.Args[0]
		src := v.Args[1]
		mem := v.Args[2]
		if !((SizeAndAlign(s).Size() > 512 || config.noDuffDevice) || SizeAndAlign(s).Align()%4 != 0) {
			break
		}
		v.reset(OpARMLoweredMove)
		v.AuxInt = SizeAndAlign(s).Align()
		v.AddArg(dst)
		v.AddArg(src)
		v0 := b.NewValue0(v.Line, OpARMADDconst, src.Type)
		v0.AuxInt = SizeAndAlign(s).Size() - moveSize(SizeAndAlign(s).Align(), config)
		v0.AddArg(src)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	return false
}
+func rewriteValueARM_OpMul16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Mul16 x y)
+ // cond:
+ // result: (MUL x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMMUL)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM_OpMul32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Mul32 x y)
+ // cond:
+ // result: (MUL x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMMUL)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM_OpMul32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Mul32F x y)
+ // cond:
+ // result: (MULF x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMMULF)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM_OpMul32uhilo(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Mul32uhilo x y)
+ // cond:
+ // result: (MULLU x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMMULLU)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM_OpMul64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Mul64F x y)
+ // cond:
+ // result: (MULD x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMMULD)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM_OpMul8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Mul8 x y)
+ // cond:
+ // result: (MUL x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMMUL)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM_OpNeg16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Neg16 x)
+ // cond:
+ // result: (RSBconst [0] x)
+ for {
+ x := v.Args[0]
+ v.reset(OpARMRSBconst)
+ v.AuxInt = 0
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM_OpNeg32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Neg32 x)
+ // cond:
+ // result: (RSBconst [0] x)
+ for {
+ x := v.Args[0]
+ v.reset(OpARMRSBconst)
+ v.AuxInt = 0
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM_OpNeg32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Neg32F x)
+ // cond:
+ // result: (NEGF x)
+ for {
+ x := v.Args[0]
+ v.reset(OpARMNEGF)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM_OpNeg64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Neg64F x)
+ // cond:
+ // result: (NEGD x)
+ for {
+ x := v.Args[0]
+ v.reset(OpARMNEGD)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM_OpNeg8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Neg8 x)
+ // cond:
+ // result: (RSBconst [0] x)
+ for {
+ x := v.Args[0]
+ v.reset(OpARMRSBconst)
+ v.AuxInt = 0
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM_OpNeq16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Neq16 x y)
+ // cond:
+ // result: (NotEqual (CMP (ZeroExt16to32 x) (ZeroExt16to32 y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMNotEqual)
+ v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+ v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpNeq32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Neq32 x y)
+ // cond:
+ // result: (NotEqual (CMP x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMNotEqual)
+ v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpNeq32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Neq32F x y)
+ // cond:
+ // result: (NotEqual (CMPF x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMNotEqual)
+ v0 := b.NewValue0(v.Line, OpARMCMPF, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpNeq64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Neq64F x y)
+ // cond:
+ // result: (NotEqual (CMPD x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMNotEqual)
+ v0 := b.NewValue0(v.Line, OpARMCMPD, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpNeq8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Neq8 x y)
+ // cond:
+ // result: (NotEqual (CMP (ZeroExt8to32 x) (ZeroExt8to32 y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMNotEqual)
+ v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+ v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpNeqB(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (NeqB x y)
+ // cond:
+ // result: (XOR x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMXOR)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM_OpNeqPtr(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (NeqPtr x y)
+ // cond:
+ // result: (NotEqual (CMP x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMNotEqual)
+ v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpNilCheck(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (NilCheck ptr mem)
+ // cond:
+ // result: (LoweredNilCheck ptr mem)
+ for {
+ ptr := v.Args[0]
+ mem := v.Args[1]
+ v.reset(OpARMLoweredNilCheck)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+}
+func rewriteValueARM_OpNot(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Not x)
+ // cond:
+ // result: (XORconst [1] x)
+ for {
+ x := v.Args[0]
+ v.reset(OpARMXORconst)
+ v.AuxInt = 1
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM_OpOffPtr(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (OffPtr [off] ptr:(SP))
+ // cond:
+ // result: (MOVWaddr [off] ptr)
+ for {
+ off := v.AuxInt
+ ptr := v.Args[0]
+ if ptr.Op != OpSP {
+ break
+ }
+ v.reset(OpARMMOVWaddr)
+ v.AuxInt = off
+ v.AddArg(ptr)
+ return true
+ }
+ // match: (OffPtr [off] ptr)
+ // cond:
+ // result: (ADDconst [off] ptr)
+ for {
+ off := v.AuxInt
+ ptr := v.Args[0]
+ v.reset(OpARMADDconst)
+ v.AuxInt = off
+ v.AddArg(ptr)
+ return true
+ }
+}
+func rewriteValueARM_OpOr16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Or16 x y)
+ // cond:
+ // result: (OR x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMOR)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM_OpOr32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Or32 x y)
+ // cond:
+ // result: (OR x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMOR)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM_OpOr8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Or8 x y)
+ // cond:
+ // result: (OR x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMOR)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM_OpOrB(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (OrB x y)
+ // cond:
+ // result: (OR x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMOR)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM_OpRsh16Ux16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh16Ux16 x y)
+ // cond:
+ // result: (CMOVWHSconst (SRL <x.Type> (ZeroExt16to32 x) (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0])
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMCMOVWHSconst)
+ v.AuxInt = 0
+ v0 := b.NewValue0(v.Line, OpARMSRL, x.Type)
+ v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ v3 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
+ v3.AuxInt = 256
+ v4 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v4.AddArg(y)
+ v3.AddArg(v4)
+ v.AddArg(v3)
+ return true
+ }
+}
+func rewriteValueARM_OpRsh16Ux32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh16Ux32 x y)
+ // cond:
+ // result: (CMOVWHSconst (SRL <x.Type> (ZeroExt16to32 x) y) (CMPconst [256] y) [0])
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMCMOVWHSconst)
+ v.AuxInt = 0
+ v0 := b.NewValue0(v.Line, OpARMSRL, x.Type)
+ v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v2 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
+ v2.AuxInt = 256
+ v2.AddArg(y)
+ v.AddArg(v2)
+ return true
+ }
+}
+func rewriteValueARM_OpRsh16Ux64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh16Ux64 x (Const64 [c]))
+ // cond: uint64(c) < 16
+ // result: (SRLconst (SLLconst <config.fe.TypeUInt32()> x [16]) [c+16])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := v_1.AuxInt
+ if !(uint64(c) < 16) {
+ break
+ }
+ v.reset(OpARMSRLconst)
+ v.AuxInt = c + 16
+ v0 := b.NewValue0(v.Line, OpARMSLLconst, config.fe.TypeUInt32())
+ v0.AuxInt = 16
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Rsh16Ux64 _ (Const64 [c]))
+ // cond: uint64(c) >= 16
+ // result: (Const16 [0])
+ for {
+ v_1 := v.Args[1]
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := v_1.AuxInt
+ if !(uint64(c) >= 16) {
+ break
+ }
+ v.reset(OpConst16)
+ v.AuxInt = 0
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpRsh16Ux8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh16Ux8 x y)
+ // cond:
+ // result: (SRL (ZeroExt16to32 x) (ZeroExt8to32 y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMSRL)
+ v0 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v1.AddArg(y)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueARM_OpRsh16x16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh16x16 x y)
+ // cond:
+ // result: (SRAcond (SignExt16to32 x) (ZeroExt16to32 y) (CMPconst [256] (ZeroExt16to32 y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMSRAcond)
+ v0 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v1.AddArg(y)
+ v.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
+ v2.AuxInt = 256
+ v3 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v.AddArg(v2)
+ return true
+ }
+}
+func rewriteValueARM_OpRsh16x32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh16x32 x y)
+ // cond:
+ // result: (SRAcond (SignExt16to32 x) y (CMPconst [256] y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMSRAcond)
+ v0 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v.AddArg(y)
+ v1 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
+ v1.AuxInt = 256
+ v1.AddArg(y)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueARM_OpRsh16x64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh16x64 x (Const64 [c]))
+ // cond: uint64(c) < 16
+ // result: (SRAconst (SLLconst <config.fe.TypeUInt32()> x [16]) [c+16])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := v_1.AuxInt
+ if !(uint64(c) < 16) {
+ break
+ }
+ v.reset(OpARMSRAconst)
+ v.AuxInt = c + 16
+ v0 := b.NewValue0(v.Line, OpARMSLLconst, config.fe.TypeUInt32())
+ v0.AuxInt = 16
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Rsh16x64 x (Const64 [c]))
+ // cond: uint64(c) >= 16
+ // result: (SRAconst (SLLconst <config.fe.TypeUInt32()> x [16]) [31])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := v_1.AuxInt
+ if !(uint64(c) >= 16) {
+ break
+ }
+ v.reset(OpARMSRAconst)
+ v.AuxInt = 31
+ v0 := b.NewValue0(v.Line, OpARMSLLconst, config.fe.TypeUInt32())
+ v0.AuxInt = 16
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpRsh16x8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh16x8 x y)
+ // cond:
+ // result: (SRA (SignExt16to32 x) (ZeroExt8to32 y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMSRA)
+ v0 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v1.AddArg(y)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueARM_OpRsh32Ux16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh32Ux16 x y)
+ // cond:
+ // result: (CMOVWHSconst (SRL <x.Type> x (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0])
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMCMOVWHSconst)
+ v.AuxInt = 0
+ v0 := b.NewValue0(v.Line, OpARMSRL, x.Type)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v1.AddArg(y)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ v2 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
+ v2.AuxInt = 256
+ v3 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v.AddArg(v2)
+ return true
+ }
+}
+func rewriteValueARM_OpRsh32Ux32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh32Ux32 x y)
+ // cond:
+ // result: (CMOVWHSconst (SRL <x.Type> x y) (CMPconst [256] y) [0])
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMCMOVWHSconst)
+ v.AuxInt = 0
+ v0 := b.NewValue0(v.Line, OpARMSRL, x.Type)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
+ v1.AuxInt = 256
+ v1.AddArg(y)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueARM_OpRsh32Ux64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh32Ux64 x (Const64 [c]))
+ // cond: uint64(c) < 32
+ // result: (SRLconst x [c])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := v_1.AuxInt
+ if !(uint64(c) < 32) {
+ break
+ }
+ v.reset(OpARMSRLconst)
v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ // match: (Rsh32Ux64 _ (Const64 [c]))
+ // cond: uint64(c) >= 32
+ // result: (Const32 [0])
+ for {
+ v_1 := v.Args[1]
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := v_1.AuxInt
+ if !(uint64(c) >= 32) {
+ break
+ }
+ v.reset(OpConst32)
+ v.AuxInt = 0
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpRsh32Ux8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh32Ux8 x y)
+ // cond:
+ // result: (SRL x (ZeroExt8to32 y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMSRL)
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpRsh32x16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh32x16 x y)
+ // cond:
+ // result: (SRAcond x (ZeroExt16to32 y) (CMPconst [256] (ZeroExt16to32 y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMSRAcond)
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
+ v1.AuxInt = 256
+ v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueARM_OpRsh32x32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh32x32 x y)
+ // cond:
+ // result: (SRAcond x y (CMPconst [256] y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMSRAcond)
+ v.AddArg(x)
+ v.AddArg(y)
+ v0 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
+ v0.AuxInt = 256
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpRsh32x64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh32x64 x (Const64 [c]))
+ // cond: uint64(c) < 32
+ // result: (SRAconst x [c])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := v_1.AuxInt
+ if !(uint64(c) < 32) {
+ break
+ }
+ v.reset(OpARMSRAconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ // match: (Rsh32x64 x (Const64 [c]))
+ // cond: uint64(c) >= 32
+ // result: (SRAconst x [31])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := v_1.AuxInt
+ if !(uint64(c) >= 32) {
+ break
+ }
+ v.reset(OpARMSRAconst)
+ v.AuxInt = 31
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpRsh32x8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh32x8 x y)
+ // cond:
+ // result: (SRA x (ZeroExt8to32 y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMSRA)
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpRsh8Ux16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh8Ux16 x y)
+ // cond:
+ // result: (CMOVWHSconst (SRL <x.Type> (ZeroExt8to32 x) (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0])
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMCMOVWHSconst)
+ v.AuxInt = 0
+ v0 := b.NewValue0(v.Line, OpARMSRL, x.Type)
+ v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ v3 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
+ v3.AuxInt = 256
+ v4 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v4.AddArg(y)
+ v3.AddArg(v4)
+ v.AddArg(v3)
+ return true
+ }
+}
+func rewriteValueARM_OpRsh8Ux32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh8Ux32 x y)
+ // cond:
+ // result: (CMOVWHSconst (SRL <x.Type> (ZeroExt8to32 x) y) (CMPconst [256] y) [0])
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMCMOVWHSconst)
+ v.AuxInt = 0
+ v0 := b.NewValue0(v.Line, OpARMSRL, x.Type)
+ v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v2 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
+ v2.AuxInt = 256
+ v2.AddArg(y)
+ v.AddArg(v2)
+ return true
+ }
+}
+func rewriteValueARM_OpRsh8Ux64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh8Ux64 x (Const64 [c]))
+ // cond: uint64(c) < 8
+ // result: (SRLconst (SLLconst <config.fe.TypeUInt32()> x [24]) [c+24])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := v_1.AuxInt
+ if !(uint64(c) < 8) {
+ break
+ }
+ v.reset(OpARMSRLconst)
+ v.AuxInt = c + 24
+ v0 := b.NewValue0(v.Line, OpARMSLLconst, config.fe.TypeUInt32())
+ v0.AuxInt = 24
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Rsh8Ux64 _ (Const64 [c]))
+ // cond: uint64(c) >= 8
+ // result: (Const8 [0])
+ for {
+ v_1 := v.Args[1]
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := v_1.AuxInt
+ if !(uint64(c) >= 8) {
+ break
+ }
+ v.reset(OpConst8)
+ v.AuxInt = 0
return true
}
return false
}
+func rewriteValueARM_OpRsh8Ux8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh8Ux8 x y)
+ // cond:
+ // result: (SRL (ZeroExt8to32 x) (ZeroExt8to32 y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMSRL)
+ v0 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v1.AddArg(y)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueARM_OpRsh8x16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh8x16 x y)
+ // cond:
+ // result: (SRAcond (SignExt8to32 x) (ZeroExt16to32 y) (CMPconst [256] (ZeroExt16to32 y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMSRAcond)
+ v0 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v1.AddArg(y)
+ v.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
+ v2.AuxInt = 256
+ v3 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v.AddArg(v2)
+ return true
+ }
+}
+func rewriteValueARM_OpRsh8x32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh8x32 x y)
+ // cond:
+ // result: (SRAcond (SignExt8to32 x) y (CMPconst [256] y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMSRAcond)
+ v0 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v.AddArg(y)
+ v1 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
+ v1.AuxInt = 256
+ v1.AddArg(y)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueARM_OpRsh8x64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh8x64 x (Const64 [c]))
+ // cond: uint64(c) < 8
+ // result: (SRAconst (SLLconst <config.fe.TypeUInt32()> x [24]) [c+24])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := v_1.AuxInt
+ if !(uint64(c) < 8) {
+ break
+ }
+ v.reset(OpARMSRAconst)
+ v.AuxInt = c + 24
+ v0 := b.NewValue0(v.Line, OpARMSLLconst, config.fe.TypeUInt32())
+ v0.AuxInt = 24
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Rsh8x64 x (Const64 [c]))
+ // cond: uint64(c) >= 8
+ // result: (SRAconst (SLLconst <config.fe.TypeUInt32()> x [24]) [31])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := v_1.AuxInt
+ if !(uint64(c) >= 8) {
+ break
+ }
+ v.reset(OpARMSRAconst)
+ v.AuxInt = 31
+ v0 := b.NewValue0(v.Line, OpARMSLLconst, config.fe.TypeUInt32())
+ v0.AuxInt = 24
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpRsh8x8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh8x8 x y)
+ // cond:
+ // result: (SRA (SignExt8to32 x) (ZeroExt8to32 y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMSRA)
+ v0 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v1.AddArg(y)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueARM_OpSignExt16to32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (SignExt16to32 x)
+ // cond:
+ // result: (MOVHreg x)
+ for {
+ x := v.Args[0]
+ v.reset(OpARMMOVHreg)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM_OpSignExt8to16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (SignExt8to16 x)
+ // cond:
+ // result: (MOVBreg x)
+ for {
+ x := v.Args[0]
+ v.reset(OpARMMOVBreg)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM_OpSignExt8to32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (SignExt8to32 x)
+ // cond:
+ // result: (MOVBreg x)
+ for {
+ x := v.Args[0]
+ v.reset(OpARMMOVBreg)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM_OpSignmask(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Signmask x)
+ // cond:
+ // result: (SRAconst x [31])
+ for {
+ x := v.Args[0]
+ v.reset(OpARMSRAconst)
+ v.AuxInt = 31
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM_OpSqrt(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Sqrt x)
+ // cond:
+ // result: (SQRTD x)
+ for {
+ x := v.Args[0]
+ v.reset(OpARMSQRTD)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM_OpStaticCall(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (StaticCall [argwid] {target} mem)
+ // cond:
+ // result: (CALLstatic [argwid] {target} mem)
+ for {
+ argwid := v.AuxInt
+ target := v.Aux
+ mem := v.Args[0]
+ v.reset(OpARMCALLstatic)
+ v.AuxInt = argwid
+ v.Aux = target
+ v.AddArg(mem)
+ return true
+ }
+}
+func rewriteValueARM_OpStore(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Store [1] ptr val mem)
+ // cond:
+ // result: (MOVBstore ptr val mem)
+ for {
+ if v.AuxInt != 1 {
+ break
+ }
+ ptr := v.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ v.reset(OpARMMOVBstore)
+ v.AddArg(ptr)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Store [2] ptr val mem)
+ // cond:
+ // result: (MOVHstore ptr val mem)
+ for {
+ if v.AuxInt != 2 {
+ break
+ }
+ ptr := v.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ v.reset(OpARMMOVHstore)
+ v.AddArg(ptr)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Store [4] ptr val mem)
+ // cond: !is32BitFloat(val.Type)
+ // result: (MOVWstore ptr val mem)
+ for {
+ if v.AuxInt != 4 {
+ break
+ }
+ ptr := v.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(!is32BitFloat(val.Type)) {
+ break
+ }
+ v.reset(OpARMMOVWstore)
+ v.AddArg(ptr)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Store [4] ptr val mem)
+ // cond: is32BitFloat(val.Type)
+ // result: (MOVFstore ptr val mem)
+ for {
+ if v.AuxInt != 4 {
+ break
+ }
+ ptr := v.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(is32BitFloat(val.Type)) {
+ break
+ }
+ v.reset(OpARMMOVFstore)
+ v.AddArg(ptr)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Store [8] ptr val mem)
+ // cond: is64BitFloat(val.Type)
+ // result: (MOVDstore ptr val mem)
+ for {
+ if v.AuxInt != 8 {
+ break
+ }
+ ptr := v.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(is64BitFloat(val.Type)) {
+ break
+ }
+ v.reset(OpARMMOVDstore)
+ v.AddArg(ptr)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpSub16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Sub16 x y)
+ // cond:
+ // result: (SUB x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMSUB)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM_OpSub32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Sub32 x y)
+ // cond:
+ // result: (SUB x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMSUB)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM_OpSub32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Sub32F x y)
+ // cond:
+ // result: (SUBF x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMSUBF)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM_OpSub32carry(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Sub32carry x y)
+ // cond:
+ // result: (SUBS x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMSUBS)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM_OpSub32withcarry(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Sub32withcarry x y c)
+ // cond:
+ // result: (SBC x y c)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ c := v.Args[2]
+ v.reset(OpARMSBC)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(c)
+ return true
+ }
+}
+func rewriteValueARM_OpSub64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Sub64F x y)
+ // cond:
+ // result: (SUBD x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMSUBD)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM_OpSub8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Sub8 x y)
+ // cond:
+ // result: (SUB x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMSUB)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM_OpSubPtr(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (SubPtr x y)
+ // cond:
+ // result: (SUB x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMSUB)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM_OpTrunc16to8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Trunc16to8 x)
+ // cond:
+ // result: x
+ for {
+ x := v.Args[0]
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM_OpTrunc32to16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Trunc32to16 x)
+ // cond:
+ // result: x
+ for {
+ x := v.Args[0]
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM_OpTrunc32to8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Trunc32to8 x)
+ // cond:
+ // result: x
+ for {
+ x := v.Args[0]
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+}
func rewriteValueARM_OpXor16(v *Value, config *Config) bool {
b := v.Block
_ = b
@@ -16756,8 +16756,8 @@ func rewriteValueARM_OpZero(v *Value, config *Config) bool {
v.AuxInt = SizeAndAlign(s).Align()
v.AddArg(ptr)
v0 := b.NewValue0(v.Line, OpARMADDconst, ptr.Type)
- v0.AddArg(ptr)
v0.AuxInt = SizeAndAlign(s).Size() - moveSize(SizeAndAlign(s).Align(), config)
+ v0.AddArg(ptr)
v.AddArg(v0)
v1 := b.NewValue0(v.Line, OpARMMOVWconst, config.fe.TypeUInt32())
v1.AuxInt = 0
@@ -16815,12 +16815,12 @@ func rewriteValueARM_OpZeromask(v *Value, config *Config) bool {
for {
x := v.Args[0]
v.reset(OpARMSRAconst)
+ v.AuxInt = 31
v0 := b.NewValue0(v.Line, OpARMRSBshiftRL, config.fe.TypeInt32())
+ v0.AuxInt = 1
v0.AddArg(x)
v0.AddArg(x)
- v0.AuxInt = 1
v.AddArg(v0)
- v.AuxInt = 31
return true
}
}